| repo (string, 2–99 chars) | file (string, 13–225 chars) | code (string, 0–18.3M chars) | file_length (int64, 0–18.3M) | avg_line_length (float64, 0–1.36M) | max_line_length (int64, 0–4.26M) | extension_type (string, 1 class) |
---|---|---|---|---|---|---|
| CANTM | CANTM-main/GateMIcateLib/models/NVDM_ori.py |
import torch
from torch import nn
from torch.nn import init
from torch.nn import functional as F
import math
from .miscLayer import BERT_Embedding, WVHidden, WVClassifier, Identity, Topics, kld, CLSAW_TopicModel_Base
class ORINVDM(CLSAW_TopicModel_Base):
def __init__(self, config, vocab_dim=None):
super().__init__(config=config)
default_config = {}
self.hidden_layer = nn.Linear(vocab_dim, 500)
##############M1###########################################
self.mu_z1 = nn.Linear(500, self.z_dim)
self.log_sigma_z1 = nn.Linear(500, self.z_dim)
self.x_only_topics = Topics(self.z_dim, vocab_dim)
self.h_to_z = Identity()
self.reset_parameters()
def forward(self,x, mask=None, n_samples=1, bow=None, train=False, true_y=None, pre_embd=False, true_y_ids=None):
#print(true_y.shape)
hidden = F.tanh(self.hidden_layer(bow))
mu_z1 = self.mu_z1(hidden)
log_sigma_z1 = self.log_sigma_z1(hidden)
kldz1 = kld(mu_z1, log_sigma_z1)
rec_loss_z1 = 0
classifier_loss = 0
kldz2 = 0
rec_loss_z2 = 0
log_y_hat_rec_loss = 0
class_topic_rec_loss = 0
for i in range(n_samples):
z1 = torch.zeros_like(mu_z1).normal_() * torch.exp(log_sigma_z1) + mu_z1
z1 = self.h_to_z(z1)
log_probz_1 = self.x_only_topics(z1)
rec_loss_z1 = rec_loss_z1-(log_probz_1 * bow).sum(dim=-1)
rec_loss_z1 = rec_loss_z1/n_samples
elbo_z1 = kldz1 + rec_loss_z1
total_loss = elbo_z1.sum()
y_hat_logis = torch.zeros(x.shape[0], self.n_classes)
elbo_z2 = torch.zeros_like(elbo_z1)
classifier_loss = torch.tensor(0)
y = {
'loss': total_loss,
'elbo_xy': elbo_z2,
'rec_loss': rec_loss_z2,
'kld': kldz2,
'cls_loss': classifier_loss,
'class_topic_loss': class_topic_rec_loss,
'y_hat': y_hat_logis,
'elbo_x': elbo_z1
}
return y, None
def reset_parameters(self):
init.zeros_(self.log_sigma_z1.weight)
init.zeros_(self.log_sigma_z1.bias)
def get_topics(self):
return self.x_only_topics.get_topics()
def get_class_topics(self):
return self.x_only_topics.get_topics()
def get_x_only_topics(self):
return self.x_only_topics.get_topics()
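The sampling line in `ORINVDM.forward` above is the reparameterization trick (z = mu + sigma * eps), and the loss is an ELBO of KL plus a bag-of-words reconstruction term. Below is a minimal, self-contained sketch of those two steps. It deliberately does not use the CANTM `config` or the `miscLayer` helpers (`Topics`, `kld`), whose exact conventions are not shown in this file; the linear-softmax decoder and the KL formula are stand-ins that follow the log-σ convention implied by the sampling line.

```python
import torch

batch_size, vocab_dim, z_dim = 4, 1000, 50
bow = torch.rand(batch_size, vocab_dim)        # stand-in bag-of-words counts
mu = torch.zeros(batch_size, z_dim)            # plays the role of mu_z1
log_sigma = torch.zeros(batch_size, z_dim)     # plays the role of log_sigma_z1

# Reparameterization trick: z = mu + sigma * eps with eps ~ N(0, I),
# the same form used in ORINVDM.forward above.
eps = torch.zeros_like(mu).normal_()
z = eps * torch.exp(log_sigma) + mu

# NVDM-style reconstruction term: negative log-likelihood of the bag of words
# under a linear-softmax decoder (the role played by the Topics layer).
decoder = torch.nn.Linear(z_dim, vocab_dim)
log_prob = torch.log_softmax(decoder(z), dim=-1)
rec_loss = -(log_prob * bow).sum(dim=-1)       # one value per document

# Analytic KL to a standard normal prior, assuming log_sigma is log(std);
# the kld() helper in miscLayer may use a different parameterization.
kl = -0.5 * torch.sum(1 + 2 * log_sigma - mu ** 2 - torch.exp(2 * log_sigma), dim=-1)
elbo = kl + rec_loss                           # minimized via elbo.sum()
```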
| 2,445 | 28.46988 | 117 | py |
| CANTM | CANTM-main/GateMIcateLib/models/__init__.py |
from .CLSAW_TopicModel import CLSAW_TopicModel
from .bertSimple import BERT_Simple
from .NVDM import NVDM
from .CLSAW_TopicModel_simple_loss import CLSAW_TopicModelSL
from .NVDM_ori import ORINVDM
from .CLSAW_TopicModelBertEnrich import CLSAW_TopicModel_BERTEN
| 261 | 36.428571 | 63 | py |
| CANTM | CANTM-main/GateMIcateLib/readers/ReaderBase.py |
import random
import math
import json
import torch
class CLSReaderBase:
def __init__(self, postProcessor=None, shuffle=False, config=None):
self.label_count_dict = {}
self.label_weights_list = None
self._readConfigs(config)
self.shuffle = shuffle
self.postProcessor = postProcessor
self.goPoseprocessor = True
def _readConfigs(self, config):
self.target_labels = None
if config:
if 'TARGET' in config:
self.target_labels = config['TARGET'].get('labels')
print(self.target_labels)
def __iter__(self):
if self.shuffle:
random.shuffle(self.all_ids)
self._reset_iter()
return self
def __next__(self):
#print(self.all_ids)
if self.current_sample_idx < len(self.all_ids):
current_sample = self._readNextSample()
self.current_sample_idx += 1
return current_sample
else:
self._reset_iter()
raise StopIteration
def _readNextSample(self):
current_id = self.all_ids[self.current_sample_idx]
#print(current_id)
self.current_sample_dict_id = current_id
current_sample = self.data_dict[current_id]
if self.postProcessor and self.goPoseprocessor:
current_sample = self.postProcessor.postProcess(current_sample)
return current_sample
def preCalculateEmbed(self, embd_net, embd_field, dataType=torch.long, device='cuda:0'):
for sample, _ in self:
x_embd = sample[embd_field]
            input_tensor = torch.tensor([x_embd], dtype=dataType, device=device)
with torch.no_grad():
embd = embd_net(input_tensor)
self.data_dict[self.current_sample_dict_id]['embd'] = embd[0].tolist()
self.postProcessor.embd_ready = True
#pass
def __len__(self):
return len(self.all_ids)
def _reset_iter(self):
if self.shuffle:
random.shuffle(self.all_ids)
self.current_sample_idx = 0
#print(self.all_ids)
self.current_sample_dict_id = self.all_ids[self.current_sample_idx]
def count_samples(self):
self.goPoseprocessor = False
self.label_count_dict = {}
self.label_count_list = [0]*len(self.postProcessor.labelsFields)
for item in self:
#print(item)
annotation = item['selected_label']
annotation_idx = self.postProcessor.labelsFields.index(annotation)
self.label_count_list[annotation_idx] += 1
if annotation not in self.label_count_dict:
self.label_count_dict[annotation] = 0
self.label_count_dict[annotation] += 1
print(self.label_count_dict)
print(self.label_count_list)
self.goPoseprocessor = True
def cal_sample_weights(self):
self.count_samples()
self.label_weights_list = []
max_count = max(self.label_count_list)
for i in range(len(self.label_count_list)):
current_count = self.label_count_list[i]
num_samples = math.ceil(max_count/current_count)
self.label_weights_list.append(num_samples)
print(self.label_weights_list)
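`cal_sample_weights` turns raw label counts into integer oversampling weights via `ceil(max_count / count)`, so the most frequent class gets weight 1 and rarer classes get proportionally larger weights. A tiny worked example (the counts are invented for illustration):

```python
import math

label_count_list = [120, 40, 15]   # hypothetical counts for three labels
max_count = max(label_count_list)
label_weights_list = [math.ceil(max_count / c) for c in label_count_list]
print(label_weights_list)          # [1, 3, 8]
```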
| 3,290 | 31.584158 | 92 | py |
| CANTM | CANTM-main/GateMIcateLib/readers/WVmisInfoReader.py |
import random
import math
import json
import torch
from .ReaderBase import CLSReaderBase
class WVmisInfoDataIter(CLSReaderBase):
def __init__(self, merged_json, label_field='category', **kwargs):
super().__init__(**kwargs)
self.label_field = label_field
self._initReader(merged_json)
self._reset_iter()
def _initReader(self, merged_json):
with open(merged_json, 'r') as f_json:
merged_data = json.load(f_json)
self.all_ids = []
self.data_dict = {}
numberid = 0
for item in merged_data:
select = True
annotation = item[self.label_field]
if self.target_labels:
if annotation not in self.target_labels:
#self.all_ids.append(item['unique_wv_id'])
#self.data_dict[item['unique_wv_id']] = item
select = False
if select:
try:
self.all_ids.append(item['unique_wv_id'])
self.data_dict[item['unique_wv_id']] = item
                except KeyError:  # item lacks 'unique_wv_id'; fall back to a running numeric id
self.all_ids.append(str(numberid))
self.data_dict[str(numberid)] = item
numberid += 1
| 1,266 | 29.902439 | 70 | py |
| CANTM | CANTM-main/GateMIcateLib/readers/__init__.py |
from .WVmisInfoReader import WVmisInfoDataIter
from .aclImdbReader import ACLimdbReader
from .tsvBinaryFolderReader import TsvBinaryFolderReader
| 145 | 35.5 | 56 | py |
| CANTM | CANTM-main/GateMIcateLib/readers/tsvBinaryFolderReader.py |
import random
import math
import json
import torch
import glob
import os
from .ReaderBase import CLSReaderBase
class TsvBinaryFolderReader(CLSReaderBase):
def __init__(self, input_dir, pos_folder='positive', neg_folder='negative', text_filed=1, id_field=0, **kwargs):
super().__init__(**kwargs)
self.text_filed = text_filed
self.id_field = id_field
pos_dir = os.path.join(input_dir, pos_folder)
neg_dir = os.path.join(input_dir, neg_folder)
self._initReader(pos_dir, neg_dir)
self._reset_iter()
def _initReader(self, pos_dir, neg_dir):
self.all_ids = []
self.data_dict = {}
self.global_ids = 0
all_pos_file_list = glob.glob(pos_dir+'/*.tsv')
self._readDir(all_pos_file_list, 'pos')
all_neg_file_list = glob.glob(neg_dir+'/*.tsv')
self._readDir(all_neg_file_list, 'neg')
def _readDir(self, file_list, label):
for each_file in file_list:
self._readFile(each_file, label)
def _readFile(self, current_file, label):
current_text_id = str(self.global_ids)
with open(current_file, 'r') as fin:
for line in fin:
lineTok = line.split('\t')
#print(self.id_field)
                if self.id_field is not None:
                    current_text_id = lineTok[self.id_field]
raw_text = lineTok[self.text_filed]
#print(current_text_id)
if current_text_id not in self.data_dict:
self.data_dict[current_text_id] = {}
self.data_dict[current_text_id]['text'] = raw_text
self.data_dict[current_text_id]['selected_label'] = label
self.all_ids.append(current_text_id)
else:
self.data_dict[current_text_id]['text'] += '\n'+raw_text
self.global_ids += 1
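For reference, `TsvBinaryFolderReader` expects an input directory with `positive/` and `negative/` subfolders of `.tsv` files, where each line is `<id>\t<text>` under the defaults `id_field=0`, `text_filed=1`; lines sharing an id are concatenated into one sample. A minimal usage sketch, with a made-up directory name and no post-processor:

```python
from GateMIcateLib.readers import TsvBinaryFolderReader

# 'my_data/positive/*.tsv' and 'my_data/negative/*.tsv' are hypothetical paths.
reader = TsvBinaryFolderReader('my_data')
print(len(reader))                      # number of distinct text ids
for sample in reader:                   # each sample is a dict built in _readFile
    print(sample['selected_label'], sample['text'][:60])
```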
| 1,959 | 34.636364 | 117 | py |
| CANTM | CANTM-main/GateMIcateLib/readers/aclImdbReader.py |
import random
import math
import json
import torch
import glob
import os
from .ReaderBase import CLSReaderBase
class ACLimdbReader(CLSReaderBase):
def __init__(self, input_dir, **kwargs):
super().__init__(**kwargs)
pos_dir = os.path.join(input_dir,'pos')
neg_dir = os.path.join(input_dir,'neg')
self._initReader(pos_dir, neg_dir)
self._reset_iter()
def _initReader(self, pos_dir, neg_dir):
self.all_ids = []
self.data_dict = {}
self.global_ids = 0
all_pos_file_list = glob.glob(pos_dir+'/*.txt')
self._readDir(all_pos_file_list, 'pos')
all_neg_file_list = glob.glob(neg_dir+'/*.txt')
self._readDir(all_neg_file_list, 'neg')
def _readDir(self, file_list, label):
for each_file in file_list:
self._readFile(each_file, label)
def _readFile(self, current_file, label):
current_text_id = str(self.global_ids)
with open(current_file, 'r') as fin:
all_text = fin.readlines()
raw_text = ' '.join(all_text)
self.data_dict[current_text_id] = {}
self.data_dict[current_text_id]['text'] = raw_text
self.data_dict[current_text_id]['selected_label'] = label
self.all_ids.append(current_text_id)
self.global_ids += 1
| 1,338 | 29.431818 | 69 | py |
| CANTM | CANTM-main/Scholar_patch/run_scholar.py |
import os
import sys
from optparse import OptionParser
from sklearn.metrics import f1_score
import gensim
import numpy as np
import pandas as pd
import file_handling as fh
from scholar import Scholar
def main(args):
usage = "%prog input_dir"
parser = OptionParser(usage=usage)
parser.add_option('-k', dest='n_topics', type=int, default=20,
help='Size of latent representation (~num topics): default=%default')
parser.add_option('-l', dest='learning_rate', type=float, default=0.002,
help='Initial learning rate: default=%default')
parser.add_option('-m', dest='momentum', type=float, default=0.99,
help='beta1 for Adam: default=%default')
parser.add_option('--batch-size', dest='batch_size', type=int, default=200,
help='Size of minibatches: default=%default')
parser.add_option('--epochs', type=int, default=200,
help='Number of epochs: default=%default')
parser.add_option('--train-prefix', type=str, default='train',
help='Prefix of train set: default=%default')
parser.add_option('--test-prefix', type=str, default=None,
help='Prefix of test set: default=%default')
parser.add_option('--labels', type=str, default=None,
help='Read labels from input_dir/[train|test].labels.csv: default=%default')
parser.add_option('--prior-covars', type=str, default=None,
help='Read prior covariates from files with these names (comma-separated): default=%default')
parser.add_option('--topic-covars', type=str, default=None,
help='Read topic covariates from files with these names (comma-separated): default=%default')
parser.add_option('--interactions', action="store_true", default=False,
help='Use interactions between topics and topic covariates: default=%default')
parser.add_option('--covars-predict', action="store_true", default=False,
help='Use covariates as input to classifier: default=%default')
parser.add_option('--min-prior-covar-count', type=int, default=None,
                      help='Drop prior covariates with less than this many non-zero values in the training data: default=%default')
parser.add_option('--min-topic-covar-count', type=int, default=None,
                      help='Drop topic covariates with less than this many non-zero values in the training data: default=%default')
parser.add_option('-r', action="store_true", default=False,
help='Use default regularization: default=%default')
parser.add_option('--l1-topics', type=float, default=0.0,
help='Regularization strength on topic weights: default=%default')
parser.add_option('--l1-topic-covars', type=float, default=0.0,
help='Regularization strength on topic covariate weights: default=%default')
parser.add_option('--l1-interactions', type=float, default=0.0,
help='Regularization strength on topic covariate interaction weights: default=%default')
parser.add_option('--l2-prior-covars', type=float, default=0.0,
help='Regularization strength on prior covariate weights: default=%default')
parser.add_option('-o', dest='output_dir', type=str, default='output',
help='Output directory: default=%default')
parser.add_option('--emb-dim', type=int, default=300,
help='Dimension of input embeddings: default=%default')
parser.add_option('--w2v', dest='word2vec_file', type=str, default=None,
help='Use this word2vec .bin file to initialize and fix embeddings: default=%default')
parser.add_option('--alpha', type=float, default=1.0,
help='Hyperparameter for logistic normal prior: default=%default')
parser.add_option('--no-bg', action="store_true", default=False,
help='Do not use background freq: default=%default')
parser.add_option('--dev-folds', type=int, default=0,
help='Number of dev folds: default=%default')
parser.add_option('--dev-fold', type=int, default=0,
help='Fold to use as dev (if dev_folds > 0): default=%default')
parser.add_option('--device', type=int, default=None,
help='GPU to use: default=%default')
parser.add_option('--seed', type=int, default=None,
help='Random seed: default=%default')
options, args = parser.parse_args(args)
input_dir = args[0]
if options.r:
options.l1_topics = 1.0
options.l1_topic_covars = 1.0
options.l1_interactions = 1.0
if options.seed is not None:
rng = np.random.RandomState(options.seed)
seed = options.seed
else:
rng = np.random.RandomState(np.random.randint(0, 100000))
seed = None
# load the training data
train_X, vocab, row_selector, train_ids = load_word_counts(input_dir, options.train_prefix)
train_labels, label_type, label_names, n_labels = load_labels(input_dir, options.train_prefix, row_selector, options)
train_prior_covars, prior_covar_selector, prior_covar_names, n_prior_covars = load_covariates(input_dir, options.train_prefix, row_selector, options.prior_covars, options.min_prior_covar_count)
train_topic_covars, topic_covar_selector, topic_covar_names, n_topic_covars = load_covariates(input_dir, options.train_prefix, row_selector, options.topic_covars, options.min_topic_covar_count)
options.n_train, vocab_size = train_X.shape
options.n_labels = n_labels
if n_labels > 0:
print("Train label proportions:", np.mean(train_labels, axis=0))
# split into training and dev if desired
train_indices, dev_indices = train_dev_split(options, rng)
train_X, dev_X = split_matrix(train_X, train_indices, dev_indices)
train_labels, dev_labels = split_matrix(train_labels, train_indices, dev_indices)
train_prior_covars, dev_prior_covars = split_matrix(train_prior_covars, train_indices, dev_indices)
train_topic_covars, dev_topic_covars = split_matrix(train_topic_covars, train_indices, dev_indices)
if dev_indices is not None:
dev_ids = [train_ids[i] for i in dev_indices]
train_ids = [train_ids[i] for i in train_indices]
else:
dev_ids = None
n_train, _ = train_X.shape
# load the test data
if options.test_prefix is not None:
test_X, _, row_selector, test_ids = load_word_counts(input_dir, options.test_prefix, vocab=vocab)
test_labels, _, _, _ = load_labels(input_dir, options.test_prefix, row_selector, options)
test_prior_covars, _, _, _ = load_covariates(input_dir, options.test_prefix, row_selector, options.prior_covars, covariate_selector=prior_covar_selector)
test_topic_covars, _, _, _ = load_covariates(input_dir, options.test_prefix, row_selector, options.topic_covars, covariate_selector=topic_covar_selector)
n_test, _ = test_X.shape
else:
test_X = None
n_test = 0
test_labels = None
test_prior_covars = None
test_topic_covars = None
# initialize the background using overall word frequencies
init_bg = get_init_bg(train_X)
if options.no_bg:
init_bg = np.zeros_like(init_bg)
# combine the network configuration parameters into a dictionary
network_architecture = make_network(options, vocab_size, label_type, n_labels, n_prior_covars, n_topic_covars)
print("Network architecture:")
for key, val in network_architecture.items():
print(key + ':', val)
# load word vectors
embeddings, update_embeddings = load_word_vectors(options, rng, vocab)
# create the model
model = Scholar(network_architecture, alpha=options.alpha, learning_rate=options.learning_rate, init_embeddings=embeddings, update_embeddings=update_embeddings, init_bg=init_bg, adam_beta1=options.momentum, device=options.device, seed=seed, classify_from_covars=options.covars_predict)
# train the model
print("Optimizing full model")
model = train(model, network_architecture, train_X, train_labels, train_prior_covars, train_topic_covars, training_epochs=options.epochs, batch_size=options.batch_size, rng=rng, X_dev=dev_X, Y_dev=dev_labels, PC_dev=dev_prior_covars, TC_dev=dev_topic_covars)
# make output directory
fh.makedirs(options.output_dir)
# display and save weights
print_and_save_weights(options, model, vocab, prior_covar_names, topic_covar_names)
# Evaluate perplexity on dev and test data
if dev_X is not None:
perplexity = evaluate_perplexity(model, dev_X, dev_labels, dev_prior_covars, dev_topic_covars, options.batch_size, eta_bn_prop=0.0)
print("Dev perplexity = %0.4f" % perplexity)
fh.write_list_to_text([str(perplexity)], os.path.join(options.output_dir, 'perplexity.dev.txt'))
if test_X is not None:
perplexity = evaluate_perplexity(model, test_X, test_labels, test_prior_covars, test_topic_covars, options.batch_size, eta_bn_prop=0.0)
print("Test perplexity = %0.4f" % perplexity)
fh.write_list_to_text([str(perplexity)], os.path.join(options.output_dir, 'perplexity.test.txt'))
# evaluate accuracy on predicting labels
if n_labels > 0:
print("Predicting labels")
predict_labels_and_evaluate(model, train_X, train_labels, train_prior_covars, train_topic_covars, options.output_dir, subset='train')
if dev_X is not None:
predict_labels_and_evaluate(model, dev_X, dev_labels, dev_prior_covars, dev_topic_covars, options.output_dir, subset='dev')
if test_X is not None:
predict_labels_and_evaluate(model, test_X, test_labels, test_prior_covars, test_topic_covars, options.output_dir, subset='test')
# print label probabilities for each topic
if n_labels > 0:
print_topic_label_associations(options, label_names, model, n_prior_covars, n_topic_covars)
# save document representations
print("Saving document representations")
save_document_representations(model, train_X, train_labels, train_prior_covars, train_topic_covars, train_ids, options.output_dir, 'train', batch_size=options.batch_size)
if dev_X is not None:
save_document_representations(model, dev_X, dev_labels, dev_prior_covars, dev_topic_covars, dev_ids, options.output_dir, 'dev', batch_size=options.batch_size)
if n_test > 0:
save_document_representations(model, test_X, test_labels, test_prior_covars, test_topic_covars, test_ids, options.output_dir, 'test', batch_size=options.batch_size)
def load_word_counts(input_dir, input_prefix, vocab=None):
print("Loading data")
    # load the word counts (kept as a sparse CSR matrix)
#temp = fh.load_sparse(os.path.join(input_dir, input_prefix + '.npz')).todense()
#X = np.array(temp, dtype='float32')
X = fh.load_sparse(os.path.join(input_dir, input_prefix + '.npz')).tocsr()
# load the vocabulary
if vocab is None:
vocab = fh.read_json(os.path.join(input_dir, input_prefix + '.vocab.json'))
n_items, vocab_size = X.shape
assert vocab_size == len(vocab)
print("Loaded %d documents with %d features" % (n_items, vocab_size))
ids = fh.read_json(os.path.join(input_dir, input_prefix + '.ids.json'))
# filter out empty documents and return a boolean selector for filtering labels and covariates
#row_selector = np.array(X.sum(axis=1) > 0, dtype=bool)
row_sums = np.array(X.sum(axis=1)).reshape((n_items,))
row_selector = np.array(row_sums > 0, dtype=bool)
print("Found %d non-empty documents" % np.sum(row_selector))
X = X[row_selector, :]
ids = [doc_id for i, doc_id in enumerate(ids) if row_selector[i]]
return X, vocab, row_selector, ids
def load_labels(input_dir, input_prefix, row_selector, options):
labels = None
label_type = None
label_names = None
n_labels = 0
# load the label file if given
if options.labels is not None:
label_file = os.path.join(input_dir, input_prefix + '.' + options.labels + '.csv')
if os.path.exists(label_file):
print("Loading labels from", label_file)
temp = pd.read_csv(label_file, header=0, index_col=0)
label_names = temp.columns
labels = np.array(temp.values)
# select the rows that match the non-empty documents (from load_word_counts)
labels = labels[row_selector, :]
n, n_labels = labels.shape
print("Found %d labels" % n_labels)
else:
raise(FileNotFoundError("Label file {:s} not found".format(label_file)))
return labels, label_type, label_names, n_labels
def load_covariates(input_dir, input_prefix, row_selector, covars_to_load, min_count=None, covariate_selector=None):
covariates = None
covariate_names = None
n_covariates = 0
if covars_to_load is not None:
covariate_list = []
covariate_names_list = []
covar_file_names = covars_to_load.split(',')
# split the given covariate names by commas, and load each one
for covar_file_name in covar_file_names:
covariates_file = os.path.join(input_dir, input_prefix + '.' + covar_file_name + '.csv')
if os.path.exists(covariates_file):
print("Loading covariates from", covariates_file)
temp = pd.read_csv(covariates_file, header=0, index_col=0)
covariate_names = temp.columns
covariates = np.array(temp.values, dtype=np.float32)
# select the rows that match the non-empty documents (from load_word_counts)
covariates = covariates[row_selector, :]
covariate_list.append(covariates)
covariate_names_list.extend(covariate_names)
else:
raise(FileNotFoundError("Covariates file {:s} not found".format(covariates_file)))
# combine the separate covariates into a single matrix
covariates = np.hstack(covariate_list)
covariate_names = covariate_names_list
_, n_covariates = covariates.shape
# if a covariate_selector has been given (from a previous call of load_covariates), drop columns
if covariate_selector is not None:
covariates = covariates[:, covariate_selector]
covariate_names = [name for i, name in enumerate(covariate_names) if covariate_selector[i]]
n_covariates = len(covariate_names)
# otherwise, choose which columns to drop based on how common they are (for binary covariates)
elif min_count is not None and int(min_count) > 0:
print("Removing rare covariates")
covar_sums = covariates.sum(axis=0).reshape((n_covariates, ))
covariate_selector = covar_sums > int(min_count)
covariates = covariates[:, covariate_selector]
covariate_names = [name for i, name in enumerate(covariate_names) if covariate_selector[i]]
n_covariates = len(covariate_names)
return covariates, covariate_selector, covariate_names, n_covariates
def train_dev_split(options, rng):
# randomly split into train and dev
if options.dev_folds > 0:
n_dev = int(options.n_train / options.dev_folds)
indices = np.array(range(options.n_train), dtype=int)
rng.shuffle(indices)
if options.dev_fold < options.dev_folds - 1:
dev_indices = indices[n_dev * options.dev_fold: n_dev * (options.dev_fold +1)]
else:
dev_indices = indices[n_dev * options.dev_fold:]
train_indices = list(set(indices) - set(dev_indices))
return train_indices, dev_indices
else:
return None, None
def split_matrix(train_X, train_indices, dev_indices):
# split a matrix (word counts, labels, or covariates), into train and dev
if train_X is not None and dev_indices is not None:
dev_X = train_X[dev_indices, :]
train_X = train_X[train_indices, :]
return train_X, dev_X
else:
return train_X, None
def get_init_bg(data):
#Compute the log background frequency of all words
#sums = np.sum(data, axis=0)+1
n_items, vocab_size = data.shape
sums = np.array(data.sum(axis=0)).reshape((vocab_size,))+1.
print("Computing background frequencies")
print("Min/max word counts in training data: %d %d" % (int(np.min(sums)), int(np.max(sums))))
bg = np.array(np.log(sums) - np.log(float(np.sum(sums))), dtype=np.float32)
return bg
def load_word_vectors(options, rng, vocab):
# load word2vec vectors if given
if options.word2vec_file is not None:
vocab_size = len(vocab)
vocab_dict = dict(zip(vocab, range(vocab_size)))
        # randomly initialize word vectors for each term in the vocabulary
embeddings = np.array(rng.rand(options.emb_dim, vocab_size) * 0.25 - 0.5, dtype=np.float32)
count = 0
print("Loading word vectors")
# load the word2vec vectors
pretrained = gensim.models.KeyedVectors.load_word2vec_format(options.word2vec_file, binary=True)
# replace the randomly initialized vectors with the word2vec ones for any that are available
for word, index in vocab_dict.items():
if word in pretrained:
count += 1
embeddings[:, index] = pretrained[word]
print("Found embeddings for %d words" % count)
update_embeddings = False
else:
embeddings = None
update_embeddings = True
return embeddings, update_embeddings
def make_network(options, vocab_size, label_type=None, n_labels=0, n_prior_covars=0, n_topic_covars=0):
# Assemble the network configuration parameters into a dictionary
network_architecture = \
dict(embedding_dim=options.emb_dim,
n_topics=options.n_topics,
vocab_size=vocab_size,
label_type=label_type,
n_labels=n_labels,
n_prior_covars=n_prior_covars,
n_topic_covars=n_topic_covars,
l1_beta_reg=options.l1_topics,
l1_beta_c_reg=options.l1_topic_covars,
l1_beta_ci_reg=options.l1_interactions,
l2_prior_reg=options.l2_prior_covars,
classifier_layers=1,
use_interactions=options.interactions,
)
return network_architecture
def train(model, network_architecture, X, Y, PC, TC, batch_size=200, training_epochs=100, display_step=10, X_dev=None, Y_dev=None, PC_dev=None, TC_dev=None, bn_anneal=True, init_eta_bn_prop=1.0, rng=None, min_weights_sq=1e-7):
# Train the model
n_train, vocab_size = X.shape
mb_gen = create_minibatch(X, Y, PC, TC, batch_size=batch_size, rng=rng)
total_batch = int(n_train / batch_size)
batches = 0
eta_bn_prop = init_eta_bn_prop # interpolation between batch norm and no batch norm in final layer of recon
model.train()
n_topics = network_architecture['n_topics']
n_topic_covars = network_architecture['n_topic_covars']
vocab_size = network_architecture['vocab_size']
# create matrices to track the current estimates of the priors on the individual weights
if network_architecture['l1_beta_reg'] > 0:
l1_beta = 0.5 * np.ones([vocab_size, n_topics], dtype=np.float32) / float(n_train)
else:
l1_beta = None
if network_architecture['l1_beta_c_reg'] > 0 and network_architecture['n_topic_covars'] > 0:
l1_beta_c = 0.5 * np.ones([vocab_size, n_topic_covars], dtype=np.float32) / float(n_train)
else:
l1_beta_c = None
if network_architecture['l1_beta_ci_reg'] > 0 and network_architecture['n_topic_covars'] > 0 and network_architecture['use_interactions']:
l1_beta_ci = 0.5 * np.ones([vocab_size, n_topics * n_topic_covars], dtype=np.float32) / float(n_train)
else:
l1_beta_ci = None
# Training cycle
for epoch in range(training_epochs):
avg_cost = 0.
accuracy = 0.
avg_nl = 0.
avg_kld = 0.
# Loop over all batches
for i in range(total_batch):
# get a minibatch
batch_xs, batch_ys, batch_pcs, batch_tcs = next(mb_gen)
# do one minibatch update
cost, recon_y, thetas, nl, kld = model.fit(batch_xs, batch_ys, batch_pcs, batch_tcs, eta_bn_prop=eta_bn_prop, l1_beta=l1_beta, l1_beta_c=l1_beta_c, l1_beta_ci=l1_beta_ci)
# compute accuracy on minibatch
if network_architecture['n_labels'] > 0:
accuracy += np.sum(np.argmax(recon_y, axis=1) == np.argmax(batch_ys, axis=1)) / float(n_train)
# Compute average loss
avg_cost += float(cost) / n_train * batch_size
avg_nl += float(nl) / n_train * batch_size
avg_kld += float(kld) / n_train * batch_size
batches += 1
if np.isnan(avg_cost):
                print(epoch, i, np.sum(batch_xs, 1).astype(int), batch_xs.shape)
print('Encountered NaN, stopping training. Please check the learning_rate settings and the momentum.')
sys.exit()
# if we're using regularization, update the priors on the individual weights
if network_architecture['l1_beta_reg'] > 0:
weights = model.get_weights().T
weights_sq = weights ** 2
# avoid infinite regularization
weights_sq[weights_sq < min_weights_sq] = min_weights_sq
l1_beta = 0.5 / weights_sq / float(n_train)
if network_architecture['l1_beta_c_reg'] > 0 and network_architecture['n_topic_covars'] > 0:
weights = model.get_covar_weights().T
weights_sq = weights ** 2
weights_sq[weights_sq < min_weights_sq] = min_weights_sq
l1_beta_c = 0.5 / weights_sq / float(n_train)
if network_architecture['l1_beta_ci_reg'] > 0 and network_architecture['n_topic_covars'] > 0 and network_architecture['use_interactions']:
weights = model.get_covar_interaction_weights().T
weights_sq = weights ** 2
weights_sq[weights_sq < min_weights_sq] = min_weights_sq
l1_beta_ci = 0.5 / weights_sq / float(n_train)
# Display logs per epoch step
if epoch % display_step == 0 and epoch > 0:
if network_architecture['n_labels'] > 0:
print("Epoch:", '%d' % epoch, "; cost =", "{:.9f}".format(avg_cost), "; training accuracy (noisy) =", "{:.9f}".format(accuracy))
else:
print("Epoch:", '%d' % epoch, "cost=", "{:.9f}".format(avg_cost))
if X_dev is not None:
# switch to eval mode for intermediate evaluation
model.eval()
dev_perplexity = evaluate_perplexity(model, X_dev, Y_dev, PC_dev, TC_dev, batch_size, eta_bn_prop=eta_bn_prop)
n_dev, _ = X_dev.shape
if network_architecture['n_labels'] > 0:
dev_pred_probs = predict_label_probs(model, X_dev, PC_dev, TC_dev, eta_bn_prop=eta_bn_prop)
dev_predictions = np.argmax(dev_pred_probs, axis=1)
dev_accuracy = float(np.sum(dev_predictions == np.argmax(Y_dev, axis=1))) / float(n_dev)
print("Epoch: %d; Dev perplexity = %0.4f; Dev accuracy = %0.4f" % (epoch, dev_perplexity, dev_accuracy))
else:
print("Epoch: %d; Dev perplexity = %0.4f" % (epoch, dev_perplexity))
# switch back to training mode
model.train()
# anneal eta_bn_prop from 1.0 to 0.0 over training
if bn_anneal:
if eta_bn_prop > 0:
eta_bn_prop -= 1.0 / float(0.75 * training_epochs)
if eta_bn_prop < 0:
eta_bn_prop = 0.0
# finish training
model.eval()
return model
def create_minibatch(X, Y, PC, TC, batch_size=200, rng=None):
# Yield a random minibatch
while True:
# Return random data samples of a size 'minibatch_size' at each iteration
if rng is not None:
ixs = rng.randint(X.shape[0], size=batch_size)
else:
ixs = np.random.randint(X.shape[0], size=batch_size)
X_mb = np.array(X[ixs, :].todense()).astype('float32')
if Y is not None:
Y_mb = Y[ixs, :].astype('float32')
else:
Y_mb = None
if PC is not None:
PC_mb = PC[ixs, :].astype('float32')
else:
PC_mb = None
if TC is not None:
TC_mb = TC[ixs, :].astype('float32')
else:
TC_mb = None
yield X_mb, Y_mb, PC_mb, TC_mb
def get_minibatch(X, Y, PC, TC, batch, batch_size=200):
# Get a particular non-random segment of the data
n_items, _ = X.shape
n_batches = int(np.ceil(n_items / float(batch_size)))
if batch < n_batches - 1:
ixs = np.arange(batch * batch_size, (batch + 1) * batch_size)
else:
ixs = np.arange(batch * batch_size, n_items)
X_mb = np.array(X[ixs, :].todense()).astype('float32')
if Y is not None:
Y_mb = Y[ixs, :].astype('float32')
else:
Y_mb = None
if PC is not None:
PC_mb = PC[ixs, :].astype('float32')
else:
PC_mb = None
if TC is not None:
TC_mb = TC[ixs, :].astype('float32')
else:
TC_mb = None
return X_mb, Y_mb, PC_mb, TC_mb
def predict_label_probs(model, X, PC, TC, batch_size=200, eta_bn_prop=0.0):
# Predict a probability distribution over labels for each instance using the classifier part of the network
n_items, _ = X.shape
n_batches = int(np.ceil(n_items / batch_size))
pred_probs_all = []
# make predictions on minibatches and then combine
for i in range(n_batches):
batch_xs, batch_ys, batch_pcs, batch_tcs = get_minibatch(X, None, PC, TC, i, batch_size)
Z, pred_probs = model.predict(batch_xs, batch_pcs, batch_tcs, eta_bn_prop=eta_bn_prop)
pred_probs_all.append(pred_probs)
pred_probs = np.vstack(pred_probs_all)
return pred_probs
def print_and_save_weights(options, model, vocab, prior_covar_names=None, topic_covar_names=None):
# print background
bg = model.get_bg()
if not options.no_bg:
print_top_bg(bg, vocab)
# print topics
emb = model.get_weights()
print("Topics:")
maw, sparsity = print_top_words(emb, vocab)
print("sparsity in topics = %0.4f" % sparsity)
save_weights(options.output_dir, emb, bg, vocab, sparsity_threshold=1e-5)
fh.write_list_to_text(['{:.4f}'.format(maw)], os.path.join(options.output_dir, 'maw.txt'))
fh.write_list_to_text(['{:.4f}'.format(sparsity)], os.path.join(options.output_dir, 'sparsity.txt'))
if prior_covar_names is not None:
prior_weights = model.get_prior_weights()
print("Topic prior associations:")
print("Covariates:", ' '.join(prior_covar_names))
for k in range(options.n_topics):
output = str(k) + ': '
for c in range(len(prior_covar_names)):
output += '%.4f ' % prior_weights[c, k]
print(output)
if options.output_dir is not None:
np.savez(os.path.join(options.output_dir, 'prior_w.npz'), weights=prior_weights, names=prior_covar_names)
if topic_covar_names is not None:
beta_c = model.get_covar_weights()
print("Covariate deviations:")
maw, sparsity = print_top_words(beta_c, vocab, topic_covar_names)
print("sparsity in covariates = %0.4f" % sparsity)
if options.output_dir is not None:
np.savez(os.path.join(options.output_dir, 'beta_c.npz'), beta=beta_c, names=topic_covar_names)
if options.interactions:
print("Covariate interactions")
beta_ci = model.get_covar_interaction_weights()
print(beta_ci.shape)
if topic_covar_names is not None:
names = [str(k) + ':' + c for k in range(options.n_topics) for c in topic_covar_names]
else:
names = None
maw, sparsity = print_top_words(beta_ci, vocab, names)
if options.output_dir is not None:
np.savez(os.path.join(options.output_dir, 'beta_ci.npz'), beta=beta_ci, names=names)
print("sparsity in covariate interactions = %0.4f" % sparsity)
def print_top_words(beta, feature_names, topic_names=None, n_pos=10, n_neg=10, sparsity_threshold=1e-5, values=False):
"""
    Display the highest and lowest weighted words in each topic, along with the mean average weight and sparsity
"""
sparsity_vals = []
maw_vals = []
for i in range(len(beta)):
# sort the beta weights
order = list(np.argsort(beta[i]))
order.reverse()
output = ''
# get the top words
for j in range(n_pos):
if np.abs(beta[i][order[j]]) > sparsity_threshold:
output += feature_names[order[j]] + ' '
if values:
output += '(' + str(beta[i][order[j]]) + ') '
order.reverse()
if n_neg > 0:
output += ' / '
# get the bottom words
for j in range(n_neg):
if np.abs(beta[i][order[j]]) > sparsity_threshold:
output += feature_names[order[j]] + ' '
if values:
output += '(' + str(beta[i][order[j]]) + ') '
# compute sparsity
sparsity = float(np.sum(np.abs(beta[i]) < sparsity_threshold) / float(len(beta[i])))
maw = np.mean(np.abs(beta[i]))
sparsity_vals.append(sparsity)
maw_vals.append(maw)
output += '; sparsity=%0.4f' % sparsity
# print the topic summary
if topic_names is not None:
output = topic_names[i] + ': ' + output
else:
output = str(i) + ': ' + output
print(output)
# return mean average weight and sparsity
return np.mean(maw_vals), np.mean(sparsity_vals)
def print_top_bg(bg, feature_names, n_top_words=10):
# Print the most highly weighted words in the background log frequency
print('Background frequencies of top words:')
print(" ".join([feature_names[j]
for j in bg.argsort()[:-n_top_words - 1:-1]]))
temp = bg.copy()
temp.sort()
print(np.exp(temp[:-n_top_words-1:-1]))
def evaluate_perplexity(model, X, Y, PC, TC, batch_size, eta_bn_prop=0.0):
# Evaluate the approximate perplexity on a subset of the data (using words, labels, and covariates)
n_items, vocab_size = X.shape
doc_sums = np.array(X.sum(axis=1), dtype=float).reshape((n_items,))
X = X.astype('float32')
if Y is not None:
Y = Y.astype('float32')
if PC is not None:
PC = PC.astype('float32')
if TC is not None:
TC = TC.astype('float32')
losses = []
n_items, _ = X.shape
n_batches = int(np.ceil(n_items / batch_size))
for i in range(n_batches):
batch_xs, batch_ys, batch_pcs, batch_tcs = get_minibatch(X, Y, PC, TC, i, batch_size)
batch_losses = model.get_losses(batch_xs, batch_ys, batch_pcs, batch_tcs, eta_bn_prop=eta_bn_prop)
losses.append(batch_losses)
losses = np.hstack(losses)
perplexity = np.exp(np.mean(losses / doc_sums))
return perplexity
def save_weights(output_dir, beta, bg, feature_names, sparsity_threshold=1e-5):
    # Save model weights to npz files (also the top words in each topic)
np.savez(os.path.join(output_dir, 'beta.npz'), beta=beta)
if bg is not None:
np.savez(os.path.join(output_dir, 'bg.npz'), bg=bg)
fh.write_to_json(feature_names, os.path.join(output_dir, 'vocab.json'), sort_keys=False)
topics_file = os.path.join(output_dir, 'topics.txt')
lines = []
for i in range(len(beta)):
order = list(np.argsort(beta[i]))
order.reverse()
pos_words = [feature_names[j] for j in order[:100] if beta[i][j] > sparsity_threshold]
output = ' '.join(pos_words)
lines.append(output)
fh.write_list_to_text(lines, topics_file)
def predict_labels_and_evaluate(model, X, Y, PC, TC, output_dir=None, subset='train', batch_size=200):
# Predict labels for all instances using the classifier network and evaluate the accuracy
pred_probs = predict_label_probs(model, X, PC, TC, batch_size, eta_bn_prop=0.0)
np.savez(os.path.join(output_dir, 'pred_probs.' + subset + '.npz'), pred_probs=pred_probs)
predictions = np.argmax(pred_probs, axis=1)
y_true = np.argmax(Y, axis=1)
f1_macro = f1_score(y_true, predictions, average='macro')
f1_micro = f1_score(y_true, predictions, average='micro')
f1_none = f1_score(y_true, predictions, average=None)
print("f1_macro = %0.4f" % f1_macro)
print("f1_micro = %0.4f" % f1_micro)
print("f1_no_ave = %s" % f1_none)
accuracy = float(np.sum(predictions == np.argmax(Y, axis=1)) / float(len(Y)))
print(subset, "accuracy on labels = %0.4f" % accuracy)
if output_dir is not None:
fh.write_list_to_text([str(accuracy)], os.path.join(output_dir, 'accuracy.' + subset + '.txt'))
def print_topic_label_associations(options, label_names, model, n_prior_covars, n_topic_covars):
# Print associations between topics and labels
if options.n_labels > 0 and options.n_labels < 7:
print("Label probabilities based on topics")
print("Labels:", ' '.join([name for name in label_names]))
probs_list = []
for k in range(options.n_topics):
Z = np.zeros([1, options.n_topics]).astype('float32')
Z[0, k] = 1.0
Y = None
if n_prior_covars > 0:
PC = np.zeros([1, n_prior_covars]).astype('float32')
else:
PC = None
if n_topic_covars > 0:
TC = np.zeros([1, n_topic_covars]).astype('float32')
else:
TC = None
probs = model.predict_from_topics(Z, PC, TC)
probs_list.append(probs)
if options.n_labels > 0 and options.n_labels < 7:
output = str(k) + ': '
for i in range(options.n_labels):
output += '%.4f ' % probs[0, i]
print(output)
probs = np.vstack(probs_list)
np.savez(os.path.join(options.output_dir, 'topics_to_labels.npz'), probs=probs, label=label_names)
def save_document_representations(model, X, Y, PC, TC, ids, output_dir, partition, batch_size=200):
    # compute the mean of the posterior of the latent representation for each document and save it
if Y is not None:
Y = np.zeros_like(Y)
n_items, _ = X.shape
n_batches = int(np.ceil(n_items / batch_size))
thetas = []
for i in range(n_batches):
batch_xs, batch_ys, batch_pcs, batch_tcs = get_minibatch(X, Y, PC, TC, i, batch_size)
thetas.append(model.compute_theta(batch_xs, batch_ys, batch_pcs, batch_tcs))
theta = np.vstack(thetas)
np.savez(os.path.join(output_dir, 'theta.' + partition + '.npz'), theta=theta, ids=ids)
if __name__ == '__main__':
main(sys.argv[1:])
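Invocation sketch for the patched `run_scholar.py` (the directory and label-file names below are made up): `load_word_counts` expects `<prefix>.npz`, `<prefix>.vocab.json` and `<prefix>.ids.json` in the input directory, and `--labels NAME` additionally reads `<prefix>.NAME.csv`. The script can be driven from the command line or, equivalently, by calling `main()` with the same argument list:

```python
# Equivalent to:
#   python run_scholar.py data/my_corpus -k 50 --epochs 200 \
#       --train-prefix train --test-prefix test --labels labels -o output
main([
    "data/my_corpus",        # input_dir (positional; hypothetical path)
    "-k", "50",              # number of topics
    "--epochs", "200",
    "--train-prefix", "train",
    "--test-prefix", "test",
    "--labels", "labels",    # reads train.labels.csv / test.labels.csv
    "-o", "output",
])
```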
| 35,249 | 44.019157 | 289 | py |
| kgx | kgx-master/examples/scripts/read_from_neo4j.py |
import os
import argparse
from kgx.transformer import Transformer
"""
A script that demonstrates how to read edges and nodes from Neo4j.
"""
def usage():
print("""
usage: read_from_neo4j.py --edge_filter subject_category=biolink:Gene
                          --edge_filter object_category=biolink:Disease
                          --edge_filter edge_label=biolink:involved_in
""")
parser = argparse.ArgumentParser(description='Read graph (or subgraph) from Neo4j')
parser.add_argument('--uri', help='URI/URL for Neo4j (including port)', default='localhost:7474')
parser.add_argument('--username', help='username', default='neo4j')
parser.add_argument('--password', help='password', default='demo')
args = parser.parse_args()
input_args = {
'uri': args.uri,
'username': args.username,
'password': args.password,
'format': 'neo4j'
}
# Initialize Transformer
t = Transformer()
t.transform(input_args)
print(f"Number of nodes from Neo4j: {t.store.graph.number_of_nodes()}")
print(f"Number of edges from Neo4j: {t.store.graph.number_of_edges()}")
| 1,088 | 29.25 | 97 | py |
| kgx | kgx-master/examples/scripts/convert_nt_to_tsv.py |
import sys
import argparse
from kgx.transformer import Transformer
"""
A loader script that demonstrates how to convert an RDF N-Triple (*.nt)
to TSV format.
"""
def usage():
print("""
usage: convert_nt_to_tsv.py --input triples.nt --output output
""")
parser = argparse.ArgumentParser(description='Load edges and nodes into Neo4j')
parser.add_argument('--input', help='RDF N-Triple file')
parser.add_argument('--output', help='Output file name')
args = parser.parse_args()
if args.input is None or args.output is None:
usage()
exit()
input_args = {
'filename': [args.input],
'format': 'nt'
}
output_args = {
'filename': args.output,
'format': 'tsv'
}
# Initialize NtTransformer
t = Transformer()
# Transform NT
t.transform(input_args, output_args)
| 794 | 17.928571 | 79 | py |
| kgx | kgx-master/examples/scripts/load_tsv_to_neo4j.py |
import os
import argparse
from kgx.transformer import Transformer
"""
A loader script that demonstrates how to load edges and nodes into Neo4j.
"""
def usage():
print("""
usage: load_tsv_to_neo4j.py --nodes nodes.tsv --edges edges.tsv
""")
parser = argparse.ArgumentParser(description='Load edges and nodes into Neo4j')
parser.add_argument('--nodes', help='file with nodes in TSV format')
parser.add_argument('--edges', help='file with edges in TSV format')
parser.add_argument('--uri', help='URI/URL for Neo4j (including port)', default='localhost:7474')
parser.add_argument('--username', help='username', default='neo4j')
parser.add_argument('--password', help='password', default='demo')
args = parser.parse_args()
if args.nodes is None and args.edges is None:
usage()
exit()
filename = []
if args.nodes:
filename.append(args.nodes)
if args.edges:
filename.append(args.edges)
input_args = {
'filename': filename,
'format': 'tsv'
}
output_args = {
'uri': args.uri,
'username': args.username,
'password': args.password,
'format': 'neo4j'
}
# Initialize Transformer
t = Transformer()
t.transform(input_args, output_args)
| 1,177 | 24.06383 | 97 | py |
| kgx | kgx-master/examples/scripts/load_csv_to_neo4j.py |
import os
import argparse
from kgx.transformer import Transformer
"""
A loader script that demonstrates how to load edges and nodes into Neo4j.
"""
def usage():
print("""
usage: load_csv_to_neo4j.py --nodes nodes.csv --edges edges.csv
""")
parser = argparse.ArgumentParser(description='Load edges and nodes into Neo4j')
parser.add_argument('--nodes', help='file with nodes in CSV format')
parser.add_argument('--edges', help='file with edges in CSV format')
parser.add_argument('--uri', help='URI/URL for Neo4j (including port)', default='localhost:7474')
parser.add_argument('--username', help='username', default='neo4j')
parser.add_argument('--password', help='password', default='demo')
args = parser.parse_args()
if args.nodes is None and args.edges is None:
usage()
exit()
filename = []
if args.nodes:
filename.append(args.nodes)
if args.edges:
filename.append(args.edges)
input_args = {
'filename': filename,
'format': 'csv'
}
output_args = {
'uri': args.uri,
'username': args.username,
'password': args.password,
'format': 'neo4j'
}
# Initialize Transformer
t = Transformer()
t.transform(input_args, output_args)
| 1,177 | 24.06383 | 97 | py |
| kgx | kgx-master/kgx/prefix_manager.py |
import re
from typing import Dict, Optional
import prefixcommons.curie_util as cu
from cachetools import LRUCache, cached
from kgx.config import get_jsonld_context, get_logger
from kgx.utils.kgx_utils import contract, expand
log = get_logger()
class PrefixManager(object):
"""
Manages prefix mappings.
    These include mappings for CURIEs such as GO:0008150, as well as shortforms
    such as Biolink class names (e.g. Disease).
"""
DEFAULT_NAMESPACE = "https://www.example.org/UNKNOWN/"
prefix_map: Dict[str, str]
reverse_prefix_map: Dict[str, str]
def __init__(self, url: str = None):
"""
Initialize an instance of PrefixManager.
Parameters
----------
url: str
The URL from which to read a JSON-LD context for prefix mappings
"""
if url:
context = cu.read_remote_jsonld_context(url)
else:
context = get_jsonld_context()
self.set_prefix_map(context)
def set_prefix_map(self, m: Dict) -> None:
"""
Populate `prefix_map` with contents from a JSON-LD context from self.url
Parameters
----------
m: dict
Dictionary of prefix to URI mappings
"""
self.prefix_map = {}
for k, v in m.items():
if isinstance(v, str):
self.prefix_map[k] = v
else:
self.prefix_map[k] = v.get("@id")
if "biolink" not in self.prefix_map:
self.prefix_map["biolink"] = (
self.prefix_map["@vocab"]
if "@vocab" in self.prefix_map
else "https://w3id.org/biolink/vocab/"
)
if "owlstar" not in self.prefix_map:
self.prefix_map["owlstar"] = "http://w3id.org/owlstar/"
if "@vocab" in self.prefix_map:
del self.prefix_map["@vocab"]
if "MONARCH" not in self.prefix_map:
self.prefix_map["MONARCH"] = "https://monarchinitiative.org/"
self.prefix_map["MONARCH_NODE"] = "https://monarchinitiative.org/MONARCH_"
if "" in self.prefix_map:
log.info(
f"Replacing default prefix mapping from {self.prefix_map['']} to 'www.example.org/UNKNOWN/'"
)
else:
self.prefix_map[""] = self.DEFAULT_NAMESPACE
self.reverse_prefix_map = {y: x for x, y in self.prefix_map.items()}
def update_prefix_map(self, m: Dict[str, str]) -> None:
"""
Update prefix maps with new mappings.
Parameters
----------
m: Dict
New prefix to IRI mappings
"""
for k, v in m.items():
self.prefix_map[k] = v
def update_reverse_prefix_map(self, m: Dict[str, str]) -> None:
"""
Update reverse prefix maps with new mappings.
Parameters
----------
m: Dict
New IRI to prefix mappings
"""
self.reverse_prefix_map.update(m)
@cached(LRUCache(maxsize=1024))
def expand(self, curie: str, fallback: bool = True) -> str:
"""
Expand a given CURIE to an URI, based on mappings from `prefix_map`.
Parameters
----------
curie: str
A CURIE
fallback: bool
Determines whether to fallback to default prefix mappings, as determined
by `prefixcommons.curie_util`, when CURIE prefix is not found in `prefix_map`.
Returns
-------
str
A URI corresponding to the CURIE
"""
uri = expand(curie, [self.prefix_map], fallback)
return uri
@cached(LRUCache(maxsize=1024))
def contract(self, uri: str, fallback: bool = True) -> Optional[str]:
"""
Contract a given URI to a CURIE, based on mappings from `prefix_map`.
Parameters
----------
uri: str
A URI
fallback: bool
Determines whether to fallback to default prefix mappings, as determined
by `prefixcommons.curie_util`, when URI prefix is not found in `reverse_prefix_map`.
Returns
-------
Optional[str]
A CURIE corresponding to the URI
"""
# always prioritize non-CURIE shortform
if self.reverse_prefix_map and uri in self.reverse_prefix_map:
curie = self.reverse_prefix_map[uri]
else:
curie = contract(uri, [self.prefix_map], fallback)
return str(curie)
@staticmethod
@cached(LRUCache(maxsize=1024))
def is_curie(s: str) -> bool:
"""
Check if a given string is a CURIE.
Parameters
----------
s: str
A string
Returns
-------
bool
Whether or not the given string is a CURIE
"""
if isinstance(s, str):
m = re.match(r"^[^ <()>:]*:[^/ :]+$", s)
return bool(m)
else:
return False
@staticmethod
@cached(LRUCache(maxsize=1024))
def is_iri(s: str) -> bool:
"""
        Check if a given string is an IRI.
Parameters
----------
s: str
A string
Returns
-------
bool
Whether or not the given string is an IRI.
"""
if isinstance(s, str):
return s.startswith("http") or s.startswith("https")
else:
return False
@staticmethod
@cached(LRUCache(maxsize=1024))
def has_urlfragment(s: str) -> bool:
if "#" in s:
return True
else:
return False
@staticmethod
@cached(LRUCache(maxsize=1024))
def get_prefix(curie: str) -> Optional[str]:
"""
Get the prefix from a given CURIE.
Parameters
----------
curie: str
The CURIE
Returns
-------
str
The CURIE prefix
"""
prefix: Optional[str] = None
if PrefixManager.is_curie(curie):
prefix = curie.split(":", 1)[0]
return prefix
@staticmethod
@cached(LRUCache(maxsize=1024))
def get_reference(curie: str) -> Optional[str]:
"""
Get the reference of a given CURIE.
Parameters
----------
curie: str
The CURIE
Returns
-------
Optional[str]
The reference of a CURIE
"""
reference: Optional[str] = None
if PrefixManager.is_curie(curie):
reference = curie.split(":", 1)[1]
return reference
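A minimal usage sketch for `PrefixManager`; the expanded IRI depends on the JSON-LD context that gets loaded, so the commented results are indicative rather than guaranteed:

```python
pm = PrefixManager()                  # loads the default (biolink) JSON-LD context

pm.is_curie("GO:0008150")             # True
pm.get_prefix("GO:0008150")           # 'GO'
pm.get_reference("GO:0008150")        # '0008150'

uri = pm.expand("GO:0008150")         # e.g. 'http://purl.obolibrary.org/obo/GO_0008150'
pm.contract(uri)                      # back to 'GO:0008150'
```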
| 6,617 | 25.578313 | 108 | py |
| kgx | kgx-master/kgx/config.py |
import importlib
from typing import Dict, Any, Optional
import sys
from os import path
import re
import requests
import yaml
import json
import logging
from kgx.graph.base_graph import BaseGraph
config: Optional[Dict[str, Any]] = None
logger: Optional[logging.Logger] = None
graph_store_class: Optional[BaseGraph] = None
jsonld_context_map: Dict = {}
CONFIG_FILENAME = path.join(path.dirname(path.abspath(__file__)), "config.yml")
def get_config(filename: str = CONFIG_FILENAME) -> Dict:
"""
Get config as a Dictionary
Parameters
----------
filename: str
The filename with all the configuration
Returns
-------
Dict
A Dictionary containing all the entries from the config YAML
"""
global config
if config is None:
config = yaml.load(open(filename), Loader=yaml.FullLoader)
return config
def get_jsonld_context(name: str = "biolink"):
"""
Get contents of a JSON-LD context.
Returns
-------
Dict
the contents of the JSON-LD context
"""
content = None
if name in jsonld_context_map:
content = jsonld_context_map[name]
else:
filepath = config["jsonld-context"][name] # type: ignore
if filepath.startswith("http"):
try:
content = requests.get(filepath).json()
except ConnectionError:
raise Exception(f"Unable to download JSON-LD context from {filepath}")
else:
if path.exists(filepath):
content = json.load(open(filepath))
if "@context" in content:
content = content["@context"]
jsonld_context_map[name] = content
return content
def get_logger(name: str = "KGX") -> logging.Logger:
"""
Get an instance of logger.
Parameters
----------
name: str
The name of logger
Returns
-------
logging.Logger
An instance of logging.Logger
"""
global logger
if logger is None:
config = get_config()
logger = logging.getLogger(name)
handler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter(config["logging"]["format"])
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(config["logging"]["level"])
logger.propagate = False
return logger
def get_graph_store_class() -> Any:
"""
Get a reference to the graph store class, as defined the config.
Defaults to ``kgx.graph.nx_graph.NxGraph``
Returns
-------
Any
A reference to the graph store class
"""
global graph_store_class
if not graph_store_class:
config = get_config()
if "graph_store" in config:
name = config["graph_store"]
else:
name = "kgx.graph.nx_graph.NxGraph"
module_name = ".".join(name.split(".")[0:-1])
class_name = name.split(".")[-1]
graph_store_class = getattr(importlib.import_module(module_name), class_name)
return graph_store_class
# Biolink release numbers must be well-formed semantic versions ("major.minor.patch", optionally prefixed with "v")
semver_pattern = re.compile(r"^\d+\.\d+\.\d+$")
semver_pattern_v = re.compile(r"^v\d+\.\d+\.\d+$")
def get_biolink_model_schema(biolink_release: Optional[str] = None) -> Optional[str]:
"""
Get Biolink Model Schema
"""
if biolink_release:
if not semver_pattern.fullmatch(biolink_release) and not semver_pattern_v.fullmatch(biolink_release):
raise TypeError(
"The 'biolink_release' argument '"
+ biolink_release
+ "' is not a properly formatted 'major.minor.patch' semantic version?"
)
schema = f"https://raw.githubusercontent.com/biolink/biolink-model/{biolink_release}/biolink-model.yaml"
return schema
else:
return None
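A short sketch of the helpers defined in this module (the release string is illustrative):

```python
log = get_logger()                     # configured from config.yml on first call
log.info("KGX logger ready")

get_biolink_model_schema("2.2.0")
# -> 'https://raw.githubusercontent.com/biolink/biolink-model/2.2.0/biolink-model.yaml'
get_biolink_model_schema()             # -> None (no release requested)
# get_biolink_model_schema("2.2")      # would raise TypeError (not major.minor.patch)
```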
| 3,900 | 25.719178 | 112 | py |
| kgx | kgx-master/kgx/transformer.py |
import itertools
import os
from os.path import exists
from sys import stderr
from typing import Dict, Generator, List, Optional, Callable, Set
from kgx.config import get_logger
from kgx.error_detection import ErrorType, MessageLevel, ErrorDetecting
from kgx.source import (
GraphSource,
Source,
TsvSource,
JsonSource,
JsonlSource,
ObographSource,
TrapiSource,
NeoSource,
RdfSource,
OwlSource,
SssomSource,
)
from kgx.sink import (
Sink,
GraphSink,
TsvSink,
JsonSink,
JsonlSink,
NeoSink,
RdfSink,
NullSink,
SqlSink,
)
from kgx.utils.kgx_utils import (
apply_graph_operations,
GraphEntityType,
knowledge_provenance_properties,
)
SOURCE_MAP = {
"tsv": TsvSource,
"csv": TsvSource,
"graph": GraphSource,
"json": JsonSource,
"jsonl": JsonlSource,
"obojson": ObographSource,
"obo-json": ObographSource,
"trapi-json": TrapiSource,
"neo4j": NeoSource,
"nt": RdfSource,
"owl": OwlSource,
"sssom": SssomSource,
}
SINK_MAP = {
"tsv": TsvSink,
"csv": TsvSink,
"graph": GraphSink,
"json": JsonSink,
"jsonl": JsonlSink,
"neo4j": NeoSink,
"nt": RdfSink,
"null": NullSink,
"sql": SqlSink
}
log = get_logger()
class Transformer(ErrorDetecting):
"""
The Transformer class is responsible for transforming data from one
form to another.
Parameters
----------
stream: bool
Whether or not to stream (default: False)
infores_catalog: Optional[str]
Optional dump of a TSV file of InfoRes CURIE to Knowledge Source mappings
error_log:
Where to write any graph processing error message (stderr, by default).
"""
def __init__(
self,
stream: bool = False,
infores_catalog: Optional[str] = None,
error_log=None
):
"""
stream: bool
Whether or not to stream
infores_catalog: Optional[str]
Optional dump of a TSV file of InfoRes CURIE to Knowledge Source mappings
error_log:
Where to write any graph processing error message (stderr, by default).
"""
ErrorDetecting.__init__(self, error_log)
self.stream = stream
self.node_filters = {}
self.edge_filters = {}
self.inspector: Optional[Callable[[GraphEntityType, List], None]] = None
self.store = self.get_source("graph")
self._seen_nodes = set()
self._infores_catalog: Dict[str, str] = dict()
if infores_catalog and exists(infores_catalog):
with open(infores_catalog, "r") as irc:
for entry in irc:
if len(entry):
entry = entry.strip()
if entry:
source, infores = entry.split("\t")
self._infores_catalog[source] = infores
def transform(
self,
input_args: Dict,
output_args: Optional[Dict] = None,
inspector: Optional[Callable[[GraphEntityType, List], None]] = None,
) -> None:
"""
Transform an input source and write to an output sink.
If ``output_args`` is not defined then the data is persisted to
an in-memory graph.
The 'inspector' argument is an optional Callable which the
transformer.process() method applies to 'inspect' source records
prior to writing them out to the Sink. The first (GraphEntityType)
argument of the Callable tags the record as a NODE or an EDGE.
The second argument given to the Callable is the current record
itself. This Callable is strictly meant to be procedural and should
*not* mutate the record.
Parameters
----------
input_args: Dict
Arguments relevant to your input source
output_args: Optional[Dict]
            Arguments relevant to your output sink
inspector: Optional[Callable[[GraphEntityType, List], None]]
Optional Callable to 'inspect' source records during processing.
"""
sources = []
generators = []
input_format = input_args["format"]
prefix_map = input_args.pop("prefix_map", {})
predicate_mappings = input_args.pop("predicate_mappings", {})
node_property_predicates = input_args.pop("node_property_predicates", {})
node_filters = input_args.pop("node_filters", {})
edge_filters = input_args.pop("edge_filters", {})
operations = input_args.pop("operations", [])
# Optional process() data stream inspector
self.inspector = inspector
if input_format in {"neo4j", "graph"}:
source = self.get_source(input_format)
source.set_prefix_map(prefix_map)
source.set_node_filters(node_filters)
self.node_filters = source.node_filters
self.edge_filters = source.edge_filters
source.set_edge_filters(edge_filters)
self.node_filters = source.node_filters
self.edge_filters = source.edge_filters
if "uri" in input_args:
default_provenance = input_args["uri"]
else:
default_provenance = None
g = source.parse(default_provenance=default_provenance, **input_args)
sources.append(source)
generators.append(g)
else:
filename = input_args.pop("filename", {})
for f in filename:
source = self.get_source(input_format)
source.set_prefix_map(prefix_map)
if isinstance(source, RdfSource):
source.set_predicate_mapping(predicate_mappings)
source.set_node_property_predicates(node_property_predicates)
                source.set_node_filters(node_filters)
                source.set_edge_filters(edge_filters)
                self.node_filters = source.node_filters
                self.edge_filters = source.edge_filters
default_provenance = os.path.basename(f)
g = source.parse(f, default_provenance=default_provenance, **input_args)
sources.append(source)
generators.append(g)
source_generator = itertools.chain(*generators)
if output_args:
if self.stream:
if output_args["format"] in {"tsv", "csv"}:
if "node_properties" not in output_args or "edge_properties" not in output_args:
error_type = ErrorType.MISSING_PROPERTY
self.log_error(
entity=f"{output_args['format']} stream",
error_type=error_type,
message=f"'node_properties' and 'edge_properties' must be defined for output while"
f"streaming. The exported format will be limited to a subset of the columns.",
message_level=MessageLevel.WARNING
)
sink = self.get_sink(**output_args)
if "reverse_prefix_map" in output_args:
sink.set_reverse_prefix_map(output_args["reverse_prefix_map"])
if isinstance(sink, RdfSink):
if "reverse_predicate_mapping" in output_args:
sink.set_reverse_predicate_mapping(
output_args["reverse_predicate_mapping"]
)
if "property_types" in output_args:
sink.set_property_types(output_args["property_types"])
# stream from source to sink
self.process(source_generator, sink)
sink.finalize()
else:
# stream from source to intermediate
intermediate_sink = GraphSink(self)
intermediate_sink.node_properties.update(self.store.node_properties)
intermediate_sink.edge_properties.update(self.store.edge_properties)
self.process(source_generator, intermediate_sink)
for s in sources:
intermediate_sink.node_properties.update(s.node_properties)
intermediate_sink.edge_properties.update(s.edge_properties)
apply_graph_operations(intermediate_sink.graph, operations)
# stream from intermediate to output sink
intermediate_source = self.get_source("graph")
intermediate_source.node_properties.update(
intermediate_sink.node_properties
)
intermediate_source.edge_properties.update(
intermediate_sink.edge_properties
)
# Need to propagate knowledge source specifications here?
ks_args = dict()
for ksf in knowledge_provenance_properties:
if ksf in input_args:
ks_args[ksf] = input_args[ksf]
intermediate_source_generator = intermediate_source.parse(
intermediate_sink.graph, **ks_args
)
if output_args["format"] in {"tsv", "csv"}:
if "node_properties" not in output_args:
output_args[
"node_properties"
] = intermediate_source.node_properties
log.debug("output_args['node_properties']: " + str(output_args["node_properties"]), file=stderr)
if "edge_properties" not in output_args:
output_args[
"edge_properties"
] = intermediate_source.edge_properties
sink = self.get_sink(**output_args)
if "reverse_prefix_map" in output_args:
sink.set_reverse_prefix_map(output_args["reverse_prefix_map"])
if isinstance(sink, RdfSink):
if "reverse_predicate_mapping" in output_args:
sink.set_reverse_predicate_mapping(
output_args["reverse_predicate_mapping"]
)
if "property_types" in output_args:
sink.set_property_types(output_args["property_types"])
else:
sink = self.get_sink(**output_args)
sink.node_properties.update(intermediate_source.node_properties)
sink.edge_properties.update(intermediate_source.edge_properties)
self.process(intermediate_source_generator, sink)
sink.finalize()
self.store.node_properties.update(sink.node_properties)
self.store.edge_properties.update(sink.edge_properties)
else:
# stream from source to intermediate
sink = GraphSink(self)
self.process(source_generator, sink)
sink.node_properties.update(self.store.node_properties)
sink.edge_properties.update(self.store.edge_properties)
for s in sources:
sink.node_properties.update(s.node_properties)
sink.edge_properties.update(s.edge_properties)
sink.finalize()
self.store.node_properties.update(sink.node_properties)
self.store.edge_properties.update(sink.edge_properties)
apply_graph_operations(sink.graph, operations)
# Aggregate the InfoRes catalogs from all sources
for s in sources:
for k, v in s.get_infores_catalog().items():
self._infores_catalog[k] = v
def get_infores_catalog(self):
"""
Return catalog of Information Resource mappings
aggregated from all Transformer associated sources
"""
return self._infores_catalog
def process(self, source: Generator, sink: Sink) -> None:
"""
This method is responsible for reading from ``source``
and writing to ``sink`` by calling the relevant methods
based on the incoming data.
.. note::
The streamed data must not be mutated.
Parameters
----------
source: Generator
A generator from a Source
sink: kgx.sink.sink.Sink
An instance of Sink
"""
for rec in source:
if rec:
log.debug("length of rec", len(rec), "rec", rec)
if len(rec) == 4: # infer an edge record
                    # write the edge only if its subject/object survived any node category filters
                    write_edge = True
                    if "subject_category" in self.edge_filters:
                        write_edge = rec[0] in self._seen_nodes
                    if "object_category" in self.edge_filters:
                        write_edge = write_edge and rec[1] in self._seen_nodes
if write_edge:
if self.inspector:
self.inspector(GraphEntityType.EDGE, rec)
sink.write_edge(rec[-1])
else: # infer a node record
if "category" in self.node_filters:
self._seen_nodes.add(rec[0])
if self.inspector:
self.inspector(GraphEntityType.NODE, rec)
# last element of rec is the node properties
sink.write_node(rec[-1])
def save(self, output_args: Dict) -> None:
"""
Save data from the in-memory store to a desired sink.
Parameters
----------
output_args: Dict
Arguments relevant to your output sink
"""
if not self.store:
raise Exception("self.store is empty.")
source = self.store
source.node_properties.update(self.store.node_properties)
source.edge_properties.update(self.store.edge_properties)
source_generator = source.parse(self.store.graph)
if "node_properties" not in output_args:
output_args["node_properties"] = source.node_properties
if "edge_properties" not in output_args:
output_args["edge_properties"] = source.edge_properties
sink = self.get_sink(**output_args)
sink.node_properties.update(source.node_properties)
sink.edge_properties.update(source.edge_properties)
if "reverse_prefix_map" in output_args:
sink.set_reverse_prefix_map(output_args["reverse_prefix_map"])
if isinstance(sink, RdfSink):
if "reverse_predicate_mapping" in output_args:
sink.set_reverse_predicate_mapping(
output_args["reverse_predicate_mapping"]
)
if "property_types" in output_args:
sink.set_property_types(output_args["property_types"])
self.process(source_generator, sink)
sink.finalize()
def get_source(self, format: str) -> Source:
"""
Get an instance of Source that corresponds to a given format.
Parameters
----------
format: str
The input store format
Returns
-------
Source:
An instance of kgx.source.Source
"""
if format in SOURCE_MAP:
s = SOURCE_MAP[format]
return s(self)
else:
raise TypeError(f"{format} in an unrecognized format")
def get_sink(self, **kwargs: Dict) -> Sink:
"""
Get an instance of Sink that corresponds to a given format.
Parameters
----------
kwargs: Dict
Arguments required for initializing an instance of Sink
Returns
-------
Sink:
An instance of kgx.sink.Sink
"""
if kwargs["format"] in SINK_MAP:
s = SINK_MAP[kwargs["format"]]
return s(self, **kwargs)
else:
raise TypeError(f"{kwargs['format']} in an unrecognized format")
| 16,660 | 36.780045 | 120 |
py
|
kgx
|
kgx-master/kgx/__init__.py
|
"""
KGX Package
"""
__version__ = "2.1.0"
| 42 | 7.6 | 21 |
py
|
kgx
|
kgx-master/kgx/validator.py
|
"""
KGX Validator class
"""
import re
from typing import List, Optional, Dict, Set, Callable
import click
import validators
from bmt import Toolkit
from kgx.error_detection import ErrorType, MessageLevel, ErrorDetecting
from kgx.config import get_jsonld_context, get_logger
from kgx.graph.base_graph import BaseGraph
from kgx.utils.kgx_utils import (
get_toolkit,
snakecase_to_sentencecase,
sentencecase_to_snakecase,
camelcase_to_sentencecase,
GraphEntityType,
)
from kgx.prefix_manager import PrefixManager
logger = get_logger()
class Validator(ErrorDetecting):
"""
Class for validating a property graph.
The optional 'progress_monitor' for the validator should be a lightweight Callable
    which is injected into the class 'inspector' Callable, designed to intercept
    node and edge records streaming through the Validator (inside a Transformer.process() call).
The first (GraphEntityType) argument of the Callable tags the record as a NODE or an EDGE.
The second argument given to the Callable is the current record itself.
This Callable is strictly meant to be procedural and should *not* mutate the record.
The intent of this Callable is to provide a hook to KGX applications wanting the
namesake function of passively monitoring the graph data stream. As such, the Callable
could simply tally up the number of times it is called with a NODE or an EDGE, then
provide a suitable (quick!) report of that count back to the KGX application. The
Callable (function/callable class) should not modify the record and should be of low
complexity, so as not to introduce a large computational overhead to validation!
Parameters
----------
verbose: bool
Whether the generated report should be verbose or not (default: ``False``)
progress_monitor: Optional[Callable[[GraphEntityType, List], None]]
Function given a peek at the current record being processed by the class wrapped Callable.
schema: Optional[str]
        URL to (Biolink) Model Schema to be used for validation (default: None, use default Biolink Model Toolkit schema)
error_log: str
Where to write any graph processing error message (stderr, by default)
"""
_the_validator = None
@classmethod
def get_the_validator(
cls,
verbose: bool = False,
progress_monitor: Optional[Callable[[GraphEntityType, List], None]] = None,
schema: Optional[str] = None,
error_log: str = None
):
"""
Creates and manages a default singleton Validator in the module, when called
"""
if not cls._the_validator:
cls.set_biolink_model("v3.1.2")
cls._the_validator = Validator(
verbose=verbose,
progress_monitor=progress_monitor,
schema=schema,
error_log=error_log
)
return cls._the_validator
def __init__(
self,
verbose: bool = False,
progress_monitor: Optional[Callable[[GraphEntityType, List], None]] = None,
schema: Optional[str] = None,
error_log: str = None
):
ErrorDetecting.__init__(self, error_log)
# formal arguments
self.verbose: bool = verbose
self.progress_monitor: Optional[
Callable[[GraphEntityType, List], None]
] = progress_monitor
self.schema: Optional[str] = schema
# internal attributes
# associated currently active _currently_active_toolkit with this Validator instance
self.validating_toolkit = self.get_toolkit()
self.prefix_manager = PrefixManager()
self.jsonld = get_jsonld_context()
self.prefixes = self.get_all_prefixes(self.jsonld)
self.required_node_properties = self.get_required_node_properties()
self.required_edge_properties = self.get_required_edge_properties()
def __call__(self, entity_type: GraphEntityType, rec: List):
"""
Transformer 'inspector' Callable
"""
if self.progress_monitor:
self.progress_monitor(entity_type, rec)
if entity_type == GraphEntityType.EDGE:
self.analyse_edge(*rec)
elif entity_type == GraphEntityType.NODE:
self.analyse_node(*rec)
else:
raise RuntimeError("Unexpected GraphEntityType: " + str(entity_type))
def get_validating_toolkit(self):
"""
Get Validating Biolink Model toolkit
"""
return self.validating_toolkit
def get_validation_model_version(self):
"""
Get Validating Biolink Model version
"""
return self.validating_toolkit.get_model_version()
_currently_active_toolkit: Optional[Toolkit] = None
@classmethod
def set_biolink_model(cls, version: Optional[str]):
"""
Set Biolink Model version of Validator Toolkit
"""
cls._currently_active_toolkit = get_toolkit(biolink_release=version)
@classmethod
def get_toolkit(cls) -> Toolkit:
"""
Get the current default Validator Toolkit
"""
if not cls._currently_active_toolkit:
cls._currently_active_toolkit = get_toolkit()
return cls._currently_active_toolkit
_default_model_version = None
@classmethod
def get_default_model_version(cls):
"""
Get the Default Biolink Model version
"""
if not cls._default_model_version:
# get default Biolink version from BMT
cls._default_model_version = get_toolkit().get_model_version()
return cls._default_model_version
def analyse_node(self, n, data):
"""
Analyse Node
"""
self.validate_node_properties(n, data, self.required_node_properties)
self.validate_node_property_types(
n, data, toolkit=self.validating_toolkit
)
self.validate_node_property_values(n, data)
self.validate_categories(n, data, toolkit=self.validating_toolkit)
def analyse_edge(self, u, v, k, data):
"""
Analyse edge
"""
self.validate_edge_properties(
u, v, data, self.required_edge_properties
)
self.validate_edge_property_types(
u, v, data, toolkit=self.validating_toolkit
)
self.validate_edge_property_values(u, v, data)
self.validate_edge_predicate(
u, v, data, toolkit=self.validating_toolkit
)
@staticmethod
def get_all_prefixes(jsonld: Optional[Dict] = None) -> set:
"""
Get all prefixes from Biolink Model JSON-LD context.
        The Validator constructor caches this result in ``self.prefixes`` for subsequent access.
        Parameters
        ----------
jsonld: Optional[Dict]
The JSON-LD context
Returns
-------
        Set
            A set of prefixes
"""
if not jsonld:
jsonld = get_jsonld_context()
prefixes: Set = set(
k
for k, v in jsonld.items()
if isinstance(v, str)
or (isinstance(v, dict) and v.setdefault("@prefix", False))
) # @type: ignored
if "biolink" not in prefixes:
prefixes.add("biolink")
return prefixes
@staticmethod
def get_required_node_properties(toolkit: Optional[Toolkit] = None) -> list:
"""
Get all properties for a node that are required, as defined by Biolink Model.
Parameters
----------
toolkit: Optional[Toolkit]
Optional externally provided toolkit (default: use Validator class defined toolkit)
Returns
-------
list
A list of required node properties
"""
if not toolkit:
toolkit = Validator.get_toolkit()
node_properties = toolkit.get_all_node_properties()
# TODO: remove this append statement when Biolink 3.1.3 is released - need to add domain:entity to id slot.
node_properties.append("id")
required_properties = []
for p in node_properties:
element = toolkit.get_element(p)
if element and element.deprecated is None:
if (hasattr(element, "required") and element.required) or element.name == "category":
formatted_name = sentencecase_to_snakecase(element.name)
required_properties.append(formatted_name)
return required_properties
@staticmethod
def get_required_edge_properties(toolkit: Optional[Toolkit] = None) -> list:
"""
Get all properties for an edge that are required, as defined by Biolink Model.
Parameters
----------
toolkit: Optional[Toolkit]
Optional externally provided toolkit (default: use Validator class defined toolkit)
Returns
-------
list
A list of required edge properties
"""
if not toolkit:
toolkit = Validator.get_toolkit()
edge_properties = toolkit.get_all_edge_properties()
# TODO: remove this append statement when Biolink 3.1.3 is released - need to add domain:entity to id slot.
edge_properties.append("id")
required_properties = []
for p in edge_properties:
element = toolkit.get_element(p)
if element and element.deprecated is None:
if hasattr(element, "required") and element.required:
formatted_name = sentencecase_to_snakecase(element.name)
required_properties.append(formatted_name)
return required_properties
def validate(self, graph: BaseGraph):
"""
Validate nodes and edges in a graph.
Parameters
----------
graph: kgx.graph.base_graph.BaseGraph
The graph to validate
"""
self.validate_nodes(graph)
self.validate_edges(graph)
def validate_nodes(self, graph: BaseGraph):
"""
Validate all the nodes in a graph.
This method validates for the following,
- Node properties
- Node property type
- Node property value type
- Node categories
Parameters
----------
graph: kgx.graph.base_graph.BaseGraph
The graph to validate
"""
with click.progressbar(
graph.nodes(data=True), label="Validating nodes in graph"
) as bar:
for n, data in bar:
self.analyse_node(n, data)
def validate_edges(self, graph: BaseGraph):
"""
Validate all the edges in a graph.
This method validates for the following,
- Edge properties
- Edge property type
- Edge property value type
- Edge predicate
Parameters
----------
graph: kgx.graph.base_graph.BaseGraph
The graph to validate
"""
with click.progressbar(
graph.edges(data=True), label="Validate edges in graph"
) as bar:
for u, v, data in bar:
self.analyse_edge(u, v, None, data)
def validate_node_properties(
self,
node: str,
data: dict,
required_properties: list
):
"""
Checks if all the required node properties exist for a given node.
Parameters
----------
node: str
Node identifier
data: dict
Node properties
required_properties: list
Required node properties
"""
for p in required_properties:
if p not in data:
error_type = ErrorType.MISSING_NODE_PROPERTY
message = f"Required node property '{p}' is missing"
self.log_error(node, error_type, message, MessageLevel.ERROR)
def validate_edge_properties(
self,
subject: str,
object: str,
data: dict,
required_properties: list
):
"""
Checks if all the required edge properties exist for a given edge.
Parameters
----------
subject: str
Subject identifier
object: str
Object identifier
data: dict
Edge properties
required_properties: list
Required edge properties
"""
for p in required_properties:
if p not in data:
if p == "association_id":
# check for 'id' property instead
if "id" not in data:
error_type = ErrorType.MISSING_EDGE_PROPERTY
message = f"Required edge property '{p}' is missing"
self.log_error(
f"{subject}->{object}",
error_type,
message,
MessageLevel.ERROR
)
else:
error_type = ErrorType.MISSING_EDGE_PROPERTY
message = f"Required edge property '{p}' is missing"
self.log_error(
f"{subject}->{object}",
error_type,
message,
MessageLevel.ERROR
)
def validate_node_property_types(
self,
node: str,
data: dict,
toolkit: Optional[Toolkit] = None
):
"""
Checks if node properties have the expected value type.
Parameters
----------
node: str
Node identifier
data: dict
Node properties
toolkit: Optional[Toolkit]
Optional externally provided toolkit (default: use Validator class defined toolkit)
"""
if not toolkit:
toolkit = Validator.get_toolkit()
error_type = ErrorType.INVALID_NODE_PROPERTY_VALUE_TYPE
if not isinstance(node, str):
message = "Node property 'id' is expected to be of type 'string'"
self.log_error(node, error_type, message, MessageLevel.ERROR)
for key, value in data.items():
element = toolkit.get_element(key)
if element:
if hasattr(element, "typeof"):
if (element.typeof == "string" and not isinstance(value, str)) or \
(element.typeof == "double" and not isinstance(value, (int, float))):
message = f"Node property '{key}' is expected to be of type '{element.typeof}'"
self.log_error(node, error_type, message, MessageLevel.ERROR)
elif (
element.typeof == "uriorcurie"
and not isinstance(value, str)
and not validators.url(value)
):
message = f"Node property '{key}' is expected to be of type 'uri' or 'CURIE'"
self.log_error(node, error_type, message, MessageLevel.ERROR)
else:
logger.warning(
f"Skipping validation for Node property '{key}'. "
f"Expected type '{element.typeof}' v/s Actual type '{type(value)}'"
)
if hasattr(element, "multivalued"):
if element.multivalued:
if not isinstance(value, list):
message = f"Multi-valued node property '{key}' is expected to be of type '{list}'"
self.log_error(node, error_type, message, MessageLevel.ERROR)
else:
if isinstance(value, (list, set, tuple)):
message = f"Single-valued node property '{key}' is expected to be of type '{str}'"
self.log_error(node, error_type, message, MessageLevel.ERROR)
def validate_edge_property_types(
self,
subject: str,
object: str,
data: dict,
toolkit: Optional[Toolkit] = None
):
"""
Checks if edge properties have the expected value type.
Parameters
----------
subject: str
Subject identifier
object: str
Object identifier
data: dict
Edge properties
toolkit: Optional[Toolkit]
Optional externally provided toolkit (default: use Validator class defined toolkit)
"""
if not toolkit:
toolkit = Validator.get_toolkit()
error_type = ErrorType.INVALID_EDGE_PROPERTY_VALUE_TYPE
if not isinstance(subject, str):
message = "'subject' of an edge is expected to be of type 'string'"
self.log_error(
f"{subject}->{object}", error_type, message, MessageLevel.ERROR
)
if not isinstance(object, str):
message = "'object' of an edge is expected to be of type 'string'"
self.log_error(
f"{subject}->{object}", error_type, message, MessageLevel.ERROR
)
for key, value in data.items():
element = toolkit.get_element(key)
if element:
if hasattr(element, "typeof"):
if element.typeof == "string" and not isinstance(value, str):
message = (
f"Edge property '{key}' is expected to be of type 'string'"
)
self.log_error(
f"{subject}->{object}",
error_type,
message,
MessageLevel.ERROR,
)
elif (
element.typeof == "uriorcurie"
and not isinstance(value, str)
and not validators.url(value)
):
message = f"Edge property '{key}' is expected to be of type 'uri' or 'CURIE'"
self.log_error(
f"{subject}->{object}",
error_type,
message,
MessageLevel.ERROR,
)
elif element.typeof == "double" and not isinstance(
value, (int, float)
):
message = (
f"Edge property '{key}' is expected to be of type 'double'"
)
self.log_error(
f"{subject}->{object}",
error_type,
message,
MessageLevel.ERROR,
)
else:
logger.warning(
"Skipping validation for Edge property '{}'. Expected type '{}' v/s Actual type '{}'".format(
key, element.typeof, type(value)
)
)
if hasattr(element, "multivalued"):
if element.multivalued:
if not isinstance(value, list):
message = f"Multi-valued edge property '{key}' is expected to be of type 'list'"
self.log_error(
f"{subject}->{object}",
error_type,
message,
MessageLevel.ERROR,
)
else:
if isinstance(value, (list, set, tuple)):
message = f"Single-valued edge property '{key}' is expected to be of type 'str'"
self.log_error(
f"{subject}->{object}",
error_type,
message,
MessageLevel.ERROR,
)
def validate_node_property_values(
self,
node: str,
data: dict
):
"""
Validate a node property's value.
Parameters
----------
node: str
Node identifier
data: dict
Node properties
"""
error_type = ErrorType.INVALID_NODE_PROPERTY_VALUE
if not PrefixManager.is_curie(node):
message = f"Node property 'id' is expected to be of type 'CURIE'"
self.log_error(node, error_type, message, MessageLevel.ERROR)
else:
prefix = PrefixManager.get_prefix(node)
if prefix and prefix not in self.prefixes:
message = f"Node property 'id' has a value '{node}' with a CURIE prefix '{prefix}'" + \
f" is not represented in Biolink Model JSON-LD context"
self.log_error(node, error_type, message, MessageLevel.ERROR)
def validate_edge_property_values(
self,
subject: str,
object: str,
data: dict
):
"""
Validate an edge property's value.
Parameters
----------
subject: str
Subject identifier
object: str
Object identifier
data: dict
Edge properties
"""
error_type = ErrorType.INVALID_EDGE_PROPERTY_VALUE
prefixes = self.prefixes
if PrefixManager.is_curie(subject):
prefix = PrefixManager.get_prefix(subject)
if prefix and prefix not in prefixes:
message = f"Edge property 'subject' has a value '{subject}' with a CURIE prefix " + \
f"'{prefix}' that is not represented in Biolink Model JSON-LD context"
self.log_error(f"{subject}->{object}", error_type, message, MessageLevel.ERROR)
else:
message = f"Edge property 'subject' has a value '{subject}' which is not a proper CURIE"
self.log_error(f"{subject}->{object}", error_type, message, MessageLevel.ERROR)
if PrefixManager.is_curie(object):
prefix = PrefixManager.get_prefix(object)
if prefix not in prefixes:
message = f"Edge property 'object' has a value '{object}' with a CURIE " + \
f"prefix '{prefix}' that is not represented in Biolink Model JSON-LD context"
self.log_error(f"{subject}->{object}", error_type, message, MessageLevel.ERROR)
else:
message = f"Edge property 'object' has a value '{object}' which is not a proper CURIE"
self.log_error(f"{subject}->{object}", error_type, message, MessageLevel.ERROR)
def validate_categories(
self,
node: str,
data: dict,
toolkit: Optional[Toolkit] = None
):
"""
Validate ``category`` field of a given node.
Parameters
----------
node: str
Node identifier
data: dict
Node properties
toolkit: Optional[Toolkit]
Optional externally provided toolkit (default: use Validator class defined toolkit)
"""
if not toolkit:
toolkit = Validator.get_toolkit()
error_type = ErrorType.INVALID_CATEGORY
categories = data.get("category")
if categories is None:
message = "Node does not have a 'category' property"
self.log_error(node, error_type, message, MessageLevel.ERROR)
elif not isinstance(categories, list):
message = f"Node property 'category' is expected to be of type {list}"
self.log_error(node, error_type, message, MessageLevel.ERROR)
else:
for category in categories:
if PrefixManager.is_curie(category):
category = PrefixManager.get_reference(category)
m = re.match(r"^([A-Z][a-z\d]+)+$", category)
if not m:
# category is not CamelCase
error_type = ErrorType.INVALID_CATEGORY
message = f"Category '{category}' is not in CamelCase form"
self.log_error(node, error_type, message, MessageLevel.ERROR)
formatted_category = camelcase_to_sentencecase(category)
if toolkit.is_mixin(formatted_category):
message = f"Category '{category}' is a mixin in the Biolink Model"
self.log_error(node, error_type, message, MessageLevel.ERROR)
elif not toolkit.is_category(formatted_category):
message = (
f"Category '{category}' is unknown in the current Biolink Model"
)
self.log_error(node, error_type, message, MessageLevel.ERROR)
else:
c = toolkit.get_element(formatted_category.lower())
if c:
if category != c.name and category in c.aliases:
message = f"Category {category} is actually an alias for {c.name}; " + \
f"Should replace '{category}' with '{c.name}'"
self.log_error(node, error_type, message, MessageLevel.ERROR)
def validate_edge_predicate(
self,
subject: str,
object: str,
data: dict,
toolkit: Optional[Toolkit] = None
):
"""
Validate ``edge_predicate`` field of a given edge.
Parameters
----------
subject: str
Subject identifier
object: str
Object identifier
data: dict
Edge properties
toolkit: Optional[Toolkit]
Optional externally provided toolkit (default: use Validator class defined toolkit)
"""
if not toolkit:
toolkit = Validator.get_toolkit()
error_type = ErrorType.INVALID_EDGE_PREDICATE
edge_predicate = data.get("predicate")
if edge_predicate is None:
message = "Edge does not have an 'predicate' property"
self.log_error(f"{subject}->{object}", error_type, message, MessageLevel.ERROR)
elif not isinstance(edge_predicate, str):
message = f"Edge property 'edge_predicate' is expected to be of type 'string'"
self.log_error(f"{subject}->{object}", error_type, message, MessageLevel.ERROR)
else:
if PrefixManager.is_curie(edge_predicate):
edge_predicate = PrefixManager.get_reference(edge_predicate)
m = re.match(r"^([a-z_][^A-Z\s]+_?[a-z_][^A-Z\s]+)+$", edge_predicate)
if m:
p = toolkit.get_element(snakecase_to_sentencecase(edge_predicate))
if p is None:
message = f"Edge predicate '{edge_predicate}' is not in Biolink Model"
self.log_error(
f"{subject}->{object}",
error_type,
message,
MessageLevel.ERROR,
)
elif edge_predicate != p.name and edge_predicate in p.aliases:
message = f"Edge predicate '{edge_predicate}' is actually an alias for {p.name}; " + \
f"Should replace {edge_predicate} with {p.name}"
self.log_error(
f"{subject}->{object}",
error_type,
message,
MessageLevel.ERROR,
)
else:
message = f"Edge predicate '{edge_predicate}' is not in snake_case form"
self.log_error(f"{subject}->{object}", error_type, message, MessageLevel.ERROR)
| 28,313 | 37.005369 | 121 |
py
|
kgx
|
kgx-master/kgx/curie_lookup_service.py
|
import networkx as nx
import rdflib
from kgx.config import get_logger, get_config
from kgx.utils.kgx_utils import generate_edge_key, contract
CURIE_MAP = {"BFO:0000054": "realized_in", "RO:0000091": "has_disposition"}
log = get_logger()
class CurieLookupService(object):
"""
A service to lookup label for a given CURIE.
"""
config = get_config()
ontologies = config['ontologies'] if 'ontologies' in config else {}
ontology_graph = None
    def __init__(self, curie_map: dict = None):
        # copy the module-level defaults so per-instance updates do not mutate CURIE_MAP
        self.curie_map = dict(CURIE_MAP)
        if curie_map:
            self.curie_map.update(curie_map)
self.ontology_graph = nx.MultiDiGraph()
self.load_ontologies()
def load_ontologies(self):
"""
Load all required ontologies.
"""
for ontology in self.ontologies.values():
rdfgraph = rdflib.Graph()
input_format = rdflib.util.guess_format(ontology)
rdfgraph.parse(ontology, format=input_format)
# triples = rdfgraph.triples((None, rdflib.RDFS.subClassOf, None))
# for s,p,o in triples:
# subject_curie = contract(s)
# object_curie = contract(o)
# self.ontology_graph.add_node(subject_curie)
# self.ontology_graph.add_node(object_curie)
# key = generate_edge_key(subject_curie, 'subclass_of', object_curie)
# self.ontology_graph.add_edge(subject_curie, object_curie, key, **{'predicate': 'subclass_of', 'relation': 'rdfs:subClassOf'})
triples = rdfgraph.triples((None, rdflib.RDFS.label, None))
for s, p, o in triples:
key = contract(s)
value = o.value
value = value.replace(" ", "_")
self.curie_map[key] = value
self.ontology_graph.add_node(key, name=value)
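# ---------------------------------------------------------------------------
# Editor's note: a small usage sketch. Instantiating the service loads any
# ontologies listed in the KGX config (which may fetch remote files); the
# CURIE shown is simply the built-in example from CURIE_MAP above.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    lookup = CurieLookupService()
    print(lookup.curie_map.get("BFO:0000054"))  # -> "realized_in"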
| 1,942 | 35.660377 | 143 |
py
|
kgx
|
kgx-master/kgx/error_detection.py
|
"""
Shared graph model error reporting code currently used in
the validator, summarize_graph and meta_knowledge_graph modules
"""
from enum import Enum
from json import dump as json_dump
from sys import stderr
from typing import Dict, List, Optional, TextIO
class ErrorType(Enum):
"""
Validation error types
"""
MISSING_NODE_PROPERTY = 1
MISSING_PROPERTY = 1.5
MISSING_EDGE_PROPERTY = 2
INVALID_NODE_PROPERTY = 3
INVALID_EDGE_PROPERTY = 4
INVALID_NODE_PROPERTY_VALUE_TYPE = 5
INVALID_NODE_PROPERTY_VALUE = 6
INVALID_EDGE_PROPERTY_VALUE_TYPE = 7
INVALID_EDGE_PROPERTY_VALUE = 8
MISSING_CATEGORY = 9
INVALID_CATEGORY = 10
MISSING_EDGE_PREDICATE = 11
INVALID_EDGE_PREDICATE = 12
MISSING_NODE_CURIE_PREFIX = 13
DUPLICATE_NODE = 14
    MISSING_NODE = 15
    INVALID_EDGE_TRIPLE = 16
VALIDATION_SYSTEM_ERROR = 99
class MessageLevel(Enum):
"""
Message level for validation reports
"""
# Recommendations
INFO = 1
# Message to convey 'should'
WARNING = 2
# Message to convey 'must'
ERROR = 3
class ErrorDetecting(object):
"""
Class of object which can capture internal graph parsing error events.
Superclass parent of KGX 'validate' and 'graph-summary'
operational classes (perhaps more KGX operations later?)
"""
def __init__(self, error_log=stderr):
"""
        Capture and index graph processing error events for later reporting.
        Parameters
        ----------
        error_log: str or TextIO handle
            Output target for logging (stderr, by default).
"""
self.errors: Dict[
str, # MessageLevel.name
Dict[
str, # ErrorType.name
Dict[
str,
List[str]
]
]
] = dict()
if error_log:
if isinstance(error_log, str):
self.error_log = open(error_log, mode="w")
else:
self.error_log = error_log
else:
self.error_log = None
def clear_errors(self):
"""
Clears the current error log list
"""
self.errors.clear()
def log_error(
self,
entity: str,
error_type: ErrorType,
message: str,
message_level: MessageLevel = MessageLevel.ERROR,
):
"""
Log an error to the list of such errors.
:param entity: source of parse error
:param error_type: ValidationError ErrorType,
:param message: message string describing the error
:param message_level: ValidationError MessageLevel
"""
# index errors by entity identifier
level = message_level.name
error = error_type.name
# clean up entity name string...
entity = str(entity).strip()
if level not in self.errors:
self.errors[level] = dict()
        if error not in self.errors[level]:
            self.errors[level][error] = dict()
        if message not in self.errors[level][error]:
            self.errors[level][error][message] = [entity]
        else:
            self.errors[level][error][message].append(entity)
def get_errors(self, level: str = None) -> Dict:
"""
Get the index list of distinct error messages.
Parameters
----------
level: str
Optional filter (case insensitive) name of error message level (generally either "Error" or "Warning")
Returns
-------
Dict
A raw dictionary of entities indexed by [message_level][error_type][message] or
only just [error_type][message] specific to a given message level if the optional level filter is given
"""
if not level:
return self.errors # default to return all errors
# ... or filter on error type
level = level.upper()
if level in self.errors:
return self.errors[level]
else:
# if errors of a given error_type don't exist, return an empty dictionary
return dict()
def write_report(self, outstream: Optional[TextIO] = None, level: str = None) -> None:
"""
Write error get_errors to a file
Parameters
----------
outstream: TextIO
The stream to which to write
level: str
Optional filter (case insensitive) name of error message level (generally either "Error" or "Warning")
"""
        # use the stream given by the caller; otherwise fall back to the default error log, then stderr
        if not outstream:
            if self.error_log:
                outstream = self.error_log
            else:
                # safe here to default to stderr?
                outstream = stderr
if level:
outstream.write(f"\nMessages at the '{level}' level:\n")
json_dump(self.get_errors(level), outstream, indent=4)
outstream.write("\n") # print a trailing newline(?)
| 5,094 | 28.114286 | 115 |
py
|
kgx
|
kgx-master/kgx/cli/cli_utils.py
|
import importlib
import os
from os.path import dirname, abspath
from sys import stdout
from multiprocessing import Pool
from typing import List, Tuple, Optional, Dict, Set, Union
import yaml
from kgx.validator import Validator
from kgx.sink import Sink
from kgx.transformer import Transformer, SOURCE_MAP, SINK_MAP
from kgx.config import get_logger
from kgx.graph.base_graph import BaseGraph
from kgx.graph_operations.graph_merge import merge_all_graphs
from kgx.graph_operations import summarize_graph, meta_knowledge_graph
from kgx.utils.kgx_utils import apply_graph_operations, knowledge_provenance_properties
from pprint import pprint
summary_report_types = {
"kgx-map": summarize_graph.GraphSummary,
"meta-knowledge-graph": meta_knowledge_graph.MetaKnowledgeGraph,
}
log = get_logger()
def get_input_file_types() -> Tuple:
"""
Get all input file formats supported by KGX.
Returns
-------
Tuple
A tuple of supported file formats
"""
return tuple(SOURCE_MAP.keys())
def get_output_file_types() -> Tuple:
"""
Get all output file formats supported by KGX.
Returns
-------
Tuple
A tuple of supported file formats
"""
return tuple(SINK_MAP.keys())
def get_report_format_types() -> Tuple:
"""
Get all graph summary report formats supported by KGX.
Returns
-------
Tuple
A tuple of supported file formats
"""
return "yaml", "json"
def graph_summary(
inputs: List[str],
input_format: str,
input_compression: Optional[str],
output: Optional[str],
report_type: str,
report_format: Optional[str] = None,
graph_name: Optional[str] = None,
node_facet_properties: Optional[List] = None,
edge_facet_properties: Optional[List] = None,
error_log: str = "",
) -> Dict:
"""
Loads and summarizes a knowledge graph from a set of input files.
Parameters
----------
inputs: List[str]
Input file
input_format: str
Input file format
input_compression: Optional[str]
The input compression type
output: Optional[str]
Where to write the output (stdout, by default)
report_type: str
The summary report type: "kgx-map" or "meta-knowledge-graph"
report_format: Optional[str]
The summary report format file types: 'yaml' or 'json'
graph_name: str
User specified name of graph being summarized
node_facet_properties: Optional[List]
A list of node properties from which to generate counts per value for those properties.
For example, ``['provided_by']``
edge_facet_properties: Optional[List]
A list of edge properties (e.g. knowledge_source tags) to facet on.
For example, ``['original_knowledge_source', 'aggregator_knowledge_source']``
error_log: str
Where to write any graph processing error message (stderr, by default)
Returns
-------
Dict
A dictionary with the graph stats
"""
if not graph_name:
graph_name = "Graph"
if report_format and report_format not in get_report_format_types():
raise ValueError(f"report_format must be one of {get_report_format_types()}")
if report_type in summary_report_types:
# New design pattern enabling 'stream' processing of statistics on a small memory footprint
# by injecting an inspector in the Transformer.process() source-to-sink data flow.
#
# First, we instantiate the Inspector (generally, a Callable class)...
#
inspector = summary_report_types[report_type](
# ...thus, there is no need to hand the Inspector the graph;
# rather, the inspector will see the graph data after
# being injected into the Transformer.transform() workflow
# graph=transformer.store.graph,
name=graph_name,
node_facet_properties=node_facet_properties,
edge_facet_properties=edge_facet_properties,
error_log=error_log,
)
else:
raise ValueError(f"report_type must be one of {summary_report_types.keys()}")
# streaming assumed, throwing away the output graph
output_args = {
"format": "null"
}
# default here is for Streaming to be applied
transformer = Transformer(stream=True)
transformer.transform(
input_args={
"filename": inputs,
"format": input_format,
"compression": input_compression,
},
output_args=output_args,
# ... Second, we inject the Inspector into the transform() call,
# for the underlying Transformer.process() to use...
inspector=inspector,
)
if output:
with open(output, "w") as gsr:
inspector.save(gsr, file_format=report_format)
else:
inspector.save(stdout, file_format=report_format)
# ... Third, we directly return the graph statistics to the caller.
return inspector.get_graph_summary()
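# ---------------------------------------------------------------------------
# Editor's note: a minimal sketch of calling graph_summary() directly from
# Python rather than through the CLI; the TSV filenames are hypothetical.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    stats = graph_summary(
        inputs=["graph_nodes.tsv", "graph_edges.tsv"],
        input_format="tsv",
        input_compression=None,
        output="graph_stats.yaml",
        report_type="kgx-map",
        report_format="yaml",
    )
    print(sorted(stats.keys()))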
def validate(
inputs: List[str],
input_format: str,
input_compression: Optional[str],
output: Optional[str],
biolink_release: Optional[str] = None,
) -> Dict:
"""
Run KGX validator on an input file to check for Biolink Model compliance.
Parameters
----------
inputs: List[str]
Input files
input_format: str
The input format
input_compression: Optional[str]
The input compression type
output: Optional[str]
Path to output file (stdout, by default)
biolink_release: Optional[str] = None
SemVer version of Biolink Model Release used for validation (default: latest Biolink Model Toolkit version)
Returns
-------
Dict
A dictionary of entities which have parse errors indexed by [message_level][error_type][message]
"""
# Processing of statistics on a small memory footprint using streaming of input KGX
# by injecting an inspector in the Transformer.process() source-to-sink data flow.
#
# First, we instantiate a Validator() class (converted into a Callable class) as an Inspector ...
# In the new "Inspector" design pattern, we need to instantiate it before the Transformer.
#
Validator.set_biolink_model(biolink_release)
# Validator assumes the currently set Biolink Release
validator = Validator()
transformer = Transformer(stream=True)
transformer.transform(
input_args={
"filename": inputs,
"format": input_format,
"compression": input_compression,
},
output_args={
"format": "null"
}, # streaming processing throws the graph data away
# ... Second, we inject the Inspector into the transform() call,
# for the underlying Transformer.process() to use...
inspector=validator,
)
if output:
validator.write_report(open(output, "w"))
else:
validator.write_report(stdout)
# ... Third, we return directly any validation errors to the caller
return validator.get_errors()
def neo4j_download(
uri: str,
username: str,
password: str,
output: str,
output_format: str,
output_compression: Optional[str],
stream: bool,
node_filters: Optional[Tuple] = None,
edge_filters: Optional[Tuple] = None,
) -> Transformer:
"""
Download nodes and edges from Neo4j database.
Parameters
----------
uri: str
Neo4j URI. For example, https://localhost:7474
username: str
Username for authentication
password: str
Password for authentication
output: str
Where to write the output (stdout, by default)
output_format: Optional[str]
The output type (``tsv``, by default)
output_compression: Optional[str]
The output compression type
stream: bool
Whether to parse input as a stream
node_filters: Optional[Tuple]
Node filters
edge_filters: Optional[Tuple]
Edge filters
Returns
-------
kgx.Transformer
The NeoTransformer
"""
transformer = Transformer(stream=stream)
transformer.transform(
{
"uri": uri,
"username": username,
"password": password,
"format": "neo4j",
"node_filters": node_filters,
"edge_filters": edge_filters,
}
)
if not output_format:
output_format = "tsv"
transformer.save(
{"filename": output, "format": output_format, "compression": output_compression}
)
return transformer
def neo4j_upload(
inputs: List[str],
input_format: str,
input_compression: Optional[str],
uri: str,
username: str,
password: str,
stream: bool,
node_filters: Optional[Tuple] = None,
edge_filters: Optional[Tuple] = None,
) -> Transformer:
"""
Upload a set of nodes/edges to a Neo4j database.
Parameters
----------
inputs: List[str]
A list of files that contains nodes/edges
input_format: str
The input format
input_compression: Optional[str]
The input compression type
uri: str
The full HTTP address for Neo4j database
username: str
Username for authentication
password: str
Password for authentication
stream: bool
Whether to parse input as a stream
node_filters: Optional[Tuple]
Node filters
edge_filters: Optional[Tuple]
Edge filters
Returns
-------
kgx.Transformer
The NeoTransformer
"""
transformer = Transformer(stream=stream)
transformer.transform(
{
"filename": inputs,
"format": input_format,
"compression": input_compression,
"node_filters": node_filters,
"edge_filters": edge_filters,
}
)
transformer.save(
{"uri": uri, "username": username, "password": password, "format": "neo4j"}
)
return transformer
def _validate_files(cwd: str, file_paths: List[str], context: str = ""):
"""
Utility method for resolving file paths
    :param cwd: current working directory for resolving possible relative file path names
    :param file_paths: list of file paths to resolve
    :param context: optional source context of the file list
:return: resolved list of file paths (as absolute paths)
"""
resolved_files: List[str] = list()
for f in file_paths:
# check if the file exists as an absolute path
if not os.path.exists(f):
# otherwise, check if file exists as a path
# relative to the provided "current working directory"
f = abspath(cwd + "/" + f)
if not os.path.exists(f):
raise FileNotFoundError(
f"Filename '{f}' for source '{context}' does not exist!"
)
if not os.path.isfile(f):
raise FileNotFoundError(
f"Filename '{f}' for source '{context}' is not a file!"
)
resolved_files.append(f)
return resolved_files
def _process_knowledge_source(ksf: str, spec: str) -> Union[str, bool, Tuple]:
if ksf not in knowledge_provenance_properties:
log.warning("Unknown Knowledge Source Field: " + ksf + "... ignoring!")
return False
else:
if spec.lower() == "true":
return True
elif spec.lower() == "false":
return False
else:
# If a Tuple, expect a comma-delimited string?
spec_parts = spec.split(",")
if len(spec_parts) == 1:
# assumed to be just a default string value for the knowledge source field
return spec_parts[0]
else:
# assumed to be an InfoRes Tuple rewrite specification
if len(spec_parts) > 3:
spec_parts = spec_parts[:2]
return tuple(spec_parts)
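# ---------------------------------------------------------------------------
# Editor's note, illustrating the helper above with hypothetical specs:
#   _process_knowledge_source("aggregator_knowledge_source", "true")
#       -> True
#   _process_knowledge_source("aggregator_knowledge_source", "infores:semmeddb")
#       -> "infores:semmeddb"
#   _process_knowledge_source("aggregator_knowledge_source", "drugbank,infores:drugbank")
#       -> ("drugbank", "infores:drugbank")
# ---------------------------------------------------------------------------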
def transform(
inputs: Optional[List[str]],
input_format: Optional[str] = None,
input_compression: Optional[str] = None,
output: Optional[str] = None,
output_format: Optional[str] = None,
output_compression: Optional[str] = None,
stream: bool = False,
node_filters: Optional[List[Tuple[str, str]]] = None,
edge_filters: Optional[List[Tuple[str, str]]] = None,
transform_config: str = None,
source: Optional[List] = None,
knowledge_sources: Optional[List[Tuple[str, str]]] = None,
# this parameter doesn't get used, but I leave it in
# for now, in case it signifies an unimplemented concept
# destination: Optional[List] = None,
processes: int = 1,
infores_catalog: Optional[str] = None,
) -> None:
"""
Transform a Knowledge Graph from one serialization form to another.
Parameters
----------
inputs: Optional[List[str]]
A list of files that contains nodes/edges
input_format: Optional[str]
The input format
input_compression: Optional[str]
The input compression type
output: Optional[str]
The output file
output_format: Optional[str]
The output format
output_compression: Optional[str]
The output compression type
stream: bool
Whether to parse input as a stream
node_filters: Optional[List[Tuple[str, str]]]
Node input filters
edge_filters: Optional[List[Tuple[str, str]]]
Edge input filters
transform_config: Optional[str]
The transform config YAML
source: Optional[List]
A list of source to load from the YAML
knowledge_sources: Optional[List[Tuple[str, str]]]
A list of named knowledge sources with (string, boolean or tuple rewrite) specification
processes: int
Number of processes to use
infores_catalog: Optional[str]
Optional dump of a TSV file of InfoRes CURIE to
Knowledge Source mappings (not yet available in transform_config calling mode)
"""
if transform_config and inputs:
raise ValueError("Can accept either --transform-config OR inputs, not both")
output_directory = "output"
if transform_config:
# Use the directory within which the 'transform_config' file
# exists as a 'current working directory' for
# resolving relative filename paths in the configuration.
cwd = dirname(transform_config)
cfg = yaml.load(open(transform_config), Loader=yaml.FullLoader)
top_level_args = {}
if "configuration" in cfg:
top_level_args = prepare_top_level_args(cfg["configuration"])
if (
"output_directory" in cfg["configuration"]
and cfg["configuration"]["output_directory"]
):
output_directory = cfg["configuration"]["output_directory"]
if not output_directory.startswith(os.path.sep):
# relative path
output_directory = f"{os.path.abspath(os.path.dirname(transform_config))}{os.path.sep}{output_directory}"
if not os.path.exists(output_directory):
os.mkdir(output_directory)
if not source:
source = cfg["transform"]["source"].keys()
for s in source:
source_properties = cfg["transform"]["source"][s]
if source_properties["input"]["format"] in get_input_file_types():
source_properties["input"]["filename"] = _validate_files(
cwd=cwd,
file_paths=source_properties["input"]["filename"],
context=s,
)
source_to_parse = {}
for key, val in cfg["transform"]["source"].items():
if key in source:
source_to_parse[key] = val
results = []
pool = Pool(processes=processes)
for k, v in source_to_parse.items():
log.info(f"Spawning process for '{k}'")
result = pool.apply_async(
transform_source,
(
k,
v,
output_directory,
top_level_args["prefix_map"],
top_level_args["node_property_predicates"],
top_level_args["predicate_mappings"],
top_level_args["reverse_prefix_map"],
top_level_args["reverse_predicate_mappings"],
top_level_args["property_types"],
top_level_args["checkpoint"],
False,
stream,
),
)
results.append(result)
pool.close()
pool.join()
graphs = [r.get() for r in results]
else:
source_dict: Dict = {
"input": {
"format": input_format,
"compression": input_compression,
"filename": inputs,
"filters": {
"node_filters": node_filters,
"edge_filters": edge_filters,
},
},
"output": {
"format": output_format,
"compression": output_compression,
"filename": output,
},
}
if knowledge_sources:
for ksf, spec in knowledge_sources:
log.debug("ksf", ksf, "spec", spec)
ksf_spec = _process_knowledge_source(ksf, spec)
if isinstance(ksf_spec, tuple):
if ksf not in source_dict["input"]:
source_dict["input"][ksf] = dict()
if isinstance(source_dict["input"][ksf], dict):
key = ksf_spec[0]
source_dict["input"][ksf][key] = ksf_spec
else:
# Unexpected condition - mixing static values with tuple specified rewrites?
raise RuntimeError(
"Inconsistent multivalued specifications: make sure that all the values "
+ "of the knowledge source tag '"
+ ksf
+ "' are all rewrite specifications!"
)
else:
source_dict["input"][ksf] = ksf_spec
log.debug("source_dict", source_dict)
name = os.path.basename(inputs[0])
transform_source(
key=name,
source=source_dict,
output_directory=None,
stream=stream,
infores_catalog=infores_catalog,
)
def merge(
merge_config: str,
source: Optional[List] = None,
destination: Optional[List] = None,
processes: int = 1,
) -> BaseGraph:
"""
Load nodes and edges from files and KGs, as defined in a config YAML, and merge them into a single graph.
The merged graph can then be written to a local/remote Neo4j instance OR be serialized into a file.
Parameters
----------
merge_config: str
Merge config YAML
source: Optional[List]
A list of source to load from the YAML
destination: Optional[List]
A list of destination to write to, as defined in the YAML
processes: int
Number of processes to use
Returns
-------
kgx.graph.base_graph.BaseGraph
The merged graph
"""
# Use the directory within which the 'merge_config' file
# exists as a 'current working directory' for
# resolving relative filename paths in the configuration.
cwd = dirname(merge_config)
with open(merge_config, "r") as YML:
cfg = yaml.load(YML, Loader=yaml.FullLoader)
output_directory = "output"
top_level_args = {}
if "configuration" in cfg:
top_level_args = prepare_top_level_args(cfg["configuration"])
if (
"output_directory" in cfg["configuration"]
and cfg["configuration"]["output_directory"]
):
output_directory = cfg["configuration"]["output_directory"]
if not output_directory.startswith(os.path.sep):
# relative path
output_directory = f"{os.path.abspath(os.path.dirname(merge_config))}{os.path.sep}{output_directory}"
if not os.path.exists(output_directory):
os.mkdir(output_directory)
if not source:
source = cfg["merged_graph"]["source"].keys()
if not destination:
destination = cfg["merged_graph"]["destination"].keys()
for s in source:
source_properties = cfg["merged_graph"]["source"][s]
if source_properties["input"]["format"] in get_input_file_types():
source_properties["input"]["filename"] = _validate_files(
cwd=cwd, file_paths=source_properties["input"]["filename"], context=s
)
sources_to_parse = {}
for key in cfg["merged_graph"]["source"]:
if key in source:
sources_to_parse[key] = cfg["merged_graph"]["source"][key]
results = []
pool = Pool(processes=processes)
for k, v in sources_to_parse.items():
log.info(f"Spawning process for '{k}'")
result = pool.apply_async(
parse_source,
(
k,
v,
output_directory,
top_level_args["prefix_map"],
top_level_args["node_property_predicates"],
top_level_args["predicate_mappings"],
top_level_args["checkpoint"],
),
)
results.append(result)
pool.close()
pool.join()
stores = [r.get() for r in results]
merged_graph = merge_all_graphs([x.graph for x in stores])
log.info(
f"Merged graph has {merged_graph.number_of_nodes()} nodes and {merged_graph.number_of_edges()} edges"
)
if "name" in cfg["merged_graph"]:
merged_graph.name = cfg["merged_graph"]["name"]
if "operations" in cfg["merged_graph"]:
apply_graph_operations(merged_graph, cfg["merged_graph"]["operations"])
destination_to_write: Dict[str, Dict] = {}
for d in destination:
if d in cfg["merged_graph"]["destination"]:
destination_to_write[d] = cfg["merged_graph"]["destination"][d]
else:
raise KeyError(f"Cannot find destination '{d}' in YAML")
# write the merged graph
node_properties = set()
edge_properties = set()
for s in stores:
node_properties.update(s.node_properties)
edge_properties.update(s.edge_properties)
input_args = {"graph": merged_graph, "format": "graph"}
if destination_to_write:
for key, destination_info in destination_to_write.items():
log.info(f"Writing merged graph to {key}")
output_args = {
"format": destination_info["format"],
"reverse_prefix_map": top_level_args["reverse_prefix_map"],
"reverse_predicate_mappings": top_level_args[
"reverse_predicate_mappings"
],
}
if "reverse_prefix_map" in destination_info:
output_args["reverse_prefix_map"].update(
destination_info["reverse_prefix_map"]
)
if "reverse_predicate_mappings" in destination_info:
output_args["reverse_predicate_mappings"].update(
destination_info["reverse_predicate_mappings"]
)
if destination_info["format"] == "neo4j":
output_args["uri"] = destination_info["uri"]
output_args["username"] = destination_info["username"]
output_args["password"] = destination_info["password"]
elif destination_info["format"] in get_input_file_types():
filename = destination_info["filename"]
if isinstance(filename, list):
filename = filename[0]
destination_filename = f"{output_directory}/{filename}"
output_args["filename"] = destination_filename
output_args["compression"] = (
destination_info["compression"]
if "compression" in destination_info
else None
)
if destination_info['format'] == 'nt':
output_args['property_types'] = top_level_args['property_types']
if 'property_types' in top_level_args and 'property_types' in destination_info.keys():
output_args['property_types'].update(destination_info['property_types'])
if destination_info['format'] in {'csv', 'tsv'}:
output_args['node_properties'] = node_properties
output_args['edge_properties'] = edge_properties
else:
raise TypeError(
f"type {destination_info['format']} not yet supported for KGX merge operation."
)
transformer = Transformer()
transformer.transform(input_args, output_args)
else:
log.warning(
f"No destination provided in {merge_config}. The merged graph will not be persisted."
)
return merged_graph
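# ---------------------------------------------------------------------------
# Editor's note: an illustrative (not authoritative) minimal merge config,
# shaped after the keys merge() reads above; every name and filename below is
# a hypothetical placeholder.
# ---------------------------------------------------------------------------
_EXAMPLE_MERGE_CONFIG_YAML = """
configuration:
  output_directory: output
merged_graph:
  name: example-merged-graph
  source:
    first-source:
      input:
        format: tsv
        filename:
          - first_nodes.tsv
          - first_edges.tsv
    second-source:
      input:
        format: tsv
        filename:
          - second_nodes.tsv
          - second_edges.tsv
  destination:
    merged-graph-tsv:
      format: tsv
      filename: merged-graph
"""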
def parse_source(
key: str,
source: dict,
output_directory: str,
prefix_map: Dict[str, str] = None,
node_property_predicates: Set[str] = None,
predicate_mappings: Dict[str, str] = None,
checkpoint: bool = False,
) -> Sink:
"""
Parse a source from a merge config YAML.
Parameters
----------
key: str
Source key
source: Dict
Source configuration
output_directory: str
Location to write output to
prefix_map: Dict[str, str]
Non-canonical CURIE mappings
node_property_predicates: Set[str]
A set of predicates that ought to be treated as node properties (This is applicable for RDF)
predicate_mappings: Dict[str, str]
A mapping of predicate IRIs to property names (This is applicable for RDF)
checkpoint: bool
Whether to serialize each individual source to a TSV
Returns
-------
kgx.sink.sink.Sink
Returns an instance of Sink
"""
log.info(f"Processing source '{key}'")
if not key:
key = os.path.basename(source["input"]["filename"][0])
input_args = prepare_input_args(
key,
source,
output_directory,
prefix_map,
node_property_predicates,
predicate_mappings,
)
transformer = Transformer(stream=True)
transformer.transform(input_args)
transformer.store.graph.name = key
if checkpoint:
log.info(f"Writing checkpoint for source '{key}'")
checkpoint_output = f"{output_directory}/{key}" if output_directory else key
transformer.save({"filename": checkpoint_output, "format": "tsv"})
# Current "Callable" metadata not needed at this point
# but causes peculiar problems downstream, so we clear it.
transformer.store.clear_graph_metadata()
return transformer.store
def transform_source(
key: str,
source: Dict,
output_directory: Optional[str],
prefix_map: Dict[str, str] = None,
node_property_predicates: Set[str] = None,
predicate_mappings: Dict[str, str] = None,
reverse_prefix_map: Dict = None,
reverse_predicate_mappings: Dict = None,
property_types: Dict = None,
checkpoint: bool = False,
preserve_graph: bool = True,
stream: bool = False,
infores_catalog: Optional[str] = None,
) -> Sink:
"""
Transform a source from a transform config YAML.
Parameters
----------
key: str
Source key
source: Dict
Source configuration
output_directory: Optional[str]
Location to write output to
prefix_map: Dict[str, str]
Non-canonical CURIE mappings
node_property_predicates: Set[str]
A set of predicates that ought to be treated as node properties (This is applicable for RDF)
predicate_mappings: Dict[str, str]
A mapping of predicate IRIs to property names (This is applicable for RDF)
reverse_prefix_map: Dict[str, str]
Non-canonical CURIE mappings for export
reverse_predicate_mappings: Dict[str, str]
A mapping of property names to predicate IRIs (This is applicable for RDF)
property_types: Dict[str, str]
The xml property type for properties that are other than ``xsd:string``.
Relevant for RDF export.
checkpoint: bool
Whether to serialize each individual source to a TSV
preserve_graph: true
Whether or not to preserve the graph corresponding to the source
stream: bool
Whether to parse input as a stream
infores_catalog: Optional[str]
Optional dump of a TSV file of InfoRes CURIE to Knowledge Source mappings
Returns
-------
kgx.sink.sink.Sink
Returns an instance of Sink
"""
log.debug(f"Processing source '{key}'")
input_args = prepare_input_args(
key,
source,
output_directory,
prefix_map,
node_property_predicates,
predicate_mappings,
)
output_args = prepare_output_args(
key,
source,
output_directory,
reverse_prefix_map,
reverse_predicate_mappings,
property_types,
)
transformer = Transformer(stream=stream, infores_catalog=infores_catalog)
transformer.transform(input_args, output_args)
if not preserve_graph:
transformer.store.graph.clear()
if infores_catalog:
with open(infores_catalog, "w") as irc:
catalog: Dict[str, str] = transformer.get_infores_catalog()
for source in catalog.keys():
infores = catalog.setdefault(source, "unknown")
log.debug(f"{source}\t{infores}", file=irc)
return transformer.store
def prepare_input_args(
key: str,
source: Dict,
output_directory: Optional[str],
prefix_map: Dict[str, str] = None,
node_property_predicates: Set[str] = None,
predicate_mappings: Dict[str, str] = None,
) -> Dict:
"""
Prepare input arguments for Transformer.
Parameters
----------
key: str
Source key
source: Dict
Source configuration
output_directory: str
Location to write output to
prefix_map: Dict[str, str]
Non-canonical CURIE mappings
node_property_predicates: Set[str]
A set of predicates that ought to be treated as node properties (This is applicable for RDF)
predicate_mappings: Dict[str, str]
A mapping of predicate IRIs to property names (This is applicable for RDF)
Returns
-------
Dict
Input arguments as dictionary
"""
if not key:
key = os.path.basename(source["input"]["filename"][0])
input_format = source["input"]["format"]
input_compression = (
source["input"]["compression"] if "compression" in source["input"] else None
)
inputs = source["input"]["filename"]
filters = (
source["input"]["filters"]
if "filters" in source["input"] and source["input"]["filters"] is not None
else {}
)
node_filters = filters["node_filters"] if "node_filters" in filters else {}
edge_filters = filters["edge_filters"] if "edge_filters" in filters else {}
source_prefix_map = prefix_map.copy() if prefix_map else {}
source_prefix_map.update(
source["prefix_map"] if "prefix_map" in source and source["prefix_map"] else {}
)
source_predicate_mappings = predicate_mappings.copy() if predicate_mappings else {}
source_predicate_mappings.update(
source["predicate_mappings"]
if "predicate_mappings" in source and source["predicate_mappings"] is not None
else {}
)
source_node_property_predicates = (
        node_property_predicates.copy() if node_property_predicates else set()
)
source_node_property_predicates.update(
source["node_property_predicates"]
if "node_property_predicates" in source
and source["node_property_predicates"] is not None
else set()
)
if input_format in {"nt", "ttl"}:
input_args = {
"filename": inputs,
"format": input_format,
"compression": input_compression,
"node_filters": node_filters,
"edge_filters": edge_filters,
"prefix_map": source_prefix_map,
"predicate_mappings": source_predicate_mappings,
"node_property_predicates": source_node_property_predicates,
}
elif input_format in get_input_file_types():
input_args = {
"filename": inputs,
"format": input_format,
"compression": input_compression,
"node_filters": node_filters,
"edge_filters": edge_filters,
"prefix_map": source_prefix_map,
}
elif input_format == "neo4j":
input_args = {
"uri": source["uri"],
"username": source["username"],
"password": source["password"],
"format": input_format,
"node_filters": node_filters,
"edge_filters": edge_filters,
"prefix_map": prefix_map,
}
else:
raise TypeError(f"Type {input_format} not yet supported")
for ksf in knowledge_provenance_properties:
if ksf in source["input"]:
input_args[ksf] = source["input"][ksf]
input_args["operations"] = source["input"].get("operations", [])
for o in input_args["operations"]:
args = o["args"]
if "filename" in args:
filename = args["filename"]
            if output_directory and not filename.startswith(output_directory):
                o["args"]["filename"] = os.path.join(output_directory, filename)
return input_args
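# Illustrative sketch only: for a hypothetical TSV source entry such as
#   {"input": {"format": "tsv", "filename": ["mykg_nodes.tsv", "mykg_edges.tsv"]}}
# the helper above yields
#   {"filename": [...], "format": "tsv", "compression": None,
#    "node_filters": {}, "edge_filters": {}, "prefix_map": {}, "operations": []}
# (the file names here are made up; real keys come from the transform/merge YAML).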
def prepare_output_args(
key: str,
source: Dict,
output_directory: Optional[str],
reverse_prefix_map: Dict = None,
reverse_predicate_mappings: Dict = None,
property_types: Dict = None,
) -> Dict:
"""
Prepare output arguments for Transformer.
Parameters
----------
key: str
Source key
source: Dict
Source configuration
output_directory: str
Location to write output to
reverse_prefix_map: Dict[str, str]
Non-canonical CURIE mappings for export
reverse_predicate_mappings: Dict[str, str]
A mapping of property names to predicate IRIs (This is applicable for RDF)
property_types: Dict[str, str]
The xml property type for properties that are other than ``xsd:string``.
Relevant for RDF export.
Returns
-------
Dict
Output arguments as dictionary
"""
output_format = source["output"]["format"]
output_compression = (
source["output"]["compression"] if "compression" in source["output"] else None
)
output_filename = (
source["output"]["filename"] if "filename" in source["output"] else key
)
source_reverse_prefix_map = reverse_prefix_map.copy() if reverse_prefix_map else {}
source_reverse_prefix_map.update(
source["reverse_prefix_map"]
if "reverse_prefix_map" in source and source["reverse_prefix_map"]
else {}
)
source_reverse_predicate_mappings = (
reverse_predicate_mappings.copy() if reverse_predicate_mappings else {}
)
source_reverse_predicate_mappings.update(
source["reverse_predicate_mappings"]
if "reverse_predicate_mappings" in source
and source["reverse_predicate_mappings"] is not None
else {}
)
source_property_types = property_types.copy() if property_types else {}
    source_property_types.update(
        source["property_types"]
        if "property_types" in source and source["property_types"] is not None
        else {}
    )
if isinstance(output_filename, list):
output = output_filename[0]
else:
output = output_filename
if output_directory and not output.startswith(output_directory):
output = os.path.join(output_directory, output)
output_args = {"format": output_format}
if output_format == "neo4j":
output_args["uri"] = source["output"]["uri"]
output_args["username"] = source["output"]["username"]
output_args["password"] = source["output"]["password"]
elif output_format in get_input_file_types():
output_args["filename"] = output
output_args["compression"] = output_compression
if output_format == "nt":
output_args["reify_all_edges"] = (
source["output"]["reify_all_edges"]
if "reify_all_edges" in source["output"]
else True
)
output_args["reverse_prefix_map"] = source_reverse_prefix_map
output_args[
"reverse_predicate_mappings"
] = source_reverse_predicate_mappings
output_args["property_types"] = source_property_types
else:
raise ValueError(f"type {output_format} not yet supported for output")
return output_args
def apply_operations(source: dict, graph: BaseGraph) -> BaseGraph:
"""
Apply operations as defined in the YAML.
Parameters
----------
source: dict
The source from the YAML
graph: kgx.graph.base_graph.BaseGraph
The graph corresponding to the source
Returns
-------
kgx.graph.base_graph.BaseGraph
The graph corresponding to the source
"""
operations = source["operations"]
for operation in operations:
op_name = operation["name"]
op_args = operation["args"]
module_name = ".".join(op_name.split(".")[0:-1])
function_name = op_name.split(".")[-1]
f = getattr(importlib.import_module(module_name), function_name)
log.info(f"Applying operation {op_name} with args: {op_args}")
f(graph, **op_args)
return graph
def prepare_top_level_args(d: Dict) -> Dict:
"""
Parse top-level configuration.
Parameters
----------
d: Dict
The configuration section from the transform/merge YAML
Returns
-------
Dict
A parsed dictionary with parameters from configuration
"""
args = {}
if "checkpoint" in d and d["checkpoint"] is not None:
args["checkpoint"] = d["checkpoint"]
else:
args["checkpoint"] = False
if "node_property_predicates" in d and d["node_property_predicates"]:
args["node_property_predicates"] = set(d["node_property_predicates"])
else:
args["node_property_predicates"] = set()
if "predicate_mappings" in d and d["predicate_mappings"]:
args["predicate_mappings"] = d["predicate_mappings"]
else:
args["predicate_mappings"] = {}
if "prefix_map" in d and d["prefix_map"]:
args["prefix_map"] = d["prefix_map"]
else:
args["prefix_map"] = {}
if "reverse_prefix_map" in d and d["reverse_prefix_map"] is not None:
args["reverse_prefix_map"] = d["reverse_prefix_map"]
else:
args["reverse_prefix_map"] = {}
if (
"reverse_predicate_mappings" in d
and d["reverse_predicate_mappings"] is not None
):
args["reverse_predicate_mappings"] = d["reverse_predicate_mappings"]
else:
args["reverse_predicate_mappings"] = {}
if "property_types" in d and d["property_types"]:
args["property_types"] = d["property_types"]
else:
args["property_types"] = {}
return args
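# A small, self-contained sanity check of the top-level argument parsing above.
# The configuration keys below mirror the ones handled by prepare_top_level_args;
# the values are illustrative and not taken from any real transform/merge YAML.
if __name__ == "__main__":
    _demo_config = {
        "checkpoint": True,
        "node_property_predicates": ["biolink:synonym"],
        "prefix_map": {"OMIM": "https://omim.org/entry/"},
    }
    print(prepare_top_level_args(_demo_config))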
| 39,539 | 32.910806 | 125 |
py
|
kgx
|
kgx-master/kgx/cli/__init__.py
|
from sys import exit
from typing import List, Tuple, Optional, Dict
import click
import kgx
from kgx.config import get_logger, get_config
from kgx.cli.cli_utils import (
get_input_file_types,
parse_source,
apply_operations,
graph_summary,
validate,
neo4j_download,
neo4j_upload,
transform,
merge,
summary_report_types,
get_report_format_types,
)
log = get_logger()
config = get_config()
def error(msg):
log.error(msg)
quit()
@click.group()
@click.version_option(version=kgx.__version__, prog_name=kgx.__name__)
def cli():
"""
Knowledge Graph Exchange CLI entrypoint.
\f
"""
pass
@cli.command(name="graph-summary")
@click.argument("inputs", required=True, type=click.Path(exists=True), nargs=-1)
@click.option(
"--input-format",
"-i",
required=True,
help=f"The input format. Can be one of {get_input_file_types()}",
)
@click.option(
"--input-compression", "-c", required=False, help="The input compression type"
)
@click.option("--output", "-o", required=True, type=click.Path(exists=False))
@click.option(
"--report-type",
"-r",
required=False,
type=str,
help=f"The summary get_errors type. Must be one of {tuple(summary_report_types.keys())}",
default="kgx-map",
)
@click.option(
"--report-format",
"-f",
help=f"The input format. Can be one of {get_report_format_types()}",
)
@click.option(
"--graph-name",
"-n",
required=False,
help="User specified name of graph being summarized (default: 'Graph')",
)
@click.option(
"--node-facet-properties",
required=False,
multiple=True,
help="A list of node properties from which to generate counts per value for those properties",
)
@click.option(
"--edge-facet-properties",
required=False,
multiple=True,
help="A list of edge properties from which to generate counts per value for those properties",
)
@click.option(
"--error-log",
"-l",
required=False,
type=click.Path(exists=False),
    help='File within which to log graph data parsing errors (default: "stderr")',
)
def graph_summary_wrapper(
inputs: List[str],
input_format: str,
input_compression: Optional[str],
output: Optional[str],
report_type: str,
report_format: str,
graph_name: str,
node_facet_properties: Optional[List],
edge_facet_properties: Optional[List],
error_log: str = ''
):
"""
Loads and summarizes a knowledge graph from a set of input files.
\f
Parameters
----------
inputs: List[str]
Input file
input_format: str
Input file format
input_compression: Optional[str]
The input compression type
output: Optional[str]
Where to write the output (stdout, by default)
report_type: str
        The summary report type: "kgx-map" or "meta-knowledge-graph"
    report_format: Optional[str]
        The summary report format file types: 'yaml' or 'json' (default is report_type specific)
    graph_name: str
        User specified name of the graph being summarized
node_facet_properties: Optional[List]
A list of node properties from which to generate counts per value for those properties.
For example, ``['provided_by']``
edge_facet_properties: Optional[List]
A list of edge properties from which to generate counts per value for those properties.
For example, ``['original_knowledge_source', 'aggregator_knowledge_source']``
error_log: str
Where to write any graph processing error message (stderr, by default, for empty argument)
"""
try:
graph_summary(
inputs,
input_format,
input_compression,
output,
report_type,
report_format,
graph_name,
node_facet_properties=list(node_facet_properties),
edge_facet_properties=list(edge_facet_properties),
error_log=error_log,
)
exit(0)
except Exception as gse:
get_logger().error(f"kgx.graph_summary error: {str(gse)}")
exit(1)
@cli.command(name="validate")
@click.argument("inputs", required=True, type=click.Path(exists=True), nargs=-1)
@click.option(
"--input-format",
"-i",
required=True,
help=f"The input format. Can be one of {get_input_file_types()}",
)
@click.option(
"--input-compression", "-c", required=False, help="The input compression type"
)
@click.option(
"--output",
"-o",
required=False,
type=click.Path(exists=False),
help="File to write validation reports to",
)
@click.option(
"--biolink-release",
"-b",
required=False,
help="Biolink Model Release (SemVer) used for validation (default: latest Biolink Model Toolkit version)",
)
def validate_wrapper(
inputs: List[str],
input_format: str,
input_compression: str,
output: str,
biolink_release: str = None,
):
"""
Run KGX validator on an input file to check for Biolink Model compliance.
\f
Parameters
----------
inputs: List[str]
Input files
input_format: str
The input format
input_compression: str
The input compression type
output: str
Path to output file
biolink_release: Optional[str]
SemVer version of Biolink Model Release used for validation (default: latest Biolink Model Toolkit version)
"""
errors = []
try:
errors = validate(
inputs, input_format, input_compression, output, biolink_release
)
except Exception as ex:
get_logger().error(str(ex))
exit(2)
if errors:
get_logger().error("kgx.validate() errors encountered... check the error log")
exit(1)
else:
exit(0)
@cli.command(name="neo4j-download")
@click.option(
"--uri",
"-l",
required=True,
type=str,
help="Neo4j URI to download from. For example, https://localhost:7474",
)
@click.option("--username", "-u", required=True, type=str, help="Neo4j username")
@click.option("--password", "-p", required=True, type=str, help="Neo4j password")
@click.option(
"--output", "-o", required=True, type=click.Path(exists=False), help="Output"
)
@click.option(
"--output-format",
"-f",
required=True,
help=f"The output format. Can be one of {get_input_file_types()}",
)
@click.option(
"--output-compression", "-d", required=False, help="The output compression type"
)
@click.option("--stream", "-s", is_flag=True, help="Parse input as a stream")
@click.option(
"--node-filters",
"-n",
required=False,
type=click.Tuple([str, str]),
multiple=True,
help=f"Filters for filtering nodes from the input graph",
)
@click.option(
"--edge-filters",
"-e",
required=False,
type=click.Tuple([str, str]),
multiple=True,
help=f"Filters for filtering edges from the input graph",
)
def neo4j_download_wrapper(
uri: str,
username: str,
password: str,
output: str,
output_format: str,
output_compression: str,
stream: bool,
node_filters: Tuple,
edge_filters: Tuple,
):
"""
Download nodes and edges from Neo4j database.
\f
Parameters
----------
uri: str
Neo4j URI. For example, https://localhost:7474
username: str
Username for authentication
password: str
Password for authentication
output: str
Where to write the output (stdout, by default)
output_format: str
The output type (``tsv``, by default)
output_compression: str
The output compression type
stream: bool
Whether to parse input as a stream
node_filters: Tuple[str, str]
Node filters
edge_filters: Tuple[str, str]
Edge filters
"""
try:
neo4j_download(
uri,
username,
password,
output,
output_format,
output_compression,
stream,
node_filters,
edge_filters,
)
exit(0)
except Exception as nde:
get_logger().error(f"kgx.neo4j_download error: {str(nde)}")
exit(1)
@cli.command(name="neo4j-upload")
@click.argument("inputs", required=True, type=click.Path(exists=True), nargs=-1)
@click.option(
"--input-format",
"-i",
required=True,
help=f"The input format. Can be one of {get_input_file_types()}",
)
@click.option(
"--input-compression", "-c", required=False, help="The input compression type"
)
@click.option(
"--uri",
"-l",
required=True,
type=str,
help="Neo4j URI to upload to. For example, https://localhost:7474",
)
@click.option("--username", "-u", required=True, type=str, help="Neo4j username")
@click.option("--password", "-p", required=True, type=str, help="Neo4j password")
@click.option("--stream", "-s", is_flag=True, help="Parse input as a stream")
@click.option(
"--node-filters",
"-n",
required=False,
type=click.Tuple([str, str]),
multiple=True,
help=f"Filters for filtering nodes from the input graph",
)
@click.option(
"--edge-filters",
"-e",
required=False,
type=click.Tuple([str, str]),
multiple=True,
help=f"Filters for filtering edges from the input graph",
)
def neo4j_upload_wrapper(
inputs: List[str],
input_format: str,
input_compression: str,
uri: str,
username: str,
password: str,
stream: bool,
node_filters: Tuple[str, str],
edge_filters: Tuple[str, str],
):
"""
Upload a set of nodes/edges to a Neo4j database.
\f
Parameters
----------
inputs: List[str]
A list of files that contains nodes/edges
input_format: str
The input format
input_compression: str
The input compression type
uri: str
The full HTTP address for Neo4j database
username: str
Username for authentication
password: str
Password for authentication
stream: bool
Whether to parse input as a stream
node_filters: Tuple[str, str]
Node filters
edge_filters: Tuple[str, str]
Edge filters
"""
try:
neo4j_upload(
inputs,
input_format,
input_compression,
uri,
username,
password,
stream,
node_filters,
edge_filters,
)
exit(0)
except Exception as nue:
get_logger().error(f"kgx.neo4j_upload error: {str(nue)}")
exit(1)
@cli.command(name="transform")
@click.argument("inputs", required=False, type=click.Path(exists=True), nargs=-1)
@click.option(
"--input-format",
"-i",
required=False,
help=f"The input format. Can be one of {get_input_file_types()}",
)
@click.option(
"--input-compression", "-c", required=False, help="The input compression type"
)
@click.option(
"--output", "-o", required=False, type=click.Path(exists=False), help="Output"
)
@click.option(
"--output-format",
"-f",
required=False,
help=f"The output format. Can be one of {get_input_file_types()}",
)
@click.option(
"--output-compression", "-d", required=False, help="The output compression type"
)
@click.option("--stream", is_flag=True, help="Parse input as a stream")
@click.option(
"--node-filters",
"-n",
required=False,
type=click.Tuple([str, str]),
multiple=True,
help=f"Filters for filtering nodes from the input graph",
)
@click.option(
"--edge-filters",
"-e",
required=False,
type=click.Tuple([str, str]),
multiple=True,
help=f"Filters for filtering edges from the input graph",
)
@click.option(
"--transform-config", required=False, type=str, help=f"Transform config YAML"
)
@click.option(
"--source",
required=False,
type=str,
multiple=True,
help="Source(s) from the YAML to process",
)
@click.option(
"--knowledge-sources",
"-k",
required=False,
type=click.Tuple([str, str]),
multiple=True,
help="A named knowledge source with (string, boolean or tuple rewrite) specification",
)
@click.option(
"--infores-catalog",
required=False,
type=click.Path(exists=False),
help="Optional dump of a CSV file of InfoRes CURIE to Knowledge Source mappings",
)
@click.option(
"--processes",
"-p",
required=False,
type=int,
default=1,
help="Number of processes to use",
)
def transform_wrapper(
inputs: List[str],
input_format: str,
input_compression: str,
output: str,
output_format: str,
output_compression: str,
stream: bool,
node_filters: Optional[List[Tuple[str, str]]],
edge_filters: Optional[List[Tuple[str, str]]],
transform_config: str,
source: Optional[List],
knowledge_sources: Optional[List[Tuple[str, str]]],
processes: int,
infores_catalog: Optional[str] = None,
):
"""
Transform a Knowledge Graph from one serialization form to another.
\f
Parameters
----------
inputs: List[str]
A list of files that contains nodes/edges
input_format: str
The input format
input_compression: str
The input compression type
output: str
The output file
output_format: str
The output format
output_compression: str
        The output compression type
stream: bool
Whether or not to stream
node_filters: Optional[List[Tuple[str, str]]]
Node input filters
edge_filters: Optional[List[Tuple[str, str]]]
Edge input filters
transform_config: str
Transform config YAML
source: List
A list of source(s) to load from the YAML
knowledge_sources: Optional[List[Tuple[str, str]]]
A list of named knowledge sources with (string, boolean or tuple rewrite) specification
infores_catalog: Optional[str]
Optional dump of a TSV file of InfoRes CURIE to Knowledge Source mappings
processes: int
Number of processes to use
"""
try:
transform(
inputs,
input_format=input_format,
input_compression=input_compression,
output=output,
output_format=output_format,
output_compression=output_compression,
stream=stream,
node_filters=node_filters,
edge_filters=edge_filters,
transform_config=transform_config,
source=source,
knowledge_sources=knowledge_sources,
processes=processes,
infores_catalog=infores_catalog,
)
exit(0)
except Exception as te:
get_logger().error(f"kgx.transform error: {str(te)}")
exit(1)
@cli.command(name="merge")
@click.option("--merge-config", required=True, type=str)
@click.option(
"--source",
required=False,
type=str,
multiple=True,
help="Source(s) from the YAML to process",
)
@click.option(
"--destination",
required=False,
type=str,
multiple=True,
help="Destination(s) from the YAML to process",
)
@click.option(
"--processes",
"-p",
required=False,
type=int,
default=1,
help="Number of processes to use",
)
def merge_wrapper(merge_config: str, source: List, destination: List, processes: int):
"""
Load nodes and edges from files and KGs, as defined in a config YAML, and merge them into a single graph.
The merged graph can then be written to a local/remote Neo4j instance OR be serialized into a file.
\f
.. note::
Everything here is driven by the ``merge-config`` YAML.
Parameters
----------
merge_config: str
Merge config YAML
source: List
A list of source to load from the YAML
destination: List
A list of destination to write to, as defined in the YAML
processes: int
Number of processes to use
"""
try:
merge(merge_config, source, destination, processes)
exit(0)
except Exception as me:
get_logger().error(f"kgx.merge error: {str(me)}")
exit(1)
| 16,105 | 25.888147 | 115 |
py
|
kgx
|
kgx-master/kgx/graph_operations/graph_merge.py
|
import copy
from typing import List
from kgx.config import get_logger
from kgx.graph.base_graph import BaseGraph
from kgx.utils.kgx_utils import prepare_data_dict
log = get_logger()
def merge_all_graphs(graphs: List[BaseGraph], preserve: bool = True) -> BaseGraph:
"""
Merge one or more graphs.
.. note::
This method will first pick the largest graph in ``graphs`` and use that
as the target to merge the remaining graphs. This is to reduce the memory
        footprint for this operation. The largest graph is taken to be the one
        with the largest number of edges.
The caveat is that the merge operation has a side effect where the largest
graph is altered.
If you would like to ensure that all incoming graphs remain as-is, then
look at ``merge_graphs``.
The outcome of the merge on node and edge properties depend on the ``preserve`` parameter.
If preserve is ``True`` then,
- core properties will not be overwritten
- other properties will be concatenated to a list
If preserve is ``False`` then,
- core properties will not be overwritten
- other properties will be replaced
Parameters
----------
graphs: List[kgx.graph.base_graph.BaseGraph]
A list of instances of BaseGraph to merge
preserve: bool
Whether or not to preserve conflicting properties
Returns
-------
kgx.graph.base_graph.BaseGraph
The merged graph
"""
graph_size = [len(x.edges()) for x in graphs]
largest = graphs.pop(graph_size.index(max(graph_size)))
log.debug(
f"Largest graph {largest.name} has {len(largest.nodes())} nodes and {len(largest.edges())} edges"
)
merged_graph = merge_graphs(largest, graphs, preserve)
return merged_graph
def merge_graphs(
graph: BaseGraph, graphs: List[BaseGraph], preserve: bool = True
) -> BaseGraph:
"""
Merge all graphs in ``graphs`` to ``graph``.
Parameters
----------
graph: kgx.graph.base_graph.BaseGraph
An instance of BaseGraph
graphs: List[kgx.graph.base_graph.BaseGraph]
A list of instances of BaseGraph to merge
preserve: bool
Whether or not to preserve conflicting properties
Returns
-------
kgx.graph.base_graph.BaseGraph
The merged graph
"""
for g in graphs:
node_merge_count = add_all_nodes(graph, g, preserve)
log.info(
f"Number of nodes merged between {graph.name} and {g.name}: {node_merge_count}"
)
edge_merge_count = add_all_edges(graph, g, preserve)
log.info(
f"Number of edges merged between {graph.name} and {g.name}: {edge_merge_count}"
)
return graph
def add_all_nodes(g1: BaseGraph, g2: BaseGraph, preserve: bool = True) -> int:
"""
Add all nodes from source graph (``g2``) to target graph (``g1``).
Parameters
----------
g1: kgx.graph.base_graph.BaseGraph
Target graph
g2: kgx.graph.base_graph.BaseGraph
Source graph
preserve: bool
Whether or not to preserve conflicting properties
Returns
-------
int
Number of nodes merged during this operation
"""
log.info(f"Adding {g2.number_of_nodes()} nodes from {g2.name} to {g1.name}")
merge_count = 0
for n, data in g2.nodes(data=True):
if n in g1.nodes():
merge_node(g1, n, data, preserve)
merge_count += 1
else:
g1.add_node(n, **data)
return merge_count
def merge_node(g: BaseGraph, n: str, data: dict, preserve: bool = True) -> dict:
"""
Merge node ``n`` into graph ``g``.
Parameters
----------
g: kgx.graph.base_graph.BaseGraph
The target graph
n: str
Node id
data: dict
Node properties
preserve: bool
Whether or not to preserve conflicting properties
Returns
-------
dict
The merged node
"""
existing_node = g.nodes()[n]
new_data = prepare_data_dict(
copy.deepcopy(existing_node), copy.deepcopy(data), preserve
)
g.add_node(n, **new_data)
return existing_node
def add_all_edges(g1: BaseGraph, g2: BaseGraph, preserve: bool = True) -> int:
"""
Add all edges from source graph (``g2``) to target graph (``g1``).
Parameters
----------
g1: kgx.graph.base_graph.BaseGraph
Target graph
g2: kgx.graph.base_graph.BaseGraph
Source graph
preserve: bool
Whether or not to preserve conflicting properties
Returns
-------
int
Number of edges merged during this operation
"""
log.info(f"Adding {g2.number_of_edges()} edges from {g2} to {g1}")
merge_count = 0
for u, v, key, data in g2.edges(keys=True, data=True):
if g1.has_edge(u, v, key):
merge_edge(g1, u, v, key, data, preserve)
merge_count += 1
else:
g1.add_edge(u, v, edge_key=key, **data)
return merge_count
def merge_edge(
g: BaseGraph, u: str, v: str, key: str, data: dict, preserve: bool = True
) -> dict:
"""
Merge edge ``u`` -> ``v`` into graph ``g``.
Parameters
----------
g: kgx.graph.base_graph.BaseGraph
The target graph
u: str
Subject node id
v: str
Object node id
key: str
Edge key
data: dict
        Edge properties
preserve: bool
Whether or not to preserve conflicting properties
Returns
-------
dict
The merged edge
"""
existing_edge = g.get_edge(u, v, key)
new_data = prepare_data_dict(
copy.deepcopy(existing_edge), copy.deepcopy(data), preserve
)
g.add_edge(u, v, edge_key=key, **new_data)
return existing_edge
| 5,776 | 25.995327 | 105 |
py
|
kgx
|
kgx-master/kgx/graph_operations/meta_knowledge_graph.py
|
"""
Translator Reasoner API 'meta-knowledge-graph' endpoint analogous graph summary module.
"""
from typing import Dict, List, Optional, Any, Callable, Set, Tuple
import re
import yaml
from json import dump
from json.encoder import JSONEncoder
from deprecation import deprecated
from kgx.error_detection import ErrorType, MessageLevel, ErrorDetecting
from kgx.utils.kgx_utils import GraphEntityType
from kgx.prefix_manager import PrefixManager
from kgx.graph.base_graph import BaseGraph
"""
Generate a knowledge map that corresponds to TRAPI KnowledgeMap.
Specification based on TRAPI Draft PR: https://github.com/NCATSTranslator/ReasonerAPI/pull/171
"""
####################################################################
# Next Generation Implementation of Graph Summary coding which
# leverages the new "Transformer.process()" data stream "Inspector"
# design pattern, implemented here as a "Callable" inspection class.
####################################################################
def mkg_default(o):
"""
JSONEncoder 'default' function override to
properly serialize 'Set' objects (into 'List')
"""
if isinstance(o, MetaKnowledgeGraph.Category):
return o.json_object()
else:
try:
iterable = iter(o)
except TypeError:
pass
else:
return list(iterable)
# Let the base class default method raise the TypeError
return JSONEncoder().default(o)
_category_curie_regexp = re.compile("^biolink:[A-Z][a-zA-Z]*$")
_predicate_curie_regexp = re.compile("^biolink:[a-z][a-z_]*$")
class MetaKnowledgeGraph(ErrorDetecting):
"""
Class for generating a TRAPI 1.1 style of "meta knowledge graph" summary.
The optional 'progress_monitor' for the validator should be a lightweight Callable
which is injected into the class 'inspector' Callable, designed to intercepts
node and edge records streaming through the Validator (inside a Transformer.process() call.
The first (GraphEntityType) argument of the Callable tags the record as a NODE or an EDGE.
The second argument given to the Callable is the current record itself.
This Callable is strictly meant to be procedural and should *not* mutate the record.
The intent of this Callable is to provide a hook to KGX applications wanting the
namesake function of passively monitoring the graph data stream. As such, the Callable
could simply tally up the number of times it is called with a NODE or an EDGE, then
provide a suitable (quick!) report of that count back to the KGX application. The
Callable (function/callable class) should not modify the record and should be of low
complexity, so as not to introduce a large computational overhead to validation!
"""
def __init__(
self,
name="",
node_facet_properties: Optional[List] = None,
edge_facet_properties: Optional[List] = None,
progress_monitor: Optional[Callable[[GraphEntityType, List], None]] = None,
error_log=None,
**kwargs,
):
"""
MetaKnowledgeGraph constructor.
Parameters
----------
name: str
(Graph) name assigned to the summary.
node_facet_properties: Optional[List]
A list of node properties (e.g. knowledge_source tags) to facet on. For example, ``['provided_by']``
edge_facet_properties: Optional[List]
A list of edge properties (e.g. knowledge_source tags) to facet on. For example,
``['original_knowledge_source', 'aggregator_knowledge_source']``
progress_monitor: Optional[Callable[[GraphEntityType, List], None]]
Function given a peek at the current record being stream processed by the class wrapped Callable.
error_log:
Where to write any graph processing error message (stderr, by default).
"""
ErrorDetecting.__init__(self, error_log)
# formal args
self.name = name
# these facet properties are used mainly for knowledge_source counting
# using Biolink 2.0 'knowledge_source' slot values
if node_facet_properties:
self.node_facet_properties: Optional[List] = node_facet_properties
else:
# node counts still default to 'provided_by'
self.node_facet_properties: Optional[List] = ["provided_by"]
if edge_facet_properties:
self.edge_facet_properties: Optional[List] = edge_facet_properties
else:
            # edge counts still default to 'knowledge_source'
self.edge_facet_properties: Optional[List] = ["knowledge_source"]
self.progress_monitor: Optional[
Callable[[GraphEntityType, List], None]
] = progress_monitor
# internal attributes
# For Nodes...
self.node_catalog: Dict[str, List[int]] = dict()
self.node_stats: Dict[str, MetaKnowledgeGraph.Category] = dict()
# We no longer track 'unknown' categories in meta-knowledge-graph
# computations since such nodes are not TRAPI 1.1 compliant categories
# self.node_stats['unknown'] = self.Category('unknown')
# For Edges...
self.edge_record_count: int = 0
self.predicates: Dict = dict()
self.association_map: Dict = dict()
self.edge_stats = []
# Overall graph statistics
self.graph_stats: Dict[str, Dict] = dict()
def get_name(self) -> str:
"""
Returns
-------
str
Currently assigned knowledge graph name.
"""
return self.name
def __call__(self, entity_type: GraphEntityType, rec: List):
"""
Transformer 'inspector' Callable, for analysing a stream of graph data.
Parameters
----------
entity_type: GraphEntityType
indicates what kind of record being passed to the function for analysis.
rec: Dict
Complete data dictionary of the given record.
"""
if self.progress_monitor:
self.progress_monitor(entity_type, rec)
if entity_type == GraphEntityType.EDGE:
self.analyse_edge(*rec)
elif entity_type == GraphEntityType.NODE:
self.analyse_node(*rec)
else:
raise RuntimeError("Unexpected GraphEntityType: " + str(entity_type))
@staticmethod
def get_facet_counts(facets: Optional[List], counts_by_source: Dict, data: Dict):
"""
Get node or edge facet counts
"""
unknown: bool = True
for facet in facets:
if facet in data:
unknown = False
if isinstance(data[facet], str):
facet_values = [data[facet]]
else:
# assume regular iterable
facet_values = list(data[facet])
for s in facet_values:
if facet not in counts_by_source:
counts_by_source[facet] = dict()
if s in counts_by_source[facet]:
counts_by_source[facet][s] += 1
else:
counts_by_source[facet][s] = 1
if unknown:
if "unknown" in counts_by_source:
counts_by_source["unknown"] += 1
else:
counts_by_source["unknown"] = 1
class Category:
"""
Internal class for compiling statistics about a distinct category.
"""
# The 'category map' just associates a unique int catalog
# index ('cid') value as a proxy for the full curie string,
# to reduce storage in the main node catalog
_category_curie_map: List[str] = list()
def __init__(self, category_curie: str, mkg):
"""
MetaKnowledgeGraph.Category constructor.
category_curie: str
Biolink Model category CURIE identifier.
"""
if not (
_category_curie_regexp.fullmatch(category_curie)
or category_curie == "unknown"
):
raise RuntimeError("Invalid Biolink category CURIE: " + category_curie)
self.category_curie = category_curie
self.mkg = mkg
if self.category_curie not in self._category_curie_map:
self._category_curie_map.append(self.category_curie)
self.category_stats: Dict[str, Any] = dict()
self.category_stats["id_prefixes"] = set()
self.category_stats["count"] = 0
self.category_stats["count_by_source"] = dict()
def get_name(self) -> str:
"""
Returns
-------
str
CURIE name of the category.
"""
return self.category_curie
def get_cid(self):
"""
Returns
-------
int
Internal MetaKnowledgeGraph index id for tracking a Category.
"""
return self._category_curie_map.index(self.category_curie)
@classmethod
def get_category_curie_from_index(cls, cid: int) -> str:
"""
Parameters
----------
cid: int
Internal MetaKnowledgeGraph index id for tracking a Category.
Returns
-------
str
Curie identifier of the Category.
"""
return cls._category_curie_map[cid]
def get_id_prefixes(self) -> Set[str]:
"""
Returns
-------
Set[str]
Set of identifier prefix (strings) used by nodes of this Category.
"""
return self.category_stats["id_prefixes"]
def get_count(self) -> int:
"""
Returns
-------
int
Count of nodes which have this category.
"""
return self.category_stats["count"]
def get_count_by_source(
self, facet: str = "provided_by", source: str = None
) -> Dict[str, Any]:
"""
Parameters
----------
facet: str
Facet tag (default, 'provided_by') from which the count should be returned
source: str
Source name about which the count is desired.
Returns
-------
Dict
Count of nodes, by node 'provided_by' knowledge source, for a given category.
Returns dictionary of all source counts, if input 'source' argument is not specified.
"""
if source and facet in self.category_stats["count_by_source"]:
if source in self.category_stats["count_by_source"][facet]:
return {
source: self.category_stats["count_by_source"][facet][source]
}
else:
return {source: 0}
return self.category_stats["count_by_source"]
def _compile_prefix_stats(self, n: str):
prefix = PrefixManager.get_prefix(n)
if not prefix:
error_type = ErrorType.MISSING_NODE_CURIE_PREFIX
self.mkg.log_error(
entity=n,
error_type=error_type,
message="Node 'id' has no CURIE prefix",
message_level=MessageLevel.WARNING
)
else:
if prefix not in self.category_stats["id_prefixes"]:
self.category_stats["id_prefixes"].add(prefix)
def _compile_category_source_stats(self, data: Dict):
self.mkg.get_facet_counts(
self.mkg.node_facet_properties,
self.category_stats["count_by_source"],
data,
)
def analyse_node_category(self, n, data) -> None:
"""
Analyse metadata of a given graph node record of this category.
Parameters
----------
n: str
                Curie identifier of the node record.
data: Dict
Complete data dictionary of node record fields.
"""
self.category_stats["count"] += 1
self._compile_prefix_stats(n)
self._compile_category_source_stats(data)
def json_object(self):
"""
Returns
-------
Dict[str, Any]
Returns JSON friendly metadata for this category.,
"""
return {
"id_prefixes": list(self.category_stats["id_prefixes"]),
"count": self.category_stats["count"],
"count_by_source": self.category_stats["count_by_source"],
}
def get_category(self, category_curie: str) -> Category:
"""
        Get the MetaKnowledgeGraph.Category object that tracks a given (Biolink) category.
Parameters
----------
category_curie: str
Curie identifier for the (Biolink) category.
Returns
-------
Category
MetaKnowledgeGraph.Category object for a given Biolink category.
"""
return self.node_stats[category_curie]
def _process_category_field(self, category_field: str, n: str, data: Dict):
# we note here that category_curie *may be*
# a piped '|' set of Biolink category CURIE values
category_list = category_field.split("|")
# analyse them each independently...
for category_curie in category_list:
if category_curie not in self.node_stats:
try:
self.node_stats[category_curie] = self.Category(
category_curie, self
)
except RuntimeError:
error_type = ErrorType.INVALID_CATEGORY
self.log_error(
entity=n,
error_type=error_type,
message=f"Invalid node 'category' CURIE: '{category_curie}'"
)
continue
category_record = self.node_stats[category_curie]
category_idx: int = category_record.get_cid()
if category_idx not in self.node_catalog[n]:
self.node_catalog[n].append(category_idx)
category_record.analyse_node_category(n, data)
def analyse_node(self, n: str, data: Dict) -> None:
"""
Analyse metadata of one graph node record.
Parameters
----------
n: str
            Curie identifier of the node record.
data: Dict
Complete data dictionary of node record fields.
"""
# The TRAPI release 1.1 meta_knowledge_graph format indexes nodes by biolink:Category
# the node 'category' field is a list of assigned categories (usually just one...).
# However, this may perhaps sometimes result in duplicate counting and conflation of prefixes(?).
if n in self.node_catalog:
# Report duplications of node records, as discerned from node id.
error_type = ErrorType.DUPLICATE_NODE
self.log_error(
entity=n,
error_type=error_type,
message="Node 'id' duplicated in input data",
message_level=MessageLevel.WARNING
)
return
else:
self.node_catalog[n] = list()
if "category" not in data or not data["category"]:
# we now simply exclude nodes with missing categories from the count, since a category
# of 'unknown' in the meta_knowledge_graph output is considered invalid.
# category = self.node_stats['unknown']
# category.analyse_node_category(n, data)
error_type = ErrorType.MISSING_CATEGORY
self.log_error(
entity=n,
error_type=error_type,
message="Missing node 'category'"
)
return
categories = data["category"]
# analyse them each independently...
for category_field in categories:
self._process_category_field(category_field, n, data)
def _capture_predicate(self, subj, obj, data: Dict) -> Optional[str]:
subj_obj_label = f"{str(subj)}->{str(obj)}"
if "predicate" not in data:
# We no longer track edges with 'unknown' predicates,
# since those would not be TRAPI 1.1 JSON compliant...
# self.predicates['unknown'] += 1
# predicate = "unknown"
error_type = ErrorType.MISSING_EDGE_PREDICATE
self.log_error(
entity=subj_obj_label,
error_type=error_type,
message="Empty predicate CURIE in edge data",
message_level=MessageLevel.ERROR
)
self.edge_record_count -= 1
return None
else:
predicate = data["predicate"]
if not _predicate_curie_regexp.fullmatch(predicate):
error_type = ErrorType.INVALID_EDGE_PREDICATE
self.log_error(
entity=subj_obj_label,
error_type=error_type,
message=f"Invalid predicate CURIE: '{predicate}'",
message_level=MessageLevel.ERROR
)
self.edge_record_count -= 1
return None
if predicate not in self.predicates:
# just need to track the number
# of edge records using this predicate
self.predicates[predicate] = 0
self.predicates[predicate] += 1
return predicate
def _compile_triple_source_stats(self, triple: Tuple[str, str, str], data: Dict):
self.get_facet_counts(
self.edge_facet_properties,
self.association_map[triple]["count_by_source"],
data,
)
@staticmethod
def _normalize_relation_field(field) -> Set:
# various non-string iterables...
if isinstance(field, List) or \
isinstance(field, Tuple) or \
isinstance(field, Set):
# eliminate duplicate terms
# and normalize to a set
return set(field)
elif isinstance(field, str):
# for uniformity, we coerce
# to a set of one element
return {field}
else:
raise TypeError(f"Unexpected KGX edge 'relation' data field of type '{type(field)}'")
def _process_triple(
self, subject_category: str, predicate: str, object_category: str, data: Dict
):
# Process the 'valid' S-P-O triple here...
triple = (subject_category, predicate, object_category)
if triple not in self.association_map:
self.association_map[triple] = {
"subject": triple[0],
"predicate": triple[1],
"object": triple[2],
"relations": set(),
"count_by_source": dict(),
"count": 0,
}
# patch for observed defect in some ETL's such as the July 2021 SRI Reference graph
# in which the relation field ends up being a list of terms, sometimes duplicated
if "relation" in data:
# input data["relation"] is normalized to a Set here
data["relation"] = self._normalize_relation_field(data["relation"])
self.association_map[triple]["relations"].update(data["relation"])
self.association_map[triple]["count"] += 1
self._compile_triple_source_stats(triple, data)
def analyse_edge(self, u, v, k, data) -> None:
"""
Analyse metadata of one graph edge record.
Parameters
----------
u: str
Subject node curie identifier of the edge.
v: str
            Object node curie identifier of the edge.
k: str
Key identifier of the edge record (not used here).
data: Dict
Complete data dictionary of edge record fields.
"""
# we blissfully assume that all the nodes of a
# graph stream were analysed first by the MetaKnowledgeGraph
# before the edges are analysed, thus we can test for
# node 'n' existence internally, by identifier.
#
# Given the use case of multiple categories being assigned to a given node in a KGX data file,
# either by category inheritance (ancestry all the way back up to NamedThing)
# or by conflation (i.e. gene == protein id?), then the Cartesian product of
# subject/object edges mappings need to be captured here.
#
self.edge_record_count += 1
predicate: str = self._capture_predicate(u, v, data)
if not predicate:
# relationship needs a predicate to process?
return
if u not in self.node_catalog:
error_type = ErrorType.MISSING_NODE
self.log_error(
entity=u,
error_type=error_type,
message="Subject 'id' not found in the node catalog"
)
# removing from edge count
self.edge_record_count -= 1
self.predicates[predicate] -= 1
return
for subj_cat_idx in self.node_catalog[u]:
subject_category: str = self.Category.get_category_curie_from_index(
subj_cat_idx
)
if v not in self.node_catalog:
error_type = ErrorType.MISSING_NODE
self.log_error(
entity=v,
error_type=error_type,
message="Object 'id' not found in the node catalog"
)
self.edge_record_count -= 1
self.predicates[predicate] -= 1
return
for obj_cat_idx in self.node_catalog[v]:
object_category: str = self.Category.get_category_curie_from_index(
obj_cat_idx
)
self._process_triple(subject_category, predicate, object_category, data)
def get_number_of_categories(self) -> int:
"""
Counts the number of distinct (Biolink) categories encountered
in the knowledge graph (not including those of 'unknown' category)
Returns
-------
int
Number of distinct (Biolink) categories found in the graph (excluding nodes with 'unknown' category)
"""
# 'unknown' not tracked anymore...
# return len([c for c in self.node_stats.keys() if c != 'unknown'])
return len(self.node_stats.keys())
def get_node_stats(self) -> Dict[str, Dict]:
"""
Returns
-------
Dict[str, Category]
Statistics for the nodes in the graph.
"""
# We no longer track 'unknown' node category counts - non TRAPI 1.1. compliant output
# if 'unknown' in self.node_stats and not self.node_stats['unknown'].get_count():
# self.node_stats.pop('unknown')
# Here we assume that the node_stats are complete and will now
# be exported in a graph summary for the module, thus we aim to
# Convert the 'MetaKnowledgeGraph.Category' object into vanilla
# Python dictionary and lists, to facilitate output
category_stats = dict()
for category_curie in self.node_stats.keys():
category_obj = self.node_stats[category_curie]
category_stats[category_curie] = dict()
# Convert id_prefixes Set into a sorted List
category_stats[category_curie]["id_prefixes"] = sorted(category_obj.category_stats["id_prefixes"])
category_stats[category_curie]["count"] = category_obj.category_stats["count"]
category_stats[category_curie]["count_by_source"] = category_obj.category_stats["count_by_source"]
return category_stats
def get_edge_stats(self) -> List[Dict[str, Any]]:
"""
Returns
-------
List[Dict[str, Any]]
Knowledge map for the list of edges in the graph.
"""
# Not sure if this is "safe" but assume
# that edge_stats may be cached once computed?
if not self.edge_stats:
for k, v in self.association_map.items():
kedge = v
relations = list(v["relations"])
kedge["relations"] = relations
self.edge_stats.append(kedge)
return self.edge_stats
def get_total_nodes_count(self) -> int:
"""
Counts the total number of distinct nodes in the knowledge graph
(**not** including those ignored due to being of 'unknown' category)
Returns
-------
int
            Number of distinct nodes in the knowledge graph.
"""
return len(self.node_catalog)
def get_node_count_by_category(self, category_curie: str) -> int:
"""
        Counts the number of nodes in the graph
with the specified (Biolink) category curie.
Parameters
----------
category_curie: str
Curie identifier for the (Biolink) category.
Returns
-------
int
Number of nodes for the given category.
Raises
------
RuntimeError
Error if category identifier is empty string or None.
"""
if not category_curie:
raise RuntimeError(
"get_node_count_by_category(): null or empty category argument!?"
)
if category_curie in self.node_stats.keys():
return self.node_stats[category_curie].get_count()
else:
return 0
def get_total_node_counts_across_categories(self) -> int:
"""
The aggregate count of all node to category mappings for every category.
Note that nodes with multiple categories will have their count replicated
under each of its categories.
Returns
-------
int
Total count of node to category mappings for the graph.
"""
count = 0
for category in self.node_stats.values():
count += category.get_count()
return count
def get_total_edges_count(self) -> int:
"""
Gets the total number of 'valid' edges in the data set
(ignoring those with 'unknown' subject or predicate category mappings)
Returns
----------
int
Total count of edges in the graph.
"""
return self.edge_record_count
def get_edge_mapping_count(self) -> int:
"""
Counts the number of distinct edge
Subject (category) - P (predicate) -> Object (category)
mappings in the knowledge graph.
Returns
----------
int
Count of subject(category) - predicate -> object(category) mappings in the graph.
"""
return len(self.get_edge_stats())
def get_predicate_count(self) -> int:
"""
Counts the number of distinct edge predicates
in the knowledge graph.
Returns
----------
int
Number of distinct (Biolink) predicates in the graph.
"""
return len(self.predicates)
def get_edge_count_by_predicate(self, predicate_curie: str) -> int:
"""
Counts the number of edges in the graph with the specified predicate.
Parameters
----------
predicate_curie: str
(Biolink) curie identifier for the predicate.
Returns
-------
int
Number of edges for the given predicate.
Raises
------
RuntimeError
Error if predicate identifier is empty string or None.
"""
if not predicate_curie:
raise RuntimeError(
"get_node_count_by_category(): null or empty predicate argument!?"
)
if predicate_curie in self.predicates:
return self.predicates[predicate_curie]
return 0
def get_total_edge_counts_across_mappings(self) -> int:
"""
Aggregate count of the edges in the graph for every mapping. Edges
with subject and object nodes with multiple assigned categories will
have their count replicated under each distinct mapping of its categories.
Returns
-------
int
Number of the edges counted across all mappings.
"""
count = 0
for edge in self.get_edge_stats():
count += edge["count"]
return count
def get_edge_count_by_source(
self,
subject_category: str,
predicate: str,
object_category: str,
facet: str = "knowledge_source",
source: Optional[str] = None,
) -> Dict[str, Any]:
"""
Returns count by source for one S-P-O triple (S, O being Biolink categories; P, a Biolink predicate)
"""
spo_label = f"Edge {str(subject_category)}-{str(predicate)}->{str(object_category)}"
if not (subject_category and predicate and object_category):
error_type = ErrorType.MISSING_EDGE_PROPERTY
self.log_error(
entity=spo_label,
error_type=error_type,
message="Incomplete S-P-O triple",
message_level=MessageLevel.WARNING
)
return dict()
triple = (subject_category, predicate, object_category)
if (
triple in self.association_map
and "count_by_source" in self.association_map[triple]
):
if facet in self.association_map[triple]["count_by_source"]:
if source:
if source in self.association_map[triple]["count_by_source"][facet]:
return self.association_map[triple]["count_by_source"][facet][
source
]
else:
return dict()
else:
return self.association_map[triple]["count_by_source"][facet]
else:
return dict()
else:
error_type = ErrorType.INVALID_EDGE_TRIPLE
self.log_error(
entity=spo_label,
error_type=error_type,
message="Unknown S-P-O triple?",
message_level=MessageLevel.WARNING
)
return dict()
def summarize_graph_nodes(self, graph: BaseGraph) -> Dict:
"""
Summarize the nodes in a graph.
Parameters
----------
graph: kgx.graph.base_graph.BaseGraph
The graph
Returns
-------
Dict
The node stats
"""
for n, data in graph.nodes(data=True):
self.analyse_node(n, data)
return self.get_node_stats()
def summarize_graph_edges(self, graph: BaseGraph) -> List[Dict]:
"""
Summarize the edges in a graph.
Parameters
----------
graph: kgx.graph.base_graph.BaseGraph
The graph
Returns
-------
List[Dict]
The edge stats
"""
for u, v, k, data in graph.edges(keys=True, data=True):
self.analyse_edge(u, v, k, data)
return self.get_edge_stats()
def summarize_graph(self, graph: BaseGraph, name: str = None, **kwargs) -> Dict:
"""
Generate a meta knowledge graph that describes the composition of the graph.
Parameters
----------
graph: kgx.graph.base_graph.BaseGraph
The graph
name: Optional[str]
Name for the graph
kwargs: Dict
Any additional arguments (ignored in this method at present)
Returns
-------
Dict
A TRAPI 1.1 compliant meta knowledge graph of the knowledge graph returned as a dictionary.
"""
if not self.graph_stats:
node_stats = self.summarize_graph_nodes(graph)
edge_stats = self.summarize_graph_edges(graph)
# JSON sent back as TRAPI 1.1 version,
# without the global 'knowledge_map' object tag
self.graph_stats = {"nodes": node_stats, "edges": edge_stats}
if name:
self.graph_stats["name"] = name
else:
self.graph_stats["name"] = self.name
return self.graph_stats
def get_graph_summary(self, name: str = None, **kwargs) -> Dict:
"""
Similar to summarize_graph except that the node and edge statistics are already captured
in the MetaKnowledgeGraph class instance (perhaps by Transformer.process() stream inspection)
and therefore, the data structure simply needs to be 'finalized' for saving or similar use.
Parameters
----------
name: Optional[str]
Name for the graph (if being renamed)
kwargs: Dict
Any additional arguments (ignored in this method at present)
Returns
-------
Dict
A TRAPI 1.1 compliant meta knowledge graph of the knowledge graph returned as a dictionary.
"""
if not self.graph_stats:
# JSON sent back as TRAPI 1.1 version,
# without the global 'knowledge_map' object tag
self.graph_stats = {
"nodes": self.get_node_stats(),
"edges": self.get_edge_stats(),
}
if name:
self.graph_stats["name"] = name
else:
self.graph_stats["name"] = self.name
return self.graph_stats
def save(self, file, name: str = None, file_format: str = "json") -> None:
"""
Save the current MetaKnowledgeGraph to a specified (open) file (device).
Parameters
----------
file: File
Text file handler open for writing.
name: str
Optional string to which to (re-)name the graph.
file_format: str
Text output format ('json' or 'yaml') for the saved meta knowledge graph (default: 'json')
Returns
-------
None
"""
stats = self.get_graph_summary(name)
if not file_format or file_format == "json":
dump(stats, file, indent=4)
else:
yaml.dump(stats, file)
@deprecated(deprecated_in="1.5.8", details="Default is the use streaming graph_summary with inspector")
def generate_meta_knowledge_graph(graph: BaseGraph, name: str, filename: str, **kwargs) -> None:
"""
Generate a knowledge map that describes
the composition of the graph and write to ``filename``.
Parameters
----------
graph: kgx.graph.base_graph.BaseGraph
The graph
name: Optional[str]
Name for the graph
filename: str
The file to write the knowledge map to
"""
graph_stats = summarize_graph(graph, name, **kwargs)
with open(filename, mode="w") as mkgh:
dump(graph_stats, mkgh, indent=4, default=mkg_default)
def summarize_graph(graph: BaseGraph, name: str = None, **kwargs) -> Dict:
"""
Generate a meta knowledge graph that describes the composition of the graph.
Parameters
----------
graph: kgx.graph.base_graph.BaseGraph
The graph
name: Optional[str]
Name for the graph
kwargs: Dict
Any additional arguments
Returns
-------
Dict
A TRAPI 1.1 compliant meta knowledge graph of the knowledge graph returned as a dictionary.
"""
mkg = MetaKnowledgeGraph(name, **kwargs)
return mkg.summarize_graph(graph)
| 36,314 | 34.99108 | 116 |
py
|
kgx
|
kgx-master/kgx/graph_operations/clique_merge.py
|
import copy
from typing import Tuple, Optional, Dict, List, Any, Set, Union
import networkx as nx
from ordered_set import OrderedSet
from kgx.config import get_logger
from kgx.graph.base_graph import BaseGraph
from kgx.utils.kgx_utils import (
get_prefix_prioritization_map,
get_biolink_element,
get_biolink_ancestors,
current_time_in_millis,
format_biolink_category,
generate_edge_key,
get_toolkit,
)
log = get_logger()
toolkit = get_toolkit()
SAME_AS = "biolink:same_as"
SUBCLASS_OF = "biolink:subclass_of"
LEADER_ANNOTATION = "clique_leader"
ORIGINAL_SUBJECT_PROPERTY = "_original_subject"
ORIGINAL_OBJECT_PROPERTY = "_original_object"
def clique_merge(
target_graph: BaseGraph,
leader_annotation: str = None,
prefix_prioritization_map: Optional[Dict[str, List[str]]] = None,
category_mapping: Optional[Dict[str, str]] = None,
strict: bool = True,
) -> Tuple[BaseGraph, nx.MultiDiGraph]:
"""
Parameters
----------
target_graph: kgx.graph.base_graph.BaseGraph
The original graph
leader_annotation: str
The field on a node that signifies that the node is the leader of a clique
prefix_prioritization_map: Optional[Dict[str, List[str]]]
A map that gives a prefix priority for one or more categories
category_mapping: Optional[Dict[str, str]]
Mapping for non-Biolink Model categories to Biolink Model categories
strict: bool
Whether or not to merge nodes in a clique that have conflicting node categories
Returns
-------
Tuple[kgx.graph.base_graph.BaseGraph, networkx.MultiDiGraph]
A tuple containing the updated target graph, and the clique graph
"""
ppm = get_prefix_prioritization_map()
if prefix_prioritization_map:
ppm.update(prefix_prioritization_map)
prefix_prioritization_map = ppm
if not leader_annotation:
leader_annotation = LEADER_ANNOTATION
start = current_time_in_millis()
clique_graph = build_cliques(target_graph)
end = current_time_in_millis()
log.info(f"Total time taken to build cliques: {end - start} ms")
start = current_time_in_millis()
elect_leader(
target_graph,
clique_graph,
leader_annotation,
prefix_prioritization_map,
category_mapping,
strict,
)
end = current_time_in_millis()
log.info(f"Total time taken to elect leaders for all cliques: {end - start} ms")
start = current_time_in_millis()
graph = consolidate_edges(target_graph, clique_graph, leader_annotation)
end = current_time_in_millis()
log.info(f"Total time taken to consolidate edges in target graph: {end - start} ms")
return graph, clique_graph
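# Illustrative sketch of a typical invocation (graph and prefix order are made up):
#   merged, cliques = clique_merge(
#       target_graph=graph,
#       prefix_prioritization_map={"biolink:Gene": ["HGNC", "NCBIGene", "ENSEMBL"]},
#   )
# Equivalent nodes (connected by biolink:same_as) are resolved to an elected clique leader.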
def build_cliques(target_graph: BaseGraph) -> nx.MultiDiGraph:
"""
Builds a clique graph from ``same_as`` edges in ``target_graph``.
Parameters
----------
target_graph: kgx.graph.base_graph.BaseGraph
An instance of BaseGraph that contains nodes and edges
Returns
-------
networkx.MultiDiGraph
The clique graph with only ``same_as`` edges
"""
clique_graph = nx.MultiDiGraph()
for n, data in target_graph.nodes(data=True):
if "same_as" in data:
new_data = copy.deepcopy(data)
del new_data["same_as"]
clique_graph.add_node(n, **new_data)
for s in data["same_as"]:
edge_data1 = {"subject": n, "predicate": SAME_AS, "object": s}
if "provided_by" in data:
edge_data1["provided_by"] = data["provided_by"]
clique_graph.add_edge(n, s, **edge_data1)
edge_data2 = {"subject": s, "predicate": SAME_AS, "object": n}
if "provided_by" in data:
edge_data2["provided_by"] = data["provided_by"]
clique_graph.add_edge(s, n, **edge_data2)
for u, v, data in target_graph.edges(data=True):
if "predicate" in data and data["predicate"] == SAME_AS:
# load all biolink:same_as edges to clique_graph
clique_graph.add_node(u, **target_graph.nodes()[u])
clique_graph.add_node(v, **target_graph.nodes()[v])
clique_graph.add_edge(u, v, **data)
clique_graph.add_edge(
v,
u,
            **{
                "subject": v,
                "predicate": data["predicate"],
                "object": u,
                "relation": data["relation"],
            },
)
return clique_graph
def elect_leader(
target_graph: BaseGraph,
clique_graph: nx.MultiDiGraph,
leader_annotation: str,
prefix_prioritization_map: Optional[Dict[str, List[str]]],
category_mapping: Optional[Dict[str, str]],
strict: bool = True,
) -> BaseGraph:
"""
Elect leader for each clique in a graph.
Parameters
----------
target_graph: kgx.graph.base_graph.BaseGraph
The original graph
clique_graph: networkx.Graph
The clique graph
leader_annotation: str
The field on a node that signifies that the node is the leader of a clique
prefix_prioritization_map: Optional[Dict[str, List[str]]]
A map that gives a prefix priority for one or more categories
category_mapping: Optional[Dict[str, str]]
Mapping for non-Biolink Model categories to Biolink Model categories
strict: bool
Whether or not to merge nodes in a clique that have conflicting node categories
Returns
-------
kgx.graph.base_graph.BaseGraph
The updated target graph
"""
cliques = list(nx.strongly_connected_components(clique_graph))
log.info(f"Total cliques in clique graph: {len(cliques)}")
count = 0
update_dict = {}
for clique in cliques:
log.info(
f"Processing clique: {clique} with {[clique_graph.nodes()[x]['category'] if 'category' in clique_graph.nodes()[x] else None for x in clique]}"
)
update_node_categories(
target_graph, clique_graph, clique, category_mapping, strict
)
clique_category, clique_category_ancestors = get_clique_category(
clique_graph, clique
)
log.debug(f"Clique category: {clique_category}")
invalid_nodes = set()
for n in clique:
data = clique_graph.nodes()[n]
if "_excluded_from_clique" in data and data["_excluded_from_clique"]:
log.info(
f"Removing invalid node {n} from clique graph; node marked to be excluded"
)
clique_graph.remove_node(n)
                invalid_nodes.add(n)
                continue
if data["category"][0] not in clique_category_ancestors:
log.info(
f"Removing invalid node {n} from the clique graph; node category {data['category'][0]} not in CCA: {clique_category_ancestors}"
)
clique_graph.remove_node(n)
invalid_nodes.add(n)
filtered_clique = [x for x in clique if x not in invalid_nodes]
if filtered_clique:
if clique_category:
# First check for LEADER_ANNOTATION property
leader, election_strategy = get_leader_by_annotation(
target_graph, clique_graph, filtered_clique, leader_annotation
)
if not leader:
# Leader is None; use prefix prioritization strategy
log.debug(
"Could not elect clique leader by looking for LEADER_ANNOTATION property; "
"Using prefix prioritization instead"
)
if (
prefix_prioritization_map
and clique_category in prefix_prioritization_map.keys()
):
leader, election_strategy = get_leader_by_prefix_priority(
target_graph,
clique_graph,
filtered_clique,
prefix_prioritization_map[clique_category],
)
else:
log.debug(
f"No prefix order found for category '{clique_category}' in PREFIX_PRIORITIZATION_MAP"
)
if not leader:
# Leader is None; fall back to alphabetical sort on prefixes
log.debug(
"Could not elect clique leader by PREFIX_PRIORITIZATION; Using alphabetical sort on prefixes"
)
leader, election_strategy = get_leader_by_sort(
target_graph, clique_graph, filtered_clique
)
log.debug(
f"Elected {leader} as leader via {election_strategy} for clique {filtered_clique}"
)
update_dict[leader] = {
LEADER_ANNOTATION: True,
"election_strategy": election_strategy,
}
count += 1
nx.set_node_attributes(clique_graph, update_dict)
target_graph.set_node_attributes(target_graph, update_dict)
log.info(f"Total merged cliques: {count}")
return target_graph
def consolidate_edges(
target_graph: BaseGraph, clique_graph: nx.MultiDiGraph, leader_annotation: str
) -> BaseGraph:
"""
Move all edges from nodes in a clique to the clique leader.
Original subject and object of a node are preserved via ``ORIGINAL_SUBJECT_PROPERTY`` and ``ORIGINAL_OBJECT_PROPERTY``
Parameters
----------
target_graph: kgx.graph.base_graph.BaseGraph
The original graph
clique_graph: networkx.MultiDiGraph
The clique graph
leader_annotation: str
The field on a node that signifies that the node is the leader of a clique
Returns
-------
kgx.graph.base_graph.BaseGraph
The target graph where all edges from nodes in a clique are moved to clique leader
"""
cliques = list(nx.strongly_connected_components(clique_graph))
log.info(f"Consolidating edges in {len(cliques)} cliques")
for clique in cliques:
log.debug(f"Processing clique: {clique}")
leaders: List = [
x
for x in clique
if leader_annotation in clique_graph.nodes()[x]
and clique_graph.nodes()[x][leader_annotation]
]
if len(leaders) == 0:
            log.debug(f"No leader elected for clique {clique}; skipping")
continue
leader: str = leaders[0]
# update nodes in target graph
target_graph.set_node_attributes(
target_graph,
{
leader: {
leader_annotation: clique_graph.nodes()[leader].get(
leader_annotation
),
"election_strategy": clique_graph.nodes()[leader].get(
"election_strategy"
),
}
},
)
leader_equivalent_identifiers = set([x for x in clique_graph.neighbors(leader)])
for node in clique:
if node == leader:
continue
log.debug(f"Looking for in_edges for {node}")
in_edges = target_graph.in_edges(node, keys=False, data=True)
filtered_in_edges = [x for x in in_edges if x[2]["predicate"] != SAME_AS]
equiv_in_edges = [x for x in in_edges if x[2]["predicate"] == SAME_AS]
log.debug(f"Moving {len(in_edges)} in-edges from {node} to {leader}")
for u, v, edge_data in filtered_in_edges:
key = generate_edge_key(u, edge_data["predicate"], v)
target_graph.remove_edge(u, v, edge_key=key)
edge_data[ORIGINAL_SUBJECT_PROPERTY] = edge_data["subject"]
edge_data[ORIGINAL_OBJECT_PROPERTY] = edge_data["object"]
edge_data["object"] = leader
key = generate_edge_key(u, edge_data["predicate"], leader)
if (
edge_data["subject"] == edge_data["object"]
and edge_data["predicate"] == SUBCLASS_OF
):
continue
target_graph.add_edge(
edge_data["subject"], edge_data["object"], key, **edge_data
)
log.debug(f"Looking for out_edges for {node}")
out_edges = target_graph.out_edges(node, keys=False, data=True)
filtered_out_edges = [x for x in out_edges if x[2]["predicate"] != SAME_AS]
equiv_out_edges = [x for x in out_edges if x[2]["predicate"] == SAME_AS]
log.debug(f"Moving {len(out_edges)} out-edges from {node} to {leader}")
for u, v, edge_data in filtered_out_edges:
key = generate_edge_key(u, edge_data["predicate"], v)
target_graph.remove_edge(u, v, edge_key=key)
edge_data[ORIGINAL_SUBJECT_PROPERTY] = edge_data["subject"]
edge_data[ORIGINAL_OBJECT_PROPERTY] = edge_data["object"]
edge_data["subject"] = leader
key = generate_edge_key(leader, edge_data["predicate"], v)
if (
edge_data["subject"] == edge_data["object"]
and edge_data["predicate"] == SUBCLASS_OF
):
continue
target_graph.add_edge(
edge_data["subject"], edge_data["object"], key, **edge_data
)
            log.debug(f"equiv in edges: {equiv_in_edges}")
equivalent_identifiers = set()
for u, v, edge_data in equiv_in_edges:
if u != leader:
equivalent_identifiers.add(u)
if v != leader:
equivalent_identifiers.add(v)
target_graph.remove_edge(
u, v, edge_key=generate_edge_key(u, SAME_AS, v)
)
log.debug(f"equiv out edges: {equiv_out_edges}")
for u, v, edge_data in equiv_out_edges:
if u != leader:
log.debug(f"{u} is an equivalent identifier of leader {leader}")
equivalent_identifiers.add(u)
if v != leader:
log.debug(f"{v} is an equivalent identifier of leader {leader}")
equivalent_identifiers.add(v)
target_graph.remove_edge(
u, v, edge_key=generate_edge_key(u, SAME_AS, v)
)
leader_equivalent_identifiers.update(equivalent_identifiers)
log.debug(
f"setting same_as property to leader node with {leader_equivalent_identifiers}"
)
target_graph.set_node_attributes(
target_graph, {leader: {"same_as": list(leader_equivalent_identifiers)}}
)
log.debug(
f"removing equivalent nodes of leader: {leader_equivalent_identifiers}"
)
for n in leader_equivalent_identifiers:
target_graph.remove_node(n)
return target_graph
def update_node_categories(
target_graph: BaseGraph,
clique_graph: nx.MultiDiGraph,
clique: List,
category_mapping: Optional[Dict[str, str]],
strict: bool = True,
) -> List:
"""
For a given clique, get category for each node in clique and validate against Biolink Model,
mapping to Biolink Model category where needed.
For example, If a node has ``biolink:Gene`` as its category, then this method adds all of its ancestors.
Parameters
----------
target_graph: kgx.graph.base_graph.BaseGraph
The original graph
clique_graph: networkx.Graph
The clique graph
clique: List
A list of nodes from a clique
category_mapping: Optional[Dict[str, str]]
Mapping for non-Biolink Model categories to Biolink Model categories
strict: bool
Whether or not to merge nodes in a clique that have conflicting node categories
Returns
-------
List
The clique
"""
updated_clique_graph_properties = {}
updated_target_graph_properties = {}
for node in clique:
# For each node in a clique, get its category property
data = clique_graph.nodes()[node]
if "category" in data:
categories = data["category"]
else:
categories = get_category_from_equivalence(
target_graph, clique_graph, node, data
)
# differentiate between valid and invalid categories
(
valid_biolink_categories,
invalid_biolink_categories,
invalid_categories,
) = check_all_categories(categories)
log.debug(
f"valid biolink categories: {valid_biolink_categories} invalid biolink categories: {invalid_biolink_categories} invalid_categories: {invalid_categories}"
)
# extend categories to have the longest list of ancestors
extended_categories: List = []
for x in valid_biolink_categories:
ancestors = get_biolink_ancestors(x)
if len(ancestors) > len(extended_categories):
extended_categories.extend(ancestors)
log.debug(f"Extended categories: {extended_categories}")
clique_graph_update_dict: Dict = {"category": list(extended_categories)}
target_graph_update_dict: Dict = {}
if invalid_biolink_categories:
if strict:
clique_graph_update_dict["_excluded_from_clique"] = True
target_graph_update_dict["_excluded_from_clique"] = True
clique_graph_update_dict[
"invalid_biolink_category"
] = invalid_biolink_categories
target_graph_update_dict[
"invalid_biolink_category"
] = invalid_biolink_categories
if invalid_categories:
clique_graph_update_dict["_invalid_category"] = invalid_categories
target_graph_update_dict["_invalid_category"] = invalid_categories
updated_clique_graph_properties[node] = clique_graph_update_dict
updated_target_graph_properties[node] = target_graph_update_dict
nx.set_node_attributes(clique_graph, updated_clique_graph_properties)
target_graph.set_node_attributes(target_graph, updated_target_graph_properties)
return clique
def get_clique_category(
clique_graph: nx.MultiDiGraph, clique: List
) -> Tuple[str, List]:
"""
Given a clique, identify the category of the clique.
Parameters
----------
clique_graph: nx.MultiDiGraph
Clique graph
clique: List
A list of nodes in clique
Returns
-------
Tuple[str, list]
A tuple of clique category and its ancestors
"""
    category_lists = [clique_graph.nodes()[x]["category"] for x in clique]
    u = OrderedSet.union(*category_lists)
    uo = sort_categories(u)
log.debug(f"outcome of union (sorted): {uo}")
clique_category = uo[0]
clique_category_ancestors = get_biolink_ancestors(uo[0])
return clique_category, clique_category_ancestors
def check_categories(
categories: List, closure: List, category_mapping: Optional[Dict[str, str]] = None
) -> Tuple[List, List, List]:
"""
Check categories to ensure whether values in ``categories`` are valid biolink categories.
Valid biolink categories are classes that descend from 'NamedThing'.
Mixins, while valid ancestors, are not valid categories.
Parameters
----------
categories: List
A list of categories to check
closure: List
A list of nodes in a clique
category_mapping: Optional[Dict[str, str]]
A map that provides mapping from a non-biolink category to a biolink category
Returns
-------
Tuple[List, List, List]
A tuple consisting of valid biolink categories, invalid biolink categories, and invalid categories
"""
valid_biolink_categories = []
invalid_biolink_categories = []
invalid_categories = []
tk = get_toolkit()
for x in categories:
# use the toolkit to check if the declared category is actually a mixin.
if tk.is_mixin(x):
invalid_categories.append(x)
continue
# get biolink element corresponding to category
element = get_biolink_element(x)
if element:
mapped_category = format_biolink_category(element["name"])
if mapped_category in closure:
valid_biolink_categories.append(x)
else:
log.warning(f"category '{mapped_category}' not in closure: {closure}")
if category_mapping:
mapped = category_mapping[x] if x in category_mapping.keys() else x
if mapped not in closure:
                        log.warning(
                            f"mapped category '{mapped}' not in closure: {closure}"
                        )
invalid_biolink_categories.append(x)
else:
invalid_biolink_categories.append(x)
else:
log.warning(f"category '{x}' is not in Biolink Model")
invalid_categories.append(x)
continue
return valid_biolink_categories, invalid_biolink_categories, invalid_categories
def check_all_categories(categories) -> Tuple[List, List, List]:
"""
Check all categories in ``categories``.
Parameters
----------
categories: List
A list of categories
Returns
-------
Tuple[List, List, List]
A tuple consisting of valid biolink categories, invalid biolink categories, and invalid categories
Note: the sort_categories method will re-arrange the passed in category list according to the distance
of each list member from the top of their hierarchy. Each category's hierarchy is made up of its
'is_a' and mixin ancestors.
"""
previous: List = []
valid_biolink_categories: List = []
invalid_biolink_categories: List = []
invalid_categories: List = []
sc: List = sort_categories(categories)
for c in sc:
if previous:
vbc, ibc, ic = check_categories(
[c], get_biolink_ancestors(previous[0]), None
)
else:
vbc, ibc, ic = check_categories([c], get_biolink_ancestors(c), None)
if vbc:
valid_biolink_categories.extend(vbc)
if ic:
invalid_categories.extend(ic)
if ibc:
invalid_biolink_categories.extend(ibc)
else:
previous = vbc
return valid_biolink_categories, invalid_biolink_categories, invalid_categories
def sort_categories(categories: Union[List, Set, OrderedSet]) -> List:
"""
Sort a list of categories from most specific to the most generic.
Parameters
----------
categories: Union[List, Set, OrderedSet]
A list of categories
Returns
-------
List
A sorted list of categories where sorted means that the first element in the list returned
has the most number of parents in the class hierarchy.
"""
weighted_categories = []
for c in categories:
weighted_categories.append((len(get_biolink_ancestors(c)), c))
sorted_categories = sorted(weighted_categories, key=lambda x: x[0], reverse=True)
return [x[1] for x in sorted_categories]
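def _example_sort_categories() -> None:
    # Illustrative usage sketch (an addition for clarity, not the library's own demo).
    # A more specific class such as biolink:Gene has more ancestors than
    # biolink:NamedThing, so it sorts first. Assumes the Biolink Model Toolkit
    # used by get_biolink_ancestors() is available.
    print(sort_categories(["biolink:NamedThing", "biolink:Gene"]))
    # Expected ordering (assuming a standard Biolink Model release):
    # ['biolink:Gene', 'biolink:NamedThing']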
def get_category_from_equivalence(
target_graph: BaseGraph, clique_graph: nx.MultiDiGraph, node: str, attributes: Dict
) -> List:
"""
Get category for a node based on its equivalent nodes in a graph.
Parameters
----------
target_graph: kgx.graph.base_graph.BaseGraph
The original graph
clique_graph: networkx.MultiDiGraph
The clique graph
node: str
Node identifier
attributes: Dict
Node's attributes
Returns
-------
List
Category for the node
"""
category: List = []
for u, v, data in clique_graph.edges(node, data=True):
if data["predicate"] == SAME_AS:
if u == node:
if "category" in clique_graph.nodes()[v]:
category = clique_graph.nodes()[v]["category"]
break
elif v == node:
if "category" in clique_graph.nodes()[u]:
category = clique_graph.nodes()[u]["category"]
break
update = {node: {"category": category}}
nx.set_node_attributes(clique_graph, update)
return category
def get_leader_by_annotation(
target_graph: BaseGraph,
clique_graph: nx.MultiDiGraph,
clique: List,
leader_annotation: str,
) -> Tuple[Optional[str], Optional[str]]:
"""
Get leader by searching for leader annotation property in any of the nodes in a given clique.
Parameters
----------
target_graph: kgx.graph.base_graph.BaseGraph
The original graph
clique_graph: networkx.MultiDiGraph
The clique graph
clique: List
A list of nodes from a clique
leader_annotation: str
The field on a node that signifies that the node is the leader of a clique
Returns
-------
Tuple[Optional[str], Optional[str]]
A tuple containing the node that has been elected as the leader and the election strategy
"""
leader = None
election_strategy = None
for node in clique:
attributes = clique_graph.nodes()[node]
if leader_annotation in attributes:
if isinstance(attributes[leader_annotation], str):
v = attributes[leader_annotation]
if v == "true" or v == "True":
leader = node
elif isinstance(attributes[leader_annotation], list):
v = attributes[leader_annotation][0]
if isinstance(v, str):
if v == "true" or v == "True":
leader = node
elif isinstance(v, bool):
                    if v:
leader = node
elif isinstance(attributes[leader_annotation], bool):
v = attributes[leader_annotation]
            if v:
leader = node
if leader:
election_strategy = "LEADER_ANNOTATION"
log.debug(f"Elected leader '{leader}' via LEADER_ANNOTATION")
return leader, election_strategy
def get_leader_by_prefix_priority(
target_graph: BaseGraph,
clique_graph: nx.MultiDiGraph,
clique: List,
prefix_priority_list: List,
) -> Tuple[Optional[str], Optional[str]]:
"""
Get leader from clique based on a given prefix priority.
Parameters
----------
target_graph: kgx.graph.base_graph.BaseGraph
The original graph
clique_graph: networkx.MultiDiGraph
The clique graph
clique: List
A list of nodes that correspond to a clique
prefix_priority_list: List
A list of prefixes in descending priority
Returns
-------
Tuple[Optional[str], Optional[str]]
A tuple containing the node that has been elected as the leader and the election strategy
"""
leader = None
election_strategy = None
for prefix in prefix_priority_list:
log.debug(f"Checking for prefix {prefix} in {clique}")
leader = next((x for x in clique if prefix in x), None)
if leader:
election_strategy = "PREFIX_PRIORITIZATION"
log.debug(f"Elected leader '{leader}' via {election_strategy}")
break
return leader, election_strategy
def get_leader_by_sort(
target_graph: BaseGraph, clique_graph: nx.MultiDiGraph, clique: List
) -> Tuple[Optional[str], Optional[str]]:
"""
Get leader from clique based on the first selection from an alphabetical sort of the node id prefixes.
Parameters
----------
target_graph: kgx.graph.base_graph.BaseGraph
The original graph
clique_graph: networkx.MultiDiGraph
The clique graph
clique: List
A list of nodes that correspond to a clique
Returns
-------
Tuple[Optional[str], Optional[str]]
A tuple containing the node that has been elected as the leader and the election strategy
"""
election_strategy = "ALPHABETICAL_SORT"
prefixes = [x.split(":", 1)[0] for x in clique]
prefixes.sort()
leader_prefix = prefixes[0]
log.debug(f"clique: {clique} leader_prefix: {leader_prefix}")
leader = [x for x in clique if leader_prefix in x]
if leader:
log.debug(f"Elected leader '{leader}' via {election_strategy}")
return leader[0], election_strategy
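def _example_get_leader_by_sort() -> None:
    # Illustrative usage sketch (an addition for clarity, not the library's own demo).
    # This strategy only inspects the clique member identifiers; the two graph
    # arguments are unused by it, so None is passed here purely for brevity.
    leader, strategy = get_leader_by_sort(None, None, ["NCBIGene:6932", "HGNC:11603"])
    print(leader, strategy)  # -> HGNC:11603 ALPHABETICAL_SORT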
| 28,863 | 35.816327 | 165 |
py
|
kgx
|
kgx-master/kgx/graph_operations/summarize_graph.py
|
"""
Classical KGX graph summary module.
"""
from typing import Dict, List, Optional, Any, Callable
import re
import yaml
from json import dump
from json.encoder import JSONEncoder
from deprecation import deprecated
from kgx.error_detection import ErrorType, MessageLevel, ErrorDetecting
from kgx.utils.kgx_utils import GraphEntityType
from kgx.graph.base_graph import BaseGraph
from kgx.prefix_manager import PrefixManager
TOTAL_NODES = "total_nodes"
NODE_CATEGORIES = "node_categories"
NODE_ID_PREFIXES_BY_CATEGORY = "node_id_prefixes_by_category"
NODE_ID_PREFIXES = "node_id_prefixes"
COUNT_BY_CATEGORY = "count_by_category"
COUNT_BY_ID_PREFIXES_BY_CATEGORY = "count_by_id_prefixes_by_category"
COUNT_BY_ID_PREFIXES = "count_by_id_prefixes"
TOTAL_EDGES = "total_edges"
EDGE_PREDICATES = "predicates"
COUNT_BY_EDGE_PREDICATES = "count_by_predicates"
COUNT_BY_SPO = "count_by_spo"
# Note: the format of the stats generated might change in the future
####################################################################################
# New "Inspector Class" design pattern for KGX stream data processing
####################################################################################
def gs_default(o):
"""
JSONEncoder 'default' function override to
properly serialize 'Set' objects (into 'List')
    :param o: the object that could not be serialized by the default JSONEncoder
"""
if isinstance(o, GraphSummary.Category):
return o.json_object()
else:
try:
iterable = iter(o)
except TypeError:
pass
else:
return list(iterable)
# Let the base class default method raise the TypeError
return JSONEncoder().default(o)
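def _example_gs_default() -> None:
    # Illustrative usage sketch (an addition for clarity, not the library's own demo).
    # The stock JSON encoder rejects Set values, so summary dictionaries that
    # accumulate sets of categories/prefixes are serialized with gs_default as
    # the fallback encoder.
    from json import dumps
    print(dumps({"node_categories": {"biolink:Gene", "biolink:Disease"}}, default=gs_default))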
_category_curie_regexp = re.compile("^biolink:[A-Z][a-zA-Z]*$")
_predicate_curie_regexp = re.compile("^biolink:[a-z][a-z_]*$")
class GraphSummary(ErrorDetecting):
"""
Class for generating a "classical" knowledge graph summary.
    The optional 'progress_monitor' for the summary should be a lightweight Callable
    which is injected into the class 'inspector' Callable, designed to intercept
    node and edge records streaming through the GraphSummary (inside a Transformer.process() call).
The first (GraphEntityType) argument of the Callable tags the record as a NODE or an EDGE.
The second argument given to the Callable is the current record itself.
This Callable is strictly meant to be procedural and should *not* mutate the record.
The intent of this Callable is to provide a hook to KGX applications wanting the
namesake function of passively monitoring the graph data stream. As such, the Callable
could simply tally up the number of times it is called with a NODE or an EDGE, then
provide a suitable (quick!) report of that count back to the KGX application. The
Callable (function/callable class) should not modify the record and should be of low
complexity, so as not to introduce a large computational overhead to validation!
"""
def __init__(
self,
name="",
node_facet_properties: Optional[List] = None,
edge_facet_properties: Optional[List] = None,
progress_monitor: Optional[Callable[[GraphEntityType, List], None]] = None,
error_log: str = None,
**kwargs,
):
"""
GraphSummary constructor.
Parameters
----------
name: str
(Graph) name assigned to the summary.
node_facet_properties: Optional[List]
A list of properties to facet on. For example, ``['provided_by']``
edge_facet_properties: Optional[List]
A list of properties to facet on. For example, ``['knowledge_source']``
progress_monitor: Optional[Callable[[GraphEntityType, List], None]]
            Callable given a peek at each record as it is stream-processed by the class's wrapped Callable.
error_log: str
Where to write any graph processing error message (stderr, by default)
"""
ErrorDetecting.__init__(self, error_log)
# formal arguments
self.name = name
self.nodes_processed = False
self.node_stats: Dict = {
TOTAL_NODES: 0,
NODE_CATEGORIES: set(),
NODE_ID_PREFIXES: set(),
NODE_ID_PREFIXES_BY_CATEGORY: dict(),
COUNT_BY_CATEGORY: dict(),
COUNT_BY_ID_PREFIXES_BY_CATEGORY: dict(),
COUNT_BY_ID_PREFIXES: dict(),
}
self.edges_processed: bool = False
self.edge_stats: Dict = {
TOTAL_EDGES: 0,
EDGE_PREDICATES: set(),
COUNT_BY_EDGE_PREDICATES: {"unknown": {"count": 0}},
COUNT_BY_SPO: {},
}
self.node_facet_properties: Optional[List] = node_facet_properties
if self.node_facet_properties:
for facet_property in self.node_facet_properties:
self.add_node_stat(facet_property, set())
self.edge_facet_properties: Optional[List] = edge_facet_properties
if self.edge_facet_properties:
for facet_property in self.edge_facet_properties:
self.edge_stats[facet_property] = set()
self.progress_monitor: Optional[
Callable[[GraphEntityType, List], None]
] = progress_monitor
# internal attributes
self.node_catalog: Dict[str, List[int]] = dict()
self.node_categories: Dict[str, GraphSummary.Category] = dict()
# indexed internally with category index id '0'
self.node_categories["unknown"] = GraphSummary.Category("unknown", self)
self.graph_stats: Dict[str, Dict] = dict()
def get_name(self):
"""
Returns
-------
str
Currently assigned knowledge graph name.
"""
return self.name
def __call__(self, entity_type: GraphEntityType, rec: List):
"""
Transformer 'inspector' Callable, for analysing a stream of graph data.
Parameters
----------
entity_type: GraphEntityType
            indicates what kind of record is being passed to the function for analysis.
        rec: List
            The node or edge record fields, as a list (see analyse_node/analyse_edge).
"""
if self.progress_monitor:
self.progress_monitor(entity_type, rec)
if entity_type == GraphEntityType.EDGE:
self.analyse_edge(*rec)
elif entity_type == GraphEntityType.NODE:
self.analyse_node(*rec)
else:
raise RuntimeError("Unexpected GraphEntityType: " + str(entity_type))
class Category:
"""
Internal class for compiling statistics about a distinct category.
"""
# The 'category map' just associates a unique int catalog
# index ('cid') value as a proxy for the full curie string,
# to reduce storage in the main node catalog
_category_curie_map: List[str] = list()
def __init__(self, category_curie: str, summary):
"""
GraphSummary.Category constructor.
category: str
Biolink Model category curie identifier.
"""
if not (
_category_curie_regexp.fullmatch(category_curie)
or category_curie == "unknown"
):
raise RuntimeError("Invalid Biolink category CURIE: " + category_curie)
# generally, a Biolink category class CURIE but also 'unknown'
self.category_curie = category_curie
# it is useful to point to the GraphSummary within
            # which this Category metadata is being tracked...
self.summary = summary
# ...so that Category related entries at that
# higher level may be properly initialized
# for subsequent facet metadata access
if self.category_curie != "unknown":
self.summary.node_stats[NODE_CATEGORIES].add(self.category_curie)
self.summary.node_stats[NODE_ID_PREFIXES_BY_CATEGORY][
self.category_curie
] = list()
self.summary.node_stats[COUNT_BY_CATEGORY][self.category_curie] = {
"count": 0
}
if self.category_curie not in self._category_curie_map:
self._category_curie_map.append(self.category_curie)
self.category_stats: Dict[str, Any] = dict()
self.category_stats["count"]: int = 0
self.category_stats["count_by_source"]: Dict[str, int] = {"unknown": 0}
self.category_stats["count_by_id_prefix"]: Dict[str, int] = dict()
def get_name(self) -> str:
"""
Returns
-------
str
Biolink CURIE name of the category.
"""
return self.category_curie
def get_cid(self) -> int:
"""
Returns
-------
int
Internal GraphSummary index id for tracking a Category.
"""
return self._category_curie_map.index(self.category_curie)
@classmethod
def get_category_curie_by_index(cls, cid: int) -> str:
"""
Parameters
----------
cid: int
Internal GraphSummary index id for tracking a Category.
Returns
-------
str
Curie identifier of the Category.
"""
return cls._category_curie_map[cid]
def get_id_prefixes(self) -> List:
"""
Returns
-------
List[str]
List of identifier prefix (strings) used by nodes of this Category.
"""
return list(self.category_stats["count_by_id_prefix"].keys())
def get_count_by_id_prefixes(self):
"""
Returns
-------
int
Count of nodes by id_prefixes for nodes which have this category.
"""
return self.category_stats["count_by_id_prefix"]
def get_count(self):
"""
Returns
-------
int
Count of nodes which have this category.
"""
return self.category_stats["count"]
def _capture_prefix(self, n: str):
prefix = PrefixManager.get_prefix(n)
if not prefix:
error_type = ErrorType.MISSING_NODE_CURIE_PREFIX
self.summary.log_error(
entity=n,
error_type=error_type,
message="Node 'id' has no CURIE prefix",
message_level=MessageLevel.WARNING
)
else:
if prefix in self.category_stats["count_by_id_prefix"]:
self.category_stats["count_by_id_prefix"][prefix] += 1
else:
self.category_stats["count_by_id_prefix"][prefix] = 1
def _capture_knowledge_source(self, data: Dict):
if "provided_by" in data:
for s in data["provided_by"]:
if s in self.category_stats["count_by_source"]:
self.category_stats["count_by_source"][s] += 1
else:
self.category_stats["count_by_source"][s] = 1
else:
self.category_stats["count_by_source"]["unknown"] += 1
def analyse_node_category(self, summary, n, data):
"""
Analyse metadata of a given graph node record of this category.
Parameters
----------
summary: GraphSummary
                GraphSummary within which the Category is being analysed.
            n: str
                Curie identifier of the node record.
data: Dict
Complete data dictionary of node record fields.
"""
self.category_stats["count"] += 1
self._capture_prefix(n)
self._capture_knowledge_source(data)
if summary.node_facet_properties:
for facet_property in summary.node_facet_properties:
summary.node_stats = summary.get_facet_counts(
data,
summary.node_stats,
COUNT_BY_CATEGORY,
self.category_curie,
facet_property,
)
def json_object(self):
"""
Returns
-------
Dict[str, Any]
                JSON-friendly metadata for this category.
"""
return {
"id_prefixes": list(self.category_stats["count_by_id_prefix"].keys()),
"count": self.category_stats["count"],
"count_by_source": self.category_stats["count_by_source"],
"count_by_id_prefix": self.category_stats["count_by_id_prefix"],
}
    def get_category(self, category_curie: str) -> Category:
        """
        Get the Category object that tracks statistics for a given
        (Biolink) category encountered in the knowledge graph.
        Parameters
        ----------
        category_curie: str
            Curie identifier for the (Biolink) category.
        Returns
        -------
        Category
            GraphSummary.Category object for a given Biolink category.
        """
        return self.node_categories[category_curie]
def _process_category_field(self, category_field: str, n: str, data: Dict):
# we note here that category_curie *may be*
# a piped '|' set of Biolink category CURIE values
category_list = category_field.split("|")
# analyse them each independently...
for category_curie in category_list:
if category_curie not in self.node_categories:
try:
self.node_categories[category_curie] = self.Category(
category_curie, self
)
except RuntimeError:
error_type = ErrorType.INVALID_CATEGORY
self.log_error(
entity=n,
error_type=error_type,
message=f"Invalid node 'category' CURIE: '{category_curie}'"
)
continue
category_record = self.node_categories[category_curie]
category_idx: int = category_record.get_cid()
if category_idx not in self.node_catalog[n]:
self.node_catalog[n].append(category_idx)
category_record.analyse_node_category(self, n, data)
#
# Moved this computation from the 'analyse_node_category() method above
#
# if self.node_facet_properties:
# for facet_property in self.node_facet_properties:
# self.node_stats = self.get_facet_counts(
# data, self.node_stats, COUNT_BY_CATEGORY, category_curie, facet_property
# )
def analyse_node(self, n, data):
"""
Analyse metadata of one graph node record.
Parameters
----------
n: str
Curie identifier of the node record (not used here).
data: Dict
Complete data dictionary of node record fields.
"""
if n in self.node_catalog:
# Report duplications of node records, as discerned from node id.
error_type = ErrorType.DUPLICATE_NODE
self.log_error(
entity=n,
error_type=error_type,
message="Node 'id' duplicated in input data",
message_level=MessageLevel.WARNING
)
return
else:
self.node_catalog[n] = list()
if "category" in data and data["category"]:
categories = data["category"]
else:
error_type = ErrorType.MISSING_CATEGORY
self.log_error(
entity=n,
error_type=error_type,
message="Missing node 'category' tagged as 'unknown'."
)
categories = ["unknown"]
# analyse them each independently...
for category_field in categories:
self._process_category_field(category_field, n, data)
def _capture_predicate(self, data: Dict) -> Optional[str]:
if "predicate" not in data:
self.edge_stats[COUNT_BY_EDGE_PREDICATES]["unknown"]["count"] += 1
predicate = "unknown"
else:
predicate = data["predicate"]
if not _predicate_curie_regexp.fullmatch(predicate):
error_type = ErrorType.INVALID_EDGE_PREDICATE
self.log_error(
entity=predicate,
error_type=error_type,
message="Invalid 'predicate' CURIE?"
)
return None
self.edge_stats[EDGE_PREDICATES].add(predicate)
if predicate in self.edge_stats[COUNT_BY_EDGE_PREDICATES]:
self.edge_stats[COUNT_BY_EDGE_PREDICATES][predicate]["count"] += 1
else:
self.edge_stats[COUNT_BY_EDGE_PREDICATES][predicate] = {"count": 1}
if self.edge_facet_properties:
for facet_property in self.edge_facet_properties:
self.edge_stats = self.get_facet_counts(
data,
self.edge_stats,
COUNT_BY_EDGE_PREDICATES,
predicate,
facet_property,
)
return predicate
def _process_triple(
self, subject_category: str, predicate: str, object_category: str, data: Dict
):
# Process the 'valid' S-P-O triple here...
key = f"{subject_category}-{predicate}-{object_category}"
if key in self.edge_stats[COUNT_BY_SPO]:
self.edge_stats[COUNT_BY_SPO][key]["count"] += 1
else:
self.edge_stats[COUNT_BY_SPO][key] = {"count": 1}
if self.edge_facet_properties:
for facet_property in self.edge_facet_properties:
self.edge_stats = self.get_facet_counts(
data, self.edge_stats, COUNT_BY_SPO, key, facet_property
)
def analyse_edge(self, u: str, v: str, k: str, data: Dict):
"""
Analyse metadata of one graph edge record.
Parameters
----------
u: str
Subject node curie identifier of the edge.
v: str
            Object node curie identifier of the edge.
k: str
Key identifier of the edge record (not used here).
data: Dict
Complete data dictionary of edge record fields.
"""
# we blissfully now assume that all the nodes of a
# graph stream were analysed first by the GraphSummary
# before the edges are analysed, thus we can test for
# node 'n' existence internally, by identifier.
self.edge_stats[TOTAL_EDGES] += 1
predicate: str = self._capture_predicate(data)
if u not in self.node_catalog:
error_type = ErrorType.MISSING_NODE
self.log_error(
entity=u,
error_type=error_type,
message="Subject 'id' not found in the node catalog"
)
# removing from edge count
self.edge_stats[TOTAL_EDGES] -= 1
self.edge_stats[COUNT_BY_EDGE_PREDICATES]["unknown"]["count"] -= 1
return
for subj_cat_idx in self.node_catalog[u]:
subject_category = self.Category.get_category_curie_by_index(subj_cat_idx)
if v not in self.node_catalog:
error_type = ErrorType.MISSING_NODE
self.log_error(
entity=v,
error_type=error_type,
message="Object 'id' not found in the node catalog"
)
self.edge_stats[TOTAL_EDGES] -= 1
self.edge_stats[COUNT_BY_EDGE_PREDICATES]["unknown"]["count"] -= 1
return
for obj_cat_idx in self.node_catalog[v]:
object_category = self.Category.get_category_curie_by_index(
obj_cat_idx
)
self._process_triple(subject_category, predicate, object_category, data)
def _compile_prefix_stats_by_category(self, category_curie: str):
for prefix in self.node_stats[COUNT_BY_ID_PREFIXES_BY_CATEGORY][category_curie]:
if prefix not in self.node_stats[COUNT_BY_ID_PREFIXES]:
self.node_stats[COUNT_BY_ID_PREFIXES][prefix] = 0
self.node_stats[COUNT_BY_ID_PREFIXES][prefix] += self.node_stats[
COUNT_BY_ID_PREFIXES_BY_CATEGORY
][category_curie][prefix]
def _compile_category_stats(self, node_category: Category):
category_curie = node_category.get_name()
self.node_stats[COUNT_BY_CATEGORY][category_curie][
"count"
] = node_category.get_count()
id_prefixes = node_category.get_id_prefixes()
self.node_stats[NODE_ID_PREFIXES_BY_CATEGORY][category_curie] = id_prefixes
self.node_stats[NODE_ID_PREFIXES].update(id_prefixes)
self.node_stats[COUNT_BY_ID_PREFIXES_BY_CATEGORY][
category_curie
] = node_category.get_count_by_id_prefixes()
self._compile_prefix_stats_by_category(category_curie)
def get_node_stats(self) -> Dict[str, Any]:
"""
Returns
-------
Dict[str, Any]
Statistics for the nodes in the graph.
"""
if not self.nodes_processed:
self.nodes_processed = True
for node_category in self.node_categories.values():
self._compile_category_stats(node_category)
self.node_stats[NODE_CATEGORIES] = sorted(self.node_stats[NODE_CATEGORIES])
self.node_stats[NODE_ID_PREFIXES] = sorted(self.node_stats[NODE_ID_PREFIXES])
if self.node_facet_properties:
for facet_property in self.node_facet_properties:
self.node_stats[facet_property] = sorted(
list(self.node_stats[facet_property])
)
if not self.node_stats[TOTAL_NODES]:
self.node_stats[TOTAL_NODES] = len(self.node_catalog)
return self.node_stats
def add_node_stat(self, tag: str, value: Any):
"""
Compile/add a nodes statistic for a given tag = value annotation of the node.
:param tag:
:param value:
:return:
Parameters
----------
tag: str
Tag label for the annotation.
value: Any
Value of the specific tag annotation.
"""
self.node_stats[tag] = value
    def get_edge_stats(self) -> Dict[str, Any]:
        """
        Returns
        -------
        Dict[str, Any]
            Statistics for the edges in the graph.
        """
        # Assume that edge_stats may be finalized and cached once,
        # after the first time the edge stats are accessed
        if not self.edges_processed:
self.edges_processed = True
self.edge_stats[EDGE_PREDICATES] = sorted(
list(self.edge_stats[EDGE_PREDICATES])
)
if self.edge_facet_properties:
for facet_property in self.edge_facet_properties:
self.edge_stats[facet_property] = sorted(
list(self.edge_stats[facet_property])
)
return self.edge_stats
def _wrap_graph_stats(
self,
graph_name: str,
node_stats: Dict[str, Any],
edge_stats: Dict[str, Any],
):
# Utility wrapper function to support DRY code below.
if not self.graph_stats:
self.graph_stats = {
"graph_name": graph_name,
"node_stats": node_stats,
"edge_stats": edge_stats,
}
return self.graph_stats
def get_graph_summary(self, name: str = None, **kwargs) -> Dict:
"""
Similar to summarize_graph except that the node and edge statistics are already captured
in the GraphSummary class instance (perhaps by Transformer.process() stream inspection)
and therefore, the data structure simply needs to be 'finalized' for saving or similar use.
Parameters
----------
name: Optional[str]
Name for the graph (if being renamed)
kwargs: Dict
Any additional arguments (ignored in this method at present)
Returns
-------
Dict
A knowledge map dictionary corresponding to the graph
"""
return self._wrap_graph_stats(
graph_name=name if name else self.name,
node_stats=self.get_node_stats(),
edge_stats=self.get_edge_stats(),
)
def summarize_graph(self, graph: BaseGraph) -> Dict:
"""
Summarize the entire graph.
Parameters
----------
graph: kgx.graph.base_graph.BaseGraph
The graph
Returns
-------
Dict
The stats dictionary
"""
return self._wrap_graph_stats(
graph_name=self.name if self.name else graph.name,
node_stats=self.summarize_graph_nodes(graph),
edge_stats=self.summarize_graph_edges(graph),
)
def summarize_graph_nodes(self, graph: BaseGraph) -> Dict:
"""
Summarize the nodes in a graph.
Parameters
----------
graph: kgx.graph.base_graph.BaseGraph
The graph
Returns
-------
Dict
The node stats
"""
for n, data in graph.nodes(data=True):
self.analyse_node(n, data)
return self.get_node_stats()
def summarize_graph_edges(self, graph: BaseGraph) -> Dict:
"""
Summarize the edges in a graph.
Parameters
----------
graph: kgx.graph.base_graph.BaseGraph
The graph
Returns
-------
Dict
The edge stats
"""
for u, v, k, data in graph.edges(keys=True, data=True):
self.analyse_edge(u, v, k, data)
return self.get_edge_stats()
def _compile_facet_stats(
self, stats: Dict, x: str, y: str, facet_property: str, value: str
):
if facet_property not in stats[x][y]:
stats[x][y][facet_property] = {}
if value in stats[x][y][facet_property]:
stats[x][y][facet_property][value]["count"] += 1
else:
stats[x][y][facet_property][value] = {"count": 1}
stats[facet_property].update([value])
def get_facet_counts(
self, data: Dict, stats: Dict, x: str, y: str, facet_property: str
) -> Dict:
"""
Facet on ``facet_property`` and record the count for ``stats[x][y][facet_property]``.
Parameters
----------
data: dict
Node/edge data dictionary
stats: dict
The stats dictionary
        x: str
            The first-level key into the stats dictionary (e.g. ``count_by_category``)
        y: str
            The second-level key under ``x`` (e.g. a specific category or predicate)
facet_property: str
The property to facet on
Returns
-------
Dict
The stats dictionary
"""
if facet_property in data:
if isinstance(data[facet_property], list):
for k in data[facet_property]:
self._compile_facet_stats(stats, x, y, facet_property, k)
else:
k = data[facet_property]
self._compile_facet_stats(stats, x, y, facet_property, k)
else:
self._compile_facet_stats(stats, x, y, facet_property, "unknown")
return stats
def save(self, file, name: str = None, file_format: str = "yaml"):
"""
Save the current GraphSummary to a specified (open) file (device).
Parameters
----------
file: File
Text file handler open for writing.
name: str
Optional string to which to (re-)name the graph.
file_format: str
            Text output format ('json' or 'yaml') for the saved graph summary (default: 'yaml')
Returns
-------
None
"""
stats = self.get_graph_summary(name)
if not file_format or file_format == "yaml":
yaml.dump(stats, file)
else:
dump(stats, file, indent=4, default=gs_default)
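def _example_graph_summary_inspector() -> None:
    # Illustrative usage sketch (an addition for clarity, not the library's own demo).
    # Demonstrates the "inspector" pattern described in the GraphSummary docstring:
    # a lightweight progress_monitor is invoked for every record passed through the
    # summary's __call__ hook. Assumes NxGraph is kgx's networkx-backed BaseGraph
    # implementation.
    from kgx.graph.nx_graph import NxGraph
    counts = {GraphEntityType.NODE: 0, GraphEntityType.EDGE: 0}
    def progress_monitor(entity_type: GraphEntityType, rec: List) -> None:
        # Tally records without mutating them, as recommended above.
        counts[entity_type] += 1
    summary = GraphSummary(name="example-graph", progress_monitor=progress_monitor)
    g = NxGraph()
    g.add_node("HGNC:11603", **{"id": "HGNC:11603", "category": ["biolink:Gene"]})
    g.add_node("MONDO:0005002", **{"id": "MONDO:0005002", "category": ["biolink:Disease"]})
    g.add_edge(
        "HGNC:11603",
        "MONDO:0005002",
        **{
            "subject": "HGNC:11603",
            "predicate": "biolink:related_to",
            "object": "MONDO:0005002",
            "relation": "RO:0003304",
        },
    )
    # Feed records through the inspector hook, the way a Transformer stream would.
    for n, data in g.nodes(data=True):
        summary(GraphEntityType.NODE, [n, data])
    for u, v, k, data in g.edges(keys=True, data=True):
        summary(GraphEntityType.EDGE, [u, v, k, data])
    stats = summary.get_graph_summary()
    print(counts, stats["graph_name"])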
@deprecated(deprecated_in="1.5.8", details="Default is the use streaming graph_summary with inspector")
def generate_graph_stats(
graph: BaseGraph,
graph_name: str,
filename: str,
node_facet_properties: Optional[List] = None,
edge_facet_properties: Optional[List] = None,
) -> None:
"""
Generate stats from Graph.
Parameters
----------
graph: kgx.graph.base_graph.BaseGraph
The graph
graph_name: str
Name for the graph
filename: str
Filename to write the stats to
node_facet_properties: Optional[List]
A list of properties to facet on. For example, ``['provided_by']``
edge_facet_properties: Optional[List]
A list of properties to facet on. For example, ``['knowledge_source']``
"""
stats = summarize_graph(
graph, graph_name, node_facet_properties, edge_facet_properties
)
with open(filename, "w") as gsh:
yaml.dump(stats, gsh)
def summarize_graph(
graph: BaseGraph,
name: str = None,
node_facet_properties: Optional[List] = None,
edge_facet_properties: Optional[List] = None,
) -> Dict:
"""
Summarize the entire graph.
Parameters
----------
graph: kgx.graph.base_graph.BaseGraph
The graph
name: str
Name for the graph
node_facet_properties: Optional[List]
A list of properties to facet on. For example, ``['provided_by']``
edge_facet_properties: Optional[List]
A list of properties to facet on. For example, ``['knowledge_source']``
Returns
-------
Dict
The stats dictionary
"""
gs = GraphSummary(name, node_facet_properties, edge_facet_properties)
return gs.summarize_graph(graph)
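def _example_classical_graph_summary() -> None:
    # Illustrative usage sketch (an addition for clarity, not the library's own demo).
    # The one-shot, non-streaming entry point. Assumes NxGraph is kgx's
    # networkx-backed BaseGraph implementation.
    from kgx.graph.nx_graph import NxGraph
    g = NxGraph()
    g.add_node("HGNC:11603", **{"id": "HGNC:11603", "category": ["biolink:Gene"]})
    stats = summarize_graph(g, name="example-graph", node_facet_properties=["provided_by"])
    print(stats["node_stats"][TOTAL_NODES], stats["node_stats"][NODE_CATEGORIES])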
| 30,601 | 33.002222 | 109 |
py
|
kgx
|
kgx-master/kgx/graph_operations/__init__.py
|
"""
Graph Operations module
"""
from typing import Dict, Optional
from kgx.config import get_logger
from kgx.graph.base_graph import BaseGraph
from kgx.utils.kgx_utils import (
CORE_NODE_PROPERTIES,
CORE_EDGE_PROPERTIES,
generate_edge_key,
current_time_in_millis,
)
log = get_logger()
def remap_node_identifier(
graph: BaseGraph, category: str, alternative_property: str, prefix=None
) -> BaseGraph:
"""
Remap a node's 'id' attribute with value from a node's ``alternative_property`` attribute.
Parameters
----------
graph: kgx.graph.base_graph.BaseGraph
The graph
category: string
category referring to nodes whose 'id' needs to be remapped
alternative_property: string
property name from which the new value is pulled from
prefix: string
signifies that the value for ``alternative_property`` is a list
and the ``prefix`` indicates which value to pick from the list
Returns
-------
kgx.graph.base_graph.BaseGraph
The modified graph
"""
mapping: Dict = {}
for nid, data in graph.nodes(data=True):
node_data = data.copy()
if "category" in node_data and category not in node_data["category"]:
continue
if alternative_property in node_data:
alternative_values = node_data[alternative_property]
if isinstance(alternative_values, (list, set, tuple)):
if prefix:
for v in alternative_values:
if prefix in v:
# take the first occurring value that contains the given prefix
mapping[nid] = {"id": v}
break
else:
# no prefix defined; pick the 1st one from list
mapping[nid] = {"id": next(iter(alternative_values))}
elif isinstance(alternative_values, str):
if prefix:
if alternative_values.startswith(prefix):
mapping[nid] = {"id": alternative_values}
else:
# no prefix defined
mapping[nid] = {"id": alternative_values}
else:
log.error(
f"Cannot use {alternative_values} from alternative_property {alternative_property}"
)
graph.set_node_attributes(graph, attributes=mapping)
graph.relabel_nodes(graph, {k: list(v.values())[0] for k, v in mapping.items()})
# update 'subject' of all outgoing edges
update_edge_keys = {}
updated_subject_values = {}
updated_object_values = {}
for u, v, k, edge_data in graph.edges(data=True, keys=True):
        if u != edge_data["subject"]:
updated_subject_values[(u, v, k)] = {"subject": u}
update_edge_keys[(u, v, k)] = {
"edge_key": generate_edge_key(u, edge_data["predicate"], v)
}
        if v != edge_data["object"]:
updated_object_values[(u, v, k)] = {"object": v}
update_edge_keys[(u, v, k)] = {
"edge_key": generate_edge_key(u, edge_data["predicate"], v)
}
graph.set_edge_attributes(graph, attributes=updated_subject_values)
graph.set_edge_attributes(graph, attributes=updated_object_values)
graph.set_edge_attributes(graph, attributes=update_edge_keys)
return graph
def remap_node_property(
graph: BaseGraph, category: str, old_property: str, new_property: str
) -> None:
"""
Remap the value in node ``old_property`` attribute with value
from node ``new_property`` attribute.
Parameters
----------
graph: kgx.graph.base_graph.BaseGraph
The graph
category: string
Category referring to nodes whose property needs to be remapped
old_property: string
old property name whose value needs to be replaced
new_property: string
new property name from which the value is pulled from
"""
mapping = {}
if old_property in CORE_NODE_PROPERTIES:
raise AttributeError(
f"node property {old_property} cannot be modified as it is a core property."
)
for nid, data in graph.nodes(data=True):
node_data = data.copy()
        if "category" in node_data and category not in node_data["category"]:
continue
if new_property in node_data:
mapping[nid] = {old_property: node_data[new_property]}
graph.set_node_attributes(graph, attributes=mapping)
def remap_edge_property(
graph: BaseGraph, edge_predicate: str, old_property: str, new_property: str
) -> None:
"""
Remap the value in an edge ``old_property`` attribute with value
from edge ``new_property`` attribute.
Parameters
----------
graph: kgx.graph.base_graph.BaseGraph
The graph
edge_predicate: string
edge_predicate referring to edges whose property needs to be remapped
old_property: string
Old property name whose value needs to be replaced
new_property: string
New property name from which the value is pulled from
"""
mapping = {}
if old_property in CORE_EDGE_PROPERTIES:
raise AttributeError(
f"edge property {old_property} cannot be modified as it is a core property."
)
for u, v, k, data in graph.edges(data=True, keys=True):
edge_data = data.copy()
        if edge_predicate != edge_data["predicate"]:
continue
if new_property in edge_data:
mapping[(u, v, k)] = {old_property: edge_data[new_property]}
graph.set_edge_attributes(graph, attributes=mapping)
def fold_predicate(
graph: BaseGraph, predicate: str, remove_prefix: bool = False
) -> None:
"""
Fold predicate as node property where every edge with ``predicate``
will be folded as a node property.
Parameters
----------
graph: kgx.graph.base_graph.BaseGraph
The graph
predicate: str
The predicate to fold
remove_prefix: bool
Whether or not to remove prefix from the predicate (``False``, by default)
"""
node_cache = []
edge_cache = []
start = current_time_in_millis()
p = predicate.split(":", 1)[1] if remove_prefix else predicate
for u, v, k, data in graph.edges(keys=True, data=True):
if data["predicate"] == predicate:
node_cache.append((u, p, v))
edge_cache.append((u, v, k))
while node_cache:
n = node_cache.pop()
graph.add_node_attribute(*n)
while edge_cache:
e = edge_cache.pop()
graph.remove_edge(*e)
end = current_time_in_millis()
log.info(f"Time taken: {end - start} ms")
def unfold_node_property(
graph: BaseGraph, node_property: str, prefix: Optional[str] = None
) -> None:
"""
Unfold node property as a predicate where every node with ``node_property``
will be unfolded as an edge.
Parameters
----------
graph: kgx.graph.base_graph.BaseGraph
The graph
node_property: str
The node property to unfold
prefix: Optional[str]
The prefix to use
"""
node_cache = []
edge_cache = []
start = current_time_in_millis()
p = f"{prefix}:{node_property}" if prefix else node_property
for n, data in graph.nodes(data=True):
sub = n
if node_property in data:
obj = data[node_property]
edge_cache.append((sub, obj, p))
node_cache.append((n, node_property))
while edge_cache:
e = edge_cache.pop()
graph.add_edge(
*e, **{"subject": e[0], "object": e[1], "predicate": e[2], "relation": e[2]}
)
while node_cache:
n = node_cache.pop()
del graph.nodes()[n[0]][n[1]]
end = current_time_in_millis()
log.info(f"Time taken: {end - start} ms")
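def _example_fold_and_unfold() -> None:
    # Illustrative usage sketch (an addition for clarity, not the library's own demo).
    # Assumes NxGraph is kgx's networkx-backed BaseGraph implementation.
    from kgx.graph.nx_graph import NxGraph
    g = NxGraph()
    g.add_node("MONDO:0005002", **{"id": "MONDO:0005002", "category": ["biolink:Disease"]})
    g.add_node("MONDO:0005070", **{"id": "MONDO:0005070", "category": ["biolink:Disease"]})
    g.add_edge(
        "MONDO:0005002",
        "MONDO:0005070",
        **{
            "subject": "MONDO:0005002",
            "predicate": "biolink:subclass_of",
            "object": "MONDO:0005070",
            "relation": "rdfs:subClassOf",
        },
    )
    # Fold the subclass_of edge into a 'subclass_of' node property ...
    fold_predicate(g, "biolink:subclass_of", remove_prefix=True)
    # ... and unfold it back into an edge with predicate 'biolink:subclass_of'.
    unfold_node_property(g, "subclass_of", prefix="biolink")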
def remove_singleton_nodes(graph: BaseGraph) -> None:
"""
Remove singleton nodes (nodes that have a degree of 0) from the graph.
Parameters
----------
graph: kgx.graph.base_graph.BaseGraph
The graph
"""
start = current_time_in_millis()
singleton = []
for n, d in graph.degree():
if d == 0:
singleton.append(n)
while singleton:
n = singleton.pop()
log.debug(f"Removing singleton node {n}")
graph.remove_node(n)
end = current_time_in_millis()
log.info(f"Time taken: {end - start} ms")
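def _example_remove_singleton_nodes() -> None:
    # Illustrative usage sketch (an addition for clarity, not the library's own demo).
    # Assumes NxGraph is kgx's networkx-backed BaseGraph implementation.
    from kgx.graph.nx_graph import NxGraph
    g = NxGraph()
    g.add_node("HGNC:11603", **{"id": "HGNC:11603"})
    g.add_node("MONDO:0005002", **{"id": "MONDO:0005002"})
    g.add_node("CHEBI:15365", **{"id": "CHEBI:15365"})  # no edges: a singleton
    g.add_edge(
        "HGNC:11603",
        "MONDO:0005002",
        **{
            "subject": "HGNC:11603",
            "predicate": "biolink:related_to",
            "object": "MONDO:0005002",
            "relation": "RO:0003304",
        },
    )
    remove_singleton_nodes(g)
    # Only the connected pair remains; CHEBI:15365 has been dropped.
    print(sorted(g.nodes()))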
| 8,498 | 31.814672 | 103 |
py
|
kgx
|
kgx-master/kgx/sink/tsv_sink.py
|
import os
import tarfile
from typing import Optional, Dict, Set, Any, List
from ordered_set import OrderedSet
from kgx.sink.sink import Sink
from kgx.utils.kgx_utils import (
extension_types,
archive_write_mode,
archive_format,
build_export_row
)
DEFAULT_NODE_COLUMNS = {"id", "name", "category", "description", "provided_by"}
DEFAULT_EDGE_COLUMNS = {
"id",
"subject",
"predicate",
"object",
"relation",
"category",
"knowledge_source",
}
DEFAULT_LIST_DELIMITER = "|"
class TsvSink(Sink):
"""
TsvSink is responsible for writing data as records to a TSV/CSV.
Parameters
----------
owner: Transformer
Transformer to which the GraphSink belongs
filename: str
The filename to write to
format: str
The file format (``tsv``, ``csv``)
compression: str
The compression type (``tar``, ``tar.gz``)
kwargs: Any
Any additional arguments
"""
def __init__(
self,
owner,
filename: str,
format: str,
compression: Optional[str] = None,
**kwargs: Any,
):
super().__init__(owner)
if format not in extension_types:
raise Exception(f"Unsupported format: {format}")
self.delimiter = extension_types[format]
self.dirname = os.path.abspath(os.path.dirname(filename))
self.basename = os.path.basename(filename)
self.extension = format.split(":")[0]
self.mode = (
archive_write_mode[compression]
if compression in archive_write_mode
else None
)
self.list_delimiter = kwargs["list_delimiter"] if "list_delimiter" in kwargs else DEFAULT_LIST_DELIMITER
self.nodes_file_basename = f"{self.basename}_nodes.{self.extension}"
self.edges_file_basename = f"{self.basename}_edges.{self.extension}"
if self.dirname:
os.makedirs(self.dirname, exist_ok=True)
if "node_properties" in kwargs:
self.node_properties.update(set(kwargs["node_properties"]))
else:
self.node_properties.update(DEFAULT_NODE_COLUMNS)
if "edge_properties" in kwargs:
self.edge_properties.update(set(kwargs["edge_properties"]))
else:
self.edge_properties.update(DEFAULT_EDGE_COLUMNS)
self.ordered_node_columns = TsvSink._order_node_columns(self.node_properties)
self.ordered_edge_columns = TsvSink._order_edge_columns(self.edge_properties)
self.nodes_file_name = os.path.join(
self.dirname if self.dirname else "", self.nodes_file_basename
)
self.NFH = open(self.nodes_file_name, "w")
self.NFH.write(self.delimiter.join(self.ordered_node_columns) + "\n")
self.edges_file_name = os.path.join(
self.dirname if self.dirname else "", self.edges_file_basename
)
self.EFH = open(self.edges_file_name, "w")
self.EFH.write(self.delimiter.join(self.ordered_edge_columns) + "\n")
def write_node(self, record: Dict) -> None:
"""
Write a node record to the underlying store.
Parameters
----------
record: Dict
A node record
"""
row = build_export_row(record, list_delimiter=self.list_delimiter)
row["id"] = record["id"]
values = []
for c in self.ordered_node_columns:
if c in row:
values.append(str(row[c]))
else:
values.append("")
self.NFH.write(self.delimiter.join(values) + "\n")
def write_edge(self, record: Dict) -> None:
"""
Write an edge record to the underlying store.
Parameters
----------
record: Dict
An edge record
"""
row = build_export_row(record, list_delimiter=self.list_delimiter)
values = []
for c in self.ordered_edge_columns:
if c in row:
values.append(str(row[c]))
else:
values.append("")
self.EFH.write(self.delimiter.join(values) + "\n")
def finalize(self) -> None:
"""
Close file handles and create an archive if compression mode is defined.
"""
self.NFH.close()
self.EFH.close()
if self.mode:
archive_basename = f"{self.basename}.{archive_format[self.mode]}"
archive_name = os.path.join(
self.dirname if self.dirname else "", archive_basename
)
with tarfile.open(name=archive_name, mode=self.mode) as tar:
tar.add(self.nodes_file_name, arcname=self.nodes_file_basename)
tar.add(self.edges_file_name, arcname=self.edges_file_basename)
if os.path.isfile(self.nodes_file_name):
os.remove(self.nodes_file_name)
if os.path.isfile(self.edges_file_name):
os.remove(self.edges_file_name)
@staticmethod
def _order_node_columns(cols: Set) -> OrderedSet:
"""
Arrange node columns in a defined order.
Parameters
----------
cols: Set
A set with elements in any order
Returns
-------
OrderedSet
A set with elements in a defined order
"""
node_columns = cols.copy()
core_columns = OrderedSet(
["id", "category", "name", "description", "xref", "provided_by", "synonym"]
)
ordered_columns = OrderedSet()
for c in core_columns:
if c in node_columns:
ordered_columns.add(c)
node_columns.remove(c)
internal_columns = set()
remaining_columns = node_columns.copy()
for c in node_columns:
if c.startswith("_"):
internal_columns.add(c)
remaining_columns.remove(c)
ordered_columns.update(sorted(remaining_columns))
ordered_columns.update(sorted(internal_columns))
return ordered_columns
@staticmethod
def _order_edge_columns(cols: Set) -> OrderedSet:
"""
Arrange edge columns in a defined order.
Parameters
----------
cols: Set
A set with elements in any order
Returns
-------
OrderedSet
A set with elements in a defined order
"""
edge_columns = cols.copy()
core_columns = OrderedSet(
[
"id",
"subject",
"predicate",
"object",
"category",
"relation",
"provided_by",
]
)
ordered_columns = OrderedSet()
for c in core_columns:
if c in edge_columns:
ordered_columns.add(c)
edge_columns.remove(c)
internal_columns = set()
remaining_columns = edge_columns.copy()
for c in edge_columns:
if c.startswith("_"):
internal_columns.add(c)
remaining_columns.remove(c)
ordered_columns.update(sorted(remaining_columns))
ordered_columns.update(sorted(internal_columns))
return ordered_columns
def set_node_properties(self, node_properties: List) -> None:
"""
Update node properties index with a given list.
Parameters
----------
node_properties: List
A list of node properties
"""
        self.node_properties.update(node_properties)
        self.ordered_node_columns = TsvSink._order_node_columns(self.node_properties)
def set_edge_properties(self, edge_properties: List) -> None:
"""
Update edge properties index with a given list.
Parameters
----------
edge_properties: List
A list of edge properties
"""
        self.edge_properties.update(edge_properties)
        self.ordered_edge_columns = TsvSink._order_edge_columns(self.edge_properties)
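def _example_tsv_sink() -> None:
    # Illustrative usage sketch (an addition for clarity, not the library's own demo).
    # The `owner` argument is normally the Transformer driving the sink; passing
    # None here assumes the base Sink merely stores the reference.
    import tempfile
    out_prefix = os.path.join(tempfile.mkdtemp(), "example_graph")
    sink = TsvSink(owner=None, filename=out_prefix, format="tsv")
    sink.write_node(
        {"id": "HGNC:11603", "name": "example gene", "category": ["biolink:Gene"]}
    )
    sink.write_edge(
        {
            "id": "e1",
            "subject": "HGNC:11603",
            "predicate": "biolink:related_to",
            "object": "MONDO:0005002",
            "relation": "RO:0003304",
        }
    )
    # Closes the generated example_graph_nodes.tsv / example_graph_edges.tsv files.
    sink.finalize()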
| 8,070 | 30.775591 | 112 |
py
|
kgx
|
kgx-master/kgx/sink/sql_sink.py
|
from typing import Dict, Set, Any, List
from ordered_set import OrderedSet
import sqlite3
from kgx.sink.sink import Sink
from kgx.utils.kgx_utils import (
extension_types,
build_export_row,
create_connection,
get_toolkit,
sentencecase_to_snakecase,
)
from kgx.config import get_logger
log = get_logger()
DEFAULT_NODE_COLUMNS = {"id", "name", "category", "description", "provided_by"}
DEFAULT_EDGE_COLUMNS = {
"id",
"subject",
"predicate",
"object",
"relation",
"category",
"knowledge_source",
}
# TODO: incorporate closurizer, add denormalizer method?
# TODO: add denormalization options to config
class SqlSink(Sink):
"""
    SqlSink is responsible for writing data as records to a SQLite DB.
Parameters
----------
owner: Transformer
Transformer to which the GraphSink belongs
filename: str
The filename to write to
format: str
        The file format; must be one of the formats registered in ``extension_types``
kwargs: Any
Any additional arguments
"""
def __init__(
self,
owner,
filename: str,
format: str,
**kwargs: Any,
):
super().__init__(owner)
if format not in extension_types:
raise Exception(f"Unsupported format: {format}")
self.conn = create_connection(filename)
self.edge_data = []
self.node_data = []
self.filename = filename
if "node_properties" in kwargs:
self.node_properties.update(set(kwargs["node_properties"]))
else:
self.node_properties.update(DEFAULT_NODE_COLUMNS)
if "edge_properties" in kwargs:
self.edge_properties.update(set(kwargs["edge_properties"]))
else:
self.edge_properties.update(DEFAULT_EDGE_COLUMNS)
self.ordered_node_columns = SqlSink._order_node_columns(self.node_properties)
self.ordered_edge_columns = SqlSink._order_edge_columns(self.edge_properties)
if "node_table_name" in kwargs:
self.node_table_name = kwargs["node_table_name"]
else:
self.node_table_name = "nodes"
if "edge_table_name" in kwargs:
self.edge_table_name = kwargs["edge_table_name"]
else:
self.edge_table_name = "edges"
if "denormalize" in kwargs:
self.denormalize = kwargs["denormalize"]
else:
self.denormalize = False
self.create_tables()
def create_tables(self):
# Create the nodes table if it does not already exist
try:
if self.ordered_node_columns:
c = self.conn.cursor()
columns_str = ', '.join([f'{column} TEXT' for column in self.ordered_node_columns])
create_table_sql = f'CREATE TABLE IF NOT EXISTS {self.node_table_name} ({columns_str});'
log.info(create_table_sql)
c.execute(create_table_sql)
self.conn.commit()
except sqlite3.Error as e:
log.error(f"Error occurred while creating nodes table: {e}", "rolling back")
self.conn.rollback()
# Create the edges table if it does not already exist
try:
if self.ordered_edge_columns:
if self.denormalize:
tk = get_toolkit()
denormalized_slots = tk.get_denormalized_association_slots(formatted=False)
for slot in denormalized_slots:
self.ordered_edge_columns.append(sentencecase_to_snakecase(slot))
c = self.conn.cursor()
columns_str = ', '.join([f'{column} TEXT' for column in self.ordered_edge_columns])
create_table_sql = f'CREATE TABLE IF NOT EXISTS {self.edge_table_name} ({columns_str});'
c.execute(create_table_sql)
self.conn.commit()
except sqlite3.Error as e:
log.error(f"Error occurred while creating edges table: {e}", "rolling back")
self.conn.rollback()
self.conn.commit()
def write_node(self, record: Dict) -> None:
"""
Write a node record to the underlying store.
Parameters
----------
record: Dict
A node record
"""
row = build_export_row(record, list_delimiter=",")
row["id"] = record["id"]
values = []
for c in self.ordered_node_columns:
if c in row:
values.append(str(row[c]))
else:
values.append("")
ordered_tuple = tuple(values)
self.node_data.append(ordered_tuple)
def write_edge(self, record: Dict) -> None:
"""
Write an edge record to a tuple list for bulk insert in finalize.
Parameters
----------
record: Dict
An edge record
"""
row = build_export_row(record, list_delimiter="|")
if self.denormalize:
self._denormalize_edge(row)
values = []
for c in self.ordered_edge_columns:
if c in row:
values.append(str(row[c]))
else:
values.append("")
ordered_tuple = tuple(values)
self.edge_data.append(ordered_tuple)
def finalize(self) -> None:
self._bulk_insert(self.node_table_name, self.node_data)
self._bulk_insert(self.edge_table_name, self.edge_data)
self._create_indexes()
self.conn.close()
def _create_indexes(self):
c = self.conn.cursor()
try:
c.execute(f"CREATE INDEX IF NOT EXISTS node_id_index ON {self.node_table_name} (id);")
log.info("created index: " + f"CREATE INDEX IF NOT EXISTS node_id_index ON {self.node_table_name} (id);")
c.execute(f"CREATE INDEX IF NOT EXISTS edge_unique_id_index ON {self.edge_table_name} (subject, predicate, object);")
log.info("created index: " + f"CREATE INDEX IF NOT EXISTS edge_unique_id_index ON {self.edge_table_name} (subject, predicate, object);")
self.conn.commit()
except sqlite3.Error as e:
log.error(f"Error occurred while creating indexes", {e})
self.conn.rollback()
self.conn.commit()
    def _bulk_insert(self, table_name: str, data_list: List[tuple]):
c = self.conn.cursor()
# Get the column names in the order they appear in the table
c.execute(f"SELECT * FROM {table_name}")
cols = [description[0] for description in c.description]
# Insert the rows into the table
query = f"INSERT INTO {table_name} ({','.join(cols)}) VALUES ({','.join(['?']*len(cols))})"
try:
c.executemany(query, data_list)
self.conn.commit()
except sqlite3.Error as e:
log.error(f"Error occurred while inserting data into table: {e}")
self.conn.rollback()
def _denormalize_edge(self, row: dict):
"""
Add the denormalized node properties to the edge.
Parameters
----------
row: Dict
An edge record
"""
pass
# subject = row['subject']
# print(self.node_properties)
@staticmethod
def _order_node_columns(cols: Set) -> OrderedSet:
"""
Arrange node columns in a defined order.
Parameters
----------
cols: Set
A set with elements in any order
Returns
-------
OrderedSet
A set with elements in a defined order
"""
node_columns = cols.copy()
core_columns = OrderedSet(
["id", "category", "name", "description", "xref", "provided_by", "synonym"]
)
ordered_columns = OrderedSet()
for c in core_columns:
if c in node_columns:
ordered_columns.add(c)
node_columns.remove(c)
internal_columns = set()
remaining_columns = node_columns.copy()
for c in node_columns:
if c.startswith("_"):
internal_columns.add(c)
remaining_columns.remove(c)
ordered_columns.update(sorted(remaining_columns))
ordered_columns.update(sorted(internal_columns))
return ordered_columns
@staticmethod
def _order_edge_columns(cols: Set) -> OrderedSet:
"""
Arrange edge columns in a defined order.
Parameters
----------
cols: Set
A set with elements in any order
Returns
-------
OrderedSet
A set with elements in a defined order
"""
edge_columns = cols.copy()
core_columns = OrderedSet(
[
"id",
"subject",
"predicate",
"object",
"category",
"relation",
"provided_by",
]
)
ordered_columns = OrderedSet()
for c in core_columns:
if c in edge_columns:
ordered_columns.add(c)
edge_columns.remove(c)
internal_columns = set()
remaining_columns = edge_columns.copy()
for c in edge_columns:
if c.startswith("_"):
internal_columns.add(c)
remaining_columns.remove(c)
ordered_columns.update(sorted(remaining_columns))
ordered_columns.update(sorted(internal_columns))
return ordered_columns
def set_node_properties(self, node_properties: List) -> None:
"""
Update node properties index with a given list.
Parameters
----------
node_properties: List
A list of node properties
"""
        self.node_properties.update(node_properties)
        self.ordered_node_columns = SqlSink._order_node_columns(self.node_properties)
def set_edge_properties(self, edge_properties: List) -> None:
"""
Update edge properties index with a given list.
Parameters
----------
edge_properties: List
A list of edge properties
"""
        self.edge_properties.update(edge_properties)
        self.ordered_edge_columns = SqlSink._order_edge_columns(self.edge_properties)
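# Rough trace of the SQL this sink emits for the default column sets above, derived
# from create_tables() and _bulk_insert(); the exact statements grow when extra
# node/edge properties are passed in through kwargs.
#
#   CREATE TABLE IF NOT EXISTS nodes
#       (id TEXT, category TEXT, name TEXT, description TEXT, provided_by TEXT);
#   INSERT INTO nodes (id,category,name,description,provided_by) VALUES (?,?,?,?,?)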
| 10,281 | 31.955128 | 148 |
py
|
kgx
|
kgx-master/kgx/sink/jsonl_sink.py
|
import gzip
import os
from typing import Optional, Dict, Any
import jsonlines
from kgx.sink.sink import Sink
class JsonlSink(Sink):
"""
JsonlSink is responsible for writing data as records
to JSON lines.
Parameters
----------
owner: Transformer
        Transformer to which the JsonlSink belongs
filename: str
The filename to write to
format: str
The file format (``jsonl``)
compression: Optional[str]
The compression type (``gz``)
kwargs: Any
Any additional arguments
"""
def __init__(
self,
owner,
filename: str,
format: str = "jsonl",
compression: Optional[str] = None,
**kwargs: Any,
):
super().__init__(owner)
dirname = os.path.abspath(os.path.dirname(filename))
basename = os.path.basename(filename)
nodes_filename = os.path.join(
dirname if dirname else "", f"{basename}_nodes.{format}"
)
edges_filename = os.path.join(
dirname if dirname else "", f"{basename}_edges.{format}"
)
if dirname:
os.makedirs(dirname, exist_ok=True)
if compression == "gz":
nodes_filename += f".{compression}"
edges_filename += f".{compression}"
NFH = gzip.open(nodes_filename, "wb")
self.NFH = jsonlines.Writer(NFH)
EFH = gzip.open(edges_filename, "wb")
self.EFH = jsonlines.Writer(EFH)
else:
self.NFH = jsonlines.open(nodes_filename, "w")
self.EFH = jsonlines.open(edges_filename, "w")
def write_node(self, record: Dict) -> None:
"""
Write a node record to JSON.
Parameters
----------
record: Dict
A node record
"""
self.NFH.write(record)
def write_edge(self, record: Dict) -> None:
"""
Write an edge record to JSON.
Parameters
----------
record: Dict
            An edge record
"""
self.EFH.write(record)
def finalize(self) -> None:
"""
Perform any operations after writing the file.
"""
self.NFH.close()
self.EFH.close()
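# Minimal usage sketch. A real owner is a kgx Transformer; None works as a stand-in
# here because this sink only stores the reference. The record values are invented
# for illustration.
if __name__ == "__main__":
    sink = JsonlSink(owner=None, filename="out/graph", format="jsonl")
    sink.write_node({"id": "EXAMPLE:1", "name": "example node", "category": ["biolink:NamedThing"]})
    sink.write_edge({"subject": "EXAMPLE:1", "predicate": "biolink:related_to", "object": "EXAMPLE:2"})
    sink.finalize()
    # Writes out/graph_nodes.jsonl and out/graph_edges.jsonl
    # (with a .gz suffix when compression="gz" is passed).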
| 2,242 | 23.922222 | 68 |
py
|
kgx
|
kgx-master/kgx/sink/graph_sink.py
|
from typing import Dict
from kgx.graph.base_graph import BaseGraph
from kgx.config import get_graph_store_class
from kgx.sink.sink import Sink
from kgx.utils.kgx_utils import generate_edge_key
class GraphSink(Sink):
"""
GraphSink is responsible for writing data as records
to an in memory graph representation.
The underlying store is determined by the graph store
class defined in config (``kgx.graph.nx_graph.NxGraph``, by default).
"""
def __init__(self, owner):
"""
Parameters
----------
owner: Transformer
Transformer to which the GraphSink belongs
"""
super().__init__(owner)
if owner.store and owner.store.graph:
self.graph = owner.store.graph
else:
self.graph = get_graph_store_class()()
def write_node(self, record: Dict) -> None:
"""
Write a node record to graph.
Parameters
----------
record: Dict
A node record
"""
self.graph.add_node(record["id"], **record)
def write_edge(self, record: Dict) -> None:
"""
Write an edge record to graph.
Parameters
----------
record: Dict
An edge record
"""
if "key" in record:
            key = record["key"]
else:
key = generate_edge_key(
record["subject"], record["predicate"], record["object"]
)
self.graph.add_edge(record["subject"], record["object"], key, **record)
def finalize(self) -> None:
"""
Perform any operations after writing nodes and edges to graph.
"""
pass
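# Minimal sketch of streaming records into the in-memory graph. The owner is normally
# a kgx Transformer; a small stand-in with an empty `store` attribute is enough here,
# because __init__ only inspects owner.store before falling back to the configured
# graph store class (NxGraph by default).
if __name__ == "__main__":
    from types import SimpleNamespace

    sink = GraphSink(SimpleNamespace(store=None))
    sink.write_node({"id": "EXAMPLE:1", "category": ["biolink:NamedThing"]})
    sink.write_node({"id": "EXAMPLE:2", "category": ["biolink:NamedThing"]})
    sink.write_edge({"subject": "EXAMPLE:1", "predicate": "biolink:related_to", "object": "EXAMPLE:2"})
    print(sink.graph.number_of_nodes(), sink.graph.number_of_edges())  # 2 1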
| 1,703 | 24.058824 | 79 |
py
|
kgx
|
kgx-master/kgx/sink/sink.py
|
from typing import Dict
from kgx.prefix_manager import PrefixManager
class Sink(object):
"""
A Sink is responsible for writing data as records
to a store where the store is a file or a database.
    Parameters
    ----------
    owner: Transformer
        Transformer to which the Sink belongs
"""
def __init__(self, owner):
self.owner = owner
self.prefix_manager = PrefixManager()
self.node_properties = set()
self.edge_properties = set()
def set_reverse_prefix_map(self, m: Dict) -> None:
"""
Update default reverse prefix map.
Parameters
----------
m: Dict
A dictionary with IRI to prefix mappings
"""
self.prefix_manager.update_reverse_prefix_map(m)
def write_node(self, record) -> None:
"""
Write a node record to the underlying store.
Parameters
----------
record: Any
A node record
"""
pass
def write_edge(self, record) -> None:
"""
Write an edge record to the underlying store.
Parameters
----------
record: Any
An edge record
"""
pass
def finalize(self) -> None:
"""
Operations that ought to be done after
writing all the incoming data should be called
by this method.
"""
pass
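# Hypothetical illustration of the extension pattern this base class expects: a sink
# that only counts records. Concrete sinks normally override the three hook methods
# below and leave prefix handling to the base class.
class CountingSink(Sink):
    def __init__(self, owner):
        super().__init__(owner)
        self.nodes_seen = 0
        self.edges_seen = 0

    def write_node(self, record) -> None:
        # Tally node records instead of persisting them anywhere.
        self.nodes_seen += 1

    def write_edge(self, record) -> None:
        # Tally edge records.
        self.edges_seen += 1

    def finalize(self) -> None:
        print(f"saw {self.nodes_seen} nodes and {self.edges_seen} edges")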
| 1,436 | 20.447761 | 56 |
py
|
kgx
|
kgx-master/kgx/sink/rdf_sink.py
|
import gzip
from collections import OrderedDict
from typing import Optional, Union, Tuple, Any, Dict
import rdflib
from linkml_runtime.linkml_model.meta import Element, ClassDefinition, SlotDefinition
from rdflib import URIRef, Literal, Namespace, RDF
from rdflib.plugins.serializers.nt import _nt_row
from rdflib.term import _is_valid_uri
from kgx.error_detection import ErrorType
from kgx.prefix_manager import PrefixManager
from kgx.config import get_logger
from kgx.sink.sink import Sink
from kgx.utils.kgx_utils import (
get_toolkit,
sentencecase_to_camelcase,
get_biolink_ancestors,
sentencecase_to_snakecase,
generate_uuid,
get_biolink_property_types,
)
from kgx.utils.rdf_utils import process_predicate
log = get_logger()
property_mapping: OrderedDict = OrderedDict()
reverse_property_mapping: OrderedDict = OrderedDict()
class RdfSink(Sink):
"""
RdfSink is responsible for writing data as records
to an RDF serialization.
.. note::
Currently only RDF N-Triples serialization is supported.
Parameters
----------
owner: Transformer
        Transformer to which the RdfSink belongs
filename: str
The filename to write to
format: str
The file format (``nt``)
compression: str
The compression type (``gz``)
reify_all_edges: bool
Whether or not to reify all the edges
kwargs: Any
Any additional arguments
"""
def __init__(
self,
owner,
filename: str,
format: str = "nt",
        compression: Optional[str] = None,
reify_all_edges: bool = True,
**kwargs: Any,
):
super().__init__(owner)
if format not in {"nt"}:
raise ValueError(f"Only RDF N-Triples ('nt') serialization supported.")
self.DEFAULT = Namespace(self.prefix_manager.prefix_map[""])
# self.OBO = Namespace('http://purl.obolibrary.org/obo/')
self.OBAN = Namespace(self.prefix_manager.prefix_map["OBAN"])
self.PMID = Namespace(self.prefix_manager.prefix_map["PMID"])
self.BIOLINK = Namespace(self.prefix_manager.prefix_map["biolink"])
self.toolkit = get_toolkit()
self.reverse_predicate_mapping = {}
self.property_types = get_biolink_property_types()
self.cache = {}
self.reify_all_edges = reify_all_edges
self.reification_types = {
RDF.Statement,
self.BIOLINK.Association,
self.OBAN.association,
}
if compression == "gz":
f = gzip.open(filename, "wb")
else:
f = open(filename, "wb")
self.FH = f
self.encoding = "ascii"
def set_reverse_predicate_mapping(self, m: Dict) -> None:
"""
Set reverse predicate mappings.
Use this method to update mappings for predicates that are
not in Biolink Model.
Parameters
----------
m: Dict
A dictionary where the keys are property names and values
are their corresponding IRI.
"""
for k, v in m.items():
self.reverse_predicate_mapping[v] = URIRef(k)
def set_property_types(self, m: Dict) -> None:
"""
Set export type for properties that are not in
Biolink Model.
Parameters
----------
m: Dict
A dictionary where the keys are property names and values
are their corresponding types.
"""
for k, v in m.items():
(element_uri, canonical_uri, predicate, property_name) = process_predicate(
self.prefix_manager, k
)
if element_uri:
key = element_uri
elif predicate:
key = predicate
else:
key = property_name
self.property_types[key] = v
def write_node(self, record: Dict) -> None:
"""
Write a node record as triples.
Parameters
----------
record: Dict
A node record
"""
for k, v in record.items():
if k in {"id", "iri"}:
continue
(
element_uri,
canonical_uri,
predicate,
property_name,
) = self.process_predicate(k)
if element_uri is None:
# not a biolink predicate
if k in self.reverse_predicate_mapping:
prop_uri = self.reverse_predicate_mapping[k]
# prop_uri = self.prefix_manager.contract(prop_uri)
else:
prop_uri = k
else:
prop_uri = canonical_uri if canonical_uri else element_uri
prop_type = self._get_property_type(prop_uri)
log.debug(f"prop {k} has prop_uri {prop_uri} and prop_type {prop_type}")
prop_uri = self.uriref(prop_uri)
if isinstance(v, (list, set, tuple)):
for x in v:
value_uri = self._prepare_object(k, prop_type, x)
self._write_triple(self.uriref(record["id"]), prop_uri, value_uri)
else:
value_uri = self._prepare_object(k, prop_type, v)
self._write_triple(self.uriref(record["id"]), prop_uri, value_uri)
def _write_triple(self, s: URIRef, p: URIRef, o: Union[URIRef, Literal]) -> None:
"""
Serialize a triple.
Parameters
----------
s: rdflib.URIRef
The subject
p: rdflib.URIRef
The predicate
o: Union[rdflib.URIRef, rdflib.Literal]
The object
"""
self.FH.write(_nt_row((s, p, o)).encode(self.encoding, "_rdflib_nt_escape"))
def write_edge(self, record: Dict) -> None:
"""
Write an edge record as triples.
Parameters
----------
record: Dict
An edge record
"""
ecache = []
associations = set(
[self.prefix_manager.contract(x) for x in self.reification_types]
)
associations.update(
[str(x) for x in set(self.toolkit.get_all_associations(formatted=True))]
)
if self.reify_all_edges:
reified_node = self.reify(record["subject"], record["object"], record)
s = reified_node["subject"]
p = reified_node["predicate"]
o = reified_node["object"]
ecache.append((s, p, o))
n = reified_node["id"]
for prop, value in reified_node.items():
if prop in {"id", "association_id", "edge_key"}:
continue
(
element_uri,
canonical_uri,
predicate,
property_name,
) = self.process_predicate(prop)
if element_uri:
prop_uri = canonical_uri if canonical_uri else element_uri
else:
if prop in self.reverse_predicate_mapping:
prop_uri = self.reverse_predicate_mapping[prop]
# prop_uri = self.prefix_manager.contract(prop_uri)
else:
prop_uri = predicate
prop_type = self._get_property_type(prop)
log.debug(
f"prop {prop} has prop_uri {prop_uri} and prop_type {prop_type}"
)
prop_uri = self.uriref(prop_uri)
if isinstance(value, list):
for x in value:
value_uri = self._prepare_object(prop, prop_type, x)
self._write_triple(URIRef(n), prop_uri, value_uri)
else:
value_uri = self._prepare_object(prop, prop_type, value)
self._write_triple(URIRef(n), prop_uri, value_uri)
        else:
            at_least_one_type_in_associations = False
            if "type" in record:
                for edge_type in record["type"]:
                    if edge_type in associations:
                        at_least_one_type_in_associations = True
if (
("type" in record and at_least_one_type_in_associations)
or (
"association_type" in record
and record["association_type"] in associations
)
or ("category" in record and any(record["category"]) in associations)
):
reified_node = self.reify(record["subject"], record["object"], record)
s = reified_node["subject"]
p = reified_node["predicate"]
o = reified_node["object"]
ecache.append((s, p, o))
n = reified_node["id"]
for prop, value in reified_node.items():
if prop in {"id", "association_id", "edge_key"}:
continue
(
element_uri,
canonical_uri,
predicate,
property_name,
) = self.process_predicate(prop)
if element_uri:
prop_uri = canonical_uri if canonical_uri else element_uri
else:
if prop in self.reverse_predicate_mapping:
prop_uri = self.reverse_predicate_mapping[prop]
# prop_uri = self.prefix_manager.contract(prop_uri)
else:
prop_uri = predicate
prop_type = self._get_property_type(prop)
prop_uri = self.uriref(prop_uri)
if isinstance(value, list):
for x in value:
value_uri = self._prepare_object(prop, prop_type, x)
self._write_triple(URIRef(n), prop_uri, value_uri)
else:
value_uri = self._prepare_object(prop, prop_type, value)
self._write_triple(URIRef(n), prop_uri, value_uri)
else:
s = self.uriref(record["subject"])
p = self.uriref(record["predicate"])
o = self.uriref(record["object"])
self._write_triple(s, p, o)
for t in ecache:
self._write_triple(t[0], t[1], t[2])
def uriref(self, identifier: str) -> URIRef:
"""
Generate a rdflib.URIRef for a given string.
Parameters
----------
identifier: str
Identifier as string.
Returns
-------
rdflib.URIRef
URIRef form of the input ``identifier``
"""
if identifier.startswith("urn:uuid:"):
uri = identifier
elif identifier in reverse_property_mapping:
# identifier is a property
uri = reverse_property_mapping[identifier]
else:
# identifier is an entity
fixed_identifier = identifier
if fixed_identifier.startswith(":"):
fixed_identifier = fixed_identifier.replace(":", "", 1)
if " " in identifier:
fixed_identifier = fixed_identifier.replace(" ", "_")
if self.prefix_manager.is_curie(fixed_identifier):
uri = self.prefix_manager.expand(fixed_identifier)
if fixed_identifier == uri:
uri = self.DEFAULT.term(fixed_identifier)
elif self.prefix_manager.is_iri(fixed_identifier):
uri = fixed_identifier
else:
uri = self.DEFAULT.term(fixed_identifier)
# if identifier == uri:
# if PrefixManager.is_curie(identifier):
# identifier = identifier.replace(':', '_')
return URIRef(uri)
def _prepare_object(
self, prop: str, prop_type: str, value: Any
) -> rdflib.term.Identifier:
"""
Prepare the object of a triple.
Parameters
----------
prop: str
property name
prop_type: str
property type
value: Any
property value
Returns
-------
rdflib.term.Identifier
An instance of rdflib.term.Identifier
"""
if prop_type == "uriorcurie" or prop_type == "xsd:anyURI":
if isinstance(value, str) and PrefixManager.is_curie(value):
o = self.uriref(value)
elif isinstance(value, str) and PrefixManager.is_iri(value):
if _is_valid_uri(value):
o = URIRef(value)
else:
o = Literal(value)
else:
o = Literal(value)
elif prop_type.startswith("xsd"):
o = Literal(value, datatype=self.prefix_manager.expand(prop_type))
else:
o = Literal(value, datatype=self.prefix_manager.expand("xsd:string"))
return o
def _get_property_type(self, p: str) -> str:
"""
Get type for a given property name.
Parameters
----------
p: str
property name
Returns
-------
str
The type for property name
"""
default_uri_types = {
"biolink:type",
"biolink:category",
"biolink:subject",
"biolink:object",
"biolink:relation",
"biolink:predicate",
"rdf:type",
"rdf:subject",
"rdf:predicate",
"rdf:object",
"type"
}
if p in default_uri_types:
t = "uriorcurie"
else:
if p in self.property_types:
t = self.property_types[p]
elif f":{p}" in self.property_types:
t = self.property_types[f":{p}"]
elif f"biolink:{p}" in self.property_types:
t = self.property_types[f"biolink:{p}"]
else:
t = "xsd:string"
return t
def process_predicate(self, p: Optional[Union[URIRef, str]]) -> Tuple:
"""
Process a predicate where the method checks if there is a mapping in Biolink Model.
Parameters
----------
p: Optional[Union[URIRef, str]]
The predicate
Returns
-------
Tuple
A tuple that contains the Biolink CURIE (if available), the Biolink slot_uri CURIE (if available),
the CURIE form of p, the reference of p
"""
if p in self.cache:
# already processed this predicate before; pull from cache
element_uri = self.cache[p]["element_uri"]
canonical_uri = self.cache[p]["canonical_uri"]
predicate = self.cache[p]["predicate"]
property_name = self.cache[p]["property_name"]
else:
# haven't seen this property before; map to element
if self.prefix_manager.is_iri(p):
predicate = self.prefix_manager.contract(str(p))
else:
predicate = None
if self.prefix_manager.is_curie(p):
property_name = self.prefix_manager.get_reference(p)
predicate = p
else:
if predicate and self.prefix_manager.is_curie(predicate):
property_name = self.prefix_manager.get_reference(predicate)
else:
property_name = p
predicate = f":{p}"
element = self.get_biolink_element(p)
canonical_uri = None
if element:
if isinstance(element, SlotDefinition):
# predicate corresponds to a biolink slot
if element.definition_uri:
element_uri = self.prefix_manager.contract(
element.definition_uri
)
else:
element_uri = (
f"biolink:{sentencecase_to_snakecase(element.name)}"
)
if element.slot_uri:
canonical_uri = element.slot_uri
elif isinstance(element, ClassDefinition):
# this will happen only when the IRI is actually
# a reference to a class
element_uri = self.prefix_manager.contract(element.class_uri)
else:
element_uri = f"biolink:{sentencecase_to_camelcase(element.name)}"
if "biolink:Attribute" in get_biolink_ancestors(element.name):
element_uri = f"biolink:{sentencecase_to_snakecase(element.name)}"
if not predicate:
predicate = element_uri
else:
# no mapping to biolink model;
# look at predicate mappings
element_uri = None
if p in self.reverse_predicate_mapping:
property_name = self.reverse_predicate_mapping[p]
predicate = f":{property_name}"
self.cache[p] = {
"element_uri": element_uri,
"canonical_uri": canonical_uri,
"predicate": predicate,
"property_name": property_name,
}
return element_uri, canonical_uri, predicate, property_name
def get_biolink_element(self, predicate: Any) -> Optional[Element]:
"""
Returns a Biolink Model element for a given predicate.
Parameters
----------
predicate: Any
The CURIE of a predicate
Returns
-------
Optional[Element]
The corresponding Biolink Model element
"""
toolkit = get_toolkit()
if self.prefix_manager.is_iri(predicate):
predicate_curie = self.prefix_manager.contract(predicate)
else:
predicate_curie = predicate
if self.prefix_manager.is_curie(predicate_curie):
reference = self.prefix_manager.get_reference(predicate_curie)
else:
reference = predicate_curie
element = toolkit.get_element(reference)
if not element:
try:
mapping = toolkit.get_element_by_mapping(predicate)
if mapping:
element = toolkit.get_element(mapping)
except ValueError as e:
self.owner.log_error(
entity=str(predicate),
error_type=ErrorType.INVALID_EDGE_PREDICATE,
message=str(e)
)
element = None
return element
def reify(self, u: str, v: str, data: Dict) -> Dict:
"""
Create a node representation of an edge.
Parameters
----------
u: str
Subject
v: str
Object
data: Dict
Edge data
Returns
-------
Dict
The reified node
"""
s = self.uriref(u)
p = self.uriref(data["predicate"])
o = self.uriref(v)
if "id" in data:
node_id = self.uriref(data["id"])
else:
# generate a UUID for the reified node
node_id = self.uriref(generate_uuid())
reified_node = data.copy()
if "category" in reified_node:
del reified_node["category"]
reified_node["id"] = node_id
reified_node["type"] = "biolink:Association"
reified_node["subject"] = s
reified_node["predicate"] = p
reified_node["object"] = o
return reified_node
def finalize(self) -> None:
"""
Perform any operations after writing the file.
"""
self.FH.close()
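# Sketch of what reify() returns for a bare edge record (the values shown are
# illustrative, not actual kgx output):
#
#   data = {"subject": "X:1", "predicate": "biolink:related_to", "object": "X:2"}
#   reified = sink.reify("X:1", "X:2", data)
#   # reified["type"]      == "biolink:Association"
#   # reified["subject"]   -> URIRef for X:1
#   # reified["predicate"] -> URIRef for biolink:related_to
#   # reified["object"]    -> URIRef for X:2
#   # reified["id"]        -> URIRef minted via generate_uuid(), since data has no "id"
#
# write_edge() then serializes each of these reified properties as triples about
# that association node.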
| 20,032 | 34.084063 | 110 |
py
|
kgx
|
kgx-master/kgx/sink/neo_sink.py
|
from typing import List, Union, Any
from neo4j import GraphDatabase, Neo4jDriver, Session
from kgx.config import get_logger
from kgx.error_detection import ErrorType
from kgx.sink.sink import Sink
from kgx.source.source import DEFAULT_NODE_CATEGORY
log = get_logger()
class NeoSink(Sink):
"""
NeoSink is responsible for writing data as records
to a Neo4j instance.
Parameters
----------
owner: Transformer
        Transformer to which the NeoSink belongs
uri: str
The URI for the Neo4j instance.
For example, http://localhost:7474
username: str
The username
password: str
The password
kwargs: Any
Any additional arguments
"""
CACHE_SIZE = 100000
node_cache = {}
edge_cache = {}
node_count = 0
edge_count = 0
CATEGORY_DELIMITER = "|"
CYPHER_CATEGORY_DELIMITER = ":"
_seen_categories = set()
def __init__(self, owner, uri: str, username: str, password: str, **kwargs: Any):
if "cache_size" in kwargs:
self.CACHE_SIZE = kwargs["cache_size"]
        self.http_driver: Neo4jDriver = GraphDatabase.driver(
uri, auth=(username, password)
)
self.session: Session = self.http_driver.session()
super().__init__(owner)
def _flush_node_cache(self):
self._write_node_cache()
self.node_cache.clear()
self.node_count = 0
def write_node(self, record) -> None:
"""
Cache a node record that is to be written to Neo4j.
This method writes a cache of node records when the
total number of records exceeds ``CACHE_SIZE``
Parameters
----------
record: Dict
A node record
"""
sanitized_category = self.sanitize_category(record["category"])
category = self.CATEGORY_DELIMITER.join(sanitized_category)
if self.node_count >= self.CACHE_SIZE:
self._flush_node_cache()
if category not in self.node_cache:
self.node_cache[category] = [record]
else:
self.node_cache[category].append(record)
self.node_count += 1
def _write_node_cache(self) -> None:
"""
Write cached node records to Neo4j.
"""
batch_size = 10000
categories = self.node_cache.keys()
filtered_categories = [x for x in categories if x not in self._seen_categories]
self.create_constraints(filtered_categories)
for category in self.node_cache.keys():
log.debug("Generating UNWIND for category: {}".format(category))
cypher_category = category.replace(
self.CATEGORY_DELIMITER, self.CYPHER_CATEGORY_DELIMITER
)
query = self.generate_unwind_node_query(cypher_category)
log.debug(query)
nodes = self.node_cache[category]
for x in range(0, len(nodes), batch_size):
y = min(x + batch_size, len(nodes))
log.debug(f"Batch {x} - {y}")
batch = nodes[x:y]
try:
self.session.run(query, parameters={"nodes": batch})
except Exception as e:
self.owner.log_error(
entity=f"{category} Nodes {batch}",
error_type=ErrorType.INVALID_CATEGORY,
message=str(e)
)
def _flush_edge_cache(self):
self._flush_node_cache()
self._write_edge_cache()
self.edge_cache.clear()
self.edge_count = 0
def write_edge(self, record) -> None:
"""
Cache an edge record that is to be written to Neo4j.
This method writes a cache of edge records when the
total number of records exceeds ``CACHE_SIZE``
Parameters
----------
record: Dict
An edge record
"""
if self.edge_count >= self.CACHE_SIZE:
self._flush_edge_cache()
# self.validate_edge(data)
edge_predicate = record["predicate"]
if edge_predicate in self.edge_cache:
self.edge_cache[edge_predicate].append(record)
else:
self.edge_cache[edge_predicate] = [record]
self.edge_count += 1
def _write_edge_cache(self) -> None:
"""
Write cached edge records to Neo4j.
"""
batch_size = 10000
for predicate in self.edge_cache.keys():
query = self.generate_unwind_edge_query(predicate)
log.debug(query)
edges = self.edge_cache[predicate]
for x in range(0, len(edges), batch_size):
y = min(x + batch_size, len(edges))
batch = edges[x:y]
log.debug(f"Batch {x} - {y}")
log.debug(edges[x:y])
try:
self.session.run(
query, parameters={"relationship": predicate, "edges": batch}
)
except Exception as e:
self.owner.log_error(
entity=f"{predicate} Edges {batch}",
error_type=ErrorType.INVALID_CATEGORY,
message=str(e)
)
def finalize(self) -> None:
"""
Write any remaining cached node and/or edge records.
"""
self._write_node_cache()
self._write_edge_cache()
@staticmethod
def sanitize_category(category: List) -> List:
"""
Sanitize category for use in UNWIND cypher clause.
This method adds escape characters to each element in category
list to ensure the category is processed correctly.
Parameters
----------
category: List
Category
Returns
-------
List
Sanitized category list
"""
return [f"`{x}`" for x in category]
@staticmethod
def generate_unwind_node_query(category: str) -> str:
"""
Generate UNWIND cypher query for saving nodes into Neo4j.
There should be a CONSTRAINT in Neo4j for ``self.DEFAULT_NODE_CATEGORY``.
The query uses ``self.DEFAULT_NODE_CATEGORY`` as the node label to increase speed for adding nodes.
The query also sets label to ``self.DEFAULT_NODE_CATEGORY`` for any node to make sure that the CONSTRAINT applies.
Parameters
----------
category: str
Node category
Returns
-------
str
The UNWIND cypher query
"""
query = f"""
UNWIND $nodes AS node
MERGE (n:`{DEFAULT_NODE_CATEGORY}` {{id: node.id}})
ON CREATE SET n += node, n:{category}
ON MATCH SET n += node, n:{category}
"""
return query
@staticmethod
def generate_unwind_edge_query(edge_predicate: str) -> str:
"""
Generate UNWIND cypher query for saving edges into Neo4j.
Query uses ``self.DEFAULT_NODE_CATEGORY`` to quickly lookup the required subject and object node.
Parameters
----------
edge_predicate: str
Edge label as string
Returns
-------
str
The UNWIND cypher query
"""
query = f"""
UNWIND $edges AS edge
MATCH (s:`{DEFAULT_NODE_CATEGORY}` {{id: edge.subject}}), (o:`{DEFAULT_NODE_CATEGORY}` {{id: edge.object}})
MERGE (s)-[r:`{edge_predicate}`]->(o)
SET r += edge
"""
return query
def create_constraints(self, categories: Union[set, list]) -> None:
"""
Create a unique constraint on node 'id' for all ``categories`` in Neo4j.
Parameters
----------
categories: Union[set, list]
Set of categories
"""
categories_set = set(categories)
categories_set.add(f"`{DEFAULT_NODE_CATEGORY}`")
for category in categories_set:
if self.CATEGORY_DELIMITER in category:
subcategories = category.split(self.CATEGORY_DELIMITER)
self.create_constraints(subcategories)
else:
query = NeoSink.create_constraint_query(category)
try:
self.session.run(query)
self._seen_categories.add(category)
except Exception as e:
self.owner.log_error(
entity=category,
error_type=ErrorType.INVALID_CATEGORY,
message=str(e)
)
@staticmethod
def create_constraint_query(category: str) -> str:
"""
Create a Cypher CONSTRAINT query
Parameters
----------
category: str
The category to create a constraint on
Returns
-------
str
The Cypher CONSTRAINT query
"""
query = f"CREATE CONSTRAINT IF NOT EXISTS ON (n:{category}) ASSERT n.id IS UNIQUE"
return query
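# Sketch of the Cypher produced by the query builders above, assuming that
# DEFAULT_NODE_CATEGORY from kgx.source.source resolves to the generic
# biolink:NamedThing category:
#
#   NeoSink.create_constraint_query("`biolink:Gene`")
#     -> CREATE CONSTRAINT IF NOT EXISTS ON (n:`biolink:Gene`) ASSERT n.id IS UNIQUE
#
#   NeoSink.generate_unwind_node_query("`biolink:Gene`")
#     -> UNWIND $nodes AS node
#        MERGE (n:`biolink:NamedThing` {id: node.id})
#        ON CREATE SET n += node, n:`biolink:Gene`
#        ON MATCH SET n += node, n:`biolink:Gene`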
| 9,073 | 30.397924 | 122 |
py
|
kgx
|
kgx-master/kgx/sink/__init__.py
|
from .sink import Sink
from .tsv_sink import TsvSink
from .json_sink import JsonSink
from .jsonl_sink import JsonlSink
from .neo_sink import NeoSink
from .rdf_sink import RdfSink
from .graph_sink import GraphSink
from .null_sink import NullSink
from .sql_sink import SqlSink
| 274 | 29.555556 | 33 |
py
|
kgx
|
kgx-master/kgx/sink/json_sink.py
|
import gzip
from typing import Any, Optional, Dict
import jsonstreams
from kgx.config import get_logger
from kgx.sink import Sink
log = get_logger()
class JsonSink(Sink):
"""
JsonSink is responsible for writing data as records
to a JSON.
Parameters
----------
    owner: Transformer
        Transformer to which the JsonSink belongs
filename: str
The filename to write to
format: str
The file format (``json``)
compression: Optional[str]
The compression type (``gz``)
kwargs: Any
Any additional arguments
"""
def __init__(
self,
owner,
filename: str,
format: str = "json",
compression: Optional[str] = None,
**kwargs: Any,
):
super().__init__(owner)
self.filename = filename
if compression:
self.compression = compression
else:
self.compression = None
self.FH = jsonstreams.Stream(
jsonstreams.Type.OBJECT, filename=filename, pretty=True, indent=4
)
self.NH = None
self.EH = None
def write_node(self, record: Dict) -> None:
"""
Write a node record to JSON.
Parameters
----------
record: Dict
A node record
"""
if self.EH:
self.EH.close()
self.EH = None
if not self.NH:
self.NH = self.FH.subarray("nodes")
self.NH.write(record)
def write_edge(self, record: Dict) -> None:
"""
Write an edge record to JSON.
Parameters
----------
record: Dict
An edge record
"""
if self.NH:
self.NH.close()
self.NH = None
if not self.EH:
self.EH = self.FH.subarray("edges")
self.EH.write(record)
def finalize(self) -> None:
"""
Finalize by creating a compressed file, if needed.
"""
if self.NH:
self.NH.close()
if self.EH:
self.EH.close()
if self.FH:
self.FH.close()
        if self.compression:
            with gzip.open(f"{self.filename}.gz", "wb") as WH:
                with open(self.filename, "rb") as FH:
                    for line in FH:
                        WH.write(line)
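# Minimal usage sketch; None stands in for the owning Transformer, which this sink
# only stores. All records end up in one JSON object with "nodes" and "edges" arrays.
if __name__ == "__main__":
    sink = JsonSink(owner=None, filename="graph.json")
    sink.write_node({"id": "EXAMPLE:1", "category": ["biolink:NamedThing"]})
    sink.write_edge({"subject": "EXAMPLE:1", "predicate": "biolink:related_to", "object": "EXAMPLE:2"})
    sink.finalize()  # pass compression="gz" at construction time to also emit graph.json.gz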
| 2,335 | 21.901961 | 77 |
py
|
kgx
|
kgx-master/kgx/sink/null_sink.py
|
from typing import Any
from kgx.sink import Sink
class NullSink(Sink):
"""
    A NullSink just ignores any data written to it,
effectively a /dev/null device for Transformer
data flows, in which the inspection of the input
knowledge graph is the important operation, but
the graph itself is not persisted in the output
(in particular, not in memory, where the huge
    memory footprint may be problematic, e.g. when
stream processing huge graphs).
Parameters
----------
owner: Transformer
        Transformer to which the NullSink belongs
n/a (**kwargs allowed, but ignored)
"""
def __init__(self, owner, **kwargs: Any):
super().__init__(owner)
def write_node(self, record) -> None:
"""
Write a node record to the underlying store.
Parameters
----------
record: Any
A node record
"""
pass
def write_edge(self, record) -> None:
"""
Write an edge record to the underlying store.
Parameters
----------
record: Any
An edge record
"""
pass
def finalize(self) -> None:
"""
Operations that ought to be done after
writing all the incoming data should be called
by this method.
"""
pass
| 1,356 | 22.396552 | 54 |
py
|
kgx
|
kgx-master/kgx/graph/nx_graph.py
|
from typing import Dict, Any, Optional, List, Generator
from kgx.graph.base_graph import BaseGraph
from networkx import (
MultiDiGraph,
set_node_attributes,
relabel_nodes,
set_edge_attributes,
get_node_attributes,
get_edge_attributes,
)
from kgx.utils.kgx_utils import prepare_data_dict
class NxGraph(BaseGraph):
"""
NxGraph is a wrapper that provides methods to interact with a networkx.MultiDiGraph.
NxGraph extends kgx.graph.base_graph.BaseGraph and implements all the methods from BaseGraph.
"""
def __init__(self):
super().__init__()
self.graph = MultiDiGraph()
self.name = None
def add_node(self, node: str, **kwargs: Any) -> None:
"""
Add a node to the graph.
Parameters
----------
node: str
Node identifier
**kwargs: Any
Any additional node properties
"""
if "data" in kwargs:
data = kwargs["data"]
else:
data = kwargs
self.graph.add_node(node, **data)
def add_edge(
self, subject_node: str, object_node: str, edge_key: str = None, **kwargs: Any
) -> None:
"""
Add an edge to the graph.
Parameters
----------
subject_node: str
The subject (source) node
object_node: str
The object (target) node
edge_key: Optional[str]
The edge key
kwargs: Any
Any additional edge properties
"""
if "data" in kwargs:
data = kwargs["data"]
else:
data = kwargs
return self.graph.add_edge(subject_node, object_node, key=edge_key, **data)
def add_node_attribute(self, node: str, attr_key: str, attr_value: Any) -> None:
"""
Add an attribute to a given node.
Parameters
----------
node: str
The node identifier
attr_key: str
The key for an attribute
attr_value: Any
The value corresponding to the key
"""
self.graph.add_node(node, **{attr_key: attr_value})
def add_edge_attribute(
self,
subject_node: str,
object_node: str,
edge_key: Optional[str],
attr_key: str,
attr_value: Any,
) -> None:
"""
Add an attribute to a given edge.
Parameters
----------
subject_node: str
The subject (source) node
object_node: str
The object (target) node
edge_key: Optional[str]
The edge key
attr_key: str
The attribute key
attr_value: Any
The attribute value
"""
self.graph.add_edge(
subject_node, object_node, key=edge_key, **{attr_key: attr_value}
)
def update_node_attribute(
self, node: str, attr_key: str, attr_value: Any, preserve: bool = False
) -> Dict:
"""
Update an attribute of a given node.
Parameters
----------
node: str
The node identifier
attr_key: str
The key for an attribute
attr_value: Any
The value corresponding to the key
preserve: bool
Whether or not to preserve existing values for the given attr_key
Returns
-------
Dict
A dictionary corresponding to the updated node properties
"""
node_data = self.graph.nodes[node]
updated = prepare_data_dict(
node_data, {attr_key: attr_value}, preserve=preserve
)
self.graph.add_node(node, **updated)
return updated
def update_edge_attribute(
self,
subject_node: str,
object_node: str,
edge_key: Optional[str],
attr_key: str,
attr_value: Any,
preserve: bool = False,
) -> Dict:
"""
Update an attribute of a given edge.
Parameters
----------
subject_node: str
The subject (source) node
object_node: str
The object (target) node
edge_key: Optional[str]
The edge key
attr_key: str
The attribute key
attr_value: Any
The attribute value
preserve: bool
Whether or not to preserve existing values for the given attr_key
Returns
-------
Dict
A dictionary corresponding to the updated edge properties
"""
e = self.graph.edges(
(subject_node, object_node, edge_key), keys=True, data=True
)
edge_data = list(e)[0][3]
updated = prepare_data_dict(edge_data, {attr_key: attr_value}, preserve)
self.graph.add_edge(subject_node, object_node, key=edge_key, **updated)
return updated
def get_node(self, node: str) -> Dict:
"""
Get a node and its properties.
Parameters
----------
node: str
The node identifier
Returns
-------
Dict
The node dictionary
"""
n = {}
if self.graph.has_node(node):
n = self.graph.nodes[node]
return n
def get_edge(
self, subject_node: str, object_node: str, edge_key: Optional[str] = None
) -> Dict:
"""
Get an edge and its properties.
Parameters
----------
subject_node: str
The subject (source) node
object_node: str
The object (target) node
edge_key: Optional[str]
The edge key
Returns
-------
Dict
The edge dictionary
"""
e = {}
if self.graph.has_edge(subject_node, object_node, edge_key):
e = self.graph.get_edge_data(subject_node, object_node, edge_key)
return e
def nodes(self, data: bool = True) -> Dict:
"""
Get all nodes in a graph.
Parameters
----------
data: bool
Whether or not to fetch node properties
Returns
-------
Dict
A dictionary of nodes
"""
return self.graph.nodes(data)
def edges(self, keys: bool = False, data: bool = True) -> Dict:
"""
Get all edges in a graph.
Parameters
----------
keys: bool
Whether or not to include edge keys
data: bool
Whether or not to fetch node properties
Returns
-------
Dict
A dictionary of edges
"""
return self.graph.edges(keys=keys, data=data)
def in_edges(self, node: str, keys: bool = False, data: bool = False) -> List:
"""
Get all incoming edges for a given node.
Parameters
----------
node: str
The node identifier
keys: bool
Whether or not to include edge keys
data: bool
Whether or not to fetch node properties
Returns
-------
List
A list of edges
"""
return self.graph.in_edges(node, keys=keys, data=data)
def out_edges(self, node: str, keys: bool = False, data: bool = False) -> List:
"""
Get all outgoing edges for a given node.
Parameters
----------
node: str
The node identifier
keys: bool
Whether or not to include edge keys
data: bool
Whether or not to fetch node properties
Returns
-------
List
A list of edges
"""
return self.graph.out_edges(node, keys=keys, data=data)
def nodes_iter(self) -> Generator:
"""
Get an iterable to traverse through all the nodes in a graph.
Returns
-------
Generator
A generator for nodes where each element is a Tuple that
contains (node_id, node_data)
"""
for n in self.graph.nodes(data=True):
yield n
def edges_iter(self) -> Generator:
"""
Get an iterable to traverse through all the edges in a graph.
Returns
-------
Generator
A generator for edges where each element is a 4-tuple that
contains (subject, object, edge_key, edge_data)
"""
for u, v, k, data in self.graph.edges(keys=True, data=True):
yield u, v, k, data
def remove_node(self, node: str) -> None:
"""
Remove a given node from the graph.
Parameters
----------
node: str
The node identifier
"""
self.graph.remove_node(node)
def remove_edge(
self, subject_node: str, object_node: str, edge_key: Optional[str] = None
) -> None:
"""
Remove a given edge from the graph.
Parameters
----------
subject_node: str
The subject (source) node
object_node: str
The object (target) node
edge_key: Optional[str]
The edge key
"""
self.graph.remove_edge(subject_node, object_node, edge_key)
def has_node(self, node: str) -> bool:
"""
Check whether a given node exists in the graph.
Parameters
----------
node: str
The node identifier
Returns
-------
bool
Whether or not the given node exists
"""
return self.graph.has_node(node)
def has_edge(
self, subject_node: str, object_node: str, edge_key: Optional[str] = None
) -> bool:
"""
Check whether a given edge exists in the graph.
Parameters
----------
subject_node: str
The subject (source) node
object_node: str
The object (target) node
edge_key: Optional[str]
The edge key
Returns
-------
bool
Whether or not the given edge exists
"""
return self.graph.has_edge(subject_node, object_node, key=edge_key)
def number_of_nodes(self) -> int:
"""
Returns the number of nodes in a graph.
Returns
-------
int
"""
return self.graph.number_of_nodes()
def number_of_edges(self) -> int:
"""
Returns the number of edges in a graph.
Returns
-------
int
"""
return self.graph.number_of_edges()
def degree(self):
"""
Get the degree of all the nodes in a graph.
"""
return self.graph.degree()
def clear(self) -> None:
"""
Remove all the nodes and edges in the graph.
"""
self.graph.clear()
@staticmethod
def set_node_attributes(graph: BaseGraph, attributes: Dict) -> None:
"""
        Set node attributes from a dictionary of key-values.
Parameters
----------
graph: kgx.graph.base_graph.BaseGraph
The graph to modify
attributes: Dict
A dictionary of node identifier to key-value pairs
"""
return set_node_attributes(graph.graph, attributes)
@staticmethod
def set_edge_attributes(graph: BaseGraph, attributes: Dict) -> None:
"""
        Set edge attributes from a dictionary of key-values.
Parameters
----------
graph: kgx.graph.base_graph.BaseGraph
The graph to modify
attributes: Dict
            A dictionary of edge identifier to key-value pairs
Returns
-------
Any
"""
return set_edge_attributes(graph.graph, attributes)
@staticmethod
def get_node_attributes(graph: BaseGraph, attr_key: str) -> Dict:
"""
Get all nodes that have a value for the given attribute ``attr_key``.
Parameters
----------
graph: kgx.graph.base_graph.BaseGraph
The graph to modify
attr_key: str
The attribute key
Returns
-------
Dict
A dictionary where nodes are the keys and the values
are the attribute values for ``key``
"""
return get_node_attributes(graph.graph, attr_key)
@staticmethod
def get_edge_attributes(graph: BaseGraph, attr_key: str) -> Dict:
"""
Get all edges that have a value for the given attribute ``attr_key``.
Parameters
----------
graph: kgx.graph.base_graph.BaseGraph
The graph to modify
attr_key: str
The attribute key
Returns
-------
Dict
A dictionary where edges are the keys and the values
are the attribute values for ``attr_key``
"""
return get_edge_attributes(graph.graph, attr_key)
@staticmethod
def relabel_nodes(graph: BaseGraph, mapping: Dict) -> None:
"""
Relabel identifiers for a series of nodes based on mappings.
Parameters
----------
graph: kgx.graph.base_graph.BaseGraph
The graph to modify
mapping: Dict
A dictionary of mapping where the key is the old identifier
and the value is the new identifier.
"""
relabel_nodes(graph.graph, mapping, copy=False)
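# Small illustrative round-trip through the wrapper API defined above; identifiers
# and property values are invented for demonstration.
if __name__ == "__main__":
    g = NxGraph()
    g.add_node("EXAMPLE:1", name="node one", category=["biolink:NamedThing"])
    g.add_node("EXAMPLE:2", name="node two")
    g.add_edge("EXAMPLE:1", "EXAMPLE:2", edge_key="e1", predicate="biolink:related_to")
    g.update_node_attribute("EXAMPLE:1", "description", "an example node")
    print(g.get_node("EXAMPLE:1")["description"])                    # an example node
    print(g.get_edge("EXAMPLE:1", "EXAMPLE:2", "e1")["predicate"])   # biolink:related_to
    print(g.number_of_nodes(), g.number_of_edges())                  # 2 1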
| 13,455 | 24.245779 | 97 |
py
|
kgx
|
kgx-master/kgx/graph/__init__.py
| 0 | 0 | 0 |
py
|
|
kgx
|
kgx-master/kgx/graph/base_graph.py
|
from typing import Dict, Optional, List, Generator, Any
class BaseGraph(object):
"""
    BaseGraph is a wrapper that provides methods to interact with a graph store.
All implementations should extend this BaseGraph class and implement all the defined methods.
"""
def __init__(self):
self.graph = None
self.name = None
def add_node(self, node: str, **kwargs: Any) -> Any:
"""
Add a node to the graph.
Parameters
----------
node: str
Node identifier
**kwargs: Any
Any additional node properties
"""
pass
def add_edge(
self,
subject_node: str,
object_node: str,
edge_key: Optional[str] = None,
**kwargs: Any
) -> Any:
"""
Add an edge to the graph.
Parameters
----------
subject_node: str
The subject (source) node
object_node: str
The object (target) node
edge_key: Optional[str]
The edge key
kwargs: Any
Any additional edge properties
Returns
-------
Any
"""
pass
def add_node_attribute(self, node: str, key: str, value: Any) -> Any:
"""
Add an attribute to a given node.
Parameters
----------
node: str
The node identifier
key: str
The key for an attribute
value: Any
The value corresponding to the key
Returns
-------
Any
"""
pass
def add_edge_attribute(
self,
subject_node: str,
object_node: str,
edge_key: Optional[str],
attr_key: str,
attr_value: Any,
) -> Any:
"""
Add an attribute to a given edge.
Parameters
----------
subject_node: str
The subject (source) node
object_node: str
The object (target) node
edge_key: Optional[str]
The edge key
attr_key: str
The attribute key
attr_value: Any
The attribute value
Returns
-------
Any
"""
pass
def update_node_attribute(self, node, key: str, value: Any) -> Dict:
"""
Update an attribute of a given node.
Parameters
----------
node: str
The node identifier
key: str
The key for an attribute
value: Any
The value corresponding to the key
Returns
-------
Dict
A dictionary corresponding to the updated node properties
"""
pass
def update_edge_attribute(
self,
subject_node: str,
object_node: str,
edge_key: Optional[str],
attr_key: str,
attr_value: Any,
) -> Dict:
"""
Update an attribute of a given edge.
Parameters
----------
subject_node: str
The subject (source) node
object_node: str
The object (target) node
edge_key: Optional[str]
The edge key
attr_key: str
The attribute key
attr_value: Any
The attribute value
Returns
-------
Dict
A dictionary corresponding to the updated edge properties
"""
pass
def get_node(self, node: str) -> Dict:
"""
Get a node and its properties.
Parameters
----------
node: str
The node identifier
Returns
-------
Dict
The node dictionary
"""
pass
def get_edge(
self, subject_node: str, object_node: str, edge_key: Optional[str]
) -> Dict:
"""
Get an edge and its properties.
Parameters
----------
subject_node: str
The subject (source) node
object_node: str
The object (target) node
edge_key: Optional[str]
The edge key
Returns
-------
Dict
The edge dictionary
"""
pass
def nodes(self, data: bool = True) -> Dict:
"""
Get all nodes in a graph.
Parameters
----------
data: bool
Whether or not to fetch node properties
Returns
-------
Dict
A dictionary of nodes
"""
pass
def edges(self, keys: bool = False, data: bool = True) -> Dict:
"""
Get all edges in a graph.
Parameters
----------
keys: bool
Whether or not to include edge keys
data: bool
Whether or not to fetch node properties
Returns
-------
Dict
A dictionary of edges
"""
pass
def in_edges(self, node: str, keys: bool = False, data: bool = False) -> List:
"""
Get all incoming edges for a given node.
Parameters
----------
node: str
The node identifier
keys: bool
Whether or not to include edge keys
data: bool
Whether or not to fetch node properties
Returns
-------
List
A list of edges
"""
pass
def out_edges(self, node: str, keys: bool = False, data: bool = False) -> List:
"""
Get all outgoing edges for a given node.
Parameters
----------
node: str
The node identifier
keys: bool
Whether or not to include edge keys
data: bool
Whether or not to fetch node properties
Returns
-------
List
A list of edges
"""
pass
def nodes_iter(self) -> Generator:
"""
Get an iterable to traverse through all the nodes in a graph.
Returns
-------
Generator
A generator for nodes
"""
pass
def edges_iter(self) -> Generator:
"""
Get an iterable to traverse through all the edges in a graph.
Returns
-------
Generator
A generator for edges
"""
for u, v, k, data in self.edges(keys=True, data=True):
yield (u, v, k, data)
def remove_node(self, node: str) -> Any:
"""
Remove a given node from the graph.
Parameters
----------
node: str
The node identifier
Returns
-------
Any
"""
pass
def remove_edge(
self, subject_node: str, object_node: str, edge_key: Optional[str] = None
) -> Any:
"""
Remove a given edge from the graph.
Parameters
----------
subject_node: str
The subject (source) node
object_node: str
The object (target) node
edge_key: Optional[str]
The edge key
Returns
-------
Any
"""
pass
def has_node(self, node: str) -> bool:
"""
Check whether a given node exists in the graph.
Parameters
----------
node: str
The node identifier
Returns
-------
bool
Whether or not the given node exists
"""
pass
def has_edge(
self, subject_node: str, object_node: str, edge_key: Optional[str] = None
) -> bool:
"""
Check whether a given edge exists in the graph.
Parameters
----------
subject_node: str
The subject (source) node
object_node: str
The object (target) node
edge_key: Optional[str]
The edge key
Returns
-------
bool
Whether or not the given edge exists
"""
pass
def number_of_nodes(self) -> int:
"""
Returns the number of nodes in a graph.
Returns
-------
int
"""
pass
def number_of_edges(self) -> int:
"""
Returns the number of edges in a graph.
Returns
-------
int
"""
pass
def degree(self):
"""
Get the degree of all the nodes in a graph.
"""
pass
def clear(self) -> None:
"""
Remove all the nodes and edges in the graph.
"""
pass
@staticmethod
def set_node_attributes(graph: Any, attributes: Dict) -> Any:
"""
        Set node attributes from a dictionary of key-values.
Parameters
----------
graph: Any
The graph to modify
attributes: Dict
A dictionary of node identifier to key-value pairs
Returns
-------
Any
"""
pass
@staticmethod
def set_edge_attributes(graph: Any, attributes: Dict) -> Any:
"""
        Set edge attributes from a dictionary of key-values.
Parameters
----------
graph: Any
The graph to modify
attributes: Dict
            A dictionary of edge identifier to key-value pairs
Returns
-------
Any
"""
pass
@staticmethod
def get_node_attributes(graph: Any, attr_key: str) -> Any:
"""
Get all nodes that have a value for the given attribute ``attr_key``.
Parameters
----------
graph: Any
The graph to modify
attr_key: str
The attribute key
Returns
-------
Any
"""
pass
@staticmethod
def get_edge_attributes(graph: Any, attr_key: str) -> Any:
"""
Get all edges that have a value for the given attribute ``attr_key``.
Parameters
----------
graph: Any
The graph to modify
attr_key: str
The attribute key
Returns
-------
Any
"""
pass
@staticmethod
def relabel_nodes(graph: Any, mapping: Dict) -> Any:
"""
Relabel identifiers for a series of nodes based on mappings.
Parameters
----------
graph: Any
The graph to modify
mapping: Dict[str, str]
A dictionary of mapping where the key is the old identifier
and the value is the new identifier.
Returns
-------
Any
"""
pass
| 10,649 | 19.841487 | 97 |
py
|
kgx
|
kgx-master/kgx/utils/infores.py
|
"""
Information Resource (InfoRes) utilities
"""
import re
from typing import Optional, Tuple, Callable, Dict, List, Any
from kgx.utils.kgx_utils import knowledge_provenance_properties, column_types
from kgx.error_detection import ErrorType, MessageLevel
from kgx.config import get_logger
log = get_logger()
class InfoResContext:
"""
Information Resource CURIE management context for knowledge sources.
"""
def __init__(self):
self.default_provenance = "Graph"
# this dictionary captures the operational mappings
# for a specified knowledge source field in the graph
self.mapping: Dict[str, Any] = dict()
# this dictionary records specific knowledge source
# name to infores associations for the given graph
self.catalog: Dict[str, str] = dict()
def get_catalog(self) -> Dict[str, str]:
"""
Retrieves the catalog of mappings of Knowledge Source names to an InfoRes.
Returns
-------
Dict[str, str]
            Dictionary where keys are Knowledge Source names and values are the corresponding InfoRes CURIEs
"""
return self.catalog
class InfoResMapping:
"""
Knowledge Source mapping onto an Information Resource identifier.
"""
def __init__(self, context, ksf: str):
"""
InfoRes mapping specification for a single knowledge_source (or related) field
Parameters
----------
context: InfoResContext
The KGX knowledge graph and default configuration context within which this InfoResMapping exists.
ksf: str
Knowledge Source Field being processed.
"""
self.context = context # parent InfoRes context
self.ksf = ksf # 'Knowledge Source Field' slot name
self.filter = None
self.substr = ""
self.prefix = ""
def processor(self, infores_rewrite_filter: Optional[Tuple] = None) -> Callable:
"""
Full processor of a Knowledge Source name into an InfoRes. The conversion is made based on
client-caller specified rewrite rules for a given knowledge source field ('ksf').
Parameters
----------
infores_rewrite_filter: Optional[Tuple]
The presence of this optional Tuple argument signals an InfoRes rewrite of any
knowledge source field name in node and edge data records.
The mere presence of a (possibly empty) Tuple signals a rewrite. If the Tuple is empty,
then only a standard transformation of the field value is performed. If the Tuple has
an infores_rewrite[0] value, it is assumed to be a regular expression (string) to match
against. If there is no infores_rewrite[1] value or it is empty, then matches of the
infores_rewrite[0] are simply deleted from the field value prior to coercing the field
value into an InfoRes CURIE. Otherwise, a non-empty second string value of infores_rewrite[1]
is a substitution string for the regex value matched in the field. If the Tuple contains
a third non-empty string (as infores_rewrite[2]), then the given string is added as a prefix
to the InfoRes. Whatever the transformations, unique InfoRes identifiers once generated,
are used in the meta_knowledge_graph and also reported using the get_infores_catalog() method.
Returns
-------
Callable
A locally configured Callable that knows how to process
a source name string into an infores CURIE, using on client-specified
rewrite rules applied alongside standard formatting rules.
"""
# Check for non-empty infores_rewrite_filter
if infores_rewrite_filter:
self.filter = (
re.compile(infores_rewrite_filter[0])
if infores_rewrite_filter[0]
else None
)
self.substr = (
infores_rewrite_filter[1] if len(infores_rewrite_filter) > 1 else ""
)
self.prefix = (
infores_rewrite_filter[2] if len(infores_rewrite_filter) > 2 else ""
)
def _get_infores(source: str) -> str:
"""
Get InfoRes CURIE inferred from source name.
Parameters
----------
source: str
Name of Information Resource associated with the InfoRes
(i.e. from which the InfoRes was inferred)
Returns
-------
str:
infores CURIE, retrieved or generated.
"""
if source in self.context.catalog:
return self.context.catalog[source]
else:
infores: str = _process_infores(source)
if infores:
self.context.catalog[source] = infores
return infores
else:
return ""
def _process_infores(source: str) -> str:
"""
Process a single knowledge Source name string into an infores, by applying rules
in the _infores_processor() closure context, followed by standard formatting.
Parameters
----------
source: str
Knowledge source name string being processed.
Returns
-------
str
Infores CURIE inferred from the input Knowledge Source name string.
"""
# don't touch something that already looks like an infores CURIE
if source.startswith("infores:"):
return source
if self.filter:
infores = self.filter.sub(self.substr, source)
else:
infores = source
infores = self.prefix + " " + infores
infores = infores.strip()
infores = infores.lower()
infores = re.sub(r"\s+", "_", infores)
infores = re.sub(r"\.+", "_", infores)
infores = re.sub(r"[\W]", "", infores)
infores = re.sub(r"_", "-", infores)
infores = "infores:" + infores
return infores
def parser_list(sources: Optional[List[str]] = None) -> List[str]:
"""
Infores parser for a list of input knowledge source names.
Parameters
----------
sources: List[str]
List of Knowledge source name strings being processed.
Returns
-------
List[str]
Source name strings transformed into infores CURIES, using _process_infores().
"""
if not sources:
return [self.context.default_provenance]
results: List[str] = list()
for source in sources:
infores = _get_infores(source)
if infores:
results.append(infores)
return results
def parser_scalar(source=None) -> str:
"""
Infores parser for a single knowledge source name string.
Parameters
----------
source: str
Knowledge source name string being processed.
Returns
-------
str
Source name string transformed into an infores CURIE, using _process_infores().
"""
return (
self.context.default_provenance
if not source
else _get_infores(source)
)
if self.ksf in column_types and column_types[self.ksf] == list:
return parser_list
else:
# not sure how safe an assumption for all non-list column_types, but...
return parser_scalar
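        # Illustrative sketch (not part of the original module): given a rewrite filter
        # such as (r" \(.+\)",), the returned parser strips the matched text and then
        # applies the standard formatting rules, e.g.
        #   parser = irm.processor(infores_rewrite_filter=(r" \(.+\)",))
        #   parser(["Monarch (2021 release)"])  # expected to yield ["infores:monarch"]
        # The name 'irm' and the sample source string are assumptions for illustration.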
def default(self, default=None):
"""
        Lightweight alternative to the KS processor() which simply assigns client-defined
        default knowledge source strings to knowledge_source fields (not constrained to be formatted as infores CURIEs).
Parameters
----------
default: str
(Optional) default value of the knowledge source field.
Returns
-------
Callable
A locally configured Callable that knows how to process a source name string
(possibly empty) into a suitable (possibly default) infores string identifier.
"""
def default_value_list(sources: List[str] = None):
"""
Infores default method for a list of input knowledge source names.
Parameters
----------
sources: List[str]
List of Knowledge source name strings being processed.
Returns
-------
List[str]
Infores identifiers mapped to input source strings.
"""
if not default:
return list()
if not sources:
return [default]
else:
return sources
def default_value_scalar(source=None):
"""
Infores default method for single input knowledge source name.
Parameters
----------
source: str
Knowledge source name string being processed.
Returns
-------
str
Infores identifier mapped to the input source string.
"""
if not default:
return None
if not source:
return default
else:
return source
if self.ksf in column_types and column_types[self.ksf] == list:
return default_value_list
else:
# not sure how safe an assumption for non-list column_types, but...
return default_value_scalar
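        # Illustrative sketch (not part of the original module): default("infores:chembl")
        # returns a callable that fills in the default only when the input is empty, e.g.
        #   fill = irm.default("infores:chembl")
        #   fill(None)            # ["infores:chembl"] for list-valued slots
        #   fill(["infores:x"])   # existing values are passed through unchanged
        # 'irm' and the sample CURIEs are assumptions for illustration.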
def set_provenance_map_entry(self, ksf_value: Any) -> Any:
"""
Set up a provenance (Knowledge Source to InfoRes) map entry
"""
if isinstance(ksf_value, str):
ksf_value = ksf_value.strip()
if ksf_value.lower() == "true":
mapping = self.processor()
elif ksf_value.lower() == "false":
mapping = self.default() # source suppressed
else:
mapping = self.default(ksf_value)
elif isinstance(ksf_value, bool):
if ksf_value:
mapping = self.processor()
else: # false, ignore this source?
mapping = self.default() # source suppressed
elif isinstance(ksf_value, (list, set, tuple)):
mapping = self.processor(infores_rewrite_filter=ksf_value)
else:
mapping = ksf_value
return mapping
def get_mapping(self, ksf: str) -> InfoResMapping:
"""
InfoRes mapping for a specified knowledge source field ('ksf').
Parameters
----------
ksf: str
Knowledge Source Field whose mapping is being managed.
"""
irm = self.InfoResMapping(self, ksf)
return irm
def set_provenance_map(self, kwargs: Dict):
"""
        Set up a knowledge_source property-indexed map of mapping Callable
        methods that process input knowledge source values into
        suitable InfoRes identifiers.
Parameters
----------
kwargs: Dict
            The input keyword argument dictionary, typically propagated from the
            Transformer.transform() method input arguments, is harvested here for
            static defaults or rewrite rules used in knowledge_source slot InfoRes value processing.
"""
if "default_provenance" in kwargs:
self.default_provenance = kwargs.pop("default_provenance")
ksf_found = []
for ksf in knowledge_provenance_properties:
if ksf in kwargs:
ksf_found.append(ksf)
ksf_value = kwargs.pop(ksf)
if isinstance(ksf_value, dict):
for ksf_pattern in ksf_value.keys():
log.debug("ksf_pattern: ", ksf_pattern)
if ksf not in self.mapping:
log.debug("not in the mapping", ksf)
self.mapping[ksf] = dict()
log.debug("self.mapping[ksf]: ", self.mapping[ksf])
ir = self.get_mapping(ksf)
self.mapping[ksf][ksf_pattern] = ir.set_provenance_map_entry(
ksf_value[ksf_pattern]
)
log.debug("self.mapping[ksf][ksf_pattern]: ", self.mapping[ksf][ksf_pattern])
else:
ir = self.get_mapping(ksf)
self.mapping[ksf] = ir.set_provenance_map_entry(ksf_value)
# if none specified, add at least one generic 'knowledge_source'
if len(ksf_found) == 0:
ir = self.get_mapping("knowledge_source")
if "name" in kwargs:
self.mapping["knowledge_source"] = ir.default(kwargs["name"])
else:
self.mapping["knowledge_source"] = ir.default(self.default_provenance)
if "provided_by" not in self.mapping:
ir = self.get_mapping("provided_by")
self.mapping["provided_by"] = ir.default(self.default_provenance)
def set_provenance(self, ksf: str, data: Dict):
"""
Compute the knowledge_source value for the current node or edge data, using the
infores rewrite context previously established by a call to set_provenance_map().
Parameters
----------
ksf: str
Knowledge source field being processed.
data: Dict
Current node or edge data entry being processed.
"""
if ksf not in data.keys():
if ksf in self.mapping and not isinstance(self.mapping[ksf], dict):
data[ksf] = self.mapping[ksf]()
else:
# if unknown ksf or is an inapplicable pattern
# dictionary, then just set the value to the default
data[ksf] = [self.default_provenance]
else:
            # If data is a non-string iterable, coerce it into a simple list of sources
if isinstance(data[ksf], (list, set, tuple)):
sources = list(data[ksf])
else:
# wraps knowledge sources that are multivalued in a list even if single valued
# in ingest data
if column_types[ksf] == list:
sources = [data[ksf]]
else:
sources = data[ksf]
if ksf in self.mapping:
log.debug("self.mapping[ksf]", self.mapping[ksf])
if isinstance(self.mapping[ksf], dict):
log.debug("self.mapping[ksf].keys()", self.mapping[ksf].keys())
for pattern in self.mapping[ksf].keys():
log.debug("pattern", pattern)
for source in sources:
log.debug("source", source)
if re.compile(pattern).match(source):
index_of_source = data[ksf].index(source)
del data[ksf][index_of_source]
data[ksf] = data[ksf] + self.mapping[ksf][pattern]([source])
else:
if source not in data[ksf] and source not in self.mapping[ksf].keys():
data[ksf].append(source)
log.debug("data[ksf]", data[ksf])
else:
data[ksf] = self.mapping[ksf](sources)
else: # leave data intact if no mapping found
data[ksf] = sources
# ignore if still empty at this point
if not data[ksf]:
data.pop(ksf)
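        # Illustrative sketch (not part of the original module): for an edge record like
        #   {"subject": "A", "object": "B", "aggregator_knowledge_source": ["Monarch"]}
        # an 'aggregator_knowledge_source' mapping created by processor() rewrites the
        # value to something like ["infores:monarch"]; a record missing the field entirely
        # is given [self.default_provenance]. The sample record is an assumption.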
def set_node_provenance(self, node_data: Dict):
"""
        Sets the node provenance ('provided_by') value for the current node.
Parameters
----------
node_data: Dict
Current node data entry being processed.
"""
self.set_provenance("provided_by", node_data)
def set_edge_provenance(self, edge_data: Dict):
"""
        Sets the edge provenance values for the current edge. Edge provenance properties
        include 'knowledge_source' and its related Biolink knowledge source slots.
Parameters
----------
edge_data: Dict
Current edge data entry being processed.
"""
data_fields = list(edge_data.keys())
for ksf in data_fields:
if ksf in knowledge_provenance_properties:
self.set_provenance(ksf, edge_data)
for ksf in self.mapping:
if ksf != "provided_by":
self.set_provenance(ksf, edge_data)
| 18,032 | 38.202174 | 119 |
py
|
kgx
|
kgx-master/kgx/utils/graph_utils.py
|
from typing import List, Set, Dict, Optional
import inflection
from cachetools import cached
from kgx.config import get_logger
from kgx.graph.base_graph import BaseGraph
from kgx.utils.kgx_utils import get_toolkit, get_cache, get_curie_lookup_service
from kgx.prefix_manager import PrefixManager
ONTOLOGY_PREFIX_MAP: Dict = {}
ONTOLOGY_GRAPH_CACHE: Dict = {}
log = get_logger()
def get_parents(graph: BaseGraph, node: str, relations: List[str] = None) -> List[str]:
"""
Return all direct `parents` of a specified node, filtered by ``relations``.
Parameters
----------
graph: kgx.graph.base_graph.BaseGraph
Graph to traverse
node: str
node identifier
relations: List[str]
list of relations
Returns
-------
List[str]
A list of parent node(s)
"""
parents = []
if graph.has_node(node):
out_edges = [x for x in graph.out_edges(node, keys=False, data=True)]
if relations is None:
parents = [x[1] for x in out_edges]
else:
parents = [x[1] for x in out_edges if x[2]["predicate"] in relations]
return parents
def get_ancestors(
graph: BaseGraph, node: str, relations: List[str] = None
) -> List[str]:
"""
Return all `ancestors` of specified node, filtered by ``relations``.
Parameters
----------
graph: kgx.graph.base_graph.BaseGraph
Graph to traverse
node: str
node identifier
relations: List[str]
list of relations
Returns
-------
List[str]
A list of ancestor nodes
"""
seen = []
nextnodes = [node]
while len(nextnodes) > 0:
nn = nextnodes.pop()
if nn not in seen:
seen.append(nn)
nextnodes += get_parents(graph, nn, relations=relations)
seen.remove(node)
return seen
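# Illustrative sketch (not part of the original module): for a graph holding the chain
#   HP:0000118 --subclass_of--> HP:0000001
# get_parents(graph, "HP:0000118", relations=["subclass_of"]) is expected to return
# ["HP:0000001"], and get_ancestors() walks the same relation transitively.
# The CURIEs are placeholders chosen for illustration only.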
@cached(get_cache())
def get_category_via_superclass(
graph: BaseGraph, curie: str, load_ontology: bool = True
) -> Set[str]:
"""
Get category for a given CURIE by tracing its superclass, via ``subclass_of`` hierarchy,
and getting the most appropriate category based on the superclass.
Parameters
----------
graph: kgx.graph.base_graph.BaseGraph
Graph to traverse
curie: str
Input CURIE
load_ontology: bool
Determines whether to load ontology, based on CURIE prefix, or to simply
rely on ``subclass_of`` hierarchy from graph
Returns
-------
Set[str]
A set containing one (or more) category for the given CURIE
"""
log.debug("curie: {}".format(curie))
new_categories = []
toolkit = get_toolkit()
if PrefixManager.is_curie(curie):
ancestors = get_ancestors(graph, curie, relations=["subclass_of"])
if len(ancestors) == 0 and load_ontology:
cls = get_curie_lookup_service()
ontology_graph = cls.ontology_graph
new_categories += [
x for x in get_category_via_superclass(ontology_graph, curie, False)
]
log.debug("Ancestors for CURIE {} via subClassOf: {}".format(curie, ancestors))
seen = []
for anc in ancestors:
mapping = toolkit.get_by_mapping(anc)
seen.append(anc)
if mapping:
# there is direct mapping to BioLink Model
log.debug("Ancestor {} mapped to {}".format(anc, mapping))
seen_labels = [
graph.nodes()[x]["name"] for x in seen if "name" in graph.nodes()[x]
]
new_categories += [x for x in seen_labels]
new_categories += [x for x in toolkit.ancestors(mapping)]
break
return set(new_categories)
def curie_lookup(curie: str) -> Optional[str]:
"""
Given a CURIE, find its label.
This method first does a lookup in predefined maps. If none found,
it makes use of CurieLookupService to look for the CURIE in a set
of preloaded ontologies.
Parameters
----------
curie: str
A CURIE
Returns
-------
Optional[str]
The label corresponding to the given CURIE
"""
cls = get_curie_lookup_service()
name: Optional[str] = None
prefix = PrefixManager.get_prefix(curie)
if prefix in ["OIO", "OWL", "owl", "OBO", "rdfs"]:
name = inflection.underscore(curie.split(":", 1)[1])
elif curie in cls.curie_map:
name = cls.curie_map[curie]
elif curie in cls.ontology_graph:
name = cls.ontology_graph.nodes()[curie]["name"]
return name
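# Illustrative sketch (not part of the original module): prefixes in the special list are
# resolved without any ontology lookup, e.g.
#   curie_lookup("rdfs:subClassOf")  # expected to return "sub_class_of"
# Other CURIEs fall back to the preloaded curie_map and ontology graph of the
# CurieLookupService, so their results depend on the loaded ontologies.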
| 4,575 | 27.962025 | 92 |
py
|
kgx
|
kgx-master/kgx/utils/__init__.py
| 0 | 0 | 0 |
py
|
|
kgx
|
kgx-master/kgx/utils/rdf_utils.py
|
from collections import OrderedDict
from typing import List, Optional, Any, Union, Dict, Tuple
import rdflib
from linkml_runtime.linkml_model.meta import Element, SlotDefinition, ClassDefinition
from cachetools import cached, LRUCache
from rdflib import Namespace, URIRef
from rdflib.namespace import RDF, RDFS, OWL, SKOS
from kgx.config import get_logger
from kgx.prefix_manager import PrefixManager
from kgx.utils.graph_utils import get_category_via_superclass
from kgx.utils.kgx_utils import (
get_curie_lookup_service,
contract,
get_cache,
get_toolkit,
sentencecase_to_snakecase,
sentencecase_to_camelcase,
get_biolink_ancestors,
)
from pprint import pprint
log = get_logger()
OBAN = Namespace("http://purl.org/oban/")
BIOLINK = Namespace("https://w3id.org/biolink/vocab/")
OIO = Namespace("http://www.geneontology.org/formats/oboInOwl#")
OBO = Namespace("http://purl.obolibrary.org/obo/")
property_mapping: Dict = dict()
reverse_property_mapping: Dict = dict()
is_property_multivalued = {
"id": False,
"subject": False,
"object": False,
"predicate": False,
"description": False,
"synonym": True,
"in_taxon": False,
"same_as": True,
"name": False,
"has_evidence": False,
"provided_by": True,
"category": True,
"publications": True,
"type": False,
"relation": False,
}
top_level_terms = {
OBO.term("CL_0000000"): "cell",
OBO.term("UBERON_0001062"): "anatomical_entity",
OBO.term("PATO_0000001"): "quality",
OBO.term("NCBITaxon_131567"): "organism",
OBO.term("CLO_0000031"): "cell_line",
OBO.term("MONDO_0000001"): "disease",
OBO.term("CHEBI_23367"): "molecular_entity",
OBO.term("CHEBI_23888"): "drug",
OBO.term("UPHENO_0001001"): "phenotypic_feature",
OBO.term("GO_0008150"): "biological_process",
OBO.term("GO_0009987"): "cellular_process",
OBO.term("GO_0005575"): "cellular_component",
OBO.term("GO_0003674"): "molecular_function",
OBO.term("SO_0000704"): "gene",
OBO.term("GENO_0000002"): "variant_locus",
OBO.term("GENO_0000536"): "genotype",
OBO.term("SO_0000110"): "sequence_feature",
OBO.term("ECO_0000000"): "evidence",
OBO.term("PW_0000001"): "pathway",
OBO.term("IAO_0000310"): "publication",
OBO.term("SO_0001483"): "snv",
OBO.term("GENO_0000871"): "haplotype",
OBO.term("SO_0001024"): "haplotype",
OBO.term("SO_0000340"): "chromosome",
OBO.term("SO_0000104"): "protein",
OBO.term("SO_0001500"): "phenotypic_marker",
OBO.term("SO_0000001"): "region",
OBO.term("HP_0032223"): "blood_group",
OBO.term("HP_0031797"): "clinical_course",
OBO.term("HP_0040279"): "frequency",
OBO.term("HP_0000118"): "phenotypic_abnormality",
OBO.term("HP_0032443"): "past_medical_history",
OBO.term("HP_0000005"): "mode_of_inheritance",
OBO.term("HP_0012823"): "clinical_modifier",
}
def infer_category(iri: URIRef, rdfgraph: rdflib.Graph) -> Optional[List]:
"""
Infer category for a given iri by traversing rdfgraph.
Parameters
----------
iri: rdflib.term.URIRef
IRI
rdfgraph: rdflib.Graph
A graph to traverse
Returns
-------
Optional[List]
A list of category corresponding to the given IRI
"""
closure = list(rdfgraph.transitive_objects(iri, URIRef(RDFS.subClassOf)))
category = [top_level_terms[x] for x in closure if x in top_level_terms.keys()]
if category:
log.debug(
"Inferred category as {} based on transitive closure over 'subClassOf' relation".format(
category
)
)
else:
subj = closure[-1]
if subj == iri:
return category
subject_curie: Optional[str] = contract(subj)
if subject_curie and "_" in subject_curie:
fixed_curie = subject_curie.split(":", 1)[1].split("_", 1)[1]
log.warning(
"Malformed CURIE {} will be fixed to {}".format(
subject_curie, fixed_curie
)
)
subject_curie = fixed_curie
cls = get_curie_lookup_service()
category = list(get_category_via_superclass(cls.ontology_graph, subject_curie))
return category
@cached(LRUCache(maxsize=1024))
def get_biolink_element(
prefix_manager: PrefixManager, predicate: Any
) -> Optional[Element]:
"""
Returns a Biolink Model element for a given predicate.
Parameters
----------
prefix_manager: PrefixManager
An instance of prefix manager
predicate: Any
The CURIE of a predicate
Returns
-------
Optional[Element]
The corresponding Biolink Model element
"""
toolkit = get_toolkit()
element = None
reference = None
if prefix_manager.is_iri(predicate):
predicate_curie = prefix_manager.contract(predicate)
else:
predicate_curie = predicate
if prefix_manager.is_curie(predicate_curie):
element = toolkit.get_element(predicate_curie)
if element is None:
reference = prefix_manager.get_reference(predicate_curie)
else:
reference = predicate_curie
if element is None and reference is not None:
element = toolkit.get_element(reference)
if not element:
try:
mapping = toolkit.get_element_by_mapping(predicate)
if mapping:
element = toolkit.get_element(mapping)
except ValueError as e:
log.error(e)
return element
def process_predicate(
prefix_manager: PrefixManager,
p: Union[URIRef, str],
predicate_mapping: Optional[Dict] = None,
) -> Tuple:
"""
Process a predicate where the method checks if there is a mapping in Biolink Model.
Parameters
----------
prefix_manager: PrefixManager
An instance of prefix manager
p: Union[URIRef, str]
The predicate
predicate_mapping: Optional[Dict]
Predicate mappings
Returns
-------
Tuple[str, str, str, str]
A tuple that contains the Biolink CURIE (if available), the Biolink slot_uri CURIE (if available),
the CURIE form of p, the reference of p
"""
if prefix_manager.is_iri(p):
predicate = prefix_manager.contract(str(p))
else:
predicate = None
if prefix_manager.is_curie(p):
property_name = prefix_manager.get_reference(p)
predicate = p
else:
if predicate and prefix_manager.is_curie(predicate):
property_name = prefix_manager.get_reference(predicate)
else:
property_name = p
predicate = f":{p}"
element = get_biolink_element(prefix_manager, p)
canonical_uri = None
if element is None:
element = get_biolink_element(prefix_manager, predicate)
if element:
if isinstance(element, SlotDefinition):
# predicate corresponds to a biolink slot
if element.definition_uri:
element_uri = prefix_manager.contract(element.definition_uri)
else:
element_uri = f"biolink:{sentencecase_to_snakecase(element.name)}"
canonical_uri = element_uri
if element.slot_uri:
canonical_uri = element.slot_uri
elif isinstance(element, ClassDefinition):
# this will happen only when the IRI is actually
# a reference to a class
element_uri = prefix_manager.contract(element.class_uri)
else:
element_uri = f"biolink:{sentencecase_to_camelcase(element.name)}"
if "biolink:Attribute" in get_biolink_ancestors(element.name):
element_uri = f"biolink:{sentencecase_to_snakecase(element.name)}"
if not predicate:
predicate = element_uri
else:
# no mapping to biolink model;
# look at predicate mappings
element_uri = None
if predicate_mapping:
if p in predicate_mapping:
property_name = predicate_mapping[p]
predicate = f":{property_name}"
return element_uri, canonical_uri, predicate, property_name
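# Illustrative sketch (not part of the original module): for a predicate that maps to a
# Biolink slot, e.g.
#   process_predicate(prefix_manager, "biolink:interacts_with")
# the returned tuple is expected to look like
#   ("biolink:interacts_with", <canonical slot_uri or None>, "biolink:interacts_with", "interacts_with")
# 'prefix_manager' is assumed to be a configured kgx PrefixManager instance.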
| 8,159 | 32.036437 | 106 |
py
|
kgx
|
kgx-master/kgx/utils/kgx_utils.py
|
import importlib
import re
import time
import uuid
import sqlite3
from enum import Enum
from functools import lru_cache
from typing import List, Dict, Set, Optional, Any, Union
import stringcase
from inflection import camelize
from linkml_runtime.linkml_model.meta import (
TypeDefinitionName,
EnumDefinition,
ElementName,
SlotDefinition,
ClassDefinition,
TypeDefinition,
Element,
)
from bmt import Toolkit
from cachetools import LRUCache
import pandas as pd
import numpy as np
from prefixcommons.curie_util import contract_uri
from prefixcommons.curie_util import expand_uri
from kgx.config import get_logger, get_jsonld_context, get_biolink_model_schema
from kgx.graph.base_graph import BaseGraph
curie_lookup_service = None
cache = None
log = get_logger()
CORE_NODE_PROPERTIES = {"id", "name"}
CORE_EDGE_PROPERTIES = {"id", "subject", "predicate", "object", "type"}
XSD_STRING = "xsd:string"
tk = Toolkit()
class GraphEntityType(Enum):
GRAPH = "graph"
NODE = "node"
EDGE = "edge"
# Biolink 2.0 "Knowledge Source" association slots,
# including the deprecated 'provided_by' slot
provenance_slot_types = {
"knowledge_source": str,
"primary_knowledge_source": str,
"original_knowledge_source": str,
"aggregator_knowledge_source": list,
"supporting_data_source": list,
"provided_by": list,
}
column_types = {
"publications": list,
"qualifiers": list,
"category": list,
"synonym": list,
"same_as": list,
"negated": bool,
"xrefs": list,
}
column_types.update(provenance_slot_types)
knowledge_provenance_properties = set(provenance_slot_types.keys())
extension_types = {"csv": ",", "tsv": "\t", "csv:neo4j": ",", "tsv:neo4j": "\t", "sql": "|"}
archive_read_mode = {"tar": "r", "tar.gz": "r:gz", "tar.bz2": "r:bz2"}
archive_write_mode = {"tar": "w", "tar.gz": "w:gz", "tar.bz2": "w:bz2"}
archive_format = {
"r": "tar",
"r:gz": "tar.gz",
"r:bz2": "tar.bz2",
"w": "tar",
"w:gz": "tar.gz",
"w:bz2": "tar.bz2",
}
is_provenance_property_multivalued = {
"knowledge_source": True,
"primary_knowledge_source": False,
"original_knowledge_source": False,
"aggregator_knowledge_source": True,
"supporting_data_source": True,
"provided_by": True,
}
is_property_multivalued = {
"id": False,
"subject": False,
"object": False,
"predicate": False,
"description": False,
"synonym": True,
"in_taxon": False,
"same_as": True,
"name": False,
"has_evidence": False,
"category": True,
"publications": True,
"type": False,
"relation": False,
}
is_property_multivalued.update(is_provenance_property_multivalued)
def camelcase_to_sentencecase(s: str) -> str:
"""
Convert CamelCase to sentence case.
Parameters
----------
s: str
Input string in CamelCase
Returns
-------
str
string in sentence case form
"""
return stringcase.sentencecase(s).lower()
def snakecase_to_sentencecase(s: str) -> str:
"""
Convert snake_case to sentence case.
Parameters
----------
s: str
Input string in snake_case
Returns
-------
str
string in sentence case form
"""
return stringcase.sentencecase(s).lower()
@lru_cache(maxsize=1024)
def sentencecase_to_snakecase(s: str) -> str:
"""
Convert sentence case to snake_case.
Parameters
----------
s: str
Input string in sentence case
Returns
-------
str
string in snake_case form
"""
return stringcase.snakecase(s).lower()
@lru_cache(maxsize=1024)
def sentencecase_to_camelcase(s: str) -> str:
"""
Convert sentence case to CamelCase.
Parameters
----------
s: str
Input string in sentence case
Returns
-------
str
string in CamelCase form
"""
return camelize(stringcase.snakecase(s))
@lru_cache(maxsize=1024)
def format_biolink_category(s: str) -> str:
"""
Convert a sentence case Biolink category name to
a proper Biolink CURIE with the category itself
in CamelCase form.
Parameters
----------
s: str
Input string in sentence case
Returns
-------
str
a proper Biolink CURIE
"""
if re.match("biolink:.+", s):
return s
else:
formatted = sentencecase_to_camelcase(s)
return f"biolink:{formatted}"
@lru_cache(maxsize=1024)
def format_biolink_slots(s: str) -> str:
if re.match("biolink:.+", s):
return s
else:
formatted = sentencecase_to_snakecase(s)
return f"biolink:{formatted}"
def contract(
uri: str, prefix_maps: Optional[List[Dict]] = None, fallback: bool = True
) -> str:
"""
Contract a given URI to a CURIE, based on mappings from `prefix_maps`.
If no prefix map is provided then will use defaults from prefixcommons-py.
This method will return the URI as the CURIE if there is no mapping found.
Parameters
----------
uri: str
A URI
prefix_maps: Optional[List[Dict]]
A list of prefix maps to use for mapping
fallback: bool
Determines whether to fallback to default prefix mappings, as determined
by `prefixcommons.curie_util`, when URI prefix is not found in `prefix_maps`.
Returns
-------
str
A CURIE corresponding to the URI
"""
curie = uri
default_curie_maps = [
get_jsonld_context("monarch_context"),
get_jsonld_context("obo_context"),
]
if prefix_maps:
curie_list = contract_uri(uri, prefix_maps)
if len(curie_list) == 0:
if fallback:
curie_list = contract_uri(uri, default_curie_maps)
if curie_list:
curie = curie_list[0]
else:
curie = curie_list[0]
else:
curie_list = contract_uri(uri, default_curie_maps)
if len(curie_list) > 0:
curie = curie_list[0]
return curie
def expand(
curie: str, prefix_maps: Optional[List[dict]] = None, fallback: bool = True
) -> str:
"""
Expand a given CURIE to an URI, based on mappings from `prefix_map`.
This method will return the CURIE as the IRI if there is no mapping found.
Parameters
----------
curie: str
A CURIE
prefix_maps: Optional[List[dict]]
A list of prefix maps to use for mapping
fallback: bool
Determines whether to fallback to default prefix mappings, as determined
by `prefixcommons.curie_util`, when CURIE prefix is not found in `prefix_maps`.
Returns
-------
str
A URI corresponding to the CURIE
"""
default_curie_maps = [
get_jsonld_context("monarch_context"),
get_jsonld_context("obo_context"),
]
if prefix_maps:
uri = expand_uri(curie, prefix_maps)
if uri == curie and fallback:
uri = expand_uri(curie, default_curie_maps)
else:
uri = expand_uri(curie, default_curie_maps)
return uri
_default_toolkit = None
_toolkit_versions: Dict[str, Toolkit] = dict()
def get_toolkit(biolink_release: Optional[str] = None) -> Toolkit:
"""
Get an instance of bmt.Toolkit
If there no instance defined, then one is instantiated and returned.
Parameters
----------
biolink_release: Optional[str]
        Biolink Model release version used to resolve the schema for validation (default: None, use default Biolink Model Toolkit schema)
"""
global _default_toolkit, _toolkit_versions
if biolink_release:
if biolink_release in _toolkit_versions:
toolkit = _toolkit_versions[biolink_release]
else:
schema = get_biolink_model_schema(biolink_release)
toolkit = Toolkit(schema=schema)
_toolkit_versions[biolink_release] = toolkit
else:
if _default_toolkit is None:
_default_toolkit = Toolkit()
toolkit = _default_toolkit
biolink_release = toolkit.get_model_version()
if biolink_release not in _toolkit_versions:
_toolkit_versions[biolink_release] = toolkit
return toolkit
def generate_edge_key(s: str, edge_predicate: str, o: str) -> str:
"""
Generates an edge key based on a given subject, predicate, and object.
Parameters
----------
s: str
Subject
edge_predicate: str
Edge label
o: str
Object
Returns
-------
str
Edge key as a string
"""
return "{}-{}-{}".format(s, edge_predicate, o)
def get_curie_lookup_service():
"""
Get an instance of kgx.curie_lookup_service.CurieLookupService
Returns
-------
kgx.curie_lookup_service.CurieLookupService
An instance of ``CurieLookupService``
"""
global curie_lookup_service
if curie_lookup_service is None:
from kgx.curie_lookup_service import CurieLookupService
curie_lookup_service = CurieLookupService()
return curie_lookup_service
def get_cache(maxsize=10000):
"""
Get an instance of cachetools.cache
Parameters
----------
maxsize: int
The max size for the cache (``10000``, by default)
Returns
-------
cachetools.cache
An instance of cachetools.cache
"""
global cache
if cache is None:
cache = LRUCache(maxsize)
return cache
def current_time_in_millis():
"""
Get current time in milliseconds.
Returns
-------
int
Time in milliseconds
"""
return int(round(time.time() * 1000))
def get_prefix_prioritization_map() -> Dict[str, List]:
"""
Get prefix prioritization map as defined in Biolink Model.
Returns
-------
Dict[str, List]
"""
toolkit = get_toolkit()
prefix_prioritization_map = {}
descendants = toolkit.get_descendants("named thing")
descendants.append("named thing")
for d in descendants:
element = toolkit.get_element(d)
if element and "id_prefixes" in element:
prefixes = element.id_prefixes
key = format_biolink_category(element.name)
prefix_prioritization_map[key] = prefixes
return prefix_prioritization_map
def get_biolink_element(name) -> Optional[Element]:
"""
Get Biolink element for a given name, where name can be a class, slot, or relation.
Parameters
----------
name: str
The name
Returns
-------
Optional[linkml_model.meta.Element]
An instance of linkml_model.meta.Element
"""
toolkit = get_toolkit()
element = toolkit.get_element(name)
return element
def get_biolink_ancestors(name: str):
"""
Get ancestors for a given Biolink class.
Parameters
----------
name: str
Returns
-------
List
A list of ancestors
"""
toolkit = get_toolkit()
ancestors_mixins = toolkit.get_ancestors(name, formatted=True, mixin=True)
return ancestors_mixins
def get_biolink_property_types() -> Dict:
"""
Get all Biolink property types.
This includes both node and edges properties.
Returns
-------
Dict
A dict containing all Biolink property and their types
"""
toolkit = get_toolkit()
types = {}
node_properties = toolkit.get_all_node_properties(formatted=True)
edge_properties = toolkit.get_all_edge_properties(formatted=True)
for p in node_properties:
property_type = get_type_for_property(p)
types[p] = property_type
for p in edge_properties:
property_type = get_type_for_property(p)
types[p] = property_type
types["biolink:predicate"] = "uriorcurie"
types["biolink:edge_label"] = "uriorcurie"
return types
def get_type_for_property(p: str) -> str:
"""
Get type for a property.
Parameters
----------
p: str
Returns
-------
str
The type for a given property
"""
toolkit = get_toolkit()
e = toolkit.get_element(p)
t = XSD_STRING
if e:
if isinstance(e, ClassDefinition):
t = "uriorcurie"
elif isinstance(e, TypeDefinition):
t = e.uri
elif isinstance(e, EnumDefinition):
t = "uriorcurie"
else:
r = e.range
if isinstance(r, SlotDefinition):
t = r.range
t = get_type_for_property(t)
elif isinstance(r, TypeDefinitionName):
t = get_type_for_property(r)
elif isinstance(r, ElementName):
t = get_type_for_property(r)
else:
t = XSD_STRING
if t is None:
t = XSD_STRING
return t
def prepare_data_dict(d1: Dict, d2: Dict, preserve: bool = True) -> Dict:
"""
    Given two dict objects, make a new dict object that merges the contents of the two.
If a key is known to be multivalued then it's value is converted to a list.
If a key is already multivalued then it is updated with new values.
If a key is single valued, and a new unique value is found then the existing value is
converted to a list and the new value is appended to this list.
Parameters
----------
d1: Dict
Dict object
d2: Dict
Dict object
preserve: bool
Whether or not to preserve values for conflicting keys
Returns
-------
Dict
        The merged result of d1 and d2
"""
new_data = {}
for key, value in d2.items():
if isinstance(value, (list, set, tuple)):
new_value = [x for x in value]
else:
new_value = value
if key in is_property_multivalued:
if is_property_multivalued[key]:
# value for key is supposed to be multivalued
if key in d1:
# key is in data
if isinstance(d1[key], (list, set, tuple)):
# existing key has value type list
new_data[key] = d1[key]
if isinstance(new_value, (list, set, tuple)):
new_data[key] += [
x for x in new_value if x not in new_data[key]
]
else:
if new_value not in new_data[key]:
new_data[key].append(new_value)
else:
if key in CORE_NODE_PROPERTIES or key in CORE_EDGE_PROPERTIES:
log.debug(
f"cannot modify core property '{key}': {d2[key]} vs {d1[key]}"
)
else:
# existing key does not have value type list; converting to list
new_data[key] = [d1[key]]
if isinstance(new_value, (list, set, tuple)):
new_data[key] += [
x for x in new_value if x not in new_data[key]
]
else:
if new_value not in new_data[key]:
new_data[key].append(new_value)
else:
# key is not in data; adding
if isinstance(new_value, (list, set, tuple)):
new_data[key] = [x for x in new_value]
else:
new_data[key] = [new_value]
else:
# key is not multivalued; adding/replacing as-is
if key in d1:
if isinstance(d1[key], (list, set, tuple)):
new_data[key] = d1[key]
if isinstance(new_value, (list, set, tuple)):
new_data[key] += [x for x in new_value]
else:
new_data[key].append(new_value)
else:
if key in CORE_NODE_PROPERTIES or key in CORE_EDGE_PROPERTIES:
log.debug(
f"cannot modify core property '{key}': {d2[key]} vs {d1[key]}"
)
else:
if preserve:
new_data[key] = [d1[key]]
if isinstance(new_value, (list, set, tuple)):
new_data[key] += [
x for x in new_value if x not in new_data[key]
]
else:
new_data[key].append(new_value)
else:
new_data[key] = new_value
else:
new_data[key] = new_value
else:
# treating key as multivalued
if key in d1:
# key is in data
if key in CORE_NODE_PROPERTIES or key in CORE_EDGE_PROPERTIES:
log.debug(
f"cannot modify core property '{key}': {d2[key]} vs {d1[key]}"
)
else:
if isinstance(d1[key], (list, set, tuple)):
# existing key has value type list
new_data[key] = d1[key]
if isinstance(new_value, (list, set, tuple)):
new_data[key] += [
x for x in new_value if x not in new_data[key]
]
else:
new_data[key].append(new_value)
else:
# existing key does not have value type list; converting to list
if preserve:
new_data[key] = [d1[key]]
if isinstance(new_value, (list, set, tuple)):
new_data[key] += [
x for x in new_value if x not in new_data[key]
]
else:
new_data[key].append(new_value)
else:
new_data[key] = new_value
else:
new_data[key] = new_value
for key, value in d1.items():
if key not in new_data:
new_data[key] = value
return new_data
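# Illustrative sketch (not part of the original module): merging two node records,
#   prepare_data_dict({"name": "BRCA1", "category": ["biolink:Gene"]},
#                     {"category": ["biolink:NamedThing"]})
# is expected to return
#   {"category": ["biolink:Gene", "biolink:NamedThing"], "name": "BRCA1"}
# because 'category' is multivalued and 'name' only exists in the first record.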
def apply_filters(
graph: BaseGraph,
node_filters: Dict[str, Union[str, Set]],
edge_filters: Dict[str, Union[str, Set]],
) -> None:
"""
Apply filters to graph and remove nodes and edges that
do not pass given filters.
Parameters
----------
graph: kgx.graph.base_graph.BaseGraph
The graph
node_filters: Dict[str, Union[str, Set]]
Node filters
edge_filters: Dict[str, Union[str, Set]]
Edge filters
"""
apply_node_filters(graph, node_filters)
apply_edge_filters(graph, edge_filters)
def apply_node_filters(
graph: BaseGraph, node_filters: Dict[str, Union[str, Set]]
) -> None:
"""
Apply filters to graph and remove nodes that do not pass given filters.
Parameters
----------
graph: kgx.graph.base_graph.BaseGraph
The graph
node_filters: Dict[str, Union[str, Set]]
Node filters
"""
nodes_to_remove = []
for node, node_data in graph.nodes(data=True):
pass_filter = True
for k, v in node_filters.items():
if k == "category":
if not any(x in node_data[k] for x in v):
pass_filter = False
if not pass_filter:
nodes_to_remove.append(node)
for node in nodes_to_remove:
# removing node that fails category filter
log.debug(f"Removing node {node}")
graph.remove_node(node)
def apply_edge_filters(
graph: BaseGraph, edge_filters: Dict[str, Union[str, Set]]
) -> None:
"""
Apply filters to graph and remove edges that do not pass given filters.
Parameters
----------
graph: kgx.graph.base_graph.BaseGraph
The graph
edge_filters: Dict[str, Union[str, Set]]
Edge filters
"""
edges_to_remove = []
for subject_node, object_node, key, data in graph.edges(keys=True, data=True):
pass_filter = True
for k, v in edge_filters.items():
if k == "predicate":
if data[k] not in v:
pass_filter = False
elif k == "relation":
if data[k] not in v:
pass_filter = False
if not pass_filter:
edges_to_remove.append((subject_node, object_node, key))
for edge in edges_to_remove:
# removing edge that fails edge filters
log.debug(f"Removing edge {edge}")
graph.remove_edge(edge[0], edge[1], edge[2])
def generate_uuid():
"""
Generates a UUID.
Returns
-------
str
A UUID
"""
return f"urn:uuid:{uuid.uuid4()}"
def generate_edge_identifiers(graph: BaseGraph):
"""
Generate unique identifiers for edges in a graph that do not
have an ``id`` field.
Parameters
----------
graph: kgx.graph.base_graph.BaseGraph
"""
for u, v, data in graph.edges(data=True):
if "id" not in data:
data["id"] = generate_uuid()
def sanitize_import(data: Dict, list_delimiter: str=None) -> Dict:
"""
Sanitize key-value pairs in dictionary.
This should be used to ensure proper syntax and types for node and edge data as it is imported.
Parameters
----------
data: Dict
A dictionary containing key-value pairs
list_delimiter: str
Optionally provide a delimiter character or string to be used to split strings into lists.
Returns
-------
Dict
A dictionary containing processed key-value pairs
"""
tidy_data = {}
for key, value in data.items():
new_value = remove_null(value)
if new_value is not None:
tidy_data[key] = _sanitize_import_property(key, new_value, list_delimiter)
return tidy_data
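# Illustrative sketch (not part of the original module): importing a pipe-delimited record,
#   sanitize_import({"id": "HGNC:11603", "category": "biolink:Gene|biolink:NamedThing"},
#                   list_delimiter="|")
# is expected to yield
#   {"id": "HGNC:11603", "category": ["biolink:Gene", "biolink:NamedThing"]}
# since 'category' is declared as a list in column_types and values are de-duplicated and sorted.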
@lru_cache(maxsize=1)
def _get_all_multivalued_slots() -> Set[str]:
return set([sentencecase_to_snakecase(x) for x in tk.get_all_multivalued_slots()])
def _sanitize_import_property(key: str, value: Any, list_delimiter: str) -> Any:
"""
Sanitize value for a key for the purpose of import.
Casts all values to primitive types like str or bool according to the
specified type in ``column_types``.
If a list_delimiter is provided lists will be converted into strings using the delimiter.
Parameters
----------
key: str
Key corresponding to a node/edge property
value: Any
Value corresponding to the key
Returns
-------
value: Any
Sanitized value
"""
if key in column_types:
if column_types[key] == list:
if isinstance(value, (list, set, tuple)):
value = [
v.replace("\n", " ").replace("\t", " ") if isinstance(v, str) else v
for v in value
]
new_value = list(value)
elif isinstance(value, str):
value = value.replace("\n", " ").replace("\t", " ")
new_value = [x for x in value.split(list_delimiter) if x] if list_delimiter else [value]
else:
new_value = [str(value).replace("\n", " ").replace("\t", " ")]
# remove duplication in the list
value_set: Set = set()
for entry in new_value:
value_set.add(entry)
new_value = sorted(list(value_set))
elif column_types[key] == bool:
try:
new_value = bool(value)
except:
new_value = False
# the rest of this if/else block doesn't seem right:
# it's not checking the type against the expected type even though one exists
elif isinstance(value, (str, float)):
new_value = value
else:
# we might want to raise an exception or somehow indicate a type mismatch in the input data
new_value = str(value).replace("\n", " ").replace("\t", " ")
else:
if isinstance(value, (list, set, tuple)):
value = [
v.replace("\n", " ").replace("\t", " ") if isinstance(v, str) else v
for v in value
]
new_value = list(value)
elif isinstance(value, str):
multivalued_slots = _get_all_multivalued_slots()
if list_delimiter and list_delimiter in value:
value = value.replace("\n", " ").replace("\t", " ")
new_value = [x for x in value.split(list_delimiter) if x]
elif key in multivalued_slots:
new_value = [value]
else:
new_value = value.replace("\n", " ").replace("\t", " ")
elif isinstance(value, bool):
try:
new_value = bool(value)
except:
new_value = False
elif isinstance(value, (str, float)):
new_value = value
else:
new_value = str(value).replace("\n", " ").replace("\t", " ")
return new_value
def build_export_row(data: Dict, list_delimiter: str=None) -> Dict:
"""
Sanitize key-value pairs in dictionary.
This should be used to ensure proper syntax and types for node and edge data as it is exported.
Parameters
----------
data: Dict
A dictionary containing key-value pairs
list_delimiter: str
Optionally provide a delimiter character or string to be used to convert lists into strings.
Returns
-------
Dict
A dictionary containing processed key-value pairs
"""
tidy_data = {}
for key, value in data.items():
new_value = remove_null(value)
if new_value:
tidy_data[key] = _sanitize_export_property(key, new_value, list_delimiter)
return tidy_data
def _sanitize_export_property(key: str, value: Any, list_delimiter: str=None) -> Any:
"""
Sanitize value for a key for the purpose of export.
Casts all values to primitive types like str or bool according to the
specified type in ``column_types``.
If a list_delimiter is provided lists will be converted into strings using the delimiter.
Parameters
----------
key: str
Key corresponding to a node/edge property
value: Any
Value corresponding to the key
list_delimiter: str
Optionally provide a delimiter character or string to be used to convert lists into strings.
Returns
-------
value: Any
Sanitized value
"""
if key in column_types:
if column_types[key] == list:
if isinstance(value, (list, set, tuple)):
value = [
v.replace("\n", " ").replace('\\"', "").replace("\t", " ")
if isinstance(v, str)
else v
for v in value
]
new_value = list_delimiter.join([str(x) for x in value]) if list_delimiter else value
else:
new_value = (
str(value).replace("\n", " ").replace('\\"', "").replace("\t", " ")
)
elif column_types[key] == bool:
try:
new_value = bool(value)
except:
new_value = False
else:
new_value = (
str(value).replace("\n", " ").replace('\\"', "").replace("\t", " ")
)
else:
if type(value) == list:
value = [
v.replace("\n", " ").replace('\\"', "").replace("\t", " ")
if isinstance(v, str)
else v
for v in value
]
new_value = list_delimiter.join([str(x) for x in value]) if list_delimiter else value
column_types[key] = list
elif type(value) == bool:
try:
new_value = bool(value)
column_types[key] = bool # this doesn't seem right, shouldn't column_types come from the biolink model?
except:
new_value = False
else:
new_value = (
str(value).replace("\n", " ").replace('\\"', "").replace("\t", " ")
)
return new_value
def remove_null(input: Any) -> Any:
"""
Remove any null values from input.
Parameters
----------
input: Any
Can be a str, list or dict
Returns
-------
Any
The input without any null values
"""
new_value: Any = None
if isinstance(input, (list, set, tuple)):
# value is a list, set or a tuple
new_value = []
for v in input:
x = remove_null(v)
if x:
new_value.append(x)
elif isinstance(input, dict):
# value is a dict
new_value = {}
for k, v in input.items():
x = remove_null(v)
if x:
new_value[k] = x
elif isinstance(input, str):
# value is a str
if not is_null(input):
new_value = input
else:
if not is_null(input):
new_value = input
return new_value
def is_null(item: Any) -> bool:
"""
Checks if a given item is null or correspond to null.
    This method checks for: ``None``, ``numpy.nan``, ``pandas.NA``,
    ``pandas.NaT``, the empty string ``""``, and the single-space string ``" "``
Parameters
----------
item: Any
The item to check
Returns
-------
bool
Whether the given item is null or not
"""
null_values = {np.nan, pd.NA, pd.NaT, None, "", " "}
return item in null_values
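# Minimal doctest sketch (not part of the original module) for the null-handling helpers.
def _example_null_handling() -> list:
    """
    >>> is_null("")
    True
    >>> is_null("kgx")
    False
    >>> _example_null_handling()
    ['a', 'b']
    """
    return remove_null(["a", "", None, "b"])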
def apply_graph_operations(graph: BaseGraph, operations: List) -> None:
"""
Apply graph operations to a given graph.
Parameters
----------
graph: kgx.graph.base_graph.BaseGraph
An instance of BaseGraph
operations: List
A list of graph operations with configuration
"""
for operation in operations:
op_name = operation["name"]
op_args = operation["args"]
module_name = ".".join(op_name.split(".")[0:-1])
function_name = op_name.split(".")[-1]
f = getattr(importlib.import_module(module_name), function_name)
f(graph, **op_args)
def create_connection(db_file):
""" create a database connection to the SQLite database
specified by db_file
:param db_file: database file
:return: Connection object or None
"""
conn = None
try:
conn = sqlite3.connect(db_file)
except ConnectionError as e:
print(e)
return conn
def close_connection(conn):
""" close a database connection to the SQLite database
:return: None
"""
try:
if conn:
conn.close()
except ConnectionError as e:
print(e)
return conn
def drop_existing_tables(conn):
try:
# Get a cursor object
c = conn.cursor()
# Get a list of all tables in the database
c.execute("SELECT name FROM sqlite_master WHERE type='table';")
table_names = [row[0] for row in c.fetchall()]
# Loop through the table names and drop each table
for table_name in table_names:
drop_table_sql = f"DROP TABLE IF EXISTS {table_name};"
c.execute(drop_table_sql)
# Commit the changes and close the connection
conn.commit()
except sqlite3.Error as e:
print(f"An error occurred while removing all tables and data from the SQLite database: {e}")
| 32,009 | 27.252427 | 120 |
py
|
kgx
|
kgx-master/kgx/parsers/ntriples_parser.py
|
import codecs
from typing import Generator
from rdflib.plugins.parsers.ntriples import W3CNTriplesParser, ParseError
from rdflib.plugins.parsers.ntriples import r_wspace, r_wspaces, r_tail
class CustomNTriplesParser(W3CNTriplesParser):
"""
This class is an extension to ``rdflib.plugins.parsers.ntriples.W3CNTriplesParser``
that parses N-Triples and yields triples.
"""
def parse(self, filename: str) -> Generator:
"""
Parses an N-Triples file and yields triples.
Parameters
----------
filename: str
The filename to parse
Returns
-------
Generator
A generator for triples
"""
if not hasattr(filename, "read"):
raise ParseError("Item to parse must be a file-like object.")
# since N-Triples 1.1 files can and should be utf-8 encoded
f = codecs.getreader("utf-8")(filename)
self.file = f
self.buffer = ""
while True:
self.line = self.readline()
if self.line is None:
break
if self.line == "":
raise ParseError(f"Empty line encountered in {filename}. "
f"Ensure that no leading or trailing empty lines persist "
f"in the N-Triples file.")
break
try:
yield from self.parseline()
except ParseError:
raise ParseError("Invalid line: %r" % self.line)
def parseline(self) -> Generator:
"""
Parse each line and yield triples.
Parameters
----------
Generator
A generator
"""
self.eat(r_wspace)
if self.line or not self.line.startswith("#"):
subject = self.subject()
self.eat(r_wspaces)
predicate = self.predicate()
self.eat(r_wspaces)
object = self.object()
self.eat(r_tail)
if self.line:
raise ParseError("Trailing garbage")
return self.sink.triple(subject, predicate, object)
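    # Illustrative usage sketch (not part of the original module): the parser streams
    # triples into its sink, so a typical call looks like
    #   parser = CustomNTriplesParser(sink=some_rdf_source)
    #   with open("data.nt", "rb") as fh:
    #       for rec in parser.parse(fh):
    #           ...  # node/edge records yielded by the sink's triple() generator
    # 'some_rdf_source' and 'data.nt' are placeholders for illustration.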
| 2,156 | 27.76 | 91 |
py
|
kgx
|
kgx-master/kgx/parsers/__init__.py
| 0 | 0 | 0 |
py
|
|
kgx
|
kgx-master/kgx/source/json_source.py
|
import gzip
import typing
import ijson
from itertools import chain
from typing import Dict, Tuple, Any, Generator, Optional, List
from kgx.config import get_logger
from kgx.source.tsv_source import TsvSource
log = get_logger()
class JsonSource(TsvSource):
"""
JsonSource is responsible for reading data as records
from a JSON.
"""
def __init__(self, owner):
super().__init__(owner)
self.compression = None
self.list_delimiter = None
def parse(
self,
filename: str,
format: str = "json",
compression: Optional[str] = None,
**kwargs: Any
) -> typing.Generator:
"""
This method reads from a JSON and yields records.
Parameters
----------
filename: str
The filename to parse
format: str
The format (``json``)
compression: Optional[str]
The compression type (``gz``)
kwargs: Any
Any additional arguments
Returns
-------
Generator
A generator for node and edge records read from the file
"""
self.set_provenance_map(kwargs)
self.compression = compression
n = self.read_nodes(filename)
e = self.read_edges(filename)
yield from chain(n, e)
def read_nodes(self, filename: str) -> Generator:
"""
Read node records from a JSON.
Parameters
----------
filename: str
The filename to read from
Returns
-------
Generator
A generator for node records
"""
if self.compression == "gz":
FH = gzip.open(filename, "rb")
else:
FH = open(filename, "rb")
for n in ijson.items(FH, "nodes.item"):
yield self.read_node(n)
def read_edges(self, filename: str) -> Generator:
"""
Read edge records from a JSON.
Parameters
----------
filename: str
The filename to read from
Returns
-------
Generator
A generator for edge records
"""
if self.compression == "gz":
FH = gzip.open(filename, "rb")
else:
FH = open(filename, "rb")
for e in ijson.items(FH, "edges.item", use_float=True):
yield self.read_edge(e)
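    # Illustrative sketch (not part of the original module): the expected document shape is
    #   {"nodes": [{"id": "...", "category": [...]}, ...],
    #    "edges": [{"subject": "...", "predicate": "...", "object": "..."}, ...]}
    # and a JsonSource is normally driven through a kgx Transformer rather than directly,
    # since its constructor expects an owning transformer instance.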
| 2,399 | 22.529412 | 68 |
py
|
kgx
|
kgx-master/kgx/source/rdf_source.py
|
import gzip
import typing
from typing import Set, Dict, Union, Optional, Any, Tuple, List, Generator
import rdflib
from linkml_runtime.linkml_model.meta import SlotDefinition, ClassDefinition, Element
from rdflib import URIRef, RDF, Namespace
from kgx.error_detection import ErrorType, MessageLevel
from kgx.prefix_manager import PrefixManager
from kgx.config import get_logger
from kgx.parsers.ntriples_parser import CustomNTriplesParser
from kgx.source.source import Source, DEFAULT_EDGE_PREDICATE
from kgx.utils.graph_utils import curie_lookup
from kgx.utils.kgx_utils import (
get_toolkit,
is_property_multivalued,
generate_edge_key,
sentencecase_to_snakecase,
sentencecase_to_camelcase,
get_biolink_ancestors,
sanitize_import,
prepare_data_dict,
CORE_NODE_PROPERTIES,
CORE_EDGE_PROPERTIES,
knowledge_provenance_properties,
)
log = get_logger()
NAMED_THING = "biolink:NamedThing"
class RdfSource(Source):
"""
RdfSource is responsible for reading data as records
from RDF.
.. note::
Currently only RDF N-Triples are supported.
"""
def __init__(self, owner):
super().__init__(owner)
self.DEFAULT = Namespace(self.prefix_manager.prefix_map[""])
self.OBAN = Namespace(self.prefix_manager.prefix_map["OBAN"])
self.PMID = Namespace(self.prefix_manager.prefix_map["PMID"])
self.BIOLINK = Namespace(self.prefix_manager.prefix_map["biolink"])
self.predicate_mapping = {}
self.cache: Dict = {}
self.toolkit = get_toolkit()
self.node_property_predicates = set(
[
URIRef(self.prefix_manager.expand(x))
for x in self.toolkit.get_all_node_properties(formatted=True)
]
)
self.node_property_predicates.update(
set(self.toolkit.get_all_node_properties(formatted=True))
)
self.node_property_predicates.update(
set(self.toolkit.get_all_edge_properties(formatted=True))
)
for ksf in knowledge_provenance_properties:
self.node_property_predicates.add(
URIRef(self.prefix_manager.expand("biolink:" + ksf))
)
self.reification_types = {
RDF.Statement,
self.BIOLINK.Association,
self.OBAN.association,
}
self.reification_predicates = {
self.BIOLINK.subject,
self.BIOLINK.predicate,
self.BIOLINK.object,
RDF.subject,
RDF.object,
RDF.predicate,
self.OBAN.association_has_subject,
self.OBAN.association_has_predicate,
self.OBAN.association_has_object,
}
self.reified_nodes: Set = set()
self.start: int = 0
self.count: int = 0
self.CACHE_SIZE = 10000
self.node_record = {}
self.edge_record = {}
self.node_cache = {}
self.edge_cache = {}
self._incomplete_nodes = {}
def set_predicate_mapping(self, m: Dict) -> None:
"""
Set predicate mappings.
Use this method to update mappings for predicates that are
not in Biolink Model.
Parameters
----------
m: Dict
A dictionary where the keys are IRIs and values are their
corresponding property names
"""
for k, v in m.items():
self.predicate_mapping[URIRef(k)] = v
def set_node_property_predicates(self, predicates) -> None:
"""
Set predicates that are to be treated as node properties.
Parameters
----------
predicates: Set
Set of predicates
"""
for p in predicates:
self.node_property_predicates.add(URIRef(p))
def parse(
self,
filename: str,
format: str = "nt",
compression: Optional[str] = None,
**kwargs: Any,
) -> typing.Generator:
"""
This method reads from RDF N-Triples and yields records.
.. note::
To ensure proper parsing of N-Triples and a relatively low memory footprint,
it is recommended that the N-Triples be sorted based on the subject IRIs.
```sort -k 1,2 -t ' ' data.nt > data_sorted.nt```
Parameters
----------
filename: str
The filename to parse
format: str
The format (``nt``)
compression: Optional[str]
The compression type (``gz``)
kwargs: Any
Any additional arguments
Returns
-------
Generator
A generator for records
"""
p = CustomNTriplesParser(self)
self.set_provenance_map(kwargs)
if compression == "gz":
yield from p.parse(gzip.open(filename, "rb"))
else:
yield from p.parse(open(filename, "rb"))
log.info(f"Done parsing {filename}")
for n in self.reified_nodes:
data = self.node_cache.pop(n)
self.dereify(n, data)
for k in self.node_cache.keys():
node_data = self.node_cache[k]
if "category" in node_data:
if NAMED_THING not in set(node_data["category"]):
node_data["category"].append(NAMED_THING)
else:
node_data["category"] = [NAMED_THING]
node_data = self.validate_node(node_data)
if not node_data:
continue
node_data = sanitize_import(node_data)
self.set_node_provenance(node_data)
if self.check_node_filter(node_data):
self.node_properties.update(node_data.keys())
yield k, node_data
self.node_cache.clear()
for k in self.edge_cache.keys():
edge_data = self.edge_cache[k]
edge_data = self.validate_edge(edge_data)
if not edge_data:
continue
edge_data = sanitize_import(edge_data)
self.set_edge_provenance(edge_data)
if self.check_edge_filter(edge_data):
self.edge_properties.update(edge_data.keys())
yield k[0], k[1], k[2], edge_data
self.edge_cache.clear()
def triple(self, s: URIRef, p: URIRef, o: URIRef) -> None:
"""
Parse a triple.
Parameters
----------
s: URIRef
Subject
p: URIRef
Predicate
o: URIRef
Object
"""
self.count += 1
(element_uri, canonical_uri, predicate, property_name) = self.process_predicate(
p
)
if element_uri:
prop_uri = element_uri
elif predicate:
prop_uri = predicate
else:
prop_uri = property_name
s_curie = self.prefix_manager.contract(s)
if s_curie.startswith("biolink") or s_curie.startswith("OBAN"):
log.warning(f"Skipping {s} {p} {o}")
elif s_curie in self.reified_nodes:
self.add_node_attribute(s, key=prop_uri, value=o)
elif p in self.reification_predicates:
# subject is a reified node
self.reified_nodes.add(s_curie)
self.add_node_attribute(s, key=prop_uri, value=o)
        elif property_name in {
            "subject",
            "predicate",
            "object",
            "relation",
        }:
# subject is a reified node
self.reified_nodes.add(s_curie)
self.add_node_attribute(s, key=prop_uri, value=o)
elif o in self.reification_types:
# subject is a reified node
self.reified_nodes.add(s_curie)
self.add_node_attribute(s, key=prop_uri, value=o)
elif element_uri and element_uri in self.node_property_predicates:
# treating predicate as a node property
self.add_node_attribute(s, key=prop_uri, value=o)
elif (
p in self.node_property_predicates
or predicate in self.node_property_predicates
or property_name in self.node_property_predicates
):
# treating predicate as a node property
self.add_node_attribute(s, key=prop_uri, value=o)
elif isinstance(o, rdflib.term.Literal):
self.add_node_attribute(s, key=prop_uri, value=o)
else:
# treating predicate as an edge
self.add_edge(s, o, p)
if len(self.edge_cache) >= self.CACHE_SIZE:
while self.reified_nodes:
n = self.reified_nodes.pop()
data = self.node_cache.pop(n)
try:
self.dereify(n, data)
except ValueError as e:
self.owner.log_error(
entity=str(data),
error_type=ErrorType.INVALID_EDGE_PROPERTY,
message=str(e),
message_level=MessageLevel.WARNING
)
self._incomplete_nodes[n] = data
for n in self._incomplete_nodes.keys():
self.node_cache[n] = self._incomplete_nodes[n]
self.reified_nodes.add(n)
self._incomplete_nodes.clear()
for k in self.edge_cache.keys():
if (
"id" not in self.edge_cache[k]
and "association_id" not in self.edge_cache[k]
):
edge_key = generate_edge_key(
self.edge_cache[k]["subject"],
self.edge_cache[k]["predicate"],
self.edge_cache[k]["object"],
)
self.edge_cache[k]["id"] = edge_key
data = self.edge_cache[k]
data = self.validate_edge(data)
data = sanitize_import(data)
self.set_edge_provenance(data)
if self.check_edge_filter(data):
self.edge_properties.update(data.keys())
yield k[0], k[1], k[2], data
self.edge_cache.clear()
yield None
def dereify(self, n: str, node: Dict) -> None:
"""
Dereify a node to create a corresponding edge.
Parameters
----------
n: str
Node identifier
node: Dict
Node data
"""
if "predicate" not in node:
node["predicate"] = "biolink:related_to"
if "relation" not in node:
node["relation"] = node["predicate"]
if "subject" in node and "object" in node:
self.edge_properties.update(node.keys())
self.add_edge(node["subject"], node["object"], node["predicate"], node)
else:
self.owner.log_error(
entity=str(node),
error_type=ErrorType.MISSING_CATEGORY,
message=f"Missing 'subject' or 'object' in reified edge node",
message_level=MessageLevel.WARNING
)
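    # Illustrative sketch (not part of the original module): a reified node such as
    #   {"id": "_:assoc1", "subject": "HGNC:11603", "predicate": "biolink:interacts_with",
    #    "object": "HGNC:12345"}
    # is collapsed by dereify() into a single edge between the subject and object,
    # with "biolink:related_to" used when no predicate was captured.
    # The identifiers above are placeholders for illustration.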
def add_node_attribute(
self, iri: Union[URIRef, str], key: str, value: Union[str, List]
) -> None:
"""
Add an attribute to a node in cache, while taking into account whether the attribute
should be multi-valued.
The ``key`` may be a rdflib.URIRef or an URI string that maps onto a
property name as defined in ``rdf_utils.property_mapping``.
Parameters
----------
iri: Union[rdflib.URIRef, str]
The IRI of a node in the rdflib.Graph
key: str
The name of the attribute. Can be a rdflib.URIRef or URI string
value: Union[str, List]
The value of the attribute
Returns
-------
Dict
The node data
"""
if self.prefix_manager.is_iri(key):
key_curie = self.prefix_manager.contract(key)
else:
key_curie = key
c = curie_lookup(key_curie)
if c:
key_curie = c
if self.prefix_manager.is_curie(key_curie):
# property names will always be just the reference
mapped_key = self.prefix_manager.get_reference(key_curie)
else:
mapped_key = key_curie
if isinstance(value, rdflib.term.Identifier):
if isinstance(value, rdflib.term.URIRef):
value_curie = self.prefix_manager.contract(value)
value = value_curie
else:
value = value.toPython()
if (
mapped_key in is_property_multivalued
and is_property_multivalued[mapped_key]
):
value = [value]
if mapped_key in self.node_record:
if isinstance(self.node_record[mapped_key], str):
_ = self.node_record[mapped_key]
self.node_record[mapped_key] = [_]
self.node_record[mapped_key].append(value)
else:
self.node_record[mapped_key] = [value]
curie = self.prefix_manager.contract(iri)
if curie in self.node_cache:
if mapped_key in self.node_cache[curie]:
node = self.node_cache[curie]
updated_node = prepare_data_dict(node, {mapped_key: value})
self.node_cache[curie] = updated_node
else:
self.node_cache[curie][mapped_key] = value
else:
self.node_cache[curie] = {"id": curie, mapped_key: value}
def add_node(self, iri: URIRef, data: Optional[Dict] = None) -> Dict:
"""
Add a node to cache.
Parameters
----------
iri: rdflib.URIRef
IRI of a node
data: Optional[Dict]
Additional node properties
Returns
-------
Dict
The node data
"""
n = self.prefix_manager.contract(str(iri))
if n == iri:
if self.prefix_manager.has_urlfragment(iri):
n = rdflib.namespace.urldefrag(iri).fragment
if not n:
n = iri
if n in self.node_cache:
node_data = self.update_node(n, data)
else:
if data:
node_data = data
else:
node_data = {}
node_data["id"] = n
if "category" in node_data:
if NAMED_THING not in set(node_data["category"]):
node_data["category"].append(NAMED_THING)
else:
node_data["category"] = [NAMED_THING]
self.set_node_provenance(node_data)
self.node_cache[n] = node_data
return node_data
def add_edge(
self,
subject_iri: URIRef,
object_iri: URIRef,
predicate_iri: URIRef,
data: Optional[Dict[Any, Any]] = None,
) -> Dict:
"""
Add an edge to cache.
Parameters
----------
subject_iri: rdflib.URIRef
Subject IRI for the subject in a triple
object_iri: rdflib.URIRef
Object IRI for the object in a triple
predicate_iri: rdflib.URIRef
Predicate IRI for the predicate in a triple
data: Optional[Dict[Any, Any]]
Additional edge properties
Returns
-------
Dict
The edge data
"""
(element_uri, canonical_uri, predicate, property_name) = self.process_predicate(
predicate_iri
)
subject_curie = self.prefix_manager.contract(subject_iri)
object_curie = self.prefix_manager.contract(object_iri)
if subject_curie in self.node_cache:
subject_node = self.node_cache[subject_curie]
else:
subject_node = self.add_node(subject_iri)
if object_curie in self.node_cache:
object_node = self.node_cache[object_curie]
else:
object_node = self.add_node(object_iri)
edge_predicate = element_uri if element_uri else predicate
if not edge_predicate:
edge_predicate = property_name
if " " in edge_predicate:
log.debug(
f"predicate IRI '{predicate_iri}' yields edge_predicate '{edge_predicate}' that not in snake_case form; replacing ' ' with '_'"
)
edge_predicate_prefix = self.prefix_manager.get_prefix(edge_predicate)
if edge_predicate_prefix not in {"biolink", "rdf", "rdfs", "skos", "owl"}:
if PrefixManager.is_curie(edge_predicate):
# name = curie_lookup(edge_predicate)
# if name:
# log.debug(f"predicate IRI '{predicate_iri}' yields edge_predicate '{edge_predicate}' that is actually a CURIE; Using its mapping instead: {name}")
# edge_predicate = f"{edge_predicate_prefix}:{name}"
# else:
# log.debug(f"predicate IRI '{predicate_iri}' yields edge_predicate '{edge_predicate}' that is actually a CURIE; defaulting back to {self.DEFAULT_EDGE_PREDICATE}")
edge_predicate = DEFAULT_EDGE_PREDICATE
edge_key = generate_edge_key(
subject_node["id"], edge_predicate, object_node["id"]
)
if (subject_node["id"], object_node["id"], edge_key) in self.edge_cache:
# edge already exists; process kwargs and update the edge
edge_data = self.update_edge(
subject_node["id"], object_node["id"], edge_key, data
)
else:
# add a new edge
edge_data = data if data else {}
edge_data.update(
{
"subject": subject_node["id"],
"predicate": f"{edge_predicate}",
"object": object_node["id"],
}
)
if "relation" not in edge_data:
edge_data["relation"] = predicate
self.set_edge_provenance(edge_data)
self.edge_cache[(subject_node["id"], object_node["id"], edge_key)] = edge_data
return edge_data
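    # Hedged illustration (comments only, not executed): given a triple whose
    # subject, object and predicate are full IRIs, add_edge() contracts them to
    # CURIEs, makes sure both end nodes exist in the node cache, and stores an
    # edge record keyed by (subject id, object id, generated edge key), roughly
    #   {"subject": "CHEBI:15365", "predicate": "biolink:treats",
    #    "object": "MONDO:0004979", "relation": "RO:0002606", ...}
    # The actual predicate depends on the Biolink mapping resolved by
    # process_predicate(); the identifiers above are only examples.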
def process_predicate(self, p: Optional[Union[URIRef, str]]) -> Tuple:
"""
Process a predicate where the method checks if there is a mapping in Biolink Model.
Parameters
----------
p: Optional[Union[URIRef, str]]
The predicate
Returns
-------
Tuple
A tuple that contains the Biolink CURIE (if available), the Biolink slot_uri CURIE (if available),
the CURIE form of p, the reference of p
"""
if p in self.cache:
# already processed this predicate before; pull from cache
element_uri = self.cache[p]["element_uri"]
canonical_uri = self.cache[p]["canonical_uri"]
predicate = self.cache[p]["predicate"]
property_name = self.cache[p]["property_name"]
else:
# haven't seen this property before; map to element
if self.prefix_manager.is_iri(p):
predicate = self.prefix_manager.contract(str(p))
else:
predicate = None
if self.prefix_manager.is_curie(p):
property_name = self.prefix_manager.get_reference(p)
predicate = p
else:
if predicate and self.prefix_manager.is_curie(predicate):
property_name = self.prefix_manager.get_reference(predicate)
else:
property_name = p
predicate = f":{p}"
element = self.get_biolink_element(p)
if not element:
element = self.get_biolink_element(predicate)
canonical_uri = None
if element:
if isinstance(element, SlotDefinition):
# predicate corresponds to a biolink slot
if element.definition_uri:
element_uri = self.prefix_manager.contract(
element.definition_uri
)
else:
element_uri = (
f"biolink:{sentencecase_to_snakecase(element.name)}"
)
if element.slot_uri:
canonical_uri = element.slot_uri
elif isinstance(element, ClassDefinition):
# this will happen only when the IRI is actually
# a reference to a class
element_uri = self.prefix_manager.contract(element.class_uri)
else:
element_uri = f"biolink:{sentencecase_to_camelcase(element.name)}"
if "biolink:Attribute" in get_biolink_ancestors(element.name):
element_uri = f"biolink:{sentencecase_to_snakecase(element.name)}"
if not predicate:
predicate = element_uri
else:
# look at predicate mappings
element_uri = None
if p in self.predicate_mapping:
property_name = self.predicate_mapping[p]
predicate = f":{property_name}"
self.cache[p] = {
"element_uri": element_uri,
"canonical_uri": canonical_uri,
"predicate": predicate,
"property_name": property_name,
}
return element_uri, canonical_uri, predicate, property_name
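    # Hedged illustration (comments only, not executed): for a predicate IRI that
    # maps onto a Biolink slot, e.g. rdfs:subClassOf, the returned 4-tuple is roughly
    #   ("biolink:subclass_of", "rdfs:subClassOf", "rdfs:subClassOf", "subClassOf")
    # i.e. (element_uri, canonical_uri, predicate CURIE, property name). For an IRI
    # with no prefix or Biolink mapping, element_uri and canonical_uri come back as
    # None and the property name falls back to the raw input. Exact values depend on
    # the loaded Biolink Model version and the configured prefix map.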
def update_node(self, n: Union[URIRef, str], data: Optional[Dict] = None) -> Dict:
"""
Update a node with properties.
Parameters
----------
n: Union[URIRef, str]
Node identifier
data: Optional[Dict]
Node properties
Returns
-------
Dict
The node data
"""
node_data = self.node_cache[str(n)]
if data:
new_data = self._prepare_data_dict(node_data, data)
node_data.update(new_data)
return node_data
def update_edge(
self,
subject_curie: str,
object_curie: str,
edge_key: str,
data: Optional[Dict[Any, Any]],
) -> Dict:
"""
Update an edge with properties.
Parameters
----------
subject_curie: str
Subject CURIE
object_curie: str
Object CURIE
edge_key: str
Edge key
data: Optional[Dict[Any, Any]]
Edge properties
Returns
-------
Dict
The edge data
"""
key = (subject_curie, object_curie, edge_key)
if key in self.edge_cache:
edge_data = self.edge_cache[key]
else:
edge_data = {}
if data:
new_data = self._prepare_data_dict(edge_data, data)
edge_data.update(new_data)
return edge_data
def _prepare_data_dict(self, d1: Dict, d2: Dict) -> Dict:
"""
        Given two dict objects, make a new dict object containing d2's keys, with each
        value merged with any existing value for that key in d1.
        If a key is known to be multivalued then its value is converted to a list.
        If a key is already multivalued then it is updated with new values.
        If a key is single valued, and a new unique value is found, then the existing value is
        converted to a list and the new value is appended to this list.
Parameters
----------
d1: Dict
Dict object
d2: Dict
Dict object
Returns
-------
        Dict
            A dict with d2's keys whose values have been merged with those in d1
"""
new_data = {}
for key, value in d2.items():
if isinstance(value, (list, set, tuple)):
new_value = [
self.prefix_manager.contract(x)
if self.prefix_manager.is_iri(x)
else x
for x in value
]
else:
new_value = (
self.prefix_manager.contract(value)
if self.prefix_manager.is_iri(value)
else value
)
if key in is_property_multivalued:
if is_property_multivalued[key]:
# key is supposed to be multivalued
if key in d1:
# key is in data
if isinstance(d1[key], list):
# existing key has value type list
new_data[key] = d1[key]
if isinstance(new_value, (list, set, tuple)):
new_data[key] += [
x for x in new_value if x not in new_data[key]
]
else:
if new_value not in new_data[key]:
new_data[key].append(new_value)
else:
if (
key in CORE_NODE_PROPERTIES
or key in CORE_EDGE_PROPERTIES
):
log.debug(
f"cannot modify core property '{key}': {d2[key]} vs {d1[key]}"
)
else:
# existing key does not have value type list; converting to list
new_data[key] = [d1[key]]
if isinstance(new_value, (list, set, tuple)):
new_data[key] += [
x for x in new_value if x not in new_data[key]
]
else:
if new_value not in new_data[key]:
new_data[key].append(new_value)
else:
# key is not in data; adding
if isinstance(new_value, (list, set, tuple)):
new_data[key] = [x for x in new_value]
else:
new_data[key] = [new_value]
else:
# key is not multivalued; adding/replacing as-is
if key in d1:
if isinstance(d1[key], list):
new_data[key] = d1[key]
if isinstance(new_value, (list, set, tuple)):
new_data[key] += [x for x in new_value]
else:
new_data[key].append(new_value)
else:
if (
key in CORE_NODE_PROPERTIES
or key in CORE_EDGE_PROPERTIES
):
log.debug(
f"cannot modify core property '{key}': {d2[key]} vs {d1[key]}"
)
else:
new_data[key] = new_value
else:
new_data[key] = new_value
else:
# treating key as multivalued
if key in d1:
# key is in data
if key in CORE_NODE_PROPERTIES or key in CORE_EDGE_PROPERTIES:
log.debug(
f"cannot modify core property '{key}': {d2[key]} vs {d1[key]}"
)
else:
if isinstance(d1[key], list):
# existing key has value type list
new_data[key] = d1[key]
if isinstance(new_value, (list, set, tuple)):
new_data[key] += [
x for x in new_value if x not in new_data[key]
]
else:
new_data[key].append(new_value)
else:
# existing key does not have value type list; converting to list
new_data[key] = [d1[key]]
if isinstance(new_value, (list, set, tuple)):
new_data[key] += [
x for x in new_value if x not in new_data[key]
]
else:
new_data[key].append(new_value)
else:
new_data[key] = new_value
return new_data
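    # Hedged illustration (comments only, not executed), assuming 'synonym' is
    # registered as multivalued in is_property_multivalued:
    #   d1 = {"synonym": ["alpha"]}
    #   d2 = {"synonym": "beta"}
    #   _prepare_data_dict(d1, d2) -> {"synonym": ["alpha", "beta"]}
    # Single-valued, non-core keys are generally replaced by the incoming value,
    # while core properties such as 'id' are never overwritten.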
def get_biolink_element(self, predicate: Any) -> Optional[Element]:
"""
Returns a Biolink Model element for a given predicate.
Parameters
----------
predicate: Any
The CURIE of a predicate
Returns
-------
Optional[Element]
The corresponding Biolink Model element
"""
toolkit = get_toolkit()
if self.prefix_manager.is_iri(predicate):
predicate_curie = self.prefix_manager.contract(predicate)
else:
predicate_curie = predicate
if self.prefix_manager.is_curie(predicate_curie):
reference = self.prefix_manager.get_reference(predicate_curie)
else:
reference = predicate_curie
element = toolkit.get_element(reference)
if not element:
try:
mapping = toolkit.get_element_by_mapping(predicate)
if mapping:
element = toolkit.get_element(mapping)
else:
mapping = toolkit.get_element_by_mapping(reference)
if mapping:
element = toolkit.get_element(mapping)
except ValueError as e:
self.owner.log_error(
entity=str(predicate),
error_type=ErrorType.INVALID_EDGE_PREDICATE,
message=str(e)
)
element = None
return element
| 30,464 | 34.715123 | 183 |
py
|
kgx
|
kgx-master/kgx/source/jsonl_source.py
|
import gzip
import re
import typing
import jsonlines
from typing import Optional, Any, Generator, Dict
from kgx.config import get_logger
log = get_logger()
from kgx.source.json_source import JsonSource
class JsonlSource(JsonSource):
"""
JsonlSource is responsible for reading data as records
from JSON Lines.
"""
def __init__(self, owner):
super().__init__(owner)
def parse(
self,
filename: str,
format: str = "jsonl",
compression: Optional[str] = None,
**kwargs: Any,
) -> typing.Generator:
"""
This method reads from JSON Lines and yields records.
Parameters
----------
filename: str
The filename to parse
format: str
            The format (``jsonl``)
compression: Optional[str]
The compression type (``gz``)
kwargs: Any
Any additional arguments
Returns
-------
Generator
A generator for records
"""
self.set_provenance_map(kwargs)
if re.search(f"nodes.{format}", filename):
m = self.read_node
elif re.search(f"edges.{format}", filename):
m = self.read_edge
else:
# This used to throw an exception but perhaps we should simply ignore it.
log.warning(
f"Parse function cannot resolve the KGX file type in name {filename}. Skipped..."
)
return
if compression == "gz":
with gzip.open(filename, "rb") as FH:
reader = jsonlines.Reader(FH)
for obj in reader:
yield m(obj)
else:
with jsonlines.open(filename) as FH:
for obj in FH:
yield m(obj)
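# --- Hedged usage sketch (editor's addition, not part of kgx) ---------------
# A minimal way to drive JsonlSource directly, assuming a local file named
# "example_nodes.jsonl" exists; _StubOwner stands in for the kgx Transformer
# that normally constructs and owns the source.
if __name__ == "__main__":
    class _StubOwner:
        def log_error(self, **kwargs):
            # print validation problems reported by the source
            print("source error:", kwargs)
    src = JsonlSource(_StubOwner())
    for rec in src.parse("example_nodes.jsonl", format="jsonl"):
        # for a *nodes.jsonl file each record is a (node_id, node_data) tuple
        print(rec)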
| 1,821 | 23.621622 | 97 |
py
|
kgx
|
kgx-master/kgx/source/owl_source.py
|
import typing
from typing import Set, Optional, Generator, Any
import rdflib
from rdflib import Namespace, URIRef, OWL, RDFS, RDF
from kgx.config import get_logger
from kgx.source import RdfSource
from kgx.utils.kgx_utils import (
current_time_in_millis,
generate_uuid,
sanitize_import
)
log = get_logger()
class OwlSource(RdfSource):
"""
OwlSource is responsible for parsing an OWL ontology.
    .. note::
This is a simple parser that loads direct class-class relationships.
For more formal OWL parsing, refer to Robot: http://robot.obolibrary.org/
"""
def __init__(self, owner):
super().__init__(owner)
self.imported: Set = set()
self.OWLSTAR = Namespace("http://w3id.org/owlstar/")
self.excluded_predicates = {
URIRef("https://raw.githubusercontent.com/geneontology/go-ontology/master/contrib/oboInOwl#id")
}
def parse(
self,
filename: str,
format: str = "owl",
compression: Optional[str] = None,
**kwargs: Any,
) -> typing.Generator:
"""
This method reads from an OWL and yields records.
Parameters
----------
filename: str
The filename to parse
format: str
The format (``owl``)
compression: Optional[str]
The compression type (``gz``)
kwargs: Any
Any additional arguments
Returns
-------
Generator
A generator for node and edge records read from the file
"""
rdfgraph = rdflib.Graph()
if compression:
log.warning(f"compression mode '{compression}' not supported by OwlSource")
if format is None:
format = rdflib.util.guess_format(filename)
if format == "owl":
format = "xml"
log.info("Parsing {} with '{}' format".format(filename, format))
rdfgraph.parse(filename, format=format)
log.info("{} parsed with {} triples".format(filename, len(rdfgraph)))
self.set_provenance_map(kwargs)
self.start = current_time_in_millis()
log.info(f"Done parsing {filename}")
triples = rdfgraph.triples((None, OWL.imports, None))
for s, p, o in triples:
# Load all imports first
if p == OWL.imports:
if o not in self.imported:
input_format = rdflib.util.guess_format(o)
imported_rdfgraph = rdflib.Graph()
log.info(f"Parsing OWL import: {o}")
self.imported.add(o)
imported_rdfgraph.parse(o, format=input_format)
                    yield from self.load_graph(imported_rdfgraph)
else:
log.warning(f"Trying to import {o} but its already done")
yield from self.load_graph(rdfgraph)
    def load_graph(self, rdfgraph: rdflib.Graph, **kwargs: Any) -> Generator:
        """
        Walk through the rdflib.Graph and yield the corresponding node and edge records.
Parameters
----------
rdfgraph: rdflib.Graph
Graph containing nodes and edges
kwargs: Any
Any additional arguments
"""
seen = set()
seen.add(RDFS.subClassOf)
for s, p, o in rdfgraph.triples((None, RDFS.subClassOf, None)):
# ignoring blank nodes
if isinstance(s, rdflib.term.BNode):
continue
pred = None
parent = None
os_interpretation = None
if isinstance(o, rdflib.term.BNode):
# C SubClassOf R some D
for x in rdfgraph.objects(o, OWL.onProperty):
pred = x
# owl:someValuesFrom
for x in rdfgraph.objects(o, OWL.someValuesFrom):
os_interpretation = self.OWLSTAR.term("AllSomeInterpretation")
parent = x
# owl:allValuesFrom
for x in rdfgraph.objects(o, OWL.allValuesFrom):
os_interpretation = self.OWLSTAR.term("AllOnlyInterpretation")
parent = x
if pred is None or parent is None:
log.warning(
f"{s} {p} {o} has OWL.onProperty {pred} and OWL.someValuesFrom {parent}"
)
log.warning("Do not know how to handle BNode: {}".format(o))
continue
else:
# C rdfs:subClassOf D (where C and D are named classes)
pred = p
parent = o
if os_interpretation:
# reify edges that have logical interpretation
eid = generate_uuid()
self.reified_nodes.add(eid)
yield from self.triple(
URIRef(eid), self.BIOLINK.term("category"), self.BIOLINK.Association
)
yield from self.triple(URIRef(eid), self.BIOLINK.term("subject"), s)
yield from self.triple(
URIRef(eid), self.BIOLINK.term("predicate"), pred
)
yield from self.triple(URIRef(eid), self.BIOLINK.term("object"), parent)
yield from self.triple(
URIRef(eid),
self.BIOLINK.term("logical_interpretation"),
os_interpretation,
)
else:
yield from self.triple(s, pred, parent)
seen.add(OWL.equivalentClass)
for s, p, o in rdfgraph.triples((None, OWL.equivalentClass, None)):
# A owl:equivalentClass B (where A and B are named classes)
if not isinstance(o, rdflib.term.BNode):
yield from self.triple(s, p, o)
for relation in rdfgraph.subjects(RDF.type, OWL.ObjectProperty):
seen.add(relation)
for s, p, o in rdfgraph.triples((relation, None, None)):
if not isinstance(o, rdflib.term.BNode):
if p not in self.excluded_predicates:
yield from self.triple(s, p, o)
for s, p, o in rdfgraph.triples((None, None, None)):
if isinstance(s, rdflib.term.BNode) or isinstance(o, rdflib.term.BNode):
continue
if p in seen:
continue
if p in self.excluded_predicates:
continue
yield from self.triple(s, p, o)
for n in self.reified_nodes:
data = self.node_cache.pop(n)
self.dereify(n, data)
for k, data in self.node_cache.items():
node_data = self.validate_node(data)
if not node_data:
continue
node_data = sanitize_import(node_data)
self.set_node_provenance(node_data)
if self.check_node_filter(node_data):
self.node_properties.update(node_data.keys())
yield k, node_data
self.node_cache.clear()
for k, data in self.edge_cache.items():
edge_data = self.validate_edge(data)
if not edge_data:
continue
edge_data = sanitize_import(edge_data)
self.set_edge_provenance(edge_data)
if self.check_edge_filter(edge_data):
self.edge_properties.update(edge_data.keys())
yield k[0], k[1], k[2], edge_data
self.edge_cache.clear()
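# --- Hedged usage sketch (editor's addition, not part of kgx) ---------------
# Parsing a small ontology with OwlSource directly; "example_ontology.owl" is a
# hypothetical path and _StubOwner stands in for the owning Transformer.
if __name__ == "__main__":
    class _StubOwner:
        def log_error(self, **kwargs):
            print("source error:", kwargs)
    src = OwlSource(_StubOwner())
    for rec in src.parse("example_ontology.owl", format="owl"):
        # node records are (id, data) tuples; edge records are (u, v, key, data)
        print(rec)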
| 7,503 | 34.904306 | 107 |
py
|
kgx
|
kgx-master/kgx/source/trapi_source.py
|
import gzip
import typing
import ijson
from itertools import chain
from typing import Dict, Tuple, Generator, Optional, Any
from kgx.source.json_source import JsonSource
class TrapiSource(JsonSource):
"""
TrapiSource is responsible for reading data as records
from a TRAPI JSON.
"""
def __init__(self, owner):
super().__init__(owner)
self._node_properties = set()
self._edge_properties = set()
def parse(
self,
filename: str,
format: str = "json",
compression: Optional[str] = None,
**kwargs: Any
) -> typing.Generator:
"""
This method reads from a JSON and yields records.
Parameters
----------
filename: str
The filename to parse
format: str
The format (``trapi-json``)
compression: Optional[str]
The compression type (``gz``)
kwargs: Any
Any additional arguments
Returns
-------
Generator
A generator for node and edge records
"""
self.set_provenance_map(kwargs)
n = self.read_nodes(filename, compression)
e = self.read_edges(filename, compression)
yield from chain(n, e)
def read_nodes(self, filename: str, compression: Optional[str] = None) -> Generator:
"""
Read node records from a JSON.
Parameters
----------
filename: str
The filename to read from
compression: Optional[str]
The compression type
Returns
-------
Generator
A generator for node records
"""
if compression == "gz":
FH = gzip.open(filename, "rb")
else:
FH = open(filename, "rb")
for n in ijson.items(FH, "knowledge_graph.nodes.item"):
yield self.load_node(n)
def read_edges(self, filename: str, compression: Optional[str] = None) -> Generator:
"""
Read edge records from a JSON.
Parameters
----------
filename: str
The filename to read from
compression: Optional[str]
The compression type
Returns
-------
Generator
A generator for edge records
"""
if compression == "gz":
FH = gzip.open(filename, "rb")
else:
FH = open(filename, "rb")
for e in ijson.items(FH, "knowledge_graph.edges.item"):
yield self.load_edge(e)
def load_node(self, node: Dict) -> Tuple[str, Dict]:
"""
Load a node into an instance of BaseGraph
.. Note::
            This method transforms Reasoner Std API format fields to Biolink Model fields.
Parameters
----------
node : Dict
A node
"""
if "type" in node and "category" not in node:
node["category"] = node["type"]
del node["type"]
return super().read_node(node)
def load_edge(self, edge: Dict) -> Tuple[str, str, str, Dict]:
"""
Load an edge into an instance of BaseGraph
.. Note::
            This method transforms Reasoner Std API format fields to Biolink Model fields.
Parameters
----------
edge : Dict
An edge
"""
if "source_id" in edge:
edge["subject"] = edge["source_id"]
if "target_id" in edge:
edge["object"] = edge["target_id"]
if "relation_label" in edge:
edge["predicate"] = edge["relation_label"][0]
return super().read_edge(edge)
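# --- Hedged usage sketch (editor's addition, not part of kgx) ---------------
# Reading a TRAPI response with TrapiSource; "trapi_response.json" is a
# hypothetical path and _StubOwner replaces the owning Transformer.
if __name__ == "__main__":
    class _StubOwner:
        def log_error(self, **kwargs):
            print("source error:", kwargs)
    src = TrapiSource(_StubOwner())
    for rec in src.parse("trapi_response.json"):
        # nodes stream first (knowledge_graph.nodes), then edges
        print(rec)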
| 3,665 | 24.636364 | 93 |
py
|
kgx
|
kgx-master/kgx/source/graph_source.py
|
from itertools import chain
from typing import Generator, Any, Dict, Optional
from kgx.config import get_graph_store_class
from kgx.graph.base_graph import BaseGraph
from kgx.source.source import Source
from kgx.utils.kgx_utils import sanitize_import
class GraphSource(Source):
"""
GraphSource is responsible for reading data as records
from an in memory graph representation.
The underlying store must be an instance of ``kgx.graph.base_graph.BaseGraph``
"""
def __init__(self, owner):
super().__init__(owner)
self.graph = get_graph_store_class()()
def parse(self, graph: BaseGraph, **kwargs: Any) -> Generator:
"""
This method reads from a graph and yields records.
Parameters
----------
graph: kgx.graph.base_graph.BaseGraph
The graph to read from
kwargs: Any
Any additional arguments
Returns
-------
Generator
A generator for node and edge records read from the graph
"""
self.graph = graph
self.set_provenance_map(kwargs)
nodes = self.read_nodes()
edges = self.read_edges()
yield from chain(nodes, edges)
def read_nodes(self) -> Generator:
"""
Read nodes as records from the graph.
Returns
-------
Generator
A generator for nodes
"""
for n, data in self.graph.nodes(data=True):
if "id" not in data:
data["id"] = n
node_data = self.validate_node(data)
if not node_data:
continue
node_data = sanitize_import(node_data.copy())
self.set_node_provenance(node_data)
if self.check_node_filter(node_data):
self.node_properties.update(node_data.keys())
yield n, node_data
def read_edges(self) -> Generator:
"""
Read edges as records from the graph.
Returns
-------
Generator
A generator for edges
"""
for u, v, k, data in self.graph.edges(keys=True, data=True):
edge_data = self.validate_edge(data)
if not edge_data:
continue
edge_data = sanitize_import(edge_data.copy())
self.set_edge_provenance(edge_data)
if self.check_edge_filter(edge_data):
                self.edge_properties.update(edge_data.keys())
yield u, v, k, edge_data
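# --- Hedged usage sketch (editor's addition, not part of kgx) ---------------
# Streaming records out of an in-memory graph. The add_node/add_edge keyword
# style below assumes the default NetworkX-backed graph store; _StubOwner
# replaces the owning Transformer and the identifiers are illustrative only.
if __name__ == "__main__":
    class _StubOwner:
        def log_error(self, **kwargs):
            print("source error:", kwargs)
    g = get_graph_store_class()()
    g.add_node("HGNC:11603", name="TBX4", category=["biolink:Gene"])
    g.add_node("MONDO:0005002", name="COPD", category=["biolink:Disease"])
    g.add_edge(
        "HGNC:11603",
        "MONDO:0005002",
        subject="HGNC:11603",
        predicate="biolink:related_to",
        object="MONDO:0005002",
    )
    src = GraphSource(_StubOwner())
    for rec in src.parse(g):
        print(rec)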
| 2,526 | 25.322917 | 82 |
py
|
kgx
|
kgx-master/kgx/source/source.py
|
from typing import Dict, Union, Optional
from kgx.error_detection import ErrorType, MessageLevel
from kgx.utils.infores import InfoResContext
from kgx.prefix_manager import PrefixManager
from kgx.config import get_logger
log = get_logger()
DEFAULT_NODE_CATEGORY = "biolink:NamedThing"
DEFAULT_EDGE_PREDICATE = "biolink:related_to"
class Source(object):
"""
A Source is responsible for reading data as records
from a store where the store is a file or a database.
"""
def __init__(self, owner):
self.owner = owner
self.graph_metadata: Dict = {}
self.node_filters = {}
self.edge_filters = {}
self.node_properties = set()
self.edge_properties = set()
self.prefix_manager = PrefixManager()
self.infores_context: Optional[InfoResContext] = InfoResContext()
def set_prefix_map(self, m: Dict) -> None:
"""
Update default prefix map.
Parameters
----------
m: Dict
A dictionary with prefix to IRI mappings
"""
self.prefix_manager.update_prefix_map(m)
def check_node_filter(self, node: Dict) -> bool:
"""
Check if a node passes defined node filters.
Parameters
----------
node: Dict
A node
Returns
-------
bool
Whether the given node has passed all defined node filters
"""
pass_filter = False
if self.node_filters:
for k, v in self.node_filters.items():
if k in node:
# filter key exists in node
if isinstance(v, (list, set, tuple)):
if any(x in node[k] for x in v):
pass_filter = True
else:
return False
elif isinstance(v, str):
if node[k] == v:
pass_filter = True
else:
return False
else:
self.owner.log_error(
entity=node["id"],
error_type=ErrorType.INVALID_NODE_PROPERTY,
message=f"Unexpected '{k}' node filter of type '{type(v)}'"
)
return False
else:
# filter key does not exist in node
return False
else:
# no node filters defined
pass_filter = True
return pass_filter
def check_edge_filter(self, edge: Dict) -> bool:
"""
Check if an edge passes defined edge filters.
Parameters
----------
edge: Dict
An edge
Returns
-------
bool
Whether the given edge has passed all defined edge filters
"""
pass_filter = False
if self.edge_filters:
for k, v in self.edge_filters.items():
if k in {"subject_category", "object_category"}:
pass_filter = True
continue
if k in edge:
# filter key exists in edge
if isinstance(v, (list, set, tuple)):
if any(x in edge[k] for x in v):
pass_filter = True
else:
return False
elif isinstance(v, str):
if edge[k] == v:
pass_filter = True
else:
return False
else:
subobj = f"{edge['subject']}->{edge['object']}"
self.owner.log_error(
entity=subobj,
error_type=ErrorType.INVALID_EDGE_PROPERTY,
message=f"Unexpected '{k}' edge filter of type '{type(v)}'"
)
return False
else:
# filter does not exist in edge
return False
else:
# no edge filters defined
pass_filter = True
return pass_filter
def set_node_filter(self, key: str, value: Union[str, set]) -> None:
"""
Set a node filter, as defined by a key and value pair.
These filters are used to filter (or reduce) the
search space when fetching nodes from the underlying store.
.. note::
When defining the 'category' filter, the value should be of type ``set``.
This method also sets the 'subject_category' and 'object_category'
edge filters, to get a consistent set of nodes in the subgraph.
Parameters
----------
key: str
The key for node filter
value: Union[str, set]
The value for the node filter.
Can be either a string or a set.
"""
if key == "category":
if isinstance(value, set):
if "subject_category" in self.edge_filters:
self.edge_filters["subject_category"].update(value)
else:
self.edge_filters["subject_category"] = value
if "object_category" in self.edge_filters:
self.edge_filters["object_category"].update(value)
else:
self.edge_filters["object_category"] = value
else:
raise TypeError(
"'category' node filter should have a value of type 'set'"
)
if key in self.node_filters:
self.node_filters[key].update(value)
else:
self.node_filters[key] = value
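    # Hedged illustration (comments only, not executed): calling
    #   source.set_node_filter("category", {"biolink:Gene"})
    # records the filter under node_filters["category"] and, as a side effect,
    # seeds edge_filters["subject_category"] and edge_filters["object_category"]
    # with the same set, so fetched edges stay consistent with the node subset.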
def set_node_filters(self, filters: Dict) -> None:
"""
Set node filters.
Parameters
----------
filters: Dict
Node filters
"""
if filters:
for k, v in filters.items():
if isinstance(v, (list, set, tuple)):
self.set_node_filter(k, set(v))
else:
self.set_node_filter(k, v)
def set_edge_filters(self, filters: Dict) -> None:
"""
Set edge filters.
Parameters
----------
filters: Dict
Edge filters
"""
if filters:
for k, v in filters.items():
if isinstance(v, (list, set, tuple)):
self.set_edge_filter(k, set(v))
else:
self.set_edge_filter(k, v)
    def set_edge_filter(self, key: str, value: Union[str, set]) -> None:
"""
Set an edge filter, as defined by a key and value pair.
These filters are used to filter (or reduce) the
search space when fetching nodes from the underlying store.
.. note::
When defining the 'subject_category' or 'object_category' filter,
the value should be of type ``set``.
This method also sets the 'category' node filter, to get a
consistent set of nodes in the subgraph.
Parameters
----------
key: str
The key for edge filter
value: Union[str, set]
The value for the edge filter.
Can be either a string or a set.
"""
if key in {"subject_category", "object_category"}:
if isinstance(value, set):
if "category" in self.node_filters:
self.node_filters["category"].update(value)
else:
self.node_filters["category"] = value
else:
raise TypeError(
f"'{key}' edge filter should have a value of type 'set'"
)
if key in self.edge_filters:
self.edge_filters[key].update(value)
else:
self.edge_filters[key] = value
def clear_graph_metadata(self):
"""
        Clears the source's provenance (InfoRes) context. The values held there are now
        generally Callable functions, so clearing them once the metadata is no longer
        needed avoids peculiar Python object persistence problems downstream.
"""
self.infores_context = None
def set_provenance_map(self, kwargs):
"""
Set up a provenance (Knowledge Source to InfoRes) map
"""
self.infores_context.set_provenance_map(kwargs)
def get_infores_catalog(self) -> Dict[str, str]:
"""
Return the InfoRes Context of the source
"""
if not self.infores_context:
return dict()
return self.infores_context.get_catalog()
def set_node_provenance(self, node_data):
"""
Set a specific node provenance value.
"""
self.infores_context.set_node_provenance(node_data)
def set_edge_provenance(self, edge_data):
"""
Set a specific edge provenance value.
"""
self.infores_context.set_edge_provenance(edge_data)
def validate_node(self, node: Dict) -> Optional[Dict]:
"""
Given a node as a dictionary, check for required properties.
This method will return the node dictionary with default
assumptions applied, if any.
Parameters
----------
node: Dict
A node represented as a dict
Returns
-------
Dict
A node represented as a dict, with default assumptions applied.
"""
if "id" not in node or not node["id"]:
self.owner.log_error(
entity=str(node),
error_type=ErrorType.MISSING_NODE_PROPERTY,
message=f"Node missing 'id' property or empty 'id' value"
)
return None
if "name" not in node:
self.owner.log_error(
entity=node["id"],
error_type=ErrorType.MISSING_NODE_PROPERTY,
message=f"Node missing 'name' property",
message_level=MessageLevel.WARNING
)
if "category" not in node:
self.owner.log_error(
entity=node["id"],
error_type=ErrorType.MISSING_CATEGORY,
message=f"Node missing 'category' property? Using '{DEFAULT_NODE_CATEGORY}' as default.",
message_level=MessageLevel.WARNING
)
node["category"] = [DEFAULT_NODE_CATEGORY]
return node
def validate_edge(self, edge: Dict) -> Optional[Dict]:
"""
Given an edge as a dictionary, check for required properties.
This method will return the edge dictionary with default
assumptions applied, if any.
Parameters
----------
edge: Dict
An edge represented as a dict
Returns
-------
Dict
An edge represented as a dict, with default assumptions applied.
"""
incomplete_edge: bool = False
if "subject" not in edge or not edge["subject"]:
self.owner.log_error(
entity=str(edge),
error_type=ErrorType.MISSING_NODE,
message=f"Edge missing 'subject'?"
)
incomplete_edge = True
if "predicate" not in edge or not edge["predicate"]:
self.owner.log_error(
entity=str(edge),
error_type=ErrorType.MISSING_EDGE_PREDICATE,
message=f"Edge missing 'predicate'?"
)
incomplete_edge = True
if "object" not in edge or not edge["object"]:
self.owner.log_error(
entity=str(edge),
error_type=ErrorType.MISSING_NODE,
message=f"Edge missing 'object'?"
)
incomplete_edge = True
if not incomplete_edge:
return edge
else:
return None
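# --- Hedged usage sketch (editor's addition, not part of kgx) ---------------
# Exercising the filter and validation helpers on the base Source class, with a
# stub owner in place of the usual Transformer; identifiers are illustrative.
if __name__ == "__main__":
    class _StubOwner:
        def log_error(self, **kwargs):
            print("source error:", kwargs)
    s = Source(_StubOwner())
    s.set_node_filters({"category": {"biolink:Gene"}})
    print(s.check_node_filter({"id": "HGNC:11603", "category": ["biolink:Gene"]}))   # True
    print(s.check_node_filter({"id": "CHEBI:15365", "category": ["biolink:Drug"]}))  # False
    # validate_node warns about the missing 'name' and fills in a default category
    print(s.validate_node({"id": "MONDO:0005002"}))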
| 12,162 | 32.141689 | 105 |
py
|
kgx
|
kgx-master/kgx/source/neo_source.py
|
import itertools
import typing
from typing import Any, Dict, List, Optional, Iterator, Tuple, Generator
from neo4j import GraphDatabase, Neo4jDriver
from neo4j.graph import Node, Relationship
from kgx.config import get_logger
from kgx.source.source import Source
from kgx.utils.kgx_utils import (
generate_uuid,
generate_edge_key,
sanitize_import,
knowledge_provenance_properties,
)
log = get_logger()
class NeoSource(Source):
"""
NeoSource is responsible for reading data as records
from a Neo4j instance.
"""
def __init__(self, owner):
super().__init__(owner)
self.http_driver: Optional[Neo4jDriver] = None
self.session = None
self.node_count = 0
self.edge_count = 0
self.seen_nodes = set()
def _connect_db(self, uri: str, username: str, password: str):
self.http_driver = GraphDatabase.driver(
uri, auth=(username, password)
)
self.session = self.http_driver.session()
def parse(
self,
uri: str,
username: str,
password: str,
node_filters: Dict = None,
edge_filters: Dict = None,
start: int = 0,
end: int = None,
is_directed: bool = True,
page_size: int = 50000,
**kwargs: Any,
) -> typing.Generator:
"""
This method reads from Neo4j instance and yields records
Parameters
----------
uri: str
The URI for the Neo4j instance.
For example, http://localhost:7474
username: str
The username
password: str
The password
node_filters: Dict
Node filters
edge_filters: Dict
Edge filters
start: int
Number of records to skip before streaming
end: int
Total number of records to fetch
is_directed: bool
Whether or not the edges should be treated as directed
page_size: int
The size of each page/batch fetched from Neo4j (``50000``)
kwargs: Any
Any additional arguments
Returns
-------
Generator
A generator for records
"""
self._connect_db(uri, username, password)
self.set_provenance_map(kwargs)
kwargs["is_directed"] = is_directed
self.node_filters = node_filters
self.edge_filters = edge_filters
for page in self.get_pages(
self.get_nodes, start, end, page_size=page_size, **kwargs
):
yield from self.load_nodes(page)
for page in self.get_pages(
self.get_edges, start, end, page_size=page_size, **kwargs
):
yield from self.load_edges(page)
def count(self, is_directed: bool = True) -> int:
"""
Get the total count of records to be fetched from the Neo4j database.
Parameters
----------
is_directed: bool
Are edges directed or undirected.
``True``, by default, since edges in most cases are directed.
Returns
-------
int
The total count of records
"""
direction = "->" if is_directed else "-"
query = f"MATCH (s)-[p]{direction}(o)"
if self.edge_filters:
qs = []
if "subject_category" in self.edge_filters:
qs.append(
f"({self.format_edge_filter(self.edge_filters, 'subject_category', 's', ':', 'OR')})"
)
if "object_category" in self.edge_filters:
qs.append(
f"({self.format_edge_filter(self.edge_filters, 'object_category', 'o', ':', 'OR')})"
)
if "predicate" in self.edge_filters:
qs.append(
f"({self.format_edge_filter(self.edge_filters, 'predicate', 'p', '.')})"
)
for ksf in knowledge_provenance_properties:
if ksf in self.edge_filters:
qs.append(
f"({self.format_edge_filter(self.edge_filters, ksf, 'p', '.', 'OR')})"
)
query = " WHERE "
query += " AND ".join(qs)
query += f" RETURN COUNT(*) AS count"
log.debug(query)
query_result: Any
counts: int = 0
try:
query_result = self.session.run(query)
for result in query_result:
counts = result[0]
except Exception as e:
log.error(e)
return counts
def get_nodes(self, skip: int = 0, limit: int = 0, **kwargs: Any) -> List:
"""
Get a page of nodes from the Neo4j database.
Parameters
----------
skip: int
Records to skip
limit: int
Total number of records to query for
kwargs: Any
Any additional arguments
Returns
-------
List
A list of nodes
"""
query = f"MATCH (n)"
if self.node_filters:
qs = []
if "category" in self.node_filters:
qs.append(
f"({self.format_node_filter(self.node_filters, 'category', 'n', ':', 'OR')})"
)
if "provided_by" in self.node_filters:
qs.append(
f"({self.format_node_filter(self.node_filters, 'provided_by', 'n', '.', 'OR')})"
)
query += " WHERE "
query += " AND ".join(qs)
query += f" RETURN n SKIP {skip}"
if limit:
query += f" LIMIT {limit}"
log.debug(query)
nodes = []
try:
results = self.session.run(query)
if results:
nodes = [
{
"id": node[0].get('id', f"{node[0].id}"),
"name": node[0].get('name', ''),
"category": node[0].get('category', ['biolink:NamedThing'])
}
for node in results.values()
]
except Exception as e:
log.error(e)
return nodes
def get_edges(
self, skip: int = 0, limit: int = 0, is_directed: bool = True, **kwargs: Any
) -> List:
"""
Get a page of edges from the Neo4j database.
Parameters
----------
skip: int
Records to skip
limit: int
Total number of records to query for
is_directed: bool
Are edges directed or undirected (``True``, by default, since edges in most cases are directed)
kwargs: Any
Any additional arguments
Returns
-------
List
A list of 3-tuples
"""
direction = "->" if is_directed else "-"
query = f"MATCH (s)-[p]{direction}(o)"
if self.edge_filters:
qs = []
if "subject_category" in self.edge_filters:
qs.append(
f"({self.format_edge_filter(self.edge_filters, 'subject_category', 's', ':', 'OR')})"
)
if "object_category" in self.edge_filters:
qs.append(
f"({self.format_edge_filter(self.edge_filters, 'object_category', 'o', ':', 'OR')})"
)
if "predicate" in self.edge_filters:
qs.append(
f"({self.format_edge_filter(self.edge_filters, 'predicate', 'p', '.')})"
)
for ksf in knowledge_provenance_properties:
if ksf in self.edge_filters:
qs.append(
f"({self.format_edge_filter(self.edge_filters, ksf, 'p', '.', 'OR')})"
)
query += " WHERE "
query += " AND ".join(qs)
query += f" RETURN s, p, o SKIP {skip}"
if limit:
query += f" LIMIT {limit}"
log.debug(query)
edges = []
try:
results = self.session.run(
query
)
if results:
edges = list()
for entry in results.values():
edge = list()
# subject
edge.append(
{
"id": entry[0].get('id', f"{entry[0].id}"),
"name": entry[0].get('name', ''),
"category": entry[0].get('category', ['biolink:NamedThing'])
}
)
# edge
edge.append(
{
"subject": entry[1].get('subject', f"{entry[0].id}"),
"predicate": entry[1].get('predicate', "biolink:related_to"),
"relation": entry[1].get('relation', "biolink:related_to"),
"object": entry[1].get('object', f"{entry[2].id}")
}
)
# object
edge.append(
{
"id": entry[2].get('id', f"{entry[2].id}"),
"name": entry[2].get('name', ''),
"category": entry[2].get('category', ['biolink:NamedThing'])
}
)
edges.append(edge)
except Exception as e:
log.error(e)
return edges
def load_nodes(self, nodes: List) -> Generator:
"""
Load nodes into an instance of BaseGraph
Parameters
----------
nodes: List
A list of nodes
"""
for node_data in nodes:
if node_data["id"] not in self.seen_nodes:
node_data = self.load_node(node_data)
if not node_data:
continue
yield node_data
def load_node(self, node_data: Dict) -> Optional[Tuple]:
"""
Load node into an instance of BaseGraph
Parameters
----------
node_data: Dict
A node
Returns
-------
Tuple
A tuple with node ID and node data
"""
self.node_count += 1
self.seen_nodes.add(node_data["id"])
self.set_node_provenance(node_data)
node_data = self.validate_node(node_data)
if not node_data:
return None
node_data = sanitize_import(node_data.copy())
self.node_properties.update(node_data.keys())
return node_data["id"], node_data
    def load_edges(self, edges: List) -> Generator:
"""
Load edges into an instance of BaseGraph
Parameters
----------
edges: List
A list of edge records
"""
for record in edges:
self.edge_count += 1
subject_node = record[0]
edge = record[1]
object_node = record[2]
if "subject" not in edge:
edge["subject"] = subject_node["id"]
if "object" not in edge:
edge["object"] = object_node["id"]
s = self.load_node(subject_node)
if not s:
continue
o = self.load_node(object_node)
if not o:
continue
objs = [s, o]
edge_data = self.load_edge([s[1], edge, o[1]])
if not edge_data:
continue
objs.append(edge_data)
for o in objs:
yield o
def load_edge(self, edge_record: List) -> Tuple:
"""
Load an edge into an instance of BaseGraph
Parameters
----------
edge_record: List
            A 3-element record containing the subject node, edge data, and object node
Returns
-------
Tuple
A tuple with subject ID, object ID, edge key, and edge data
"""
subject_node = edge_record[0]
edge_data = edge_record[1]
object_node = edge_record[2]
self.set_edge_provenance(edge_data)
if "id" not in edge_data.keys():
edge_data["id"] = generate_uuid()
key = generate_edge_key(
subject_node["id"], edge_data["predicate"], object_node["id"]
)
edge_data = self.validate_edge(edge_data)
if not edge_data:
return ()
edge_data = sanitize_import(edge_data.copy())
self.edge_properties.update(edge_data.keys())
return subject_node["id"], object_node["id"], key, edge_data
def get_pages(
self,
query_function,
start: int = 0,
end: Optional[int] = None,
page_size: int = 50000,
**kwargs: Any,
) -> Iterator:
"""
Get pages of size ``page_size`` from Neo4j.
Returns an iterator of pages where number of pages is (``end`` - ``start``)/``page_size``
Parameters
----------
query_function: func
The function to use to fetch records. Usually this is ``self.get_nodes`` or ``self.get_edges``
start: int
Start for pagination
end: Optional[int]
End for pagination
page_size: int
            Size of each page (``50000``, by default)
kwargs: Dict
Any additional arguments that might be relevant for ``query_function``
Returns
-------
Iterator
An iterator for a list of records from Neo4j. The size of the list is ``page_size``
"""
        # itertools.count(0) counts up from zero and would run indefinitely without the
        # return statements below; the index i is what determines the offset of each page.
for i in itertools.count(0):
# First halt condition: page pointer exceeds the number of values allowed to be returned in total
skip = start + (page_size * i)
limit = page_size if end is None or skip + page_size <= end else end - skip
if limit <= 0:
return
# execute query_function to get records
records = query_function(skip=skip, limit=limit, **kwargs)
# Second halt condition: no more data available
if records:
"""
* Yield halts execution until next call
* Thus, the function continues execution upon next call
* Therefore, a new page is calculated before record is instantiated again
"""
yield records
else:
return
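    # Hedged illustration (comments only, not executed): with start=0, end=120000
    # and page_size=50000, successive iterations request
    #   skip=0,      limit=50000
    #   skip=50000,  limit=50000
    #   skip=100000, limit=20000
    # after which the computed limit drops to zero (or a query returns no records)
    # and the generator stops.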
@staticmethod
def format_node_filter(
node_filters: Dict,
key: str,
variable: Optional[str] = None,
prefix: Optional[str] = None,
op: Optional[str] = None,
) -> str:
"""
Get the value for node filter as defined by ``key``.
This is used as a convenience method for generating cypher queries.
Parameters
----------
node_filters: Dict
All node filters
key: str
Name of the node filter
variable: Optional[str]
Variable binding for cypher query
prefix: Optional[str]
Prefix for the cypher
op: Optional[str]
The operator
Returns
-------
str
Value corresponding to the given node filter ``key``, formatted for CQL
"""
value = ""
if key in node_filters and node_filters[key]:
if isinstance(node_filters[key], (list, set, tuple)):
if key in {"category"}:
formatted = [f"{variable}{prefix}`{x}`" for x in node_filters[key]]
value = f" {op} ".join(formatted)
elif key == "provided_by":
formatted = [
f"'{x}' IN {variable}{prefix}{'provided_by'}"
for x in node_filters["provided_by"]
]
value = f" {op} ".join(formatted)
else:
formatted = []
for v in node_filters[key]:
formatted.append(f"{variable}{prefix}{key} = '{v}'")
value = f" {op} ".join(formatted)
elif isinstance(node_filters[key], str):
value = f"{variable}{prefix}{key} = '{node_filters[key]}'"
else:
log.error(
f"Unexpected {key} node filter of type {type(node_filters[key])}"
)
return value
@staticmethod
def format_edge_filter(
edge_filters: Dict,
key: str,
variable: Optional[str] = None,
prefix: Optional[str] = None,
op: Optional[str] = None,
) -> str:
"""
Get the value for edge filter as defined by ``key``.
This is used as a convenience method for generating cypher queries.
Parameters
----------
edge_filters: Dict
All edge filters
key: str
Name of the edge filter
variable: Optional[str]
Variable binding for cypher query
prefix: Optional[str]
Prefix for the cypher
op: Optional[str]
The operator
Returns
-------
str
Value corresponding to the given edge filter ``key``, formatted for CQL
"""
value = ""
if key in edge_filters and edge_filters[key]:
if isinstance(edge_filters[key], (list, set, tuple)):
if key in {"subject_category", "object_category"}:
formatted = [f"{variable}{prefix}`{x}`" for x in edge_filters[key]]
value = f" {op} ".join(formatted)
elif key == "predicate":
formatted = [f"'{x}'" for x in edge_filters["predicate"]]
value = f"type({variable}) IN [{', '.join(formatted)}]"
elif key in knowledge_provenance_properties:
formatted = [
f"'{x}' IN {variable}{prefix}{key}" for x in edge_filters[key]
]
value = f" {op} ".join(formatted)
else:
formatted = []
for v in edge_filters[key]:
formatted.append(f"{variable}{prefix}{key} = '{v}'")
value = f" {op} ".join(formatted)
elif isinstance(edge_filters[key], str):
value = f"{variable}{prefix}{key} = '{edge_filters[key]}'"
else:
log.error(
f"Unexpected {key} edge filter of type {type(edge_filters[key])}"
)
return value
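# --- Hedged usage sketch (editor's addition, not part of kgx) ---------------
# format_edge_filter is a pure helper, so it can be exercised without a running
# Neo4j instance; the filter values below are illustrative only.
if __name__ == "__main__":
    filters = {
        "subject_category": {"biolink:Gene"},
        "predicate": {"biolink:interacts_with", "biolink:related_to"},
    }
    print(NeoSource.format_edge_filter(filters, "subject_category", "s", ":", "OR"))
    # -> something like: s:`biolink:Gene`
    print(NeoSource.format_edge_filter(filters, "predicate", "p", "."))
    # -> something like: type(p) IN ['biolink:interacts_with', 'biolink:related_to']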
| 18,963 | 30.872269 | 120 |
py
|
kgx
|
kgx-master/kgx/source/sssom_source.py
|
"""
KGX Source for Simple Standard for Sharing Ontology Mappings ("SSSOM")
"""
import gzip
import re
import typing
import pandas as pd
from typing import Optional, Generator, Any, Dict, Tuple
import yaml
from kgx.error_detection import ErrorType, MessageLevel
from kgx.prefix_manager import PrefixManager
from kgx.config import get_logger
from kgx.source import Source
from kgx.utils.kgx_utils import (
sanitize_import,
generate_uuid,
generate_edge_key,
)
from kgx.utils.rdf_utils import process_predicate
log = get_logger()
SSSOM_NODE_PROPERTY_MAPPING = {
"subject_id": "id",
"subject_category": "category",
"object_id": "id",
"object_category": "category",
}
class SssomSource(Source):
"""
SssomSource is responsible for reading data as records
from an SSSOM file.
"""
def __init__(self, owner):
super().__init__(owner)
self.predicate_mapping = {}
def set_prefix_map(self, m: Dict) -> None:
"""
Add or override default prefix to IRI map.
Parameters
----------
m: Dict
Prefix to IRI map
"""
self.prefix_manager.set_prefix_map(m)
def set_reverse_prefix_map(self, m: Dict) -> None:
"""
Add or override default IRI to prefix map.
Parameters
----------
m: Dict
IRI to prefix map
"""
self.prefix_manager.set_reverse_prefix_map(m)
def parse(
self,
filename: str,
format: str,
compression: Optional[str] = None,
**kwargs: Any,
) -> typing.Generator:
"""
Parse a SSSOM TSV
Parameters
----------
filename: str
File to read from
format: str
The input file format (``tsv``, by default)
compression: Optional[str]
The compression (``gz``)
kwargs: Dict
Any additional arguments
Returns
-------
Generator
A generator for node and edge records
"""
if "delimiter" not in kwargs:
kwargs["delimiter"] = "\t"
self.parse_header(filename, compression)
# SSSOM 'mapping provider' may override the default 'knowledge_source' setting?
if "mapping_provider" in self.graph_metadata:
kwargs["knowledge_source"] = self.graph_metadata["mapping_provider"]
self.set_provenance_map(kwargs)
if compression:
FH = gzip.open(filename, "rb")
else:
FH = open(filename)
file_iter = pd.read_csv(
FH,
comment="#",
dtype=str,
chunksize=10000,
low_memory=False,
keep_default_na=False,
**kwargs,
)
for chunk in file_iter:
yield from self.load_edges(chunk)
def parse_header(self, filename: str, compression: Optional[str] = None) -> None:
"""
Parse metadata from SSSOM headers.
Parameters
----------
filename: str
Filename to parse
compression: Optional[str]
Compression type
"""
yamlstr = ""
if compression:
FH = gzip.open(filename, "rb")
else:
FH = open(filename)
for line in FH:
if line.startswith("#"):
yamlstr += re.sub("^#", "", line)
else:
break
if yamlstr:
metadata = yaml.safe_load(yamlstr)
log.info(f"Metadata: {metadata}")
if "curie_map" in metadata:
self.prefix_manager.update_prefix_map(metadata["curie_map"])
for k, v in metadata.items():
self.graph_metadata[k] = v
def load_node(self, node_data: Dict) -> Optional[Tuple[str, Dict]]:
"""
Load a node into an instance of BaseGraph
Parameters
----------
node_data: Dict
A node
Returns
-------
Optional[Tuple[str, Dict]]
A tuple that contains node id and node data
"""
node_data = self.validate_node(node_data)
if not node_data:
return None
node_data = sanitize_import(node_data.copy())
if "id" in node_data:
n = node_data["id"]
self.set_node_provenance(node_data)
self.node_properties.update(list(node_data.keys()))
return n, node_data
else:
self.owner.log_error(
entity=str(node_data),
error_type=ErrorType.MISSING_NODE_PROPERTY,
message="Ignoring node with no 'id'",
message_level=MessageLevel.WARNING
)
def load_edges(self, df: pd.DataFrame) -> Generator:
"""
Load edges from pandas.DataFrame into an instance of BaseGraph
Parameters
----------
df : pandas.DataFrame
Dataframe containing records that represent edges
Returns
-------
Generator
A generator for edge records
"""
for obj in df.to_dict("records"):
yield from self.load_edge(obj)
def load_edge(self, edge: Dict) -> Generator:
"""
Load an edge into an instance of BaseGraph
Parameters
----------
edge : Dict
An edge
Returns
-------
Generator
A generator for node and edge records
"""
(element_uri, canonical_uri, predicate, property_name) = process_predicate(
self.prefix_manager, edge["predicate_id"], self.predicate_mapping
)
if element_uri:
edge_predicate = element_uri
elif predicate:
edge_predicate = predicate
else:
edge_predicate = property_name
if canonical_uri:
edge_predicate = element_uri
data = {
"subject": edge["subject_id"],
"predicate": edge_predicate,
"object": edge["object_id"],
}
del edge["predicate_id"]
data = self.validate_edge(data)
if not data:
return # ?
subject_node = {}
object_node = {}
for k, v in edge.items():
if k in SSSOM_NODE_PROPERTY_MAPPING:
if k.startswith("subject"):
mapped_k = SSSOM_NODE_PROPERTY_MAPPING[k]
if mapped_k == "category" and not PrefixManager.is_curie(v):
v = f"biolink:OntologyClass"
subject_node[mapped_k] = v
elif k.startswith("object"):
mapped_k = SSSOM_NODE_PROPERTY_MAPPING[k]
if mapped_k == "category" and not PrefixManager.is_curie(v):
v = f"biolink:OntologyClass"
object_node[mapped_k] = v
else:
log.info(f"Ignoring {k} {v}")
else:
data[k] = v
subject_node = self.load_node(subject_node)
object_node = self.load_node(object_node)
if not (subject_node and object_node):
return # ?
objs = [subject_node, object_node]
for k, v in self.graph_metadata.items():
if k not in {"curie_map"}:
data[k] = v
edge_data = sanitize_import(data.copy())
if "subject" in edge_data and "object" in edge_data:
if "id" not in edge_data:
edge_data["id"] = generate_uuid()
s = edge_data["subject"]
o = edge_data["object"]
self.set_edge_provenance(edge_data)
key = generate_edge_key(s, edge_data["predicate"], o)
self.edge_properties.update(list(edge_data.keys()))
objs.append((s, o, key, edge_data))
else:
self.owner.log_error(
entity=str(edge_data),
error_type=ErrorType.MISSING_NODE,
message="Ignoring edge with either a missing 'subject' or 'object'",
message_level=MessageLevel.WARNING
)
for o in objs:
yield o
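# --- Hedged usage sketch (editor's addition, not part of kgx) ---------------
# Reading an SSSOM mapping file; "mappings.sssom.tsv" is a hypothetical path and
# _StubOwner replaces the owning Transformer.
if __name__ == "__main__":
    class _StubOwner:
        def log_error(self, **kwargs):
            print("source error:", kwargs)
    src = SssomSource(_StubOwner())
    for rec in src.parse("mappings.sssom.tsv", format="tsv"):
        # subject/object node tuples and the mapping edge tuple are interleaved
        print(rec)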
| 8,221 | 27.061433 | 87 |
py
|
kgx
|
kgx-master/kgx/source/__init__.py
|
from .source import Source
from .tsv_source import TsvSource
from .json_source import JsonSource
from .jsonl_source import JsonlSource
from .obograph_source import ObographSource
from .trapi_source import TrapiSource
from .neo_source import NeoSource
from .rdf_source import RdfSource
from .graph_source import GraphSource
from .owl_source import OwlSource
from .sssom_source import SssomSource
| 395 | 32 | 43 |
py
|
kgx
|
kgx-master/kgx/source/obograph_source.py
|
import gzip
import tarfile
import typing
from itertools import chain
from typing import Optional, Tuple, Dict, Generator, Any
import ijson
import stringcase
import inflection
from bmt import Toolkit
from kgx.error_detection import ErrorType, MessageLevel
from kgx.prefix_manager import PrefixManager
from kgx.config import get_logger
from kgx.source.json_source import JsonSource
from kgx.utils.kgx_utils import get_biolink_element, format_biolink_slots
log = get_logger()
class ObographSource(JsonSource):
"""
ObographSource is responsible for reading data as records
from an OBO Graph JSON.
"""
HAS_OBO_NAMESPACE = "http://www.geneontology.org/formats/oboInOwl#hasOBONamespace"
SKOS_EXACT_MATCH = "http://www.w3.org/2004/02/skos/core#exactMatch"
def __init__(self, owner):
super().__init__(owner)
self.toolkit = Toolkit()
self.ecache: Dict = {}
def parse(
self,
filename: str,
format: str = "json",
compression: Optional[str] = None,
**kwargs: Any,
) -> typing.Generator:
"""
This method reads from JSON and yields records.
Parameters
----------
filename: str
The filename to parse
format: str
The format (``json``)
compression: Optional[str]
The compression type (``gz``)
kwargs: Any
Any additional arguments
Returns
-------
Generator
A generator for records
"""
self.set_provenance_map(kwargs)
n = self.read_nodes(filename, compression)
e = self.read_edges(filename, compression)
yield from chain(n, e)
def read_nodes(self, filename: str, compression: Optional[str] = None) -> Generator:
"""
Read node records from a JSON.
Parameters
----------
filename: str
The filename to read from
compression: Optional[str]
The compression type
Returns
-------
Generator
A generator for node records
"""
if compression and compression == "gz":
FH = gzip.open(filename, "rb")
else:
FH = open(filename, "rb")
for n in ijson.items(FH, "graphs.item.nodes.item"):
yield self.read_node(n)
def read_node(self, node: Dict) -> Optional[Tuple[str, Dict]]:
"""
Read and parse a node record.
Parameters
----------
node: Dict
The node record
Returns
-------
Dict
The processed node
"""
curie = self.prefix_manager.contract(node["id"])
node_properties = {}
if "meta" in node:
node_properties = self.parse_meta(node["id"], node["meta"])
fixed_node = dict()
fixed_node["id"] = curie
if "lbl" in node:
fixed_node["name"] = node["lbl"]
fixed_node["iri"] = node["id"]
if "description" in node_properties:
fixed_node["description"] = node_properties["description"]
if "synonym" in node_properties:
fixed_node["synonym"] = node_properties["synonym"]
if "xrefs" in node_properties:
fixed_node["xref"] = node_properties["xrefs"]
if "subsets" in node_properties:
fixed_node["subsets"] = node_properties["subsets"]
if "category" not in node:
category = self.get_category(curie, node)
if category:
fixed_node["category"] = [category]
else:
fixed_node["category"] = ["biolink:OntologyClass"]
if "equivalent_nodes" in node_properties:
equivalent_nodes = node_properties["equivalent_nodes"]
fixed_node["same_as"] = equivalent_nodes
return super().read_node(fixed_node)
def read_edges(self, filename: str, compression: Optional[str] = None) -> Generator:
"""
Read edge records from a JSON.
Parameters
----------
filename: str
The filename to read from
compression: Optional[str]
The compression type
Returns
-------
Generator
A generator for edge records
"""
if compression == "gz":
FH = gzip.open(filename, "rb")
else:
FH = open(filename, "rb")
for e in ijson.items(FH, "graphs.item.edges.item"):
yield self.read_edge(e)
def read_edge(self, edge: Dict) -> Optional[Tuple]:
"""
Read and parse an edge record.
Parameters
----------
edge: Dict
The edge record
Returns
-------
Dict
The processed edge
"""
fixed_edge = dict()
fixed_edge["subject"] = self.prefix_manager.contract(edge["sub"])
if PrefixManager.is_iri(edge["pred"]):
curie = self.prefix_manager.contract(edge["pred"])
if curie in self.ecache:
edge_predicate = self.ecache[curie]
else:
element = get_biolink_element(curie)
if not element:
try:
mapping = self.toolkit.get_element_by_mapping(edge["pred"])
if mapping:
element = self.toolkit.get_element(mapping)
except ValueError as e:
self.owner.log_error(
entity=str(edge["pred"]),
error_type=ErrorType.INVALID_EDGE_PREDICATE,
message=str(e)
)
element = None
if element:
edge_predicate = format_biolink_slots(element.name.replace(",", ""))
fixed_edge["predicate"] = edge_predicate
else:
edge_predicate = "biolink:related_to"
self.ecache[curie] = edge_predicate
fixed_edge["predicate"] = edge_predicate
fixed_edge["relation"] = curie
else:
if edge["pred"] == "is_a":
fixed_edge["predicate"] = "biolink:subclass_of"
fixed_edge["relation"] = "rdfs:subClassOf"
elif edge["pred"] == "has_part":
fixed_edge["predicate"] = "biolink:has_part"
fixed_edge["relation"] = "BFO:0000051"
elif edge["pred"] == "part_of":
fixed_edge["predicate"] = "biolink:part_of"
fixed_edge["relation"] = "BFO:0000050"
else:
fixed_edge["predicate"] = f"biolink:{edge['pred'].replace(' ', '_')}"
fixed_edge["relation"] = edge["pred"]
fixed_edge["object"] = self.prefix_manager.contract(edge["obj"])
for x in edge.keys():
if x not in {"sub", "pred", "obj"}:
fixed_edge[x] = edge[x]
return super().read_edge(fixed_edge)
def get_category(self, curie: str, node: dict) -> Optional[str]:
"""
Get category for a given CURIE.
Parameters
----------
curie: str
Curie for node
node: dict
Node data
Returns
-------
Optional[str]
Category for the given node CURIE.
"""
category = None
# use meta.basicPropertyValues
if "meta" in node and "basicPropertyValues" in node["meta"]:
for p in node["meta"]["basicPropertyValues"]:
if p["pred"] == self.HAS_OBO_NAMESPACE:
category = p["val"]
element = self.toolkit.get_element(category)
if element:
if "OBO" in element.name:
category = f"biolink:{inflection.camelize(inflection.underscore(element.name))}"
else:
category = f"biolink:{inflection.camelize(stringcase.snakecase(element.name))}"
else:
element = self.toolkit.get_element_by_mapping(category)
if element:
if "OBO" in element:
category = f"biolink:{inflection.camelize(inflection.underscore(element))}"
else:
category = f"biolink:{inflection.camelize(stringcase.snakecase(element))}"
else:
category = "biolink:OntologyClass"
if not category or category == "biolink:OntologyClass":
prefix = PrefixManager.get_prefix(curie)
if prefix == "HP":
category = "biolink:PhenotypicFeature"
elif prefix == "CHEBI":
category = "biolink:ChemicalSubstance"
elif prefix == "MONDO":
category = "biolink:Disease"
elif prefix == "UBERON":
category = "biolink:AnatomicalEntity"
elif prefix == "SO":
category = "biolink:SequenceFeature"
elif prefix == "CL":
category = "biolink:Cell"
elif prefix == "PR":
category = "biolink:Protein"
elif prefix == "NCBITaxon":
category = "biolink:OrganismalEntity"
else:
self.owner.log_error(
entity=f"{str(category)} for node {curie}",
error_type=ErrorType.MISSING_CATEGORY,
message=f"Missing category; Defaulting to 'biolink:OntologyClass'",
message_level=MessageLevel.WARNING
)
return category
def parse_meta(self, node: str, meta: Dict) -> Dict:
"""
Parse 'meta' field of a node.
Parameters
----------
node: str
Node identifier
meta: Dict
meta dictionary for the node
Returns
-------
Dict
            A dictionary that may contain 'description', 'subsets', 'synonym',
            'xrefs', 'deprecated', and 'equivalent_nodes'.
"""
# cross species links are in meta; this needs to be parsed properly too
# do not put assumptions in code; import as much as possible
properties = {}
if "definition" in meta:
# parse 'definition' as 'description'
description = meta["definition"]["val"]
properties["description"] = description
if "subsets" in meta:
# parse 'subsets'
subsets = meta["subsets"]
properties["subsets"] = [
x.split("#")[1] if "#" in x else x for x in subsets
]
if "synonyms" in meta:
# parse 'synonyms' as 'synonym'
synonyms = [s["val"] for s in meta["synonyms"]]
properties["synonym"] = synonyms
if "xrefs" in meta:
# parse 'xrefs' as 'xrefs'
xrefs = [x["val"] for x in meta["xrefs"]]
properties["xrefs"] = xrefs
if "deprecated" in meta:
# parse 'deprecated' flag
properties["deprecated"] = meta["deprecated"]
equivalent_nodes = []
if "basicPropertyValues" in meta:
# parse SKOS_EXACT_MATCH entries as 'equivalent_nodes'
for p in meta["basicPropertyValues"]:
if p["pred"] in {self.SKOS_EXACT_MATCH}:
n = self.prefix_manager.contract(p["val"])
if not n:
n = p["val"]
equivalent_nodes.append(n)
properties["equivalent_nodes"] = equivalent_nodes
return properties
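# --- Illustrative sketch (not part of the original module) -------------------
# A minimal example of the 'meta' block that parse_meta() consumes and the
# properties dict it is expected to build, following the branches above. The
# exact IRI behind SKOS_EXACT_MATCH and the CURIE contraction performed by the
# prefix manager are assumptions here, not guarantees of this module.
_EXAMPLE_META = {
    "definition": {"val": "A disease of the eye."},
    "subsets": ["http://purl.obolibrary.org/obo/mondo#rare"],
    "synonyms": [{"val": "ocular disease"}],
    "xrefs": [{"val": "UMLS:C0015397"}],
    "deprecated": False,
    "basicPropertyValues": [
        {
            "pred": "http://www.w3.org/2004/02/skos/core#exactMatch",  # assumed value of SKOS_EXACT_MATCH
            "val": "http://purl.obolibrary.org/obo/DOID_0000000",
        }
    ],
}
_EXPECTED_PROPERTIES = {
    "description": "A disease of the eye.",
    "subsets": ["rare"],  # only the fragment after '#' is kept
    "synonym": ["ocular disease"],
    "xrefs": ["UMLS:C0015397"],
    "deprecated": False,
    "equivalent_nodes": ["DOID:0000000"],  # assuming the prefix manager contracts the IRI
}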
| 11,785 | 32.770774 | 108 |
py
|
kgx
|
kgx-master/kgx/source/tsv_source.py
|
import re
import tarfile
import typing
from typing import Dict, Tuple, Any, Generator, Optional, List
import pandas as pd
from kgx.config import get_logger
from kgx.source.source import Source
from kgx.utils.kgx_utils import (
generate_uuid,
generate_edge_key,
extension_types,
archive_read_mode,
sanitize_import
)
log = get_logger()
DEFAULT_LIST_DELIMITER = "|"
class TsvSource(Source):
"""
TsvSource is responsible for reading data as records
from a TSV/CSV.
"""
def __init__(self, owner):
super().__init__(owner)
self.list_delimiter = DEFAULT_LIST_DELIMITER
def set_prefix_map(self, m: Dict) -> None:
"""
Add or override default prefix to IRI map.
Parameters
----------
m: Dict
Prefix to IRI map
"""
self.prefix_manager.set_prefix_map(m)
def set_reverse_prefix_map(self, m: Dict) -> None:
"""
Add or override default IRI to prefix map.
Parameters
----------
m: Dict
IRI to prefix map
"""
self.prefix_manager.set_reverse_prefix_map(m)
def parse(
self,
filename: str,
format: str,
compression: Optional[str] = None,
**kwargs: Any,
) -> typing.Generator:
"""
This method reads from a TSV/CSV and yields records.
Parameters
----------
filename: str
The filename to parse
format: str
The format (``tsv``, ``csv``)
compression: Optional[str]
The compression type (``tar``, ``tar.gz``)
kwargs: Any
Any additional arguments
Returns
-------
Generator
A generator for node and edge records
"""
if "delimiter" not in kwargs:
# infer delimiter from file format
kwargs["delimiter"] = extension_types[format]
if "lineterminator" not in kwargs:
# set '\n' to be the default line terminator to prevent
# truncation of lines due to hidden/escaped carriage returns
kwargs["lineterminator"] = "\n"
if "list_delimeter" in kwargs:
self.list_delimiter = kwargs["list_delimiter"]
mode = (
archive_read_mode[compression] if compression in archive_read_mode else None
)
self.set_provenance_map(kwargs)
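        # quoting=3 is csv.QUOTE_NONE: quote characters inside TSV fields are treated as literal text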
if format == "tsv":
kwargs["quoting"] = 3
if mode:
with tarfile.open(filename, mode=mode) as tar:
                # Alas, the order of tar file members matters in some streaming operations
                # (e.g. graph-summary and validation): generally, all the node files need to be
                # loaded first, before the associated edge files can be loaded and analysed.
# Start by partitioning files of each type into separate lists
node_files: List[str] = list()
edge_files: List[str] = list()
for name in tar.getnames():
if re.search(f"nodes.{format}", name):
node_files.append(name)
elif re.search(f"edges.{format}", name):
edge_files.append(name)
else:
# This used to throw an exception but perhaps we should simply ignore it.
log.warning(
f"Tar archive contains an unrecognized file: {name}. Skipped..."
)
# Then, first extract and capture contents of the nodes files...
for name in node_files:
try:
member = tar.getmember(name)
except KeyError:
log.warning(
f"Node file {name} member in archive {filename} could not be accessed? Skipped?"
)
continue
f = tar.extractfile(member)
file_iter = pd.read_csv(
f,
dtype=str,
chunksize=10000,
low_memory=False,
keep_default_na=False,
**kwargs,
)
for chunk in file_iter:
self.node_properties.update(chunk.columns)
yield from self.read_nodes(chunk)
# Next, extract and capture contents of the edges files...
for name in edge_files:
try:
member = tar.getmember(name)
except KeyError:
log.warning(
f"Edge file {name} member in archive {filename} could not be accessed? Skipped?"
)
continue
f = tar.extractfile(member)
file_iter = pd.read_csv(
f,
dtype=str,
chunksize=10000,
low_memory=False,
keep_default_na=False,
**kwargs,
)
for chunk in file_iter:
self.edge_properties.update(chunk.columns)
yield from self.read_edges(chunk)
else:
file_iter = pd.read_csv(
filename,
dtype=str,
chunksize=10000,
low_memory=False,
keep_default_na=False,
**kwargs,
)
if re.search(f"nodes.{format}", filename):
for chunk in file_iter:
self.node_properties.update(chunk.columns)
yield from self.read_nodes(chunk)
elif re.search(f"edges.{format}", filename):
for chunk in file_iter:
self.edge_properties.update(chunk.columns)
yield from self.read_edges(chunk)
else:
# This used to throw an exception but perhaps we should simply ignore it.
log.warning(
f"Parse function cannot resolve the KGX file type in name {filename}. Skipped..."
)
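    # Record shapes produced by this source (see read_node/read_edge below):
    #   node records are (node_id, node_data) 2-tuples and
    #   edge records are (subject_id, object_id, edge_key, edge_data) 4-tuples.
    # Downstream consumers typically tell them apart by tuple length.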
def read_nodes(self, df: pd.DataFrame) -> Generator:
"""
Read records from pandas.DataFrame and yield records.
Parameters
----------
df: pandas.DataFrame
Dataframe containing records that represent nodes
Returns
-------
Generator
A generator for node records
"""
for obj in df.to_dict("records"):
yield self.read_node(obj)
def read_node(self, node: Dict) -> Optional[Tuple[str, Dict]]:
"""
Prepare a node.
Parameters
----------
node: Dict
A node
Returns
-------
Optional[Tuple[str, Dict]]
A tuple that contains node id and node data
"""
node = self.validate_node(node)
if node:
# if not None, assumed to have an "id" here...
node_data = sanitize_import(node.copy(), self.list_delimiter)
n = node_data["id"]
self.set_node_provenance(node_data) # this method adds provided_by to the node properties/node data
self.node_properties.update(list(node_data.keys()))
if self.check_node_filter(node_data):
self.node_properties.update(node_data.keys())
return n, node_data
def read_edges(self, df: pd.DataFrame) -> Generator:
"""
        Read edge records from a pandas.DataFrame and yield them.
Parameters
----------
df: pandas.DataFrame
Dataframe containing records that represent edges
Returns
-------
Generator
A generator for edge records
"""
for obj in df.to_dict("records"):
yield self.read_edge(obj)
def read_edge(self, edge: Dict) -> Optional[Tuple]:
"""
        Read and prepare an edge record.
Parameters
----------
edge: Dict
An edge
Returns
-------
Optional[Tuple]
A tuple that contains subject id, object id, edge key, and edge data
"""
edge = self.validate_edge(edge)
if not edge:
return None
edge_data = sanitize_import(edge.copy(), self.list_delimiter)
if "id" not in edge_data:
edge_data["id"] = generate_uuid()
s = edge_data["subject"]
o = edge_data["object"]
self.set_edge_provenance(edge_data)
key = generate_edge_key(s, edge_data["predicate"], o)
self.edge_properties.update(list(edge_data.keys()))
if self.check_edge_filter(edge_data):
self.edge_properties.update(edge_data.keys())
return s, o, key, edge_data
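# --- Illustrative sketch (not part of the original module) -------------------
# parse() above reads node/edge files with pandas in fixed-size chunks, forcing
# string dtypes and disabling NA coercion so empty cells stay empty strings.
# The self-contained snippet below reproduces that reading pattern on an
# in-memory TSV; the column names and values are made up for illustration.
if __name__ == "__main__":
    import io
    _tsv = "id\tname\tcategory\nHGNC:11603\tTBX4\tbiolink:Gene\n"
    _chunks = pd.read_csv(
        io.StringIO(_tsv),
        sep="\t",
        dtype=str,
        chunksize=10000,
        keep_default_na=False,
    )
    for _chunk in _chunks:
        for _record in _chunk.to_dict("records"):
            # a TsvSource would hand each record to read_node()/read_edge()
            print(_record)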
| 9,150 | 31.917266 | 112 |
py
|
kgx
|
kgx-master/tests/__init__.py
|
import os
import pprint
RESOURCE_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), "resources")
TARGET_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), "target")
def print_graph(g):
pprint.pprint([x for x in g.nodes(data=True)])
pprint.pprint([x for x in g.edges(data=True)])
| 314 | 27.636364 | 84 |
py
|
kgx
|
kgx-master/tests/unit/test_graph_merge.py
|
from kgx.graph.nx_graph import NxGraph
from kgx.graph_operations.graph_merge import (
merge_all_graphs,
merge_graphs,
merge_node,
merge_edge,
)
def get_graphs():
"""
Returns instances of defined graphs.
"""
g1 = NxGraph()
g1.name = "Graph 1"
g1.add_node("A", id="A", name="Node A", category=["biolink:NamedThing"])
g1.add_node("B", id="B", name="Node B", category=["biolink:NamedThing"])
g1.add_node("C", id="C", name="Node C", category=["biolink:NamedThing"])
g1.add_edge(
"C",
"B",
edge_key="C-biolink:subclass_of-B",
edge_label="biolink:sub_class_of",
relation="rdfs:subClassOf",
)
g1.add_edge(
"B",
"A",
edge_key="B-biolink:subclass_of-A",
edge_label="biolink:sub_class_of",
relation="rdfs:subClassOf",
provided_by="Graph 1",
)
g2 = NxGraph()
g2.name = "Graph 2"
g2.add_node(
"A",
id="A",
name="Node A",
description="Node A in Graph 2",
category=["biolink:NamedThing"],
)
g2.add_node(
"B",
id="B",
name="Node B",
description="Node B in Graph 2",
category=["biolink:NamedThing"],
)
g2.add_node(
"C",
id="C",
name="Node C",
description="Node C in Graph 2",
category=["biolink:NamedThing"],
)
g2.add_node(
"D",
id="D",
name="Node D",
description="Node D in Graph 2",
category=["biolink:NamedThing"],
)
g2.add_node(
"E",
id="E",
name="Node E",
description="Node E in Graph 2",
category=["biolink:NamedThing"],
)
g2.add_edge(
"B",
"A",
edge_key="B-biolink:subclass_of-A",
edge_label="biolink:subclass_of",
relation="rdfs:subClassOf",
provided_by="Graph 2",
)
g2.add_edge(
"B",
"A",
edge_key="B-biolink:related_to-A",
edge_label="biolink:related_to",
relation="biolink:related_to",
)
g2.add_edge(
"D",
"A",
edge_key="D-biolink:related_to-A",
edge_label="biolink:related_to",
relation="biolink:related_to",
)
g2.add_edge(
"E",
"A",
edge_key="E-biolink:related_to-A",
edge_label="biolink:related_to",
relation="biolink:related_to",
)
g3 = NxGraph()
g3.name = "Graph 3"
g3.add_edge(
"F",
"E",
edge_key="F-biolink:same_as-E",
edge_label="biolink:same_as",
relation="OWL:same_as",
)
return [g1, g2, g3]
def test_merge_all_graphs():
"""
    Test for merging three graphs into one, both with and without
    preserving conflicting node and edge properties.
"""
graphs = get_graphs()
# merge while preserving conflicting nodes and edges
merged_graph = merge_all_graphs(graphs, preserve=True)
assert merged_graph.number_of_nodes() == 6
assert merged_graph.number_of_edges() == 6
assert merged_graph.name == "Graph 2"
data = merged_graph.nodes()["A"]
assert data["name"] == "Node A"
assert data["description"] == "Node A in Graph 2"
edges = merged_graph.get_edge("B", "A")
assert len(edges) == 2
data = list(edges.values())[0]
assert len(data["provided_by"]) == 2
assert data["provided_by"] == ["Graph 2", "Graph 1"]
graphs = get_graphs()
# merge while not preserving conflicting nodes and edges
merged_graph = merge_all_graphs(graphs, preserve=False)
assert merged_graph.number_of_nodes() == 6
assert merged_graph.number_of_edges() == 6
assert merged_graph.name == "Graph 2"
data = merged_graph.nodes()["A"]
assert data["name"] == "Node A"
assert data["description"] == "Node A in Graph 2"
edges = merged_graph.get_edge("B", "A")
assert len(edges) == 2
data = list(edges.values())[0]
assert isinstance(data["provided_by"], list)
assert "Graph 1" in data["provided_by"]
assert "Graph 2" in data["provided_by"]
def test_merge_graphs():
"""
Test for merging 3 graphs into one,
while not preserving conflicting node and edge properties.
"""
graphs = get_graphs()
merged_graph = merge_graphs(NxGraph(), graphs)
assert merged_graph.number_of_nodes() == 6
assert merged_graph.number_of_edges() == 6
assert merged_graph.name not in [x.name for x in graphs]
def test_merge_node():
"""
Test merging of a node into a graph.
"""
graphs = get_graphs()
g = graphs[0]
node = g.nodes()["A"]
new_data = node.copy()
new_data["subset"] = "test"
new_data["source"] = "KGX"
new_data["category"] = ["biolink:InformationContentEntity"]
new_data["description"] = "Node A modified by merge operation"
node = merge_node(g, node["id"], new_data, preserve=True)
assert node["id"] == "A"
assert node["name"] == "Node A"
assert node["description"] == "Node A modified by merge operation"
assert "subset" in node and node["subset"] == "test"
assert "source" in node and node["source"] == "KGX"
def test_merge_edge():
"""
Test merging of an edge into a graph.
"""
graphs = get_graphs()
g = graphs[1]
edge = g.get_edge("E", "A")
new_data = edge.copy()
new_data["provided_by"] = "KGX"
new_data["evidence"] = "PMID:123456"
edge = merge_edge(g, "E", "A", "E-biolink:related_to-A", new_data, preserve=True)
assert edge["edge_label"] == "biolink:related_to"
assert edge["relation"] == "biolink:related_to"
assert "KGX" in edge["provided_by"]
assert edge["evidence"] == "PMID:123456"
| 5,684 | 26.463768 | 85 |
py
|
kgx
|
kgx-master/tests/unit/test_config.py
|
import pytest
from kgx.config import get_biolink_model_schema
def test_valid_biolink_version():
try:
schema = get_biolink_model_schema("v3.2.1")
except TypeError as te:
assert False, "test failure!"
assert (
schema
== "https://raw.githubusercontent.com/biolink/biolink-model/v3.2.1/biolink-model.yaml"
)
def test_valid_biolink_version_no_v():
try:
schema = get_biolink_model_schema("2.0.1")
except TypeError as te:
assert False, "test failure!"
assert (
schema
== "https://raw.githubusercontent.com/biolink/biolink-model/2.0.1/biolink-model.yaml"
)
def test_invalid_biolink_version():
try:
schema = get_biolink_model_schema()
except TypeError as te:
assert (
True
), "Type error expected: passed the invalid non-semver, type error: " + str(te)
| 891 | 24.485714 | 94 |
py
|
kgx
|
kgx-master/tests/unit/test_graph_operations.py
|
import pytest
from kgx.graph.nx_graph import NxGraph
from kgx.graph_operations import (
remove_singleton_nodes,
fold_predicate,
unfold_node_property,
remap_edge_property,
remap_node_property,
remap_node_identifier,
)
def get_graphs1():
"""
Returns instances of defined graphs.
"""
g1 = NxGraph()
g1.add_edge("B", "A", **{"predicate": "biolink:sub_class_of"})
g1.add_edge("C", "B", **{"predicate": "biolink:sub_class_of"})
g1.add_edge("D", "C", **{"predicate": "biolink:sub_class_of"})
g1.add_edge("D", "A", **{"predicate": "biolink:related_to"})
g1.add_edge("E", "D", **{"predicate": "biolink:sub_class_of"})
g1.add_edge("F", "D", **{"predicate": "biolink:sub_class_of"})
g2 = NxGraph()
g2.name = "Graph 1"
g2.add_node(
"HGNC:12345",
id="HGNC:12345",
name="Test Gene",
category=["biolink:NamedThing"],
alias="NCBIGene:54321",
same_as="UniProtKB:54321",
)
g2.add_node("B", id="B", name="Node B", category=["biolink:NamedThing"], alias="Z")
g2.add_node("C", id="C", name="Node C", category=["biolink:NamedThing"])
g2.add_edge(
"C",
"B",
edge_key="C-biolink:subclass_of-B",
subject="C",
object="B",
predicate="biolink:subclass_of",
relation="rdfs:subClassOf",
provided_by="Graph 1",
publications=[1],
pubs=["PMID:123456"],
)
g2.add_edge(
"B",
"A",
edge_key="B-biolink:subclass_of-A",
subject="B",
object="A",
predicate="biolink:subclass_of",
relation="rdfs:subClassOf",
provided_by="Graph 1",
)
g2.add_edge(
"C",
"c",
edge_key="C-biolink:exact_match-B",
subject="C",
object="c",
predicate="biolink:exact_match",
relation="skos:exactMatch",
provided_by="Graph 1",
)
return [g1, g2]
def get_graphs2():
"""
Returns instances of defined graphs.
"""
g1 = NxGraph()
g1.name = "Graph 1"
g1.add_node(
"HGNC:12345",
id="HGNC:12345",
name="Test Gene",
category=["biolink:NamedThing"],
alias="NCBIGene:54321",
same_as="UniProtKB:54321",
)
g1.add_node("B", id="B", name="Node B", category=["biolink:NamedThing"], alias="Z")
g1.add_node("C", id="C", name="Node C", category=["biolink:NamedThing"])
g1.add_edge(
"C",
"B",
edge_key="C-biolink:subclass_of-B",
subject="C",
object="B",
predicate="biolink:subclass_of",
relation="rdfs:subClassOf",
provided_by="Graph 1",
publications=[1],
pubs=["PMID:123456"],
)
g1.add_edge(
"B",
"A",
edge_key="B-biolink:subclass_of-A",
subject="B",
object="A",
predicate="biolink:subclass_of",
relation="rdfs:subClassOf",
provided_by="Graph 1",
)
g2 = NxGraph()
g2.name = "Graph 2"
g2.add_node(
"A",
id="A",
name="Node A",
description="Node A in Graph 2",
category=["biolink:Gene"],
xref=["NCBIGene:12345", "HGNC:001033"],
)
g2.add_node(
"B",
id="B",
name="Node B",
description="Node B in Graph 2",
category=["biolink:Gene"],
xref=["NCBIGene:56463", "HGNC:012901"],
)
g2.add_node(
"C",
id="C",
name="Node C",
description="Node C in Graph 2",
category=["biolink:Gene", "biolink:NamedThing"],
xref=["NCBIGene:08239", "HGNC:103431"],
)
g2.add_node(
"D",
id="D",
name="Node D",
description="Node D in Graph 2",
category=["biolink:Gene"],
xref=["HGNC:394233"],
)
g2.add_node(
"E",
id="E",
name="Node E",
description="Node E in Graph 2",
category=["biolink:NamedThing"],
xref=["NCBIGene:X", "HGNC:X"],
)
g2.add_node(
"F",
id="F",
name="Node F",
description="Node F in Graph 2",
category=["biolink:NamedThing"],
xref=["HGNC:Y"],
)
g2.add_edge(
"B",
"A",
edge_key="B-biolink:subclass_of-A",
subject="B",
object="A",
predicate="biolink:subclass_of",
relation="rdfs:subClassOf",
provided_by="Graph 2",
)
g2.add_edge(
"B",
"A",
edge_key="B-biolink:related_to-A",
subject="B",
object="A",
predicate="biolink:related_to",
relation="biolink:related_to",
)
g2.add_edge(
"D",
"A",
edge_key="D-biolink:related_to-A",
subject="D",
object="A",
predicate="biolink:related_to",
relation="biolink:related_to",
)
g2.add_edge(
"E",
"A",
edge_key="E-biolink:related_to-A",
subject="E",
object="A",
predicate="biolink:related_to",
relation="biolink:related_to",
)
g2.add_edge(
"E",
"F",
edge_key="F-biolink:related_to-A",
subject="E",
object="F",
predicate="biolink:related_to",
relation="biolink:related_to",
)
return [g1, g2]
def test_fold_predicate1():
"""
Test the fold_predicate operation.
"""
g = get_graphs1()[1]
fold_predicate(g, "biolink:exact_match")
assert not g.has_edge("C", "c")
n = g.nodes(data=True)["C"]
assert "biolink:exact_match" in n and n["biolink:exact_match"] == "c"
def test_fold_predicate2():
"""
Test the fold predicate operation, where the prefix of
the predicate is removed.
"""
g = get_graphs1()[1]
fold_predicate(g, "biolink:exact_match", remove_prefix=True)
assert not g.has_edge("C", "c")
n = g.nodes(data=True)["C"]
assert "exact_match" in n and n["exact_match"] == "c"
def test_unfold_node_property1():
"""Test the unfold node property operation."""
g = get_graphs1()[1]
unfold_node_property(g, "same_as")
assert "same_as" not in g.nodes()["HGNC:12345"]
assert g.has_edge("HGNC:12345", "UniProtKB:54321")
e = list(dict(g.get_edge("HGNC:12345", "UniProtKB:54321")).values())[0]
assert "subject" in e and e["subject"] == "HGNC:12345"
assert "predicate" in e and e["predicate"] == "same_as"
assert "object" in e and e["object"] == "UniProtKB:54321"
def test_unfold_node_property2():
"""
Test the unfold node property operation, where the prefix of
the predicate is added explicitly.
"""
g = get_graphs1()[1]
unfold_node_property(g, "same_as", prefix="biolink")
assert "same_as" not in g.nodes()["HGNC:12345"]
assert g.has_edge("HGNC:12345", "UniProtKB:54321")
e = list(dict(g.get_edge("HGNC:12345", "UniProtKB:54321")).values())[0]
assert "subject" in e and e["subject"] == "HGNC:12345"
assert "predicate" in e and e["predicate"] == "biolink:same_as"
assert "object" in e and e["object"] == "UniProtKB:54321"
def test_remove_singleton_nodes():
"""
Test the remove singleton nodes operation.
"""
g = NxGraph()
g.add_edge("A", "B")
g.add_edge("B", "C")
g.add_edge("C", "D")
g.add_edge("B", "D")
g.add_node("X")
g.add_node("Y")
assert g.number_of_nodes() == 6
assert g.number_of_edges() == 4
remove_singleton_nodes(g)
assert g.number_of_nodes() == 4
assert g.number_of_edges() == 4
def test_remap_node_identifier_alias():
"""
Test remap node identifier operation.
"""
graphs = get_graphs2()
g = remap_node_identifier(
graphs[0], "biolink:NamedThing", alternative_property="alias"
)
assert g.has_node("NCBIGene:54321")
assert g.has_node("Z")
assert g.has_node("C")
assert g.has_edge("C", "Z")
assert g.has_edge("Z", "A")
assert not g.has_edge("C", "B")
assert not g.has_edge("B", "A")
e1 = list(g.get_edge("C", "Z").values())[0]
assert e1["subject"] == "C" and e1["object"] == "Z"
assert e1["edge_key"] == "C-biolink:subclass_of-Z"
e2 = list(g.get_edge("Z", "A").values())[0]
assert e2["subject"] == "Z" and e2["object"] == "A"
assert e2["edge_key"] == "Z-biolink:subclass_of-A"
def test_remap_node_identifier_xref():
"""
Test remap node identifier operation.
"""
graphs = get_graphs2()
g = remap_node_identifier(
graphs[1], "biolink:Gene", alternative_property="xref", prefix="NCBIGene"
)
assert g.has_node("NCBIGene:12345")
assert g.has_node("NCBIGene:56463")
assert g.has_node("NCBIGene:08239")
assert g.has_node("D")
assert g.has_node("E")
assert g.has_node("F")
assert not g.has_node("A")
assert not g.has_node("B")
assert not g.has_node("C")
e1 = list(g.get_edge("NCBIGene:56463", "NCBIGene:12345").values())[0]
assert e1["subject"] == "NCBIGene:56463" and e1["object"] == "NCBIGene:12345"
e2 = list(g.get_edge("D", "NCBIGene:12345").values())[0]
assert e2["subject"] == "D" and e2["object"] == "NCBIGene:12345"
e3 = list(g.get_edge("E", "NCBIGene:12345").values())[0]
assert e3["subject"] == "E" and e3["object"] == "NCBIGene:12345"
e4 = list(g.get_edge("E", "F").values())[0]
assert e4["subject"] == "E" and e4["object"] == "F"
def test_remap_node_property():
"""
Test remap node property operation.
"""
graphs = get_graphs2()
remap_node_property(
graphs[0],
category="biolink:NamedThing",
old_property="alias",
new_property="same_as",
)
assert graphs[0].nodes()["HGNC:12345"]["alias"] == "UniProtKB:54321"
def test_remap_node_property_fail():
"""
Test remap node property operation, where the test fails due to an attempt
to change a core node property.
"""
graphs = get_graphs2()
with pytest.raises(AttributeError):
remap_node_property(
graphs[0],
category="biolink:NamedThing",
old_property="id",
new_property="alias",
)
@pytest.mark.skip()
def test_remap_edge_property():
"""
Test remap edge property operation.
"""
graphs = get_graphs2()
remap_edge_property(
graphs[0],
edge_predicate="biolink:subclass_of",
old_property="publications",
new_property="pubs",
)
e = list(graphs[0].get_edge("C", "B").values())[0]
assert e["publications"] == ["PMID:123456"]
def test_remap_edge_property_fail():
"""
Test remap edge property operation, where the test fails due to an attempt
to change a core edge property.
"""
graphs = get_graphs2()
with pytest.raises(AttributeError):
remap_edge_property(
graphs[0],
edge_predicate="biolink:subclass_of",
old_property="subject",
new_property="pubs",
)
with pytest.raises(AttributeError):
remap_edge_property(
graphs[0],
edge_predicate="biolink:subclass_of",
old_property="object",
new_property="pubs",
)
with pytest.raises(AttributeError):
remap_edge_property(
graphs[0],
edge_predicate="biolink:subclass_of",
old_property="predicate",
new_property="pubs",
)
| 11,383 | 26.698297 | 87 |
py
|
kgx
|
kgx-master/tests/unit/test_curie_lookup_service.py
|
import pytest
from kgx.curie_lookup_service import CurieLookupService
@pytest.mark.parametrize(
"query",
[
("RO:0002410", "causally_related_to"),
("RO:0002334", "regulated_by"),
("BFO:0000003", "occurrent"),
],
)
def test_curie_lookup(query):
"""
Test lookup for a given CURIE via CurieLookupService.
"""
cls = CurieLookupService()
assert len(cls.ontologies) > 0
assert query[0] in cls.ontology_graph
assert query[0] in cls.curie_map
assert cls.curie_map[query[0]] == query[1]
def test_curie_lookup_with_custom():
"""
Test lookup for a given CURIE via CurieLookupService, with a user defined
CURIE prefix map.
"""
cls = CurieLookupService(curie_map={"XYZ:123": "custom entry"})
assert len(cls.ontologies) > 0
assert "XYZ:123" in cls.curie_map
assert cls.curie_map["XYZ:123"] == "custom entry"
| 898 | 25.441176 | 77 |
py
|
kgx
|
kgx-master/tests/unit/test_meta_knowledge_graph.py
|
import json
import os
from sys import stderr
from typing import List, Dict
from deprecation import deprecated
from kgx.utils.kgx_utils import GraphEntityType
from kgx.graph_operations.meta_knowledge_graph import (
generate_meta_knowledge_graph,
MetaKnowledgeGraph,
)
from kgx.transformer import Transformer
from tests import RESOURCE_DIR, TARGET_DIR
def _check_mkg_json_contents(data):
assert "NCBIGene" in data["nodes"]["biolink:Gene"]["id_prefixes"]
assert "REACT" in data["nodes"]["biolink:Pathway"]["id_prefixes"]
assert "HP" in data["nodes"]["biolink:PhenotypicFeature"]["id_prefixes"]
assert data["nodes"]["biolink:Gene"]["count"] == 178
assert len(data["nodes"]) == 8
assert len(data["edges"]) == 13
edge1 = data["edges"][0]
assert edge1["subject"] == "biolink:Gene"
assert edge1["predicate"] == "biolink:interacts_with"
assert edge1["object"] == "biolink:Gene"
assert edge1["count"] == 165
edge1_cbs = edge1["count_by_source"]
assert "aggregator_knowledge_source" in edge1_cbs
edge1_cbs_aks = edge1_cbs["aggregator_knowledge_source"]
assert edge1_cbs_aks["string"] == 160
@deprecated(deprecated_in="1.5.8", details="Default is the use streaming graph_summary with inspector")
def test_generate_classical_meta_knowledge_graph():
"""
Test generate meta knowledge graph operation.
"""
input_args = {
"filename": [
os.path.join(RESOURCE_DIR, "graph_nodes.tsv"),
os.path.join(RESOURCE_DIR, "graph_edges.tsv"),
],
"format": "tsv",
}
transformer = Transformer()
transformer.transform(input_args)
output_filename = os.path.join(TARGET_DIR, "test_meta_knowledge_graph-1.json")
generate_meta_knowledge_graph(
graph=transformer.store.graph,
name="Test Graph",
filename=output_filename,
edge_facet_properties=["aggregator_knowledge_source"]
)
data = json.load(open(output_filename))
assert data["name"] == "Test Graph"
_check_mkg_json_contents(data)
def test_generate_meta_knowledge_graph_by_inspector():
"""
    Test generating the meta knowledge graph by streaming
    graph data through a Transformer.process() inspector.
"""
input_args = {
"filename": [
os.path.join(RESOURCE_DIR, "graph_nodes.tsv"),
os.path.join(RESOURCE_DIR, "graph_edges.tsv"),
],
"format": "tsv",
}
transformer = Transformer(stream=True)
mkg = MetaKnowledgeGraph(
"Test Graph - Streamed",
edge_facet_properties=["aggregator_knowledge_source"]
)
# We configure the Transformer with a data flow inspector
# (Deployed in the internal Transformer.process() call)
transformer.transform(input_args=input_args, inspector=mkg)
# Dump a report to stderr ... will be a JSON document now
if len(mkg.get_errors()) > 0:
assert len(mkg.get_errors("Error")) == 0
assert len(mkg.get_errors("Warning")) > 0
mkg.write_report(None, "Warning")
assert mkg.get_name() == "Test Graph - Streamed"
assert mkg.get_total_nodes_count() == 512
assert mkg.get_number_of_categories() == 8
assert mkg.get_total_edges_count() == 539
assert mkg.get_edge_mapping_count() == 13
assert "NCBIGene" in mkg.get_category("biolink:Gene").get_id_prefixes()
assert "REACT" in mkg.get_category("biolink:Pathway").get_id_prefixes()
assert "HP" in mkg.get_category("biolink:PhenotypicFeature").get_id_prefixes()
gene_category = mkg.get_category("biolink:Gene")
assert gene_category.get_count() == 178
gene_category.get_count_by_source()
assert len(mkg.get_edge_count_by_source("", "", "")) == 0
assert (
len(
mkg.get_edge_count_by_source(
"biolink:Gene", "biolink:affects", "biolink:Disease"
)
)
== 0
)
ecbs1 = mkg.get_edge_count_by_source(
"biolink:Gene",
"biolink:interacts_with",
"biolink:Gene",
facet="aggregator_knowledge_source",
)
assert len(ecbs1) == 2
assert "biogrid" in ecbs1
assert "string" in ecbs1
assert ecbs1["string"] == 160
ecbs2 = mkg.get_edge_count_by_source(
"biolink:Gene",
"biolink:has_phenotype",
"biolink:PhenotypicFeature",
facet="aggregator_knowledge_source",
)
assert len(ecbs2) == 3
assert "omim" in ecbs2
assert "orphanet" in ecbs2
assert "hpoa" in ecbs2
assert ecbs2["hpoa"] == 111
#
# Testing alternate approach of generating and using meta knowledge graphs
#
def test_generate_meta_knowledge_graph_via_saved_file():
"""
Test generate meta knowledge graph operation...
MetaKnowledgeGraph as streaming Transformer.transform Inspector
"""
input_args = {
"filename": [
os.path.join(RESOURCE_DIR, "graph_nodes.tsv"),
os.path.join(RESOURCE_DIR, "graph_edges.tsv"),
],
"format": "tsv",
}
t = Transformer(stream=True)
class ProgressMonitor:
def __init__(self):
self.count: Dict[GraphEntityType, int] = {
GraphEntityType.GRAPH: 0,
GraphEntityType.NODE: 0,
GraphEntityType.EDGE: 0,
}
def __call__(self, entity_type: GraphEntityType, rec: List):
self.count[GraphEntityType.GRAPH] += 1
self.count[entity_type] += 1
if not (self.count[GraphEntityType.GRAPH] % 100):
print(
str(self.count[GraphEntityType.GRAPH]) + " records processed...",
file=stderr,
)
def summary(self):
print(str(self.count[GraphEntityType.NODE]) + " nodes seen.", file=stderr)
print(str(self.count[GraphEntityType.EDGE]) + " edges seen.", file=stderr)
print(
str(self.count[GraphEntityType.GRAPH]) + " total records processed...",
file=stderr,
)
monitor = ProgressMonitor()
mkg = MetaKnowledgeGraph(
name="Test Graph - Streamed, Stats accessed via File",
progress_monitor=monitor,
node_facet_properties=["provided_by"],
edge_facet_properties=["aggregator_knowledge_source"]
)
t.transform(input_args=input_args, inspector=mkg)
output_filename = os.path.join(TARGET_DIR, "test_meta_knowledge_graph-2.json")
with open(output_filename, "w") as mkgh:
mkg.save(mkgh)
data = json.load(open(output_filename))
assert data["name"] == "Test Graph - Streamed, Stats accessed via File"
_check_mkg_json_contents(data)
monitor.summary()
def test_meta_knowledge_graph_multiple_category_and_predicate_parsing():
"""
Test meta knowledge graph parsing multiple categories using streaming
"""
input_args = {
"filename": [
os.path.join(RESOURCE_DIR, "graph_multi_category_nodes.tsv"),
os.path.join(RESOURCE_DIR, "graph_multi_category_edges.tsv"),
],
"format": "tsv",
}
t = Transformer(stream=True)
mkg = MetaKnowledgeGraph(name="Test Graph - Multiple Node Categories")
t.transform(input_args=input_args, inspector=mkg)
assert mkg.get_name() == "Test Graph - Multiple Node Categories"
assert mkg.get_total_nodes_count() == 10
# unique set, including (shared) parent
    # classes (not including category 'unknown')
assert mkg.get_number_of_categories() == 7
assert mkg.get_node_count_by_category("biolink:Disease") == 1
assert mkg.get_node_count_by_category("biolink:BiologicalEntity") == 5
assert mkg.get_node_count_by_category("biolink:AnatomicalEntityEntity") == 0
# sums up all the counts of node mappings across
# all categories (not including category 'unknown')
assert mkg.get_total_node_counts_across_categories() == 35
# only counts 'valid' edges for which
# subject and object nodes are in the nodes file
assert mkg.get_total_edges_count() == 8
# total number of distinct predicates
assert mkg.get_predicate_count() == 2
# counts edges with a given predicate
# (ignoring edges with unknown subject or object identifiers)
assert mkg.get_edge_count_by_predicate("biolink:has_phenotype") == 4
assert mkg.get_edge_count_by_predicate("biolink:involved_in") == 0
assert mkg.get_edge_mapping_count() == 25
assert mkg.get_total_edge_counts_across_mappings() == 100
@deprecated(deprecated_in="1.5.8", details="Default is the use streaming graph_summary with inspector")
def test_meta_knowledge_graph_of_complex_graph_data():
"""
Test generate meta knowledge graph operation.
"""
input_args = {
"filename": [
os.path.join(RESOURCE_DIR, "complex_graph_nodes.tsv"),
os.path.join(RESOURCE_DIR, "complex_graph_edges.tsv"),
],
"format": "tsv",
}
transformer = Transformer()
transformer.transform(input_args)
output_filename = os.path.join(TARGET_DIR, "test_meta_knowledge_graph-1.json")
generate_meta_knowledge_graph(
graph=transformer.store.graph,
name="Complex Test Graph",
filename=output_filename,
edge_facet_properties=["aggregator_knowledge_source"]
)
data = json.load(open(output_filename))
assert data["name"] == "Complex Test Graph"
print(f"\n{json.dumps(data, indent=4)}")
| 9,443 | 32.253521 | 103 |
py
|
kgx
|
kgx-master/tests/unit/test_nx_graph.py
|
from kgx.graph.nx_graph import NxGraph
def get_graphs():
"""
Returns instances of defined graphs.
"""
g1 = NxGraph()
g1.name = "Graph 1"
g1.add_node("A", id="A", name="Node A", category=["biolink:NamedThing"])
g1.add_node("B", id="B", name="Node B", category=["biolink:NamedThing"])
g1.add_node("C", id="C", name="Node C", category=["biolink:NamedThing"])
g1.add_edge(
"C",
"B",
edge_key="C-biolink:subclass_of-B",
edge_label="biolink:sub_class_of",
relation="rdfs:subClassOf",
)
g1.add_edge(
"B",
"A",
edge_key="B-biolink:subclass_of-A",
edge_label="biolink:sub_class_of",
relation="rdfs:subClassOf",
provided_by="Graph 1",
)
g2 = NxGraph()
g2.name = "Graph 2"
g2.add_node(
"A",
id="A",
name="Node A",
description="Node A in Graph 2",
category=["biolink:NamedThing"],
)
g2.add_node(
"B",
id="B",
name="Node B",
description="Node B in Graph 2",
category=["biolink:NamedThing"],
)
g2.add_node(
"C",
id="C",
name="Node C",
description="Node C in Graph 2",
category=["biolink:NamedThing"],
)
g2.add_node(
"D",
id="D",
name="Node D",
description="Node D in Graph 2",
category=["biolink:NamedThing"],
)
g2.add_node(
"E",
id="E",
name="Node E",
description="Node E in Graph 2",
category=["biolink:NamedThing"],
)
g2.add_edge(
"B",
"A",
edge_key="B-biolink:related_to-A",
edge_label="biolink:related_to",
relation="biolink:related_to",
)
g2.add_edge(
"D",
"A",
edge_key="D-biolink:related_to-A",
edge_label="biolink:related_to",
relation="biolink:related_to",
)
g2.add_edge(
"E",
"A",
edge_key="E-biolink:related_to-A",
edge_label="biolink:related_to",
relation="biolink:related_to",
)
g3 = NxGraph()
g3.name = "Graph 3"
g3.add_edge(
"F",
"E",
edge_key="F-biolink:same_as-E",
edge_label="biolink:same_as",
relation="OWL:same_as",
)
return [g1, g2, g3]
def test_add_node():
"""
Test adding a node to an NxGraph.
"""
g = NxGraph()
g.add_node("A")
g.add_node("A", name="Node A", description="Node A")
assert g.has_node("A")
def test_add_edge():
"""
Test adding an edge to an NxGraph.
"""
g = NxGraph()
g.add_node("A")
g.add_node("B")
g.add_edge("A", "B", predicate="biolink:related_to", provided_by="test")
assert g.has_edge("A", "B")
g.add_edge("B", "C", edge_key="B-biolink:related_to-C", provided_by="test")
assert g.has_edge("B", "C")
def test_add_node_attribute():
"""
Test adding a node attribute to an NxGraph.
"""
g = NxGraph()
g.add_node("A")
g.add_node_attribute("A", "provided_by", "test")
n = g.get_node("A")
assert "provided_by" in n and n["provided_by"] == "test"
def test_add_edge_attribute():
"""
Test adding an edge attribute to an NxGraph.
"""
g = NxGraph()
g.add_edge("A", "B")
g.add_edge_attribute("A", "B", "edge_ab", "predicate", "biolink:related_to")
def test_update_node_attribute():
"""
Test updating a node attribute for a node in an NxGraph.
"""
g = NxGraph()
g.add_node("A", name="A", description="Node A")
g.update_node_attribute("A", "description", "Modified description")
n = g.get_node("A")
assert "name" in n and n["name"] == "A"
assert "description" in n and n["description"] == "Modified description"
def test_update_edge_attribute():
"""
Test updating an edge attribute for an edge in an NxGraph.
"""
g = NxGraph()
g.add_edge("A", "B", "edge_ab")
g.update_edge_attribute("A", "B", "edge_ab", "source", "test")
e = g.get_edge("A", "B", "edge_ab")
assert "source" in e and e["source"] == "test"
def test_nodes():
"""
Test fetching of nodes from an NxGraph.
"""
g = get_graphs()[0]
nodes = list(g.nodes(data=False))
assert len(nodes) == 3
assert nodes[0] == "A"
nodes = g.nodes(data=True)
assert len(nodes) == 3
assert "name" in nodes["A"] and nodes["A"]["name"] == "Node A"
def test_edges():
"""
Test fetching of edges from an NxGraph.
"""
g = get_graphs()[0]
edges = list(g.edges(keys=False, data=False))
assert len(edges) == 2
assert edges[0] == ("B", "A")
edges = list(g.edges(keys=False, data=True))
e1 = edges[0]
assert e1[0] == "B"
assert e1[1] == "A"
assert e1[2]["relation"] == "rdfs:subClassOf"
edges = list(g.edges(keys=True, data=True))
e1 = edges[0]
assert e1[0] == "B"
assert e1[1] == "A"
assert e1[2] == "B-biolink:subclass_of-A"
assert e1[3]["relation"] == "rdfs:subClassOf"
def test_in_edges():
"""
Test fetching of incoming edges for a node in an NxGraph.
"""
g = get_graphs()[1]
in_edges = list(g.in_edges("A", keys=False, data=False))
assert len(in_edges) == 3
assert in_edges[0] == ("B", "A")
in_edges = list(g.in_edges("A", keys=True, data=True))
e1 = in_edges[0]
assert e1
assert e1[0] == "B"
assert e1[1] == "A"
assert e1[2] == "B-biolink:related_to-A"
assert e1[3]["relation"] == "biolink:related_to"
def test_out_edges():
"""
Test fetching of outgoing edges for a node in an NxGraph.
"""
g = get_graphs()[1]
out_edges = list(g.out_edges("B", keys=False, data=False))
assert len(out_edges) == 1
assert out_edges[0] == ("B", "A")
out_edges = list(g.out_edges("B", keys=True, data=True))
e1 = out_edges[0]
assert e1
assert e1[0] == "B"
assert e1[1] == "A"
assert e1[2] == "B-biolink:related_to-A"
assert e1[3]["relation"] == "biolink:related_to"
def test_nodes_iter():
"""
Test fetching all nodes in an NxGraph via an iterator.
"""
g = get_graphs()[1]
n_iter = g.nodes_iter()
n = next(n_iter)
assert n[1]["id"] == "A"
assert n[1]["name"] == "Node A"
def test_edges_iter():
"""
Test fetching all edges in an NxGraph via an iterator.
"""
g = get_graphs()[1]
e_iter = g.edges_iter()
e = next(e_iter)
assert len(e) == 4
assert e[0] == "B"
assert e[1] == "A"
assert e[2] == "B-biolink:related_to-A"
assert e[3]["relation"] == "biolink:related_to"
def test_remove_node():
"""
Test removing a node from an NxGraph.
"""
g = get_graphs()[1]
g.remove_node("A")
assert not g.has_node("A")
def test_remove_edge():
"""
Test removing an edge from an NxGraph.
"""
g = get_graphs()[1]
g.remove_edge("B", "A")
assert not g.has_edge("B", "A")
def test_number_of_nodes_edges():
"""
Test getting number of nodes and edges in an NxGraph.
"""
g = get_graphs()[1]
assert g.number_of_nodes() == 5
assert g.number_of_edges() == 3
def test_set_node_attributes():
"""
Test setting node attributes in bulk.
"""
g = NxGraph()
g.add_node("X:1", alias="A:1")
g.add_node("X:2", alias="B:2")
d = {"X:1": {"alias": "ABC:1"}, "X:2": {"alias": "DEF:2"}}
NxGraph.set_node_attributes(g, d)
def test_set_edge_attributes():
"""
Test setting edge attributes in bulk.
"""
g = NxGraph()
g.add_node("X:1", alias="A:1")
g.add_node("X:2", alias="B:2")
g.add_edge("X:2", "X:1", edge_key="edge1", source="Source 1")
d = {("X:2", "X:1", "edge1"): {"source": "Modified Source 1"}}
NxGraph.set_edge_attributes(g, d)
e = list(g.edges(keys=True, data=True))[0]
assert e[3]["source"] == "Modified Source 1"
def test_get_node_attributes():
"""
Test getting node attributes in bulk.
"""
g = get_graphs()[1]
d = NxGraph.get_node_attributes(g, "name")
assert "A" in d and d["A"] == "Node A"
assert "E" in d and d["E"] == "Node E"
def test_get_edge_attributes():
"""
Test getting edge attributes in bulk.
"""
g = get_graphs()[1]
d = NxGraph.get_edge_attributes(g, "relation")
assert ("B", "A", "B-biolink:related_to-A") in d
assert d[("B", "A", "B-biolink:related_to-A")] == "biolink:related_to"
def test_relabel_nodes():
"""
Test relabelling of nodes in an NxGraph.
"""
g = get_graphs()[1]
m = {"A": "A:1", "E": "E:1"}
NxGraph.relabel_nodes(g, m)
assert not g.has_node("A")
assert g.has_node("A:1")
assert not g.has_node("E")
assert g.has_node("E:1")
assert len(g.in_edges("A:1")) == 3
| 8,753 | 24.011429 | 80 |
py
|
kgx
|
kgx-master/tests/unit/test_validator.py
|
from sys import stderr
import pytest
from kgx.validator import Validator
@pytest.mark.parametrize("prefix", ["GO", "HP", "MONDO", "HGNC", "UniProtKB"])
def test_get_all_prefixes(prefix):
"""
Test get_all_prefixes in Validator.
"""
validator = Validator.get_the_validator()
validator.clear_errors()
prefixes = validator.get_all_prefixes()
assert prefix in prefixes
@pytest.mark.parametrize("property", ["id", "category"])
def test_get_required_node_properties(property):
"""
Test get_required_node_properties in Validator.
"""
validator = Validator.get_the_validator()
validator.clear_errors()
properties = validator.get_required_node_properties()
assert property in properties
@pytest.mark.parametrize("property", ["id", "subject", "object", "predicate"])
def test_get_required_edge_properties(property):
"""
Test get_required_edge_properties in Validator.
"""
validator = Validator.get_the_validator()
validator.clear_errors()
properties = validator.get_required_edge_properties()
assert property in properties
@pytest.mark.parametrize(
"query",
[
("A:123", {}, False),
("A:123", {"id": "A:123"}, False),
("A:123", {"id": "A:123", "name": "Node A:123"}, False),
(
"A:123",
{"id": "A:123", "name": "Node A:123", "category": ["biolink:NamedThing"]},
True,
),
],
)
def test_validate_node_properties(query):
"""
Test validate_node_properties in Validator.
"""
validator = Validator.get_the_validator()
validator.clear_errors()
required_properties = validator.get_required_node_properties()
validator.validate_node_properties(query[0], query[1], required_properties)
assert (len(validator.get_errors()) == 0) == query[2]
@pytest.mark.parametrize(
"query",
[
("A:123", "X:1", {}, False),
("A:123", "X:1", {"predicate": "biolink:related_to"}, False),
(
"A:123",
"X:1",
{"subject": "A:123", "predicate": "biolink:related_to"},
False,
),
(
"A:123",
"X:1",
{"subject": "A:123", "object": "X:1", "predicate": "biolink:related_to"},
False,
),
(
"A:123",
"X:1",
{
"id": "A:123-biolink:related_to-X:1",
"subject": "A:123",
"object": "X:1",
"predicate": "biolink:related_to",
"category": ["biolink:Association"],
},
True,
),
(
"A:123",
"X:1",
{
"id": "Edge A-X",
"subject": "A:123",
"object": "X:1",
"predicate": "biolink:related_to",
"category": ["biolink:Association"],
},
True,
),
],
)
def test_validate_edge_properties(query):
"""
Test validate_edge_properties in Validator.
"""
validator = Validator.get_the_validator()
validator.clear_errors()
required_properties = validator.get_required_edge_properties()
validator.validate_edge_properties(
query[0], query[1], query[2], required_properties
)
# Dump a report to stderr ... will be a JSON document now
print("\n*** validator error log:", file=stderr)
validator.write_report()
assert (len(validator.get_errors()) == 0) == query[3]
@pytest.mark.parametrize(
"query",
[
(
"A:123",
{"id": "A:123", "name": "Node A:123", "category": ["biolink:NamedThing"]},
True,
),
(
"A:123",
{"id": "A:123", "name": "Node A:123", "category": "biolink:NamedThing"},
False,
),
(
"A:123",
{"id": "A:123", "name": ["Node A:123"], "category": "biolink:NamedThing"},
False,
),
(
"A:123",
{
"id": "A:123",
"name": "Node A:123",
"category": ["biolink:NamedThing"],
"publications": "PMID:789",
},
False,
),
(
"A:123",
{
"id": "A:123",
"name": "Node A:123",
"category": ["biolink:NamedThing"],
"publications": ["PMID:789"],
},
True,
),
],
)
def test_validate_node_property_types(query):
"""
Test validate_node_property_types in Validator.
"""
validator = Validator.get_the_validator()
validator.clear_errors()
validator.validate_node_property_types(query[0], query[1])
assert (len(validator.get_errors()) == 0) == query[2]
@pytest.mark.parametrize(
"query",
[
(
123,
{"id": 123, "name": "Node A:123", "category": ["biolink:NamedThing"]},
False,
)
]
)
def test_validate_node_property_id_types_error(query):
"""
Test validate_node_property_types in Validator.
"""
validator = Validator.get_the_validator()
validator.clear_errors()
validator.validate_node_property_types(query[0], query[1])
assert validator.get_errors() is not None
@pytest.mark.parametrize(
"query",
[
(
"a:123",
{"id": "sweet:123", "name": "Node A:123", "category": 123},
False,
)
]
)
def test_validate_node_property_id_str_is_int_types_error(query):
"""
Test validate_node_property_types in Validator.
"""
validator = Validator.get_the_validator()
validator.clear_errors()
validator.validate_node_property_types(query[0], query[1])
assert validator.get_errors() is not None
@pytest.mark.parametrize(
"query",
[
(
"a:123",
{"id": 123, "name": "Node A:123", "category": ["biolink:NamedThing"]},
False,
)
]
)
def test_validate_node_property_uriorcurie_types_error(query):
"""
Test validate_node_property_types in Validator.
"""
validator = Validator.get_the_validator()
validator.clear_errors()
validator.validate_node_property_types(query[0], query[1])
assert validator.get_errors() is not None
@pytest.mark.parametrize(
"query",
[
(
"A:123",
"X:1",
{
"id": "A:123",
"subject": "a:123",
"object": 123,
"predicate": "biolink:related_to",
},
False,
),
],
)
def test_validate_edge_property_object_is_int_type_error(query):
"""
Test validate_edge_property_types in Validator.
"""
validator = Validator.get_the_validator()
validator.clear_errors()
validator.validate_edge_property_types(query[0], query[1], query[2])
assert validator.get_default_model_version() is not None
assert validator.get_errors() is not None
@pytest.mark.parametrize(
"query",
[
(
"A:123",
"X:1",
{
"id": "A:123",
"subject": 123,
"object": "X:1",
"predicate": "biolink:related_to",
},
False,
),
],
)
def test_validate_edge_property_subject_is_int_type_error(query):
"""
Test validate_edge_property_types in Validator.
"""
validator = Validator.get_the_validator()
validator.clear_errors()
validator.validate_edge_property_types(query[0], query[1], query[2])
assert validator.get_default_model_version() is not None
assert validator.get_errors() is not None
@pytest.mark.parametrize(
"query",
[
(
"A:123",
"X:1",
{
"id": "A:123-biolink:related_to-X:1",
"subject": "A:123",
"object": "X:1",
"predicate": "biolink:related_to",
},
True,
),
(
"A:123",
"X:1",
{
"id": "A:123-biolink:related_to-X:1",
"subject": "A:123",
"object": "X:1",
"predicate": ["biolink:related_to"],
},
False,
),
(
"A:123",
"X:1",
{
"id": "A:123-biolink:related_to-X:1",
"subject": ["A:123"],
"object": "X:1",
"predicate": "biolink:related_to",
},
False,
),
(
"A:123",
"X:1",
{
"subject": ["A:123"],
"object": "X:1",
"predicate": "biolink:related_to",
},
False,
),
],
)
def test_validate_edge_property_types_and_prefixes(query):
"""
Test validate_edge_property_types in Validator.
"""
validator = Validator.get_the_validator()
validator.clear_errors()
validator.validate_edge_property_types(query[0], query[1], query[2])
assert validator.get_default_model_version() is not None
assert (len(validator.get_errors()) == 0) == query[3]
assert "biolink" in validator.get_all_prefixes()
@pytest.mark.parametrize(
"query",
[
(
"HGNC:123",
{
"id": "HGNC:123",
"name": "Node HGNC:123",
"category": ["biolink:NamedThing"],
},
True,
),
(
"HGNC_123",
{
"id": "HGNC_123",
"name": "Node HGNC_123",
"category": ["biolink:NamedThing"],
},
False,
),
(
"A:123",
{"id": "A:123", "name": "Node A:123", "category": ["biolink:NamedThing"]},
False,
),
],
)
def test_validate_node_property_values(query):
"""
Test validate_node_property_values in Validator.
"""
validator = Validator.get_the_validator()
validator.clear_errors()
validator.validate_node_property_values(query[0], query[1])
assert (len(validator.get_errors()) == 0) == query[2]
@pytest.mark.parametrize(
"query",
[
(
"A:123",
"X:1",
{
"id": "A:123-biolink:related_to-X:1",
"subject": "A:123",
"object": "X:1",
"predicate": "biolink:related_to",
},
False,
),
(
"HGNC:123",
"X:1",
{
"id": "HGNC:123-biolink:related_to-X:1",
"subject": "A:123",
"object": "X:1",
"predicate": "biolink:related_to",
},
False,
),
(
"HGNC:123",
"MONDO:1",
{
"id": "HGNC:123-biolink:related_to-MONDO:1",
"subject": "A:123",
"object": "X:1",
"predicate": "biolink:related_to",
},
True,
),
(
"HGNC_123",
"MONDO:1",
{
"id": "HGNC_123-biolink:related_to-MONDO:1",
"subject": "A:123",
"object": "X:1",
"predicate": "biolink:related_to",
},
False,
),
],
)
def test_validate_edge_property_values(query):
"""
Test validate_edge_property_values in Validator.
"""
validator = Validator.get_the_validator()
validator.clear_errors()
validator.validate_edge_property_values(query[0], query[1], query[2])
assert (len(validator.get_errors()) == 0) == query[3]
@pytest.mark.parametrize(
"query",
[
(
"HGNC:123",
{
"id": "HGNC:123",
"name": "Node HGNC:123",
"category": ["biolink:NamedThing"],
},
True,
),
(
"A:123",
{
"id": "A:123",
"name": "Node A:123",
"category": ["biolink:NamedThing", "biolink:Gene"],
},
True,
),
(
"A:123",
{"id": "A:123", "name": "Node A:123", "category": ["NamedThing"]},
True,
),
("A:123", {"id": "A:123", "name": "Node A:123", "category": ["Gene"]}, True),
("A:123", {"id": "A:123", "name": "Node A:123", "category": ["GENE"]}, False),
],
)
def test_validate_categories(query):
"""
Test validate_categories in Validator.
"""
validator = Validator.get_the_validator()
validator.clear_errors()
validator.validate_categories(query[0], query[1])
assert (len(validator.get_errors()) == 0) == query[2]
@pytest.mark.parametrize(
"query",
[
(
"HGNC:123",
"MONDO:1",
{
"id": "HGNC:123-biolink:related_to-MONDO:1",
"subject": "A:123",
"object": "X:1",
"predicate": "biolink:related_to",
},
True,
),
(
"HGNC:123",
"MONDO:1",
{
"id": "HGNC:123-biolink:related_to-MONDO:1",
"subject": "A:123",
"object": "X:1",
"predicate": "related_to",
},
True,
),
(
"HGNC:123",
"MONDO:1",
{
"id": "HGNC:123-biolink:related_to-MONDO:1",
"subject": "A:123",
"object": "X:1",
"predicate": "related to",
},
False,
),
(
"HGNC:123",
"MONDO:1",
{
"id": "HGNC:123-biolink:related_to-MONDO:1",
"subject": "A:123",
"object": "X:1",
"predicate": "xyz",
},
False,
),
],
)
def test_validate_edge_label(query):
"""
Test validate_edge_predicate in Validator.
"""
validator = Validator.get_the_validator()
validator.clear_errors()
validator.validate_edge_predicate(query[0], query[1], dict(query[2]))
assert (len(validator.get_errors()) == 0) == query[3]
| 14,483 | 25.625 | 86 |
py
|
kgx
|
kgx-master/tests/unit/test_cli_utils.py
|
"""
Test CLI Utils
"""
import csv
import json
import os
import pytest
from click.testing import CliRunner
from pprint import pprint
from kgx.cli.cli_utils import validate, neo4j_upload, neo4j_download, merge, get_output_file_types
from kgx.cli import cli, get_input_file_types, graph_summary, get_report_format_types, transform
from tests import RESOURCE_DIR, TARGET_DIR
from tests.unit import (
check_container,
CONTAINER_NAME,
DEFAULT_NEO4J_URL,
DEFAULT_NEO4J_USERNAME,
DEFAULT_NEO4J_PASSWORD,
clean_database
)
def test_get_file_types():
"""
Test get_file_types method.
"""
file_types = get_input_file_types()
assert "tsv" in file_types
assert "nt" in file_types
assert "json" in file_types
assert "obojson" in file_types
def test_get_report_format_types():
"""
Test get_report_format_types method.
"""
format_types = get_report_format_types()
assert "yaml" in format_types
assert "json" in format_types
def test_graph_summary_wrapper():
output = os.path.join(TARGET_DIR, "graph_stats3.yaml")
runner = CliRunner()
result = runner.invoke(
cli,
[
"graph-summary",
"-i", "tsv",
"-o", output,
os.path.join(RESOURCE_DIR, "graph_nodes.tsv")
]
)
assert result.exit_code == 0
def test_graph_summary_wrapper_error():
inputs = [
os.path.join(RESOURCE_DIR, "graph_nodes.tsv"),
os.path.join(RESOURCE_DIR, "graph_edges.tsv"),
]
output = os.path.join(TARGET_DIR, "graph_stats3.yaml")
runner = CliRunner()
result = runner.invoke(
cli,
[
"graph-summary",
"-i", "tsv",
"-o", output,
inputs
]
)
assert result.exit_code == 1
def test_graph_summary_report_type_wrapper_error():
output = os.path.join(TARGET_DIR, "graph_stats3.yaml")
runner = CliRunner()
result = runner.invoke(
cli,
[
"graph-summary",
"-i", "tsv",
"-o", output,
"-r", "testoutput",
os.path.join(RESOURCE_DIR, "graph_nodes.tsv")
]
)
assert result.exit_code == 1
def test_graph_summary_report_format_wrapper_error():
output = os.path.join(TARGET_DIR, "graph_stats3.yaml")
runner = CliRunner()
result = runner.invoke(
cli,
[
"graph-summary",
"-i", "tsv",
"-o", output,
"-f", "notaformat",
os.path.join(RESOURCE_DIR, "graph_nodes.tsv")
]
)
assert result.exit_code == 1
def test_transform_wrapper():
"""
Transform graph from TSV to JSON.
"""
inputs = [
os.path.join(RESOURCE_DIR, "graph_nodes.tsv"),
os.path.join(RESOURCE_DIR, "graph_edges.tsv"),
]
output = os.path.join(TARGET_DIR, "grapht.json")
runner = CliRunner()
result = runner.invoke(
cli,
[
"transform",
"-i", "tsv",
"-o", output,
"-f", "json",
inputs
]
)
assert result.exit_code == 1
def test_transform_uncompressed_tsv_to_tsv():
"""
Transform nodes and edges file to nodes and edges TSV file
with extra provenance
"""
inputs = [
os.path.join(RESOURCE_DIR, "chebi_kgx_tsv_nodes.tsv"),
os.path.join(RESOURCE_DIR, "chebi_kgx_tsv_edges.tsv"),
]
output = os.path.join(TARGET_DIR, "chebi_snippet")
knowledge_sources = [
("aggregator_knowledge_source", "someks"),
("primary_knowledge_source", "someotherks"),
("knowledge_source", "newknowledge")
]
transform(
inputs=inputs,
input_format="tsv",
input_compression=None,
output=output,
output_format="tsv",
output_compression=None,
knowledge_sources=knowledge_sources,
)
assert os.path.exists(f"{output}_nodes.tsv")
assert os.path.exists(f"{output}_edges.tsv")
with open(f"{output}_edges.tsv", "r") as fd:
edges = csv.reader(fd, delimiter="\t", quotechar='"')
csv_headings = next(edges)
assert "aggregator_knowledge_source" in csv_headings
for row in edges:
assert len(row) == 10
assert "someks" in row
assert "someotherks" in row
assert "newknowledge" not in row
assert "chebiasc66dwf" in row
def test_transform_obojson_to_csv_wrapper():
"""
Transform obojson to CSV.
"""
inputs = [
os.path.join(RESOURCE_DIR, "BFO_2_relaxed.json")
]
output = os.path.join(TARGET_DIR, "test_bfo_2_relaxed")
knowledge_sources = [
("aggregator_knowledge_source", "bioportal"),
("primary_knowledge_source", "justastring")
]
transform(
inputs=inputs,
input_format="obojson",
input_compression=None,
output=output,
output_format="tsv",
output_compression=None,
knowledge_sources=knowledge_sources,
)
with open(f"{output}_edges.tsv", "r") as fd:
edges = csv.reader(fd, delimiter="\t", quotechar='"')
csv_headings = next(edges)
assert "aggregator_knowledge_source" in csv_headings
for row in edges:
assert "bioportal" in row
assert "justastring" in row
def test_transform_with_provided_by_obojson_to_csv_wrapper():
"""
Transform obojson to CSV.
"""
inputs = [
os.path.join(RESOURCE_DIR, "BFO_2_relaxed.json")
]
output = os.path.join(TARGET_DIR, "test_bfo_2_relaxed_provided_by.csv")
knowledge_sources = [
("aggregator_knowledge_source", "bioportal"),
("primary_knowledge_source", "justastring"),
("provided_by", "bioportal")
]
transform(
inputs=inputs,
input_format="obojson",
input_compression=None,
output=output,
output_format="tsv",
output_compression=None,
knowledge_sources=knowledge_sources,
)
def test_merge_wrapper():
"""
    Merge graphs as specified in the test merge YAML config.
"""
merge_config = os.path.join(RESOURCE_DIR, "test-merge.yaml")
runner = CliRunner()
result = runner.invoke(
cli,
[
"merge",
"--merge-config", merge_config
]
)
assert result.exit_code == 0
assert os.path.join(TARGET_DIR, "merged-graph_nodes.tsv")
assert os.path.join(TARGET_DIR, "merged-graph_edges.tsv")
assert os.path.join(TARGET_DIR, "merged-graph.json")
def test_get_output_file_types():
format_types = get_output_file_types()
assert format_types is not None
def test_merge_wrapper_error():
"""
    Expect a usage error when 'merge' is invoked without --merge-config.
"""
merge_config = os.path.join(RESOURCE_DIR, "test-merge.yaml")
runner = CliRunner()
result = runner.invoke(
cli,
[
"merge"
]
)
assert result.exit_code == 2
def test_kgx_graph_summary():
"""
Test graph summary, where the output report type is kgx-map.
"""
inputs = [
os.path.join(RESOURCE_DIR, "graph_nodes.tsv"),
os.path.join(RESOURCE_DIR, "graph_edges.tsv"),
]
output = os.path.join(TARGET_DIR, "graph_stats1.yaml")
summary_stats = graph_summary(
inputs,
"tsv",
None,
output,
node_facet_properties=["provided_by"],
edge_facet_properties=["aggregator_knowledge_source"],
report_type="kgx-map"
)
assert os.path.exists(output)
assert summary_stats
assert "node_stats" in summary_stats
assert "edge_stats" in summary_stats
assert summary_stats["node_stats"]["total_nodes"] == 512
assert "biolink:Gene" in summary_stats["node_stats"]["node_categories"]
assert "biolink:Disease" in summary_stats["node_stats"]["node_categories"]
assert summary_stats["edge_stats"]["total_edges"] == 539
assert "biolink:has_phenotype" in summary_stats["edge_stats"]["predicates"]
assert "biolink:interacts_with" in summary_stats["edge_stats"]["predicates"]
def test_chebi_tsv_to_tsv_transform():
inputs = [
os.path.join(RESOURCE_DIR, "chebi_kgx_tsv.tar.gz")
]
output = os.path.join(TARGET_DIR, "test_chebi.tsv")
knowledge_sources = [
("aggregator_knowledge_source", "test1"),
("primary_knowledge_source", "test2")
]
transform(inputs=inputs,
input_format='tsv',
input_compression='tar.gz',
output=output,
output_format='tsv',
knowledge_sources=knowledge_sources)
def test_meta_knowledge_graph_as_json():
"""
Test graph summary, where the output report type is a meta-knowledge-graph,
with results output as the default JSON report format type.
"""
inputs = [
os.path.join(RESOURCE_DIR, "graph_nodes.tsv"),
os.path.join(RESOURCE_DIR, "graph_edges.tsv"),
]
output = os.path.join(TARGET_DIR, "meta-knowledge-graph.json")
summary_stats = graph_summary(
inputs,
"tsv",
None,
output,
report_type="meta-knowledge-graph",
node_facet_properties=["provided_by"],
edge_facet_properties=["aggregator_knowledge_source"],
graph_name="Default Meta-Knowledge-Graph",
)
assert os.path.exists(output)
assert summary_stats
assert "nodes" in summary_stats
assert "edges" in summary_stats
assert "name" in summary_stats
assert summary_stats["name"] == "Default Meta-Knowledge-Graph"
def test_meta_knowledge_graph_as_yaml():
"""
Test graph summary, where the output report type is a meta-knowledge-graph,
with results output as the YAML report output format type.
"""
inputs = [
os.path.join(RESOURCE_DIR, "graph_nodes.tsv"),
os.path.join(RESOURCE_DIR, "graph_edges.tsv"),
]
output = os.path.join(TARGET_DIR, "meta-knowledge-graph.yaml")
summary_stats = graph_summary(
inputs,
"tsv",
None,
output,
report_type="meta-knowledge-graph",
node_facet_properties=["provided_by"],
edge_facet_properties=["aggregator_knowledge_source"],
report_format="yaml"
)
assert os.path.exists(output)
assert summary_stats
assert "nodes" in summary_stats
assert "edges" in summary_stats
def test_meta_knowledge_graph_as_json_streamed():
"""
Test graph summary processed in stream mode, where the output report type
is meta-knowledge-graph, output as the default JSON report format type.
"""
inputs = [
os.path.join(RESOURCE_DIR, "graph_nodes.tsv"),
os.path.join(RESOURCE_DIR, "graph_edges.tsv"),
]
output = os.path.join(TARGET_DIR, "meta-knowledge-graph-streamed.json")
summary_stats = graph_summary(
inputs=inputs,
input_format="tsv",
input_compression=None,
output=output,
report_type="meta-knowledge-graph",
node_facet_properties=["provided_by"],
edge_facet_properties=["aggregator_knowledge_source"]
)
assert os.path.exists(output)
assert summary_stats
assert "nodes" in summary_stats
assert "edges" in summary_stats
def test_validate_exception_triggered_error_exit_code():
"""
Test graph validate error exit code.
"""
test_input = os.path.join(RESOURCE_DIR, "graph_tiny_nodes.tsv")
runner = CliRunner()
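    # An invalid Biolink Model version string ("not.a.semver") should make the
    # CLI exit with a usage error (exit code 2).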
result = runner.invoke(
cli,
[
"validate",
"-i", "tsv",
"-b not.a.semver",
test_input
]
)
assert result.exit_code == 2
@pytest.mark.parametrize(
"query",
[
("graph_nodes.tsv", 0),
("test_nodes.tsv", 1),
],
)
def test_validate_parsing_triggered_error_exit_code(query):
"""
    Test graph validate exit codes for both valid and invalid input files.
"""
test_input = os.path.join(RESOURCE_DIR, query[0])
runner = CliRunner()
result = runner.invoke(
cli,
[
"validate",
"-i", "tsv",
test_input
]
)
assert result.exit_code == query[1]
def test_validate():
"""
Test graph validation.
"""
inputs = [
os.path.join(RESOURCE_DIR, "valid.json"),
]
output = os.path.join(TARGET_DIR, "validation.log")
errors = validate(
inputs=inputs,
input_format="json",
input_compression=None,
output=output
)
assert os.path.exists(output)
assert len(errors) == 0
@pytest.mark.skipif(
not check_container(), reason=f"Container {CONTAINER_NAME} is not running"
)
def test_neo4j_upload(clean_database):
"""
Test upload to Neo4j.
"""
inputs = [
os.path.join(RESOURCE_DIR, "graph_nodes.tsv"),
os.path.join(RESOURCE_DIR, "graph_edges.tsv"),
]
# upload
t = neo4j_upload(
inputs,
"tsv",
None,
uri=DEFAULT_NEO4J_URL,
username=DEFAULT_NEO4J_USERNAME,
password=DEFAULT_NEO4J_PASSWORD,
stream=False,
)
assert t.store.graph.number_of_nodes() == 512
assert t.store.graph.number_of_edges() == 531
@pytest.mark.skipif(
not check_container(), reason=f"Container {CONTAINER_NAME} is not running"
)
def test_neo4j_download_wrapper(clean_database):
output = os.path.join(TARGET_DIR, "neo_download2")
runner = CliRunner()
result = runner.invoke(
cli,
[
"neo4j-download",
"-l", DEFAULT_NEO4J_URL,
"-o", output,
"-f", "tsv",
"-u", DEFAULT_NEO4J_USERNAME,
"-p", DEFAULT_NEO4J_PASSWORD,
]
)
assert os.path.exists(f"{output}_nodes.tsv")
assert os.path.exists(f"{output}_edges.tsv")
assert result.exit_code == 0
@pytest.mark.skipif(
not check_container(), reason=f"Container {CONTAINER_NAME} is not running"
)
def test_download_exception_triggered_error_exit_code():
"""
Test graph download error exit code.
"""
output = os.path.join(TARGET_DIR, "neo_download")
runner = CliRunner()
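    # "tsvr" is not a valid output format, so the download is expected to fail
    # with exit code 1.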
result = runner.invoke(
cli,
[
"neo4j-download",
"-l", DEFAULT_NEO4J_URL,
"-o", output,
"-f", "tsvr",
"-u", "not a user name",
"-p", DEFAULT_NEO4J_PASSWORD,
]
)
assert result.exit_code == 1
@pytest.mark.skipif(
not check_container(), reason=f"Container {CONTAINER_NAME} is not running"
)
def test_neo4j_upload_wrapper(clean_database):
inputs = [
os.path.join(RESOURCE_DIR, "graph_nodes.tsv"),
os.path.join(RESOURCE_DIR, "graph_edges.tsv"),
]
runner = CliRunner()
result = runner.invoke(
cli,
[
"neo4j-upload",
"--input-format", "tsv",
"--uri", DEFAULT_NEO4J_URL,
"--username", DEFAULT_NEO4J_USERNAME,
"--password", DEFAULT_NEO4J_PASSWORD,
os.path.join(RESOURCE_DIR, "graph_nodes.tsv")
]
)
assert result.exit_code == 0
@pytest.mark.skipif(
not check_container(), reason=f"Container {CONTAINER_NAME} is not running"
)
def test_neo4j_upload_wrapper_error(clean_database):
runner = CliRunner()
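    # The arguments below are intentionally malformed (bogus input paths and
    # missing required options), so the CLI should exit with a usage error (exit code 2).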
result = runner.invoke(
cli,
[
"neo4j-upload",
"-i", "tsv",
"inputs", "not_a_path"
"-u", "not a user",
"-p", DEFAULT_NEO4J_PASSWORD,
]
)
assert result.exit_code == 2
@pytest.mark.skipif(
not check_container(), reason=f"Container {CONTAINER_NAME} is not running"
)
def test_neo4j_download(clean_database):
"""
Test download from Neo4j.
"""
inputs = [
os.path.join(RESOURCE_DIR, "graph_nodes.tsv"),
os.path.join(RESOURCE_DIR, "graph_edges.tsv"),
]
output = os.path.join(TARGET_DIR, "neo_download")
# upload
t1 = neo4j_upload(
inputs=inputs,
input_format="tsv",
input_compression=None,
uri=DEFAULT_NEO4J_URL,
username=DEFAULT_NEO4J_USERNAME,
password=DEFAULT_NEO4J_PASSWORD,
stream=False,
)
t2 = neo4j_download(
uri=DEFAULT_NEO4J_URL,
username=DEFAULT_NEO4J_USERNAME,
password=DEFAULT_NEO4J_PASSWORD,
output=output,
output_format="tsv",
output_compression=None,
stream=False,
)
assert os.path.exists(f"{output}_nodes.tsv")
assert os.path.exists(f"{output}_edges.tsv")
assert t1.store.graph.number_of_nodes() == t2.store.graph.number_of_nodes()
assert t1.store.graph.number_of_edges() == t2.store.graph.number_of_edges()
@pytest.mark.skipif(
not check_container(), reason=f"Container {CONTAINER_NAME} is not running"
)
def test_neo4j_download_empty_output_format(clean_database):
    """
    Test download from Neo4j when the output format is left empty.
    """
inputs = [
os.path.join(RESOURCE_DIR, "graph_nodes.tsv"),
os.path.join(RESOURCE_DIR, "graph_edges.tsv"),
]
output = os.path.join(TARGET_DIR, "neo_download")
# upload
t1 = neo4j_upload(
inputs=inputs,
input_format="tsv",
input_compression=None,
uri=DEFAULT_NEO4J_URL,
username=DEFAULT_NEO4J_USERNAME,
password=DEFAULT_NEO4J_PASSWORD,
stream=False,
)
t2 = neo4j_download(
uri=DEFAULT_NEO4J_URL,
username=DEFAULT_NEO4J_USERNAME,
password=DEFAULT_NEO4J_PASSWORD,
output=output,
output_format="",
output_compression=None,
stream=False,
)
assert os.path.exists(f"{output}_nodes.tsv")
def test_transform1():
"""
Transform graph from TSV to JSON.
"""
inputs = [
os.path.join(RESOURCE_DIR, "graph_nodes.tsv"),
os.path.join(RESOURCE_DIR, "graph_edges.tsv"),
]
output = os.path.join(TARGET_DIR, "graph.json")
knowledge_sources = [
("aggregator_knowledge_source", "True"),
]
transform(
inputs=inputs,
input_format="tsv",
input_compression=None,
output=output,
output_format="json",
output_compression=None,
knowledge_sources=knowledge_sources,
)
assert os.path.exists(output)
data = json.load(open(output, "r"))
assert "nodes" in data
assert "edges" in data
assert len(data["nodes"]) == 512
assert len(data["edges"]) == 531
for e in data["edges"]:
if e["subject"] == "HGNC:10848" and e["object"] == "HGNC:20738":
assert "aggregator_knowledge_source" in e
assert "infores:string" in e["aggregator_knowledge_source"]
assert "infores:biogrid" in e["aggregator_knowledge_source"]
break
def test_transform_error():
"""
    Transform graph from TSV to JSON, expecting a ValueError when both
    explicit inputs and a transform_config are provided.
"""
inputs = [
os.path.join(RESOURCE_DIR, "graph_nodes.tsv"),
os.path.join(RESOURCE_DIR, "graph_edges.tsv"),
]
output = os.path.join(TARGET_DIR, "graph.json")
knowledge_sources = [
("aggregator_knowledge_source", "True"),
]
    # Supplying both a transform_config and explicit inputs is invalid and is
    # expected to raise a ValueError.
    with pytest.raises(ValueError):
        transform(
            transform_config="out.txt",
            inputs=inputs,
            input_format="tsv",
            input_compression=None,
            output=output,
            output_format="json",
            output_compression=None,
            knowledge_sources=knowledge_sources,
        )
def test_transform_knowledge_source_suppression():
"""
Transform graph from TSV to JSON.
"""
inputs = [
os.path.join(RESOURCE_DIR, "graph_nodes.tsv"),
os.path.join(RESOURCE_DIR, "graph_edges.tsv"),
]
output = os.path.join(TARGET_DIR, "graph.json")
knowledge_sources = [
("aggregator_knowledge_source", "False"),
("knowledge_source", "False"),
]
transform(
inputs=inputs,
input_format="tsv",
input_compression=None,
output=output,
output_format="json",
output_compression=None,
knowledge_sources=knowledge_sources,
)
assert os.path.exists(output)
data = json.load(open(output, "r"))
assert "nodes" in data
assert "edges" in data
assert len(data["nodes"]) == 512
assert len(data["edges"]) == 531
for e in data["edges"]:
if e["subject"] == "HGNC:10848" and e["object"] == "HGNC:20738":
assert "aggregator_knowledge_source" not in e
assert "knowledge_source" not in e
break
def test_transform_provided_by_suppression():
"""
Transform graph from TSV to JSON.
"""
inputs = [
os.path.join(RESOURCE_DIR, "graph_nodes.tsv"),
os.path.join(RESOURCE_DIR, "graph_edges.tsv"),
]
output = os.path.join(TARGET_DIR, "graph.json")
knowledge_sources = [
("aggregator_knowledge_source", "False"),
("knowledge_source", "False"),
("provided_by", "False")
]
transform(
inputs=inputs,
input_format="tsv",
input_compression=None,
output=output,
output_format="json",
output_compression=None,
knowledge_sources=knowledge_sources,
)
assert os.path.exists(output)
data = json.load(open(output, "r"))
for n in data["nodes"]:
assert "provided_by" not in n
def test_transform_knowledge_source_rewrite():
"""
Transform graph from TSV to JSON.
"""
inputs = [
os.path.join(RESOURCE_DIR, "graph_tiny_nodes.tsv"),
os.path.join(RESOURCE_DIR, "graph_tiny_edges.tsv"),
]
output = os.path.join(TARGET_DIR, "graph.json")
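    # Each tuple below is (field, "match,replacement"): edges whose
    # aggregator_knowledge_source matches "go" or "string" are rewritten to the
    # given names, which the transform then normalizes into infores CURIEs
    # (e.g. "string database" -> "infores:string-database").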
knowledge_sources = [
("aggregator_knowledge_source", "go,gene ontology"),
("aggregator_knowledge_source", "string,string database"),
]
transform(
inputs=inputs,
input_format="tsv",
input_compression=None,
output=output,
output_format="json",
output_compression=None,
knowledge_sources=knowledge_sources,
)
assert os.path.exists(output)
data = json.load(open(output, "r"))
assert "nodes" in data
assert "edges" in data
assert len(data["nodes"]) == 6
assert len(data["edges"]) == 9
for e in data["edges"]:
if e["subject"] == "HGNC:10848" and e["object"] == "HGNC:20738":
assert "aggregator_knowledge_source" in e
assert "infores:string-database" in e["aggregator_knowledge_source"]
if e["subject"] == "HGNC:10848" and e["object"] == "GO:0005576":
assert "aggregator_knowledge_source" in e
print("aggregator ks", e["aggregator_knowledge_source"])
print(e)
def test_transform_knowledge_source_rewrite_with_prefix():
"""
Transform graph from TSV to JSON.
"""
inputs = [
os.path.join(RESOURCE_DIR, "graph_tiny_nodes.tsv"),
os.path.join(RESOURCE_DIR, "graph_tiny_edges.tsv"),
]
output = os.path.join(TARGET_DIR, "graph.json")
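    # An optional third element acts as a prefix for the rewritten value:
    # "string,string database,new" yields "infores:new-string-database".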
knowledge_sources = [
("aggregator_knowledge_source", "string,string database,new")
]
transform(
inputs=inputs,
input_format="tsv",
input_compression=None,
output=output,
output_format="json",
output_compression=None,
knowledge_sources=knowledge_sources,
)
assert os.path.exists(output)
data = json.load(open(output, "r"))
assert "nodes" in data
assert "edges" in data
assert len(data["nodes"]) == 6
assert len(data["edges"]) == 9
for e in data["edges"]:
if e["subject"] == "HGNC:10848" and e["object"] == "HGNC:20738":
assert "aggregator_knowledge_source" in e
assert "infores:new-string-database" in e["aggregator_knowledge_source"]
assert "biogrid" in e["aggregator_knowledge_source"]
def test_transform2():
"""
Transform from a test transform YAML.
"""
transform_config = os.path.join(RESOURCE_DIR, "test-transform.yaml")
transform(inputs=None, transform_config=transform_config)
assert os.path.exists(os.path.join(RESOURCE_DIR, "graph_nodes.tsv"))
assert os.path.exists(os.path.join(RESOURCE_DIR, "graph_edges.tsv"))
def test_transform_rdf_to_tsv():
"""
Transform from a test transform YAML.
"""
transform_config = os.path.join(RESOURCE_DIR, "test-transform-rdf-tsv.yaml")
transform(inputs=None, transform_config=transform_config)
assert os.path.exists(os.path.join(TARGET_DIR, "test-transform-rdf_edges.tsv"))
assert os.path.exists(os.path.join(TARGET_DIR, "test-transform-rdf_nodes.tsv"))
def test_transform_tsv_to_rdf():
"""
Transform from a test transform YAML.
"""
transform_config = os.path.join(RESOURCE_DIR, "test-transform-tsv-rdf.yaml")
transform(inputs=None, transform_config=transform_config)
assert os.path.exists(os.path.join(TARGET_DIR, "test-tranform-tsv-rdf.nt"))
def test_merge1():
"""
    Merge graphs as defined in the test merge YAML.
"""
merge_config = os.path.join(RESOURCE_DIR, "test-merge.yaml")
merge(merge_config=merge_config)
    assert os.path.exists(os.path.join(TARGET_DIR, "merged-graph_nodes.tsv"))
    assert os.path.exists(os.path.join(TARGET_DIR, "merged-graph_edges.tsv"))
    assert os.path.exists(os.path.join(TARGET_DIR, "merged-graph.json"))
def test_merge2():
"""
    Merge selected sources from the test merge YAML and
    write only the selected destinations.
"""
merge_config = os.path.join(RESOURCE_DIR, "test-merge.yaml")
merge(merge_config=merge_config, destination=["merged-graph-json"])
    assert os.path.exists(os.path.join(TARGET_DIR, "merged-graph.json"))
| 25,595 | 27.220507 | 98 |
py
|
kgx
|
kgx-master/tests/unit/test_graph_utils.py
|
import pytest
from kgx.graph.nx_graph import NxGraph
from kgx.utils.graph_utils import get_parents, get_ancestors, curie_lookup
def get_graphs():
"""
Returns instances of defined graphs.
"""
g1 = NxGraph()
g1.add_edge("B", "A", **{"predicate": "biolink:sub_class_of"})
g1.add_edge("C", "B", **{"predicate": "biolink:sub_class_of"})
g1.add_edge("D", "C", **{"predicate": "biolink:sub_class_of"})
g1.add_edge("D", "A", **{"predicate": "biolink:related_to"})
g1.add_edge("E", "D", **{"predicate": "biolink:sub_class_of"})
g1.add_edge("F", "D", **{"predicate": "biolink:sub_class_of"})
g2 = NxGraph()
g2.name = "Graph 1"
g2.add_node(
"HGNC:12345",
id="HGNC:12345",
name="Test Gene",
category=["biolink:NamedThing"],
alias="NCBIGene:54321",
same_as="UniProtKB:54321",
)
g2.add_node("B", id="B", name="Node B", category=["biolink:NamedThing"], alias="Z")
g2.add_node("C", id="C", name="Node C", category=["biolink:NamedThing"])
g2.add_edge(
"C",
"B",
edge_key="C-biolink:subclass_of-B",
subject="C",
object="B",
predicate="biolink:subclass_of",
relation="rdfs:subClassOf",
provided_by="Graph 1",
publications=[1],
pubs=["PMID:123456"],
)
g2.add_edge(
"B",
"A",
edge_key="B-biolink:subclass_of-A",
subject="B",
object="A",
predicate="biolink:subclass_of",
relation="rdfs:subClassOf",
provided_by="Graph 1",
)
g2.add_edge(
"C",
"c",
edge_key="C-biolink:exact_match-B",
subject="C",
object="c",
predicate="biolink:exact_match",
relation="skos:exactMatch",
provided_by="Graph 1",
)
return [g1, g2]
def test_get_parents():
"""
Test get_parents method where the parent is fetched
by walking the graph across a given edge predicate.
"""
query = ("E", ["D"])
graph = get_graphs()[0]
parents = get_parents(graph, query[0], relations=["biolink:sub_class_of"])
assert len(parents) == len(query[1])
assert parents == query[1]
def test_get_ancestors():
"""
Test get_ancestors method where the ancestors are fetched
by walking the graph across a given edge predicate.
"""
query = ("E", ["D", "C", "B", "A"])
graph = get_graphs()[0]
parents = get_ancestors(graph, query[0], relations=["biolink:sub_class_of"])
assert len(parents) == len(query[1])
assert parents == query[1]
@pytest.mark.skip(reason="To be implemented")
def test_get_category_via_superclass():
""""""
pass
@pytest.mark.parametrize(
"query",
[
("rdfs:subClassOf", "sub_class_of"),
("owl:equivalentClass", "equivalent_class"),
("RO:0000091", "has_disposition"),
],
)
def test_curie_lookup(query):
"""
Test look up of a CURIE.
"""
s = curie_lookup(query[0])
assert s == query[1]
| 3,018 | 26.198198 | 87 |
py
|
kgx
|
kgx-master/tests/unit/test_rdf_utils.py
|
import os
import pytest
from rdflib import URIRef, Graph
from pprint import pprint
from kgx.prefix_manager import PrefixManager
from kgx.utils.rdf_utils import infer_category, process_predicate
from tests import RESOURCE_DIR
@pytest.mark.parametrize(
"query",
[
(URIRef("http://purl.obolibrary.org/obo/GO_0007267"), "biological_process"),
(URIRef("http://purl.obolibrary.org/obo/GO_0019899"), "molecular_function"),
(URIRef("http://purl.obolibrary.org/obo/GO_0005739"), "cellular_component"),
],
)
def test_infer_category(query):
"""
Test inferring of biolink category for a given IRI.
"""
graph = Graph()
graph.parse(os.path.join(RESOURCE_DIR, "goslim_generic.owl"))
[c] = infer_category(query[0], graph)
assert c == query[1]
@pytest.mark.parametrize(
"query",
[
(
"http://purl.org/oban/association_has_object",
"biolink:object",
"rdf:object",
"OBAN:association_has_object",
"association_has_object",
),
(
"http://www.w3.org/1999/02/22-rdf-syntax-ns#type",
"biolink:type",
"rdf:type",
"rdf:type",
"type",
),
(
"https://monarchinitiative.org/frequencyOfPhenotype",
None,
None,
"MONARCH:frequencyOfPhenotype",
"frequencyOfPhenotype",
),
(
"http://purl.obolibrary.org/obo/RO_0002200",
"biolink:has_phenotype",
"biolink:has_phenotype",
"RO:0002200",
"0002200",
),
(
"http://www.w3.org/2002/07/owl#equivalentClass",
"biolink:same_as",
"biolink:same_as",
"owl:equivalentClass",
"equivalentClass",
),
(
"https://www.example.org/UNKNOWN/new_prop",
None,
None,
":new_prop",
"new_prop",
),
(
"http://purl.obolibrary.org/obo/RO_0000091",
None,
None,
"RO:0000091",
"0000091",
),
("RO:0000091", None, None, "RO:0000091", "0000091"),
("category", "biolink:category", "biolink:category", ":category", "category"),
("predicate", "biolink:predicate", "rdf:predicate", ":predicate", "predicate"),
("type", "biolink:type", "rdf:type", ":type", "type"),
("name", "biolink:name", "rdfs:label", ":name", "name"),
],
)
def test_process_predicate(query):
"""
Test behavior of process_predicate method.
"""
pm = PrefixManager()
pprint(query[0])
x = process_predicate(pm, query[0])
# x = "element_uri", "canonical_uri", "predicate", "property_name"
# print("x: ", x)
# print("query[0]", query[0])
# print("x[0]: ", x[0], "query[1]: ", query[1])
# print("x[1]: ", x[1], "query[2]: ", query[2])
# print("x[2]: ", x[2], "query[3]: ", query[3])
# print("x[3]: ", x[3], "query[4]: ", query[4])
assert x[0] == query[1]
assert x[1] == query[2]
assert x[2] == query[3]
assert x[3] == query[4]
| 3,181 | 29.018868 | 87 |
py
|
kgx
|
kgx-master/tests/unit/test_clique_merge.py
|
from kgx.graph.nx_graph import NxGraph
from kgx.graph_operations.clique_merge import (
check_categories,
sort_categories,
check_all_categories,
clique_merge,
)
from kgx.utils.kgx_utils import get_biolink_ancestors, generate_edge_key, get_toolkit
from tests import print_graph
from bmt import Toolkit
def test_check_categories():
"""
Test check_categories method.
"""
vbc, ibc, ic = check_categories(
["biolink:Gene"], get_biolink_ancestors("biolink:Gene"), None
)
assert "biolink:Gene" in vbc
assert len(ibc) == 0
vbc, ibc, ic = check_categories(
["biolink:BiologicalEntity"], get_biolink_ancestors("biolink:Disease"), None
)
assert "biolink:BiologicalEntity" in vbc
assert len(ibc) == 0
vbc, ibc, ic = check_categories(
["biolink:Disease"], get_biolink_ancestors("biolink:Gene"), None
)
assert len(vbc) == 0
assert len(ibc) == 1 and "biolink:Disease" in ibc
def test_check_all_categories1():
"""
Test check_all_categories method.
ibc = invalid biolink categories
ic = invalid category
vbc = valid biolink categories
Note: in the check_categories method called by check_all_categories, the
categories list in this test gets sorted to be processed like this:
['biolink:Gene', 'biolink:Disease', 'biolink:NamedThing', 'biolink:GeneOrGeneProduct']
    This affects which closure is checked for valid biolink categories and is why disease is
tagged as an invalid biolink category (even though it descends from biolink:NamedThing).
GeneOrGeneProduct is a mixin, and therefore not considered a valid 'category' even though it is
in the 'biolink:Gene' hierarchy.
"""
categories = [
"biolink:Disease",
"biolink:Gene",
"biolink:GeneOrGeneProduct",
"biolink:NamedThing",
]
vbc, ibc, ic = check_all_categories(categories)
assert len(vbc) == 2
assert len(ibc) == 1 and "biolink:Disease" in ibc
assert (
len(ic) == 1
) # since biolink:GeneOrGeneProduct is a mixin, it is declared as an invalid category.
def test_check_all_categories2():
"""
Test check_all_categories method.
"""
categories = get_biolink_ancestors("biolink:Gene")
vbc, ibc, ic = check_all_categories(categories)
assert len(vbc) == 4
assert len(ibc) == 0
assert (
len(ic) == 8
) # mixins are not valid biolink categories, but they are ancestors.
categories = ["biolink:NamedThing", "biolink:GeneOrGeneProduct", "biolink:Gene"]
vbc, ibc, ic = check_all_categories(categories)
assert len(vbc) == 2
assert len(ibc) == 0
assert len(ic) == 1
categories = ["biolink:NamedThing", "biolink:GeneOrGeneProduct", "Node"]
vbc, ibc, ic = check_all_categories(categories)
assert len(vbc) == 1
assert len(ibc) == 0
assert len(ic) == 2
def test_sort_categories():
"""
Test sort_categories method.
"""
categories = ["biolink:NamedThing", "biolink:BiologicalEntity", "biolink:Disease"]
sorted_categories = sort_categories(categories)
assert sorted_categories.index("biolink:Disease") == 0
assert sorted_categories.index("biolink:BiologicalEntity") == 1
assert sorted_categories.index("biolink:NamedThing") == 2
def test_clique_merge1():
"""
Test to perform a clique merge where all nodes in a clique are valid.
"""
ppm = {"biolink:Gene": ["HGNC", "NCBIGene", "ENSEMBL", "OMIM"]}
g1 = NxGraph()
g1.add_node("HGNC:1", **{"category": ["biolink:Gene"]})
g1.add_node("OMIM:2", **{"category": ["biolink:Gene"]})
g1.add_node("NCBIGene:3", **{"category": ["biolink:Gene"]})
g1.add_node("ENSEMBL:4", **{"category": ["biolink:Gene"]})
g1.add_node("ENSEMBL:6", **{"category": ["biolink:Gene"]})
g1.add_node("HGNC:7", **{"category": ["biolink:Gene"]})
g1.add_node("NCBIGene:8", **{"category": ["biolink:Gene"]})
g1.add_edge(
"ENSEMBL:4",
"HGNC:1",
edge_key=generate_edge_key("ENSEMBL:4", "biolink:same_as", "HGNC:1"),
**{"predicate": "biolink:same_as", "relation": "owl:equivalentClass"}
)
g1.add_edge(
"NCBIGene:3",
"HGNC:1",
edge_key=generate_edge_key("NCBIGene:3", "biolink:same_as", "HGNC:1"),
**{"predicate": "biolink:same_as", "relation": "owl:equivalentClass"}
)
g1.add_edge(
"OMIM:2",
"HGNC:1",
edge_key=generate_edge_key("OMIM:2", "biolink:same_as", "HGNC:1"),
**{"predicate": "biolink:same_as", "relation": "owl:equivalentClass"}
)
g1.add_edge(
"ENSEMBL:6",
"NCBIGene:8",
edge_key=generate_edge_key("ENSEMBL:6", "biolink:same_as", "NCBIGene:8"),
**{"predicate": "biolink:same_as", "relation": "owl:equivalentClass"}
)
g1.add_edge(
"HGNC:7",
"NCBIGene:8",
edge_key=generate_edge_key("HGNC:7", "biolink:same_as", "NCBIGene:8"),
**{"predicate": "biolink:same_as", "relation": "owl:equivalentClass"}
)
updated_graph, clique_graph = clique_merge(
target_graph=g1, prefix_prioritization_map=ppm
)
print_graph(updated_graph)
assert updated_graph.number_of_nodes() == 2
assert updated_graph.number_of_edges() == 0
assert updated_graph.has_node("HGNC:1")
assert updated_graph.has_node("HGNC:7")
n1 = updated_graph.nodes()["HGNC:1"]
assert "OMIM:2" in n1["same_as"]
assert "NCBIGene:3" in n1["same_as"]
assert "ENSEMBL:4" in n1["same_as"]
n2 = updated_graph.nodes()["HGNC:7"]
assert "ENSEMBL:6" in n2["same_as"]
assert "NCBIGene:8" in n2["same_as"]
assert not updated_graph.has_node("OMIM:2")
assert not updated_graph.has_node("NCBIGene:3")
assert not updated_graph.has_node("ENSEMBL:4")
assert not updated_graph.has_node("ENSEMBL:6")
assert not updated_graph.has_node("NCBIGene:8")
def test_clique_merge2():
"""
Test to perform clique merge where all nodes in a clique are
valid but one node has a less specific category.
"""
ppm = {"biolink:Gene": ["HGNC", "NCBIGene", "ENSEMBL", "OMIM"]}
g1 = NxGraph()
g1.add_node("HGNC:1", **{"category": ["biolink:Gene"]})
g1.add_node("OMIM:2", **{"category": ["biolink:NamedThing"]})
g1.add_node("NCBIGene:3", **{"category": ["biolink:NamedThing"]})
g1.add_node("ENSEMBL:4", **{"category": ["biolink:Gene"]})
g1.add_node("ENSEMBL:6", **{"category": ["biolink:Gene"]})
g1.add_node("HGNC:7", **{"category": ["biolink:NamedThing"]})
g1.add_node("NCBIGene:8", **{"category": ["biolink:Gene"]})
g1.add_edge(
"ENSEMBL:4",
"HGNC:1",
edge_key=generate_edge_key("ENSEMBL:4", "biolink:same_as", "HGNC:1"),
**{"predicate": "biolink:same_as", "relation": "owl:equivalentClass"}
)
g1.add_edge(
"NCBIGene:3",
"HGNC:1",
edge_key=generate_edge_key("NCBIGene:3", "biolink:same_as", "HGNC:1"),
**{"predicate": "biolink:same_as", "relation": "owl:equivalentClass"}
)
g1.add_edge(
"OMIM:2",
"HGNC:1",
edge_key=generate_edge_key("OMIM:2", "biolink:same_as", "HGNC:1"),
**{"predicate": "biolink:same_as", "relation": "owl:equivalentClass"}
)
g1.add_edge(
"ENSEMBL:6",
"NCBIGene:8",
edge_key=generate_edge_key("ENSEMBL:6", "biolink:same_as", "NCBIGene:8"),
**{"predicate": "biolink:same_as", "relation": "owl:equivalentClass"}
)
g1.add_edge(
"HGNC:7",
"NCBIGene:8",
edge_key=generate_edge_key("HGNC:7", "biolink:same_as", "NCBIGene:8"),
**{"predicate": "biolink:same_as", "relation": "owl:equivalentClass"}
)
updated_graph, clique_graph = clique_merge(
target_graph=g1, prefix_prioritization_map=ppm
)
assert updated_graph.number_of_nodes() == 2
assert updated_graph.number_of_edges() == 0
assert updated_graph.has_node("HGNC:1")
assert updated_graph.has_node("HGNC:7")
n1 = updated_graph.nodes()["HGNC:1"]
assert "OMIM:2" in n1["same_as"]
assert "NCBIGene:3" in n1["same_as"]
assert "ENSEMBL:4" in n1["same_as"]
n2 = updated_graph.nodes()["HGNC:7"]
assert "ENSEMBL:6" in n2["same_as"]
assert "NCBIGene:8" in n2["same_as"]
assert not updated_graph.has_node("OMIM:2")
assert not updated_graph.has_node("NCBIGene:3")
assert not updated_graph.has_node("ENSEMBL:4")
assert not updated_graph.has_node("ENSEMBL:6")
assert not updated_graph.has_node("NCBIGene:8")
def test_clique_merge3():
"""
Test for clique merge where each clique has a node that
has a non-biolink category.
"""
ppm = {"biolink:Gene": ["HGNC", "NCBIGene", "ENSEMBL", "OMIM"]}
g1 = NxGraph()
g1.add_node("HGNC:1", **{"category": ["biolink:Gene"]})
g1.add_node("OMIM:2", **{"category": ["biolink:NamedThing", "Node"]})
g1.add_node("NCBIGene:3", **{"category": ["biolink:NamedThing"]})
g1.add_node("ENSEMBL:4", **{"category": ["biolink:Gene"]})
g1.add_node("ENSEMBL:6", **{"category": ["biolink:Gene"]})
g1.add_node("HGNC:7", **{"category": ["biolink:NamedThing"]})
g1.add_node("NCBIGene:8", **{"category": ["biolink:NamedThing", "Node"]})
g1.add_edge(
"ENSEMBL:4",
"HGNC:1",
edge_key=generate_edge_key("ENSEMBL:4", "biolink:same_as", "HGNC:1"),
**{"predicate": "biolink:same_as", "relation": "owl:equivalentClass"}
)
g1.add_edge(
"NCBIGene:3",
"HGNC:1",
edge_key=generate_edge_key("NCBIGene:3", "biolink:same_as", "HGNC:1"),
**{"predicate": "biolink:same_as", "relation": "owl:equivalentClass"}
)
g1.add_edge(
"OMIM:2",
"HGNC:1",
edge_key=generate_edge_key("OMIM:2", "biolink:same_as", "HGNC:1"),
**{"predicate": "biolink:same_as", "relation": "owl:equivalentClass"}
)
g1.add_edge(
"ENSEMBL:6",
"NCBIGene:8",
edge_key=generate_edge_key("ENSEMBL:6", "biolink:same_as", "NCBIGene:8"),
**{"predicate": "biolink:same_as", "relation": "owl:equivalentClass"}
)
g1.add_edge(
"HGNC:7",
"NCBIGene:8",
edge_key=generate_edge_key("HGNC:7", "biolink:same_as", "NCBIGene:8"),
**{"predicate": "biolink:same_as", "relation": "owl:equivalentClass"}
)
updated_graph, clique_graph = clique_merge(
target_graph=g1, prefix_prioritization_map=ppm
)
assert updated_graph.number_of_nodes() == 2
assert updated_graph.number_of_edges() == 0
assert updated_graph.has_node("HGNC:1")
assert updated_graph.has_node("HGNC:7")
n1 = updated_graph.nodes()["HGNC:1"]
assert "OMIM:2" in n1["same_as"]
assert "NCBIGene:3" in n1["same_as"]
assert "ENSEMBL:4" in n1["same_as"]
n2 = updated_graph.nodes()["HGNC:7"]
assert "ENSEMBL:6" in n2["same_as"]
assert "NCBIGene:8" in n2["same_as"]
assert not updated_graph.has_node("OMIM:2")
assert not updated_graph.has_node("NCBIGene:3")
assert not updated_graph.has_node("ENSEMBL:4")
assert not updated_graph.has_node("ENSEMBL:6")
assert not updated_graph.has_node("NCBIGene:8")
def test_clique_merge4():
"""
Test for clique merge where each clique has a node that has
a disjoint category from other nodes in a clique. (strict)
"""
ppm = {"biolink:Gene": ["HGNC", "NCBIGene", "ENSEMBL", "OMIM"]}
g1 = NxGraph()
g1.add_node("HGNC:1", **{"category": ["biolink:Gene"]})
g1.add_node("OMIM:2", **{"category": ["biolink:Gene", "biolink:Disease"]})
g1.add_node("NCBIGene:3", **{"category": ["biolink:NamedThing"]})
g1.add_node("ENSEMBL:4", **{"category": ["biolink:Gene"]})
g1.add_node("ENSEMBL:6", **{"category": ["biolink:Gene"]})
g1.add_node("HGNC:7", **{"category": ["biolink:NamedThing"]})
g1.add_node("NCBIGene:8", **{"category": ["biolink:Gene", "biolink:Disease"]})
g1.add_edge(
"ENSEMBL:4",
"HGNC:1",
edge_key=generate_edge_key("ENSEMBL:4", "biolink:same_as", "HGNC:1"),
**{"predicate": "biolink:same_as", "relation": "owl:equivalentClass"}
)
g1.add_edge(
"NCBIGene:3",
"HGNC:1",
edge_key=generate_edge_key("NCBIGene:3", "biolink:same_as", "HGNC:1"),
**{"predicate": "biolink:same_as", "relation": "owl:equivalentClass"}
)
g1.add_edge(
"OMIM:2",
"HGNC:1",
edge_key=generate_edge_key("OMIM:2", "biolink:same_as", "HGNC:1"),
**{"predicate": "biolink:same_as", "relation": "owl:equivalentClass"}
)
g1.add_edge(
"ENSEMBL:6",
"NCBIGene:8",
edge_key=generate_edge_key("ENSEMBL:6", "biolink:same_as", "NCBIGene:8"),
**{"predicate": "biolink:same_as", "relation": "owl:equivalentClass"}
)
g1.add_edge(
"HGNC:7",
"NCBIGene:8",
edge_key=generate_edge_key("HGNC:7", "biolink:same_as", "NCBIGene:8"),
**{"predicate": "biolink:same_as", "relation": "owl:equivalentClass"}
)
updated_graph, clique_graph = clique_merge(
target_graph=g1, prefix_prioritization_map=ppm
)
print("clique graph:")
print_graph(clique_graph)
print("updated graph:")
print_graph(updated_graph)
assert updated_graph.number_of_nodes() == 5
assert updated_graph.number_of_edges() == 3
assert updated_graph.has_node("HGNC:1")
assert updated_graph.has_node("HGNC:7")
n1 = updated_graph.nodes()["HGNC:1"]
assert "OMIM:2" not in n1["same_as"]
assert "NCBIGene:3" in n1["same_as"]
assert "ENSEMBL:4" in n1["same_as"]
n2 = updated_graph.nodes()["HGNC:7"]
assert len(n2["same_as"]) == 0
assert updated_graph.has_node("OMIM:2")
assert not updated_graph.has_node("NCBIGene:3")
assert not updated_graph.has_node("ENSEMBL:4")
assert updated_graph.has_node("ENSEMBL:6")
assert updated_graph.has_node("NCBIGene:8")
def test_clique_merge5():
"""
Test for clique merge where each clique has a node that has
a disjoint category from other nodes in a clique. (lenient)
"""
ppm = {"biolink:Gene": ["HGNC", "NCBIGene", "ENSEMBL", "OMIM"]}
g1 = NxGraph()
g1.add_node("HGNC:1", **{"category": ["biolink:Gene"]})
g1.add_node("OMIM:2", **{"category": ["biolink:Gene", "biolink:Disease"]})
g1.add_node("NCBIGene:3", **{"category": ["biolink:NamedThing"]})
g1.add_node("ENSEMBL:4", **{"category": ["biolink:Gene"]})
g1.add_node("ENSEMBL:6", **{"category": ["biolink:Gene"]})
g1.add_node("HGNC:7", **{"category": ["biolink:NamedThing"]})
g1.add_node("NCBIGene:8", **{"category": ["biolink:Gene", "biolink:Disease"]})
g1.add_edge(
"ENSEMBL:4",
"HGNC:1",
edge_key=generate_edge_key("ENSEMBL:4", "biolink:same_as", "HGNC:1"),
**{"predicate": "biolink:same_as", "relation": "owl:equivalentClass"}
)
g1.add_edge(
"NCBIGene:3",
"HGNC:1",
edge_key=generate_edge_key("NCBIGene:3", "biolink:same_as", "HGNC:1"),
**{"predicate": "biolink:same_as", "relation": "owl:equivalentClass"}
)
g1.add_edge(
"OMIM:2",
"HGNC:1",
edge_key=generate_edge_key("OMIM:2", "biolink:same_as", "HGNC:1"),
**{"predicate": "biolink:same_as", "relation": "owl:equivalentClass"}
)
g1.add_edge(
"ENSEMBL:6",
"NCBIGene:8",
edge_key=generate_edge_key("ENSEMBL:6", "biolink:same_as", "NCBIGene:8"),
**{"predicate": "biolink:same_as", "relation": "owl:equivalentClass"}
)
g1.add_edge(
"HGNC:7",
"NCBIGene:8",
edge_key=generate_edge_key("HGNC:7", "biolink:same_as", "NCBIGene:8"),
**{"predicate": "biolink:same_as", "relation": "owl:equivalentClass"}
)
updated_graph, clique_graph = clique_merge(
target_graph=g1, prefix_prioritization_map=ppm, strict=False
)
assert updated_graph.number_of_nodes() == 2
assert updated_graph.number_of_edges() == 0
assert updated_graph.has_node("HGNC:1")
assert updated_graph.has_node("HGNC:7")
n1 = updated_graph.nodes()["HGNC:1"]
assert "OMIM:2" in n1["same_as"]
assert "NCBIGene:3" in n1["same_as"]
assert "ENSEMBL:4" in n1["same_as"]
n2 = updated_graph.nodes()["HGNC:7"]
assert "ENSEMBL:6" in n2["same_as"]
assert "NCBIGene:8" in n2["same_as"]
assert not updated_graph.has_node("OMIM:2")
assert not updated_graph.has_node("NCBIGene:3")
assert not updated_graph.has_node("ENSEMBL:4")
assert not updated_graph.has_node("ENSEMBL:6")
assert not updated_graph.has_node("NCBIGene:8")
def test_clique_merge6():
"""
Test for clique merge where each clique has a node that has
a disjoint category from other nodes in a clique and the node is a
participant in same_as edges.
"""
ppm = {"biolink:Gene": ["HGNC", "NCBIGene", "ENSEMBL", "OMIM"]}
g1 = NxGraph()
g1.add_node("HGNC:1", **{"category": ["biolink:Gene"]})
g1.add_node("OMIM:2", **{"category": ["biolink:Disease"]})
g1.add_node("NCBIGene:3", **{"category": ["biolink:NamedThing"]})
g1.add_node("ENSEMBL:4", **{"category": ["biolink:Gene"]})
g1.add_node("ENSEMBL:6", **{"category": ["biolink:Gene"]})
g1.add_node("HGNC:7", **{"category": ["biolink:NamedThing"]})
g1.add_node("NCBIGene:8", **{"category": ["biolink:Disease"]})
g1.add_edge(
"ENSEMBL:4",
"HGNC:1",
edge_key=generate_edge_key("ENSEMBL:4", "biolink:same_as", "HGNC:1"),
**{"predicate": "biolink:same_as", "relation": "owl:equivalentClass"}
)
g1.add_edge(
"NCBIGene:3",
"HGNC:1",
edge_key=generate_edge_key("NCBIGene:3", "biolink:same_as", "HGNC:1"),
**{"predicate": "biolink:same_as", "relation": "owl:equivalentClass"}
)
g1.add_edge(
"OMIM:2",
"HGNC:1",
edge_key=generate_edge_key("OMIM:2", "biolink:same_as", "HGNC:1"),
**{"predicate": "biolink:same_as", "relation": "owl:equivalentClass"}
)
g1.add_edge(
"ENSEMBL:6",
"NCBIGene:8",
edge_key=generate_edge_key("ENSEMBL:6", "biolink:same_as", "NCBIGene:8"),
**{"predicate": "biolink:same_as", "relation": "owl:equivalentClass"}
)
g1.add_edge(
"HGNC:7",
"NCBIGene:8",
edge_key=generate_edge_key("HGNC:7", "biolink:same_as", "NCBIGene:8"),
**{"predicate": "biolink:same_as", "relation": "owl:equivalentClass"}
)
updated_graph, clique_graph = clique_merge(
target_graph=g1, prefix_prioritization_map=ppm
)
assert updated_graph.number_of_nodes() == 5
assert updated_graph.number_of_edges() == 3
assert updated_graph.has_node("HGNC:1")
assert updated_graph.has_node("HGNC:7")
n1 = updated_graph.nodes()["HGNC:1"]
assert "NCBIGene:3" in n1["same_as"]
assert "ENSEMBL:4" in n1["same_as"]
assert "OMIM:2" not in n1["same_as"]
n2 = updated_graph.nodes()["HGNC:7"]
assert "ENSEMBL:6" not in n2["same_as"]
assert "NCBIGene:8" not in n2["same_as"]
assert updated_graph.has_node("OMIM:2")
assert not updated_graph.has_node("NCBIGene:3")
assert not updated_graph.has_node("ENSEMBL:4")
assert updated_graph.has_node("ENSEMBL:6")
assert updated_graph.has_node("NCBIGene:8")
def test_clique_merge7():
"""
Test for clique merge where each clique has a node that has
a disjoint category from other nodes in a clique and the node is
not a participant in same_as edges.
"""
ppm = {"biolink:Gene": ["HGNC", "NCBIGene", "ENSEMBL", "OMIM"]}
g1 = NxGraph()
g1.add_node("HGNC:1", **{"category": ["biolink:Gene"]})
g1.add_node("OMIM:2", **{"category": ["biolink:Disease"]})
g1.add_node("NCBIGene:3", **{"category": ["biolink:NamedThing"]})
g1.add_node("ENSEMBL:4", **{"category": ["biolink:Gene"]})
g1.add_node("ENSEMBL:6", **{"category": ["biolink:Gene"]})
g1.add_node("HGNC:7", **{"category": ["biolink:Disease"]})
g1.add_node("NCBIGene:8", **{"category": ["biolink:Gene"]})
g1.add_edge(
"ENSEMBL:4",
"HGNC:1",
edge_key=generate_edge_key("ENSEMBL:4", "biolink:same_as", "HGNC:1"),
**{"predicate": "biolink:same_as", "relation": "owl:equivalentClass"}
)
g1.add_edge(
"NCBIGene:3",
"HGNC:1",
edge_key=generate_edge_key("NCBIGene:3", "biolink:same_as", "HGNC:1"),
**{"predicate": "biolink:same_as", "relation": "owl:equivalentClass"}
)
g1.add_edge(
"OMIM:2",
"HGNC:1",
edge_key=generate_edge_key("OMIM:2", "biolink:same_as", "HGNC:1"),
**{"predicate": "biolink:same_as", "relation": "owl:equivalentClass"}
)
g1.add_edge(
"ENSEMBL:6",
"NCBIGene:8",
edge_key=generate_edge_key("ENSEMBL:6", "biolink:same_as", "NCBIGene:8"),
**{"predicate": "biolink:same_as", "relation": "owl:equivalentClass"}
)
g1.add_edge(
"HGNC:7",
"NCBIGene:8",
edge_key=generate_edge_key("HGNC:7", "biolink:same_as", "NCBIGene:8"),
**{"predicate": "biolink:same_as", "relation": "owl:equivalentClass"}
)
updated_graph, clique_graph = clique_merge(
target_graph=g1, prefix_prioritization_map=ppm
)
assert updated_graph.number_of_nodes() == 4
assert updated_graph.number_of_edges() == 2
assert updated_graph.has_node("HGNC:1")
assert updated_graph.has_node("NCBIGene:8")
n1 = updated_graph.nodes()["HGNC:1"]
assert "NCBIGene:3" in n1["same_as"]
assert "ENSEMBL:4" in n1["same_as"]
assert "OMIM:2" not in n1["same_as"]
n2 = updated_graph.nodes()["NCBIGene:8"]
assert "ENSEMBL:6" in n2["same_as"]
assert updated_graph.has_node("OMIM:2")
assert not updated_graph.has_node("NCBIGene:3")
assert not updated_graph.has_node("ENSEMBL:4")
assert updated_graph.has_node("HGNC:7")
def test_clique_merge8():
"""
    Test for clique merge where same_as appears as both a node and an edge property.
"""
ppm = {"biolink:Gene": ["HGNC", "NCBIGene", "ENSEMBL", "OMIM"]}
g1 = NxGraph()
g1.add_node("HGNC:1", **{"category": ["biolink:Gene"]})
g1.add_node("OMIM:2", **{"category": ["biolink:Gene"], "same_as": ["HGNC:1"]})
g1.add_node("NCBIGene:3", **{"category": ["biolink:NamedThing"]})
g1.add_node("ENSEMBL:4", **{"category": ["biolink:Gene"], "same_as": ["HGNC:1"]})
g1.add_node(
"ENSEMBL:6", **{"category": ["biolink:Gene"], "same_as": ["NCBIGene:8"]}
)
g1.add_node("HGNC:7", **{"category": ["biolink:Gene"]})
g1.add_node("NCBIGene:8", **{"category": ["biolink:Gene"]})
g1.add_edge(
"NCBIGene:3",
"HGNC:1",
edge_key=generate_edge_key("NCBIGene:3", "biolink:same_as", "HGNC:1"),
**{"predicate": "biolink:same_as", "relation": "owl:equivalentClass"}
)
g1.add_edge(
"ENSEMBL:6",
"NCBIGene:8",
edge_key=generate_edge_key("ENSEMBL:6", "biolink:same_as", "NCBIGene:8"),
**{"predicate": "biolink:same_as", "relation": "owl:equivalentClass"}
)
g1.add_edge(
"HGNC:7",
"NCBIGene:8",
edge_key=generate_edge_key("HGNC:7", "biolink:same_as", "NCBIGene:8"),
**{"predicate": "biolink:same_as", "relation": "owl:equivalentClass"}
)
updated_graph, clique_graph = clique_merge(
target_graph=g1, prefix_prioritization_map=ppm
)
assert updated_graph.number_of_nodes() == 2
assert updated_graph.number_of_edges() == 0
assert updated_graph.has_node("HGNC:1")
assert updated_graph.has_node("HGNC:7")
n1 = updated_graph.nodes()["HGNC:1"]
assert "OMIM:2" in n1["same_as"]
assert "NCBIGene:3" in n1["same_as"]
assert "ENSEMBL:4" in n1["same_as"]
n2 = updated_graph.nodes()["HGNC:7"]
assert "ENSEMBL:6" in n2["same_as"]
assert "NCBIGene:8" in n2["same_as"]
assert not updated_graph.has_node("OMIM:2")
assert not updated_graph.has_node("NCBIGene:3")
assert not updated_graph.has_node("ENSEMBL:4")
assert not updated_graph.has_node("ENSEMBL:6")
assert not updated_graph.has_node("NCBIGene:8")
def test_clique_merge9():
"""
    Test for clique merge where same_as appears as both a node and an edge property,
    but an invalid node also has a same_as property and participates in a same_as edge.
"""
ppm = {"biolink:Gene": ["HGNC", "NCBIGene", "ENSEMBL", "OMIM"]}
g1 = NxGraph()
g1.add_node("HGNC:1", **{"category": ["biolink:Gene"]})
g1.add_node("OMIM:2", **{"category": ["biolink:Disease"], "same_as": ["HGNC:1"]})
g1.add_node("NCBIGene:3", **{"category": ["biolink:NamedThing"]})
g1.add_node("ENSEMBL:4", **{"category": ["biolink:Gene"], "same_as": ["HGNC:1"]})
g1.add_node(
"ENSEMBL:6", **{"category": ["biolink:Gene"], "same_as": ["NCBIGene:8"]}
)
g1.add_node("HGNC:7", **{"category": ["biolink:Gene"]})
g1.add_node("NCBIGene:8", **{"category": ["biolink:Gene"]})
g1.add_edge(
"X:00001",
"OMIM:2",
edge_key=generate_edge_key("X:00001", "biolink:same_as", "OMIM:2"),
**{"predicate": "biolink:same_as", "relation": "owl:equivalentClass"}
)
g1.add_edge(
"NCBIGene:3",
"HGNC:1",
edge_key=generate_edge_key("NCBIGene:3", "biolink:same_as", "HGNC:1"),
**{"predicate": "biolink:same_as", "relation": "owl:equivalentClass"}
)
g1.add_edge(
"ENSEMBL:6",
"NCBIGene:8",
edge_key=generate_edge_key("ENSEMBL:6", "biolink:same_as", "NCBIGene:8"),
**{"predicate": "biolink:same_as", "relation": "owl:equivalentClass"}
)
g1.add_edge(
"HGNC:7",
"NCBIGene:8",
edge_key=generate_edge_key("HGNC:7", "biolink:same_as", "NCBIGene:8"),
**{"predicate": "biolink:same_as", "relation": "owl:equivalentClass"}
)
updated_graph, clique_graph = clique_merge(
target_graph=g1, prefix_prioritization_map=ppm
)
assert updated_graph.number_of_nodes() == 4
assert updated_graph.number_of_edges() == 1
assert updated_graph.has_node("HGNC:1")
assert updated_graph.has_node("HGNC:7")
n1 = updated_graph.nodes()["HGNC:1"]
assert "OMIM:2" not in n1["same_as"]
assert "NCBIGene:3" in n1["same_as"]
assert "ENSEMBL:4" in n1["same_as"]
n2 = updated_graph.nodes()["HGNC:7"]
assert "ENSEMBL:6" in n2["same_as"]
assert "NCBIGene:8" in n2["same_as"]
assert updated_graph.has_node("OMIM:2")
| 26,434 | 34.819783 | 99 |
py
|
kgx
|
kgx-master/tests/unit/test_kgx_utils.py
|
import pytest
import pandas as pd
import numpy as np
from bmt import Toolkit
from kgx.curie_lookup_service import CurieLookupService
from kgx.utils.kgx_utils import (
get_toolkit,
get_curie_lookup_service,
get_prefix_prioritization_map,
get_biolink_element,
get_biolink_ancestors,
generate_edge_key,
contract,
expand,
camelcase_to_sentencecase,
snakecase_to_sentencecase,
sentencecase_to_snakecase,
sentencecase_to_camelcase,
generate_uuid,
prepare_data_dict,
sanitize_import,
build_export_row,
_sanitize_import_property,
_sanitize_export_property,
)
def test_get_toolkit():
"""
Test to get an instance of Toolkit via get_toolkit and
check if default is the default biolink model version.
"""
tk = get_toolkit()
assert isinstance(tk, Toolkit)
assert tk.get_model_version() == Toolkit().get_model_version()
def test_get_curie_lookup_service():
"""
Test to get an instance of CurieLookupService via get_curie_lookup_service.
"""
cls = get_curie_lookup_service()
assert isinstance(cls, CurieLookupService)
def test_get_prefix_prioritization_map():
"""
Test to get a prefix prioritization map.
"""
prioritization_map = get_prefix_prioritization_map()
assert "biolink:Gene" in prioritization_map.keys()
assert "biolink:Protein" in prioritization_map.keys()
assert "biolink:Disease" in prioritization_map.keys()
@pytest.mark.parametrize(
"query",
[
("gene", "gene"),
("disease", "disease"),
("related_to", "related to"),
("causes", "causes"),
("biolink:Gene", "gene"),
("biolink:causes", "causes"),
],
)
def test_get_biolink_element(query):
"""
Test to get biolink element.
"""
element1 = get_biolink_element(query[0])
assert element1 is not None
assert element1.name == query[1]
def test_get_biolink_ancestors():
"""
Test to get biolink ancestors.
"""
ancestors1 = get_biolink_ancestors("phenotypic feature")
assert ancestors1 is not None
    # changed from 5 to 6 when the biolink model was updated to 2.2.1 and mixins were included in ancestry
assert len(ancestors1) == 6
def test_generate_edge_key():
"""
Test generation of edge key via generate_edge_key method.
"""
key = generate_edge_key("S:CURIE", "related_to", "O:CURIE")
assert key == "S:CURIE-related_to-O:CURIE"
def test_camelcase_to_sentencecase():
"""
Test conversion of CamelCase to sentence case.
"""
s = camelcase_to_sentencecase("NamedThing")
assert s == "named thing"
def test_snakecase_to_sentencecase():
"""
Test conversion of a snake_case to sentence case.
"""
s = snakecase_to_sentencecase("named_thing")
assert s == "named thing"
def test_sentencecase_to_snakecase():
"""
Test conversion of a sentence case text to snake_case.
"""
s = sentencecase_to_snakecase("named thing")
assert s == "named_thing"
def test_sentencecase_to_camelcase():
"""
Test conversion of a sentence case text to CamelCase.
"""
s = sentencecase_to_camelcase("named thing")
assert s == "NamedThing"
@pytest.mark.parametrize(
"query",
[
(
"HGNC:11603",
"http://www.genenames.org/cgi-bin/gene_symbol_report?hgnc_id=11603",
"https://identifiers.org/hgnc:11603",
)
],
)
def test_contract(query):
"""
Test contract method for contracting an IRI to a CURIE.
"""
curie = contract(query[1], prefix_maps=None, fallback=True)
# get the CURIE
assert curie == query[0]
# provide custom prefix_maps, with fallback
curie = contract(
query[2], prefix_maps=[{"HGNC": "https://identifiers.org/hgnc:"}], fallback=True
)
# get the CURIE
assert curie == query[0]
# provide custom prefix_maps, but no fallback
curie = contract(
query[2],
prefix_maps=[{"HGNC": "https://identifiers.org/hgnc:"}],
fallback=False,
)
# get the CURIE
assert curie == query[0]
# provide no prefix_maps, and no fallback
curie = contract(query[2], prefix_maps=None, fallback=False)
# get back the IRI
assert curie == query[2]
@pytest.mark.parametrize(
"query",
[
(
"HGNC:11603",
"http://www.genenames.org/cgi-bin/gene_symbol_report?hgnc_id=11603",
"https://identifiers.org/hgnc:11603",
)
],
)
def test_expand(query):
"""
Test expand method for expanding a CURIE to an IRI.
"""
iri = expand(query[0], prefix_maps=None, fallback=True)
# get the IRI
assert iri == query[1]
# provide custom prefix_maps, with fallback
iri = expand(
query[0], prefix_maps=[{"HGNC": "https://identifiers.org/hgnc:"}], fallback=True
)
# get the alternate IRI
assert iri == query[2]
# provide custom prefix_maps, but no fallback
iri = expand(
query[0], prefix_maps=[{"hgnc": "https://example.org/hgnc:"}], fallback=False
)
# get back the CURIE
assert iri == query[0]
# provide no prefix_maps, and no fallback
iri = expand(query[0], prefix_maps=None, fallback=False)
# get the IRI
assert iri == query[1]
def test_generate_uuid():
"""
Test generation of UUID by generate_uuid method.
"""
s = generate_uuid()
assert s.startswith("urn:uuid:")
@pytest.mark.parametrize(
"query",
[
(
{"id": "HGNC:11603", "name": "Some Gene", "provided_by": ["Dataset A"]},
{"id": "HGNC:11603", "name": "Some Gene", "provided_by": "Dataset B"},
{"id": "HGNC:11603", "name": "Some Gene", "provided_by": ["Dataset C"]},
),
(
{"id": "HGNC:11603", "name": "Some Gene", "provided_by": ["Dataset A"]},
{"id": "HGNC:11603", "name": "Some Gene", "provided_by": "Dataset B"},
{},
),
],
)
def test_prepare_data_dict(query):
"""
Test prepare_data_dict method.
"""
res = prepare_data_dict(query[0], query[1])
res = prepare_data_dict(res, query[2])
assert res is not None
@pytest.mark.parametrize(
"query",
[
({"id": "A", "name": "Node A"}, {"id": "A", "name": "Node A"}),
(
{"id": "A", "name": "Node A", "description": None},
{"id": "A", "name": "Node A"},
),
(
{
"id": "A",
"name": "Node A",
"description": None,
"publications": "PMID:1234|PMID:1456|PMID:3466",
},
{
"id": "A",
"name": "Node A",
"publications": ["PMID:1234", "PMID:1456", "PMID:3466"],
},
),
(
{
"id": "A",
"name": "Node A",
"description": None,
"property": [pd.NA, 123, "ABC"],
},
{"id": "A", "name": "Node A", "property": [123, "ABC"]},
),
(
{
"id": "A",
"name": "Node A",
"description": None,
"property": [pd.NA, 123, "ABC"],
"score": 0.0,
},
{"id": "A", "name": "Node A", "property": [123, "ABC"], "score": 0.0},
),
],
)
def test_sanitize_import1(query):
"""
Test sanitize_import method.
"""
d = sanitize_import(query[0], list_delimiter='|')
for k, v in query[1].items():
assert k in d
if isinstance(v, list):
assert set(d[k]) == set(v)
else:
assert d[k] == v
@pytest.mark.parametrize(
"query",
[
(("category", "biolink:Gene"), ["biolink:Gene"]),
(
("publications", "PMID:123|PMID:456|PMID:789"),
["PMID:123", "PMID:456", "PMID:789"],
),
(("negated", "True"), True),
(("negated", True), True),
(("negated", True), True),
(("xref", {"a", "b", "c"}), ["a", "b", "c"]),
(("xref", "a|b|c"), ["a", "b", "c"]),
(("valid", "True"), "True"),
(("valid", True), True),
(("alias", "xyz"), "xyz"),
(("description", "Line 1\nLine 2\nLine 3"), "Line 1 Line 2 Line 3"),
],
)
def test_sanitize_import2(query):
"""
Test internal sanitize_import method.
"""
value = _sanitize_import_property(query[0][0], query[0][1], list_delimiter='|')
if isinstance(query[1], str):
assert value == query[1]
elif isinstance(query[1], (list, set, tuple)):
for x in query[1]:
assert x in value
elif isinstance(query[1], bool):
assert query[1] == value
else:
assert query[1] in value
@pytest.mark.parametrize(
"query",
[
(
{
"id": "A",
"name": "Node A",
"category": ["biolink:NamedThing", "biolink:Gene"],
},
{
"id": "A",
"name": "Node A",
"category": "biolink:NamedThing|biolink:Gene",
},
),
(
{
"id": "A",
"name": "Node A",
"category": ["biolink:NamedThing", "biolink:Gene"],
"xrefs": [np.nan, "UniProtKB:123", None, "NCBIGene:456"],
},
{
"id": "A",
"name": "Node A",
"category": "biolink:NamedThing|biolink:Gene",
"xrefs": "UniProtKB:123|NCBIGene:456",
},
),
],
)
def test_build_export_row(query):
"""
Test build_export_row method.
"""
d = build_export_row(query[0], list_delimiter="|")
for k, v in query[1].items():
assert k in d
assert d[k] == v
@pytest.mark.parametrize(
"query",
[
(("category", "biolink:Gene"), ["biolink:Gene"]),
(
("publications", ["PMID:123", "PMID:456", "PMID:789"]),
"PMID:123|PMID:456|PMID:789",
),
(("negated", "True"), True),
(("negated", True), True),
(("negated", True), True),
(("xref", {"a", "b", "c"}), ["a", "b", "c"]),
(("xref", ["a", "b", "c"]), "a|b|c"),
(("valid", "True"), "True"),
(("valid", True), True),
(("alias", "xyz"), "xyz"),
(("description", "Line 1\nLine 2\nLine 3"), "Line 1 Line 2 Line 3"),
],
)
def test_sanitize_export_property(query):
"""
Test sanitize_export method.
"""
value = _sanitize_export_property(query[0][0], query[0][1], list_delimiter='|')
if isinstance(query[1], str):
assert value == query[1]
elif isinstance(query[1], (list, set, tuple)):
for x in query[1]:
assert x in value
elif isinstance(query[1], bool):
assert query[1] == value
else:
assert query[1] in value
| 10,963 | 26.341646 | 97 |
py
|
kgx
|
kgx-master/tests/unit/test_summarize_graph.py
|
import os
import pytest
from deprecation import deprecated
from kgx.graph.nx_graph import NxGraph
from kgx.graph_operations.summarize_graph import (
summarize_graph,
generate_graph_stats,
GraphSummary,
TOTAL_NODES,
NODE_CATEGORIES,
NODE_ID_PREFIXES,
NODE_ID_PREFIXES_BY_CATEGORY,
COUNT_BY_CATEGORY,
COUNT_BY_ID_PREFIXES,
COUNT_BY_ID_PREFIXES_BY_CATEGORY,
TOTAL_EDGES,
EDGE_PREDICATES,
COUNT_BY_EDGE_PREDICATES,
COUNT_BY_SPO,
)
from kgx.transformer import Transformer
from tests import RESOURCE_DIR, TARGET_DIR
try:
from yaml import load, CLoader as Loader
except ImportError:
from yaml import load, Loader
def get_graphs():
"""
Returns instances of defined graphs.
"""
g1 = NxGraph()
g1.name = "Graph 1"
g1.add_node("A", id="A", name="Node A", category=["biolink:NamedThing"])
g1.add_node("B", id="B", name="Node B", category=["biolink:NamedThing"])
g1.add_node("C", id="C", name="Node C", category=["biolink:NamedThing"])
g1.add_edge(
"C",
"B",
edge_key="C-biolink:subclass_of-B",
predicate="biolink:sub_class_of",
relation="rdfs:subClassOf",
)
g1.add_edge(
"B",
"A",
edge_key="B-biolink:subclass_of-A",
predicate="biolink:sub_class_of",
relation="rdfs:subClassOf",
provided_by="Graph 1",
)
g2 = NxGraph()
g2.name = "Graph 2"
g2.add_node(
"A",
id="A",
name="Node A",
description="Node A in Graph 2",
category=["biolink:NamedThing"],
)
g2.add_node(
"B",
id="B",
name="Node B",
description="Node B in Graph 2",
category=["biolink:NamedThing"],
)
g2.add_node(
"C",
id="C",
name="Node C",
description="Node C in Graph 2",
category=["biolink:NamedThing"],
)
g2.add_node(
"D",
id="D",
name="Node D",
description="Node D in Graph 2",
category=["biolink:NamedThing"],
)
g2.add_node(
"E",
id="E",
name="Node E",
description="Node E in Graph 2",
category=["biolink:NamedThing"],
)
g2.add_edge(
"B",
"A",
edge_key="B-biolink:related_to-A",
predicate="biolink:related_to",
relation="biolink:related_to",
)
g2.add_edge(
"D",
"A",
edge_key="D-biolink:related_to-A",
predicate="biolink:related_to",
relation="biolink:related_to",
)
g2.add_edge(
"E",
"A",
edge_key="E-biolink:related_to-A",
predicate="biolink:related_to",
relation="biolink:related_to",
)
g3 = NxGraph()
g3.name = "Graph 3"
g3.add_edge(
"F",
"E",
edge_key="F-biolink:same_as-E",
predicate="biolink:same_as",
relation="OWL:same_as",
)
return [g1, g2, g3]
@deprecated(deprecated_in="1.5.8", details="Default is the use streaming graph_summary with inspector")
def test_generate_graph_stats():
"""
Test for generating graph stats.
"""
graphs = get_graphs()
for g in graphs:
filename = os.path.join(TARGET_DIR, f"{g.name}_stats.yaml")
generate_graph_stats(g, g.name, filename)
assert os.path.exists(filename)
@pytest.mark.parametrize(
"query",
[
(
get_graphs()[0],
{
"node_stats": {
"total_nodes": 3,
"node_categories": ["biolink:NamedThing"],
"count_by_category": {
"unknown": {"count": 0},
"biolink:NamedThing": {"count": 3},
},
},
"edge_stats": {"total_edges": 2},
"predicates": ["biolink:subclass_of"],
"count_by_predicates": {
"unknown": {"count": 0},
"biolink:subclass_of": {"count": 2},
},
"count_by_spo": {
"biolink:NamedThing-biolink:subclass_of-biolink:NamedThing": {
"count": 2
}
},
},
),
(
get_graphs()[1],
{
"node_stats": {
"total_nodes": 5,
"node_categories": ["biolink:NamedThing"],
"count_by_category": {
"unknown": {"count": 0},
"biolink:NamedThing": {"count": 5},
},
},
"edge_stats": {
"total_edges": 3,
"predicates": ["biolink:related_to"],
"count_by_predicates": {
"unknown": {"count": 0},
"biolink:related_to": {"count": 3},
},
"count_by_spo": {
"biolink:NamedThing-biolink:related_to-biolink:NamedThing": {
"count": 3
}
},
},
},
),
(
get_graphs()[2],
{
"node_stats": {
"total_nodes": 2,
"node_categories": [],
"count_by_category": {"unknown": {"count": 2}},
},
"edge_stats": {
"total_edges": 1,
"predicates": ["biolink:same_as"],
"count_by_predicates": {
"unknown": {"count": 0},
"biolink:same_as": {"count": 1},
},
"count_by_spo": {"unknown-biolink:same_as-unknown": {"count": 1}},
},
},
),
],
)
def test_summarize_graph(query):
"""
Test for generating graph stats, and comparing the resulting stats.
"""
stats = summarize_graph(query[0])
for k, v in query[1]["node_stats"].items():
assert v == stats["node_stats"][k]
for k, v in query[1]["edge_stats"].items():
assert v == stats["edge_stats"][k]
def test_summarize_graph_inspector():
"""
    Test generating the graph summary by streaming
    graph data through a graph Transformer.process() Inspector.
"""
input_args = {
"filename": [
os.path.join(RESOURCE_DIR, "graph_nodes.tsv"),
os.path.join(RESOURCE_DIR, "graph_edges.tsv"),
],
"format": "tsv",
}
transformer = Transformer(stream=True)
inspector = GraphSummary("Test Graph Summary - Streamed")
# We configure the Transformer with a data flow inspector
# (Deployed in the internal Transformer.process() call)
transformer.transform(input_args=input_args, inspector=inspector)
output_filename = os.path.join(
TARGET_DIR, "test_graph-summary-from-inspection.yaml"
)
# Dump a report to stderr ... will be a JSON document now
if len(inspector.get_errors()) > 0:
assert len(inspector.get_errors("Error")) == 0
assert len(inspector.get_errors("Warning")) > 0
inspector.write_report(None, "Warning")
with open(output_filename, "w") as gsh:
inspector.save(gsh)
with open(output_filename, "r") as gsh:
data = load(stream=gsh, Loader=Loader)
assert data["graph_name"] == "Test Graph Summary - Streamed"
node_stats = data["node_stats"]
assert node_stats
assert TOTAL_NODES in node_stats
assert node_stats[TOTAL_NODES] == 512
assert NODE_CATEGORIES in node_stats
node_categories = node_stats[NODE_CATEGORIES]
assert "biolink:Pathway" in node_categories
assert NODE_ID_PREFIXES in node_stats
node_id_prefixes = node_stats[NODE_ID_PREFIXES]
assert "HGNC" in node_id_prefixes
assert NODE_ID_PREFIXES_BY_CATEGORY in node_stats
id_prefixes_by_category = node_stats[NODE_ID_PREFIXES_BY_CATEGORY]
assert "biolink:Gene" in id_prefixes_by_category
assert "ENSEMBL" in id_prefixes_by_category["biolink:Gene"]
assert "biolink:Disease" in id_prefixes_by_category
assert "MONDO" in id_prefixes_by_category["biolink:Disease"]
assert "biolink:PhenotypicFeature" in id_prefixes_by_category
assert "HP" in id_prefixes_by_category["biolink:PhenotypicFeature"]
assert COUNT_BY_CATEGORY in node_stats
count_by_category = node_stats[COUNT_BY_CATEGORY]
assert "biolink:AnatomicalEntity" in count_by_category
assert count_by_category["biolink:AnatomicalEntity"]["count"] == 20
assert COUNT_BY_ID_PREFIXES in node_stats
count_by_id_prefixes = node_stats[COUNT_BY_ID_PREFIXES]
assert "HP" in count_by_id_prefixes
assert count_by_id_prefixes["HP"] == 111
assert COUNT_BY_ID_PREFIXES_BY_CATEGORY in node_stats
count_by_id_prefixes_by_category = node_stats[COUNT_BY_ID_PREFIXES_BY_CATEGORY]
assert "biolink:BiologicalProcess" in count_by_id_prefixes_by_category
biological_process_id_prefix_count = count_by_id_prefixes_by_category[
"biolink:BiologicalProcess"
]
assert "GO" in biological_process_id_prefix_count
assert biological_process_id_prefix_count["GO"] == 143
edge_stats = data["edge_stats"]
assert edge_stats
assert TOTAL_EDGES in edge_stats
assert edge_stats[TOTAL_EDGES] == 539
assert EDGE_PREDICATES in edge_stats
assert len(edge_stats[EDGE_PREDICATES]) == 8
assert "biolink:actively_involved_in" in edge_stats[EDGE_PREDICATES]
assert COUNT_BY_EDGE_PREDICATES in edge_stats
assert len(edge_stats[COUNT_BY_EDGE_PREDICATES]) == 9
assert "biolink:has_phenotype" in edge_stats[COUNT_BY_EDGE_PREDICATES]
assert edge_stats[COUNT_BY_EDGE_PREDICATES]["biolink:has_phenotype"]["count"] == 124
assert COUNT_BY_SPO in edge_stats
assert len(edge_stats[COUNT_BY_SPO]) == 13
assert "biolink:Gene-biolink:related_to-biolink:Pathway" in edge_stats[COUNT_BY_SPO]
assert (
"count"
in edge_stats[COUNT_BY_SPO]["biolink:Gene-biolink:related_to-biolink:Pathway"]
)
assert (
edge_stats[COUNT_BY_SPO]["biolink:Gene-biolink:related_to-biolink:Pathway"][
"count"
]
== 16
)
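# Illustrative (non-test) sketch of the streaming-summary pattern exercised above.
# It reuses only the classes already used in this module (Transformer, GraphSummary);
# the graph name, file paths, and the helper name itself are placeholders rather than
# part of the kgx test fixtures.
def _example_streaming_graph_summary(node_file, edge_file, out_path):
    transformer = Transformer(stream=True)
    inspector = GraphSummary("Example Graph Summary")
    # The inspector is applied inside Transformer.process() as records stream by.
    transformer.transform(
        input_args={"filename": [node_file, edge_file], "format": "tsv"},
        inspector=inspector,
    )
    # Persist the accumulated summary as YAML, as the test above does.
    with open(out_path, "w") as fh:
        inspector.save(fh)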
| 10,327 | 29.829851 | 103 |
py
|
kgx
|
kgx-master/tests/unit/__init__.py
|
import docker
import pytest
from neo4j import GraphDatabase
from kgx.graph.nx_graph import NxGraph
CONTAINER_NAME = "kgx-neo4j-unit-test"
DEFAULT_NEO4J_URL = "neo4j://localhost:7687"
DEFAULT_NEO4J_USERNAME = "neo4j"
DEFAULT_NEO4J_PASSWORD = "test"
DEFAULT_NEO4J_DATABASE = "neo4j"
def check_container():
"""
Check whether the container with the name ``CONTAINER_NAME``
is currently running.
"""
try:
client = docker.from_env()
status = False
try:
c = client.containers.get(CONTAINER_NAME)
if c.status == "running":
status = True
except:
status = False
except:
status = False
return status
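# Hedged usage note: check_container() is intended as a pytest guard, mirroring
# how the Neo4j-dependent tests in this suite skip themselves when the container
# is not running, e.g.:
#
#     @pytest.mark.skipif(
#         not check_container(), reason=f"Container {CONTAINER_NAME} is not running"
#     )
#     def test_requiring_neo4j(clean_database):
#         ...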
@pytest.fixture(scope="function")
def clean_database():
"""
Delete all nodes and edges in Neo4j test container.
"""
with GraphDatabase.driver(
DEFAULT_NEO4J_URL,
auth=(DEFAULT_NEO4J_USERNAME, DEFAULT_NEO4J_PASSWORD)
) as http_driver:
q = "MATCH (n) DETACH DELETE (n)"
try:
session = http_driver.session()
session.run(q)
except Exception as e:
print(e)
# This name is a bit of a misnomer: yes, it processes a stream,
# but it then loads everything into in-memory data structures,
# which could be problematic for huge graphs.
def load_graph_dictionary(g):
"""
    Process a given stream of records into node and edge dictionaries.
"""
nodes = {}
edges = {}
for rec in g:
if rec:
if len(rec) == 4:
key = (rec[0], rec[1])
if key in edges:
edges[key].append(rec[-1])
else:
edges[key] = [rec[-1]]
else:
nodes[rec[0]] = rec[-1]
return nodes, edges
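# Illustrative sketch of how the test modules consume this helper. The source
# class and filename below are placeholders; any KGX source whose parse() yields
# 2-tuple node records and 4-tuple edge records works the same way:
#
#     t = Transformer()
#     source = JsonSource(t)
#     nodes, edges = load_graph_dictionary(source.parse("graph.json"))
#     assert "MONDO:0017148" in nodes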
def get_graph(source):
"""
Returns a series of defined graphs.
"""
g1 = NxGraph()
g1.name = "Graph 1"
g1.add_node(
"A",
**{
"id": "A",
"name": "Node A",
"category": ["biolink:NamedThing"],
"source": source,
}
)
g1.add_node(
"B",
**{
"id": "B",
"name": "Node B",
"category": ["biolink:NamedThing"],
"source": source,
}
)
g1.add_node(
"C",
**{
"id": "C",
"name": "Node C",
"category": ["biolink:NamedThing"],
"source": source,
}
)
g1.add_edge(
"B",
"A",
**{
"subject": "B",
"object": "A",
"predicate": "biolink:subclass_of",
"source": source,
}
)
g2 = NxGraph()
g2.add_node(
"A", **{"id": "A", "category": ["biolink:NamedThing"], "source": source}
)
g2.add_node(
"B", **{"id": "B", "category": ["biolink:NamedThing"], "source": source}
)
g2.add_node(
"C", **{"id": "C", "category": ["biolink:NamedThing"], "source": source}
)
g2.add_node(
"D", **{"id": "D", "category": ["biolink:NamedThing"], "source": source}
)
g2.add_node(
"E", **{"id": "E", "category": ["biolink:NamedThing"], "source": source}
)
g2.add_node(
"F", **{"id": "F", "category": ["biolink:NamedThing"], "source": source}
)
g2.add_edge(
"B",
"A",
**{
"subject": "B",
"object": "A",
"predicate": "biolink:subclass_of",
"source": source,
}
)
g2.add_edge(
"C",
"B",
**{
"subject": "C",
"object": "B",
"predicate": "biolink:subclass_of",
"source": source,
}
)
g2.add_edge(
"D",
"C",
**{
"subject": "D",
"object": "C",
"predicate": "biolink:subclass_of",
"source": source,
}
)
g2.add_edge(
"D",
"A",
**{
"subject": "D",
"object": "A",
"predicate": "biolink:related_to",
"source": source,
}
)
g2.add_edge(
"E",
"D",
**{
"subject": "E",
"object": "D",
"predicate": "biolink:subclass_of",
"source": source,
}
)
g2.add_edge(
"F",
"D",
**{
"subject": "F",
"object": "D",
"predicate": "biolink:subclass_of",
"source": source,
}
)
g3 = NxGraph()
g3.add_node(
"A", **{"id": "A", "category": ["biolink:NamedThing"], "source": source}
)
g3.add_node(
"B", **{"id": "B", "category": ["biolink:NamedThing"], "source": source}
)
g3.add_edge(
"A",
"B",
**{
"subject": "A",
"object": "B",
"predicate": "biolink:related_to",
"source": source,
}
)
g4 = NxGraph()
g4.add_node(
"A",
**{
"id": "A",
"category": ["biolink:Gene"],
"provided_by": source,
"source": source,
}
)
g4.add_node(
"B",
**{
"id": "B",
"category": ["biolink:Gene"],
"provided_by": source,
"source": source,
}
)
g4.add_node(
"A1",
**{
"id": "A1",
"category": ["biolink:Protein"],
"provided_by": source,
"source": source,
}
)
g4.add_node(
"A2",
**{
"id": "A2",
"category": ["biolink:Protein"],
"provided_by": source,
"source": source,
}
)
g4.add_node(
"B1",
**{
"id": "B1",
"category": ["biolink:Protein"],
"provided_by": source,
"source": source,
}
)
g4.add_node(
"X",
**{
"id": "X",
"category": ["biolink:Drug"],
"provided_by": source,
"source": source,
}
)
g4.add_node(
"Y",
**{
"id": "Y",
"category": ["biolink:Drug"],
"provided_by": source,
"source": source,
}
)
g4.add_edge(
"A",
"A1",
**{
"subject": "A",
"object": "A1",
"predicate": "biolink:has_gene_product",
"provided_by": source,
"source": source,
}
)
g4.add_edge(
"A",
"A2",
**{
"subject": "A",
"object": "A2",
"predicate": "biolink:has_gene_product",
"provided_by": source,
"source": source,
}
)
g4.add_edge(
"B",
"B1",
**{
"subject": "B",
"object": "B1",
"predicate": "biolink:has_gene_product",
"provided_by": source,
"source": source,
}
)
g4.add_edge(
"X",
"A1",
**{
"subject": "X",
"object": "A1",
"predicate": "biolink:interacts_with",
"provided_by": source,
"source": source,
}
)
g4.add_edge(
"Y",
"B",
**{
"subject": "Y",
"object": "B",
"predicate": "biolink:interacts_with",
"provided_by": source,
"source": source,
}
)
return [g1, g2, g3, g4]
| 7,590 | 21.727545 | 80 |
py
|
kgx
|
kgx-master/tests/unit/test_prefix_manager.py
|
import pytest
from kgx.prefix_manager import PrefixManager
@pytest.mark.parametrize(
"query",
[
("https://example.org/123", True),
("http://example.org/ABC", True),
("http://purl.obolibrary.org/obo/GO_0008150", True),
("GO:0008150", False),
],
)
def test_is_iri(query):
"""
Test to check behavior of is_iri method in PrefixManager.
"""
assert PrefixManager.is_iri(query[0]) == query[1]
@pytest.mark.parametrize(
"query",
[
("GO:0008150", True),
("CHEMBL.COMPOUND:12345", True),
("HP:0000000", True),
("GO_0008150", False),
("12345", False),
(":12345", True),
],
)
def test_is_curie(query):
"""
Test to check behavior of is_curie method in PrefixManager.
"""
assert PrefixManager.is_curie(query[0]) == query[1]
@pytest.mark.parametrize(
"query",
[
("GO:0008150", "GO"),
("CHEMBL.COMPOUND:12345", "CHEMBL.COMPOUND"),
("HP:0000000", "HP"),
("GO_0008150", None),
("12345", None),
(":12345", ""),
],
)
def test_get_prefix(query):
"""
Test to check behavior of test_get_prefix method in PrefixManager.
"""
assert PrefixManager.get_prefix(query[0]) == query[1]
@pytest.mark.parametrize(
"query",
[
("GO:0008150", "0008150"),
("CHEMBL.COMPOUND:12345", "12345"),
("HP:0000000", "0000000"),
("GO_0008150", None),
("12345", None),
(":12345", "12345"),
],
)
def test_get_reference(query):
"""
Test to check behavior of get_reference method in PrefixManager.
"""
assert PrefixManager.get_reference(query[0]) == query[1]
def test_prefix_manager():
"""
Test to get an instance of PrefixManager.
"""
pm = PrefixManager()
assert pm.prefix_map
assert pm.reverse_prefix_map
assert "biolink" in pm.prefix_map
assert "" in pm.prefix_map
@pytest.mark.parametrize(
"query",
[
("GO:0008150", "http://purl.obolibrary.org/obo/GO_0008150"),
("HP:0000000", "http://purl.obolibrary.org/obo/HP_0000000"),
("biolink:category", "https://w3id.org/biolink/vocab/category"),
("biolink:related_to", "https://w3id.org/biolink/vocab/related_to"),
("biolink:NamedThing", "https://w3id.org/biolink/vocab/NamedThing"),
("HGNC:1103", "http://identifiers.org/hgnc/1103"),
],
)
def test_prefix_manager_expand(query):
"""
Test to check the expand method in PrefixManager.
"""
pm = PrefixManager()
assert pm.expand(query[0]) == query[1]
@pytest.mark.parametrize(
"query",
[
("http://purl.obolibrary.org/obo/GO_0008150", "GO:0008150"),
("http://purl.obolibrary.org/obo/HP_0000000", "HP:0000000"),
("https://w3id.org/biolink/vocab/category", "biolink:category"),
("https://w3id.org/biolink/vocab/related_to", "biolink:related_to"),
("https://w3id.org/biolink/vocab/NamedThing", "biolink:NamedThing"),
("http://identifiers.org/hgnc/1103", "HGNC:1103"),
],
)
def test_prefix_manager_contract(query):
"""
Test to check the contract method in PrefixManager.
"""
pm = PrefixManager()
assert pm.contract(query[0]) == query[1]
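# Minimal round-trip sketch using only the PrefixManager API exercised above;
# the CURIE/IRI pair comes from the parametrized cases, and the helper name is
# illustrative rather than part of the kgx test suite.
def _example_expand_contract_roundtrip():
    pm = PrefixManager()
    iri = pm.expand("GO:0008150")  # -> http://purl.obolibrary.org/obo/GO_0008150
    assert pm.contract(iri) == "GO:0008150"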
| 3,267 | 25.569106 | 76 |
py
|
kgx
|
kgx-master/tests/unit/test_source/test_jsonl_source.py
|
import os
from kgx.source import JsonlSource
from kgx.transformer import Transformer
from tests import RESOURCE_DIR
def test_read_jsonl1():
"""
Read from JSON Lines using JsonlSource.
"""
t = Transformer()
s = JsonlSource(t)
g = s.parse(os.path.join(RESOURCE_DIR, "valid_nodes.jsonl"))
nodes = {}
for rec in g:
if rec:
nodes[rec[0]] = rec[1]
g = s.parse(os.path.join(RESOURCE_DIR, "valid_edges.jsonl"))
edges = {}
for rec in g:
if rec:
edges[(rec[0], rec[1])] = rec[3]
assert len(nodes.keys()) == 7
assert len(edges.keys()) == 5
n = nodes["MONDO:0017148"]
assert "id" in n and n["id"] == "MONDO:0017148"
assert n["name"] == "heritable pulmonary arterial hypertension"
assert n["category"][0] == "biolink:Disease"
n2 = nodes["PUBCHEM.COMPOUND:10429502"]
assert "id" in n2 and n2["id"] == "PUBCHEM.COMPOUND:10429502"
assert n2["name"] == "16|A-Methyl Prednisolone"
e = edges[("HGNC:11603", "MONDO:0017148")]
assert e["subject"] == "HGNC:11603"
assert e["object"] == "MONDO:0017148"
assert e["predicate"] == "biolink:related_to"
assert e["relation"] == "RO:0004013"
def test_read_jsonl2():
"""
Read from JSON Lines using JsonlSource.
This test also supplies the knowledge_source parameter.
"""
t = Transformer()
s = JsonlSource(t)
g = s.parse(
os.path.join(RESOURCE_DIR, "valid_nodes.jsonl"),
provided_by="Test JSON",
knowledge_source="Test JSON",
)
nodes = {}
for rec in g:
if rec:
nodes[rec[0]] = rec[1]
g = s.parse(
os.path.join(RESOURCE_DIR, "valid_edges.jsonl"),
provided_by="Test JSON",
knowledge_source="Test JSON",
)
edges = {}
for rec in g:
if rec:
edges[(rec[0], rec[1])] = rec[3]
assert len(nodes.keys()) == 7
assert len(edges.keys()) == 5
n = nodes["MONDO:0017148"]
assert "id" in n and n["id"] == "MONDO:0017148"
assert n["name"] == "heritable pulmonary arterial hypertension"
assert n["category"][0] == "biolink:Disease"
assert "Test JSON" in n["provided_by"]
e = edges[("HGNC:11603", "MONDO:0017148")]
assert e["subject"] == "HGNC:11603"
assert e["object"] == "MONDO:0017148"
assert e["predicate"] == "biolink:related_to"
assert e["relation"] == "RO:0004013"
assert "Test JSON" in e["knowledge_source"]
| 2,465 | 27.344828 | 67 |
py
|
kgx
|
kgx-master/tests/unit/test_source/test_neo_source.py
|
import pytest
from neo4j import GraphDatabase
from kgx.source import NeoSource
from kgx.transformer import Transformer
from tests.unit import (
clean_database,
DEFAULT_NEO4J_URL,
DEFAULT_NEO4J_USERNAME,
DEFAULT_NEO4J_PASSWORD,
load_graph_dictionary,
check_container,
CONTAINER_NAME
)
queries = [
"CREATE (n:`biolink:NamedThing` {id: 'A', name: 'A', category: ['biolink:NamedThing']})",
"CREATE (n:`biolink:NamedThing` {id: 'B', name: 'B', category: ['biolink:NamedThing']})",
"CREATE (n:`biolink:NamedThing` {id: 'C', name: 'C', category: ['biolink:NamedThing']})",
"""
MATCH (s), (o)
WHERE s.id = 'A' AND o.id = 'B'
CREATE (s)-[p:`biolink:related_to` {subject: s.id, object: o.id, predicate: 'biolink:related_to', relation: 'biolink:related_to'}]->(o)
RETURN p
""",
"""
MATCH (s), (o)
WHERE s.id = 'A' AND o.id = 'C'
CREATE (s)-[p:`biolink:related_to` {subject: s.id, object: o.id, predicate: 'biolink:related_to', relation: 'biolink:related_to'}]->(o)
RETURN p
""",
]
@pytest.mark.skipif(
not check_container(), reason=f"Container {CONTAINER_NAME} is not running"
)
def test_read_neo(clean_database):
"""
Read a graph from a Neo4j instance.
"""
with GraphDatabase.driver(
DEFAULT_NEO4J_URL,
auth=(DEFAULT_NEO4J_USERNAME, DEFAULT_NEO4J_PASSWORD)
) as http_driver:
session = http_driver.session()
for q in queries:
session.run(q)
t = Transformer()
s = NeoSource(t)
g = s.parse(
uri=DEFAULT_NEO4J_URL,
username=DEFAULT_NEO4J_USERNAME,
password=DEFAULT_NEO4J_PASSWORD,
)
nodes, edges = load_graph_dictionary(g)
count = s.count()
assert count > 0
assert len(nodes.keys()) == 3
assert len(edges.keys()) == 2
n1 = nodes["A"]
assert n1["id"] == "A"
assert n1["name"] == "A"
assert "category" in n1 and "biolink:NamedThing" in n1["category"]
e1 = edges[("A", "C")][0]
assert e1["subject"] == "A"
assert e1["object"] == "C"
assert e1["predicate"] == "biolink:related_to"
assert e1["relation"] == "biolink:related_to"
| 2,180 | 27.324675 | 139 |
py
|
kgx
|
kgx-master/tests/unit/test_source/test_rdf_source.py
|
import os
from pprint import pprint
import pytest
from kgx.source import RdfSource
from kgx.transformer import Transformer
from tests import RESOURCE_DIR
from tests.unit import load_graph_dictionary
def test_read_nt1():
"""
Read from an RDF N-Triple file using RdfSource.
"""
t = Transformer()
s = RdfSource(t)
g = s.parse(os.path.join(RESOURCE_DIR, "rdf", "test1.nt"))
nodes, edges = load_graph_dictionary(g)
assert len(nodes) == 2
assert len(edges) == 1
n1 = nodes["ENSEMBL:ENSG0000000000001"]
assert n1["type"] == ["SO:0000704"]
assert len(n1["category"]) == 4
assert "biolink:Gene" in n1["category"]
assert "biolink:GenomicEntity" in n1["category"]
assert "biolink:NamedThing" in n1["category"]
assert n1["name"] == "Test Gene 123"
assert n1["description"] == "This is a Test Gene 123"
assert "Test Dataset" in n1["provided_by"]
n2 = nodes["ENSEMBL:ENSG0000000000002"]
assert n2["type"] == ["SO:0000704"]
assert len(n2["category"]) == 4
assert "biolink:Gene" in n2["category"]
assert "biolink:GenomicEntity" in n2["category"]
assert "biolink:NamedThing" in n1["category"]
assert n2["name"] == "Test Gene 456"
assert n2["description"] == "This is a Test Gene 456"
assert "Test Dataset" in n2["provided_by"]
e = list(edges.values())[0][0]
assert e["subject"] == "ENSEMBL:ENSG0000000000001"
assert e["object"] == "ENSEMBL:ENSG0000000000002"
assert e["predicate"] == "biolink:interacts_with"
assert e["relation"] == "biolink:interacts_with"
def test_read_nt2():
"""
Read from an RDF N-Triple file using RdfSource.
This test also supplies the knowledge_source parameter.
"""
t = Transformer()
s = RdfSource(t)
g = s.parse(
os.path.join(RESOURCE_DIR, "rdf", "test1.nt"),
provided_by="Test Dataset",
knowledge_source="Test Dataset",
)
nodes, edges = load_graph_dictionary(g)
assert len(nodes) == 2
assert len(edges) == 1
n1 = nodes["ENSEMBL:ENSG0000000000001"]
assert n1["type"] == ["SO:0000704"]
assert len(n1["category"]) == 4
assert "biolink:Gene" in n1["category"]
assert "biolink:GenomicEntity" in n1["category"]
assert "biolink:NamedThing" in n1["category"]
assert n1["name"] == "Test Gene 123"
assert n1["description"] == "This is a Test Gene 123"
assert "Test Dataset" in n1["provided_by"]
n2 = nodes["ENSEMBL:ENSG0000000000002"]
assert n2["type"] == ["SO:0000704"]
assert len(n2["category"]) == 4
assert "biolink:Gene" in n2["category"]
assert "biolink:GenomicEntity" in n2["category"]
assert "biolink:NamedThing" in n1["category"]
assert n2["name"] == "Test Gene 456"
assert n2["description"] == "This is a Test Gene 456"
assert "Test Dataset" in n2["provided_by"]
e = list(edges.values())[0][0]
assert e["subject"] == "ENSEMBL:ENSG0000000000001"
assert e["object"] == "ENSEMBL:ENSG0000000000002"
assert e["predicate"] == "biolink:interacts_with"
assert e["relation"] == "biolink:interacts_with"
assert "Test Dataset" in e["knowledge_source"]
def test_read_nt3():
"""
Read from an RDF N-Triple file using RdfSource, with user defined
node property predicates.
"""
node_property_predicates = {
f"https://www.example.org/UNKNOWN/{x}"
for x in ["fusion", "homology", "combined_score", "cooccurence"]
}
t = Transformer()
source = RdfSource(t)
source.set_node_property_predicates(node_property_predicates)
g = source.parse(
filename=os.path.join(RESOURCE_DIR, "rdf", "test2.nt"), format="nt"
)
nodes, edges = load_graph_dictionary(g)
assert len(nodes) == 4
assert len(edges) == 3
n1 = nodes["ENSEMBL:ENSG0000000000001"]
assert n1["type"] == ["SO:0000704"]
assert len(n1["category"]) == 4
assert "biolink:Gene" in n1["category"]
assert "biolink:GenomicEntity" in n1["category"]
assert "biolink:NamedThing" in n1["category"]
assert n1["name"] == "Test Gene 123"
assert n1["description"] == "This is a Test Gene 123"
assert "Test Dataset" in n1["provided_by"]
n2 = nodes["ENSEMBL:ENSG0000000000002"]
assert n2["type"] == ["SO:0000704"]
assert len(n2["category"]) == 4
assert "biolink:Gene" in n2["category"]
assert "biolink:GenomicEntity" in n2["category"]
assert "biolink:NamedThing" in n1["category"]
assert n2["name"] == "Test Gene 456"
assert n2["description"] == "This is a Test Gene 456"
assert "Test Dataset" in n2["provided_by"]
e1 = edges["ENSEMBL:ENSP0000000000001", "ENSEMBL:ENSP0000000000002"][0]
assert e1["subject"] == "ENSEMBL:ENSP0000000000001"
assert e1["object"] == "ENSEMBL:ENSP0000000000002"
assert e1["predicate"] == "biolink:interacts_with"
assert e1["relation"] == "biolink:interacts_with"
assert e1["type"] == ["biolink:Association"]
assert e1["id"] == "urn:uuid:fcf76807-f909-4ccb-b40a-3b79b49aa518"
assert e1["fusion"] == "0"
assert e1["homology"] == "0.0"
assert e1["combined_score"] == "490.0"
assert e1["cooccurence"] == "332"
def test_read_nt4():
"""
Read from an RDF N-Triple file using RdfSource, with user defined
node property predicates.
"""
node_property_predicates = {
f"https://www.example.org/UNKNOWN/{x}"
for x in ["fusion", "homology", "combined_score", "cooccurence"]
}
t = Transformer()
source = RdfSource(t)
source.set_node_property_predicates(node_property_predicates)
g = source.parse(
filename=os.path.join(RESOURCE_DIR, "rdf", "test3.nt"), format="nt"
)
nodes, edges = load_graph_dictionary(g)
assert len(nodes.keys()) == 7
assert len(edges.keys()) == 6
n1 = nodes["ENSEMBL:ENSG0000000000001"]
assert n1["type"] == ["SO:0000704"]
assert len(n1["category"]) == 4
assert "biolink:Gene" in n1["category"]
assert "biolink:GenomicEntity" in n1["category"]
assert "biolink:NamedThing" in n1["category"]
assert n1["name"] == "Test Gene 123"
assert n1["description"] == "This is a Test Gene 123"
assert "Test Dataset" in n1["provided_by"]
n2 = nodes["ENSEMBL:ENSG0000000000002"]
assert n2["type"] == ["SO:0000704"]
assert len(n2["category"]) == 4
assert "biolink:Gene" in n2["category"]
assert "biolink:GenomicEntity" in n2["category"]
assert "biolink:NamedThing" in n1["category"]
assert n2["name"] == "Test Gene 456"
assert n2["description"] == "This is a Test Gene 456"
assert "Test Dataset" in n2["provided_by"]
e1 = edges["ENSEMBL:ENSP0000000000001", "ENSEMBL:ENSP0000000000002"][0]
assert e1["subject"] == "ENSEMBL:ENSP0000000000001"
assert e1["object"] == "ENSEMBL:ENSP0000000000002"
assert e1["predicate"] == "biolink:interacts_with"
assert e1["relation"] == "biolink:interacts_with"
assert e1["type"] == ["biolink:Association"]
assert e1["id"] == "urn:uuid:fcf76807-f909-4ccb-b40a-3b79b49aa518"
assert e1["fusion"] == "0"
assert e1["homology"] == "0.0"
assert e1["combined_score"] == "490.0"
assert e1["cooccurence"] == "332"
e2 = edges["ENSEMBL:ENSP0000000000001", "UniProtKB:X0000001"][0]
assert e2["subject"] == "ENSEMBL:ENSP0000000000001"
assert e2["object"] == "UniProtKB:X0000001"
assert e2["predicate"] == "biolink:same_as"
assert e2["relation"] == "owl:equivalentClass"
e3 = edges["ENSEMBL:ENSP0000000000001", "MONDO:0000001"][0]
assert e3["subject"] == "ENSEMBL:ENSP0000000000001"
assert e3["object"] == "MONDO:0000001"
assert e3["predicate"] == "biolink:treats"
assert e3["relation"] == "RO:0002606"
def test_read_nt5():
"""
Parse an OBAN styled NT, with user defined prefix_map and node_property_predicates.
"""
prefix_map = {
"HGNC": "https://www.genenames.org/data/gene-symbol-report/#!/hgnc_id/",
"OMIM": "http://omim.org/entry/",
}
node_property_predicates = {
"http://purl.obolibrary.org/obo/RO_0002558",
"http://purl.org/dc/elements/1.1/source",
"https://monarchinitiative.org/frequencyOfPhenotype",
}
filename = os.path.join(RESOURCE_DIR, "rdf", "oban-test.nt")
t = Transformer()
source = RdfSource(t)
source.set_prefix_map(prefix_map)
source.set_node_property_predicates(node_property_predicates)
g = source.parse(filename=filename, format="nt")
nodes, edges = load_graph_dictionary(g)
assert len(nodes.keys()) == 14
assert len(edges.keys()) == 7
n1 = nodes["HP:0000505"]
assert len(n1["category"]) == 1
assert "biolink:NamedThing" in n1["category"]
e1 = edges["OMIM:166400", "HP:0000006"][0]
assert e1["subject"] == "OMIM:166400"
assert e1["object"] == "HP:0000006"
assert e1["relation"] == "RO:0000091"
assert e1["type"] == ["OBAN:association"]
assert e1["has_evidence"] == ["ECO:0000501"]
e2 = edges["ORPHA:93262", "HP:0000505"][0]
assert e2["subject"] == "ORPHA:93262"
assert e2["object"] == "HP:0000505"
assert e2["relation"] == "RO:0002200"
assert e2["type"] == ["OBAN:association"]
assert e2["frequencyOfPhenotype"] == "HP:0040283"
def test_read_nt6():
prefix_map = {
"HGNC": "https://www.genenames.org/data/gene-symbol-report/#!/hgnc_id/",
"OMIM": "http://omim.org/entry/",
}
node_property_predicates = {
"http://purl.obolibrary.org/obo/RO_0002558",
"http://purl.org/dc/elements/1.1/source",
"https://monarchinitiative.org/frequencyOfPhenotype",
}
predicate_mapping = {
"https://monarchinitiative.org/frequencyOfPhenotype": "frequency_of_phenotype"
}
filename = os.path.join(RESOURCE_DIR, "rdf", "oban-test.nt")
t = Transformer()
source = RdfSource(t)
source.set_prefix_map(prefix_map)
source.set_node_property_predicates(node_property_predicates)
source.set_predicate_mapping(predicate_mapping)
g = source.parse(filename=filename, format="nt")
nodes, edges = load_graph_dictionary(g)
assert len(nodes.keys()) == 14
assert len(edges.keys()) == 7
n1 = nodes["HP:0000505"]
assert len(n1["category"]) == 1
assert "biolink:NamedThing" in n1["category"]
e1 = edges["OMIM:166400", "HP:0000006"][0]
assert e1["subject"] == "OMIM:166400"
assert e1["object"] == "HP:0000006"
assert e1["relation"] == "RO:0000091"
assert e1["type"] == ["OBAN:association"]
assert e1["has_evidence"] == ["ECO:0000501"]
e2 = edges["ORPHA:93262", "HP:0000505"][0]
assert e2["subject"] == "ORPHA:93262"
assert e2["object"] == "HP:0000505"
assert e2["relation"] == "RO:0002200"
assert e2["type"] == ["OBAN:association"]
assert e2["frequency_of_phenotype"] == "HP:0040283"
@pytest.mark.parametrize(
"query",
[
(
{"id": "ABC:123", "category": "biolink:NamedThing", "prop1": [1, 2, 3]},
{"category": ["biolink:NamedThing", "biolink:Gene"], "prop1": [4]},
{"category": ["biolink:NamedThing", "biolink:Gene"]},
{"prop1": [1, 2, 3, 4]},
),
(
{"id": "ABC:123", "category": ["biolink:NamedThing"], "prop1": 1},
{"category": {"biolink:NamedThing", "biolink:Gene"}, "prop1": [2, 3]},
{"category": ["biolink:NamedThing", "biolink:Gene"]},
{"prop1": [1, 2, 3]},
),
(
{
"id": "ABC:123",
"category": ["biolink:NamedThing"],
"provided_by": "test1",
},
{
"id": "DEF:456",
"category": ("biolink:NamedThing", "biolink:Gene"),
"provided_by": "test2",
},
{"category": ["biolink:NamedThing", "biolink:Gene"]},
{"provided_by": ["test1", "test2"]},
),
(
{
"subject": "Orphanet:331206",
"object": "HP:0004429",
"relation": "RO:0002200",
"predicate": "biolink:has_phenotype",
"id": "bfada868a8309f2b7849",
"type": "OBAN:association",
},
{
"subject": "Orphanet:331206",
"object": "HP:0004429",
"relation": "RO:0002200",
"predicate": "biolink:has_phenotype",
"id": "bfada868a8309f2b7849",
"type": "OBAN:association",
},
{},
{},
),
(
{
"subject": "Orphanet:331206",
"object": "HP:0004429",
"relation": "RO:0002200",
"predicate": "biolink:has_phenotype",
"id": "bfada868a8309f2b7849",
"type": "OBAN:association",
"knowledge_source": "Orphanet:331206",
},
{
"subject": "Orphanet:331206",
"object": "HP:0004429",
"relation": "RO:0002200",
"predicate": "biolink:has_phenotype",
"id": "bfada868a8309f2b7849",
"type": "OBAN:association",
"knowledge_source": "Orphanet:331206",
},
{},
{"knowledge_source": ["Orphanet:331206"]},
),
],
)
def test_prepare_data_dict(query):
"""
Test for internal _prepare_data_dict method in RdfSource.
"""
t = Transformer()
source = RdfSource(t)
new_data = source._prepare_data_dict(query[0], query[1])
for k, v in query[2].items():
assert new_data[k] == v
for k, v in query[3].items():
assert new_data[k] == v
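# Worked reading of the merge semantics asserted above (illustrative only):
# merging {"category": "biolink:NamedThing", "prop1": [1, 2, 3]} with
# {"category": ["biolink:NamedThing", "biolink:Gene"], "prop1": [4]} yields the
# union of categories (["biolink:NamedThing", "biolink:Gene"]) and the
# concatenation of list-valued properties ({"prop1": [1, 2, 3, 4]}), while
# identical scalar fields such as subject/object/predicate pass through unchanged.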
| 13,723 | 34.010204 | 87 |
py
|
kgx
|
kgx-master/tests/unit/test_source/test_sssom_source.py
|
import os
from kgx.source.sssom_source import SssomSource
from kgx.transformer import Transformer
from tests import RESOURCE_DIR
from tests.unit import load_graph_dictionary
def test_load1():
"""
Read a SSSOM formatted file.
"""
t = Transformer()
source = SssomSource(t)
g = source.parse(
filename=os.path.join(RESOURCE_DIR, "sssom_example1.tsv"), format="sssom"
)
nodes, edges = load_graph_dictionary(g)
assert len(nodes.keys()) == 18
assert len(edges.keys()) == 9
assert nodes["MP:0012051"]["id"] == "MP:0012051"
assert nodes["HP:0001257"]["id"] == "HP:0001257"
e = edges["MP:0012051", "HP:0001257"][0]
assert e["subject"] == "MP:0012051"
assert e["object"] == "HP:0001257"
assert e["predicate"] == "biolink:same_as"
assert e["confidence"] == "1.0"
def test_load2():
"""
Read a SSSOM formatted file, with more metadata on mappings.
"""
t = Transformer()
source = SssomSource(t)
g = source.parse(
filename=os.path.join(RESOURCE_DIR, "sssom_example2.tsv"), format="sssom"
)
nodes, edges = load_graph_dictionary(g)
assert len(nodes.keys()) == 18
assert len(edges.keys()) == 9
n1 = nodes["MP:0002152"]
assert n1["id"] == "MP:0002152"
n2 = nodes["HP:0012443"]
assert n2["id"] == "HP:0012443"
e = edges["MP:0002152", "HP:0012443"][0]
assert e["subject"] == "MP:0002152"
assert e["subject_label"] == "abnormal brain morphology"
assert e["object"] == "HP:0012443"
assert e["object_label"] == "Abnormality of brain morphology"
assert e["predicate"] == "biolink:exact_match"
assert e["match_type"] == "SSSOMC:Lexical"
assert e["reviewer_id"] == "orcid:0000-0000-0000-0000"
def test_load3():
"""
Read a SSSOM formatted file that has metadata provided in headers.
"""
t = Transformer()
source = SssomSource(t)
g = source.parse(
filename=os.path.join(RESOURCE_DIR, "sssom_example3.tsv"), format="sssom"
)
nodes, edges = load_graph_dictionary(g)
assert len(nodes) == 20
assert len(edges) == 10
e = edges["MA:0000168", "UBERON:0000955"][0]
assert (
"mapping_provider" in e
and e["mapping_provider"] == "https://www.mousephenotype.org"
)
assert (
"mapping_set_group" in e and e["mapping_set_group"] == "impc_mouse_morphology"
)
assert "mapping_set_id" in e and e["mapping_set_id"] == "ma_uberon_impc_pat"
assert (
"mapping_set_title" in e
and e["mapping_set_title"]
== "The IMPC Mouse Morphology Mappings: Gross Pathology & Tissue Collection Test (Anatomy)"
)
assert (
"creator_id" in e and e["creator_id"] == "https://orcid.org/0000-0000-0000-0000"
)
assert (
"license" in e
and e["license"] == "https://creativecommons.org/publicdomain/zero/1.0/"
)
assert "curie_map" not in e
| 2,925 | 28.555556 | 99 |
py
|
kgx
|
kgx-master/tests/unit/test_source/test_json_source.py
|
import os
from kgx.source import JsonSource
from kgx.transformer import Transformer
from tests import RESOURCE_DIR
def test_read_json1():
"""
Read from a JSON using JsonSource.
"""
t = Transformer()
s = JsonSource(t)
g = s.parse(os.path.join(RESOURCE_DIR, "valid.json"))
nodes = {}
edges = {}
for rec in g:
if rec:
if len(rec) == 4:
edges[(rec[0], rec[1])] = rec[3]
else:
nodes[rec[0]] = rec[1]
assert len(nodes.keys()) == 7
assert len(edges.keys()) == 5
n = nodes["MONDO:0017148"]
assert "id" in n and n["id"] == "MONDO:0017148"
assert n["name"] == "heritable pulmonary arterial hypertension"
assert n["category"][0] == "biolink:Disease"
n2 = nodes["PUBCHEM.COMPOUND:10429502"]
assert "id" in n2 and n2["id"] == "PUBCHEM.COMPOUND:10429502"
assert n2["name"] == "16|A-Methyl Prednisolone"
e = edges[("HGNC:11603", "MONDO:0017148")]
assert e["subject"] == "HGNC:11603"
assert e["object"] == "MONDO:0017148"
assert e["predicate"] == "biolink:related_to"
assert e["relation"] == "RO:0004013"
def test_read_json_filter():
"""
    Read from a JSON using JsonSource, applying a node filter on category.
"""
t = Transformer()
s = JsonSource(t)
filters = {
"category": {"biolink:Disease"}
}
s.set_node_filters(filters)
g = s.parse(os.path.join(RESOURCE_DIR, "valid.json"))
nodes = {}
edges = {}
for rec in g:
if rec:
if len(rec) == 4:
edges[(rec[0], rec[1])] = rec[3]
else:
nodes[rec[0]] = rec[1]
for node in nodes:
n = nodes[node]
assert n["category"] == ["biolink:Disease"]
def test_read_json2():
"""
Read from a JSON using JsonSource.
This test also supplies the provided_by parameter.
"""
t = Transformer()
s = JsonSource(t)
g = s.parse(
os.path.join(RESOURCE_DIR, "valid.json"),
provided_by="Test JSON",
knowledge_source="Test JSON",
)
nodes = {}
edges = {}
for rec in g:
if rec:
if len(rec) == 4:
edges[(rec[0], rec[1])] = rec[3]
else:
nodes[rec[0]] = rec[1]
assert len(nodes.keys()) == 7
assert len(edges.keys()) == 5
n = nodes["MONDO:0017148"]
assert "id" in n and n["id"] == "MONDO:0017148"
assert n["name"] == "heritable pulmonary arterial hypertension"
assert n["category"][0] == "biolink:Disease"
assert "Test JSON" in n["provided_by"]
e = edges[("HGNC:11603", "MONDO:0017148")]
assert e["subject"] == "HGNC:11603"
assert e["object"] == "MONDO:0017148"
assert e["predicate"] == "biolink:related_to"
assert e["relation"] == "RO:0004013"
assert "Test JSON" in e["knowledge_source"]
def test_read_json_compressed():
"""
Read from a gzip compressed JSON using JsonSource.
"""
t = Transformer()
s = JsonSource(t)
g = s.parse(os.path.join(RESOURCE_DIR, "valid.json.gz"), compression="gz")
nodes = {}
edges = {}
for rec in g:
if rec:
if len(rec) == 4:
edges[(rec[0], rec[1])] = rec[3]
else:
nodes[rec[0]] = rec[1]
assert len(nodes.keys()) == 7
assert len(edges.keys()) == 5
n = nodes["MONDO:0017148"]
assert "id" in n and n["id"] == "MONDO:0017148"
assert n["name"] == "heritable pulmonary arterial hypertension"
assert n["category"][0] == "biolink:Disease"
e = edges[("HGNC:11603", "MONDO:0017148")]
assert e["subject"] == "HGNC:11603"
assert e["object"] == "MONDO:0017148"
assert e["predicate"] == "biolink:related_to"
assert e["relation"] == "RO:0004013"
| 3,759 | 26.851852 | 78 |
py
|
kgx
|
kgx-master/tests/unit/test_source/test_source.py
|
import pytest
from kgx.transformer import Transformer
from kgx.source.source import DEFAULT_NODE_CATEGORY, Source
@pytest.mark.parametrize(
"node",
[
{"name": "Node A", "description": "Node without an ID"},
{"node_id": "A", "description": "Node without an ID, name and category"},
{"name": "Node A", "description": "Node A", "category": "biolink:NamedThing"},
{"id": "", "name": "hgnc:0", "description": "Node without empty 'id' value", "category": "biolink:NamedThing"},
{"id": "hgnc:1234", "description": "Node without name", "category": "biolink:NamedThing"},
{"id": "hgnc:5678", "name": "Node A", "description": "Node without category"},
],
)
def test_validate_incorrect_node(node):
"""
Test basic validation of a node, where the node is invalid.
"""
t = Transformer()
s = Source(t)
result = s.validate_node(node)
if len(t.get_errors("Error")) > 0:
assert result is None
else:
assert result is not None
t.write_report()
@pytest.mark.parametrize(
"node",
[
{
"id": "A",
"name": "Node A",
"description": "Node A",
"category": ["biolink:NamedThing"],
},
{
"id": "A",
"name": "Node A",
"description": "Node A"
},
],
)
def test_validate_correct_node(node):
"""
Test basic validation of a node, where the node is valid.
"""
t = Transformer()
s = Source(t)
n = s.validate_node(node)
assert n is not None
assert "category" in n
assert n["category"][0] == DEFAULT_NODE_CATEGORY
if len(t.get_errors()) > 0:
assert len(t.get_errors("Error")) == 0
assert len(t.get_errors("Warning")) > 0
t.write_report(None, "Warning")
@pytest.mark.parametrize(
"edge",
[
{"predicate": "biolink:related_to"},
{"subject": "A", "predicate": "biolink:related_to"},
{"subject": "A", "object": "B"},
],
)
def test_validate_incorrect_edge(edge):
"""
Test basic validation of an edge, where the edge is invalid.
"""
t = Transformer()
s = Source(t)
assert not s.validate_edge(edge)
assert len(t.get_errors()) > 0
t.write_report()
@pytest.mark.parametrize(
"edge",
[
{"subject": "A", "object": "B", "predicate": "biolink:related_to"},
{
"subject": "A",
"object": "B",
"predicate": "biolink:related_to",
"relation": "RO:000000",
},
],
)
def test_validate_correct_edge(edge):
"""
Test basic validation of an edge, where the edge is valid.
"""
t = Transformer()
s = Source(t)
e = s.validate_edge(edge)
assert e is not None
assert len(t.get_errors()) == 0
t.write_report()
@pytest.mark.parametrize(
"node",
[
{
"id": "hgnc:1234",
"name": "some node",
"description": "Node without name",
"category": "biolink:NamedThing",
"some_field": "don't care!"
},
],
)
def test_incorrect_node_filters(node):
"""
Test filtering of a node
"""
t = Transformer()
s = Source(t)
filters = {
"some_field": {"bad_node_filter": 1}
}
s.set_node_filters(filters)
s.check_node_filter(node)
assert len(t.get_errors("Error")) > 0
t.write_report()
@pytest.mark.parametrize(
"edge",
[
{
"subject": "A",
"predicate": "biolink:related_to",
"object": "B",
"some_field": "don't care here either!"
},
],
)
def test_incorrect_edge_filters(edge):
"""
Test filtering of an edge
"""
t = Transformer()
s = Source(t)
filters = {
"some_field": {"bad_edge_filter": 1}
}
s.set_edge_filters(filters)
s.check_edge_filter(edge)
assert len(t.get_errors("Error")) > 0
t.write_report()
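# Illustrative sketch of wiring filters onto a Source outside of the failure
# tests above. The filter keys and values here are placeholders (the "category"
# node filter mirrors test_json_source.py); only set_node_filters/check_node_filter,
# set_edge_filters/check_edge_filter and Transformer.get_errors() are assumed.
def _example_apply_filters(node, edge):
    t = Transformer()
    s = Source(t)
    s.set_node_filters({"category": {"biolink:Gene"}})
    s.set_edge_filters({"predicate": {"biolink:related_to"}})
    s.check_node_filter(node)
    s.check_edge_filter(edge)
    # Unsupported filter keys surface on the owning Transformer's error log,
    # as the failure tests above assert via t.get_errors("Error").
    return t.get_errors()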
| 3,975 | 24.164557 | 119 |
py
|
kgx
|
kgx-master/tests/unit/test_source/test_graph_source.py
|
from kgx.graph.nx_graph import NxGraph
from kgx.source import GraphSource
from kgx.transformer import Transformer
def test_read_graph1():
"""
Read from an NxGraph using GraphSource.
"""
graph = NxGraph()
graph.add_node("A", **{"id": "A", "name": "node A"})
graph.add_node("B", **{"id": "B", "name": "node B"})
graph.add_node("C", **{"id": "C", "name": "node C"})
graph.add_edge(
"A",
"C",
**{
"subject": "A",
"predicate": "biolink:related_to",
"object": "C",
"relation": "biolink:related_to",
}
)
t = Transformer()
s = GraphSource(t)
g = s.parse(graph=graph)
nodes = {}
edges = {}
for rec in g:
if rec:
if len(rec) == 4:
edges[(rec[0], rec[1], rec[2])] = rec[3]
else:
nodes[rec[0]] = rec[1]
assert len(nodes.keys()) == 3
n1 = nodes["A"]
assert n1["id"] == "A"
assert n1["name"] == "node A"
assert len(edges.keys()) == 1
e1 = list(edges.values())[0]
assert e1["subject"] == "A"
assert e1["predicate"] == "biolink:related_to"
assert e1["object"] == "C"
assert e1["relation"] == "biolink:related_to"
def test_read_graph2():
"""
Read from an NxGraph using GraphSource.
This test also supplies the provided_by parameter.
"""
graph = NxGraph()
graph.add_node("A", **{"id": "A", "name": "node A"})
graph.add_node("B", **{"id": "B", "name": "node B"})
graph.add_node("C", **{"id": "C", "name": "node C"})
graph.add_edge(
"A",
"C",
**{
"subject": "A",
"predicate": "biolink:related_to",
"object": "C",
"relation": "biolink:related_to",
}
)
t = Transformer()
s = GraphSource(t)
g = s.parse(graph=graph, provided_by="Test Graph", knowledge_source="Test Graph")
nodes = {}
edges = {}
for rec in g:
if rec:
if len(rec) == 4:
edges[(rec[0], rec[1], rec[2])] = rec[3]
else:
nodes[rec[0]] = rec[1]
assert len(nodes.keys()) == 3
n1 = nodes["A"]
assert n1["id"] == "A"
assert n1["name"] == "node A"
print("n1:", n1)
assert "Test Graph" in n1["provided_by"]
assert len(edges.keys()) == 1
e1 = list(edges.values())[0]
assert e1["subject"] == "A"
assert e1["predicate"] == "biolink:related_to"
assert e1["object"] == "C"
assert e1["relation"] == "biolink:related_to"
print("e1:", e1)
assert "Test Graph" in e1["knowledge_source"]
| 2,627 | 26.092784 | 85 |
py
|
kgx
|
kgx-master/tests/unit/test_source/__init__.py
| 0 | 0 | 0 |
py
|
|
kgx
|
kgx-master/tests/unit/test_source/test_tsv_source.py
|
import os
from kgx.source import TsvSource
from kgx.transformer import Transformer
from tests import RESOURCE_DIR
def test_read_tsv():
"""
Read a TSV using TsvSource.
"""
t = Transformer()
s = TsvSource(t)
g = s.parse(filename=os.path.join(RESOURCE_DIR, "test_nodes.tsv"), format="tsv")
nodes = []
for rec in g:
if rec:
nodes.append(rec)
assert len(nodes) == 3
nodes.sort()
n1 = nodes.pop()[-1]
assert n1["id"] == "CURIE:456"
assert n1["name"] == "Disease 456"
assert "biolink:Disease" in n1["category"]
assert "biolink:NamedThing" in n1["category"]
assert n1["description"] == '"Node of type Disease, CURIE:456"'
g = s.parse(filename=os.path.join(RESOURCE_DIR, "test_edges.tsv"), format="tsv")
edges = []
for rec in g:
if rec:
edges.append(rec)
e1 = edges.pop()[-1]
assert "id" in e1
assert e1["subject"] == "CURIE:123"
assert e1["object"] == "CURIE:456"
assert e1["predicate"] == "biolink:related_to"
assert e1["relation"] == "biolink:related_to"
assert "PMID:1" in e1["publications"]
def test_read_csv():
"""
Read a CSV using TsvSource.
"""
t = Transformer()
s = TsvSource(t)
g = s.parse(filename=os.path.join(RESOURCE_DIR, "test_nodes.csv"), format="csv")
nodes = []
for rec in g:
if rec:
nodes.append(rec)
assert len(nodes) == 3
nodes.sort()
n1 = nodes.pop()[-1]
assert n1["id"] == "CURIE:456"
assert n1["name"] == "Disease 456"
assert "biolink:Disease" in n1["category"]
assert "biolink:NamedThing" in n1["category"]
assert n1["description"] == "Node of type Disease, CURIE:456"
g = s.parse(filename=os.path.join(RESOURCE_DIR, "test_edges.csv"), format="csv")
edges = []
for rec in g:
if rec:
print(rec)
edges.append(rec)
e1 = edges.pop()[-1]
assert "id" in e1
assert e1["subject"] == "CURIE:123"
assert e1["object"] == "CURIE:456"
assert e1["predicate"] == "biolink:related_to"
assert e1["relation"] == "biolink:related_to"
assert "PMID:1" in e1["publications"]
def test_read_tsv_tar_compressed():
"""
    Read a TSV TAR archive using TsvSource.
"""
t = Transformer()
s = TsvSource(t)
g = s.parse(
filename=os.path.join(RESOURCE_DIR, "test.tar"), format="tsv", compression="tar"
)
nodes = []
edges = []
for rec in g:
if rec:
if len(rec) == 4:
edges.append(rec)
else:
                nodes.append(rec)
assert len(nodes) == 3
assert len(edges) == 1
def test_read_tsv_tar_gz_compressed():
"""
    Read a gzip-compressed TSV TAR archive using TsvSource.
"""
t = Transformer()
s = TsvSource(t)
g = s.parse(
filename=os.path.join(RESOURCE_DIR, "test.tar.gz"),
format="tsv",
compression="tar.gz",
)
nodes = []
edges = []
for rec in g:
if rec:
if len(rec) == 4:
edges.append(rec)
else:
                nodes.append(rec)
assert len(nodes) == 3
assert len(edges) == 1
def test_read_tsv_tar_gz_compressed_inverted_file_order():
"""
    Read a gzip-compressed TSV TAR archive using TsvSource, where the source tar archive has the edge file first and the node file second.
"""
t = Transformer()
s = TsvSource(t)
g = s.parse(
filename=os.path.join(RESOURCE_DIR, "test-inverse.tar.gz"),
format="tsv",
compression="tar.gz",
)
nodes = []
edges = []
for rec in g:
if rec:
if len(rec) == 4:
edges.append(rec)
else:
                nodes.append(rec)
assert len(nodes) == 3
assert len(edges) == 1
def test_incorrect_nodes():
"""
    Test parsing a TSV with incomplete nodes, where invalid nodes are reported.
"""
t = Transformer()
s = TsvSource(t)
g = s.parse(filename=os.path.join(RESOURCE_DIR, "incomplete_nodes.tsv"), format="tsv")
nodes = []
for rec in g:
if rec:
nodes.append(rec)
t.write_report()
| 4,164 | 24.869565 | 113 |
py
|
kgx
|
kgx-master/tests/unit/test_source/test_owl_source.py
|
import os
from kgx.source import OwlSource
from kgx.transformer import Transformer
from tests import RESOURCE_DIR
from pprint import pprint
def test_read_owl1():
"""
Read an OWL ontology using OwlSource.
"""
t = Transformer()
s = OwlSource(t)
g = s.parse(
os.path.join(RESOURCE_DIR, "goslim_generic.owl"),
provided_by="GO slim generic",
knowledge_source="GO slim generic"
)
nodes = {}
edges = {}
for rec in g:
if rec:
if len(rec) == 4:
edges[(rec[0], rec[1])] = rec[3]
else:
nodes[rec[0]] = rec[1]
n1 = nodes["GO:0008150"]
assert n1["name"] == "biological_process"
assert "has_exact_synonym" in n1
assert "description" in n1
assert "comment" in n1
assert "has_alternative_id" in n1
assert "has_exact_synonym" in n1
assert "physiological process" in n1["has_exact_synonym"]
n2 = nodes["GO:0003674"]
n2["name"] = "molecular_function"
assert "has_exact_synonym" in n2
assert "description" in n2
assert "comment" in n2
assert "has_alternative_id" in n2
n3 = nodes["GO:0005575"]
n3["name"] = "cellular_component"
assert "has_exact_synonym" in n3
assert "description" in n3
assert "comment" in n3
assert "has_alternative_id" in n3
assert "GO:0008372" in n3["has_alternative_id"]
e1 = edges["GO:0008289", "GO:0003674"]
assert e1["subject"] == "GO:0008289"
assert e1["predicate"] == "biolink:subclass_of"
assert e1["object"] == "GO:0003674"
assert e1["relation"] == "rdfs:subClassOf"
def test_read_owl2():
"""
Read an OWL ontology using OwlSource.
This test also supplies the knowledge_source parameter.
"""
t = Transformer()
s = OwlSource(t)
g = s.parse(
os.path.join(RESOURCE_DIR, "goslim_generic.owl"),
provided_by="GO slim generic",
knowledge_source="GO slim generic",
)
nodes = {}
edges = {}
for rec in g:
if rec:
if len(rec) == 4:
edges[(rec[0], rec[1])] = rec[3]
else:
nodes[rec[0]] = rec[1]
n1 = nodes["GO:0008150"]
assert n1["name"] == "biological_process"
assert "has_exact_synonym" in n1
assert "description" in n1
assert "comment" in n1
assert "has_alternative_id" in n1
assert "GO slim generic" in n1["provided_by"]
n2 = nodes["GO:0003674"]
n2["name"] = "molecular_function"
assert "has_exact_synonym" in n2
assert "description" in n2
assert "comment" in n2
assert "has_alternative_id" in n2
assert "GO slim generic" in n2["provided_by"]
n3 = nodes["GO:0005575"]
n3["name"] = "cellular_component"
assert "has_exact_synonym" in n3
assert "description" in n3
assert "comment" in n3
assert "has_alternative_id" in n3
assert "GO slim generic" in n3["provided_by"]
e1 = edges["GO:0008289", "GO:0003674"]
assert e1["subject"] == "GO:0008289"
assert e1["predicate"] == "biolink:subclass_of"
assert e1["object"] == "GO:0003674"
assert e1["relation"] == "rdfs:subClassOf"
assert "GO slim generic" in e1["knowledge_source"]
def test_read_owl3():
"""
Read an OWL ontology, with user defined
node property predicates and predicate mappings.
"""
node_property_predicates = {"http://www.geneontology.org/formats/oboInOwl#inSubset"}
predicate_mappings = {
"http://www.geneontology.org/formats/oboInOwl#inSubset": "subsets",
"http://www.geneontology.org/formats/oboInOwl#hasOBONamespace": "namespace",
"http://www.geneontology.org/formats/oboInOwl#hasAlternativeId": "xref",
}
t = Transformer()
source = OwlSource(t)
source.set_predicate_mapping(predicate_mappings)
source.set_node_property_predicates(node_property_predicates)
g = source.parse(filename=os.path.join(RESOURCE_DIR, "goslim_generic.owl"))
nodes = {}
edges = {}
for rec in g:
if rec:
if len(rec) == 4:
key = (rec[0], rec[1])
if key in edges:
edges[key].append(rec[-1])
else:
edges[key] = [rec[-1]]
else:
nodes[rec[0]] = rec[-1]
n1 = nodes["GO:0008150"]
pprint(n1)
assert n1["name"] == "biological_process"
assert "subsets" in n1 and "GOP:goslim_generic" in n1["subsets"]
assert "has_exact_synonym" in n1
assert "description" in n1
assert "comment" in n1
assert "xref" in n1 and "GO:0044699" in n1["xref"]
n2 = nodes["GO:0003674"]
n2["name"] = "molecular_function"
assert "subsets" in n2 and "GOP:goslim_generic" in n2["subsets"]
assert "has_exact_synonym" in n2
assert "description" in n2
assert "comment" in n2
assert "xref" in n2 and "GO:0005554" in n2["xref"]
n3 = nodes["GO:0005575"]
n3["name"] = "cellular_component"
assert "subsets" in n3 and "GOP:goslim_generic" in n3["subsets"]
assert "has_exact_synonym" in n3
assert "description" in n3
assert "comment" in n3
assert "xref" in n3 and "GO:0008372" in n3["xref"]
e1 = edges["GO:0008289", "GO:0003674"][0]
assert e1["subject"] == "GO:0008289"
assert e1["predicate"] == "biolink:subclass_of"
assert e1["object"] == "GO:0003674"
assert e1["relation"] == "rdfs:subClassOf"
def test_read_owl4():
"""
Read an OWL and ensure that logical axioms are annotated with Owlstar vocabulary.
"""
t = Transformer()
source = OwlSource(t)
g = source.parse(
filename=os.path.join(RESOURCE_DIR, "goslim_generic.owl"), format="owl"
)
nodes = {}
edges = {}
for rec in g:
if rec:
if len(rec) == 4:
key = (rec[0], rec[1])
if key in edges:
edges[key].append(rec[-1])
else:
edges[key] = [rec[-1]]
else:
nodes[rec[0]] = rec[-1]
e1 = edges["GO:0031012", "GO:0005576"][0]
assert e1["predicate"] == "biolink:part_of"
assert e1["relation"] == "BFO:0000050"
assert (
"logical_interpretation" in e1
and e1["logical_interpretation"] == "owlstar:AllSomeInterpretation"
)
e2 = edges["GO:0030705", "GO:0005622"][0]
assert e2["predicate"] == "biolink:occurs_in"
assert e2["relation"] == "BFO:0000066"
assert (
"logical_interpretation" in e2
and e2["logical_interpretation"] == "owlstar:AllSomeInterpretation"
)
| 6,565 | 29.826291 | 88 |
py
|
kgx
|
kgx-master/tests/unit/test_source/test_obograph_source.py
|
import os
import pytest
from kgx.source import ObographSource
from kgx.transformer import Transformer
from tests import RESOURCE_DIR
def test_read_obograph1():
"""
Read from an Obograph JSON using ObographSource.
"""
t = Transformer()
s = ObographSource(t)
g = s.parse(
os.path.join(RESOURCE_DIR, "goslim_generic.json"),
knowledge_source="GO slim generic",
)
nodes = {}
edges = {}
for rec in g:
if rec:
if len(rec) == 4:
edges[(rec[0], rec[1], rec[2])] = rec[3]
else:
nodes[rec[0]] = rec[1]
assert len(nodes) == 176
assert len(edges) == 205
n1 = nodes["GO:0003677"]
assert n1["id"] == "GO:0003677"
assert n1["name"] == "DNA binding"
assert (
n1["description"]
== "Any molecular function by which a gene product interacts selectively and non-covalently with DNA (deoxyribonucleic acid)."
)
assert n1["category"] == ["biolink:MolecularActivity"]
assert "structure-specific DNA binding" in n1["synonym"]
assert "structure specific DNA binding" in n1["synonym"]
assert "microtubule/chromatin interaction" in n1["synonym"]
assert "plasmid binding" in n1["synonym"]
n2 = nodes["GO:0005575"]
assert n2["id"] == "GO:0005575"
assert n2["name"] == "cellular_component"
assert (
n2["description"]
== "A location, relative to cellular compartments and structures, occupied by a macromolecular machine when it carries out a molecular function. There are two ways in which the gene ontology describes locations of gene products: (1) relative to cellular structures (e.g., cytoplasmic side of plasma membrane) or compartments (e.g., mitochondrion), and (2) the stable macromolecular complexes of which they are parts (e.g., the ribosome)."
)
assert n2["category"] == ["biolink:CellularComponent"]
assert n2["xref"] == ["NIF_Subcellular:sao1337158144"]
assert "goslim_chembl" in n2["subsets"]
assert "goslim_generic" in n2["subsets"]
def test_read_obograph2():
"""
Read from an Obograph JSON using ObographSource.
This test also supplies the provided_by parameter.
"""
t = Transformer()
s = ObographSource(t)
g = s.parse(
os.path.join(RESOURCE_DIR, "goslim_generic.json"),
provided_by="GO slim generic",
knowledge_source="GO slim generic",
)
nodes = {}
edges = {}
for rec in g:
if rec:
if len(rec) == 4:
edges[(rec[0], rec[1], rec[2])] = rec[3]
else:
nodes[rec[0]] = rec[1]
assert len(nodes) == 176
assert len(edges) == 205
n1 = nodes["GO:0003677"]
assert n1["id"] == "GO:0003677"
assert n1["name"] == "DNA binding"
assert (
n1["description"]
== "Any molecular function by which a gene product interacts selectively and non-covalently with DNA (deoxyribonucleic acid)."
)
assert n1["category"] == ["biolink:MolecularActivity"]
assert "structure-specific DNA binding" in n1["synonym"]
assert "structure specific DNA binding" in n1["synonym"]
assert "microtubule/chromatin interaction" in n1["synonym"]
assert "plasmid binding" in n1["synonym"]
assert "GO slim generic" in n1["provided_by"]
n2 = nodes["GO:0005575"]
assert n2["id"] == "GO:0005575"
assert n2["name"] == "cellular_component"
assert (
n2["description"]
== "A location, relative to cellular compartments and structures, occupied by a macromolecular machine when it carries out a molecular function. There are two ways in which the gene ontology describes locations of gene products: (1) relative to cellular structures (e.g., cytoplasmic side of plasma membrane) or compartments (e.g., mitochondrion), and (2) the stable macromolecular complexes of which they are parts (e.g., the ribosome)."
)
assert n2["category"] == ["biolink:CellularComponent"]
assert n2["xref"] == ["NIF_Subcellular:sao1337158144"]
assert "goslim_chembl" in n2["subsets"]
assert "goslim_generic" in n2["subsets"]
assert "GO slim generic" in n2["provided_by"]
@pytest.mark.parametrize(
"query",
[
(
{
"id": "http://purl.obolibrary.org/obo/GO_0005615",
"meta": {
"basicPropertyValues": [
{
"pred": "http://www.geneontology.org/formats/oboInOwl#hasOBONamespace",
"val": "cellular_component",
}
]
},
"type": "CLASS",
"lbl": "extracellular space",
},
"biolink:CellularComponent",
),
(
{
"id": "http://purl.obolibrary.org/obo/GO_0008168",
"meta": {
"definition": {
"val": "Catalysis of the transfer of a methyl group to an acceptor molecule."
},
"basicPropertyValues": [
{
"pred": "http://www.geneontology.org/formats/oboInOwl#hasAlternativeId",
"val": "GO:0004480",
},
{
"pred": "http://www.geneontology.org/formats/oboInOwl#hasOBONamespace",
"val": "molecular_function",
},
],
},
},
"biolink:MolecularActivity",
),
(
{
"id": "http://purl.obolibrary.org/obo/GO_0065003",
"meta": {
"definition": {
"val": "The aggregation, arrangement and bonding together of a set of macromolecules to form a protein-containing complex."
},
"basicPropertyValues": [
{
"pred": "http://www.geneontology.org/formats/oboInOwl#hasAlternativeId",
"val": "GO:0006461",
},
{
"pred": "http://www.geneontology.org/formats/oboInOwl#hasOBONamespace",
"val": "biological_process",
},
],
},
},
"biolink:BiologicalProcess",
),
],
)
def test_get_category(query):
"""
Test to guess the appropriate category for a sample OBO Graph JSON.
"""
node = query[0]
t = Transformer()
s = ObographSource(t)
c = s.get_category(node["id"], node)
assert c == query[1]
def test_error_detection():
t = Transformer()
s = ObographSource(t)
g = s.parse(
os.path.join(RESOURCE_DIR, "obo_error_detection.json"),
knowledge_source="Sample OBO",
)
nodes = {}
edges = {}
for rec in g:
if rec:
if len(rec) == 4:
edges[(rec[0], rec[1], rec[2])] = rec[3]
else:
nodes[rec[0]] = rec[1]
assert len(t.get_errors()) > 0
if len(t.get_errors("Error")) > 0:
t.write_report(None, "Error")
if len(t.get_errors("Warning")) > 0:
t.write_report(None, "Warning")
| 7,389 | 34.528846 | 446 |
py
|
kgx
|
kgx-master/tests/unit/test_source/test_trapi_source.py
|
import os
from kgx.source import TrapiSource
from kgx.transformer import Transformer
from tests import RESOURCE_DIR
def test_read_trapi_json1():
"""
Read from a JSON using TrapiSource.
"""
t = Transformer()
s = TrapiSource(t)
g = s.parse(os.path.join(RESOURCE_DIR, "rsa_sample.json"))
nodes = {}
edges = {}
for rec in g:
if rec:
if len(rec) == 4:
edges[(rec[0], rec[1])] = rec[3]
else:
nodes[rec[0]] = rec[1]
assert len(nodes.keys()) == 4
assert len(edges.keys()) == 3
n = nodes["HGNC:11603"]
assert n["id"] == "HGNC:11603"
assert n["name"] == "TBX4"
assert n["category"] == ["biolink:Gene"]
e = edges["HGNC:11603", "MONDO:0005002"]
assert e["subject"] == "HGNC:11603"
assert e["object"] == "MONDO:0005002"
assert e["predicate"] == "biolink:related_to"
def test_read_trapi_json2():
"""
Read from a TRAPI JSON using TrapiSource.
This test also supplies the knowledge_source parameter.
"""
t = Transformer()
s = TrapiSource(t)
g = s.parse(
os.path.join(RESOURCE_DIR, "rsa_sample.json"),
provided_by="Test TRAPI JSON",
knowledge_source="Test TRAPI JSON",
)
nodes = {}
edges = {}
for rec in g:
if rec:
if len(rec) == 4:
edges[(rec[0], rec[1])] = rec[3]
else:
nodes[rec[0]] = rec[1]
assert len(nodes.keys()) == 4
assert len(edges.keys()) == 3
n = nodes["HGNC:11603"]
assert n["id"] == "HGNC:11603"
assert n["name"] == "TBX4"
assert n["category"] == ["biolink:Gene"]
assert "Test TRAPI JSON" in n["provided_by"]
e = edges["HGNC:11603", "MONDO:0005002"]
assert e["subject"] == "HGNC:11603"
assert e["object"] == "MONDO:0005002"
assert e["predicate"] == "biolink:related_to"
assert "Test TRAPI JSON" in e["knowledge_source"]
| 1,950 | 25.013333 | 62 |
py
|
kgx
|
kgx-master/tests/unit/test_sink/test_graph_sink.py
|
from kgx.sink import GraphSink
from kgx.transformer import Transformer
def test_write_graph_no_edge_identifier():
"""
Write a graph via GraphSink.
"""
t = Transformer()
s = GraphSink(t)
s.write_node({"id": "A", "name": "Node A", "category": ["biolink:NamedThing"]})
s.write_node({"id": "B", "name": "Node B", "category": ["biolink:NamedThing"]})
s.write_node({"id": "C", "name": "Node C", "category": ["biolink:NamedThing"]})
s.write_edge(
{
"subject": "A",
"predicate": "biolink:related_to",
"object": "B",
"relation": "biolink:related_to",
}
)
assert s.graph.number_of_nodes() == 3
assert s.graph.number_of_edges() == 1
| 737 | 28.52 | 83 |
py
|
kgx
|
kgx-master/tests/unit/test_sink/test_jsonl_sink.py
|
import gzip
import os
from kgx.sink import JsonlSink
from kgx.transformer import Transformer
from tests import TARGET_DIR
from tests.unit.test_sink import get_graph
def test_write_jsonl1():
"""
Write a graph as JSON Lines using JsonlSink.
"""
graph = get_graph()
filename = os.path.join(TARGET_DIR, "test_graph1")
t = Transformer()
s = JsonlSink(t, filename=filename)
for n, data in graph.nodes(data=True):
s.write_node(data)
for u, v, k, data in graph.edges(data=True, keys=True):
s.write_edge(data)
s.finalize()
del s
assert os.path.exists(f"{filename}_nodes.jsonl")
assert os.path.exists(f"{filename}_edges.jsonl")
node_lines = open(f"{filename}_nodes.jsonl").readlines()
edge_lines = open(f"{filename}_edges.jsonl").readlines()
assert len(node_lines) == 6
assert len(edge_lines) == 6
def test_write_jsonl2():
"""
Write a graph as compressed JSON Lines using JsonlSink.
"""
graph = get_graph()
filename = os.path.join(TARGET_DIR, "test_graph2")
t = Transformer()
s = JsonlSink(t,filename=filename, compression="gz")
for n, data in graph.nodes(data=True):
s.write_node(data)
for u, v, k, data in graph.edges(data=True, keys=True):
s.write_edge(data)
s.finalize()
del s
assert os.path.exists(f"{filename}_nodes.jsonl.gz")
assert os.path.exists(f"{filename}_edges.jsonl.gz")
node_lines = gzip.open(f"{filename}_nodes.jsonl.gz", "rb").readlines()
edge_lines = gzip.open(f"{filename}_edges.jsonl.gz", "rb").readlines()
assert len(node_lines) == 6
assert len(edge_lines) == 6
| 1,649 | 29 | 74 |
py
|
kgx
|
kgx-master/tests/unit/test_sink/test_sqlite_sink.py
|
import os
from kgx.graph.nx_graph import NxGraph
from kgx.sink import SqlSink
from kgx.transformer import Transformer
from tests import TARGET_DIR
from kgx.utils.kgx_utils import create_connection, drop_existing_tables
NAMED_THING = "biolink:NamedThing"
SUBCLASS_OF = "biolink:sub_class_of"
def test_write_sqlite():
"""
Write a graph to a sqlite db file using SqlSink.
"""
conn = create_connection(os.path.join(TARGET_DIR, "test_graph.db"))
drop_existing_tables(conn)
graph = NxGraph()
graph.add_node("A", id="A", **{"name": "Node A", "category": [NAMED_THING, "biolink:Gene"]})
graph.add_node("B", id="B", **{"name": "Node B", "category": [NAMED_THING]})
graph.add_node("C", id="C", **{"name": "Node C", "category": [NAMED_THING]})
graph.add_node("D", id="D", **{"name": "Node D", "category": [NAMED_THING]})
graph.add_node("E", id="E", **{"name": "Node E", "category": [NAMED_THING]})
graph.add_node("F", id="F", **{"name": "Node F", "category": [NAMED_THING]})
graph.add_edge(
"B", "A", **{"subject": "B", "object": "A", "predicate": SUBCLASS_OF}
)
graph.add_edge(
"C", "B", **{"subject": "C", "object": "B", "predicate": SUBCLASS_OF}
)
graph.add_edge(
"D", "C", **{"subject": "D", "object": "C", "predicate": SUBCLASS_OF}
)
graph.add_edge(
"D", "A", **{"subject": "D", "object": "A", "predicate": "biolink:related_to"}
)
graph.add_edge(
"E", "D", **{"subject": "E", "object": "D", "predicate": SUBCLASS_OF}
)
graph.add_edge(
"F", "D", **{"subject": "F", "object": "D", "predicate": SUBCLASS_OF}
)
t = Transformer()
s = SqlSink(
owner=t,
filename=os.path.join(TARGET_DIR, "test_graph.db"),
format="sql",
node_properties={"id", "name", "category"},
edge_properties={"subject", "predicate", "object", "relation"},
)
for n, data in graph.nodes(data=True):
s.write_node(data)
for u, v, k, data in graph.edges(data=True, keys=True):
s.write_edge(data)
s.finalize()
cur = conn.cursor()
sql_query = """SELECT name FROM sqlite_master
WHERE type='table';"""
cur.execute(sql_query)
tables = cur.fetchall()
assert len(tables) == 2
cur.execute("SELECT count(*) FROM nodes")
number_of_nodes = cur.fetchone()[0]
assert number_of_nodes == 6
cur.execute("SELECT count(*) FROM edges")
number_of_edges = cur.fetchone()[0]
assert number_of_edges == 6
def test_write_denormalized_sqlite():
"""
    Write a graph to a sqlite db file using SqlSink, with denormalized edges.
"""
conn = create_connection(os.path.join(TARGET_DIR, "test_graph.db"))
drop_existing_tables(conn)
graph = NxGraph()
graph.add_node("A", id="A", **{"name": "Node A", "category": [NAMED_THING, "biolink:Gene"]})
graph.add_node("B", id="B", **{"name": "Node B", "category": [NAMED_THING]})
graph.add_node("C", id="C", **{"name": "Node C", "category": [NAMED_THING]})
graph.add_node("D", id="D", **{"name": "Node D", "category": [NAMED_THING]})
graph.add_node("E", id="E", **{"name": "Node E", "category": [NAMED_THING]})
graph.add_node("F", id="F", **{"name": "Node F", "category": [NAMED_THING]})
graph.add_edge(
"B", "A", **{"subject": "B", "object": "A", "predicate": SUBCLASS_OF}
)
graph.add_edge(
"C", "B", **{"subject": "C", "object": "B", "predicate": SUBCLASS_OF}
)
graph.add_edge(
"D", "C", **{"subject": "D", "object": "C", "predicate": SUBCLASS_OF}
)
graph.add_edge(
"D", "A", **{"subject": "D", "object": "A", "predicate": "biolink:related_to"}
)
graph.add_edge(
"E", "D", **{"subject": "E", "object": "D", "predicate": SUBCLASS_OF}
)
graph.add_edge(
"F", "D", **{"subject": "F", "object": "D", "predicate": SUBCLASS_OF}
)
t = Transformer()
s = SqlSink(
owner=t,
filename=os.path.join(TARGET_DIR, "test_graph.db"),
format="sql",
node_properties={"id", "name", "category"},
edge_properties={"subject", "predicate", "object", "relation"},
denormalize=True,
)
for n, data in graph.nodes(data=True):
s.write_node(data)
for u, v, k, data in graph.edges(data=True, keys=True):
s.write_edge(data)
s.finalize()
cur = conn.cursor()
sql_query = """SELECT name FROM sqlite_master
WHERE type='table';"""
cur.execute(sql_query)
tables = cur.fetchall()
assert len(tables) == 2
cur.execute("SELECT count(*) FROM edges")
number_of_edges = cur.fetchone()[0]
assert number_of_edges == 6
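# Illustrative sketch (not part of the original suite): individual rows written by
# SqlSink can be fetched back through the same sqlite connection helper used above.
# The "nodes" table name comes from the queries above; the presence of an "id" column
# is an assumption about the serialized layout.
def _fetch_node_row(db_path, node_id):
    conn = create_connection(db_path)
    cur = conn.cursor()
    cur.execute("SELECT * FROM nodes WHERE id = ?", (node_id,))
    return cur.fetchone()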
| 4,667 | 33.835821 | 96 |
py
|
kgx
|
kgx-master/tests/unit/test_sink/test_neo_sink.py
|
from time import sleep
import pytest
from neo4j import GraphDatabase
from kgx.sink import NeoSink
from kgx.transformer import Transformer
from tests import print_graph
from tests.unit import (
clean_database,
DEFAULT_NEO4J_URL,
DEFAULT_NEO4J_USERNAME,
DEFAULT_NEO4J_PASSWORD,
check_container,
CONTAINER_NAME,
)
from tests.unit import get_graph
def test_sanitize_category():
"""
    Test to ensure the behavior of sanitize_category.
"""
categories = ["biolink:Gene", "biolink:GeneOrGeneProduct"]
s = NeoSink.sanitize_category(categories)
assert s == ["`biolink:Gene`", "`biolink:GeneOrGeneProduct`"]
@pytest.mark.parametrize(
"category", ["biolink:Gene", "biolink:GeneOrGeneProduct", "biolink:NamedThing"]
)
def test_create_constraint_query(category):
"""
    Test to ensure that a CONSTRAINT Cypher query is generated as expected.
"""
sanitized_category = NeoSink.sanitize_category([category])
q = NeoSink.create_constraint_query(sanitized_category)
assert q == f"CREATE CONSTRAINT IF NOT EXISTS ON (n:{sanitized_category}) ASSERT n.id IS UNIQUE"
@pytest.mark.skipif(
not check_container(), reason=f"Container {CONTAINER_NAME} is not running"
)
def test_write_neo1(clean_database):
"""
Write a graph to a Neo4j instance using NeoSink.
"""
graph = get_graph("test")[0]
t = Transformer()
s = NeoSink(
owner=t,
uri=DEFAULT_NEO4J_URL,
username=DEFAULT_NEO4J_USERNAME,
password=DEFAULT_NEO4J_PASSWORD,
cache_size=1000
)
for n, data in graph.nodes(data=True):
s.write_node(data)
for u, v, k, data in graph.edges(data=True, keys=True):
s.write_edge(data)
s.finalize()
assert s.CACHE_SIZE is not None
d = GraphDatabase.driver(
DEFAULT_NEO4J_URL, auth=(DEFAULT_NEO4J_USERNAME, DEFAULT_NEO4J_PASSWORD)
)
session = d.session()
try:
results = session.run("MATCH (n) RETURN COUNT(*)")
number_of_nodes = results[0][0]
assert number_of_nodes == 3
except Exception as e:
print(e)
try:
results = session.run("MATCH (s)-->(o) RETURN COUNT(*)")
number_of_edges = results[0][0]
assert number_of_edges == 1
except Exception as e:
print(e)
@pytest.mark.skipif(
not check_container(), reason=f"Container {CONTAINER_NAME} is not running"
)
@pytest.mark.parametrize(
"query",
[(get_graph("kgx-unit-test")[0], 3, 1), (get_graph("kgx-unit-test")[1], 6, 6)],
)
def test_write_neo2(clean_database, query):
"""
Test writing a graph to a Neo4j instance.
"""
graph = query[0]
t = Transformer()
sink = NeoSink(
owner=t,
uri=DEFAULT_NEO4J_URL,
username=DEFAULT_NEO4J_USERNAME,
password=DEFAULT_NEO4J_PASSWORD,
)
for n, data in graph.nodes(data=True):
sink.write_node(data)
for u, v, k, data in graph.edges(data=True, keys=True):
sink.write_edge(data)
sink.finalize()
nr = sink.session.run("MATCH (n) RETURN count(n)")
[node_counts] = [x for x in nr][0]
assert node_counts >= query[1]
er = sink.session.run("MATCH ()-[p]->() RETURN count(p)")
[edge_counts] = [x for x in er][0]
assert edge_counts >= query[2]
@pytest.mark.skipif(
not check_container(), reason=f"Container {CONTAINER_NAME} is not running"
)
def test_write_neo3(clean_database):
"""
Test writing a graph and then writing a slightly
modified version of the graph to the same Neo4j instance.
"""
graph = get_graph("kgx-unit-test")[2]
t = Transformer()
sink = NeoSink(
owner=t,
uri=DEFAULT_NEO4J_URL,
username=DEFAULT_NEO4J_USERNAME,
password=DEFAULT_NEO4J_PASSWORD,
)
for n, data in graph.nodes(data=True):
sink.write_node(data)
for u, v, k, data in graph.edges(data=True, keys=True):
sink.write_edge(data)
sink.finalize()
graph.add_node(
"B", id="B", publications=["PMID:1", "PMID:2"], category=["biolink:NamedThing"]
)
graph.add_node("C", id="C", category=["biolink:NamedThing"], source="kgx-unit-test")
e = graph.get_edge("A", "B")
edge_key = list(e.keys())[0]
graph.add_edge_attribute(
"A", "B", edge_key, attr_key="test_prop", attr_value="VAL123"
)
print_graph(graph)
assert graph.number_of_nodes() == 3
assert graph.number_of_edges() == 1
for n, data in graph.nodes(data=True):
sink.write_node(data)
for u, v, k, data in graph.edges(data=True, keys=True):
sink.write_edge(data)
sink.finalize()
nr = sink.session.run("MATCH (n) RETURN n")
nodes = []
for node in nr:
nodes.append(node)
edges = []
er = sink.session.run(
"MATCH ()-[p]-() RETURN p",
data_contents=True,
# returns=(Node, Relationship, Node),
)
for edge in er:
edges.append(edge)
| 4,933 | 27.853801 | 100 |
py
|
kgx
|
kgx-master/tests/unit/test_sink/test_json_sink.py
|
import json
import os
from kgx.sink import JsonSink
from kgx.transformer import Transformer
from tests import TARGET_DIR
from tests.unit.test_sink import get_graph
def test_write_json1():
"""
Write a graph as JSON using JsonSink.
"""
graph = get_graph()
filename = os.path.join(TARGET_DIR, "test_graph1.json")
t = Transformer()
s = JsonSink(t, filename=filename)
for n, data in graph.nodes(data=True):
s.write_node(data)
for u, v, k, data in graph.edges(data=True, keys=True):
s.write_edge(data)
s.finalize()
assert os.path.exists(filename)
def test_write_json2():
"""
    Write a graph as compressed JSON using JsonSink.
"""
graph = get_graph()
filename = os.path.join(TARGET_DIR, "test_graph2.json")
t = Transformer()
s = JsonSink(t, filename=filename, compression="gz")
for n, data in graph.nodes(data=True):
s.write_node(data)
for u, v, k, data in graph.edges(data=True, keys=True):
s.write_edge(data)
s.finalize()
assert os.path.exists(f"{filename}.gz")
| 1,082 | 26.075 | 59 |
py
|
kgx
|
kgx-master/tests/unit/test_sink/__init__.py
|
from kgx.graph.nx_graph import NxGraph
def get_graph():
graph = NxGraph()
graph.add_node(
"A", **{"id": "A", "name": "Node A", "category": ["biolink:NamedThing"]}
)
graph.add_node(
"B", **{"id": "B", "name": "Node B", "category": ["biolink:NamedThing"]}
)
graph.add_node(
"C", **{"id": "C", "name": "Node C", "category": ["biolink:NamedThing"]}
)
graph.add_node(
"D", **{"id": "D", "name": "Node D", "category": ["biolink:NamedThing"]}
)
graph.add_node(
"E", **{"id": "E", "name": "Node E", "category": ["biolink:NamedThing"]}
)
graph.add_node(
"F", **{"id": "F", "name": "Node F", "category": ["biolink:NamedThing"]}
)
graph.add_edge(
"B", "A", **{"subject": "B", "object": "A", "predicate": "biolink:sub_class_of"}
)
graph.add_edge(
"C", "B", **{"subject": "C", "object": "B", "predicate": "biolink:sub_class_of"}
)
graph.add_edge(
"D", "C", **{"subject": "D", "object": "C", "predicate": "biolink:sub_class_of"}
)
graph.add_edge(
"D", "A", **{"subject": "D", "object": "A", "predicate": "biolink:related_to"}
)
graph.add_edge(
"E", "D", **{"subject": "E", "object": "D", "predicate": "biolink:sub_class_of"}
)
graph.add_edge(
"F", "D", **{"subject": "F", "object": "D", "predicate": "biolink:sub_class_of"}
)
return graph
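# Illustrative sketch (not part of the original suite): the fixture above is meant to
# stay small enough to reason about by hand -- six nodes and six edges.
def _fixture_summary():
    g = get_graph()
    return g.number_of_nodes(), g.number_of_edges()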
| 1,427 | 32.209302 | 88 |
py
|
kgx
|
kgx-master/tests/unit/test_sink/test_rdf_sink.py
|
import os
import pytest
import rdflib
from pprint import pprint
from kgx.sink import RdfSink
from kgx.transformer import Transformer
from tests import TARGET_DIR
from tests.unit.test_sink import get_graph
def test_write_rdf1():
"""
Write a graph as RDF N-Triples using RdfSink without reifying all edges.
"""
graph = get_graph()
filename = os.path.join(TARGET_DIR, "test_graph1.nt")
t = Transformer()
s = RdfSink(owner=t, filename=filename, reify_all_edges=False)
for n, data in graph.nodes(data=True):
s.write_node(data)
for u, v, k, data in graph.edges(data=True, keys=True):
s.write_edge(data)
s.finalize()
assert os.path.exists(filename)
lines = open(filename, "r").readlines()
assert len(lines) == 18
def test_write_rdf2():
"""
    Write a graph as compressed RDF N-Triples using RdfSink, without reifying all edges.
"""
graph = get_graph()
filename = os.path.join(TARGET_DIR, "test_graph2.nt.gz")
t = Transformer()
s = RdfSink(owner=t, filename=filename, compression=True, reify_all_edges=False)
for n, data in graph.nodes(data=True):
s.write_node(data)
for u, v, k, data in graph.edges(data=True, keys=True):
s.write_edge(data)
s.finalize()
assert os.path.exists(filename)
lines = open(filename, "r").readlines()
assert len(lines) == 18
def test_write_rdf3():
"""
Write a graph as RDF N-Triples using RdfSink, where all edges are reified.
"""
graph = get_graph()
filename = os.path.join(TARGET_DIR, "test_graph3.nt")
t = Transformer()
s = RdfSink(owner=t, filename=filename)
for n, data in graph.nodes(data=True):
s.write_node(data)
for u, v, k, data in graph.edges(data=True, keys=True):
s.write_edge(data)
s.finalize()
assert os.path.exists(filename)
counter = 0
lines = open(filename, "r").readlines()
for line in lines:
if "<https://w3id.org/biolink/vocab/Association>" in line:
            # the lines collection is the entirety of the RDF; we only want to
            # test that the association type is fully expanded.
            counter += 1
assert counter > 0
assert len(lines) == 42
@pytest.mark.parametrize(
"query",
[
("id", "uriorcurie", "MONDO:000001", "URIRef", None),
(
"name",
"xsd:string",
"Test concept name",
"Literal",
rdflib.term.URIRef("http://www.w3.org/2001/XMLSchema#string"),
),
("predicate", "uriorcurie", "biolink:related_to", "URIRef", None),
("relation", "uriorcurie", "RO:000000", "URIRef", None),
("custom_property1", "uriorcurie", "X:123", "URIRef", None),
(
"custom_property2",
"xsd:float",
"480.213",
"Literal",
rdflib.term.URIRef("http://www.w3.org/2001/XMLSchema#float"),
),
],
)
def test_prepare_object(query):
"""
Test internal _prepare_object method.
"""
t = Transformer()
sink = RdfSink(t, os.path.join(TARGET_DIR, "test_graph3.nt"))
o = sink._prepare_object(query[0], query[1], query[2])
assert type(o).__name__ == query[3]
if query[4]:
assert o.datatype == query[4]
@pytest.mark.parametrize(
"query",
[("name", "xsd:string"), ("predicate", "uriorcurie"), ("xyz", "xsd:string")],
)
def test_get_property_type(query):
"""
Test to ensure that get_property_type returns the appropriate type
for a given property.
"""
t = Transformer()
sink = RdfSink(t, os.path.join(TARGET_DIR, "test_graph3.nt"))
assert sink._get_property_type(query[0]) == query[1]
@pytest.mark.parametrize(
"query",
[
("MONDO:000001", "URIRef", "http://purl.obolibrary.org/obo/MONDO_000001"),
("urn:uuid:12345", "URIRef", "urn:uuid:12345"),
(":new_prop", "URIRef", "https://www.example.org/UNKNOWN/new_prop"),
],
)
def test_uriref(query):
"""
Test for uriref method.
"""
t = Transformer()
sink = RdfSink(t, os.path.join(TARGET_DIR, "test_graph3.nt"))
x = sink.uriref(query[0])
assert type(x).__name__ == query[1]
assert str(x) == query[2]
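# Illustrative sketch (not part of the original suite): a compact view of the two
# helpers exercised above -- uriref expands a CURIE to a URIRef, and _prepare_object
# wraps a value as either a URIRef or a typed Literal.
def _inspect_rdf_helpers():
    t = Transformer()
    sink = RdfSink(t, os.path.join(TARGET_DIR, "test_graph3.nt"))
    curie_ref = sink.uriref("MONDO:000001")
    name_literal = sink._prepare_object("name", "xsd:string", "Test concept name")
    return curie_ref, name_literal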
| 4,250 | 27.152318 | 104 |
py
|
kgx
|
kgx-master/tests/unit/test_sink/test_tsv_sink.py
|
import os
from kgx.graph.nx_graph import NxGraph
from kgx.sink import TsvSink
from kgx.transformer import Transformer
from tests import TARGET_DIR
def test_write_tsv1():
"""
Write a graph to a TSV file using TsvSink.
"""
graph = NxGraph()
graph.add_node("A", id="A", **{"name": "Node A", "category": ["biolink:NamedThing", "biolink:Gene"]})
graph.add_node("B", id="B", **{"name": "Node B", "category": ["biolink:NamedThing"]})
graph.add_node("C", id="C", **{"name": "Node C", "category": ["biolink:NamedThing"]})
graph.add_node("D", id="D", **{"name": "Node D", "category": ["biolink:NamedThing"]})
graph.add_node("E", id="E", **{"name": "Node E", "category": ["biolink:NamedThing"]})
graph.add_node("F", id="F", **{"name": "Node F", "category": ["biolink:NamedThing"]})
graph.add_edge(
"B", "A", **{"subject": "B", "object": "A", "predicate": "biolink:sub_class_of"}
)
graph.add_edge(
"C", "B", **{"subject": "C", "object": "B", "predicate": "biolink:sub_class_of"}
)
graph.add_edge(
"D", "C", **{"subject": "D", "object": "C", "predicate": "biolink:sub_class_of"}
)
graph.add_edge(
"D", "A", **{"subject": "D", "object": "A", "predicate": "biolink:related_to"}
)
graph.add_edge(
"E", "D", **{"subject": "E", "object": "D", "predicate": "biolink:sub_class_of"}
)
graph.add_edge(
"F", "D", **{"subject": "F", "object": "D", "predicate": "biolink:sub_class_of"}
)
t = Transformer()
s = TsvSink(
owner=t,
filename=os.path.join(TARGET_DIR, "test_graph"),
format="tsv",
node_properties={"id", "name", "category"},
edge_properties={"subject", "predicate", "object", "relation"},
)
for n, data in graph.nodes(data=True):
s.write_node(data)
for u, v, k, data in graph.edges(data=True, keys=True):
s.write_edge(data)
s.finalize()
node_lines = open(os.path.join(TARGET_DIR, "test_graph_nodes.tsv")).readlines()
edge_lines = open(os.path.join(TARGET_DIR, "test_graph_edges.tsv")).readlines()
assert len(node_lines) == 7
assert len(edge_lines) == 7
for n in node_lines:
assert len(n.split("\t")) == 3
for e in edge_lines:
assert len(e.split("\t")) == 4
def test_write_tsv2():
"""
    Write a graph to a TSV archive (tar) using TsvSink.
"""
graph = NxGraph()
graph.add_node("A", id="A", **{"name": "Node A", "category": ["biolink:NamedThing", "biolink:Gene"]})
graph.add_node("B", id="B", **{"name": "Node B"})
graph.add_node("C", id="C", **{"name": "Node C"})
graph.add_node("D", id="D", **{"name": "Node D"})
graph.add_node("E", id="E", **{"name": "Node E"})
graph.add_node("F", id="F", **{"name": "Node F"})
graph.add_edge(
"B", "A", **{"subject": "B", "object": "A", "predicate": "biolink:sub_class_of"}
)
graph.add_edge(
"C", "B", **{"subject": "C", "object": "B", "predicate": "biolink:sub_class_of"}
)
graph.add_edge(
"D", "C", **{"subject": "D", "object": "C", "predicate": "biolink:sub_class_of"}
)
graph.add_edge(
"D", "A", **{"subject": "D", "object": "A", "predicate": "biolink:related_to"}
)
graph.add_edge(
"E", "D", **{"subject": "E", "object": "D", "predicate": "biolink:sub_class_of"}
)
graph.add_edge(
"F", "D", **{"subject": "F", "object": "D", "predicate": "biolink:sub_class_of"}
)
t = Transformer()
s = TsvSink(
owner=t,
filename=os.path.join(TARGET_DIR, "test_graph_archive"),
format="tsv",
compression="tar",
node_properties={"id", "name"},
edge_properties={"subject", "predicate", "object", "relation"},
)
for n, data in graph.nodes(data=True):
s.write_node(data)
for u, v, k, data in graph.edges(data=True, keys=True):
s.write_edge(data)
s.finalize()
assert os.path.exists(os.path.join(TARGET_DIR, "test_graph_archive.tar"))
def test_write_tsv3():
"""
    Write a graph to a compressed TSV archive (tar.gz) using TsvSink.
"""
graph = NxGraph()
graph.add_node("A", id="A", **{"name": "Node A", "category": ["biolink:NamedThing", "biolink:Gene"]})
graph.add_node("B", id="B", **{"name": "Node B"})
graph.add_node("C", id="C", **{"name": "Node C"})
graph.add_node("D", id="D", **{"name": "Node D"})
graph.add_node("E", id="E", **{"name": "Node E"})
graph.add_node("F", id="F", **{"name": "Node F"})
graph.add_edge(
"B", "A", **{"subject": "B", "object": "A", "predicate": "biolink:sub_class_of"}
)
graph.add_edge(
"C", "B", **{"subject": "C", "object": "B", "predicate": "biolink:sub_class_of"}
)
graph.add_edge(
"D", "C", **{"subject": "D", "object": "C", "predicate": "biolink:sub_class_of"}
)
graph.add_edge(
"D", "A", **{"subject": "D", "object": "A", "predicate": "biolink:related_to"}
)
graph.add_edge(
"E", "D", **{"subject": "E", "object": "D", "predicate": "biolink:sub_class_of"}
)
graph.add_edge(
"F", "D", **{"subject": "F", "object": "D", "predicate": "biolink:sub_class_of"}
)
t = Transformer()
s = TsvSink(
owner=t,
filename=os.path.join(TARGET_DIR, "test_graph_archive"),
format="tsv",
compression="tar.gz",
node_properties={"id", "name"},
edge_properties={"subject", "predicate", "object", "relation"},
)
for n, data in graph.nodes(data=True):
s.write_node(data)
for u, v, k, data in graph.edges(data=True, keys=True):
s.write_edge(data)
s.finalize()
assert os.path.exists(os.path.join(TARGET_DIR, "test_graph_archive.tar.gz"))
| 5,750 | 35.865385 | 105 |
py
|
kgx
|
kgx-master/tests/integration/test_graph_merge.py
|
import os
from kgx.graph_operations.graph_merge import merge_all_graphs
from kgx.transformer import Transformer
from tests import RESOURCE_DIR
def test_merge():
"""
Test for merging graphs.
"""
input_args1 = {
"filename": [
os.path.join(RESOURCE_DIR, "merge", "test1_nodes.tsv"),
os.path.join(RESOURCE_DIR, "merge", "test1_edges.tsv"),
],
"format": "tsv",
}
t1 = Transformer()
t1.transform(input_args1)
input_args2 = {
"filename": [
os.path.join(RESOURCE_DIR, "merge", "test2_nodes.tsv"),
os.path.join(RESOURCE_DIR, "merge", "test2_edges.tsv"),
],
"format": "tsv",
}
t2 = Transformer()
t2.transform(input_args2)
merged_graph = merge_all_graphs([t1.store.graph, t2.store.graph], preserve=True)
assert len(merged_graph.nodes()) == 6
assert len(merged_graph.edges()) == 8
x1 = merged_graph.nodes()["x1"]
assert x1["name"] == "node x1"
assert isinstance(x1["category"], list)
assert "a" in x1["p1"]
assert "1" in x1["p1"]
x10 = merged_graph.nodes()["x10"]
assert x10["id"] == "x10"
assert x10["name"] == "node x10"
def test_merge_no_preserve():
"""
Test for merging graphs, overwriting conflicting properties.
"""
input_args1 = {
"filename": [
os.path.join(RESOURCE_DIR, "merge", "test1_nodes.tsv"),
os.path.join(RESOURCE_DIR, "merge", "test1_edges.tsv"),
],
"format": "tsv",
}
t1 = Transformer()
t1.transform(input_args1)
input_args2 = {
"filename": [
os.path.join(RESOURCE_DIR, "merge", "test2_nodes.tsv"),
os.path.join(RESOURCE_DIR, "merge", "test2_edges.tsv"),
],
"format": "tsv",
}
t2 = Transformer()
t2.transform(input_args2)
merged_graph = merge_all_graphs([t1.store.graph, t2.store.graph], preserve=False)
assert len(merged_graph.nodes()) == 6
assert len(merged_graph.edges()) == 8
x1 = merged_graph.nodes()["x1"]
assert x1["name"] == "node x1"
assert isinstance(x1["category"], list)
assert list(t1.store.graph.nodes()["x1"]["category"])[0] in x1["category"]
assert list(t2.store.graph.nodes()["x1"]["category"])[0] in x1["category"]
assert x1["p1"] == "a"
| 2,334 | 27.82716 | 85 |
py
|
kgx
|
kgx-master/tests/integration/test_transform.py
|
import os
from typing import List
from pprint import pprint
import pytest
from kgx.utils.kgx_utils import GraphEntityType
from kgx.transformer import Transformer
from tests import RESOURCE_DIR, TARGET_DIR
from tests.integration import (
DEFAULT_NEO4J_URL,
DEFAULT_NEO4J_USERNAME,
DEFAULT_NEO4J_PASSWORD,
)
def _transform(query):
"""
Transform an input to an output via Transformer.
"""
t1 = Transformer()
t1.transform(query[0])
t1.save(query[1].copy())
assert t1.store.graph.number_of_nodes() == query[2]
assert t1.store.graph.number_of_edges() == query[3]
output = query[1]
if output["format"] in {"tsv", "csv", "jsonl"}:
input_args = {
"filename": [
f"{output['filename']}_nodes.{output['format']}",
f"{output['filename']}_edges.{output['format']}",
],
"format": output["format"],
}
elif output["format"] in {"neo4j"}:
input_args = {
"uri": DEFAULT_NEO4J_URL,
"username": DEFAULT_NEO4J_USERNAME,
"password": DEFAULT_NEO4J_PASSWORD,
"format": "neo4j",
}
else:
input_args = {"filename": [f"{output['filename']}"], "format": output["format"]}
t2 = Transformer()
t2.transform(input_args)
assert t2.store.graph.number_of_nodes() == query[2]
assert t2.store.graph.number_of_edges() == query[3]
@pytest.mark.parametrize(
"query",
[
({"category": {"biolink:Gene", "biolink:Disease"}}, {}, 2, 1),
(
{
"category": {
"biolink:Gene",
"biolink:Disease",
"biolink:PhenotypicFeature",
}
},
{"validated": "true"},
3,
2,
),
(
{"category": {"biolink:Gene", "biolink:PhenotypicFeature"}},
{
"subject_category": {"biolink:Gene"},
"object_category": {"biolink:PhenotypicFeature"},
"predicate": {"biolink:related_to"},
},
2,
1,
),
],
)
def test_transform_filters1(query):
"""
Test transform with filters.
"""
input_args = {
"filename": [
os.path.join(RESOURCE_DIR, "test2_nodes.tsv"),
os.path.join(RESOURCE_DIR, "test2_edges.tsv"),
],
"format": "tsv",
"node_filters": query[0],
"edge_filters": query[1],
}
t = Transformer()
t.transform(input_args)
assert t.store.graph.number_of_nodes() == query[2]
assert t.store.graph.number_of_edges() == query[3]
@pytest.mark.parametrize(
"query",
[
({}, {}, 512, 531),
({"category": {"biolink:Gene"}}, {}, 178, 177),
(
{"category": {"biolink:Gene"}},
{"subject_category": {"biolink:Gene"}, "object_category": {"biolink:Gene"}},
178,
177,
),
(
{"category": {"biolink:Gene"}},
{
"subject_category": {"biolink:Gene"},
"object_category": {"biolink:Gene"},
"predicate": {"biolink:orthologous_to"},
},
178,
12,
),
(
{"category": {"biolink:Gene"}},
{"predicate": {"biolink:interacts_with"}},
178,
165,
),
({}, {"aggregator_knowledge_source": {"omim", "hpoa", "orphanet"}}, 512, 166),
({}, {"subject_category": {"biolink:Disease"}}, 56, 35),
({}, {"object_category": {"biolink:Disease"}}, 22, 20),
],
)
def test_transform_filters2(query):
"""
Test transform with filters.
"""
input_args = {
"filename": [
os.path.join(RESOURCE_DIR, "graph_nodes.tsv"),
os.path.join(RESOURCE_DIR, "graph_edges.tsv"),
],
"format": "tsv",
"node_filters": query[0],
"edge_filters": query[1],
"lineterminator": None,
}
t = Transformer()
t.transform(input_args)
assert t.store.graph.number_of_nodes() == query[2]
assert t.store.graph.number_of_edges() == query[3]
@pytest.mark.parametrize(
"query",
[
({"category": {"biolink:Gene"}}, {}, 2, 0),
({"category": {"biolink:Protein"}}, {}, 4, 3),
(
{"category": {"biolink:Protein"}},
{"predicate": {"biolink:interacts_with"}},
4,
1,
),
],
)
def test_rdf_transform_with_filters1(query):
"""
Test RDF transform with filters.
"""
input_args = {
"filename": [os.path.join(RESOURCE_DIR, "rdf", "test3.nt")],
"format": "nt",
"node_filters": query[0],
"edge_filters": query[1],
}
t = Transformer()
t.transform(input_args)
assert t.store.graph.number_of_edges() == query[3]
def test_rdf_transform1():
"""
    Test parsing RDF N-Triples, with a user-defined prefix map
    and node property predicates.
"""
prefix_map = {
"HGNC": "https://www.genenames.org/data/gene-symbol-report/#!/hgnc_id/",
"OMIM": "http://omim.org/entry/",
}
node_property_predicates = {
"http://purl.obolibrary.org/obo/RO_0002558",
"https://monarchinitiative.org/frequencyOfPhenotype",
}
input_args1 = {
"filename": [os.path.join(RESOURCE_DIR, "rdf", "oban-test.nt")],
"format": "nt",
"prefix_map": prefix_map,
"node_property_predicates": node_property_predicates,
}
t1 = Transformer()
t1.transform(input_args1)
assert t1.store.graph.number_of_nodes() == 14
assert t1.store.graph.number_of_edges() == 7
n1 = t1.store.graph.nodes()["HP:0000505"]
assert len(n1["category"]) == 1
assert "biolink:NamedThing" in n1["category"]
e1 = list(t1.store.graph.get_edge("OMIM:166400", "HP:0000006").values())[0]
assert e1["subject"] == "OMIM:166400"
assert e1["object"] == "HP:0000006"
assert e1["relation"] == "RO:0000091"
assert e1["type"] == ["OBAN:association"]
assert e1["has_evidence"] == ["ECO:0000501"]
e2 = list(t1.store.graph.get_edge("ORPHA:93262", "HP:0000505").values())[0]
assert e2["subject"] == "ORPHA:93262"
assert e2["object"] == "HP:0000505"
assert e2["relation"] == "RO:0002200"
assert e2["type"] == ["OBAN:association"]
assert e2["frequencyOfPhenotype"] == "HP:0040283"
output_args = {
"filename": os.path.join(TARGET_DIR, "oban-export.nt"),
"format": "nt",
}
t1.save(output_args)
input_args2 = {
"filename": [os.path.join(TARGET_DIR, "oban-export.nt")],
"format": "nt",
"prefix_map": prefix_map,
}
t2 = Transformer()
t2.transform(input_args2)
assert t2.store.graph.number_of_nodes() == 14
assert t2.store.graph.number_of_edges() == 7
def test_rdf_transform2():
"""
    Test parsing RDF N-Triples, with a user-defined prefix map,
    node property predicates, and predicate mappings.
"""
prefix_map = {
"HGNC": "https://www.genenames.org/data/gene-symbol-report/#!/hgnc_id/",
"OMIM": "http://omim.org/entry/",
}
node_property_predicates = {
"http://purl.obolibrary.org/obo/RO_0002558",
"https://monarchinitiative.org/frequencyOfPhenotype",
}
predicate_mappings = {
"https://monarchinitiative.org/frequencyOfPhenotype": "frequency_of_phenotype",
}
input_args1 = {
"filename": [os.path.join(RESOURCE_DIR, "rdf", "oban-test.nt")],
"format": "nt",
"prefix_map": prefix_map,
"node_property_predicates": node_property_predicates,
"predicate_mappings": predicate_mappings,
}
t1 = Transformer()
t1.transform(input_args1)
n1t1 = t1.store.graph.nodes()["HP:0000505"]
assert len(n1t1["category"]) == 1
assert "biolink:NamedThing" in n1t1["category"]
e1t1 = list(t1.store.graph.get_edge("OMIM:166400", "HP:0000006").values())[0]
assert e1t1["subject"] == "OMIM:166400"
assert e1t1["object"] == "HP:0000006"
assert e1t1["relation"] == "RO:0000091"
assert e1t1["type"] == ['OBAN:association']
assert e1t1["has_evidence"] == ["ECO:0000501"]
e2t1 = list(t1.store.graph.get_edge("ORPHA:93262", "HP:0000505").values())[0]
assert e2t1["subject"] == "ORPHA:93262"
assert e2t1["object"] == "HP:0000505"
assert e2t1["relation"] == "RO:0002200"
assert e2t1["type"] == ['OBAN:association']
assert e2t1["frequency_of_phenotype"] == "HP:0040283"
assert t1.store.graph.number_of_nodes() == 14
assert t1.store.graph.number_of_edges() == 7
property_types = {"frequency_of_phenotype": "uriorcurie", "source": "uriorcurie"}
output_args1 = {
"filename": os.path.join(TARGET_DIR, "oban-export.nt"),
"format": "nt",
"property_types": property_types,
}
t1.save(output_args1)
input_args2 = {
"filename": [os.path.join(TARGET_DIR, "oban-export.nt")],
"format": "nt",
}
t2 = Transformer()
t2.transform(input_args2)
n1t2 = t2.store.graph.nodes()["HP:0000505"]
assert len(n1t2["category"]) == 1
assert "biolink:NamedThing" in n1t2["category"]
e1t2 = list(t2.store.graph.get_edge("OMIM:166400", "HP:0000006").values())[0]
assert e1t2["subject"] == "OMIM:166400"
assert e1t2["object"] == "HP:0000006"
assert e1t2["relation"] == "RO:0000091"
assert e1t2["type"] == ['biolink:Association']
assert e1t2["has_evidence"] == ["ECO:0000501"]
e2t2 = list(t2.store.graph.get_edge("ORPHA:93262", "HP:0000505").values())[0]
assert e2t2["subject"] == "ORPHA:93262"
assert e2t2["object"] == "HP:0000505"
assert e2t2["relation"] == "RO:0002200"
assert e2t2["type"] == ['biolink:Association']
assert e2t2["frequency_of_phenotype"] == "HP:0040283"
assert t2.store.graph.number_of_nodes() == 14
assert t2.store.graph.number_of_edges() == 7
input_args3 = {
"filename": [os.path.join(TARGET_DIR, "oban-export.nt")],
"format": "nt",
}
t3 = Transformer()
t3.transform(input_args3)
n1t3 = t1.store.graph.nodes()["HP:0000505"]
assert len(n1t3["category"]) == 1
assert "biolink:NamedThing" in n1t3["category"]
e1t3 = list(t3.store.graph.get_edge("OMIM:166400", "HP:0000006").values())[0]
assert e1t3["subject"] == "OMIM:166400"
assert e1t3["object"] == "HP:0000006"
assert e1t3["relation"] == "RO:0000091"
assert e1t3["type"] == ['biolink:Association']
assert e1t3["has_evidence"] == ["ECO:0000501"]
e2t3 = list(t3.store.graph.get_edge("ORPHA:93262", "HP:0000505").values())[0]
assert e2t3["subject"] == "ORPHA:93262"
assert e2t3["object"] == "HP:0000505"
assert e2t3["relation"] == "RO:0002200"
assert e2t3["type"] == ['biolink:Association']
assert e2t3["frequency_of_phenotype"] == "HP:0040283"
assert t3.store.graph.number_of_nodes() == 14
assert t3.store.graph.number_of_edges() == 7
def test_rdf_transform3():
"""
    Test parsing RDF N-Triples and round-tripping.
"""
input_args1 = {
"filename": [os.path.join(RESOURCE_DIR, "rdf", "test1.nt")],
"format": "nt",
}
t1 = Transformer()
t1.transform(input_args1)
assert t1.store.graph.number_of_nodes() == 2
assert t1.store.graph.number_of_edges() == 1
output_args1 = {
"filename": os.path.join(TARGET_DIR, "test1-export.nt"),
"format": "nt",
}
t1.save(output_args1)
input_args2 = {
"filename": [os.path.join(TARGET_DIR, "test1-export.nt")],
"format": "nt",
}
t2 = Transformer()
t2.transform(input_args2)
assert t2.store.graph.number_of_nodes() == 2
assert t2.store.graph.number_of_edges() == 1
n1t1 = t1.store.graph.nodes()["ENSEMBL:ENSG0000000000001"]
n1t2 = t2.store.graph.nodes()["ENSEMBL:ENSG0000000000001"]
n1t3 = t2.store.graph.nodes()["ENSEMBL:ENSG0000000000001"]
assert n1t1["type"] == n1t2["type"] == n1t3["type"] == ["SO:0000704"]
assert len(n1t1["category"]) == len(n1t2["category"]) == len(n1t3["category"]) == 4
assert (
"biolink:Gene" in n1t1["category"]
and "biolink:Gene" in n1t2["category"]
and "biolink:Gene" in n1t3["category"]
)
assert (
"biolink:GenomicEntity" in n1t1["category"]
and "biolink:GenomicEntity" in n1t2["category"]
and "biolink:GenomicEntity" in n1t3["category"]
)
assert (
"biolink:NamedThing" in n1t1["category"]
and "biolink:NamedThing" in n1t2["category"]
and "biolink:NamedThing" in n1t3["category"]
)
assert n1t1["name"] == n1t2["name"] == n1t3["name"] == "Test Gene 123"
assert (
n1t1["description"]
== n1t2["description"]
== n1t3["description"]
== "This is a Test Gene 123"
)
assert (
"Test Dataset" in n1t1["provided_by"]
and "Test Dataset" in n1t2["provided_by"]
and "Test Dataset" in n1t3["provided_by"]
)
def test_rdf_transform4():
"""
    Test parsing RDF N-Triples and round-tripping, with user-defined node property predicates.
"""
node_property_predicates = {
f"https://www.example.org/UNKNOWN/{x}"
for x in ["fusion", "homology", "combined_score", "cooccurence"]
}
input_args1 = {
"filename": [os.path.join(RESOURCE_DIR, "rdf", "test2.nt")],
"format": "nt",
"node_property_predicates": node_property_predicates,
"knowledge_source": "Test Dataset",
}
t1 = Transformer()
t1.transform(input_args1)
assert t1.store.graph.number_of_nodes() == 4
assert t1.store.graph.number_of_edges() == 3
output_args2 = {
"filename": os.path.join(TARGET_DIR, "test2-export.nt"),
"format": "nt",
}
t1.save(output_args2)
t2 = Transformer()
input_args2 = {
"filename": [os.path.join(TARGET_DIR, "test2-export.nt")],
"format": "nt",
}
t2.transform(input_args2)
assert t2.store.graph.number_of_nodes() == 4
assert t2.store.graph.number_of_edges() == 3
n1t1 = t1.store.graph.nodes()["ENSEMBL:ENSG0000000000001"]
n1t2 = t2.store.graph.nodes()["ENSEMBL:ENSG0000000000001"]
assert n1t1["type"] == n1t2["type"] == ["SO:0000704"]
assert len(n1t1["category"]) == len(n1t2["category"]) == 4
assert "biolink:Gene" in n1t1["category"] and "biolink:Gene" in n1t2["category"]
assert (
"biolink:GenomicEntity" in n1t1["category"]
and "biolink:GenomicEntity" in n1t2["category"]
)
assert (
"biolink:NamedThing" in n1t1["category"]
and "biolink:NamedThing" in n1t2["category"]
)
assert n1t1["name"] == n1t2["name"] == "Test Gene 123"
assert n1t1["description"] == n1t2["description"] == "This is a Test Gene 123"
assert (
"Test Dataset" in n1t1["provided_by"] and "Test Dataset" in n1t2["provided_by"]
)
e1t1 = list(
t1.store.graph.get_edge(
"ENSEMBL:ENSP0000000000001", "ENSEMBL:ENSP0000000000002"
).values()
)[0]
e1t2 = list(
t2.store.graph.get_edge(
"ENSEMBL:ENSP0000000000001", "ENSEMBL:ENSP0000000000002"
).values()
)[0]
assert e1t1["subject"] == e1t2["subject"] == "ENSEMBL:ENSP0000000000001"
assert e1t1["object"] == e1t2["object"] == "ENSEMBL:ENSP0000000000002"
assert e1t1["predicate"] == e1t2["predicate"] == "biolink:interacts_with"
assert e1t1["relation"] == e1t2["relation"] == "biolink:interacts_with"
assert e1t1["type"] == e1t2["type"] == ["biolink:Association"]
assert e1t1["id"] == e1t2["id"] == "urn:uuid:fcf76807-f909-4ccb-b40a-3b79b49aa518"
assert e1t1["fusion"] == e1t2["fusion"] == "0"
assert e1t1["homology"] == e1t2["homology"] == "0.0"
assert e1t1["combined_score"] == e1t2["combined_score"] == "490.0"
assert e1t1["cooccurence"] == e1t2["cooccurence"] == "332"
def test_rdf_transform5():
"""
    Parse RDF N-Triples and round-trip, with user-defined node property predicates
    and export property types.
"""
node_property_predicates = {
f"https://www.example.org/UNKNOWN/{x}"
for x in ["fusion", "homology", "combined_score", "cooccurence"]
}
property_types = {}
for k in node_property_predicates:
property_types[k] = "xsd:float"
input_args1 = {
"filename": [os.path.join(RESOURCE_DIR, "rdf", "test3.nt")],
"format": "nt",
"node_property_predicates": node_property_predicates,
}
t1 = Transformer()
t1.transform(input_args1)
assert t1.store.graph.number_of_nodes() == 7
assert t1.store.graph.number_of_edges() == 6
output_args2 = {
"filename": os.path.join(TARGET_DIR, "test3-export.nt"),
"format": "nt",
"property_types": property_types,
}
t1.save(output_args2)
input_args2 = {
"filename": [os.path.join(TARGET_DIR, "test3-export.nt")],
"format": "nt",
}
t2 = Transformer()
t2.transform(input_args2)
assert t2.store.graph.number_of_nodes() == 7
assert t2.store.graph.number_of_edges() == 6
n1t1 = t1.store.graph.nodes()["ENSEMBL:ENSG0000000000001"]
n1t2 = t2.store.graph.nodes()["ENSEMBL:ENSG0000000000001"]
assert n1t1["type"] == n1t2["type"] == ["SO:0000704"]
assert len(n1t1["category"]) == len(n1t2["category"]) == 4
assert "biolink:Gene" in n1t1["category"] and "biolink:Gene" in n1t2["category"]
assert (
"biolink:GenomicEntity" in n1t1["category"]
and "biolink:GenomicEntity" in n1t2["category"]
)
assert (
"biolink:NamedThing" in n1t1["category"]
and "biolink:NamedThing" in n1t2["category"]
)
assert n1t1["name"] == n1t2["name"] == "Test Gene 123"
assert n1t1["description"] == n1t2["description"] == "This is a Test Gene 123"
assert (
"Test Dataset" in n1t1["provided_by"] and "Test Dataset" in n1t2["provided_by"]
)
e1t1 = list(
t1.store.graph.get_edge(
"ENSEMBL:ENSP0000000000001", "ENSEMBL:ENSP0000000000002"
).values()
)[0]
e1t2 = list(
t2.store.graph.get_edge(
"ENSEMBL:ENSP0000000000001", "ENSEMBL:ENSP0000000000002"
).values()
)[0]
assert e1t1["subject"] == e1t2["subject"] == "ENSEMBL:ENSP0000000000001"
assert e1t1["object"] == e1t2["object"] == "ENSEMBL:ENSP0000000000002"
assert e1t1["predicate"] == e1t2["predicate"] == "biolink:interacts_with"
assert e1t1["relation"] == e1t2["relation"] == "biolink:interacts_with"
assert e1t1["type"] == e1t2["type"] == ["biolink:Association"]
assert e1t1["id"] == e1t2["id"] == "urn:uuid:fcf76807-f909-4ccb-b40a-3b79b49aa518"
assert "test3.nt" in e1t1["knowledge_source"]
assert e1t2["fusion"] == 0.0
assert e1t2["homology"] == 0.0
assert e1t2["combined_score"] == 490.0
assert e1t2["cooccurence"] == 332.0
assert "test3.nt" in e1t2["knowledge_source"]
def test_transform_inspector():
"""
Test transform with an inspection callable.
"""
input_args = {
"filename": [
os.path.join(RESOURCE_DIR, "test2_nodes.tsv"),
os.path.join(RESOURCE_DIR, "test2_edges.tsv"),
],
"format": "tsv",
}
t = Transformer()
class TestInspector:
def __init__(self):
self._node_count = 0
self._edge_count = 0
def __call__(self, entity_type: GraphEntityType, rec: List):
if entity_type == GraphEntityType.EDGE:
self._edge_count += 1
elif entity_type == GraphEntityType.NODE:
self._node_count += 1
else:
raise RuntimeError("Unexpected GraphEntityType: " + str(entity_type))
def get_node_count(self):
return self._node_count
def get_edge_count(self):
return self._edge_count
inspector = TestInspector()
t.transform(input_args=input_args, inspector=inspector)
assert inspector.get_node_count() == 4
assert inspector.get_edge_count() == 4
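# Illustrative sketch (not part of the original suite): the inspector passed to
# Transformer.transform() only needs to be callable with (GraphEntityType, record),
# so a closure works as well as the class above; the helper name is hypothetical.
def _make_counting_inspector():
    counts = {"nodes": 0, "edges": 0}
    def _inspect(entity_type, rec):
        if entity_type == GraphEntityType.NODE:
            counts["nodes"] += 1
        elif entity_type == GraphEntityType.EDGE:
            counts["edges"] += 1
    return counts, _inspect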
def test_transformer_infores_basic_obojson_formatting():
input_args = {
"filename": [
os.path.join(RESOURCE_DIR, "pato.json")
],
"format": "obojson",
"provided_by": True,
"aggregator_knowledge_source": True,
"primary_knowledge_source": True
}
output_args = {
"filename": os.path.join(TARGET_DIR, "pato-export.tsv"),
"format": "tsv",
}
t = Transformer()
t.transform(input_args=input_args)
t.save(output_args)
nt = list(t.store.graph.get_node("BFO:0000004"))
pprint(nt)
et = list(t.store.graph.get_edge("BFO:0000004", "BFO:0000002").values())[0]
assert et.get('aggregator_knowledge_source')
assert et.get('primary_knowledge_source')
def test_transformer_infores_basic_formatting():
input_args = {
"filename": [
os.path.join(RESOURCE_DIR, "test_infores_coercion_nodes.tsv"),
os.path.join(RESOURCE_DIR, "test_infores_coercion_edges.tsv"),
],
"format": "tsv",
"provided_by": True,
"aggregator_knowledge_source": True,
"primary_knowledge_source": True
}
t = Transformer()
t.transform(input_args=input_args)
n1 = t.store.graph.nodes()["FlyBase:FBgn0000008"]
assert "provided_by" in n1
assert len(n1["provided_by"]) == 1
assert "infores:flybase-monarch-version-202012" in n1["provided_by"]
n2 = t.store.graph.nodes()["GO:0005912"]
assert "provided_by" in n2
assert len(n2["provided_by"]) == 1
assert "infores:gene-ontology-monarch-version-202012" in n2["provided_by"]
et = list(t.store.graph.get_edge("FlyBase:FBgn0000008", "GO:0005912").values())[0]
assert (
"infores:gene-ontology-monarch-version-202012"
in et["aggregator_knowledge_source"]
)
assert (
"infores"
in et["primary_knowledge_source"]
)
print(et)
# irc = t.get_infores_catalog()
# assert len(irc) == 2
# assert "fixed-gene-ontology-monarch-version-202012" in irc
# assert "Gene Ontology (Monarch version 202012)" in irc['fixed-gene-ontology-monarch-version-202012']
def test_transformer_infores_suppression():
input_args = {
"filename": [
os.path.join(RESOURCE_DIR, "test_infores_coercion_nodes.tsv"),
os.path.join(RESOURCE_DIR, "test_infores_coercion_edges.tsv"),
],
"format": "tsv",
"provided_by": False,
"aggregator_knowledge_source": False,
"primary_knowledge_source": False
}
t = Transformer()
t.transform(input_args=input_args)
n1 = t.store.graph.nodes()["FlyBase:FBgn0000008"]
assert "provided_by" not in n1
n2 = t.store.graph.nodes()["GO:0005912"]
assert "provided_by" not in n2
et = list(t.store.graph.get_edge("FlyBase:FBgn0000008", "GO:0005912").values())[0]
assert "aggregator_knowledge_source" not in et
def test_transformer_infores_parser_deletion_rewrite():
input_args = {
"filename": [
os.path.join(RESOURCE_DIR, "test_infores_coercion_nodes.tsv"),
os.path.join(RESOURCE_DIR, "test_infores_coercion_edges.tsv"),
],
"format": "tsv",
"provided_by": (r"\(.+\)", ""),
"aggregator_knowledge_source": (r"\(.+\)", ""),
}
t = Transformer()
t.transform(input_args=input_args)
n1 = t.store.graph.nodes()["FlyBase:FBgn0000008"]
assert "provided_by" in n1
assert len(n1["provided_by"]) == 1
assert "infores:flybase" in n1["provided_by"]
n2 = t.store.graph.nodes()["GO:0005912"]
assert "provided_by" in n2
assert len(n2["provided_by"]) == 1
assert "infores:gene-ontology" in n2["provided_by"]
et = list(t.store.graph.get_edge("FlyBase:FBgn0000008", "GO:0005912").values())[0]
assert "infores:gene-ontology" in et["aggregator_knowledge_source"]
irc = t.get_infores_catalog()
assert len(irc) == 3
assert "Gene Ontology (Monarch version 202012)" in irc
assert "infores:gene-ontology" in irc["Gene Ontology (Monarch version 202012)"]
def test_transformer_infores_parser_substitution_rewrite():
input_args = {
"filename": [
os.path.join(RESOURCE_DIR, "test_infores_coercion_nodes.tsv"),
os.path.join(RESOURCE_DIR, "test_infores_coercion_edges.tsv"),
],
"format": "tsv",
"provided_by": (r"\(.+\)", "Monarch"),
"aggregator_knowledge_source": (r"\(.+\)", "Monarch"),
}
t = Transformer()
t.transform(input_args=input_args)
n1 = t.store.graph.nodes()["FlyBase:FBgn0000008"]
assert "provided_by" in n1
assert len(n1["provided_by"]) == 1
assert "infores:flybase-monarch" in n1["provided_by"]
n2 = t.store.graph.nodes()["GO:0005912"]
assert "provided_by" in n2
assert len(n2["provided_by"]) == 1
assert "infores:gene-ontology-monarch" in n2["provided_by"]
et = list(t.store.graph.get_edge("FlyBase:FBgn0000008", "GO:0005912").values())[0]
assert "infores:gene-ontology-monarch" in et["aggregator_knowledge_source"]
irc = t.get_infores_catalog()
assert len(irc) == 3
assert "Gene Ontology (Monarch version 202012)" in irc
assert (
"infores:gene-ontology-monarch" in irc["Gene Ontology (Monarch version 202012)"]
)
def test_transformer_infores_parser_prefix_rewrite():
input_args = {
"filename": [
os.path.join(RESOURCE_DIR, "test_infores_coercion_nodes.tsv"),
os.path.join(RESOURCE_DIR, "test_infores_coercion_edges.tsv"),
],
"format": "tsv",
"provided_by": (r"\(.+\)", "", "Monarch"),
"aggregator_knowledge_source": (r"\(.+\)", "", "Monarch"),
}
t = Transformer()
t.transform(input_args=input_args)
n1 = t.store.graph.nodes()["FlyBase:FBgn0000008"]
assert "provided_by" in n1
assert len(n1["provided_by"]) == 1
assert "infores:monarch-flybase" in n1["provided_by"]
n2 = t.store.graph.nodes()["GO:0005912"]
assert "provided_by" in n2
assert len(n2["provided_by"]) == 1
assert "infores:monarch-gene-ontology" in n2["provided_by"]
et = list(t.store.graph.get_edge("FlyBase:FBgn0000008", "GO:0005912").values())[0]
assert "infores:monarch-gene-ontology" in et["aggregator_knowledge_source"]
irc = t.get_infores_catalog()
assert len(irc) == 3
assert "Gene Ontology (Monarch version 202012)" in irc
assert (
"infores:monarch-gene-ontology" in irc["Gene Ontology (Monarch version 202012)"]
)
def test_transformer_infores_simple_prefix_rewrite():
input_args = {
"filename": [
os.path.join(RESOURCE_DIR, "test_infores_coercion_nodes.tsv"),
os.path.join(RESOURCE_DIR, "test_infores_coercion_edges.tsv"),
],
"format": "tsv",
"provided_by": (r"", "", "Fixed"),
"aggregator_knowledge_source": (r"", "", "Fixed"),
}
t = Transformer()
t.transform(input_args=input_args)
n1 = t.store.graph.nodes()["FlyBase:FBgn0000008"]
assert "provided_by" in n1
assert len(n1["provided_by"]) == 1
assert "infores:fixed-flybase-monarch-version-202012" in n1["provided_by"]
n2 = t.store.graph.nodes()["GO:0005912"]
assert "provided_by" in n2
assert len(n2["provided_by"]) == 1
assert "infores:fixed-gene-ontology-monarch-version-202012" in n2["provided_by"]
et = list(t.store.graph.get_edge("FlyBase:FBgn0000008", "GO:0005912").values())[0]
assert (
"infores:fixed-gene-ontology-monarch-version-202012"
in et["aggregator_knowledge_source"]
)
irc = t.get_infores_catalog()
assert len(irc) == 3
assert "Gene Ontology (Monarch version 202012)" in irc
assert (
"infores:fixed-gene-ontology-monarch-version-202012"
in irc["Gene Ontology (Monarch version 202012)"]
)
def test_transform_to_sqlite():
input_args = {
"filename": [
os.path.join(RESOURCE_DIR, "test_infores_coercion_nodes.tsv"),
os.path.join(RESOURCE_DIR, "test_infores_coercion_edges.tsv"),
],
"format": "tsv",
"provided_by": (r"", "", "Fixed"),
"aggregator_knowledge_source": (r"", "", "Fixed"),
}
output_args = {
"format": "sql",
"filename": os.path.join(RESOURCE_DIR, "test.db"),
}
t = Transformer()
t.transform(input_args=input_args, output_args=output_args)
assert os.path.exists(output_args["filename"])
| 28,648 | 31.816724 | 106 |
py
|
kgx
|
kgx-master/tests/integration/test_clique_merge_operation.py
|
import os
import networkx as nx
from kgx.graph.nx_graph import NxGraph
from kgx.graph_operations.clique_merge import clique_merge
from kgx.transformer import Transformer
from tests import TARGET_DIR, RESOURCE_DIR
prefix_prioritization_map = {"biolink:Gene": ["HGNC", "NCBIGene", "ENSEMBL", "OMIM"]}
def test_clique_generation():
"""
Test for generation of cliques.
"""
input_args = {
"filename": [
os.path.join(RESOURCE_DIR, "cm_nodes.csv"),
os.path.join(RESOURCE_DIR, "cm_edges.csv"),
],
"format": "csv",
}
t = Transformer()
t.transform(input_args)
updated_graph, clique_graph = clique_merge(
target_graph=t.store.graph, prefix_prioritization_map=prefix_prioritization_map
)
cliques = list(nx.strongly_connected_components(clique_graph))
assert len(cliques) == 2
def test_clique_merge():
"""
Test for clique merge.
"""
input_args = {
"filename": [
os.path.join(RESOURCE_DIR, "cm_nodes.csv"),
os.path.join(RESOURCE_DIR, "cm_edges.csv"),
],
"format": "csv",
}
t = Transformer()
t.transform(input_args)
updated_graph, clique_graph = clique_merge(
target_graph=t.store.graph, prefix_prioritization_map=prefix_prioritization_map
)
leaders = NxGraph.get_node_attributes(updated_graph, "clique_leader")
leader_list = list(leaders.keys())
leader_list.sort()
assert len(leader_list) == 2
n1 = updated_graph.nodes()[leader_list[0]]
assert n1["election_strategy"] == "PREFIX_PRIORITIZATION"
assert "NCBIGene:100302240" in n1["same_as"]
assert "ENSEMBL:ENSG00000284458" in n1["same_as"]
n2 = updated_graph.nodes()[leader_list[1]]
assert n2["election_strategy"] == "PREFIX_PRIORITIZATION"
assert "NCBIGene:8202" in n2["same_as"]
assert "OMIM:601937" in n2["same_as"]
assert "ENSEMBL:ENSG00000124151" not in n2["same_as"]
def test_clique_merge_edge_consolidation():
"""
Test for clique merge, with edge consolidation.
"""
input_args = {
"filename": [
os.path.join(RESOURCE_DIR, "cm_test2_nodes.tsv"),
os.path.join(RESOURCE_DIR, "cm_test2_edges.tsv"),
],
"format": "tsv",
}
t = Transformer()
t.transform(input_args)
updated_graph, clique_graph = clique_merge(
target_graph=t.store.graph, prefix_prioritization_map=prefix_prioritization_map
)
leaders = NxGraph.get_node_attributes(updated_graph, "clique_leader")
leader_list = list(leaders.keys())
leader_list.sort()
assert len(leader_list) == 2
n1 = updated_graph.nodes()[leader_list[0]]
assert n1["election_strategy"] == "LEADER_ANNOTATION"
assert "NCBIGene:100302240" in n1["same_as"]
assert "ENSEMBL:ENSG00000284458" in n1["same_as"]
n2 = updated_graph.nodes()[leader_list[1]]
assert n2["election_strategy"] == "LEADER_ANNOTATION"
assert "NCBIGene:8202" in n2["same_as"]
assert "OMIM:601937" in n2["same_as"]
assert "ENSEMBL:ENSG00000124151" not in n2["same_as"]
e1_incoming = updated_graph.in_edges("HGNC:7670", data=True)
assert len(e1_incoming) == 3
e1_outgoing = updated_graph.out_edges("HGNC:7670", data=True)
assert len(e1_outgoing) == 6
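# Illustrative sketch (not part of the original suite): the shared pattern in the
# tests above -- load a graph with Transformer, then run clique_merge with a prefix
# prioritization map -- written as a reusable helper for TSV inputs.
def _clique_merge_from_files(node_file, edge_file):
    t = Transformer()
    t.transform({"filename": [node_file, edge_file], "format": "tsv"})
    return clique_merge(
        target_graph=t.store.graph,
        prefix_prioritization_map=prefix_prioritization_map,
    )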
| 3,299 | 32 | 87 |
py
|
kgx
|
kgx-master/tests/integration/test_validator.py
|
import os
from sys import stderr
from kgx.utils.kgx_utils import get_toolkit
from kgx.validator import Validator
from kgx.graph.nx_graph import NxGraph
from kgx.transformer import Transformer
from tests import RESOURCE_DIR
from bmt import Toolkit
toolkit = Toolkit()
def test_validator_bad():
"""
A fake test to establish a fail condition for validation.
"""
G = NxGraph()
G.add_node("x", foo=3)
G.add_node("ZZZ:3", **{"nosuch": 1})
G.add_edge("x", "y", **{"baz": 6})
validator = Validator(verbose=True)
validator.validate(G)
assert len(validator.get_errors()) > 0
def test_validator_good():
"""
A fake test to establish a success condition for validation.
"""
G = NxGraph()
G.add_node(
"UniProtKB:P123456", id="UniProtKB:P123456", name="fake", category=["Protein"]
)
G.add_node(
"UBERON:0000001", id="UBERON:0000001", name="fake", category=["NamedThing"]
)
G.add_node(
"UBERON:0000002", id="UBERON:0000002", name="fake", category=["NamedThing"]
)
G.add_edge(
"UBERON:0000001",
"UBERON:0000002",
id="UBERON:0000001-part_of-UBERON:0000002",
relation="RO:1",
predicate="part_of",
subject="UBERON:0000001",
object="UBERON:0000002",
category=["biolink:Association"],
)
validator = Validator(verbose=True)
validator.validate(G)
print(validator.get_errors())
assert len(validator.get_errors()) == 0
def test_validate_json():
"""
Validate against a valid representative Biolink Model compliant JSON.
"""
input_args = {
"filename": [os.path.join(RESOURCE_DIR, "valid.json")],
"format": "json",
}
t = Transformer(stream=True)
t.transform(input_args)
validator = Validator()
validator.validate(t.store.graph)
assert len(validator.get_errors()) == 0
def test_distinct_validator_class_versus_default_toolkit_biolink_version():
Validator.set_biolink_model(version="1.8.2")
default_tk = get_toolkit()
validator_tk = Validator.get_toolkit()
assert default_tk.get_model_version() != validator_tk.get_model_version()
def test_distinct_class_versus_validator_instance_biolink_version():
Validator.set_biolink_model(version="1.7.0")
validator = Validator()
Validator.set_biolink_model(version="1.8.2")
validator_class_tk = Validator.get_toolkit()
validation_instance_version = validator.get_validation_model_version()
assert validation_instance_version != validator_class_tk.get_model_version()
def test_validator_explicit_biolink_version():
"""
    Test a successful validation with an explicitly set Biolink Model version.
"""
G = NxGraph()
G.add_node(
"CHEMBL.COMPOUND:1222250",
id="CHEMBL.COMPOUND:1222250",
name="Dextrose",
category=["NamedThing"]
)
G.add_node(
"UBERON:0000001", id="UBERON:0000001", name="fake", category=["NamedThing"]
)
G.add_edge(
"CHEMBL.COMPOUND:1222250",
"UBERON:0000001",
id="CHEMBL.COMPOUND:1222250-part_of-UBERON:0000001",
relation="RO:1",
predicate="part_of",
subject="CHEMBL.COMPOUND:1222250",
object="UBERON:0000001",
category=["biolink:Association"],
)
Validator.set_biolink_model(toolkit.get_model_version())
validator = Validator(verbose=True)
validator.validate(G)
print(validator.get_errors())
assert len(validator.get_errors()) == 0
def test_validator():
"""
    Test the validate function by streaming
    graph data through a Transformer.process() inspector.
"""
input_args = {
"filename": [
os.path.join(RESOURCE_DIR, "graph_nodes.tsv"),
os.path.join(RESOURCE_DIR, "graph_edges.tsv"),
],
"format": "tsv",
"aggregator_knowledge_source": True,
}
Validator.set_biolink_model(toolkit.get_model_version())
# Validator assumes the currently set Biolink Release
validator = Validator()
transformer = Transformer(stream=True)
transformer.transform(
input_args=input_args,
output_args={
"format": "null"
}, # streaming processing throws the graph data away
        # ... then we inject the Inspector into the transform() call,
        # for the underlying Transformer.process() to use...
inspector=validator,
)
validator.write_report()
e = validator.get_errors()
assert len(e) == 0
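# Illustrative sketch (not part of the original suite): the minimal validation loop
# used throughout this module -- pin a Biolink Model version, validate a graph, and
# return whatever errors come back.
def _validate_graph(graph, biolink_version):
    Validator.set_biolink_model(version=biolink_version)
    validator = Validator()
    validator.validate(graph)
    return validator.get_errors()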
| 4,523 | 28.568627 | 86 |
py
|
kgx
|
kgx-master/tests/integration/test_stream_transform.py
|
import copy
import os
import pytest
from neo4j import GraphDatabase
import neo4j
from kgx.transformer import Transformer
from tests import TARGET_DIR, RESOURCE_DIR
from tests.integration import (
check_container,
CONTAINER_NAME,
DEFAULT_NEO4J_URL,
DEFAULT_NEO4J_USERNAME,
DEFAULT_NEO4J_PASSWORD,
clean_slate
)
def clean_database():
driver = GraphDatabase.driver(
DEFAULT_NEO4J_URL, auth=(DEFAULT_NEO4J_USERNAME, DEFAULT_NEO4J_PASSWORD)
)
session = driver.session()
q = "MATCH (n) DETACH DELETE n"
with session.begin_transaction() as tx:
tx.run(q)
tx.commit()
tx.close()
def run_transform(query):
clean_database()
_transform(copy.deepcopy(query))
def _transform(query):
"""
Transform an input to an output via Transformer.
"""
t1 = Transformer()
t1.transform(query[0])
t1.save(query[1].copy())
print("query[0]", query[0])
print("number of nodes: ", t1.store.graph.number_of_nodes(), "expected: ", query[2])
print("number of edges: ", t1.store.graph.number_of_edges(), "expected: ", query[3])
assert t1.store.graph.number_of_nodes() == query[2]
assert t1.store.graph.number_of_edges() == query[3]
output = query[1]
if output["format"] in {"tsv", "csv", "jsonl"}:
input_args = {
"filename": [
f"{output['filename']}_nodes.{output['format']}",
f"{output['filename']}_edges.{output['format']}",
],
"format": output["format"],
}
elif output["format"] in {"neo4j"}:
input_args = {
"uri": DEFAULT_NEO4J_URL,
"username": DEFAULT_NEO4J_USERNAME,
"password": DEFAULT_NEO4J_PASSWORD,
"format": "neo4j",
}
else:
input_args = {"filename": [f"{output['filename']}"], "format": output["format"]}
t2 = Transformer()
t2.transform(input_args)
print("query[0]", query[0])
print("number of nodes: ", t2.store.graph.number_of_nodes(), "expected: ", query[4])
print("number of edges: ", t2.store.graph.number_of_edges(), "expected: ", query[5])
assert t2.store.graph.number_of_nodes() == query[4]
assert t2.store.graph.number_of_edges() == query[5]
def _stream_transform(query):
"""
Transform an input to an output via Transformer where streaming is enabled.
"""
t1 = Transformer(stream=True)
t1.transform(query[0], query[1])
output = query[1]
if output["format"] in {"tsv", "csv", "jsonl"}:
input_args = {
"filename": [
f"{output['filename']}_nodes.{output['format']}",
f"{output['filename']}_edges.{output['format']}",
],
"format": output["format"],
}
elif output["format"] in {"neo4j"}:
input_args = {
"uri": DEFAULT_NEO4J_URL,
"username": DEFAULT_NEO4J_USERNAME,
"password": DEFAULT_NEO4J_PASSWORD,
"format": "neo4j",
}
else:
input_args = {"filename": [f"{output['filename']}"], "format": output["format"]}
t2 = Transformer()
t2.transform(input_args)
print("query[0]",query[0])
print("number of nodes: ", t2.store.graph.number_of_nodes(), "expected: ", query[2])
print("number of edges: ", t2.store.graph.number_of_edges(), "expected: ", query[3])
assert t2.store.graph.number_of_nodes() == query[2]
assert t2.store.graph.number_of_edges() == query[3]
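# Illustrative sketch (not part of the original suite): when streaming, the output
# sink is supplied directly to transform(), mirroring the pattern in _stream_transform
# above, so the full graph never needs to be held in memory.
def _stream_copy(input_args, output_args):
    t = Transformer(stream=True)
    t.transform(input_args, output_args)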
@pytest.mark.parametrize(
"query",
[
(
{
"filename": [
os.path.join(RESOURCE_DIR, "graph_nodes.tsv"),
os.path.join(RESOURCE_DIR, "graph_edges.tsv"),
],
"format": "tsv",
},
{"filename": os.path.join(TARGET_DIR, "graph1.json"), "format": "json"},
512,
531,
512,
531,
),
(
{
"filename": [
os.path.join(RESOURCE_DIR, "graph_nodes.tsv"),
os.path.join(RESOURCE_DIR, "graph_edges.tsv"),
],
"format": "tsv",
},
{"filename": os.path.join(TARGET_DIR, "graph2"), "format": "jsonl"},
512,
531,
512,
531,
),
(
{
"filename": [
os.path.join(RESOURCE_DIR, "graph_nodes.tsv"),
os.path.join(RESOURCE_DIR, "graph_edges.tsv"),
],
"format": "tsv",
"lineterminator": None,
},
{"filename": os.path.join(TARGET_DIR, "graph3.nt"), "format": "nt"},
512,
531,
512,
531,
),
(
{
"filename": [
os.path.join(RESOURCE_DIR, "graph_nodes.tsv"),
os.path.join(RESOURCE_DIR, "graph_edges.tsv"),
],
"format": "tsv",
"node_filters": {"category": {"biolink:Gene"}},
},
{"filename": os.path.join(TARGET_DIR, "graph4"), "format": "jsonl"},
178,
177,
178,
177,
),
(
{
"filename": [
os.path.join(RESOURCE_DIR, "graph_nodes.tsv"),
os.path.join(RESOURCE_DIR, "graph_edges.tsv"),
],
"format": "tsv",
"node_filters": {"category": {"biolink:Gene"}},
"edge_filters": {"predicate": {"biolink:interacts_with"}},
},
{"filename": os.path.join(TARGET_DIR, "graph5"), "format": "jsonl"},
178,
165,
178,
165,
),
(
{
"filename": [
os.path.join(RESOURCE_DIR, "graph_nodes.tsv"),
os.path.join(RESOURCE_DIR, "graph_edges.tsv"),
],
"format": "tsv",
"edge_filters": {
"subject_category": {"biolink:Disease"},
"object_category": {"biolink:PhenotypicFeature"},
"predicate": {"biolink:has_phenotype"},
},
},
{"filename": os.path.join(TARGET_DIR, "graph6"), "format": "jsonl"},
133,
13,
133,
13,
),
],
)
def test_transform1(query):
"""
Test loading data from a TSV source and writing to various sinks.
"""
run_transform(query)
@pytest.mark.parametrize(
"query",
[
(
{"filename": [os.path.join(RESOURCE_DIR, "graph.json")], "format": "json"},
{
"filename": os.path.join(TARGET_DIR, "graph1s2"),
"format": "tsv",
"node_properties": ["id", "name", "category", "taxon"],
"edge_properties": [
"subject",
"predicate",
"object",
"relation",
"knowledge_source",
],
},
512,
532,
512,
532,
),
(
{"filename": [os.path.join(RESOURCE_DIR, "graph.json")], "format": "json"},
{"filename": os.path.join(TARGET_DIR, "graph2s2"), "format": "jsonl"},
512,
532,
512,
532,
),
(
{"filename": [os.path.join(RESOURCE_DIR, "graph.json")], "format": "json"},
{"filename": os.path.join(TARGET_DIR, "graph3s2.nt"), "format": "nt"},
512,
532,
512,
532,
),
(
{
"filename": [os.path.join(RESOURCE_DIR, "graph.json")],
"format": "json",
"edge_filters": {
"subject_category": {"biolink:Disease"},
"object_category": {"biolink:PhenotypicFeature"},
"predicate": {"biolink:has_phenotype"},
},
},
{"filename": os.path.join(TARGET_DIR, "graph4s2"), "format": "jsonl"},
133,
13,
133,
13,
),
],
)
def test_transform2(query):
"""
    Test loading data from a JSON source and writing to various sinks.
"""
run_transform(query)
@pytest.mark.parametrize(
"query",
[
(
{
"filename": [os.path.join(RESOURCE_DIR, "rdf", "test3.nt")],
"format": "nt",
},
{
"filename": os.path.join(TARGET_DIR, "graph1s3"),
"format": "tsv",
"node_properties": [
"id",
"name",
"category",
"description",
"knowledge_source",
],
"edge_properties": [
"subject",
"predicate",
"object",
"relation",
"category",
"fusion",
"homology",
"combined_score",
"cooccurrence",
],
},
7,
6,
7,
6,
),
(
{
"filename": [os.path.join(RESOURCE_DIR, "rdf", "test3.nt")],
"format": "nt",
},
{"filename": os.path.join(TARGET_DIR, "graph2s3.json"), "format": "json"},
7,
6,
7,
6,
),
(
{
"filename": [os.path.join(RESOURCE_DIR, "rdf", "test3.nt")],
"format": "nt",
},
{"filename": os.path.join(TARGET_DIR, "graph3s3"), "format": "jsonl"},
7,
6,
7,
6,
),
(
{
"filename": [os.path.join(RESOURCE_DIR, "rdf", "test3.nt")],
"format": "nt",
"edge_filters": {
"subject_category": {"biolink:Gene", "biolink:Protein"},
"object_category": {"biolink:Gene", "biolink:Protein"},
"predicate": {"biolink:has_gene_product", "biolink:interacts_with"},
},
},
{"filename": os.path.join(TARGET_DIR, "graph4s3"), "format": "jsonl"},
6,
3,
6,
3,
),
],
)
def test_transform3(query):
"""
    Test loading data from an RDF (N-Triples) source and writing to various sinks.
"""
run_transform(query)
@pytest.mark.parametrize(
"query",
[
(
{
"filename": [os.path.join(RESOURCE_DIR, "goslim_generic.json")],
"format": "obojson",
},
{
"filename": os.path.join(TARGET_DIR, "graph1s4"),
"format": "tsv",
"node_properties": [
"id",
"name",
"category",
"description",
"knowledge_source",
],
"edge_properties": [
"subject",
"predicate",
"object",
"relation",
"category",
],
},
176,
205,
176,
205,
),
(
{
"filename": [os.path.join(RESOURCE_DIR, "goslim_generic.json")],
"format": "obojson",
},
{
"filename": os.path.join(TARGET_DIR, "graph2s4"),
"format": "jsonl",
},
176,
205,
176,
205,
),
(
{
"filename": [os.path.join(RESOURCE_DIR, "goslim_generic.json")],
"format": "obojson",
},
{
"filename": os.path.join(TARGET_DIR, "graph3s4.nt"),
"format": "nt",
},
176,
205,
176,
205,
),
(
{
"filename": [os.path.join(RESOURCE_DIR, "goslim_generic.json")],
"format": "obojson",
"edge_filters": {
"subject_category": {"biolink:BiologicalProcess"},
"predicate": {"biolink:subclass_of"},
},
},
{"filename": os.path.join(TARGET_DIR, "graph4s4"), "format": "jsonl"},
72,
73,
72,
73,
),
],
)
def test_transform4(query):
"""
    Test loading data from an obojson source and writing to various sinks.
"""
run_transform(query)
@pytest.mark.parametrize(
"query",
[
(
{
"filename": [os.path.join(RESOURCE_DIR, "goslim_generic.owl")],
"format": "owl",
},
{
"filename": os.path.join(TARGET_DIR, "graph1s5"),
"format": "tsv",
"node_properties": [
"id",
"name",
"category",
"description",
"knowledge_source",
],
"edge_properties": [
"subject",
"predicate",
"object",
"relation",
"category",
],
},
220,
1050,
220,
1050,
),
(
{
"filename": [os.path.join(RESOURCE_DIR, "goslim_generic.owl")],
"format": "owl",
},
{"filename": os.path.join(TARGET_DIR, "graph2s5"), "format": "jsonl"},
220,
1050,
220,
1050,
),
(
{
"filename": [os.path.join(RESOURCE_DIR, "goslim_generic.owl")],
"format": "owl",
},
{"filename": os.path.join(TARGET_DIR, "graph3s5.nt"), "format": "nt"},
220,
1050,
221,
1052,
),
],
)
def test_transform5(query):
"""
Test transforming data from an OWL source and writing to various sinks.
"""
run_transform(query)
@pytest.mark.parametrize(
"query",
[
(
{
"filename": [os.path.join(RESOURCE_DIR, "rsa_sample.json")],
"format": "trapi-json",
},
{
"filename": os.path.join(TARGET_DIR, "graph1s6"),
"format": "tsv",
"node_properties": [
"id",
"name",
"category",
"description",
"knowledge_source",
],
"edge_properties": [
"subject",
"predicate",
"object",
"relation",
"category",
],
},
4,
3,
4,
3,
),
(
{
"filename": [os.path.join(RESOURCE_DIR, "rsa_sample.json")],
"format": "trapi-json",
},
{
"filename": os.path.join(TARGET_DIR, "graph2s6.json"),
"format": "json",
},
4,
3,
4,
3,
),
(
{
"filename": [os.path.join(RESOURCE_DIR, "rsa_sample.json")],
"format": "trapi-json",
},
{
"filename": os.path.join(TARGET_DIR, "graph3s6"),
"format": "jsonl",
},
4,
3,
4,
3,
),
(
{
"filename": [os.path.join(RESOURCE_DIR, "rsa_sample.json")],
"format": "trapi-json",
},
{
"filename": os.path.join(TARGET_DIR, "graph4s6.nt"),
"format": "nt",
},
4,
3,
4,
3,
),
(
{
"filename": [os.path.join(RESOURCE_DIR, "rsa_sample.json")],
"format": "trapi-json",
"edge_filters": {
"subject_category": {"biolink:Disease"},
},
},
{"filename": os.path.join(TARGET_DIR, "graph5s6"), "format": "jsonl"},
2,
0,
2,
0,
),
],
)
def test_transform6(query):
"""
    Test transforming data from a TRAPI JSON source and writing to various sinks.
"""
run_transform(query)
@pytest.mark.skipif(
not check_container(), reason=f"Container {CONTAINER_NAME} is not running"
)
def test_transform7():
"""
    Test transforming data from a TSV source to a Neo4j sink.
"""
clean_database()
t1 = Transformer()
t1.transform(input_args={
"filename": [
os.path.join(RESOURCE_DIR, "graph_nodes.tsv"),
os.path.join(RESOURCE_DIR, "graph_edges.tsv"),
],
"format": "tsv",
},
output_args={
"uri": DEFAULT_NEO4J_URL,
"username": DEFAULT_NEO4J_USERNAME,
"password": DEFAULT_NEO4J_PASSWORD,
"format": "neo4j",
})
assert t1.store.graph.number_of_nodes() == 512
assert t1.store.graph.number_of_edges() == 531
@pytest.mark.skipif(
not check_container(), reason=f"Container {CONTAINER_NAME} is not running"
)
def test_transform8():
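    """
    Test transforming data from a JSON source to a Neo4j sink.
    """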
clean_database()
t1 = Transformer()
t1.transform(input_args={"filename": [os.path.join(RESOURCE_DIR, "graph.json")], "format": "json"},
output_args={
"uri": DEFAULT_NEO4J_URL,
"username": DEFAULT_NEO4J_USERNAME,
"password": DEFAULT_NEO4J_PASSWORD,
"format": "neo4j",
})
assert t1.store.graph.number_of_nodes() == 512
assert t1.store.graph.number_of_edges() == 532
@pytest.mark.skipif(
not check_container(), reason=f"Container {CONTAINER_NAME} is not running"
)
def test_transform9():
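    """
    Test transforming data from an RDF (N-Triples) source to a Neo4j sink.
    """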
clean_database()
t1 = Transformer()
t1.transform(input_args={
"filename": [os.path.join(RESOURCE_DIR, "rdf", "test3.nt")],
"format": "nt",
},
output_args={
"uri": DEFAULT_NEO4J_URL,
"username": DEFAULT_NEO4J_USERNAME,
"password": DEFAULT_NEO4J_PASSWORD,
"format": "neo4j",
})
assert t1.store.graph.number_of_nodes() == 7
assert t1.store.graph.number_of_edges() == 6
@pytest.mark.skipif(
not check_container(), reason=f"Container {CONTAINER_NAME} is not running"
)
def test_transform10():
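    """
    Test transforming data from an obojson source to a Neo4j sink.
    """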
clean_database()
t1 = Transformer()
t1.transform(input_args={
"filename": [os.path.join(RESOURCE_DIR, "goslim_generic.json")],
"format": "obojson",
},
output_args={
"uri": DEFAULT_NEO4J_URL,
"username": DEFAULT_NEO4J_USERNAME,
"password": DEFAULT_NEO4J_PASSWORD,
"format": "neo4j",
})
assert t1.store.graph.number_of_nodes() == 176
assert t1.store.graph.number_of_edges() == 205
@pytest.mark.skipif(
not check_container(), reason=f"Container {CONTAINER_NAME} is not running"
)
def test_transform11():
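    """
    Test transforming data from an OWL source to a Neo4j sink.
    """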
clean_database()
t1 = Transformer()
t1.transform(input_args={
"filename": [os.path.join(RESOURCE_DIR, "goslim_generic.owl")],
"format": "owl",
},
output_args={
"uri": DEFAULT_NEO4J_URL,
"username": DEFAULT_NEO4J_USERNAME,
"password": DEFAULT_NEO4J_PASSWORD,
"format": "neo4j",
})
assert t1.store.graph.number_of_nodes() == 220
assert t1.store.graph.number_of_edges() == 1050
@pytest.mark.skipif(
not check_container(), reason=f"Container {CONTAINER_NAME} is not running"
)
def test_transform12():
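    """
    Test transforming data from a TRAPI JSON source to a Neo4j sink.
    """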
clean_database()
t1 = Transformer()
t1.transform(input_args={
"filename": [os.path.join(RESOURCE_DIR, "rsa_sample.json")],
"format": "trapi-json",
},
output_args={
"uri": DEFAULT_NEO4J_URL,
"username": DEFAULT_NEO4J_USERNAME,
"password": DEFAULT_NEO4J_PASSWORD,
"format": "neo4j",
})
assert t1.store.graph.number_of_nodes() == 4
assert t1.store.graph.number_of_edges() == 3
| 21,186 | 27.786685 | 103 |
py
|
kgx
|
kgx-master/tests/integration/__init__.py
|
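"""
Shared constants, fixtures, and graph builders for the KGX Neo4j integration tests.
"""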
import docker
import pytest
from neo4j import GraphDatabase
from kgx.graph.nx_graph import NxGraph
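# Name and connection settings of the Neo4j container used by the integration tests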
CONTAINER_NAME = "kgx-neo4j-integration-test"
DEFAULT_NEO4J_URL = "neo4j://localhost:7687"
DEFAULT_NEO4J_USERNAME = "neo4j"
DEFAULT_NEO4J_PASSWORD = "test"
def check_container():
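    """
    Check whether the Neo4j integration-test container (CONTAINER_NAME) is running.
    """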
    try:
        client = docker.from_env()
        container = client.containers.get(CONTAINER_NAME)
        status = container.status == "running"
    except Exception:
        status = False
    return status
@pytest.fixture(scope="function")
def clean_slate():
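    """
    Fixture that wipes all nodes and relationships from the Neo4j test database.
    """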
print("tearing down db")
    driver = GraphDatabase.driver(
        DEFAULT_NEO4J_URL, auth=(DEFAULT_NEO4J_USERNAME, DEFAULT_NEO4J_PASSWORD)
    )
    q = "MATCH (n) DETACH DELETE (n)"
    try:
        with driver.session() as session:
            session.run(q)
        print("deleted all nodes")
    except Exception as e:
        print(e)
def get_graph(source):
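    """
    Build four NxGraph instances, tagged with the given source, for use in the integration tests.
    """
    # g1: three biolink:NamedThing nodes (A, B, C) with a single sub_class_of edge from B to A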
g1 = NxGraph()
g1.name = "Graph 1"
g1.add_node(
"A",
**{
"id": "A",
"name": "Node A",
"category": ["biolink:NamedThing"],
"source": source,
}
)
g1.add_node(
"B",
**{
"id": "B",
"name": "Node B",
"category": ["biolink:NamedThing"],
"source": source,
}
)
g1.add_node(
"C",
**{
"id": "C",
"name": "Node C",
"category": ["biolink:NamedThing"],
"source": source,
}
)
g1.add_edge(
"B",
"A",
**{
"subject": "B",
"object": "A",
"predicate": "biolink:sub_class_of",
"source": source,
}
)
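    # g2: a sub_class_of hierarchy over six nodes (A-F) with an extra related_to edge from D to A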
g2 = NxGraph()
g2.add_node("A", id="A", **{"source": source})
g2.add_node("B", id="B", **{"source": source})
g2.add_node("C", id="C", **{"source": source})
g2.add_node("D", id="D", **{"source": source})
g2.add_node("E", id="E", **{"source": source})
g2.add_node("F", id="F", **{"source": source})
g2.add_edge(
"B",
"A",
**{
"subject": "B",
"object": "A",
"predicate": "biolink:sub_class_of",
"source": source,
}
)
g2.add_edge(
"C",
"B",
**{
"subject": "C",
"object": "B",
"predicate": "biolink:sub_class_of",
"source": source,
}
)
g2.add_edge(
"D",
"C",
**{
"subject": "D",
"object": "C",
"predicate": "biolink:sub_class_of",
"source": source,
}
)
g2.add_edge(
"D",
"A",
**{
"subject": "D",
"object": "A",
"predicate": "biolink:related_to",
"source": source,
}
)
g2.add_edge(
"E",
"D",
**{
"subject": "E",
"object": "D",
"predicate": "biolink:sub_class_of",
"source": source,
}
)
g2.add_edge(
"F",
"D",
**{
"subject": "F",
"object": "D",
"predicate": "biolink:sub_class_of",
"source": source,
}
)
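    # g3: two biolink:NamedThing nodes joined by a single related_to edge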
g3 = NxGraph()
g3.add_node(
"A", **{"id": "A", "category": ["biolink:NamedThing"], "source": source}
)
g3.add_node(
"B", **{"id": "B", "category": ["biolink:NamedThing"], "source": source}
)
g3.add_edge(
"A",
"B",
**{
"subject": "A",
"object": "B",
"predicate": "biolink:related_to",
"source": source,
}
)
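    # g4: genes A and B with protein products A1, A2 and B1, plus drugs X and Y linked via interacts_with edges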
g4 = NxGraph()
g4.add_node(
"A",
**{
"id": "A",
"category": ["biolink:Gene"],
"provided_by": source,
"source": source,
}
)
g4.add_node(
"B",
**{
"id": "B",
"category": ["biolink:Gene"],
"provided_by": source,
"source": source,
}
)
g4.add_node(
"A1",
**{
"id": "A1",
"category": ["biolink:Protein"],
"provided_by": source,
"source": source,
}
)
g4.add_node(
"A2",
**{
"id": "A2",
"category": ["biolink:Protein"],
"provided_by": source,
"source": source,
}
)
g4.add_node(
"B1",
**{
"id": "B1",
"category": ["biolink:Protein"],
"provided_by": source,
"source": source,
}
)
g4.add_node(
"X",
**{
"id": "X",
"category": ["biolink:Drug"],
"provided_by": source,
"source": source,
}
)
g4.add_node(
"Y",
**{
"id": "Y",
"category": ["biolink:Drug"],
"provided_by": source,
"source": source,
}
)
g4.add_edge(
"A",
"A1",
**{
"subject": "A",
"object": "A1",
"predicate": "biolink:has_gene_product",
"provided_by": source,
"source": source,
}
)
g4.add_edge(
"A",
"A2",
**{
"subject": "A",
"object": "A2",
"predicate": "biolink:has_gene_product",
"provided_by": source,
"source": source,
}
)
g4.add_edge(
"B",
"B1",
**{
"subject": "B",
"object": "B1",
"predicate": "biolink:has_gene_product",
"provided_by": source,
"source": source,
}
)
g4.add_edge(
"X",
"A1",
**{
"subject": "X",
"object": "A1",
"predicate": "biolink:interacts_with",
"provided_by": source,
"source": source,
}
)
g4.add_edge(
"Y",
"B",
**{
"subject": "Y",
"object": "B",
"predicate": "biolink:interacts_with",
"provided_by": source,
"source": source,
}
)
return [g1, g2, g3, g4]
| 6,383 | 21.013793 | 80 |
py
|