def cmdclass(path, enable=None, user=None):
"""Build nbextension cmdclass dict for the setuptools.setup method.
Parameters
----------
path: str
Directory relative to the setup file that the nbextension code
lives in.
enable: [str=None]
Extension to "enable". Enabling an extension causes it to be loaded
automatically by the IPython notebook.
user: [bool=None]
Whether or not the nbextension should be installed in user mode.
If this is undefined, the script will install as user mode IF the
installer is not sudo.
Usage
-----
For automatic loading:
# Assuming `./extension` is the relative path to the JS files and
# `./extension/main.js` is the file that you want automatically loaded.
setup(
name='extension',
...
cmdclass=cmdclass('extension', 'extension/main'),
)
For manual loading:
# Assuming `./extension` is the relative path to the JS files.
setup(
name='extension',
...
cmdclass=cmdclass('extension'),
)
"""
import warnings
from setuptools.command.install import install
from setuptools.command.develop import develop
from os.path import dirname, join, exists, realpath
from traceback import extract_stack
try:
# IPython/Jupyter 4.0
from notebook.nbextensions import install_nbextension
from notebook.services.config import ConfigManager
except ImportError:
# Pre-schism
try:
from IPython.html.nbextensions import install_nbextension
from IPython.html.services.config import ConfigManager
except ImportError:
warnings.warn("No jupyter notebook found in your environment. "
"Hence jupyter nbextensions were not installed. "
"If you would like to have them,"
"please issue 'pip install jupyter'.")
return {}
# Check if the user flag was set.
if user is None:
user = not _is_root()
# Get the path of the extension
calling_file = extract_stack()[-2][0]
fullpath = realpath(calling_file)
if not exists(fullpath):
raise Exception('Could not find path of setup file.')
extension_dir = join(dirname(fullpath), path)
# Installs the nbextension
def run_nbextension_install(develop):
import sys
sysprefix = hasattr(sys, 'real_prefix')
if sysprefix:
install_nbextension(
extension_dir, symlink=develop, sys_prefix=sysprefix)
else:
install_nbextension(extension_dir, symlink=develop, user=user)
if enable is not None:
print("Enabling the extension ...")
cm = ConfigManager()
cm.update('notebook', {"load_extensions": {enable: True}})
# Command used for standard installs
class InstallCommand(install):
def run(self):
print("Installing Python module...")
install.run(self)
print("Installing nbextension ...")
run_nbextension_install(False)
# Command used for development installs (symlinks the JS)
class DevelopCommand(develop):
def run(self):
print("Installing Python module...")
develop.run(self)
print("Installing nbextension ...")
run_nbextension_install(True)
return {
'install': InstallCommand,
'develop': DevelopCommand,
    }

def count_subgraph_sizes(graph: BELGraph, annotation: str = 'Subgraph') -> typing.Counter[str]:
"""Count the number of nodes in each subgraph induced by an annotation.
:param annotation: The annotation to group by and compare. Defaults to 'Subgraph'
:return: A dictionary from {annotation value: number of nodes}
"""
    return count_dict_values(group_nodes_by_annotation(graph, annotation))

def calculate_subgraph_edge_overlap(
graph: BELGraph,
annotation: str = 'Subgraph'
) -> Tuple[
Mapping[str, EdgeSet],
Mapping[str, Mapping[str, EdgeSet]],
Mapping[str, Mapping[str, EdgeSet]],
Mapping[str, Mapping[str, float]],
]:
"""Build a DatafFame to show the overlap between different sub-graphs.
Options:
1. Total number of edges overlap (intersection)
2. Percentage overlap (tanimoto similarity)
:param graph: A BEL graph
:param annotation: The annotation to group by and compare. Defaults to 'Subgraph'
:return: {subgraph: set of edges}, {(subgraph 1, subgraph2): set of intersecting edges},
{(subgraph 1, subgraph2): set of unioned edges}, {(subgraph 1, subgraph2): tanimoto similarity},
"""
sg2edge = defaultdict(set)
for u, v, d in graph.edges(data=True):
if not edge_has_annotation(d, annotation):
continue
sg2edge[d[ANNOTATIONS][annotation]].add((u, v))
subgraph_intersection = defaultdict(dict)
subgraph_union = defaultdict(dict)
result = defaultdict(dict)
for sg1, sg2 in itt.product(sg2edge, repeat=2):
subgraph_intersection[sg1][sg2] = sg2edge[sg1] & sg2edge[sg2]
subgraph_union[sg1][sg2] = sg2edge[sg1] | sg2edge[sg2]
result[sg1][sg2] = len(subgraph_intersection[sg1][sg2]) / len(subgraph_union[sg1][sg2])
    return sg2edge, subgraph_intersection, subgraph_union, result

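# For intuition: the similarity computed in the final loop above is the Tanimoto
# (Jaccard) coefficient |A & B| / |A | B| over edge sets. A minimal standalone
# sketch, with plain tuples standing in for (source, target) edge pairs:
edges_a = {('p1', 'p2'), ('p2', 'p3'), ('p3', 'p4')}
edges_b = {('p2', 'p3'), ('p3', 'p4'), ('p4', 'p5')}
tanimoto = len(edges_a & edges_b) / len(edges_a | edges_b)
assert tanimoto == 0.5  # 2 shared edges out of 4 distinct edges
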
def summarize_subgraph_edge_overlap(graph: BELGraph, annotation: str = 'Subgraph') -> Mapping[str, Mapping[str, float]]:
"""Return a similarity matrix between all subgraphs (or other given annotation).
:param annotation: The annotation to group by and compare. Defaults to :code:`"Subgraph"`
:return: A similarity matrix in a dict of dicts
:rtype: dict
"""
_, _, _, subgraph_overlap = calculate_subgraph_edge_overlap(graph, annotation)
    return subgraph_overlap

def summarize_subgraph_node_overlap(graph: BELGraph, node_predicates=None, annotation: str = 'Subgraph'):
"""Calculate the subgraph similarity tanimoto similarity in nodes passing the given filter.
Provides an alternate view on subgraph similarity, from a more node-centric view
"""
r1 = group_nodes_by_annotation_filtered(graph, node_predicates=node_predicates, annotation=annotation)
    return calculate_tanimoto_set_distances(r1)

def rank_subgraph_by_node_filter(graph: BELGraph,
node_predicates: Union[NodePredicate, Iterable[NodePredicate]],
annotation: str = 'Subgraph',
reverse: bool = True,
) -> List[Tuple[str, int]]:
"""Rank sub-graphs by which have the most nodes matching an given filter.
A use case for this function would be to identify which subgraphs contain the most differentially expressed
genes.
>>> from pybel import from_pickle
>>> from pybel.constants import GENE
>>> from pybel_tools.integration import overlay_type_data
>>> from pybel_tools.summary import rank_subgraph_by_node_filter
>>> import pandas as pd
>>> graph = from_pickle('~/dev/bms/aetionomy/alzheimers.gpickle')
    >>> df = pd.read_csv('~/dev/bananas/data/alzheimers_dgxp.csv', usecols=['Gene', 'log2fc'])
>>> data = {gene: log2fc for _, gene, log2fc in df.itertuples()}
>>> overlay_type_data(graph, data, 'log2fc', GENE, 'HGNC', impute=0.0)
>>> results = rank_subgraph_by_node_filter(graph, lambda g, n: 1.3 < abs(g[n]['log2fc']))
"""
r1 = group_nodes_by_annotation_filtered(graph, node_predicates=node_predicates, annotation=annotation)
r2 = count_dict_values(r1)
# TODO use instead: r2.most_common()
    return sorted(r2.items(), key=itemgetter(1), reverse=reverse)

def to_jupyter(graph: BELGraph, chart: Optional[str] = None) -> Javascript:
"""Render the graph as JavaScript in a Jupyter Notebook."""
with open(os.path.join(HERE, 'render_with_javascript.js'), 'rt') as f:
js_template = Template(f.read())
    return Javascript(js_template.render(**_get_context(graph, chart=chart)))

def to_html(graph: BELGraph, chart: Optional[str] = None) -> str:
"""Render the graph as an HTML string.
Common usage may involve writing to a file like:
>>> from pybel.examples import sialic_acid_graph
>>> with open('ideogram_output.html', 'w') as file:
... print(to_html(sialic_acid_graph), file=file)
"""
with open(os.path.join(HERE, 'index.html'), 'rt') as f:
html_template = Template(f.read())
    return html_template.render(**_get_context(graph, chart=chart))

def prerender(graph: BELGraph) -> Mapping[str, Mapping[str, Any]]:
"""Generate the annotations JSON for Ideogram."""
import bio2bel_hgnc
from bio2bel_hgnc.models import HumanGene
graph: BELGraph = graph.copy()
enrich_protein_and_rna_origins(graph)
collapse_all_variants(graph)
genes: Set[Gene] = get_nodes_by_function(graph, GENE)
hgnc_symbols = {
gene.name
for gene in genes
if gene.namespace.lower() == 'hgnc'
}
result = {}
hgnc_manager = bio2bel_hgnc.Manager()
human_genes = (
hgnc_manager.session
.query(HumanGene.symbol, HumanGene.location)
.filter(HumanGene.symbol.in_(hgnc_symbols))
.all()
)
for human_gene in human_genes:
result[human_gene.symbol] = {
'name': human_gene.symbol,
'chr': (
human_gene.location.split('q')[0]
if 'q' in human_gene.location else
human_gene.location.split('p')[0]
),
}
df = get_df()
    for _, (gene_id, symbol, start, stop) in df[df['Symbol'].isin(hgnc_symbols)].iterrows():
        if symbol not in result:  # skip genes without a chromosome assignment above
            continue
        result[symbol]['start'] = start
        result[symbol]['stop'] = stop
    return result

def plot_summary_axes(graph: BELGraph, lax, rax, logx=True):
"""Plots your graph summary statistics on the given axes.
After, you should run :func:`plt.tight_layout` and you must run :func:`plt.show` to view.
Shows:
1. Count of nodes, grouped by function type
2. Count of edges, grouped by relation type
:param pybel.BELGraph graph: A BEL graph
:param lax: An axis object from matplotlib
:param rax: An axis object from matplotlib
Example usage:
>>> import matplotlib.pyplot as plt
>>> from pybel import from_pickle
>>> from pybel_tools.summary import plot_summary_axes
>>> graph = from_pickle('~/dev/bms/aetionomy/parkinsons.gpickle')
>>> fig, axes = plt.subplots(1, 2, figsize=(10, 4))
>>> plot_summary_axes(graph, axes[0], axes[1])
>>> plt.tight_layout()
>>> plt.show()
"""
ntc = count_functions(graph)
etc = count_relations(graph)
df = pd.DataFrame.from_dict(dict(ntc), orient='index')
df_ec = pd.DataFrame.from_dict(dict(etc), orient='index')
df.sort_values(0, ascending=True).plot(kind='barh', logx=logx, ax=lax)
lax.set_title('Number of nodes: {}'.format(graph.number_of_nodes()))
df_ec.sort_values(0, ascending=True).plot(kind='barh', logx=logx, ax=rax)
    rax.set_title('Number of edges: {}'.format(graph.number_of_edges()))

def plot_summary(graph: BELGraph, plt, logx=True, **kwargs):
"""Plots your graph summary statistics. This function is a thin wrapper around :func:`plot_summary_axis`. It
automatically takes care of building figures given matplotlib's pyplot module as an argument. After, you need
to run :func:`plt.show`.
:code:`plt` is given as an argument to avoid needing matplotlib as a dependency for this function
Shows:
1. Count of nodes, grouped by function type
2. Count of edges, grouped by relation type
:param plt: Give :code:`matplotlib.pyplot` to this parameter
:param kwargs: keyword arguments to give to :func:`plt.subplots`
Example usage:
>>> import matplotlib.pyplot as plt
>>> from pybel import from_pickle
>>> from pybel_tools.summary import plot_summary
>>> graph = from_pickle('~/dev/bms/aetionomy/parkinsons.gpickle')
>>> plot_summary(graph, plt, figsize=(10, 4))
>>> plt.show()
"""
fig, axes = plt.subplots(1, 2, **kwargs)
lax = axes[0]
rax = axes[1]
plot_summary_axes(graph, lax, rax, logx=logx)
plt.tight_layout()
    return fig, axes

def remove_nodes_by_function_namespace(graph: BELGraph, func: str, namespace: Strings) -> None:
"""Remove nodes with the given function and namespace.
This might be useful to exclude information learned about distant species, such as excluding all information
from MGI and RGD in diseases where mice and rats don't give much insight to the human disease mechanism.
"""
    remove_filtered_nodes(graph, function_namespace_inclusion_builder(func, namespace))

def preprocessing_excel(path: str) -> pd.DataFrame:
    """Preprocess the excel sheet.
    :param path: filepath of the excel data
    :return: a pandas DataFrame with the excel data
    """
    if not os.path.exists(path):
        raise ValueError("Error: %s file not found" % path)
    # Import Models from the Excel sheet, independent for AD and PD
    df = pd.read_excel(path, sheet_name=0, header=0)
    # Indexes and column names
    # [log.info(str(x) + ': ' + str(df.columns.values[x])) for x in range(0, len(df.columns.values))]
    # Starting from 4: Pathway Name
    # Fill Pathway cells that are merged and are 'NaN' after deleting rows where there are no genes
    df.iloc[:, 0] = pd.Series(df.iloc[:, 0]).fillna(method='ffill')
    # Number of gaps
    # log.info(df.iloc[:, 6].isnull().sum())
    df = df[df.iloc[:, 1].notnull()]
    df = df.reset_index(drop=True)
    # Fill NaN with zeros in the PubmedID column
    df.iloc[:, 2] = df.iloc[:, 2].fillna(0)
    # The number of gaps in the gene column should already be zero
    if (df.iloc[:, 1].isnull().sum()) != 0:
        raise ValueError("Error: Empty cells in the gene column")
    # Check current state
    # df.to_csv('out.csv')
    return df

def preprocessing_br_projection_excel(path: str) -> pd.DataFrame:
"""Preprocess the excel file.
Parameters
----------
path : Filepath of the excel sheet
"""
if not os.path.exists(path):
raise ValueError("Error: %s file not found" % path)
    return pd.read_excel(path, sheet_name=0, header=0)

def get_nift_values() -> Mapping[str, str]:
"""Extract the list of NIFT names from the BEL resource and builds a dictionary mapping from the lowercased version
to the uppercase version.
"""
r = get_bel_resource(NIFT)
return {
name.lower(): name
for name in r['Values']
    }

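# Sketch of the shape of the returned mapping (hypothetical values): it lets
# case-insensitive lookups recover the canonical NIFT capitalization.
_nift_sketch = {'csf tau': 'CSF Tau', 'brain atrophy': 'Brain Atrophy'}
assert _nift_sketch['CSF Tau'.lower()] == 'CSF Tau'
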
def write_neurommsig_bel(file,
df: pd.DataFrame,
disease: str,
nift_values: Mapping[str, str],
):
"""Writes the NeuroMMSigDB excel sheet to BEL
:param file: a file or file-like that can be writen to
:param df:
:param disease:
:param nift_values: a dictionary of lowercased to normal names in NIFT
"""
write_neurommsig_biolerplate(disease, file)
missing_features = set()
fixed_caps = set()
nift_value_originals = set(nift_values.values())
graph = BELGraph(
name=f'NeuroMMSigDB for {disease}',
description=f'SNP and Clinical Features for Subgraphs in {disease}',
authors='Daniel Domingo-Fernández, Charles Tapley Hoyt, Mufassra Naz, Aybuge Altay, Anandhi Iyappan',
contact='[email protected]',
version=time.strftime('%Y%m%d'),
)
for pathway, pathway_df in df.groupby(pathway_column):
sorted_pathway_df = pathway_df.sort_values(genes_column)
sliced_df = sorted_pathway_df[columns].itertuples()
for _, gene, pubmeds, lit_snps, gwas_snps, ld_block_snps, clinical_features, clinical_snps in sliced_df:
gene = ensure_quotes(gene)
for snp in itt.chain(lit_snps or [], gwas_snps or [], ld_block_snps or [], clinical_snps or []):
if not snp.strip():
continue
graph.add_association(
Gene('HGNC', gene),
Gene('DBSNP', snp),
evidence='Serialized from NeuroMMSigDB',
citation='28651363',
annotations={
'MeSHDisease': disease,
},
)
for clinical_feature in clinical_features or []:
if not clinical_feature.strip():
continue
if clinical_feature.lower() not in nift_values:
missing_features.add(clinical_feature)
continue
if clinical_feature not in nift_value_originals:
fixed_caps.add((clinical_feature, nift_values[clinical_feature.lower()]))
clinical_feature = nift_values[clinical_feature.lower()] # fix capitalization
graph.add_association(
Gene('HGNC', gene),
Abundance('NIFT', clinical_feature),
evidence='Serialized from NeuroMMSigDB',
citation='28651363',
annotations={
'MeSHDisease': disease,
},
)
if clinical_snps:
for clinical_snp in clinical_snps:
graph.add_association(
Gene('DBSNP', clinical_snp),
Abundance('NIFT', clinical_feature),
evidence='Serialized from NeuroMMSigDB',
citation='28651363',
annotations={
'MeSHDisease': disease,
},
)
if missing_features:
log.warning('Missing Features in %s', disease)
for feature in missing_features:
log.warning(feature)
if fixed_caps:
log.warning('Fixed capitalization')
for broken, fixed in fixed_caps:
            log.warning('%s -> %s', broken, fixed)

def get_contradiction_summary(graph: BELGraph) -> Iterable[Tuple[BaseEntity, BaseEntity, Set[str]]]:
    """Yield triplets of (source node, target node, set of relations) for (source, target) pairs
    that have multiple, contradictory relations.
    """
for u, v in set(graph.edges()):
relations = {data[RELATION] for data in graph[u][v].values()}
if relation_set_has_contradictions(relations):
            yield u, v, relations

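# The predicate `relation_set_has_contradictions` comes from pybel_tools; as a
# rough standalone sketch of the idea (assuming only these relation strings; the
# real predicate covers more relation types), a pair is contradictory when its
# relation set mixes increasing and decreasing polarity:
def _has_contradiction_sketch(relations):
    """Simplified stand-in: check for mixed causal polarity."""
    increasing = {'increases', 'directlyIncreases'}
    decreasing = {'decreases', 'directlyDecreases'}
    return bool(increasing & relations) and bool(decreasing & relations)

assert _has_contradiction_sketch({'increases', 'decreases'})
assert not _has_contradiction_sketch({'increases', 'directlyIncreases'})
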
def get_regulatory_pairs(graph: BELGraph) -> Set[NodePair]:
"""Find pairs of nodes that have mutual causal edges that are regulating each other such that ``A -> B`` and
``B -| A``.
:return: A set of pairs of nodes with mutual causal edges
"""
cg = get_causal_subgraph(graph)
results = set()
for u, v, d in cg.edges(data=True):
if d[RELATION] not in CAUSAL_INCREASE_RELATIONS:
continue
if cg.has_edge(v, u) and any(dd[RELATION] in CAUSAL_DECREASE_RELATIONS for dd in cg[v][u].values()):
results.add((u, v))
    return results

def get_chaotic_pairs(graph: BELGraph) -> SetOfNodePairs:
"""Find pairs of nodes that have mutual causal edges that are increasing each other such that ``A -> B`` and
``B -> A``.
:return: A set of pairs of nodes with mutual causal edges
"""
cg = get_causal_subgraph(graph)
results = set()
for u, v, d in cg.edges(data=True):
if d[RELATION] not in CAUSAL_INCREASE_RELATIONS:
continue
if cg.has_edge(v, u) and any(dd[RELATION] in CAUSAL_INCREASE_RELATIONS for dd in cg[v][u].values()):
results.add(tuple(sorted([u, v], key=str)))
    return results

def get_correlation_graph(graph: BELGraph) -> Graph:
"""Extract an undirected graph of only correlative relationships."""
result = Graph()
for u, v, d in graph.edges(data=True):
if d[RELATION] not in CORRELATIVE_RELATIONS:
continue
if not result.has_edge(u, v):
result.add_edge(u, v, **{d[RELATION]: True})
elif d[RELATION] not in result[u][v]:
log.log(5, 'broken correlation relation for %s, %s', u, v)
result[u][v][d[RELATION]] = True
result[v][u][d[RELATION]] = True
    return result

def get_correlation_triangles(graph: BELGraph) -> SetOfNodeTriples:
"""Return a set of all triangles pointed by the given node."""
return {
tuple(sorted([n, u, v], key=str))
for n in graph
for u, v in itt.combinations(graph[n], 2)
if graph.has_edge(u, v)
    }

def get_triangles(graph: DiGraph) -> SetOfNodeTriples:
"""Get a set of triples representing the 3-cycles from a directional graph.
Each 3-cycle is returned once, with nodes in sorted order.
"""
return {
tuple(sorted([a, b, c], key=str))
for a, b in graph.edges()
for c in graph.successors(b)
if graph.has_edge(c, a)
    }

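# Standalone check of the 3-cycle detection on a toy directed graph: the cycle
# a -> b -> c -> a is found exactly once, as a sorted triple (the extra a -> c
# chord does not create a second 3-cycle).
from networkx import DiGraph

_toy_cycle = DiGraph([('a', 'b'), ('b', 'c'), ('c', 'a'), ('a', 'c')])
assert get_triangles(_toy_cycle) == {('a', 'b', 'c')}
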
def get_separate_unstable_correlation_triples(graph: BELGraph) -> Iterable[NodeTriple]:
    """Yield all triples of nodes A, B, C such that ``A pos B``, ``A pos C``, and ``B neg C``.
    :return: An iterator over triples of nodes, ordered so that the first is positively correlated with the other
     two, which are negatively correlated with each other
    """
    cg = get_correlation_graph(graph)
    for a, b, c in get_correlation_triangles(cg):
        if POSITIVE_CORRELATION in cg[a][b] and POSITIVE_CORRELATION in cg[b][c] and NEGATIVE_CORRELATION in cg[a][c]:
            yield b, a, c
        if POSITIVE_CORRELATION in cg[a][b] and NEGATIVE_CORRELATION in cg[b][c] and POSITIVE_CORRELATION in cg[a][c]:
            yield a, b, c
        if NEGATIVE_CORRELATION in cg[a][b] and POSITIVE_CORRELATION in cg[b][c] and POSITIVE_CORRELATION in cg[a][c]:
            yield c, a, b

def get_mutually_unstable_correlation_triples(graph: BELGraph) -> Iterable[NodeTriple]:
"""Yield triples of nodes (A, B, C) such that ``A neg B``, ``B neg C``, and ``C neg A``."""
cg = get_correlation_graph(graph)
for a, b, c in get_correlation_triangles(cg):
if all(NEGATIVE_CORRELATION in x for x in (cg[a][b], cg[b][c], cg[a][c])):
            yield a, b, c

def jens_transformation_alpha(graph: BELGraph) -> DiGraph:
"""Apply Jens' transformation (Type 1) to the graph.
1. Induce a sub-graph over causal + correlative edges
2. Transform edges by the following rules:
- increases => increases
- decreases => backwards increases
- positive correlation => two way increases
- negative correlation => delete
The resulting graph can be used to search for 3-cycles, which now symbolize unstable triplets where ``A -> B``,
``A -| C`` and ``B positiveCorrelation C``.
"""
result = DiGraph()
for u, v, d in graph.edges(data=True):
relation = d[RELATION]
if relation == POSITIVE_CORRELATION:
result.add_edge(u, v)
result.add_edge(v, u)
elif relation in CAUSAL_INCREASE_RELATIONS:
result.add_edge(u, v)
elif relation in CAUSAL_DECREASE_RELATIONS:
result.add_edge(v, u)
    return result

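# Hedged sketch: the transformation only reads the RELATION attribute of each
# edge, so a plain MultiDiGraph with that attribute set is enough to
# demonstrate the rewiring rules (constants from pybel.constants).
from networkx import MultiDiGraph
from pybel.constants import DECREASES, INCREASES, POSITIVE_CORRELATION, RELATION

_toy_bel = MultiDiGraph()
_toy_bel.add_edge('A', 'B', **{RELATION: INCREASES})
_toy_bel.add_edge('A', 'C', **{RELATION: DECREASES})
_toy_bel.add_edge('B', 'C', **{RELATION: POSITIVE_CORRELATION})
# increases kept, decreases reversed, positive correlation doubled:
assert set(jens_transformation_alpha(_toy_bel).edges()) == {('A', 'B'), ('C', 'A'), ('B', 'C'), ('C', 'B')}
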
def jens_transformation_beta(graph: BELGraph) -> DiGraph:
"""Apply Jens' Transformation (Type 2) to the graph.
1. Induce a sub-graph over causal and correlative relations
2. Transform edges with the following rules:
- increases => backwards decreases
- decreases => decreases
- positive correlation => delete
- negative correlation => two way decreases
The resulting graph can be used to search for 3-cycles, which now symbolize stable triples where ``A -> B``,
``A -| C`` and ``B negativeCorrelation C``.
"""
result = DiGraph()
for u, v, d in graph.edges(data=True):
relation = d[RELATION]
if relation == NEGATIVE_CORRELATION:
result.add_edge(u, v)
result.add_edge(v, u)
elif relation in CAUSAL_INCREASE_RELATIONS:
result.add_edge(v, u)
elif relation in CAUSAL_DECREASE_RELATIONS:
result.add_edge(u, v)
    return result

def get_jens_unstable(graph: BELGraph) -> Iterable[NodeTriple]:
"""Yield triples of nodes (A, B, C) where ``A -> B``, ``A -| C``, and ``C positiveCorrelation A``.
Calculated efficiently using the Jens Transformation.
"""
r = jens_transformation_alpha(graph)
    return get_triangles(r)

def summarize_stability(graph: BELGraph) -> Mapping[str, int]:
"""Summarize the stability of the graph."""
regulatory_pairs = get_regulatory_pairs(graph)
chaotic_pairs = get_chaotic_pairs(graph)
dampened_pairs = get_dampened_pairs(graph)
    contradictory_pairs = get_contradiction_summary(graph)
separately_unstable_triples = get_separate_unstable_correlation_triples(graph)
mutually_unstable_triples = get_mutually_unstable_correlation_triples(graph)
jens_unstable_triples = get_jens_unstable(graph)
increase_mismatch_triples = get_increase_mismatch_triplets(graph)
decrease_mismatch_triples = get_decrease_mismatch_triplets(graph)
chaotic_triples = get_chaotic_triplets(graph)
dampened_triples = get_dampened_triplets(graph)
return {
'Regulatory Pairs': _count_or_len(regulatory_pairs),
'Chaotic Pairs': _count_or_len(chaotic_pairs),
'Dampened Pairs': _count_or_len(dampened_pairs),
        'Contradictory Pairs': _count_or_len(contradictory_pairs),
'Separately Unstable Triples': _count_or_len(separately_unstable_triples),
'Mutually Unstable Triples': _count_or_len(mutually_unstable_triples),
'Jens Unstable Triples': _count_or_len(jens_unstable_triples),
'Increase Mismatch Triples': _count_or_len(increase_mismatch_triples),
'Decrease Mismatch Triples': _count_or_len(decrease_mismatch_triples),
'Chaotic Triples': _count_or_len(chaotic_triples),
        'Dampened Triples': _count_or_len(dampened_triples),
    }

def flatten_list_abundance(node: ListAbundance) -> ListAbundance:
    """Flatten a list abundance (complex or composite abundance), recursively merging any nested members."""
return node.__class__(list(chain.from_iterable(
(
flatten_list_abundance(member).members
if isinstance(member, ListAbundance) else
[member]
)
for member in node.members
    )))

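# Hedged usage sketch with the pybel DSL (assuming ComplexAbundance and Protein
# behave as in recent pybel versions): a complex nested inside a complex is
# flattened to a single complex over the leaf members.
from pybel.dsl import ComplexAbundance, Protein

_inner = ComplexAbundance([Protein('HGNC', 'FOS'), Protein('HGNC', 'JUN')])
_outer = ComplexAbundance([_inner, Protein('HGNC', 'TP53')])
_flat = flatten_list_abundance(_outer)
# _flat is now a single ComplexAbundance over FOS, JUN, and TP53
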
def list_abundance_expansion(graph: BELGraph) -> None:
"""Flatten list abundances."""
mapping = {
node: flatten_list_abundance(node)
for node in graph
if isinstance(node, ListAbundance)
}
    relabel_nodes(graph, mapping, copy=False)

def list_abundance_cartesian_expansion(graph: BELGraph) -> None:
"""Expand all list abundances to simple subject-predicate-object networks."""
for u, v, k, d in list(graph.edges(keys=True, data=True)):
if CITATION not in d:
continue
if isinstance(u, ListAbundance) and isinstance(v, ListAbundance):
for u_member, v_member in itt.product(u.members, v.members):
graph.add_qualified_edge(
u_member, v_member,
relation=d[RELATION],
citation=d.get(CITATION),
evidence=d.get(EVIDENCE),
annotations=d.get(ANNOTATIONS),
)
elif isinstance(u, ListAbundance):
for member in u.members:
graph.add_qualified_edge(
member, v,
relation=d[RELATION],
citation=d.get(CITATION),
evidence=d.get(EVIDENCE),
annotations=d.get(ANNOTATIONS),
)
elif isinstance(v, ListAbundance):
for member in v.members:
graph.add_qualified_edge(
u, member,
relation=d[RELATION],
citation=d.get(CITATION),
evidence=d.get(EVIDENCE),
annotations=d.get(ANNOTATIONS),
)
    _remove_list_abundance_nodes(graph)

def _reaction_cartesian_expansion_unqualified_helper(
    graph: BELGraph,
    u: BaseEntity,
    v: BaseEntity,
    d: dict,
) -> None:
    """Help deal with cartesian expansion of unqualified edges."""
if isinstance(u, Reaction) and isinstance(v, Reaction):
enzymes = _get_catalysts_in_reaction(u) | _get_catalysts_in_reaction(v)
for reactant, product in chain(itt.product(u.reactants, u.products),
itt.product(v.reactants, v.products)):
if reactant in enzymes or product in enzymes:
continue
graph.add_unqualified_edge(
reactant, product, INCREASES
)
for product, reactant in itt.product(u.products, u.reactants):
if reactant in enzymes or product in enzymes:
continue
graph.add_unqualified_edge(
product, reactant, d[RELATION],
)
elif isinstance(u, Reaction):
enzymes = _get_catalysts_in_reaction(u)
        for product in u.products:
            # Skip creating increases edges between enzymes
            if product in enzymes:
                continue
            # Only add an edge between v and the reaction if v is not itself part of the reaction
            # (in practice, this skips hasReactant and hasProduct edges)
            if v not in u.products and v not in u.reactants:
graph.add_unqualified_edge(
product, v, INCREASES
)
for reactant in u.reactants:
graph.add_unqualified_edge(
reactant, product, INCREASES
)
elif isinstance(v, Reaction):
enzymes = _get_catalysts_in_reaction(v)
        for reactant in v.reactants:
            # Skip creating increases edges between enzymes
            if reactant in enzymes:
                continue
            # Only add an edge between u and the reaction if u is not itself part of the reaction
            # (in practice, this skips hasReactant and hasProduct edges)
            if u not in v.products and u not in v.reactants:
graph.add_unqualified_edge(
u, reactant, INCREASES
)
for product in v.products:
graph.add_unqualified_edge(
reactant, product, INCREASES
                )

def _get_catalysts_in_reaction(reaction: Reaction) -> Set[BaseAbundance]:
"""Return nodes that are both in reactants and reactions in a reaction."""
return {
reactant
for reactant in reaction.reactants
if reactant in reaction.products
    }

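# Hedged sketch with the pybel DSL: an entity listed as both a reactant and a
# product (e.g., an enzyme) is treated as a catalyst, so it is excluded from
# the cartesian expansions above and below.
from pybel.dsl import Abundance, Reaction

_enzyme = Protein('HGNC', 'HK1')
_rxn = Reaction(
    reactants=[_enzyme, Abundance('CHEBI', 'glucose')],
    products=[_enzyme, Abundance('CHEBI', 'glucose 6-phosphate')],
)
assert _get_catalysts_in_reaction(_rxn) == {_enzyme}
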
def reaction_cartesian_expansion(graph: BELGraph, accept_unqualified_edges: bool = True) -> None:
"""Expand all reactions to simple subject-predicate-object networks."""
for u, v, d in list(graph.edges(data=True)):
# Deal with unqualified edges
if CITATION not in d and accept_unqualified_edges:
            _reaction_cartesian_expansion_unqualified_helper(graph, u, v, d)
continue
if isinstance(u, Reaction) and isinstance(v, Reaction):
catalysts = _get_catalysts_in_reaction(u) | _get_catalysts_in_reaction(v)
for reactant, product in chain(itt.product(u.reactants, u.products), itt.product(v.reactants, v.products)):
if reactant in catalysts or product in catalysts:
continue
graph.add_increases(
reactant, product,
citation=d.get(CITATION),
evidence=d.get(EVIDENCE),
annotations=d.get(ANNOTATIONS),
)
for product, reactant in itt.product(u.products, u.reactants):
if reactant in catalysts or product in catalysts:
continue
graph.add_qualified_edge(
product, reactant,
relation=d[RELATION],
citation=d.get(CITATION),
evidence=d.get(EVIDENCE),
annotations=d.get(ANNOTATIONS),
)
        elif isinstance(u, Reaction):
            catalysts = _get_catalysts_in_reaction(u)
            for product in u.products:
                # Skip creating increases edges between enzymes
                if product in catalysts:
                    continue
                # Only add an edge between v and the reaction if v is not itself part of the reaction
                # (in practice, this skips hasReactant and hasProduct edges)
                if v not in u.products and v not in u.reactants:
graph.add_increases(
product, v,
citation=d.get(CITATION),
evidence=d.get(EVIDENCE),
annotations=d.get(ANNOTATIONS),
)
for reactant in u.reactants:
graph.add_increases(
reactant, product,
citation=d.get(CITATION),
evidence=d.get(EVIDENCE),
annotations=d.get(ANNOTATIONS),
)
        elif isinstance(v, Reaction):
            catalysts = _get_catalysts_in_reaction(v)
            for reactant in v.reactants:
                # Skip creating increases edges between enzymes
                if reactant in catalysts:
                    continue
                # Only add an edge between u and the reaction if u is not itself part of the reaction
                # (in practice, this skips hasReactant and hasProduct edges)
                if u not in v.products and u not in v.reactants:
graph.add_increases(
u, reactant,
citation=d.get(CITATION),
evidence=d.get(EVIDENCE),
annotations=d.get(ANNOTATIONS),
)
for product in v.products:
graph.add_increases(
reactant, product,
citation=d.get(CITATION),
evidence=d.get(EVIDENCE),
annotations=d.get(ANNOTATIONS),
)
    _remove_reaction_nodes(graph)

def insert_graph(self, graph: BELGraph, **_kwargs) -> Network:
"""Insert a graph and return the resulting ORM object (mocked)."""
result = _Namespace()
result.id = len(self.networks)
self.networks[result.id] = graph
    return result

def get_graphs_by_ids(self, network_ids: Iterable[int]) -> List[BELGraph]:
"""Get several graphs by their identifiers."""
return [
self.networks[network_id]
for network_id in network_ids
    ]

def _generate_citation_dict(graph: BELGraph) -> Mapping[str, Mapping[Tuple[BaseEntity, BaseEntity], Set[str]]]:
    """Prepare a citation data dictionary from a graph.
    :return: A dictionary of dictionaries {citation type: {(source, target): set of citation references}}
    """
results = defaultdict(lambda: defaultdict(set))
for u, v, data in graph.edges(data=True):
if CITATION not in data:
continue
results[data[CITATION][CITATION_TYPE]][u, v].add(data[CITATION][CITATION_REFERENCE].strip())
    return dict(results)

def get_pmid_by_keyword(keyword: str,
graph: Optional[BELGraph] = None,
pubmed_identifiers: Optional[Set[str]] = None,
) -> Set[str]:
"""Get the set of PubMed identifiers beginning with the given keyword string.
:param keyword: The beginning of a PubMed identifier
:param graph: A BEL graph
:param pubmed_identifiers: A set of pre-cached PubMed identifiers
:return: A set of PubMed identifiers starting with the given string
"""
if pubmed_identifiers is not None:
return {
pubmed_identifier
for pubmed_identifier in pubmed_identifiers
if pubmed_identifier.startswith(keyword)
}
if graph is None:
raise ValueError('Graph not supplied')
return {
pubmed_identifier
for pubmed_identifier in iterate_pubmed_identifiers(graph)
if pubmed_identifier.startswith(keyword)
    }

def count_citations(graph: BELGraph, **annotations) -> Counter:
"""Counts the citations in a graph based on a given filter
:param graph: A BEL graph
:param dict annotations: The annotation filters to use
:return: A counter from {(citation type, citation reference): frequency}
"""
citations = defaultdict(set)
annotation_dict_filter = build_edge_data_filter(annotations)
for u, v, _, d in filter_edges(graph, annotation_dict_filter):
if CITATION not in d:
continue
citations[u, v].add((d[CITATION][CITATION_TYPE], d[CITATION][CITATION_REFERENCE].strip()))
    return Counter(itt.chain.from_iterable(citations.values()))

def count_citations_by_annotation(graph: BELGraph, annotation: str) -> Mapping[str, typing.Counter[str]]:
"""Group the citation counters by subgraphs induced by the annotation.
:param graph: A BEL graph
:param annotation: The annotation to use to group the graph
:return: A dictionary of Counters {subgraph name: Counter from {citation: frequency}}
"""
citations = defaultdict(lambda: defaultdict(set))
for u, v, data in graph.edges(data=True):
if not edge_has_annotation(data, annotation) or CITATION not in data:
continue
k = data[ANNOTATIONS][annotation]
citations[k][u, v].add((data[CITATION][CITATION_TYPE], data[CITATION][CITATION_REFERENCE].strip()))
    return {k: Counter(itt.chain.from_iterable(v.values())) for k, v in citations.items()}

def count_author_publications(graph: BELGraph) -> typing.Counter[str]:
"""Count the number of publications of each author to the given graph."""
authors = group_as_dict(_iter_author_publiations(graph))
    return Counter(count_dict_values(count_defaultdict(authors)))

def get_authors_by_keyword(keyword: str, graph=None, authors=None) -> Set[str]:
"""Get authors for whom the search term is a substring.
:param pybel.BELGraph graph: A BEL graph
:param keyword: The keyword to search the author strings for
:param set[str] authors: An optional set of pre-cached authors calculated from the graph
:return: A set of authors with the keyword as a substring
"""
keyword_lower = keyword.lower()
if authors is not None:
return {
author
for author in authors
if keyword_lower in author.lower()
}
if graph is None:
raise ValueError('Graph not supplied')
return {
author
for author in get_authors(graph)
if keyword_lower in author.lower()
    }

def count_authors_by_annotation(graph: BELGraph, annotation: str = 'Subgraph') -> Mapping[str, typing.Counter[str]]:
"""Group the author counters by sub-graphs induced by the annotation.
:param graph: A BEL graph
:param annotation: The annotation to use to group the graph
:return: A dictionary of Counters {subgraph name: Counter from {author: frequency}}
"""
authors = group_as_dict(_iter_authors_by_annotation(graph, annotation=annotation))
    return count_defaultdict(authors)

def get_evidences_by_pmid(graph: BELGraph, pmids: Union[str, Iterable[str]]):
"""Get a dictionary from the given PubMed identifiers to the sets of all evidence strings associated with each
in the graph.
:param graph: A BEL graph
:param pmids: An iterable of PubMed identifiers, as strings. Is consumed and converted to a set.
:return: A dictionary of {pmid: set of all evidence strings}
:rtype: dict
"""
result = defaultdict(set)
for _, _, _, data in filter_edges(graph, build_pmid_inclusion_filter(pmids)):
result[data[CITATION][CITATION_REFERENCE]].add(data[EVIDENCE])
    return dict(result)

def count_citation_years(graph: BELGraph) -> typing.Counter[int]:
"""Count the number of citations from each year."""
result = defaultdict(set)
for _, _, data in graph.edges(data=True):
if CITATION not in data or CITATION_DATE not in data[CITATION]:
continue
try:
dt = _ensure_datetime(data[CITATION][CITATION_DATE])
result[dt.year].add((data[CITATION][CITATION_TYPE], data[CITATION][CITATION_REFERENCE]))
except Exception:
continue
    return count_dict_values(result)

def get_citation_years(graph: BELGraph) -> List[Tuple[int, int]]:
"""Create a citation timeline counter from the graph."""
    return create_timeline(count_citation_years(graph))

def create_timeline(year_counter: typing.Counter[int]) -> List[Tuple[int, int]]:
"""Complete the Counter timeline.
:param Counter year_counter: counter dict for each year
:return: complete timeline
"""
if not year_counter:
return []
from_year = min(year_counter) - 1
until_year = datetime.now().year + 1
return [
(year, year_counter.get(year, 0))
for year in range(from_year, until_year)
    ]

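# Standalone check: the timeline is zero-filled from one year before the
# earliest citation up to the current year.
from collections import Counter as _Counter

_timeline = create_timeline(_Counter({2010: 2, 2012: 1}))
assert _timeline[:4] == [(2009, 0), (2010, 2), (2011, 0), (2012, 1)]
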
def count_confidences(graph: BELGraph) -> typing.Counter[str]:
"""Count the confidences in the graph."""
return Counter(
(
'None'
if ANNOTATIONS not in data or 'Confidence' not in data[ANNOTATIONS] else
list(data[ANNOTATIONS]['Confidence'])[0]
)
for _, _, data in graph.edges(data=True)
if CITATION in data # don't bother with unqualified statements
    )

def enrich_pubmed_citations(graph: BELGraph, manager: Manager) -> Set[str]:
"""Overwrite all PubMed citations with values from NCBI's eUtils lookup service.
:return: A set of PMIDs for which the eUtils service crashed
"""
pmids = get_pubmed_identifiers(graph)
pmid_data, errors = get_citations_by_pmids(manager=manager, pmids=pmids)
for u, v, k in filter_edges(graph, has_pubmed):
pmid = graph[u][v][k][CITATION][CITATION_REFERENCE].strip()
if pmid not in pmid_data:
log.warning('Missing data for PubMed identifier: %s', pmid)
errors.add(pmid)
continue
graph[u][v][k][CITATION].update(pmid_data[pmid])
    return errors

def update_context(universe: BELGraph, graph: BELGraph):
"""Update the context of a subgraph from the universe of all knowledge."""
for namespace in get_namespaces(graph):
if namespace in universe.namespace_url:
graph.namespace_url[namespace] = universe.namespace_url[namespace]
elif namespace in universe.namespace_pattern:
graph.namespace_pattern[namespace] = universe.namespace_pattern[namespace]
else:
log.warning('namespace: %s missing from universe', namespace)
for annotation in get_annotations(graph):
if annotation in universe.annotation_url:
graph.annotation_url[annotation] = universe.annotation_url[annotation]
elif annotation in universe.annotation_pattern:
graph.annotation_pattern[annotation] = universe.annotation_pattern[annotation]
elif annotation in universe.annotation_list:
graph.annotation_list[annotation] = universe.annotation_list[annotation]
else:
            log.warning('annotation: %s missing from universe', annotation)

def highlight_nodes(graph: BELGraph, nodes: Optional[Iterable[BaseEntity]] = None, color: Optional[str] = None):
    """Add a highlight tag to the given nodes.
    :param graph: A BEL graph
    :param nodes: The nodes to add a highlight tag on
    :param color: The color to highlight (use something that works with CSS)
    """
    color = color or NODE_HIGHLIGHT_DEFAULT_COLOR
    for node in nodes if nodes is not None else graph:
        graph.nodes[node][NODE_HIGHLIGHT] = color

def is_node_highlighted(graph: BELGraph, node: BaseEntity) -> bool:
    """Return whether the given node is highlighted.
    :param graph: A BEL graph
    :param node: A BEL node
    :return: Does the node contain highlight information?
    """
    return NODE_HIGHLIGHT in graph.nodes[node]

def remove_highlight_nodes(graph: BELGraph, nodes: Optional[Iterable[BaseEntity]] = None) -> None:
    """Remove the highlight from the given nodes, or all nodes if none are given.
    :param graph: A BEL graph
    :param nodes: The list of nodes to un-highlight
    """
    for node in graph if nodes is None else nodes:
        if is_node_highlighted(graph, node):
            del graph.nodes[node][NODE_HIGHLIGHT]

def highlight_edges(graph: BELGraph, edges=None, color: Optional[str]=None) -> None:
"""Adds a highlight tag to the given edges.
:param graph: A BEL graph
:param edges: The edges (4-tuples of u, v, k, d) to add a highlight tag on
:type edges: iter[tuple]
:param str color: The color to highlight (use something that works with CSS)
"""
color = color or EDGE_HIGHLIGHT_DEFAULT_COLOR
for u, v, k, d in edges if edges is not None else graph.edges(keys=True, data=True):
        graph[u][v][k][EDGE_HIGHLIGHT] = color

def is_edge_highlighted(graph: BELGraph, u, v, k) -> bool:
"""Returns if the given edge is highlighted.
:param graph: A BEL graph
:return: Does the edge contain highlight information?
:rtype: bool
"""
    return EDGE_HIGHLIGHT in graph[u][v][k]

def remove_highlight_edges(graph: BELGraph, edges=None):
"""Remove the highlight from the given edges, or all edges if none given.
:param graph: A BEL graph
    :param edges: The edges (4-tuples of u, v, k, d) to remove the highlight from
:type edges: iter[tuple]
"""
for u, v, k, _ in graph.edges(keys=True, data=True) if edges is None else edges:
if is_edge_highlighted(graph, u, v, k):
            del graph[u][v][k][EDGE_HIGHLIGHT]

def highlight_subgraph(universe: BELGraph, graph: BELGraph):
"""Highlight all nodes/edges in the universe that in the given graph.
:param universe: The universe of knowledge
:param graph: The BEL graph to mutate
"""
highlight_nodes(universe, graph)
    highlight_edges(universe, graph.edges())

def remove_highlight_subgraph(graph: BELGraph, subgraph: BELGraph):
"""Remove the highlight from all nodes/edges in the graph that are in the subgraph.
:param graph: The BEL graph to mutate
:param subgraph: The subgraph from which to remove the highlighting
"""
remove_highlight_nodes(graph, subgraph.nodes())
    remove_highlight_edges(graph, subgraph.edges())

def get_causal_out_edges(
graph: BELGraph,
nbunch: Union[BaseEntity, Iterable[BaseEntity]],
) -> Set[Tuple[BaseEntity, BaseEntity]]:
"""Get the out-edges to the given node that are causal.
:return: A set of (source, target) pairs where the source is the given node
"""
return {
(u, v)
for u, v, k, d in graph.out_edges(nbunch, keys=True, data=True)
if is_causal_relation(graph, u, v, k, d)
    }

def get_causal_source_nodes(graph: BELGraph, func: str) -> Set[BaseEntity]:
"""Return a set of all nodes that have an in-degree of 0.
This likely means that it is an external perturbagen and is not known to have any causal origin from within the
biological system. These nodes are useful to identify because they generally don't provide any mechanistic insight.
"""
return {
node
for node in graph
if node.function == func and is_causal_source(graph, node)
    }

def get_causal_central_nodes(graph: BELGraph, func: str) -> Set[BaseEntity]:
"""Return a set of all nodes that have both an in-degree > 0 and out-degree > 0.
This means that they are an integral part of a pathway, since they are both produced and consumed.
"""
return {
node
for node in graph
if node.function == func and is_causal_central(graph, node)
    }

def get_causal_sink_nodes(graph: BELGraph, func: str) -> Set[BaseEntity]:
    """Return the set of nodes with the given function that have a causal out-degree of 0.
This likely means that the knowledge assembly is incomplete, or there is a curation error.
"""
return {
node
for node in graph
if node.function == func and is_causal_sink(graph, node)
    }

def count_top_centrality(graph: BELGraph, number: Optional[int] = 30) -> Mapping[BaseEntity, float]:
    """Get the top nodes by betweenness centrality, as a mapping from node to centrality score."""
dd = nx.betweenness_centrality(graph)
dc = Counter(dd)
    return dict(dc.most_common(number))

def get_modifications_count(graph: BELGraph) -> Mapping[str, int]:
"""Get a modifications count dictionary."""
return remove_falsy_values({
'Translocations': len(get_translocated(graph)),
'Degradations': len(get_degradations(graph)),
'Molecular Activities': len(get_activities(graph)),
    })

def remove_falsy_values(counter: Mapping[Any, int]) -> Mapping[Any, int]:
"""Remove all values that are zero."""
return {
label: count
for label, count in counter.items()
if count
    }

def _collapse_variants_by_function(graph: BELGraph, func: str) -> None:
    """Collapse all variant nodes with the given function onto their parents, in place."""
    for parent_node, variant_node, data in list(graph.edges(data=True)):
        if data[RELATION] == HAS_VARIANT and parent_node.function == func:
            collapse_pair(graph, from_node=variant_node, to_node=parent_node)

def rewire_variants_to_genes(graph: BELGraph) -> None:
"""Find all protein variants that are pointing to a gene and not a protein and fixes them by changing their
function to be :data:`pybel.constants.GENE`, in place
A use case is after running :func:`collapse_to_genes`.
"""
mapping = {}
for node in graph:
if not isinstance(node, Protein) or not node.variants:
continue
mapping[node] = Gene(
name=node.name,
namespace=node.namespace,
identifier=node.identifier,
variants=node.variants,
)
    nx.relabel_nodes(graph, mapping, copy=False)

def _collapse_edge_passing_predicates(graph: BELGraph, edge_predicates: EdgePredicates = None) -> None:
"""Collapse all edges passing the given edge predicates."""
for u, v, _ in filter_edges(graph, edge_predicates=edge_predicates):
        collapse_pair(graph, survivor=u, victim=v)

def _collapse_edge_by_namespace(graph: BELGraph,
victim_namespaces: Strings,
survivor_namespaces: str,
relations: Strings) -> None:
"""Collapse pairs of nodes with the given namespaces that have the given relationship.
:param graph: A BEL Graph
:param victim_namespaces: The namespace(s) of the node to collapse
:param survivor_namespaces: The namespace of the node to keep
:param relations: The relation(s) to search
"""
relation_filter = build_relation_predicate(relations)
source_namespace_filter = build_source_namespace_filter(victim_namespaces)
target_namespace_filter = build_target_namespace_filter(survivor_namespaces)
edge_predicates = [
relation_filter,
source_namespace_filter,
target_namespace_filter
]
    _collapse_edge_passing_predicates(graph, edge_predicates=edge_predicates)

def collapse_equivalencies_by_namespace(graph: BELGraph, victim_namespace: Strings, survivor_namespace: str) -> None:
"""Collapse pairs of nodes with the given namespaces that have equivalence relationships.
:param graph: A BEL graph
:param victim_namespace: The namespace(s) of the node to collapse
:param survivor_namespace: The namespace of the node to keep
To convert all ChEBI names to InChI keys, assuming there are appropriate equivalence relations between nodes with
those namespaces:
>>> collapse_equivalencies_by_namespace(graph, 'CHEBI', 'CHEBIID')
>>> collapse_equivalencies_by_namespace(graph, 'CHEBIID', 'INCHI')
"""
    _collapse_edge_by_namespace(graph, victim_namespace, survivor_namespace, EQUIVALENT_TO)

def collapse_orthologies_by_namespace(graph: BELGraph, victim_namespace: Strings, survivor_namespace: str) -> None:
"""Collapse pairs of nodes with the given namespaces that have orthology relationships.
:param graph: A BEL Graph
:param victim_namespace: The namespace(s) of the node to collapse
:param survivor_namespace: The namespace of the node to keep
    To collapse all MGI nodes to their HGNC orthologs, use:
    >>> collapse_orthologies_by_namespace(graph, 'MGI', 'HGNC')
    To collapse both MGI and RGD nodes to their HGNC orthologs, use:
    >>> collapse_orthologies_by_namespace(graph, ['MGI', 'RGD'], 'HGNC')
"""
    _collapse_edge_by_namespace(graph, victim_namespace, survivor_namespace, ORTHOLOGOUS)

def collapse_entrez_equivalencies(graph: BELGraph):
"""Collapse all equivalence edges away from Entrez. Assumes well formed, 2-way equivalencies."""
relation_filter = build_relation_predicate(EQUIVALENT_TO)
source_namespace_filter = build_source_namespace_filter(['EGID', 'EG', 'ENTREZ'])
edge_predicates = [
relation_filter,
source_namespace_filter,
]
    _collapse_edge_passing_predicates(graph, edge_predicates=edge_predicates)

def collapse_consistent_edges(graph: BELGraph):
"""Collapse consistent edges together.
.. warning:: This operation doesn't preserve evidences or other annotations
"""
    for u, v in set(graph.edges()):
        relation = pair_is_consistent(graph, u, v)
        if not relation:
            continue
        edges = [(u, v, k) for k in graph[u][v]]
        graph.remove_edges_from(edges)
        graph.add_edge(u, v, **{RELATION: relation})

def collapse_to_protein_interactions(graph: BELGraph) -> BELGraph:
"""Collapse to a graph made of only causal gene/protein edges."""
rv: BELGraph = graph.copy()
collapse_to_genes(rv)
def is_edge_ppi(_: BELGraph, u: BaseEntity, v: BaseEntity, __: str) -> bool:
"""Check if an edge is a PPI."""
return isinstance(u, Gene) and isinstance(v, Gene)
    return get_subgraph_by_edge_filter(rv, edge_predicates=[has_polarity, is_edge_ppi])

def collapse_nodes_with_same_names(graph: BELGraph) -> None:
"""Collapse all nodes with the same name, merging namespaces by picking first alphabetical one."""
survivor_mapping = defaultdict(set) # Collapse mapping dict
victims = set() # Things already mapped while iterating
it = tqdm(itt.combinations(graph, r=2), total=graph.number_of_nodes() * (graph.number_of_nodes() - 1) / 2)
for a, b in it:
if b in victims:
continue
a_name, b_name = a.get(NAME), b.get(NAME)
if not a_name or not b_name or a_name.lower() != b_name.lower():
continue
if a.keys() != b.keys(): # not same version (might have variants)
continue
        # Ensure that the values under the remaining keys are also the same
        if any(a[k] != b[k] for k in set(a.keys()) - {NAME, NAMESPACE}):
            continue  # something is different
        survivor_mapping[a].add(b)
        # Keep track of things that have already been mapped
        victims.add(b)
    collapse_nodes(graph, survivor_mapping)

def main(output):
"""Output the HBP knowledge graph to the desktop"""
from hbp_knowledge import get_graph
graph = get_graph()
text = to_html(graph)
    print(text, file=output)

def node_is_upstream_leaf(graph: BELGraph, node: BaseEntity) -> bool:
"""Return if the node is an upstream leaf.
An upstream leaf is defined as a node that has no in-edges, and exactly 1 out-edge.
"""
    return 0 == len(list(graph.predecessors(node))) and 1 == len(list(graph.successors(node)))

def get_unweighted_upstream_leaves(graph: BELGraph, key: Optional[str] = None) -> Iterable[BaseEntity]:
"""Get nodes with no incoming edges, one outgoing edge, and without the given key in its data dictionary.
    .. seealso:: :func:`data_missing_key_builder`
:param graph: A BEL graph
:param key: The key in the node data dictionary representing the experimental data. Defaults to
:data:`pybel_tools.constants.WEIGHT`.
:return: An iterable over leaves (nodes with an in-degree of 0) that don't have the given annotation
"""
if key is None:
key = WEIGHT
    return filter_nodes(graph, [node_is_upstream_leaf, data_missing_key_builder(key)])

def remove_unweighted_leaves(graph: BELGraph, key: Optional[str] = None) -> None:
"""Remove nodes that are leaves and that don't have a weight (or other key) attribute set.
:param graph: A BEL graph
:param key: The key in the node data dictionary representing the experimental data. Defaults to
:data:`pybel_tools.constants.WEIGHT`.
"""
unweighted_leaves = list(get_unweighted_upstream_leaves(graph, key=key))
    graph.remove_nodes_from(unweighted_leaves)

def is_unweighted_source(graph: BELGraph, node: BaseEntity, key: str) -> bool:
"""Check if the node is both a source and also has an annotation.
:param graph: A BEL graph
:param node: A BEL node
:param key: The key in the node data dictionary representing the experimental data
"""
    return graph.in_degree(node) == 0 and key not in graph.nodes[node]

def get_unweighted_sources(graph: BELGraph, key: Optional[str] = None) -> Iterable[BaseEntity]:
"""Get nodes on the periphery of the sub-graph that do not have a annotation for the given key.
:param graph: A BEL graph
:param key: The key in the node data dictionary representing the experimental data
:return: An iterator over BEL nodes that are unannotated and on the periphery of this subgraph
"""
if key is None:
key = WEIGHT
for node in graph:
if is_unweighted_source(graph, node, key):
            yield node

def remove_unweighted_sources(graph: BELGraph, key: Optional[str] = None) -> None:
"""Prune unannotated nodes on the periphery of the sub-graph.
:param graph: A BEL graph
:param key: The key in the node data dictionary representing the experimental data. Defaults to
:data:`pybel_tools.constants.WEIGHT`.
"""
nodes = list(get_unweighted_sources(graph, key=key))
    graph.remove_nodes_from(nodes)

def prune_mechanism_by_data(graph, key: Optional[str] = None) -> None:
"""Remove all leaves and source nodes that don't have weights.
Is a thin wrapper around :func:`remove_unweighted_leaves` and :func:`remove_unweighted_sources`
:param graph: A BEL graph
:param key: The key in the node data dictionary representing the experimental data. Defaults to
:data:`pybel_tools.constants.WEIGHT`.
Equivalent to:
>>> remove_unweighted_leaves(graph)
>>> remove_unweighted_sources(graph)
"""
remove_unweighted_leaves(graph, key=key)
    remove_unweighted_sources(graph, key=key)

def generate_mechanism(graph: BELGraph, node: BaseEntity, key: Optional[str] = None) -> BELGraph:
"""Generate a mechanistic sub-graph upstream of the given node.
:param graph: A BEL graph
:param node: A BEL node
:param key: The key in the node data dictionary representing the experimental data.
:return: A sub-graph grown around the target BEL node
"""
subgraph = get_upstream_causal_subgraph(graph, node)
expand_upstream_causal(graph, subgraph)
remove_inconsistent_edges(subgraph)
collapse_consistent_edges(subgraph)
if key is not None: # FIXME when is it not pruned?
prune_mechanism_by_data(subgraph, key)
    return subgraph

def generate_bioprocess_mechanisms(graph, key: Optional[str] = None) -> Mapping[BiologicalProcess, BELGraph]:
"""Generate a mechanistic sub-graph for each biological process in the graph using :func:`generate_mechanism`.
:param graph: A BEL graph
:param key: The key in the node data dictionary representing the experimental data.
"""
return {
biological_process: generate_mechanism(graph, biological_process, key=key)
for biological_process in get_nodes_by_function(graph, BIOPROCESS)
    }

def get_neurommsig_scores(graph: BELGraph,
genes: List[Gene],
annotation: str = 'Subgraph',
ora_weight: Optional[float] = None,
hub_weight: Optional[float] = None,
top_percent: Optional[float] = None,
topology_weight: Optional[float] = None,
preprocess: bool = False
) -> Optional[Mapping[str, float]]:
"""Preprocess the graph, stratify by the given annotation, then run the NeuroMMSig algorithm on each.
:param graph: A BEL graph
:param genes: A list of gene nodes
:param annotation: The annotation to use to stratify the graph to subgraphs
:param ora_weight: The relative weight of the over-enrichment analysis score from
:py:func:`neurommsig_gene_ora`. Defaults to 1.0.
:param hub_weight: The relative weight of the hub analysis score from :py:func:`neurommsig_hubs`.
Defaults to 1.0.
:param top_percent: The percentage of top genes to use as hubs. Defaults to 5% (0.05).
    :param topology_weight: The relative weight of the topological analysis score from
     :py:func:`neurommsig_topology`. Defaults to 1.0.
:param preprocess: If true, preprocess the graph.
:return: A dictionary from {annotation value: NeuroMMSig composite score}
Pre-processing steps:
1. Infer the central dogma with :func:``
2. Collapse all proteins, RNAs and miRNAs to genes with :func:``
3. Collapse variants to genes with :func:``
"""
if preprocess:
graph = neurommsig_graph_preprocessor.run(graph)
if not any(gene in graph for gene in genes):
logger.debug('no genes mapping to graph')
return
subgraphs = get_subgraphs_by_annotation(graph, annotation=annotation)
return get_neurommsig_scores_prestratified(
subgraphs=subgraphs,
genes=genes,
ora_weight=ora_weight,
hub_weight=hub_weight,
top_percent=top_percent,
topology_weight=topology_weight,
    )

def get_neurommsig_scores_prestratified(subgraphs: Mapping[str, BELGraph],
genes: List[Gene],
ora_weight: Optional[float] = None,
hub_weight: Optional[float] = None,
top_percent: Optional[float] = None,
topology_weight: Optional[float] = None,
) -> Optional[Mapping[str, float]]:
"""Takes a graph stratification and runs neurommsig on each
:param subgraphs: A pre-stratified set of graphs
:param genes: A list of gene nodes
:param ora_weight: The relative weight of the over-enrichment analysis score from
:py:func:`neurommsig_gene_ora`. Defaults to 1.0.
:param hub_weight: The relative weight of the hub analysis score from :py:func:`neurommsig_hubs`.
Defaults to 1.0.
:param top_percent: The percentage of top genes to use as hubs. Defaults to 5% (0.05).
    :param topology_weight: The relative weight of the topological analysis score from
     :py:func:`neurommsig_topology`. Defaults to 1.0.
:return: A dictionary from {annotation value: NeuroMMSig composite score}
Pre-processing steps:
1. Infer the central dogma with :func:``
2. Collapse all proteins, RNAs and miRNAs to genes with :func:``
3. Collapse variants to genes with :func:``
"""
return {
name: get_neurommsig_score(
graph=subgraph,
genes=genes,
ora_weight=ora_weight,
hub_weight=hub_weight,
top_percent=top_percent,
topology_weight=topology_weight,
)
for name, subgraph in subgraphs.items()
    }

def get_neurommsig_score(graph: BELGraph,
genes: List[Gene],
ora_weight: Optional[float] = None,
hub_weight: Optional[float] = None,
top_percent: Optional[float] = None,
topology_weight: Optional[float] = None) -> float:
"""Calculate the composite NeuroMMSig Score for a given list of genes.
:param graph: A BEL graph
:param genes: A list of gene nodes
:param ora_weight: The relative weight of the over-enrichment analysis score from
:py:func:`neurommsig_gene_ora`. Defaults to 1.0.
:param hub_weight: The relative weight of the hub analysis score from :py:func:`neurommsig_hubs`.
Defaults to 1.0.
:param top_percent: The percentage of top genes to use as hubs. Defaults to 5% (0.05).
    :param topology_weight: The relative weight of the topological analysis score from
     :py:func:`neurommsig_topology`. Defaults to 1.0.
:return: The NeuroMMSig composite score
"""
ora_weight = ora_weight or 1.0
hub_weight = hub_weight or 1.0
topology_weight = topology_weight or 1.0
total_weight = ora_weight + hub_weight + topology_weight
genes = list(genes)
ora_score = neurommsig_gene_ora(graph, genes)
hub_score = neurommsig_hubs(graph, genes, top_percent=top_percent)
topology_score = neurommsig_topology(graph, genes)
weighted_sum = (
ora_weight * ora_score +
hub_weight * hub_score +
topology_weight * topology_score
)
    return weighted_sum / total_weight

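# Worked arithmetic with the default weights (1.0 each): component scores of
# 0.5 (ORA), 0.2 (hubs), and 0.1 (topology) yield (0.5 + 0.2 + 0.1) / 3.
_composite = (1.0 * 0.5 + 1.0 * 0.2 + 1.0 * 0.1) / (1.0 + 1.0 + 1.0)
assert abs(_composite - 0.8 / 3) < 1e-12
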
def neurommsig_gene_ora(graph: BELGraph, genes: List[Gene]) -> float:
"""Calculate the percentage of target genes mappable to the graph.
Assume: graph central dogma inferred, collapsed to genes, collapsed variants
"""
graph_genes = set(get_nodes_by_function(graph, GENE))
    return len(graph_genes.intersection(genes)) / len(graph_genes)

def neurommsig_hubs(graph: BELGraph, genes: List[Gene], top_percent: Optional[float] = None) -> float:
"""Calculate the percentage of target genes mappable to the graph.
Assume: graph central dogma inferred, collapsed to genes, collapsed variants, graph has more than 20 nodes
:param graph: A BEL graph
:param genes: A list of nodes
:param top_percent: The percentage of top genes to use as hubs. Defaults to 5% (0.05).
"""
top_percent = top_percent or 0.05
if graph.number_of_nodes() < 20:
        logger.debug('Graph has fewer than 20 nodes')
return 0.0
graph_genes = set(get_nodes_by_function(graph, GENE))
bc = Counter({
node: betweenness_centrality
for node, betweenness_centrality in calculate_betweenness_centality(graph).items()
if node in graph_genes
})
# TODO consider continuous analog with weighting by percentile
number_central_nodes = int(len(graph_genes) * top_percent)
if number_central_nodes < 1:
number_central_nodes = 1
    number_mappable_central_nodes = sum(
        node in genes
        for node, _ in bc.most_common(number_central_nodes)
    )
return number_mappable_central_nodes / number_central_nodes |
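
# Note on the tuple unpacking above: Counter.most_common() yields
# (item, count) pairs, so the node must be unpacked before the membership
# test. A minimal, self-contained sketch with hypothetical centralities:
from collections import Counter

bc_example = Counter({'APP': 0.9, 'MAPT': 0.7, 'APOE': 0.2})
top_nodes = [node for node, _ in bc_example.most_common(2)]
assert top_nodes == ['APP', 'MAPT']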
def neurommsig_topology(graph: BELGraph, nodes: List[BaseEntity]) -> float:
    r"""Calculate the node neighbor score for a given list of nodes.

    Self-loops are not considered.

    .. math::

        \frac{\sum_{i=1}^{n} N_G[i]}{n (n - 1)}
    """
    nodes = list(nodes)
    number_nodes = len(nodes)
    if number_nodes <= 1:
        return 0.0
unnormalized_sum = sum(
u in graph[v]
for u, v in itt.product(nodes, repeat=2)
if v in graph and u != v
)
return unnormalized_sum / (number_nodes * (number_nodes - 1.0)) |
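
# Toy check of the neighbor-density ratio above on a hypothetical directed
# triangle built with networkx; the real function expects a pybel BELGraph,
# but only adjacency lookups are used here.
import itertools as itt

import networkx as nx

toy = nx.DiGraph([('a', 'b'), ('b', 'c'), ('c', 'a')])
toy_nodes = ['a', 'b', 'c']
hits = sum(
    u in toy[v]  # counts ordered pairs (u, v) where the edge v -> u exists
    for u, v in itt.product(toy_nodes, repeat=2)
    if v in toy and u != v
)
assert hits / (len(toy_nodes) * (len(toy_nodes) - 1.0)) == 0.5  # 3 of 6 pairs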
def bond_run(perc_graph_result, seed, ps, convolution_factors_tasks):
"""
Perform a single run (realization) over all microstates and return the
canonical cluster statistics
"""
microcanonical_statistics = percolate.hpc.bond_microcanonical_statistics(
seed=seed, **perc_graph_result
)
# initialize statistics array
canonical_statistics = np.empty(
ps.size,
dtype=percolate.hpc.canonical_statistics_dtype(
spanning_cluster=SPANNING_CLUSTER,
)
)
# loop over all p's and convolve canonical statistics
# http://docs.scipy.org/doc/numpy/reference/arrays.nditer.html#modifying-array-values
for row, convolution_factors_task in zip(
np.nditer(canonical_statistics, op_flags=['writeonly']),
convolution_factors_tasks,
):
# load task result
# http://jug.readthedocs.org/en/latest/api.html#jug.Task.load
assert not convolution_factors_task.is_loaded()
convolution_factors_task.load()
# fetch task result
my_convolution_factors = convolution_factors_task.result
# convolve to canonical statistics
row[...] = percolate.hpc.bond_canonical_statistics(
microcanonical_statistics=microcanonical_statistics,
convolution_factors=my_convolution_factors,
spanning_cluster=SPANNING_CLUSTER,
)
# explicitly unload task to save memory
# http://jug.readthedocs.org/en/latest/api.html#jug.Task.unload
convolution_factors_task.unload()
# initialize canonical averages for reduce
ret = percolate.hpc.bond_initialize_canonical_averages(
canonical_statistics=canonical_statistics,
spanning_cluster=SPANNING_CLUSTER,
)
return ret |
def bond_task(
perc_graph_result, seeds, ps, convolution_factors_tasks_iterator
):
"""
Perform a number of runs
The number of runs is the number of seeds
convolution_factors_tasks_iterator needs to be an iterator
We shield the convolution factors tasks from jug value/result mechanism
by supplying an iterator to the list of tasks for lazy evaluation
http://github.com/luispedro/jug/blob/43f0d80a78f418fd3aa2b8705eaf7c4a5175fff7/jug/task.py#L100
http://github.com/luispedro/jug/blob/43f0d80a78f418fd3aa2b8705eaf7c4a5175fff7/jug/task.py#L455
"""
# restore the list of convolution factors tasks
convolution_factors_tasks = list(convolution_factors_tasks_iterator)
return reduce(
percolate.hpc.bond_reduce,
map(
bond_run,
itertools.repeat(perc_graph_result),
seeds,
itertools.repeat(ps),
itertools.repeat(convolution_factors_tasks),
)
) |
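
# The reduce-over-map structure of bond_task, illustrated with stand-ins
# (pure Python; no percolate or jug required). Each "run" produces a partial
# statistic and the pairwise reduce merges them, just as
# percolate.hpc.bond_reduce merges canonical averages above.
import itertools
from functools import reduce

def _fake_run(config, seed, ps, tasks):  # stands in for bond_run
    return {'runs': 1, 'seed_sum': seed}

def _fake_reduce(a, b):  # stands in for percolate.hpc.bond_reduce
    return {'runs': a['runs'] + b['runs'], 'seed_sum': a['seed_sum'] + b['seed_sum']}

merged = reduce(
    _fake_reduce,
    map(
        _fake_run,
        itertools.repeat(None),  # shared graph result
        [1, 2, 3],               # one seed per run
        itertools.repeat(None),  # shared occupation probabilities
        itertools.repeat(None),  # shared convolution factor tasks
    ),
)
assert merged == {'runs': 3, 'seed_sum': 6}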
def get_leaves_by_type(graph, func=None, prune_threshold=1):
"""Returns an iterable over all nodes in graph (in-place) with only a connection to one node. Useful for gene and
RNA. Allows for optional filter by function type.
:param pybel.BELGraph graph: A BEL graph
:param func: If set, filters by the node's function from :mod:`pybel.constants` like
:data:`pybel.constants.GENE`, :data:`pybel.constants.RNA`, :data:`pybel.constants.PROTEIN`, or
:data:`pybel.constants.BIOPROCESS`
:type func: str
:param prune_threshold: Removes nodes with less than or equal to this number of connections. Defaults to :code:`1`
:type prune_threshold: int
:return: An iterable over nodes with only a connection to one node
:rtype: iter[tuple]
"""
for node, data in graph.nodes(data=True):
if func and func != data.get(FUNCTION):
continue
if graph.in_degree(node) + graph.out_degree(node) <= prune_threshold:
yield node |
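
# A minimal usage sketch (hypothetical helper): repeatedly strip gene leaves
# until none remain. BELGraph inherits networkx's remove_nodes_from, and GENE
# comes from pybel.constants as elsewhere in this module.
def prune_gene_leaves(graph):
    """Remove gene leaf nodes from the graph in place until a fixed point."""
    while True:
        leaves = list(get_leaves_by_type(graph, func=GENE))
        if not leaves:
            break
        graph.remove_nodes_from(leaves)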
def get_peripheral_successor_edges(graph: BELGraph, subgraph: BELGraph) -> EdgeIterator:
"""Get the set of possible successor edges peripheral to the sub-graph.
The source nodes in this iterable are all inside the sub-graph, while the targets are outside.
"""
for u in subgraph:
for _, v, k in graph.out_edges(u, keys=True):
if v not in subgraph:
yield u, v, k |
def get_peripheral_predecessor_edges(graph: BELGraph, subgraph: BELGraph) -> EdgeIterator:
"""Get the set of possible predecessor edges peripheral to the sub-graph.
The target nodes in this iterable are all inside the sub-graph, while the sources are outside.
"""
for v in subgraph:
for u, _, k in graph.in_edges(v, keys=True):
if u not in subgraph:
yield u, v, k |
def count_sources(edge_iter: EdgeIterator) -> Counter:
"""Count the source nodes in an edge iterator with keys and data.
:return: A counter of source nodes in the iterable
"""
return Counter(u for u, _, _ in edge_iter) |
def count_targets(edge_iter: EdgeIterator) -> Counter:
"""Count the target nodes in an edge iterator with keys and data.
:return: A counter of target nodes in the iterable
"""
return Counter(v for _, v, _ in edge_iter) |
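
# A small usage sketch (hypothetical helper) combining the peripheral-edge
# iterators with the counters above: rank external nodes by how many edges
# they receive from inside a sub-graph.
def rank_external_successors(graph, subgraph, n=10):
    """Return the n external nodes most often targeted from inside the sub-graph."""
    return count_targets(get_peripheral_successor_edges(graph, subgraph)).most_common(n)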
def get_subgraph_edges(graph: BELGraph,
annotation: str,
value: str,
source_filter=None,
target_filter=None,
):
"""Gets all edges from a given subgraph whose source and target nodes pass all of the given filters
:param pybel.BELGraph graph: A BEL graph
:param str annotation: The annotation to search
:param str value: The annotation value to search by
:param source_filter: Optional filter for source nodes (graph, node) -> bool
:param target_filter: Optional filter for target nodes (graph, node) -> bool
:return: An iterable of (source node, target node, key, data) for all edges that match the annotation/value and
node filters
:rtype: iter[tuple]
"""
if source_filter is None:
source_filter = keep_node_permissive
if target_filter is None:
target_filter = keep_node_permissive
for u, v, k, data in graph.edges(keys=True, data=True):
if not edge_has_annotation(data, annotation):
continue
if data[ANNOTATIONS][annotation] == value and source_filter(graph, u) and target_filter(graph, v):
yield u, v, k, data |
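
# A short usage sketch, assuming a BEL graph named ``graph`` annotated with
# 'Subgraph' values; exclude_pathology_filter is the same node predicate
# imported in the doctest of the next function below.
from pybel.struct.filters import exclude_pathology_filter

def print_subgraph_edges(graph, value='Blood vessel dilation subgraph'):
    """Print the edges of one annotated sub-graph, skipping pathology sources."""
    for u, v, k, data in get_subgraph_edges(
        graph,
        annotation='Subgraph',
        value=value,
        source_filter=exclude_pathology_filter,
    ):
        print(u, v, k)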
def get_subgraph_peripheral_nodes(graph: BELGraph,
subgraph: Iterable[BaseEntity],
node_predicates: NodePredicates = None,
edge_predicates: EdgePredicates = None,
):
"""Get a summary dictionary of all peripheral nodes to a given sub-graph.
:return: A dictionary of {external node: {'successor': {internal node: list of (key, dict)},
'predecessor': {internal node: list of (key, dict)}}}
:rtype: dict
For example, it might be useful to quantify the number of predecessors and successors:
>>> from pybel.struct.filters import exclude_pathology_filter
>>> value = 'Blood vessel dilation subgraph'
>>> sg = get_subgraph_by_annotation_value(graph, annotation='Subgraph', value=value)
>>> p = get_subgraph_peripheral_nodes(graph, sg, node_predicates=exclude_pathology_filter)
>>> for node in sorted(p, key=lambda n: len(set(p[n]['successor']) | set(p[n]['predecessor'])), reverse=True):
>>> if 1 == len(p[value][node]['successor']) or 1 == len(p[value][node]['predecessor']):
>>> continue
>>> print(node,
>>> len(p[node]['successor']),
>>> len(p[node]['predecessor']),
>>> len(set(p[node]['successor']) | set(p[node]['predecessor'])))
"""
node_filter = concatenate_node_predicates(node_predicates=node_predicates)
edge_filter = and_edge_predicates(edge_predicates=edge_predicates)
result = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
    for u, v, k in get_peripheral_successor_edges(graph, subgraph):
        if not node_filter(graph, v) or not node_filter(graph, u) or not edge_filter(graph, u, v, k):
            continue
        result[v]['predecessor'][u].append((k, graph[u][v][k]))
    for u, v, k in get_peripheral_predecessor_edges(graph, subgraph):
        if not node_filter(graph, v) or not node_filter(graph, u) or not edge_filter(graph, u, v, k):
            continue
        result[u]['successor'][v].append((k, graph[u][v][k]))
return result |