repo (string, 2-99 chars) | file (string, 13-225 chars) | code (string, 0-18.3M chars) | file_length (int64, 0-18.3M) | avg_line_length (float64, 0-1.36M) | max_line_length (int64, 0-4.26M) | extension_type (string, 1 class) |
---|---|---|---|---|---|---|
correlate | correlate-master/regret.py |
import numpy as np
import pandas as pd
from config import target_label, verbosity_thesis
def get_last_outcome(ts_measured_actual, n_samples_per_generation):
"""
in the last n_samples_per_generation of ts_measured_actual get value of the target_label
"""
outcome_last = np.array(ts_measured_actual.loc[:, target_label])[-n_samples_per_generation:]
return outcome_last
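# Illustrative sketch (hypothetical config, not from this repo): with target_label == 'mood'
# and n_samples_per_generation == 2, get_last_outcome returns the last two measured 'mood'
# values as a 1-D numpy array.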
def compute_regret(ts_measured_actual, ts_generated_optimal, regret_list, n_samples_per_generation, interv_var_optil, interv_var, interv_var_correct_list):
outcome_actual = get_last_outcome(ts_measured_actual, n_samples_per_generation)
outcome_optimal = get_last_outcome(ts_generated_optimal, n_samples_per_generation)
new_regret = sum(outcome_optimal - outcome_actual)
# if new_regret < 0:
# print('outcome_optimal:', outcome_optimal,
# '\noutcome_actual:', outcome_actual,
# '\nintervention_variable:', interv_var,
# '\ninterv_val:', interv_val,
# '\nintervention_value_optimal_backup:', intervention_value_optimal_backup,
# '\nintervention_var_optimal_backup:', intervention_var_optimal_backup,
# '\nintervention_variable:', interv_var)
# ValueError("Regret is negative! See prints above")
regret_list = np.append(regret_list, new_regret)
    # append 1 to interv_var_correct_list if the chosen intervention variable matches the optimal one, else 0
if interv_var_optil == interv_var:
interv_var_correct_list = np.append(interv_var_correct_list, 1)
else:
interv_var_correct_list = np.append(interv_var_correct_list, 0)
return regret_list, outcome_actual[0], interv_var_correct_list
def test_compute_regret():
    ts_measured_actual = pd.DataFrame(np.array([[1, 3], [4, 6], [7, 9]]), columns=['0', '1'])
    ts_generated_optimal = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), columns=['0', '1', '2'])
    regret_list = np.array([])
    n_samples_per_generation = 1
    # compute_regret also expects the chosen and optimal intervention variables; pass matching
    # dummy values so the intervention-correctness bookkeeping is exercised as well
    # (the regret assertion assumes target_label == '0' in config)
    regret_list, _, interv_var_correct_list = compute_regret(
        ts_measured_actual, ts_generated_optimal, regret_list, n_samples_per_generation,
        interv_var_optil='0', interv_var='0', interv_var_correct_list=np.array([]))
    assert regret_list == [0]
    assert interv_var_correct_list == [1]
def cost_function(regret_list, was_intervened, n_ini_obs):
"""
compute cost function
"""
cost_per_observation = 1
cost_per_intervention = 10
cost_per_regret = 34 # 3.4*10
# count number of interventions
n_interventions = was_intervened.to_numpy().sum()
# count number of observations
n_observations = was_intervened.shape[0] - n_interventions
# compute cost
sum_regret = sum(regret_list)
cost = cost_per_observation * n_observations + cost_per_intervention * n_interventions + cost_per_regret * sum_regret
print(
'cost', cost, ' = cost_per_observation', cost_per_observation, ' * n_observations', n_observations,
' + cost_per_intervention', cost_per_intervention, ' * n_interventions', n_interventions, ' + cost_per_regret',
cost_per_regret, ' * sum_regret', sum_regret)
return cost
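# Worked example (hypothetical inputs, not part of the original module):
#   was_intervened: boolean DataFrame with 3 rows, 1 entry True
#     -> n_interventions = 1, n_observations = 2
#   regret_list = [0.5, 1.0] -> sum_regret = 1.5
#   cost = 1 * 2 + 10 * 1 + 34 * 1.5 = 63.0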
| 3,012 | 44.651515 | 155 | py |
correlate | correlate-master/apis/weather.py |
import json
import time
import urllib.request
import numpy as np
import pandas as pd
from keys import key_open_weather
def flatten_data(y):
"""flatten json"""
out = {}
def flatten(x, name=''):
if type(x) is dict:
for a in x:
flatten(x[a], name + a + '_')
elif type(x) is list:
i = 0
for a in x:
flatten(a, name + str(i) + '_')
i += 1
else:
out[name[:-1]] = x
flatten(y)
return out
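# Illustration of the flattening (hypothetical payload, not an actual API response):
#   flatten_data({"coord": {"lat": 48.7, "lon": 9.1}, "weather": [{"id": 800}]})
#   == {"coord_lat": 48.7, "coord_lon": 9.1, "weather_0_id": 800}
# nested dict keys are joined with '_' and list items get their index appended.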
def change_format(contents):
    # placeholder for format changes: convert unix 'dt' to ISO format
    # (e.g. 1652310000 -> 2022-05-11 23:00:00 +0000 UTC) and Kelvin to Celsius for
    # main_temp / main_temp_min / main_temp_max / main_feels_like;
    # currently the contents are returned unchanged (dt_iso is set in get_current_weather_dict)
    return contents
def get_current_weather_dict(long, lat, key_open_weather):
    # note: main() passes the latitude via 'long' and the longitude via 'lat', so mapping
    # long -> the API's 'lat' query field and lat -> 'lon' below builds the intended URL
    open_weather_map_request_url = ('https://api.openweathermap.org/data/2.5/weather?lat=' + long
                                    + '&lon=' + lat + '&appid=' + key_open_weather)
# contents = urllib.request.urlopen(
# "https://api.weatherapi.com/v1/history.json?key=" + key + "&q=" + long + "," + lat + "&dt=" + date).read()
# request, read and decode json
current_weather = json.loads(urllib.request.urlopen(open_weather_map_request_url).read())
# flatten nested json
current_weather = flatten_data(current_weather)
# format changes to match the weather df
current_weather['dt_iso'] = pd.to_datetime(current_weather['dt'], unit='s').isoformat()
# match old_weather_headers to dict_keys
header_matching = [['dt', 'dt'], ['dt_iso', 'dt_iso'], ['timezone', 'timezone'], ['city_name', 'name'],
['lat', 'coord_lat'],
['lon', 'coord_lon'], ['temp', 'main_temp'], ['visibility', 'visibility'],
['dew_point', 'dew_point'],
['feels_like', 'feels_like'], ['temp_min', 'main_temp_min'], ['temp_max', 'main_temp_max'],
['pressure', 'main_pressure'], ['sea_level', 'sea_level'], ['grnd_level', 'grnd_level'],
['humidity', 'main_humidity'], ['wind_speed', 'wind_speed'], ['wind_deg', 'wind_deg'],
['wind_gust', 'wind_gust'], ['rain_1h', 'rain_1h'], ['rain_3h', 'rain_3h'],
['snow_1h', 'snow_1h'],
['snow_3h', 'snow_3h'], ['clouds_all', 'clouds_all'], ['weather_id', 'weather_0_id'],
['weather_main', 'weather_0_main'], ['weather_description', 'weather_0_description'],
['weather_icon', 'weather_0_icon'], ['base', 'base']]
current_weather_dict = {}
for i in range(len(header_matching)):
try:
current_weather_dict[header_matching[i][0]] = current_weather[header_matching[i][1]]
        except KeyError:
            # field not present in this API response; store NaN instead
            current_weather_dict[header_matching[i][0]] = np.nan
            print('warning: missing weather field:', header_matching[i][0])
return current_weather_dict
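# Usage sketch (hypothetical coordinates, key as imported above):
#   get_current_weather_dict("48.74", "9.10", key_open_weather)
# returns a flat dict keyed by the left-hand names in header_matching (e.g. 'temp',
# 'humidity', 'dt_iso'), with np.nan for any field missing from the API response.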
def append_current_weather(long, lat, hourly_weather_path):
    current_weather_dict = get_current_weather_dict(long, lat, key_open_weather)
    # add the hourly weather dict to the existing hourly weather csv: read, append, save
    weather_df = pd.read_csv(hourly_weather_path)
    # DataFrame.append was removed in pandas 2.0; concatenate the new row instead
    weather_df = pd.concat([weather_df, pd.DataFrame([current_weather_dict])], ignore_index=True)
    weather_df.to_csv(hourly_weather_path, index=False)
def main():
# measure time: start
start_time = time.time()
long = "48.74309462845568"
lat = "9.101391671042892"
hourly_weather_path = '/home/chrei/code/quantifiedSelfData/2022/weather_api_append.csv'
# every hour get the weather data and append to the csv
while True:
append_current_weather(long,lat, hourly_weather_path)
# stop time and print time
print("--- %s seconds ---" % (time.time() - start_time))
time.sleep(3600)
main()
| 4,049 | 38.320388 | 143 | py |
correlate | correlate-master/venvCorrleateOnly3.9Fuck/lib/python3.9/site-packages/tigramite/plotting.py |
"""Tigramite plotting package."""
# Author: Jakob Runge <[email protected]>
#
# License: GNU General Public License v3.0
import numpy as np
import matplotlib
from matplotlib.colors import ListedColormap
import matplotlib.transforms as transforms
from matplotlib import pyplot, ticker
from matplotlib.ticker import FormatStrFormatter
import matplotlib.patches as mpatches
from matplotlib.collections import PatchCollection
import sys
from operator import sub
import networkx as nx
import tigramite.data_processing as pp
from copy import deepcopy
import matplotlib.path as mpath
import matplotlib.patheffects as PathEffects
# TODO: Add proper docstrings to internal functions...
def _par_corr_trafo(cmi):
"""Transformation of CMI to partial correlation scale."""
# Set negative values to small positive number
# (zero would be interpreted as non-significant in some functions)
if np.ndim(cmi) == 0:
if cmi < 0.0:
cmi = 1e-8
else:
cmi[cmi < 0.0] = 1e-8
return np.sqrt(1.0 - np.exp(-2.0 * cmi))
def _par_corr_to_cmi(par_corr):
"""Transformation of partial correlation to CMI scale."""
return -0.5 * np.log(1.0 - par_corr ** 2)
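# Round-trip sketch (illustrative value only): for a partial correlation of 0.5,
#   _par_corr_to_cmi(0.5) = -0.5 * log(1 - 0.25) ≈ 0.1438, and
#   _par_corr_trafo(0.1438) = sqrt(1 - exp(-2 * 0.1438)) ≈ 0.5 recovers it.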
def _myround(x, base=5, round_mode="updown"):
"""Rounds x to a float with precision base."""
if round_mode == "updown":
return base * round(float(x) / base)
elif round_mode == "down":
return base * np.floor(float(x) / base)
elif round_mode == "up":
return base * np.ceil(float(x) / base)
return base * round(float(x) / base)
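# Illustration of the rounding modes:
#   _myround(2.3, base=0.5, round_mode="up") == 2.5
#   _myround(2.3, base=0.5, round_mode="down") == 2.0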
def _make_nice_axes(ax, where=None, skip=2, color=None):
"""Makes nice axes."""
if where is None:
where = ["left", "bottom"]
if color is None:
color = {"left": "black", "right": "black", "bottom": "black", "top": "black"}
if type(skip) == int:
skip_x = skip_y = skip
else:
skip_x = skip[0]
skip_y = skip[1]
for loc, spine in ax.spines.items():
if loc in where:
spine.set_position(("outward", 5)) # outward by 10 points
spine.set_color(color[loc])
if loc == "left" or loc == "right":
pyplot.setp(ax.get_yticklines(), color=color[loc])
pyplot.setp(ax.get_yticklabels(), color=color[loc])
if loc == "top" or loc == "bottom":
pyplot.setp(ax.get_xticklines(), color=color[loc])
elif loc in [
item for item in ["left", "bottom", "right", "top"] if item not in where
]:
spine.set_color("none") # don't draw spine
else:
raise ValueError("unknown spine location: %s" % loc)
# ax.xaxis.get_major_formatter().set_useOffset(False)
# turn off ticks where there is no spine
if "top" in where and "bottom" not in where:
ax.xaxis.set_ticks_position("top")
ax.set_xticks(ax.get_xticks()[::skip_x])
elif "bottom" in where:
ax.xaxis.set_ticks_position("bottom")
ax.set_xticks(ax.get_xticks()[::skip_x])
else:
ax.xaxis.set_ticks_position("none")
ax.xaxis.set_ticklabels([])
if "right" in where and "left" not in where:
ax.yaxis.set_ticks_position("right")
ax.set_yticks(ax.get_yticks()[::skip_y])
elif "left" in where:
ax.yaxis.set_ticks_position("left")
ax.set_yticks(ax.get_yticks()[::skip_y])
else:
ax.yaxis.set_ticks_position("none")
ax.yaxis.set_ticklabels([])
ax.patch.set_alpha(0.0)
def _get_absmax(val_matrix):
"""Get value at absolute maximum in lag function array.
    For an (N, N, tau)-array this computes the lag of the absolute maximum
along the tau-axis and stores the (positive or negative) value in
the (N,N)-array absmax."""
absmax_indices = np.abs(val_matrix).argmax(axis=2)
i, j = np.indices(val_matrix.shape[:2])
return val_matrix[i, j, absmax_indices]
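# Illustration: for val_matrix[i, j] = [0.1, -0.9, 0.4] the absolute maximum is at
# lag 1, so the returned (N, N) array stores the signed value -0.9 at position [i, j].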
def _add_timeseries(
fig,
axes,
i,
time,
dataseries,
label,
use_mask=False,
mask=None,
missing_flag=None,
grey_masked_samples=False,
data_linewidth=1.0,
skip_ticks_data_x=1,
skip_ticks_data_y=1,
unit=None,
last=False,
time_label="",
label_fontsize=10,
color="black",
grey_alpha=1.0,
):
"""Adds a time series plot to an axis.
Plot of dataseries is added to axis. Allows for proper visualization of
masked data.
Parameters
----------
fig : figure instance
Figure instance.
axes : axis instance
Either gridded axis object or single axis instance.
i : int
Index of axis in gridded axis object.
time : array
Timelabel array.
dataseries : array-like
One-dimensional data series array of variable.
missing_flag : number, optional (default: None)
Flag for missing values in dataframe. Dismisses all time slices of
samples where missing values occur in any variable and also flags
samples for all lags up to 2*tau_max. This avoids biases, see section on
masking in Supplement of [1]_.
label : str
Variable label.
use_mask : bool, optional (default: False)
Whether to use masked data.
mask : array-like, optional (default: None)
Data mask where True labels masked samples.
grey_masked_samples : bool, optional (default: False)
Whether to mark masked samples by grey fills ('fill') or grey data
('data').
data_linewidth : float, optional (default: 1.)
Linewidth.
skip_ticks_data_x : int, optional (default: 1)
Skip every other tickmark.
skip_ticks_data_y : int, optional (default: 1)
Skip every other tickmark.
unit : str, optional (default: None)
Units of variable.
last : bool, optional (default: False)
        Specify whether this is the last panel, where the bottom axis is also
        plotted.
time_label : str, optional (default: '')
Label of time axis.
label_fontsize : int, optional (default: 10)
Fontsize.
color : str, optional (default: black)
Line color.
grey_alpha : float, optional (default: 1.)
Opacity of line.
"""
# axes[i].xaxis.get_major_formatter().set_useOffset(False)
try:
ax = axes[i]
except:
ax = axes
if missing_flag is not None:
dataseries_nomissing = np.ma.masked_where(
dataseries == missing_flag, dataseries
)
else:
dataseries_nomissing = np.ma.masked_where(
np.zeros(dataseries.shape), dataseries
)
if use_mask:
maskdata = np.ma.masked_where(mask, dataseries_nomissing)
if grey_masked_samples == "fill":
ax.fill_between(
time,
maskdata.min(),
maskdata.max(),
where=mask,
color="grey",
interpolate=True,
linewidth=0.0,
alpha=grey_alpha,
)
elif grey_masked_samples == "data":
ax.plot(
time,
dataseries_nomissing,
color="grey",
marker=".",
markersize=data_linewidth,
linewidth=data_linewidth,
clip_on=False,
alpha=grey_alpha,
)
ax.plot(
time,
maskdata,
color=color,
linewidth=data_linewidth,
marker=".",
markersize=data_linewidth,
clip_on=False,
)
else:
ax.plot(
time,
dataseries_nomissing,
color=color,
linewidth=data_linewidth,
clip_on=False,
)
if last:
_make_nice_axes(
ax, where=["left", "bottom"], skip=(skip_ticks_data_x, skip_ticks_data_y)
)
ax.set_xlabel(r"%s" % time_label, fontsize=label_fontsize)
else:
_make_nice_axes(ax, where=["left"], skip=(skip_ticks_data_x, skip_ticks_data_y))
# ax.get_xaxis().get_major_formatter().set_useOffset(False)
ax.xaxis.set_major_formatter(FormatStrFormatter("%.0f"))
ax.label_outer()
ax.set_xlim(time[0], time[-1])
trans = transforms.blended_transform_factory(fig.transFigure, ax.transAxes)
if unit:
ax.set_ylabel(r"%s [%s]" % (label, unit), fontsize=label_fontsize)
else:
ax.set_ylabel(r"%s" % (label), fontsize=label_fontsize)
# ax.text(.02, .5, r'%s [%s]' % (label, unit), fontsize=label_fontsize,
# horizontalalignment='left', verticalalignment='center',
# rotation=90, transform=trans)
# else:
# ax.text(.02, .5, r'%s' % (label), fontsize=label_fontsize,
# horizontalalignment='left', verticalalignment='center',
# rotation=90, transform=trans)
pyplot.tight_layout()
def plot_timeseries(
dataframe=None,
save_name=None,
fig_axes=None,
figsize=None,
var_units=None,
time_label="time",
use_mask=False,
grey_masked_samples=False,
data_linewidth=1.0,
skip_ticks_data_x=1,
skip_ticks_data_y=2,
label_fontsize=12,
):
"""Create and save figure of stacked panels with time series.
Parameters
----------
dataframe : data object, optional
This is the Tigramite dataframe object. It has the attributes
dataframe.values yielding a np array of shape (observations T,
variables N) and optionally a mask of the same shape.
save_name : str, optional (default: None)
Name of figure file to save figure. If None, figure is shown in window.
fig_axes : subplots instance, optional (default: None)
Figure and axes instance. If None they are created as
fig, axes = pyplot.subplots(N,...)
figsize : tuple of floats, optional (default: None)
Figure size if new figure is created. If None, default pyplot figsize
is used.
var_units : list of str, optional (default: None)
Units of variables.
    time_label : str, optional (default: 'time')
Label of time axis.
use_mask : bool, optional (default: False)
Whether to use masked data.
grey_masked_samples : bool, optional (default: False)
Whether to mark masked samples by grey fills ('fill') or grey data
('data').
data_linewidth : float, optional (default: 1.)
Linewidth.
skip_ticks_data_x : int, optional (default: 1)
Skip every other tickmark.
skip_ticks_data_y : int, optional (default: 2)
Skip every other tickmark.
    label_fontsize : int, optional (default: 12)
Fontsize of variable labels.
"""
# Read in all attributes from dataframe
data = dataframe.values
mask = dataframe.mask
var_names = dataframe.var_names
missing_flag = dataframe.missing_flag
datatime = dataframe.datatime
T, N = data.shape
if var_units is None:
var_units = ["" for i in range(N)]
if fig_axes is None:
fig, axes = pyplot.subplots(N, sharex=True, figsize=figsize)
else:
fig, axes = fig_axes
for i in range(N):
if mask is None:
mask_i = None
else:
mask_i = mask[:, i]
_add_timeseries(
fig=fig,
axes=axes,
i=i,
time=datatime,
dataseries=data[:, i],
label=var_names[i],
use_mask=use_mask,
mask=mask_i,
missing_flag=missing_flag,
grey_masked_samples=grey_masked_samples,
data_linewidth=data_linewidth,
skip_ticks_data_x=skip_ticks_data_x,
skip_ticks_data_y=skip_ticks_data_y,
unit=var_units[i],
last=(i == N - 1),
time_label=time_label,
label_fontsize=label_fontsize,
)
fig.subplots_adjust(bottom=0.15, top=0.9, left=0.15, right=0.95, hspace=0.3)
pyplot.tight_layout()
if save_name is not None:
fig.savefig(save_name)
else:
return fig, axes
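# Usage sketch (synthetic data; assumes tigramite's pp.DataFrame as imported above):
#   data = np.random.randn(100, 3)
#   dataframe = pp.DataFrame(data, var_names=["X", "Y", "Z"])
#   fig, axes = plot_timeseries(dataframe=dataframe)   # returned since save_name is None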
def plot_lagfuncs(val_matrix, name=None, setup_args={}, add_lagfunc_args={}):
"""Wrapper helper function to plot lag functions.
Sets up the matrix object and plots the lagfunction, see parameters in
setup_matrix and add_lagfuncs.
Parameters
----------
val_matrix : array_like
Matrix of shape (N, N, tau_max+1) containing test statistic values.
name : str, optional (default: None)
File name. If None, figure is shown in window.
setup_args : dict
Arguments for setting up the lag function matrix, see doc of
setup_matrix.
add_lagfunc_args : dict
Arguments for adding a lag function matrix, see doc of add_lagfuncs.
Returns
-------
matrix : object
Further lag functions can be overlaid using the
matrix.add_lagfuncs(val_matrix) function.
"""
N, N, tau_max_plusone = val_matrix.shape
tau_max = tau_max_plusone - 1
matrix = setup_matrix(N=N, tau_max=tau_max, **setup_args)
matrix.add_lagfuncs(val_matrix=val_matrix, **add_lagfunc_args)
if name is not None:
matrix.savefig(name=name)
return matrix
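# Usage sketch (random values, shapes only matter): N=3 variables, tau_max=4
#   val_matrix = np.random.rand(3, 3, 5)
#   plot_lagfuncs(val_matrix, setup_args={"var_names": ["X", "Y", "Z"]})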
class setup_matrix:
"""Create matrix of lag function panels.
Class to setup figure object. The function add_lagfuncs(...) allows to plot
the val_matrix of shape (N, N, tau_max+1). Multiple lagfunctions can be
overlaid for comparison.
Parameters
----------
N : int
Number of variables
tau_max : int
Maximum time lag.
var_names : list, optional (default: None)
List of variable names. If None, range(N) is used.
figsize : tuple of floats, optional (default: None)
Figure size if new figure is created. If None, default pyplot figsize
is used.
minimum : int, optional (default: -1)
Lower y-axis limit.
maximum : int, optional (default: 1)
Upper y-axis limit.
label_space_left : float, optional (default: 0.1)
Fraction of horizontal figure space to allocate left of plot for labels.
label_space_top : float, optional (default: 0.05)
Fraction of vertical figure space to allocate top of plot for labels.
legend_width : float, optional (default: 0.15)
Fraction of horizontal figure space to allocate right of plot for
legend.
x_base : float, optional (default: 1.)
x-tick intervals to show.
y_base : float, optional (default: .4)
y-tick intervals to show.
plot_gridlines : bool, optional (default: False)
Whether to show a grid.
lag_units : str, optional (default: '')
lag_array : array, optional (default: None)
Optional specification of lags overwriting np.arange(0, tau_max+1)
label_fontsize : int, optional (default: 10)
Fontsize of variable labels.
"""
def __init__(
self,
N,
tau_max,
var_names=None,
figsize=None,
minimum=-1,
maximum=1,
label_space_left=0.1,
label_space_top=0.05,
legend_width=0.15,
legend_fontsize=10,
x_base=1.0,
y_base=0.5,
plot_gridlines=False,
lag_units="",
lag_array=None,
label_fontsize=10,
):
self.tau_max = tau_max
self.labels = []
self.lag_units = lag_units
# if lag_array is None:
# self.lag_array = np.arange(0, self.tau_max + 1)
# else:
self.lag_array = lag_array
if x_base is None:
self.x_base = 1
else:
self.x_base = x_base
self.legend_width = legend_width
self.legend_fontsize = legend_fontsize
self.label_space_left = label_space_left
self.label_space_top = label_space_top
self.label_fontsize = label_fontsize
self.fig = pyplot.figure(figsize=figsize)
self.axes_dict = {}
if var_names is None:
var_names = range(N)
plot_index = 1
for i in range(N):
for j in range(N):
self.axes_dict[(i, j)] = self.fig.add_subplot(N, N, plot_index)
# Plot process labels
if j == 0:
trans = transforms.blended_transform_factory(
self.fig.transFigure, self.axes_dict[(i, j)].transAxes
)
self.axes_dict[(i, j)].text(
0.01,
0.5,
"%s" % str(var_names[i]),
fontsize=label_fontsize,
horizontalalignment="left",
verticalalignment="center",
transform=trans,
)
if i == 0:
trans = transforms.blended_transform_factory(
self.axes_dict[(i, j)].transAxes, self.fig.transFigure
)
self.axes_dict[(i, j)].text(
0.5,
0.99,
r"${\to}$ " + "%s" % str(var_names[j]),
fontsize=label_fontsize,
horizontalalignment="center",
verticalalignment="top",
transform=trans,
)
# Make nice axis
_make_nice_axes(
self.axes_dict[(i, j)], where=["left", "bottom"], skip=(1, 1)
)
if x_base is not None:
self.axes_dict[(i, j)].xaxis.set_major_locator(
ticker.FixedLocator(np.arange(0, self.tau_max + 1, x_base))
)
if x_base / 2.0 % 1 == 0:
self.axes_dict[(i, j)].xaxis.set_minor_locator(
ticker.FixedLocator(
np.arange(0, self.tau_max + 1, x_base / 2.0)
)
)
if y_base is not None:
self.axes_dict[(i, j)].yaxis.set_major_locator(
ticker.FixedLocator(
np.arange(
_myround(minimum, y_base, "down"),
_myround(maximum, y_base, "up") + y_base,
y_base,
)
)
)
self.axes_dict[(i, j)].yaxis.set_minor_locator(
ticker.FixedLocator(
np.arange(
_myround(minimum, y_base, "down"),
_myround(maximum, y_base, "up") + y_base,
y_base / 2.0,
)
)
)
self.axes_dict[(i, j)].set_ylim(
_myround(minimum, y_base, "down"),
_myround(maximum, y_base, "up"),
)
if j != 0:
self.axes_dict[(i, j)].get_yaxis().set_ticklabels([])
self.axes_dict[(i, j)].set_xlim(0, self.tau_max)
if plot_gridlines:
self.axes_dict[(i, j)].grid(
True,
which="major",
color="black",
linestyle="dotted",
dashes=(1, 1),
linewidth=0.05,
zorder=-5,
)
plot_index += 1
def add_lagfuncs(
self,
val_matrix,
sig_thres=None,
conf_matrix=None,
color="black",
label=None,
two_sided_thres=True,
marker=".",
markersize=5,
alpha=1.0,
):
"""Add lag function plot from val_matrix array.
Parameters
----------
val_matrix : array_like
Matrix of shape (N, N, tau_max+1) containing test statistic values.
sig_thres : array-like, optional (default: None)
Matrix of significance thresholds. Must be of same shape as
val_matrix.
conf_matrix : array-like, optional (default: None)
Matrix of shape (, N, tau_max+1, 2) containing confidence bounds.
color : str, optional (default: 'black')
Line color.
label : str
Test statistic label.
two_sided_thres : bool, optional (default: True)
Whether to draw sig_thres for pos. and neg. values.
marker : matplotlib marker symbol, optional (default: '.')
Marker.
markersize : int, optional (default: 5)
Marker size.
alpha : float, optional (default: 1.)
Opacity.
"""
if label is not None:
self.labels.append((label, color, marker, markersize, alpha))
for ij in list(self.axes_dict):
i = ij[0]
j = ij[1]
maskedres = np.copy(val_matrix[i, j, int(i == j) :])
self.axes_dict[(i, j)].plot(
range(int(i == j), self.tau_max + 1),
maskedres,
linestyle="",
color=color,
marker=marker,
markersize=markersize,
alpha=alpha,
clip_on=False,
)
if conf_matrix is not None:
maskedconfres = np.copy(conf_matrix[i, j, int(i == j) :])
self.axes_dict[(i, j)].plot(
range(int(i == j), self.tau_max + 1),
maskedconfres[:, 0],
linestyle="",
color=color,
marker="_",
markersize=markersize - 2,
alpha=alpha,
clip_on=False,
)
self.axes_dict[(i, j)].plot(
range(int(i == j), self.tau_max + 1),
maskedconfres[:, 1],
linestyle="",
color=color,
marker="_",
markersize=markersize - 2,
alpha=alpha,
clip_on=False,
)
self.axes_dict[(i, j)].plot(
range(int(i == j), self.tau_max + 1),
np.zeros(self.tau_max + 1 - int(i == j)),
color="black",
linestyle="dotted",
linewidth=0.1,
)
if sig_thres is not None:
maskedsigres = sig_thres[i, j, int(i == j) :]
self.axes_dict[(i, j)].plot(
range(int(i == j), self.tau_max + 1),
maskedsigres,
color=color,
linestyle="solid",
linewidth=0.1,
alpha=alpha,
)
if two_sided_thres:
self.axes_dict[(i, j)].plot(
range(int(i == j), self.tau_max + 1),
-sig_thres[i, j, int(i == j) :],
color=color,
linestyle="solid",
linewidth=0.1,
alpha=alpha,
)
# pyplot.tight_layout()
def savefig(self, name=None):
"""Save matrix figure.
Parameters
----------
name : str, optional (default: None)
File name. If None, figure is shown in window.
"""
# Trick to plot legend
if len(self.labels) > 0:
axlegend = self.fig.add_subplot(111, frameon=False)
axlegend.spines["left"].set_color("none")
axlegend.spines["right"].set_color("none")
axlegend.spines["bottom"].set_color("none")
axlegend.spines["top"].set_color("none")
axlegend.set_xticks([])
axlegend.set_yticks([])
# self.labels.append((label, color, marker, markersize, alpha))
for item in self.labels:
label = item[0]
color = item[1]
marker = item[2]
markersize = item[3]
alpha = item[4]
axlegend.plot(
[],
[],
linestyle="",
color=color,
marker=marker,
markersize=markersize,
label=label,
alpha=alpha,
)
axlegend.legend(
loc="upper left",
ncol=1,
bbox_to_anchor=(1.05, 0.0, 0.1, 1.0),
borderaxespad=0,
fontsize=self.legend_fontsize,
).draw_frame(False)
self.fig.subplots_adjust(
left=self.label_space_left,
right=1.0 - self.legend_width,
top=1.0 - self.label_space_top,
hspace=0.35,
wspace=0.35,
)
pyplot.figtext(
0.5,
0.01,
r"lag $\tau$ [%s]" % self.lag_units,
horizontalalignment="center",
fontsize=self.label_fontsize,
)
else:
self.fig.subplots_adjust(
left=self.label_space_left,
right=0.95,
top=1.0 - self.label_space_top,
hspace=0.35,
wspace=0.35,
)
pyplot.figtext(
0.55,
0.01,
r"lag $\tau$ [%s]" % self.lag_units,
horizontalalignment="center",
fontsize=self.label_fontsize,
)
if self.lag_array is not None:
assert self.lag_array.shape == np.arange(self.tau_max + 1).shape
for ij in list(self.axes_dict):
i = ij[0]
j = ij[1]
self.axes_dict[(i, j)].set_xticklabels(self.lag_array[:: self.x_base])
if name is not None:
self.fig.savefig(name)
else:
pyplot.show()
def _draw_network_with_curved_edges(
fig,
ax,
G,
pos,
node_rings,
node_labels,
node_label_size,
node_alpha=1.0,
standard_size=100,
node_aspect=None,
standard_cmap="OrRd",
standard_color="lightgrey",
log_sizes=False,
cmap_links="YlOrRd",
cmap_links_edges="YlOrRd",
links_vmin=0.0,
links_vmax=1.0,
links_edges_vmin=0.0,
links_edges_vmax=1.0,
links_ticks=0.2,
links_edges_ticks=0.2,
link_label_fontsize=8,
arrowstyle="->, head_width=0.4, head_length=1",
arrowhead_size=3.0,
curved_radius=0.2,
label_fontsize=4,
label_fraction=0.5,
link_colorbar_label="link",
# link_edge_colorbar_label='link_edge',
inner_edge_curved=False,
inner_edge_style="solid",
network_lower_bound=0.2,
show_colorbar=True,
):
"""Function to draw a network from networkx graph instance.
Various attributes are used to specify the graph's properties.
This function is just a beta-template for now that can be further
customized.
"""
from matplotlib.patches import FancyArrowPatch, Circle, Ellipse
ax.spines["left"].set_color("none")
ax.spines["right"].set_color("none")
ax.spines["bottom"].set_color("none")
ax.spines["top"].set_color("none")
ax.set_xticks([])
ax.set_yticks([])
N = len(G)
# This fixes a positioning bug in matplotlib.
ax.scatter(0, 0, zorder=-10, alpha=0)
def draw_edge(
ax,
u,
v,
d,
seen,
arrowstyle="->, head_width=0.4, head_length=1",
outer_edge=True,
):
# avoiding attribute error raised by changes in networkx
if hasattr(G, "node"):
# works with networkx 1.10
n1 = G.node[u]["patch"]
n2 = G.node[v]["patch"]
else:
# works with networkx 2.4
n1 = G.nodes[u]["patch"]
n2 = G.nodes[v]["patch"]
if outer_edge:
rad = -1.0 * curved_radius
if cmap_links is not None:
facecolor = data_to_rgb_links.to_rgba(d["outer_edge_color"])
else:
if d["outer_edge_color"] is not None:
facecolor = d["outer_edge_color"]
else:
facecolor = standard_color
width = d["outer_edge_width"]
alpha = d["outer_edge_alpha"]
if (u, v) in seen:
rad = seen.get((u, v))
rad = (rad + np.sign(rad) * 0.1) * -1.0
arrowstyle = arrowstyle
# link_edge = d['outer_edge_edge']
linestyle = d.get("outer_edge_style")
if d.get("outer_edge_attribute", None) == "spurious":
facecolor = "grey"
if d.get("outer_edge_type") in ["<-o", "<--", "<-x"]:
n1, n2 = n2, n1
if d.get("outer_edge_type") in [
"o-o",
"o--",
"--o",
"---",
"x-x",
"x--",
"--x",
"o-x",
"x-o",
# "+->",
# "<-+",
]:
arrowstyle = "-"
# linewidth = width*factor
elif d.get("outer_edge_type") == "<->":
arrowstyle = "<->, head_width=0.4, head_length=1"
# linewidth = width*factor
elif d.get("outer_edge_type") in ["o->", "-->", "<-o", "<--", "<-x", "x->", "+->", "<-+"]:
arrowstyle = "->, head_width=0.4, head_length=1"
else:
rad = -1.0 * inner_edge_curved * curved_radius
if cmap_links is not None:
facecolor = data_to_rgb_links.to_rgba(d["inner_edge_color"])
else:
if d["inner_edge_color"] is not None:
facecolor = d["inner_edge_color"]
else:
facecolor = standard_color
width = d["inner_edge_width"]
alpha = d["inner_edge_alpha"]
if d.get("inner_edge_attribute", None) == "spurious":
facecolor = "grey"
if d.get("inner_edge_type") in ["<-o", "<--", "<-x", "<-+"]:
n1, n2 = n2, n1
if d.get("inner_edge_type") in [
"o-o",
"o--",
"--o",
"---",
"x-x",
"x--",
"--x",
"o-x",
"x-o",
]:
arrowstyle = "-"
elif d.get("inner_edge_type") == "<->":
arrowstyle = "<->, head_width=0.4, head_length=1"
elif d.get("inner_edge_type") in ["o->", "-->", "<-o", "<--", "<-x", "x->", "+->"]:
arrowstyle = "->, head_width=0.4, head_length=1"
linestyle = d.get("inner_edge_style")
coor1 = n1.center
coor2 = n2.center
marker_size = width ** 2
figuresize = fig.get_size_inches()
e_p = FancyArrowPatch(
coor1,
coor2,
arrowstyle=arrowstyle,
connectionstyle=f"arc3,rad={rad}",
mutation_scale=width,
lw=width / 2,
alpha=alpha,
linestyle=linestyle,
color=facecolor,
clip_on=False,
patchA=n1,
patchB=n2,
shrinkA=0,
shrinkB=0,
zorder=-1,
)
ax.add_artist(e_p)
path = e_p.get_path()
vertices = path.vertices.copy()
m, n = vertices.shape
start = vertices[0]
end = vertices[-1]
# This must be added to avoid rescaling of the plot, when no 'o'
# or 'x' is added to the graph.
ax.scatter(*start, zorder=-10, alpha=0)
if outer_edge:
if d.get("outer_edge_type") in ["o->", "o--"]:
circle_marker_start = ax.scatter(
*start,
marker="o",
s=marker_size,
facecolor="w",
edgecolor=facecolor,
zorder=1,
)
ax.add_collection(circle_marker_start)
elif d.get("outer_edge_type") == "<-o":
circle_marker_end = ax.scatter(
*start,
marker="o",
s=marker_size,
facecolor="w",
edgecolor=facecolor,
zorder=1,
)
ax.add_collection(circle_marker_end)
elif d.get("outer_edge_type") == "--o":
circle_marker_end = ax.scatter(
*end,
marker="o",
s=marker_size,
facecolor="w",
edgecolor=facecolor,
zorder=1,
)
ax.add_collection(circle_marker_end)
elif d.get("outer_edge_type") in ["x--", "x->"]:
circle_marker_start = ax.scatter(
*start,
marker="X",
s=marker_size,
facecolor="w",
edgecolor=facecolor,
zorder=1,
)
ax.add_collection(circle_marker_start)
elif d.get("outer_edge_type") in ["+--", "+->"]:
circle_marker_start = ax.scatter(
*start,
marker="P",
s=marker_size,
facecolor="w",
edgecolor=facecolor,
zorder=1,
)
ax.add_collection(circle_marker_start)
elif d.get("outer_edge_type") == "<-x":
circle_marker_end = ax.scatter(
*start,
marker="X",
s=marker_size,
facecolor="w",
edgecolor=facecolor,
zorder=1,
)
ax.add_collection(circle_marker_end)
elif d.get("outer_edge_type") == "<-+":
circle_marker_end = ax.scatter(
*start,
marker="P",
s=marker_size,
facecolor="w",
edgecolor=facecolor,
zorder=1,
)
ax.add_collection(circle_marker_end)
elif d.get("outer_edge_type") == "--x":
circle_marker_end = ax.scatter(
*end,
marker="X",
s=marker_size,
facecolor="w",
edgecolor=facecolor,
zorder=1,
)
ax.add_collection(circle_marker_end)
elif d.get("outer_edge_type") == "o-o":
circle_marker_start = ax.scatter(
*start,
marker="o",
s=marker_size,
facecolor="w",
edgecolor=facecolor,
zorder=1,
)
ax.add_collection(circle_marker_start)
circle_marker_end = ax.scatter(
*end,
marker="o",
s=marker_size,
facecolor="w",
edgecolor=facecolor,
zorder=1,
)
ax.add_collection(circle_marker_end)
elif d.get("outer_edge_type") == "x-x":
circle_marker_start = ax.scatter(
*start,
marker="X",
s=marker_size,
facecolor="w",
edgecolor=facecolor,
zorder=1,
)
ax.add_collection(circle_marker_start)
circle_marker_end = ax.scatter(
*end,
marker="X",
s=marker_size,
facecolor="w",
edgecolor=facecolor,
zorder=1,
)
ax.add_collection(circle_marker_end)
elif d.get("outer_edge_type") == "o-x":
circle_marker_start = ax.scatter(
*start,
marker="o",
s=marker_size,
facecolor="w",
edgecolor=facecolor,
zorder=1,
)
ax.add_collection(circle_marker_start)
circle_marker_end = ax.scatter(
*end,
marker="X",
s=marker_size,
facecolor="w",
edgecolor=facecolor,
zorder=1,
)
ax.add_collection(circle_marker_end)
elif d.get("outer_edge_type") == "x-o":
circle_marker_start = ax.scatter(
*start,
marker="X",
s=marker_size,
facecolor="w",
edgecolor=facecolor,
zorder=1,
)
ax.add_collection(circle_marker_start)
circle_marker_end = ax.scatter(
*end,
marker="o",
s=marker_size,
facecolor="w",
edgecolor=facecolor,
zorder=1,
)
ax.add_collection(circle_marker_end)
else:
if d.get("inner_edge_type") in ["o->", "o--"]:
circle_marker_start = ax.scatter(
*start,
marker="o",
s=marker_size,
facecolor="w",
edgecolor=facecolor,
zorder=1,
)
ax.add_collection(circle_marker_start)
elif d.get("outer_edge_type") == "<-o":
circle_marker_end = ax.scatter(
*start,
marker="o",
s=marker_size,
facecolor="w",
edgecolor=facecolor,
zorder=1,
)
ax.add_collection(circle_marker_end)
elif d.get("outer_edge_type") == "--o":
circle_marker_end = ax.scatter(
*end,
marker="o",
s=marker_size,
facecolor="w",
edgecolor=facecolor,
zorder=1,
)
ax.add_collection(circle_marker_end)
elif d.get("inner_edge_type") in ["x--", "x->"]:
circle_marker_start = ax.scatter(
*start,
marker="X",
s=marker_size,
facecolor="w",
edgecolor=facecolor,
zorder=1,
)
ax.add_collection(circle_marker_start)
elif d.get("inner_edge_type") in ["+--", "+->"]:
circle_marker_start = ax.scatter(
*start,
marker="P",
s=marker_size,
facecolor="w",
edgecolor=facecolor,
zorder=1,
)
ax.add_collection(circle_marker_start)
elif d.get("outer_edge_type") == "<-x":
circle_marker_end = ax.scatter(
*start,
marker="X",
s=marker_size,
facecolor="w",
edgecolor=facecolor,
zorder=1,
)
ax.add_collection(circle_marker_end)
elif d.get("outer_edge_type") == "<-+":
circle_marker_end = ax.scatter(
*start,
marker="P",
s=marker_size,
facecolor="w",
edgecolor=facecolor,
zorder=1,
)
ax.add_collection(circle_marker_end)
elif d.get("outer_edge_type") == "--x":
circle_marker_end = ax.scatter(
*end,
marker="X",
s=marker_size,
facecolor="w",
edgecolor=facecolor,
zorder=1,
)
ax.add_collection(circle_marker_end)
elif d.get("inner_edge_type") == "o-o":
circle_marker_start = ax.scatter(
*start,
marker="o",
s=marker_size,
facecolor="w",
edgecolor=facecolor,
zorder=1,
)
ax.add_collection(circle_marker_start)
circle_marker_end = ax.scatter(
*end,
marker="o",
s=marker_size,
facecolor="w",
edgecolor=facecolor,
zorder=1,
)
ax.add_collection(circle_marker_end)
elif d.get("inner_edge_type") == "x-x":
circle_marker_start = ax.scatter(
*start,
marker="X",
s=marker_size,
facecolor="w",
edgecolor=facecolor,
zorder=1,
)
ax.add_collection(circle_marker_start)
circle_marker_end = ax.scatter(
*end,
marker="X",
s=marker_size,
facecolor="w",
edgecolor=facecolor,
zorder=1,
)
ax.add_collection(circle_marker_end)
elif d.get("inner_edge_type") == "o-x":
circle_marker_start = ax.scatter(
*start,
marker="o",
s=marker_size,
facecolor="w",
edgecolor=facecolor,
zorder=1,
)
ax.add_collection(circle_marker_start)
circle_marker_end = ax.scatter(
*end,
marker="X",
s=marker_size,
facecolor="w",
edgecolor=facecolor,
zorder=1,
)
ax.add_collection(circle_marker_end)
elif d.get("inner_edge_type") == "x-o":
circle_marker_start = ax.scatter(
*start,
marker="X",
s=marker_size,
facecolor="w",
edgecolor=facecolor,
zorder=1,
)
ax.add_collection(circle_marker_start)
circle_marker_end = ax.scatter(
*end,
marker="o",
s=marker_size,
facecolor="w",
edgecolor=facecolor,
zorder=1,
)
ax.add_collection(circle_marker_end)
if d["label"] is not None and outer_edge:
# Attach labels of lags
trans = None # patch.get_transform()
path = e_p.get_path()
verts = path.to_polygons(trans)[0]
if len(verts) > 2:
label_vert = verts[1, :]
l = d["label"]
string = str(l)
txt = ax.text(
label_vert[0],
label_vert[1],
string,
fontsize=link_label_fontsize,
verticalalignment="center",
horizontalalignment="center",
color="w",
zorder=1,
)
txt.set_path_effects(
[PathEffects.withStroke(linewidth=2, foreground="k")]
)
return rad
# Collect all edge weights to get color scale
all_links_weights = []
all_links_edge_weights = []
for (u, v, d) in G.edges(data=True):
if u != v:
if d["outer_edge"] and d["outer_edge_color"] is not None:
all_links_weights.append(d["outer_edge_color"])
if d["inner_edge"] and d["inner_edge_color"] is not None:
all_links_weights.append(d["inner_edge_color"])
if cmap_links is not None and len(all_links_weights) > 0:
if links_vmin is None:
links_vmin = np.array(all_links_weights).min()
if links_vmax is None:
links_vmax = np.array(all_links_weights).max()
data_to_rgb_links = pyplot.cm.ScalarMappable(
norm=None, cmap=pyplot.get_cmap(cmap_links)
)
data_to_rgb_links.set_array(np.array(all_links_weights))
data_to_rgb_links.set_clim(vmin=links_vmin, vmax=links_vmax)
# Create colorbars for links
# setup colorbar axes.
if show_colorbar:
cax_e = pyplot.axes(
[
0.55,
ax.get_subplotspec().get_position(ax.figure).bounds[1] + 0.02,
0.4,
0.025 + (len(all_links_edge_weights) == 0) * 0.035,
],
frameon=False,
)
cb_e = pyplot.colorbar(
data_to_rgb_links, cax=cax_e, orientation="horizontal"
)
# try:
cb_e.set_ticks(
np.arange(
_myround(links_vmin, 0.5, "down"),
_myround(links_vmax, 0.5, "up") + 0.5,
0.5,
)
)
cb_e.outline.clear()
cax_e.set_xlabel(
link_colorbar_label, labelpad=1, fontsize=label_fontsize, zorder=-10
)
##
# Draw nodes
##
node_sizes = np.zeros((len(node_rings), N))
for ring in list(node_rings): # iterate through to get all node sizes
if node_rings[ring]["sizes"] is not None:
node_sizes[ring] = node_rings[ring]["sizes"]
else:
node_sizes[ring] = standard_size
max_sizes = node_sizes.max(axis=1)
total_max_size = node_sizes.sum(axis=0).max()
node_sizes /= total_max_size
node_sizes *= standard_size
def get_aspect(ax):
# Total figure size
figW, figH = ax.get_figure().get_size_inches()
# print(figW, figH)
# Axis size on figure
_, _, w, h = ax.get_position().bounds
# Ratio of display units
# print(w, h)
disp_ratio = (figH * h) / (figW * w)
# Ratio of data units
# Negative over negative because of the order of subtraction
data_ratio = sub(*ax.get_ylim()) / sub(*ax.get_xlim())
# print(data_ratio, disp_ratio)
return disp_ratio / data_ratio
if node_aspect is None:
node_aspect = get_aspect(ax)
# start drawing the outer ring first...
for ring in list(node_rings)[::-1]:
# print ring
# dictionary of rings: {0:{'sizes':(N,)-array, 'color_array':(N,)-array
# or None, 'cmap':string, 'vmin':float or None, 'vmax':float or None}}
if node_rings[ring]["color_array"] is not None:
color_data = node_rings[ring]["color_array"]
if node_rings[ring]["vmin"] is not None:
vmin = node_rings[ring]["vmin"]
else:
vmin = node_rings[ring]["color_array"].min()
if node_rings[ring]["vmax"] is not None:
vmax = node_rings[ring]["vmax"]
else:
vmax = node_rings[ring]["color_array"].max()
if node_rings[ring]["cmap"] is not None:
cmap = node_rings[ring]["cmap"]
else:
cmap = standard_cmap
data_to_rgb = pyplot.cm.ScalarMappable(
norm=None, cmap=pyplot.get_cmap(cmap)
)
data_to_rgb.set_array(color_data)
data_to_rgb.set_clim(vmin=vmin, vmax=vmax)
colors = [data_to_rgb.to_rgba(color_data[n]) for n in G]
# if node_rings[ring]["colorbar"]: # chrei removed this
# Create colorbars for nodes
# cax_n = pyplot.axes([.8 + ring*0.11,
# ax.get_subplotspec().get_position(ax.figure).bounds[1]+0.05, 0.025, 0.35], frameon=False) #
# setup colorbar axes.
# setup colorbar axes.
# cax_n = pyplot.axes(
# [
# 0.05,
# ax.get_subplotspec().get_position(ax.figure).bounds[1] + 0.02 + ring * 0.11,
# 0.4,
# 0.025 + (len(node_rings) == 1) * 0.035,
# ],
# frameon=False,
# )
# cb_n = pyplot.colorbar(data_to_rgb, cax=cax_n, orientation="horizontal")
# cb_n.set_ticks(
# np.arange(
# _myround(vmin, node_rings[ring]["ticks"], "down"),
# _myround(vmax, node_rings[ring]["ticks"], "up")
# + node_rings[ring]["ticks"],
# node_rings[ring]["ticks"],
# )
# )
# cb_n.outline.clear()
# cb_n.set_ticks()
# cax_n.set_xlabel( # chrei
# node_rings[ring]["label"], labelpad=1, fontsize=label_fontsize
# )
else:
colors = None
vmin = None
vmax = None
for n in G:
if type(node_alpha) == dict:
alpha = node_alpha[n]
else:
alpha = 1.0
if colors is None:
c = Ellipse(
pos[n],
width=node_sizes[: ring + 1].sum(axis=0)[n] * node_aspect,
height=node_sizes[: ring + 1].sum(axis=0)[n],
clip_on=False,
facecolor=standard_color,
edgecolor=standard_color,
zorder=-ring - 1,
)
else:
c = Ellipse(
pos[n],
width=node_sizes[: ring + 1].sum(axis=0)[n] * node_aspect,
height=node_sizes[: ring + 1].sum(axis=0)[n],
clip_on=False,
facecolor=colors[n],
edgecolor=colors[n],
zorder=-ring - 1,
)
ax.add_patch(c)
# avoiding attribute error raised by changes in networkx
if hasattr(G, "node"):
# works with networkx 1.10
G.node[n]["patch"] = c
else:
# works with networkx 2.4
G.nodes[n]["patch"] = c
if ring == 0:
ax.text(
pos[n][0],
pos[n][1],
node_labels[n],
fontsize=node_label_size,
horizontalalignment="center",
verticalalignment="center",
alpha=1.0,
)
# Draw edges
seen = {}
for (u, v, d) in G.edges(data=True):
if d.get("no_links"):
d["inner_edge_alpha"] = 1e-8
d["outer_edge_alpha"] = 1e-8
if u != v:
if d["outer_edge"]:
seen[(u, v)] = draw_edge(ax, u, v, d, seen, arrowstyle, outer_edge=True)
if d["inner_edge"]:
seen[(u, v)] = draw_edge(ax, u, v, d, seen, outer_edge=False)
pyplot.subplots_adjust(bottom=network_lower_bound)
def plot_graph(
link_matrix=None,
val_matrix=None,
sig_thres=None,
var_names=None,
fig_ax=None,
figsize=None,
save_name=None,
link_colorbar_label="MCI",
node_colorbar_label="auto-MCI",
link_width=None,
link_attribute=None,
node_pos=None,
arrow_linewidth=10.0,
vmin_edges=-1,
vmax_edges=1.0,
edge_ticks=0.4,
cmap_edges="RdBu_r",
vmin_nodes=0,
vmax_nodes=1.0,
node_ticks=0.4,
cmap_nodes="OrRd",
node_size=0.3,
node_aspect=None,
arrowhead_size=20,
curved_radius=0.2,
label_fontsize=10,
alpha=1.0,
node_label_size=10,
link_label_fontsize=10,
lag_array=None,
network_lower_bound=0.2,
show_colorbar=True, # chrei
inner_edge_style="dashed",
):
"""Creates a network plot.
This is still in beta. The network is defined either from True values in
link_matrix, or from thresholding the val_matrix with sig_thres. Nodes
denote variables, straight links contemporaneous dependencies and curved
arrows lagged dependencies. The node color denotes the maximal absolute
auto-dependency and the link color the value at the lag with maximal
absolute cross-dependency. The link label lists the lags with significant
dependency in order of absolute magnitude. The network can also be plotted
over a map drawn before on the same axis. Then the node positions can be
supplied in appropriate axis coordinates via node_pos.
Parameters
----------
link_matrix : bool array-like, optional (default: None)
Matrix of significant links. Must be of same shape as val_matrix. Either
sig_thres or link_matrix has to be provided.
val_matrix : array_like
Matrix of shape (N, N, tau_max+1) containing test statistic values.
sig_thres : array-like, optional (default: None)
Matrix of significance thresholds. Must be of same shape as val_matrix.
Either sig_thres or link_matrix has to be provided.
var_names : list, optional (default: None)
List of variable names. If None, range(N) is used.
fig_ax : tuple of figure and axis object, optional (default: None)
Figure and axes instance. If None they are created.
figsize : tuple
Size of figure.
save_name : str, optional (default: None)
Name of figure file to save figure. If None, figure is shown in window.
link_colorbar_label : str, optional (default: 'MCI')
Test statistic label.
node_colorbar_label : str, optional (default: 'auto-MCI')
Test statistic label for auto-dependencies.
link_width : array-like, optional (default: None)
Array of val_matrix.shape specifying relative link width with maximum
given by arrow_linewidth. If None, all links have same width.
link_attribute : array-like, optional (default: None)
String array of val_matrix.shape specifying link attributes.
node_pos : dictionary, optional (default: None)
Dictionary of node positions in axis coordinates of form
node_pos = {'x':array of shape (N,), 'y':array of shape(N)}. These
coordinates could have been transformed before for basemap plots.
    arrow_linewidth : float, optional (default: 10)
Linewidth.
vmin_edges : float, optional (default: -1)
Link colorbar scale lower bound.
vmax_edges : float, optional (default: 1)
Link colorbar scale upper bound.
edge_ticks : float, optional (default: 0.4)
Link tick mark interval.
cmap_edges : str, optional (default: 'RdBu_r')
Colormap for links.
vmin_nodes : float, optional (default: 0)
Node colorbar scale lower bound.
vmax_nodes : float, optional (default: 1)
Node colorbar scale upper bound.
node_ticks : float, optional (default: 0.4)
Node tick mark interval.
cmap_nodes : str, optional (default: 'OrRd')
Colormap for links.
node_size : int, optional (default: 0.3)
Node size.
node_aspect : float, optional (default: None)
        Ratio between the height and width of the variable nodes.
    arrowhead_size : int, optional (default: 20)
        Size of link arrow head. Passed on to FancyArrowPatch object.
    curved_radius : float, optional (default: 0.2)
Curvature of links. Passed on to FancyArrowPatch object.
label_fontsize : int, optional (default: 10)
Fontsize of colorbar labels.
alpha : float, optional (default: 1.)
Opacity.
node_label_size : int, optional (default: 10)
Fontsize of node labels.
    link_label_fontsize : int, optional (default: 10)
Fontsize of link labels.
lag_array : array, optional (default: None)
Optional specification of lags overwriting np.arange(0, tau_max+1)
network_lower_bound : float, optional (default: 0.2)
Fraction of vertical space below graph plot.
show_colorbar : bool
Whether to show colorbars for links and nodes.
"""
if fig_ax is None:
fig = pyplot.figure(figsize=figsize)
ax = fig.add_subplot(111, frame_on=False)
else:
fig, ax = fig_ax
(link_matrix, val_matrix, link_width, link_attribute) = _check_matrices(
link_matrix, val_matrix, link_width, link_attribute, sig_thres
)
N, N, dummy = val_matrix.shape
tau_max = dummy - 1
if np.count_nonzero(link_matrix != "") == np.count_nonzero(
np.diagonal(link_matrix) != ""
):
diagonal = True
else:
diagonal = False
if np.count_nonzero(link_matrix == "") == link_matrix.size or diagonal:
link_matrix[0, 1, 0] = "---"
no_links = True
else:
no_links = False
if var_names is None:
var_names = range(N)
# Define graph links by absolute maximum (positive or negative like for
# partial correlation)
# val_matrix[np.abs(val_matrix) < sig_thres] = 0.
# Only draw link in one direction among contemp
# Remove lower triangle
link_matrix_upper = np.copy(link_matrix)
link_matrix_upper[:, :, 0] = np.triu(link_matrix_upper[:, :, 0])
# net = _get_absmax(link_matrix != "")
net = np.any(link_matrix_upper != "", axis=2)
G = nx.DiGraph(net)
    # This handles graphs with no links.
# nx.draw(G, alpha=0, zorder=-10)
node_color = np.zeros(N)
# list of all strengths for color map
all_strengths = []
# Add attributes, contemporaneous and lagged links are handled separately
for (u, v, dic) in G.edges(data=True):
dic["no_links"] = no_links
# average lagfunc for link u --> v ANDOR u -- v
if tau_max > 0:
# argmax of absolute maximum
argmax = np.abs(val_matrix[u, v][1:]).argmax() + 1
else:
argmax = 0
if u != v:
# For contemp links masking or finite samples can lead to different
# values for u--v and v--u
# Here we use the maximum for the width and weight (=color)
# of the link
# Draw link if u--v OR v--u at lag 0 is nonzero
# dic['inner_edge'] = ((np.abs(val_matrix[u, v][0]) >=
# sig_thres[u, v][0]) or
# (np.abs(val_matrix[v, u][0]) >=
# sig_thres[v, u][0]))
dic["inner_edge"] = link_matrix_upper[u, v, 0]
dic["inner_edge_type"] = link_matrix_upper[u, v, 0]
dic["inner_edge_alpha"] = alpha
dic["inner_edge_color"] = val_matrix[u, v, 0]
# # value at argmax of average
# if np.abs(val_matrix[u, v][0] - val_matrix[v, u][0]) > .0001:
# print("Contemporaneous I(%d; %d)=%.3f != I(%d; %d)=%.3f" % (
# u, v, val_matrix[u, v][0], v, u, val_matrix[v, u][0]) +
# " due to conditions, finite sample effects or "
# "masking, here edge color = "
# "larger (absolute) value.")
# dic['inner_edge_color'] = _get_absmax(
# np.array([[[val_matrix[u, v][0],
# val_matrix[v, u][0]]]])).squeeze()
if link_width is None:
dic["inner_edge_width"] = arrow_linewidth
else:
dic["inner_edge_width"] = (
link_width[u, v, 0] / link_width.max() * arrow_linewidth
)
if link_attribute is None:
dic["inner_edge_attribute"] = None
else:
dic["inner_edge_attribute"] = link_attribute[u, v, 0]
# # fraction of nonzero values
dic["inner_edge_style"] = "solid"
# else:
# dic['inner_edge_style'] = link_style[
# u, v, 0]
all_strengths.append(dic["inner_edge_color"])
if tau_max > 0:
# True if ensemble mean at lags > 0 is nonzero
# dic['outer_edge'] = np.any(
# np.abs(val_matrix[u, v][1:]) >= sig_thres[u, v][1:])
dic["outer_edge"] = np.any(link_matrix_upper[u, v, 1:] != "")
else:
dic["outer_edge"] = False
dic["outer_edge_type"] = link_matrix_upper[u, v, argmax]
dic["outer_edge_alpha"] = alpha
if link_width is None:
# fraction of nonzero values
dic["outer_edge_width"] = arrow_linewidth
else:
dic["outer_edge_width"] = (
link_width[u, v, argmax] / link_width.max() * arrow_linewidth
)
if link_attribute is None:
# fraction of nonzero values
dic["outer_edge_attribute"] = None
else:
dic["outer_edge_attribute"] = link_attribute[u, v, argmax]
# value at argmax of average
dic["outer_edge_color"] = val_matrix[u, v][argmax]
all_strengths.append(dic["outer_edge_color"])
# Sorted list of significant lags (only if robust wrt
# d['min_ensemble_frac'])
if tau_max > 0:
lags = np.abs(val_matrix[u, v][1:]).argsort()[::-1] + 1
sig_lags = (np.where(link_matrix_upper[u, v, 1:] != "")[0] + 1).tolist()
else:
lags, sig_lags = [], []
if lag_array is not None:
dic["label"] = str([lag_array[l] for l in lags if l in sig_lags])[1:-1]
else:
dic["label"] = str([l for l in lags if l in sig_lags])[1:-1]
else:
# Node color is max of average autodependency
node_color[u] = val_matrix[u, v][argmax]
dic["inner_edge_attribute"] = None
dic["outer_edge_attribute"] = None
# dic['outer_edge_edge'] = False
# dic['outer_edge_edgecolor'] = None
# dic['inner_edge_edge'] = False
# dic['inner_edge_edgecolor'] = None
# If no links are present, set value to zero
if len(all_strengths) == 0:
all_strengths = [0.0]
if node_pos is None:
pos = nx.circular_layout(deepcopy(G))
else:
pos = {}
for i in range(N):
pos[i] = (node_pos["x"][i], node_pos["y"][i])
if cmap_nodes is None:
node_color = None
node_rings = {
0: {
"sizes": None,
"color_array": node_color,
"cmap": cmap_nodes,
"vmin": vmin_nodes,
"vmax": vmax_nodes,
"ticks": node_ticks,
"label": node_colorbar_label,
"colorbar": show_colorbar,
}
}
_draw_network_with_curved_edges(
fig=fig,
ax=ax,
G=deepcopy(G),
pos=pos,
# dictionary of rings: {0:{'sizes':(N,)-array, 'color_array':(N,)-array
# or None, 'cmap':string,
node_rings=node_rings,
# 'vmin':float or None, 'vmax':float or None, 'label':string or None}}
node_labels=var_names,
node_label_size=node_label_size,
node_alpha=alpha,
standard_size=node_size,
node_aspect=node_aspect,
standard_cmap="OrRd",
standard_color="orange",
log_sizes=False,
cmap_links=cmap_edges,
links_vmin=vmin_edges,
links_vmax=vmax_edges,
links_ticks=edge_ticks,
# cmap_links_edges='YlOrRd', links_edges_vmin=-1., links_edges_vmax=1.,
# links_edges_ticks=.2, link_edge_colorbar_label='link_edge',
arrowstyle="simple",
arrowhead_size=arrowhead_size,
curved_radius=curved_radius,
label_fontsize=label_fontsize,
link_label_fontsize=link_label_fontsize,
link_colorbar_label=link_colorbar_label,
network_lower_bound=network_lower_bound,
show_colorbar=show_colorbar,
# label_fraction=label_fraction,
)
if save_name is not None:
pyplot.savefig(save_name, dpi=300)
else:
return fig, ax
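# Usage sketch (assumed inputs): val_matrix of shape (N, N, tau_max+1) and a string
# link_matrix with entries like '-->' or 'o-o' (e.g. from a tigramite causal discovery result):
#   plot_graph(link_matrix=link_matrix, val_matrix=val_matrix, var_names=["X", "Y", "Z"])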
def _reverse_patt(patt):
"""Inverts a link pattern"""
if patt == "":
return ""
left_mark, middle_mark, right_mark = patt[0], patt[1], patt[2]
if left_mark == "<":
new_right_mark = ">"
else:
new_right_mark = left_mark
if right_mark == ">":
new_left_mark = "<"
else:
new_left_mark = right_mark
return new_left_mark + middle_mark + new_right_mark
# if patt in ['---', 'o--', '--o', 'o-o', '']:
# return patt[::-1]
# elif patt == '<->':
# return '<->'
# elif patt == 'o->':
# return '<-o'
# elif patt == '<-o':
# return 'o->'
# elif patt == '-->':
# return '<--'
# elif patt == '<--':
# return '-->'
def _check_matrices(link_matrix, val_matrix, link_width, link_attribute, sig_thres):
if link_matrix is None and (val_matrix is None or sig_thres is None):
raise ValueError(
"Need to specify either val_matrix together with sig_thres, or link_matrix"
)
if link_matrix is not None:
pass
elif link_matrix is None and sig_thres is not None and val_matrix is not None:
link_matrix = np.abs(val_matrix) >= sig_thres
else:
raise ValueError(
"Need to specify either val_matrix together with sig_thres, or link_matrix"
)
if link_matrix.dtype != "<U3":
# Transform to new link_matrix data type U3
old_matrix = np.copy(link_matrix)
link_matrix = np.zeros(old_matrix.shape, dtype="<U3")
link_matrix[:] = ""
for i, j, tau in zip(*np.where(old_matrix)):
if tau == 0:
if old_matrix[j, i, 0] == 0:
link_matrix[i, j, 0] = "-->"
link_matrix[j, i, 0] = "<--"
else:
link_matrix[i, j, 0] = "o-o"
link_matrix[j, i, 0] = "o-o"
else:
link_matrix[i, j, tau] = "-->"
else:
# print(link_matrix[:,:,0])
# Assert that link_matrix has valid and consistent lag-zero entries
for i, j, tau in zip(*np.where(link_matrix)):
if tau == 0:
if link_matrix[i, j, 0] != _reverse_patt(link_matrix[j, i, 0]):
raise ValueError(
"link_matrix needs to have consistent lag-zero patterns (eg"
" link_matrix[i,j,0]='-->' requires link_matrix[j,i,0]='<--')"
)
if (
val_matrix is not None
and val_matrix[i, j, 0] != val_matrix[j, i, 0]
):
raise ValueError("val_matrix needs to be symmetric for lag-zero")
if (
link_width is not None
and link_width[i, j, 0] != link_width[j, i, 0]
):
raise ValueError("link_width needs to be symmetric for lag-zero")
if (
link_attribute is not None
and link_attribute[i, j, 0] != link_attribute[j, i, 0]
):
raise ValueError(
"link_attribute needs to be symmetric for lag-zero"
)
if link_matrix[i, j, tau] not in [
"---",
"o--",
"--o",
"o-o",
"o->",
"<-o",
"-->",
"<--",
"<->",
"x-o",
"o-x",
"x--",
"--x",
"x->",
"<-x",
"x-x",
"<-+",
"+->",
]:
raise ValueError("Invalid link_matrix entry.")
if val_matrix is None:
val_matrix = (link_matrix != "").astype("int")
if link_width is not None and not np.all(link_width >= 0.0):
raise ValueError("link_width must be non-negative")
return link_matrix, val_matrix, link_width, link_attribute
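# Illustration (hypothetical 2-variable case, tau_max=1): a boolean link_matrix is
# converted to the string pattern dtype, a lagged link becoming '-->':
#   lm = np.zeros((2, 2, 2), dtype=bool); lm[0, 1, 1] = True
#   _check_matrices(lm, None, None, None, None)[0][0, 1, 1] == '-->'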
def plot_time_series_graph(
link_matrix=None,
val_matrix=None,
sig_thres=None,
var_names=None,
fig_ax=None,
figsize=None,
link_colorbar_label="MCI",
save_name=None,
link_width=None,
link_attribute=None,
arrow_linewidth=8,
vmin_edges=-1,
vmax_edges=1.0,
edge_ticks=0.4,
cmap_edges="RdBu_r",
order=None,
node_size=0.1,
node_aspect=None,
arrowhead_size=20,
curved_radius=0.2,
label_fontsize=12,
alpha=1.0,
node_label_size=12,
label_space_left=0.1,
label_space_top=0.0,
network_lower_bound=0.2,
inner_edge_style="dashed",
):
"""Creates a time series graph.
This is still in beta. The time series graph's links are colored by
val_matrix.
Parameters
----------
link_matrix : bool array-like, optional (default: None)
Matrix of significant links. Must be of same shape as val_matrix. Either
sig_thres or link_matrix has to be provided.
val_matrix : array_like
Matrix of shape (N, N, tau_max+1) containing test statistic values.
sig_thres : array-like, optional (default: None)
Matrix of significance thresholds. Must be of same shape as val_matrix.
Either sig_thres or link_matrix has to be provided.
var_names : list, optional (default: None)
List of variable names. If None, range(N) is used.
fig_ax : tuple of figure and axis object, optional (default: None)
Figure and axes instance. If None they are created.
figsize : tuple
Size of figure.
save_name : str, optional (default: None)
Name of figure file to save figure. If None, figure is shown in window.
link_colorbar_label : str, optional (default: 'MCI')
Test statistic label.
link_width : array-like, optional (default: None)
Array of val_matrix.shape specifying relative link width with maximum
given by arrow_linewidth. If None, all links have same width.
order : list, optional (default: None)
order of variables from top to bottom.
    arrow_linewidth : float, optional (default: 8)
Linewidth.
vmin_edges : float, optional (default: -1)
Link colorbar scale lower bound.
vmax_edges : float, optional (default: 1)
Link colorbar scale upper bound.
edge_ticks : float, optional (default: 0.4)
Link tick mark interval.
cmap_edges : str, optional (default: 'RdBu_r')
Colormap for links.
node_size : int, optional (default: 0.1)
Node size.
node_aspect : float, optional (default: None)
        Ratio between the height and width of the variable nodes.
    arrowhead_size : int, optional (default: 20)
        Size of link arrow head. Passed on to FancyArrowPatch object.
    curved_radius : float, optional (default: 0.2)
Curvature of links. Passed on to FancyArrowPatch object.
label_fontsize : int, optional (default: 10)
Fontsize of colorbar labels.
alpha : float, optional (default: 1.)
Opacity.
node_label_size : int, optional (default: 10)
Fontsize of node labels.
link_label_fontsize : int, optional (default: 6)
Fontsize of link labels.
label_space_left : float, optional (default: 0.1)
Fraction of horizontal figure space to allocate left of plot for labels.
label_space_top : float, optional (default: 0.)
Fraction of vertical figure space to allocate top of plot for labels.
network_lower_bound : float, optional (default: 0.2)
Fraction of vertical space below graph plot.
inner_edge_style : string, optional (default: 'dashed')
Style of inner_edge contemporaneous links.
"""
if fig_ax is None:
fig = pyplot.figure(figsize=figsize)
ax = fig.add_subplot(111, frame_on=False)
else:
fig, ax = fig_ax
(link_matrix, val_matrix, link_width, link_attribute) = _check_matrices(
link_matrix, val_matrix, link_width, link_attribute, sig_thres
)
N, N, dummy = link_matrix.shape
tau_max = dummy - 1
max_lag = tau_max + 1
if np.count_nonzero(link_matrix == "") == link_matrix.size:
link_matrix[0, 1, 0] = "---"
no_links = True
else:
no_links = False
if var_names is None:
var_names = range(N)
if order is None:
order = range(N)
if set(order) != set(range(N)):
raise ValueError("order must be a permutation of range(N)")
def translate(row, lag):
return row * max_lag + lag
# Define graph links by absolute maximum (positive or negative like for
# partial correlation)
tsg = np.zeros((N * max_lag, N * max_lag))
tsg_val = np.zeros((N * max_lag, N * max_lag))
tsg_width = np.zeros((N * max_lag, N * max_lag))
tsg_style = np.zeros((N * max_lag, N * max_lag), dtype=link_matrix.dtype)
if link_attribute is not None:
tsg_attr = np.zeros((N * max_lag, N * max_lag), dtype=link_attribute.dtype)
# Only draw link in one direction among contemp
# Remove lower triangle
link_matrix_tsg = np.copy(link_matrix)
link_matrix_tsg[:, :, 0] = np.triu(link_matrix[:, :, 0])
for i, j, tau in np.column_stack(np.where(link_matrix_tsg)):
for t in range(max_lag):
if (
0 <= translate(i, t - tau)
and translate(i, t - tau) % max_lag <= translate(j, t) % max_lag
):
tsg[
translate(i, t - tau), translate(j, t)
] = 1.0 # val_matrix[i, j, tau]
tsg_val[translate(i, t - tau), translate(j, t)] = val_matrix[i, j, tau]
tsg_style[translate(i, t - tau), translate(j, t)] = link_matrix[
i, j, tau
]
if link_width is not None:
tsg_width[translate(i, t - tau), translate(j, t)] = (
link_width[i, j, tau] / link_width.max() * arrow_linewidth
)
if link_attribute is not None:
tsg_attr[translate(i, t - tau), translate(j, t)] = link_attribute[
i, j, tau
]
G = nx.DiGraph(tsg)
# node_color = np.zeros(N)
# list of all strengths for color map
all_strengths = []
# Add attributes, contemporaneous and lagged links are handled separately
for (u, v, dic) in G.edges(data=True):
dic["no_links"] = no_links
if u != v:
dic["inner_edge"] = False
dic["outer_edge"] = True
dic["outer_edge_type"] = tsg_style[u, v]
dic["outer_edge_alpha"] = alpha
if link_width is None:
# fraction of nonzero values
dic["outer_edge_width"] = dic["inner_edge_width"] = arrow_linewidth
else:
dic["outer_edge_width"] = dic["inner_edge_width"] = tsg_width[u, v]
if link_attribute is None:
dic["outer_edge_attribute"] = None
else:
dic["outer_edge_attribute"] = tsg_attr[u, v]
# value at argmax of average
dic["outer_edge_color"] = tsg_val[u, v]
all_strengths.append(dic["outer_edge_color"])
dic["label"] = None
# If no links are present, set value to zero
if len(all_strengths) == 0:
all_strengths = [0.0]
posarray = np.zeros((N * max_lag, 2))
for i in range(N * max_lag):
posarray[i] = np.array([(i % max_lag), (1.0 - i // max_lag)])
pos_tmp = {}
for i in range(N * max_lag):
# for n in range(N):
# for tau in range(max_lag):
# i = n*N + tau
pos_tmp[i] = np.array(
[
((i % max_lag) - posarray.min(axis=0)[0])
/ (posarray.max(axis=0)[0] - posarray.min(axis=0)[0]),
((1.0 - i // max_lag) - posarray.min(axis=0)[1])
/ (posarray.max(axis=0)[1] - posarray.min(axis=0)[1]),
]
)
pos_tmp[i][np.isnan(pos_tmp[i])] = 0.0
pos = {}
for n in range(N):
for tau in range(max_lag):
pos[n * max_lag + tau] = pos_tmp[order[n] * max_lag + tau]
node_rings = {
0: {"sizes": None, "color_array": None, "label": "", "colorbar": False,}
}
node_labels = ["" for i in range(N * max_lag)]
_draw_network_with_curved_edges(
fig=fig,
ax=ax,
G=deepcopy(G),
pos=pos,
node_rings=node_rings,
node_labels=node_labels,
node_label_size=node_label_size,
node_alpha=alpha,
standard_size=node_size,
node_aspect=node_aspect,
standard_cmap="OrRd",
standard_color="lightgrey",
log_sizes=False,
cmap_links=cmap_edges,
links_vmin=vmin_edges,
links_vmax=vmax_edges,
links_ticks=edge_ticks,
arrowstyle="simple",
arrowhead_size=arrowhead_size,
curved_radius=curved_radius,
label_fontsize=label_fontsize,
label_fraction=0.5,
link_colorbar_label=link_colorbar_label,
inner_edge_curved=True,
network_lower_bound=network_lower_bound,
inner_edge_style=inner_edge_style,
)
for i in range(N):
trans = transforms.blended_transform_factory(fig.transFigure, ax.transData)
ax.text(
label_space_left,
pos[order[i] * max_lag][1],
f"{var_names[order[i]]}",
fontsize=label_fontsize,
horizontalalignment="left",
verticalalignment="center",
transform=trans,
)
for tau in np.arange(max_lag - 1, -1, -1):
trans = transforms.blended_transform_factory(ax.transData, fig.transFigure)
if tau == max_lag - 1:
ax.text(
pos[tau][0],
1.0 - label_space_top,
r"$t$",
fontsize=int(label_fontsize * 0.8),
horizontalalignment="center",
verticalalignment="top",
transform=trans,
)
else:
ax.text(
pos[tau][0],
1.0 - label_space_top,
r"$t-%s$" % str(max_lag - tau - 1),
fontsize=int(label_fontsize * 0.8),
horizontalalignment="center",
verticalalignment="top",
transform=trans,
)
if save_name is not None:
pyplot.savefig(save_name, dpi=300)
else:
return fig, ax
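# A minimal usage sketch for plot_time_series_graph above. It assumes numpy is
# available as np (as imported in this module); the variable names and link
# entries are hypothetical and only illustrate the expected shapes and codes:
#
#     link_matrix = np.zeros((2, 2, 3), dtype='<U3')
#     link_matrix[0, 1, 1] = "-->"    # lagged link X0_{t-1} --> X1_t
#     link_matrix[0, 1, 0] = "o-o"    # contemporaneous links must be given
#     link_matrix[1, 0, 0] = "o-o"    # symmetrically at lag zero
#     fig, ax = plot_time_series_graph(link_matrix=link_matrix,
#                                      var_names=["X0", "X1"],
#                                      link_colorbar_label="MCI")
#
# With save_name set, the figure is written to disk instead of being returned.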
def plot_mediation_time_series_graph(
path_node_array,
tsg_path_val_matrix,
var_names=None,
fig_ax=None,
figsize=None,
link_colorbar_label="link coeff. (edge color)",
node_colorbar_label="MCE (node color)",
save_name=None,
link_width=None,
arrow_linewidth=8,
vmin_edges=-1,
vmax_edges=1.0,
edge_ticks=0.4,
cmap_edges="RdBu_r",
order=None,
vmin_nodes=-1.0,
vmax_nodes=1.0,
node_ticks=0.4,
cmap_nodes="RdBu_r",
node_size=0.1,
node_aspect=None,
arrowhead_size=20,
curved_radius=0.2,
label_fontsize=12,
alpha=1.0,
node_label_size=12,
label_space_left=0.1,
label_space_top=0.0,
network_lower_bound=0.2,
):
"""Creates a mediation time series graph plot.
This is still in beta. The time series graph's links are colored by
val_matrix.
Parameters
----------
tsg_path_val_matrix : array_like
Matrix of shape (N*tau_max, N*tau_max) containing link weight values.
path_node_array: array_like
Array of shape (N,) containing node values.
var_names : list, optional (default: None)
List of variable names. If None, range(N) is used.
fig_ax : tuple of figure and axis object, optional (default: None)
Figure and axes instance. If None they are created.
figsize : tuple
Size of figure.
save_name : str, optional (default: None)
Name of figure file to save figure. If None, figure is shown in window.
link_colorbar_label : str, optional (default: 'link coeff. (edge color)')
Link colorbar label.
node_colorbar_label : str, optional (default: 'MCE (node color)')
Node colorbar label.
link_width : array-like, optional (default: None)
Array of val_matrix.shape specifying relative link width with maximum
given by arrow_linewidth. If None, all links have same width.
order : list, optional (default: None)
order of variables from top to bottom.
    arrow_linewidth : float, optional (default: 8)
Linewidth.
vmin_edges : float, optional (default: -1)
Link colorbar scale lower bound.
vmax_edges : float, optional (default: 1)
Link colorbar scale upper bound.
edge_ticks : float, optional (default: 0.4)
Link tick mark interval.
cmap_edges : str, optional (default: 'RdBu_r')
Colormap for links.
    vmin_nodes : float, optional (default: -1)
        Node colorbar scale lower bound.
    vmax_nodes : float, optional (default: 1)
        Node colorbar scale upper bound.
    node_ticks : float, optional (default: 0.4)
        Node tick mark interval.
    cmap_nodes : str, optional (default: 'RdBu_r')
        Colormap for nodes.
    node_size : int, optional (default: 0.1)
        Node size.
    node_aspect : float, optional (default: None)
        Ratio between the height and width of the variable nodes.
    arrowhead_size : int, optional (default: 20)
        Size of link arrow head. Passed on to FancyArrowPatch object.
    curved_radius : float, optional (default: 0.2)
        Curvature of links. Passed on to FancyArrowPatch object.
    label_fontsize : int, optional (default: 12)
        Fontsize of colorbar labels.
    alpha : float, optional (default: 1.)
        Opacity.
    node_label_size : int, optional (default: 12)
        Fontsize of node labels.
label_space_left : float, optional (default: 0.1)
Fraction of horizontal figure space to allocate left of plot for labels.
label_space_top : float, optional (default: 0.)
Fraction of vertical figure space to allocate top of plot for labels.
network_lower_bound : float, optional (default: 0.2)
Fraction of vertical space below graph plot.
"""
N = len(path_node_array)
Nmaxlag = tsg_path_val_matrix.shape[0]
max_lag = Nmaxlag // N
if var_names is None:
var_names = range(N)
if fig_ax is None:
fig = pyplot.figure(figsize=figsize)
ax = fig.add_subplot(111, frame_on=False)
else:
fig, ax = fig_ax
if link_width is not None and not np.all(link_width >= 0.0):
raise ValueError("link_width must be non-negative")
if order is None:
order = range(N)
if set(order) != set(range(N)):
raise ValueError("order must be a permutation of range(N)")
def translate(row, lag):
return row * max_lag + lag
if np.count_nonzero(tsg_path_val_matrix) == np.count_nonzero(
np.diagonal(tsg_path_val_matrix)
):
diagonal = True
else:
diagonal = False
if np.count_nonzero(tsg_path_val_matrix) == tsg_path_val_matrix.size or diagonal:
tsg_path_val_matrix[0, 1] = 1
no_links = True
else:
no_links = False
# Define graph links by absolute maximum (positive or negative like for
# partial correlation)
tsg = tsg_path_val_matrix
tsg_attr = np.zeros((N * max_lag, N * max_lag))
G = nx.DiGraph(tsg)
# node_color = np.zeros(N)
# list of all strengths for color map
all_strengths = []
# Add attributes, contemporaneous and lagged links are handled separately
for (u, v, dic) in G.edges(data=True):
dic["no_links"] = no_links
dic["outer_edge_attribute"] = None
if u != v:
if u % max_lag == v % max_lag:
dic["inner_edge"] = True
dic["outer_edge"] = False
else:
dic["inner_edge"] = False
dic["outer_edge"] = True
dic["inner_edge_alpha"] = alpha
dic["inner_edge_color"] = _get_absmax(
np.array([[[tsg[u, v], tsg[v, u]]]])
).squeeze()
dic["inner_edge_width"] = arrow_linewidth
all_strengths.append(dic["inner_edge_color"])
dic["outer_edge_alpha"] = alpha
dic["outer_edge_width"] = arrow_linewidth
# value at argmax of average
dic["outer_edge_color"] = tsg[u, v]
all_strengths.append(dic["outer_edge_color"])
dic["label"] = None
# dic['outer_edge_edge'] = False
# dic['outer_edge_edgecolor'] = None
# dic['inner_edge_edge'] = False
# dic['inner_edge_edgecolor'] = None
# If no links are present, set value to zero
if len(all_strengths) == 0:
all_strengths = [0.0]
posarray = np.zeros((N * max_lag, 2))
for i in range(N * max_lag):
posarray[i] = np.array([(i % max_lag), (1.0 - i // max_lag)])
pos_tmp = {}
for i in range(N * max_lag):
# for n in range(N):
# for tau in range(max_lag):
# i = n*N + tau
pos_tmp[i] = np.array(
[
((i % max_lag) - posarray.min(axis=0)[0])
/ (posarray.max(axis=0)[0] - posarray.min(axis=0)[0]),
((1.0 - i // max_lag) - posarray.min(axis=0)[1])
/ (posarray.max(axis=0)[1] - posarray.min(axis=0)[1]),
]
)
pos_tmp[i][np.isnan(pos_tmp[i])] = 0.0
pos = {}
for n in range(N):
for tau in range(max_lag):
pos[n * max_lag + tau] = pos_tmp[order[n] * max_lag + tau]
node_color = np.zeros(N * max_lag)
for inet, n in enumerate(range(0, N * max_lag, max_lag)):
node_color[n : n + max_lag] = path_node_array[inet]
# node_rings = {0: {'sizes': None, 'color_array': color_array,
# 'label': '', 'colorbar': False,
# }
# }
node_rings = {
0: {
"sizes": None,
"color_array": node_color,
"cmap": cmap_nodes,
"vmin": vmin_nodes,
"vmax": vmax_nodes,
"ticks": node_ticks,
"label": node_colorbar_label,
"colorbar": True,
}
}
# ] for v in range(max_lag)]
node_labels = ["" for i in range(N * max_lag)]
_draw_network_with_curved_edges(
fig=fig,
ax=ax,
G=deepcopy(G),
pos=pos,
# dictionary of rings: {0:{'sizes':(N,)-array, 'color_array':(N,)-array
# or None, 'cmap':string,
node_rings=node_rings,
# 'vmin':float or None, 'vmax':float or None, 'label':string or None}}
node_labels=node_labels,
node_label_size=node_label_size,
node_alpha=alpha,
standard_size=node_size,
node_aspect=node_aspect,
standard_cmap="OrRd",
standard_color="grey",
log_sizes=False,
cmap_links=cmap_edges,
links_vmin=vmin_edges,
links_vmax=vmax_edges,
links_ticks=edge_ticks,
# cmap_links_edges='YlOrRd', links_edges_vmin=-1., links_edges_vmax=1.,
# links_edges_ticks=.2, link_edge_colorbar_label='link_edge',
arrowhead_size=arrowhead_size,
curved_radius=curved_radius,
label_fontsize=label_fontsize,
label_fraction=0.5,
link_colorbar_label=link_colorbar_label,
inner_edge_curved=True,
network_lower_bound=network_lower_bound
# inner_edge_style=inner_edge_style
)
for i in range(N):
trans = transforms.blended_transform_factory(fig.transFigure, ax.transData)
ax.text(
label_space_left,
pos[order[i] * max_lag][1],
"%s" % str(var_names[order[i]]),
fontsize=label_fontsize,
horizontalalignment="left",
verticalalignment="center",
transform=trans,
)
for tau in np.arange(max_lag - 1, -1, -1):
trans = transforms.blended_transform_factory(ax.transData, fig.transFigure)
if tau == max_lag - 1:
ax.text(
pos[tau][0],
1.0 - label_space_top,
r"$t$",
fontsize=label_fontsize,
horizontalalignment="center",
verticalalignment="top",
transform=trans,
)
else:
ax.text(
pos[tau][0],
1.0 - label_space_top,
r"$t-%s$" % str(max_lag - tau - 1),
fontsize=label_fontsize,
horizontalalignment="center",
verticalalignment="top",
transform=trans,
)
# fig.subplots_adjust(left=0.1, right=.98, bottom=.25, top=.9)
# savestring = os.path.expanduser(save_name)
if save_name is not None:
pyplot.savefig(save_name)
else:
pyplot.show()
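# A minimal usage sketch for plot_mediation_time_series_graph above. The
# numbers are hypothetical; tsg_path_val_matrix must have shape
# (N*max_lag, N*max_lag) and path_node_array shape (N,):
#
#     N, max_lag = 2, 3
#     tsg_path_val_matrix = np.zeros((N * max_lag, N * max_lag))
#     tsg_path_val_matrix[0, 1 * max_lag + 1] = 0.5   # one lagged path link
#     path_node_array = np.array([0.3, 0.7])          # MCE per variable
#     plot_mediation_time_series_graph(path_node_array, tsg_path_val_matrix,
#                                      var_names=["X0", "X1"])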
def plot_mediation_graph(
path_val_matrix,
path_node_array=None,
var_names=None,
fig_ax=None,
figsize=None,
save_name=None,
link_colorbar_label="link coeff. (edge color)",
node_colorbar_label="MCE (node color)",
link_width=None,
node_pos=None,
arrow_linewidth=10.0,
vmin_edges=-1,
vmax_edges=1.0,
edge_ticks=0.4,
cmap_edges="RdBu_r",
vmin_nodes=-1.0,
vmax_nodes=1.0,
node_ticks=0.4,
cmap_nodes="RdBu_r",
node_size=0.3,
node_aspect=None,
arrowhead_size=20,
curved_radius=0.2,
label_fontsize=10,
lag_array=None,
alpha=1.0,
node_label_size=10,
link_label_fontsize=10,
network_lower_bound=0.2,
):
"""Creates a network plot visualizing the pathways of a mediation analysis.
This is still in beta. The network is defined from non-zero entries in
``path_val_matrix``. Nodes denote variables, straight links contemporaneous
dependencies and curved arrows lagged dependencies. The node color denotes
the mediated causal effect (MCE) and the link color the value at the lag
with maximal link coefficient. The link label lists the lags with
significant dependency in order of absolute magnitude. The network can also
be plotted over a map drawn before on the same axis. Then the node positions
can be supplied in appropriate axis coordinates via node_pos.
Parameters
----------
path_val_matrix : array_like
Matrix of shape (N, N, tau_max+1) containing link weight values.
path_node_array: array_like
Array of shape (N,) containing node values.
var_names : list, optional (default: None)
List of variable names. If None, range(N) is used.
fig_ax : tuple of figure and axis object, optional (default: None)
Figure and axes instance. If None they are created.
figsize : tuple
Size of figure.
save_name : str, optional (default: None)
Name of figure file to save figure. If None, figure is shown in window.
link_colorbar_label : str, optional (default: 'link coeff. (edge color)')
Link colorbar label.
node_colorbar_label : str, optional (default: 'MCE (node color)')
Node colorbar label.
link_width : array-like, optional (default: None)
Array of val_matrix.shape specifying relative link width with maximum
given by arrow_linewidth. If None, all links have same width.
node_pos : dictionary, optional (default: None)
Dictionary of node positions in axis coordinates of form
        node_pos = {'x':array of shape (N,), 'y':array of shape (N,)}. These
coordinates could have been transformed before for basemap plots.
    arrow_linewidth : float, optional (default: 10)
Linewidth.
vmin_edges : float, optional (default: -1)
Link colorbar scale lower bound.
vmax_edges : float, optional (default: 1)
Link colorbar scale upper bound.
edge_ticks : float, optional (default: 0.4)
Link tick mark interval.
cmap_edges : str, optional (default: 'RdBu_r')
Colormap for links.
    vmin_nodes : float, optional (default: -1)
        Node colorbar scale lower bound.
    vmax_nodes : float, optional (default: 1)
        Node colorbar scale upper bound.
    node_ticks : float, optional (default: 0.4)
        Node tick mark interval.
    cmap_nodes : str, optional (default: 'RdBu_r')
        Colormap for nodes.
    node_size : int, optional (default: 0.3)
        Node size.
    node_aspect : float, optional (default: None)
        Ratio between the height and width of the variable nodes.
    arrowhead_size : int, optional (default: 20)
        Size of link arrow head. Passed on to FancyArrowPatch object.
    curved_radius : float, optional (default: 0.2)
Curvature of links. Passed on to FancyArrowPatch object.
label_fontsize : int, optional (default: 10)
Fontsize of colorbar labels.
alpha : float, optional (default: 1.)
Opacity.
node_label_size : int, optional (default: 10)
Fontsize of node labels.
    link_label_fontsize : int, optional (default: 10)
Fontsize of link labels.
network_lower_bound : float, optional (default: 0.2)
Fraction of vertical space below graph plot.
lag_array : array, optional (default: None)
Optional specification of lags overwriting np.arange(0, tau_max+1)
"""
val_matrix = path_val_matrix
if fig_ax is None:
fig = pyplot.figure(figsize=figsize)
ax = fig.add_subplot(111, frame_on=False)
else:
fig, ax = fig_ax
if link_width is not None and not np.all(link_width >= 0.0):
raise ValueError("link_width must be non-negative")
N, N, dummy = val_matrix.shape
tau_max = dummy - 1
if np.count_nonzero(val_matrix) == np.count_nonzero(np.diagonal(val_matrix)):
diagonal = True
else:
diagonal = False
if np.count_nonzero(val_matrix) == val_matrix.size or diagonal:
val_matrix[0, 1, 0] = 1
no_links = True
else:
no_links = False
if var_names is None:
var_names = range(N)
# Define graph links by absolute maximum (positive or negative like for
# partial correlation)
# val_matrix[np.abs(val_matrix) < sig_thres] = 0.
link_matrix = val_matrix != 0.0
net = _get_absmax(val_matrix)
G = nx.DiGraph(net)
node_color = np.zeros(N)
# list of all strengths for color map
all_strengths = []
# Add attributes, contemporaneous and lagged links are handled separately
for (u, v, dic) in G.edges(data=True):
dic["outer_edge_attribute"] = None
dic["no_links"] = no_links
# average lagfunc for link u --> v ANDOR u -- v
if tau_max > 0:
# argmax of absolute maximum
argmax = np.abs(val_matrix[u, v][1:]).argmax() + 1
else:
argmax = 0
if u != v:
# For contemp links masking or finite samples can lead to different
# values for u--v and v--u
# Here we use the maximum for the width and weight (=color)
# of the link
# Draw link if u--v OR v--u at lag 0 is nonzero
# dic['inner_edge'] = ((np.abs(val_matrix[u, v][0]) >=
# sig_thres[u, v][0]) or
# (np.abs(val_matrix[v, u][0]) >=
# sig_thres[v, u][0]))
dic["inner_edge"] = link_matrix[u, v, 0] or link_matrix[v, u, 0]
dic["inner_edge_alpha"] = alpha
# value at argmax of average
if np.abs(val_matrix[u, v][0] - val_matrix[v, u][0]) > 0.0001:
print(
"Contemporaneous I(%d; %d)=%.3f != I(%d; %d)=%.3f"
% (u, v, val_matrix[u, v][0], v, u, val_matrix[v, u][0])
+ " due to conditions, finite sample effects or "
"masking, here edge color = "
"larger (absolute) value."
)
dic["inner_edge_color"] = _get_absmax(
np.array([[[val_matrix[u, v][0], val_matrix[v, u][0]]]])
).squeeze()
if link_width is None:
dic["inner_edge_width"] = arrow_linewidth
else:
dic["inner_edge_width"] = (
link_width[u, v, 0] / link_width.max() * arrow_linewidth
)
all_strengths.append(dic["inner_edge_color"])
if tau_max > 0:
# True if ensemble mean at lags > 0 is nonzero
# dic['outer_edge'] = np.any(
# np.abs(val_matrix[u, v][1:]) >= sig_thres[u, v][1:])
dic["outer_edge"] = np.any(link_matrix[u, v, 1:])
else:
dic["outer_edge"] = False
dic["outer_edge_alpha"] = alpha
if link_width is None:
# fraction of nonzero values
dic["outer_edge_width"] = arrow_linewidth
else:
dic["outer_edge_width"] = (
link_width[u, v, argmax] / link_width.max() * arrow_linewidth
)
# value at argmax of average
dic["outer_edge_color"] = val_matrix[u, v][argmax]
all_strengths.append(dic["outer_edge_color"])
# Sorted list of significant lags (only if robust wrt
# d['min_ensemble_frac'])
if tau_max > 0:
lags = np.abs(val_matrix[u, v][1:]).argsort()[::-1] + 1
sig_lags = (np.where(link_matrix[u, v, 1:])[0] + 1).tolist()
else:
lags, sig_lags = [], []
if lag_array is not None:
dic["label"] = str([lag_array[l] for l in lags if l in sig_lags])[1:-1]
else:
dic["label"] = str([l for l in lags if l in sig_lags])[1:-1]
else:
# Node color is max of average autodependency
node_color[u] = val_matrix[u, v][argmax]
# dic['outer_edge_edge'] = False
# dic['outer_edge_edgecolor'] = None
# dic['inner_edge_edge'] = False
# dic['inner_edge_edgecolor'] = None
node_color = path_node_array
# print node_color
# If no links are present, set value to zero
if len(all_strengths) == 0:
all_strengths = [0.0]
if node_pos is None:
pos = nx.circular_layout(deepcopy(G))
# pos = nx.spring_layout(deepcopy(G))
else:
pos = {}
for i in range(N):
pos[i] = (node_pos["x"][i], node_pos["y"][i])
node_rings = {
0: {
"sizes": None,
"color_array": node_color,
"cmap": cmap_nodes,
"vmin": vmin_nodes,
"vmax": vmax_nodes,
"ticks": node_ticks,
"label": node_colorbar_label,
"colorbar": True,
}
}
_draw_network_with_curved_edges(
fig=fig,
ax=ax,
G=deepcopy(G),
pos=pos,
# dictionary of rings: {0:{'sizes':(N,)-array, 'color_array':(N,)-array
# or None, 'cmap':string,
node_rings=node_rings,
# 'vmin':float or None, 'vmax':float or None, 'label':string or None}}
node_labels=var_names,
node_label_size=node_label_size,
node_alpha=alpha,
standard_size=node_size,
node_aspect=node_aspect,
standard_cmap="OrRd",
standard_color="orange",
log_sizes=False,
cmap_links=cmap_edges,
links_vmin=vmin_edges,
links_vmax=vmax_edges,
links_ticks=edge_ticks,
# cmap_links_edges='YlOrRd', links_edges_vmin=-1., links_edges_vmax=1.,
# links_edges_ticks=.2, link_edge_colorbar_label='link_edge',
arrowhead_size=arrowhead_size,
curved_radius=curved_radius,
label_fontsize=label_fontsize,
link_label_fontsize=link_label_fontsize,
link_colorbar_label=link_colorbar_label,
network_lower_bound=network_lower_bound,
# label_fraction=label_fraction,
# inner_edge_style=inner_edge_style
)
# fig.subplots_adjust(left=0.1, right=.9, bottom=.25, top=.95)
# savestring = os.path.expanduser(save_name)
if save_name is not None:
pyplot.savefig(save_name)
else:
pyplot.show()
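# A minimal usage sketch for plot_mediation_graph above. The values are
# hypothetical; path_val_matrix has shape (N, N, tau_max + 1) and
# path_node_array shape (N,):
#
#     path_val_matrix = np.zeros((2, 2, 3))
#     path_val_matrix[0, 1, 1] = 0.5          # lagged effect of X0 on X1
#     path_node_array = np.array([0.2, 0.6])  # mediated causal effects (MCE)
#     plot_mediation_graph(path_val_matrix, path_node_array=path_node_array,
#                          var_names=["X0", "X1"])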
#
# Functions to plot time series graphs from links including ancestors
#
def plot_tsg(links, X, Y, Z=None, anc_x=None, anc_y=None, anc_xy=None):
"""Plots TSG that is input in format (N*max_lag, N*max_lag).
Compared to the tigramite plotting function here links
X^i_{t-tau} --> X^j_t can be missing for different t'. Helpful to
visualize the conditioned TSG.
"""
def varlag2node(var, lag):
"""Translate from (var, lag) notation to node in TSG.
lag must be <= 0.
"""
return var * max_lag + lag
def node2varlag(node):
"""Translate from node in TSG to (var, -tau) notation.
Here tau is <= 0.
"""
var = node // max_lag
tau = node % (max_lag) - (max_lag - 1)
return var, tau
def _links_to_tsg(link_coeffs, max_lag=None):
"""Transform link_coeffs to time series graph.
TSG is of shape (N*max_lag, N*max_lag).
"""
N = len(link_coeffs)
# Get maximum lag
min_lag_links, max_lag_links = pp._get_minmax_lag(link_coeffs)
# max_lag of TSG is max lag in links + 1 for the zero lag.
if max_lag is None:
max_lag = max_lag_links + 1
tsg = np.zeros((N * max_lag, N * max_lag))
for j in range(N):
for link_props in link_coeffs[j]:
i, lag = link_props[0]
tau = abs(lag)
coeff = link_props[1]
# func = link_props[2]
if coeff != 0.0:
for t in range(max_lag):
if (
0 <= varlag2node(i, t - tau)
and varlag2node(i, t - tau) % max_lag
<= varlag2node(j, t) % max_lag
):
tsg[varlag2node(i, t - tau), varlag2node(j, t)] = 1.0
return tsg
color_list = ["lightgrey", "grey", "black", "red", "blue", "orange"]
listcmap = ListedColormap(color_list)
N = len(links)
min_lag_links, max_lag_links = pp._get_minmax_lag(links)
max_lag = max_lag_links
for anc in X + Y:
max_lag = max(max_lag, abs(anc[1]))
for anc in Y:
max_lag = max(max_lag, abs(anc[1]))
if Z is not None:
for anc in Z:
max_lag = max(max_lag, abs(anc[1]))
if anc_x is not None:
for anc in anc_x:
max_lag = max(max_lag, abs(anc[1]))
if anc_y is not None:
for anc in anc_y:
max_lag = max(max_lag, abs(anc[1]))
if anc_xy is not None:
for anc in anc_xy:
max_lag = max(max_lag, abs(anc[1]))
max_lag = max_lag + 1
tsg = _links_to_tsg(links, max_lag=max_lag)
G = nx.DiGraph(tsg)
figsize = (3, 3)
link_colorbar_label = "MCI"
arrow_linewidth = 8.0
vmin_edges = -1
vmax_edges = 1.0
edge_ticks = 0.4
cmap_edges = "RdBu_r"
order = None
node_size = .1
arrowhead_size = 20
curved_radius = 0.2
label_fontsize = 10
alpha = 1.0
node_label_size = 10
label_space_left = 0.1
label_space_top = 0.0
network_lower_bound = 0.2
inner_edge_style = "dashed"
node_color = np.ones(N * max_lag) # , dtype = 'object')
node_color[:] = 0
if anc_x is not None:
for n in [varlag2node(itau[0], max_lag - 1 + itau[1]) for itau in anc_x]:
node_color[n] = 3
if anc_y is not None:
for n in [varlag2node(itau[0], max_lag - 1 + itau[1]) for itau in anc_y]:
node_color[n] = 4
if anc_xy is not None:
for n in [varlag2node(itau[0], max_lag - 1 + itau[1]) for itau in anc_xy]:
node_color[n] = 5
for x in X:
node_color[varlag2node(x[0], max_lag - 1 + x[1])] = 2
for y in Y:
node_color[varlag2node(y[0], max_lag - 1 + y[1])] = 2
if Z is not None:
for z in Z:
node_color[varlag2node(z[0], max_lag - 1 + z[1])] = 1
fig = pyplot.figure(figsize=figsize)
ax = fig.add_subplot(111, frame_on=False)
var_names = range(N)
order = range(N)
# list of all strengths for color map
all_strengths = []
# Add attributes, contemporaneous and lagged links are handled separately
for (u, v, dic) in G.edges(data=True):
if u != v:
if tsg[u, v] and tsg[v, u]:
dic["inner_edge"] = True
dic["outer_edge"] = False
else:
dic["inner_edge"] = False
dic["outer_edge"] = True
dic["inner_edge_alpha"] = alpha
dic["inner_edge_color"] = tsg[u, v]
dic["inner_edge_width"] = arrow_linewidth
dic["inner_edge_attribute"] = dic["outer_edge_attribute"] = None
all_strengths.append(dic["inner_edge_color"])
dic["outer_edge_alpha"] = alpha
dic["outer_edge_width"] = dic["inner_edge_width"] = arrow_linewidth
# value at argmax of average
dic["outer_edge_color"] = tsg[u, v]
all_strengths.append(dic["outer_edge_color"])
dic["label"] = None
# If no links are present, set value to zero
if len(all_strengths) == 0:
all_strengths = [0.0]
posarray = np.zeros((N * max_lag, 2))
for i in range(N * max_lag):
posarray[i] = np.array([(i % max_lag), (1.0 - i // max_lag)])
pos_tmp = {}
for i in range(N * max_lag):
pos_tmp[i] = np.array(
[
((i % max_lag) - posarray.min(axis=0)[0])
/ (posarray.max(axis=0)[0] - posarray.min(axis=0)[0]),
((1.0 - i // max_lag) - posarray.min(axis=0)[1])
/ (posarray.max(axis=0)[1] - posarray.min(axis=0)[1]),
]
)
pos_tmp[i][np.isnan(pos_tmp[i])] = 0.0
pos = {}
for n in range(N):
for tau in range(max_lag):
pos[n * max_lag + tau] = pos_tmp[order[n] * max_lag + tau]
node_rings = {
0: {
"sizes": None,
"color_array": node_color,
"label": "",
"colorbar": False,
"cmap": listcmap,
"vmin": 0,
"vmax": len(color_list),
}
}
node_labels = ["" for i in range(N * max_lag)]
_draw_network_with_curved_edges(
fig=fig,
ax=ax,
G=deepcopy(G),
pos=pos,
node_rings=node_rings,
node_labels=node_labels,
node_label_size=node_label_size,
node_alpha=alpha,
standard_size=node_size,
node_aspect=None,
standard_cmap="OrRd",
standard_color="lightgrey",
log_sizes=False,
cmap_links=cmap_edges,
links_vmin=vmin_edges,
links_vmax=vmax_edges,
links_ticks=edge_ticks,
arrowstyle="simple",
arrowhead_size=arrowhead_size,
curved_radius=curved_radius,
label_fontsize=label_fontsize,
label_fraction=0.5,
link_colorbar_label=link_colorbar_label,
inner_edge_curved=True,
network_lower_bound=network_lower_bound,
inner_edge_style=inner_edge_style,
)
for i in range(N):
trans = transforms.blended_transform_factory(fig.transFigure, ax.transData)
ax.text(
label_space_left,
pos[order[i] * max_lag][1],
"%s" % str(var_names[order[i]]),
fontsize=label_fontsize,
horizontalalignment="left",
verticalalignment="center",
transform=trans,
)
for tau in np.arange(max_lag - 1, -1, -1):
trans = transforms.blended_transform_factory(ax.transData, fig.transFigure)
if tau == max_lag - 1:
ax.text(
pos[tau][0],
1.0 - label_space_top,
r"$t$",
fontsize=int(label_fontsize * 0.7),
horizontalalignment="center",
verticalalignment="top",
transform=trans,
)
else:
ax.text(
pos[tau][0],
1.0 - label_space_top,
r"$t-%s$" % str(max_lag - tau - 1),
fontsize=int(label_fontsize * 0.7),
horizontalalignment="center",
verticalalignment="top",
transform=trans,
)
return fig, ax
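# A minimal usage sketch for plot_tsg above. It assumes the link dictionary
# follows the {target: [((driver, -lag), coeff), ...]} convention used by
# tigramite.data_processing; the concrete links and node sets are made up:
#
#     links = {0: [((0, -1), 0.8)],
#              1: [((0, -1), 0.5), ((1, -1), 0.6)]}
#     fig, ax = plot_tsg(links, X=[(0, -1)], Y=[(1, 0)], Z=[(0, -2)])
#
# X, Y and Z nodes are highlighted with different colors; anc_x, anc_y and
# anc_xy can additionally mark ancestor sets.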
if __name__ == "__main__":
val_matrix = np.zeros((4, 4, 3))
# Complete test case
link_matrix = np.zeros((3,3,2), dtype='<U3')
link_matrix[0, 1, 0] = "x->"
link_matrix[1, 0, 0] = "<-x"
link_matrix[1, 2, 0] = "x->"
link_matrix[2, 1, 0] = "<-x"
link_matrix[0, 2, 0] = "x->"
link_matrix[2, 0, 0] = "<-x"
nolinks = np.zeros(link_matrix.shape)
# nolinks[range(4), range(4), 1] = 1
# plot_time_series_graph(link_matrix=nolinks)
plot_graph(link_matrix=link_matrix,
save_name="/home/rung_ja/Downloads/tsg_test.pdf")
# pyplot.show()
| 110,838 | 33.680538 | 109 |
py
|
correlate
|
correlate-master/venvCorrleateOnly3.9Fuck/lib/python3.9/site-packages/tigramite/independence_tests/independence_tests_base.py
|
"""Tigramite causal discovery for time series."""
# Author: Jakob Runge <[email protected]>
#
# License: GNU General Public License v3.0
from __future__ import print_function
import warnings
import math
import abc
import numpy as np
import six
from hashlib import sha1
@six.add_metaclass(abc.ABCMeta)
class CondIndTest():
"""Base class of conditional independence tests.
Provides useful general functions for different independence tests such as
shuffle significance testing and bootstrap confidence estimation. Also
handles masked samples. Other test classes can inherit from this class.
Parameters
----------
seed : int, optional(default = 42)
Seed for RandomState (default_rng)
mask_type : str, optional (default = None)
Must be in {'y','x','z','xy','xz','yz','xyz'}
Masking mode: Indicators for which variables in the dependence measure
I(X; Y | Z) the samples should be masked. If None, 'y' is used, which
excludes all time slices containing masked samples in Y. Explained in
[1]_.
significance : str, optional (default: 'analytic')
Type of significance test to use. In this package 'analytic',
'fixed_thres' and 'shuffle_test' are available.
fixed_thres : float, optional (default: 0.1)
If significance is 'fixed_thres', this specifies the threshold for the
absolute value of the dependence measure.
sig_samples : int, optional (default: 1000)
Number of samples for shuffle significance test.
sig_blocklength : int, optional (default: None)
Block length for block-shuffle significance test. If None, the
block length is determined from the decay of the autocovariance as
explained in [1]_.
confidence : str, optional (default: None)
Specify type of confidence estimation. If False, numpy.nan is returned.
'bootstrap' can be used with any test, for ParCorr also 'analytic' is
implemented.
conf_lev : float, optional (default: 0.9)
Two-sided confidence interval.
conf_samples : int, optional (default: 100)
Number of samples for bootstrap.
conf_blocklength : int, optional (default: None)
Block length for block-bootstrap. If None, the block length is
determined from the decay of the autocovariance as explained in [1]_.
recycle_residuals : bool, optional (default: False)
Specifies whether residuals should be stored. This may be faster, but
can cost considerable memory.
verbosity : int, optional (default: 0)
Level of verbosity.
"""
@abc.abstractmethod
def get_dependence_measure(self, array, xyz):
"""
Abstract function that all concrete classes must instantiate.
"""
pass
@abc.abstractproperty
def measure(self):
"""
Abstract property to store the type of independence test.
"""
pass
def __init__(self,
seed=42,
mask_type=None,
significance='analytic',
fixed_thres=0.1,
sig_samples=1000,
sig_blocklength=None,
confidence=None,
conf_lev=0.9,
conf_samples=100,
conf_blocklength=None,
recycle_residuals=False,
verbosity=0):
# Set the dataframe to None for now, will be reset during pcmci call
self.dataframe = None
# Set the options
self.random_state = np.random.default_rng(seed)
self.significance = significance
self.sig_samples = sig_samples
self.sig_blocklength = sig_blocklength
self.fixed_thres = fixed_thres
self.verbosity = verbosity
self.cached_ci_results = {}
# If we recycle residuals, then set up a residual cache
self.recycle_residuals = recycle_residuals
if self.recycle_residuals:
self.residuals = {}
# If we use a mask, we cannot recycle residuals
self.set_mask_type(mask_type)
# Set the confidence type and details
self.confidence = confidence
self.conf_lev = conf_lev
self.conf_samples = conf_samples
self.conf_blocklength = conf_blocklength
        # Print information about the test if verbosity is requested
if self.verbosity > 0:
self.print_info()
def set_mask_type(self, mask_type):
"""
Setter for mask type to ensure that this option does not clash with
recycle_residuals.
Parameters
----------
mask_type : str
Must be in {'y','x','z','xy','xz','yz','xyz'}
Masking mode: Indicators for which variables in the dependence
measure I(X; Y | Z) the samples should be masked. If None, 'y' is
used, which excludes all time slices containing masked samples in Y.
Explained in [1]_.
"""
# Set the mask type
self.mask_type = mask_type
# Check if this clashes with residual recycling
if self.mask_type is not None:
if self.recycle_residuals is True:
warnings.warn("Using a mask disables recycling residuals.")
self.recycle_residuals = False
# Check the mask type is keyed correctly
self._check_mask_type()
def print_info(self):
"""
Print information about the conditional independence test parameters
"""
info_str = "\n# Initialize conditional independence test\n\nParameters:"
info_str += "\nindependence test = %s" % self.measure
info_str += "\nsignificance = %s" % self.significance
# Check if we are using a shuffle test
if self.significance == 'shuffle_test':
info_str += "\nsig_samples = %s" % self.sig_samples
info_str += "\nsig_blocklength = %s" % self.sig_blocklength
# Check if we are using a fixed threshold
elif self.significance == 'fixed_thres':
info_str += "\nfixed_thres = %s" % self.fixed_thres
# Check if we have a confidence type
if self.confidence:
info_str += "\nconfidence = %s" % self.confidence
info_str += "\nconf_lev = %s" % self.conf_lev
            # Check if this confidence type is bootstrapping
if self.confidence == 'bootstrap':
info_str += "\nconf_samples = %s" % self.conf_samples
info_str += "\nconf_blocklength = %s" %self.conf_blocklength
# Check if we use a non-trivial mask type
if self.mask_type is not None:
info_str += "\nmask_type = %s" % self.mask_type
# Check if we are recycling residuals or not
if self.recycle_residuals:
info_str += "\nrecycle_residuals = %s" % self.recycle_residuals
# Print the information string
print(info_str)
def _check_mask_type(self):
"""
mask_type : str, optional (default = None)
Must be in {'y','x','z','xy','xz','yz','xyz'}
Masking mode: Indicators for which variables in the dependence
measure I(X; Y | Z) the samples should be masked. If None, 'y' is
used, which excludes all time slices containing masked samples in Y.
Explained in [1]_.
"""
if self.mask_type is not None:
mask_set = set(self.mask_type) - set(['x', 'y', 'z'])
if mask_set:
err_msg = "mask_type = %s," % self.mask_type + " but must be" +\
" list containing 'x','y','z', or any combination"
raise ValueError(err_msg)
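    # For example, mask_type='y' (the default behaviour), 'xy' or 'xyz' pass
    # this check, while something like mask_type='time' would raise the
    # ValueError above because it contains characters other than 'x', 'y', 'z'.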
def get_analytic_confidence(self, value, df, conf_lev):
"""
Base class assumption that this is not implemented. Concrete classes
should override when possible.
"""
raise NotImplementedError("Analytic confidence not"+\
" implemented for %s" % self.measure)
def get_model_selection_criterion(self, j, parents, tau_max=0):
"""
Base class assumption that this is not implemented. Concrete classes
should override when possible.
"""
raise NotImplementedError("Model selection not"+\
" implemented for %s" % self.measure)
def get_analytic_significance(self, value, T, dim):
"""
Base class assumption that this is not implemented. Concrete classes
should override when possible.
"""
raise NotImplementedError("Analytic significance not"+\
" implemented for %s" % self.measure)
def get_shuffle_significance(self, array, xyz, value,
return_null_dist=False):
"""
Base class assumption that this is not implemented. Concrete classes
should override when possible.
"""
raise NotImplementedError("Shuffle significance not"+\
" implemented for %s" % self.measure)
def _get_single_residuals(self, array, target_var,
standardize=True, return_means=False):
"""
Base class assumption that this is not implemented. Concrete classes
should override when possible.
"""
raise NotImplementedError("Residual calculation not"+\
" implemented for %s" % self.measure)
def set_dataframe(self, dataframe):
"""Initialize and check the dataframe.
Parameters
----------
dataframe : data object
Set tigramite dataframe object. It must have the attributes
dataframe.values yielding a numpy array of shape (observations T,
variables N) and optionally a mask of the same shape and a missing
values flag.
"""
self.dataframe = dataframe
if self.mask_type is not None:
dataframe._check_mask(require_mask=True)
def _keyfy(self, x, z):
"""Helper function to make lists unique."""
return (tuple(set(x)), tuple(set(z)))
def _get_array(self, X, Y, Z, tau_max=0, cut_off='2xtau_max',
verbosity=0):
"""Convencience wrapper around construct_array."""
if self.measure in ['par_corr']:
if len(X) > 1 or len(Y) > 1:
raise ValueError("X and Y for %s must be univariate." %
self.measure)
# Call the wrapped function
return self.dataframe.construct_array(X=X, Y=Y, Z=Z,
tau_max=tau_max,
mask_type=self.mask_type,
return_cleaned_xyz=True,
do_checks=True,
cut_off=cut_off,
verbosity=verbosity)
def _get_array_hash(self, array, xyz, XYZ):
"""Helper function to get hash of array.
For a CI test X _|_ Y | Z the order of variables within X or Y or Z
does not matter and also the order X and Y can be swapped.
Hence, to compare hashes of the whole array, we order accordingly
to create a unique, order-independent hash.
Parameters
----------
array : Data array of shape (dim, T)
Data array.
xyz : array
Identifier array of shape (dim,) identifying which row in array
corresponds to X, Y, and Z
XYZ : list of tuples
Returns
-------
combined_hash : str
Hash that identifies uniquely an array of XYZ
"""
X, Y, Z = XYZ
# First check whether CI result was already computed
# by checking whether hash of (xyz, array) already exists
# Individually sort X, Y, Z since for a CI test it does not matter
        # how they are arranged
x_orderd = sorted(range(len(X)), key=X.__getitem__)
arr_x = array[xyz==0][x_orderd]
x_hash = sha1(np.ascontiguousarray(arr_x)).hexdigest()
y_orderd = sorted(range(len(Y)), key=Y.__getitem__)
arr_y = array[xyz==1][y_orderd]
y_hash = sha1(np.ascontiguousarray(arr_y)).hexdigest()
z_orderd = sorted(range(len(Z)), key=Z.__getitem__)
arr_z = array[xyz==2][z_orderd]
z_hash = sha1(np.ascontiguousarray(arr_z)).hexdigest()
sorted_xy = sorted([x_hash, y_hash])
combined_hash = (sorted_xy[0], sorted_xy[1], z_hash)
return combined_hash
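    # Illustration of the caching key built above: for X = [(0, -1), (2, -2)]
    # and Y = [(1, 0)], the hash is identical to the one obtained for
    # X = [(2, -2), (0, -1)] (rows of X are sorted first) and also to the one
    # for the swapped test with X and Y exchanged (the X- and Y-hashes are
    # sorted before being combined), so such repeated queries hit the cache.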
def run_test(self, X, Y, Z=None, tau_max=0, cut_off='2xtau_max'):
"""Perform conditional independence test.
        Calls the dependence measure and significance test functions. The child
classes must specify a function get_dependence_measure and either or
both functions get_analytic_significance and get_shuffle_significance.
If recycle_residuals is True, also _get_single_residuals must be
available.
Parameters
----------
X, Y, Z : list of tuples
X,Y,Z are of the form [(var, -tau)], where var specifies the
variable index and tau the time lag.
tau_max : int, optional (default: 0)
Maximum time lag. This may be used to make sure that estimates for
different lags in X, Z, all have the same sample size.
cut_off : {'2xtau_max', 'max_lag', 'max_lag_or_tau_max'}
How many samples to cutoff at the beginning. The default is
'2xtau_max', which guarantees that MCI tests are all conducted on
the same samples. For modeling, 'max_lag_or_tau_max' can be used,
which uses the maximum of tau_max and the conditions, which is
useful to compare multiple models on the same sample. Last,
'max_lag' uses as much samples as possible.
Returns
-------
val, pval : Tuple of floats
The test statistic value and the p-value.
"""
# Get the array to test on
array, xyz, XYZ = self._get_array(X, Y, Z, tau_max, cut_off)
# chrei: this is a bit of a hack # todo find proper solution with was_intervened variable
# if nan it should be due to intervention
# drop columns with nans
array = array[:, ~np.isnan(array).any(axis=0)]
X, Y, Z = XYZ
# Record the dimensions
dim, T = array.shape
# Ensure it is a valid array
if np.any(np.isnan(array)):
raise ValueError("nans in the array!")
combined_hash = self._get_array_hash(array, xyz, XYZ)
if combined_hash in self.cached_ci_results.keys():
cached = True
val, pval = self.cached_ci_results[combined_hash]
else:
cached = False
            # Get the dependence measure, recycling residuals if need be
val = self._get_dependence_measure_recycle(X, Y, Z, xyz, array)
# Get the p-value
pval = self.get_significance(val, array, xyz, T, dim)
self.cached_ci_results[combined_hash] = (val, pval)
if self.verbosity > 1:
self._print_cond_ind_results(val=val, pval=pval, cached=cached,
conf=None)
# Return the value and the pvalue
return val, pval
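    # A hedged usage sketch for run_test (assuming a concrete test class such
    # as ParCorr from this package, with a tigramite dataframe already set via
    # set_dataframe); the indices and lags below are hypothetical:
    #
    #     # H0: X0_{t-1} independent of X2_t given X1_{t-1}
    #     val, pval = ci_test.run_test(X=[(0, -1)], Y=[(2, 0)],
    #                                  Z=[(1, -1)], tau_max=2)
    #     if pval < 0.05:
    #         print("dependent, test statistic %.3f" % val)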
def run_test_raw(self, x, y, z=None):
"""Perform conditional independence test directly on input arrays x, y, z.
        Calls the dependence measure and significance test functions. The child
classes must specify a function get_dependence_measure and either or
both functions get_analytic_significance and get_shuffle_significance.
Parameters
----------
x, y, z : arrays
x,y,z are of the form (samples, dimension).
Returns
-------
val, pval : Tuple of floats
The test statistic value and the p-value.
"""
if np.ndim(x) != 2 or np.ndim(y) != 2:
raise ValueError("x,y must be arrays of shape (samples, dimension)"
" where dimension can be 1.")
if z is not None and np.ndim(z) != 2:
raise ValueError("z must be array of shape (samples, dimension)"
" where dimension can be 1.")
if z is None:
# Get the array to test on
array = np.vstack((x.T, y.T))
# xyz is the dimension indicator
xyz = np.array([0 for i in range(x.shape[1])] +
[1 for i in range(y.shape[1])])
else:
# Get the array to test on
array = np.vstack((x.T, y.T, z.T))
# xyz is the dimension indicator
xyz = np.array([0 for i in range(x.shape[1])] +
[1 for i in range(y.shape[1])] +
[2 for i in range(z.shape[1])])
# Record the dimensions
dim, T = array.shape
# Ensure it is a valid array
if np.isnan(array).sum() != 0:
raise ValueError("nans in the array!")
# Get the dependence measure
val = self.get_dependence_measure(array, xyz)
# Get the p-value
pval = self.get_significance(val, array, xyz, T, dim)
# Return the value and the pvalue
return val, pval
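    # A hedged usage sketch for run_test_raw (again assuming a concrete
    # subclass such as ParCorr); note that x, y, z must be 2D arrays of shape
    # (samples, dimension):
    #
    #     rng = np.random.default_rng(0)
    #     z = rng.standard_normal((500, 1))
    #     x = 0.7 * z + rng.standard_normal((500, 1))
    #     y = 0.7 * z + rng.standard_normal((500, 1))
    #     val, pval = ci_test.run_test_raw(x, y, z=z)  # should not reject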
def _get_dependence_measure_recycle(self, X, Y, Z, xyz, array):
"""Get the dependence_measure, optionally recycling residuals
If self.recycle_residuals is True, also _get_single_residuals must be
available.
Parameters
----------
X, Y, Z : list of tuples
X,Y,Z are of the form [(var, -tau)], where var specifies the
variable index and tau the time lag.
xyz : array of ints
XYZ identifier array of shape (dim,).
array : array
Data array of shape (dim, T)
Return
------
val : float
Test statistic
"""
# Check if we are recycling residuals
if self.recycle_residuals:
# Get or calculate the cached residuals
"""
chrei plan:
get interventional and observational array til here """
x_resid = self._get_cached_residuals(X, Z, array, 0)
y_resid = self._get_cached_residuals(Y, Z, array, 1)
# Make a new residual array
array_resid = np.array([x_resid, y_resid])
xyz_resid = np.array([0, 1])
# Return the dependence measure
return self.get_dependence_measure(array_resid, xyz_resid)
# If not, return the dependence measure on the array and xyz
return self.get_dependence_measure(array, xyz)
def _get_cached_residuals(self, x_nodes, z_nodes, array, target_var):
"""
Retrieve or calculate the cached residuals for the given node sets.
Parameters
----------
x_nodes : list of tuples
List of nodes, X or Y normally. Used to key the residual cache
during lookup
z_nodes : list of tuples
List of nodes, Z normally
target_var : int
Key to differentiate X from Y.
x_nodes == X => 0, x_nodes == Y => 1
array : array
Data array of shape (dim, T)
Returns
-------
x_resid : array
Residuals calculated by _get_single_residual
"""
# Check if we have calculated these residuals
if False:#self._keyfy(x_nodes, z_nodes) in list(self.residuals): # chrei: needed when external_indepnedencies todo: if no external_independency
x_resid = self.residuals[self._keyfy(x_nodes, z_nodes)]
# If not, calculate the residuals
else:
x_resid = self._get_single_residuals(array, target_var=target_var)
if z_nodes:
self.residuals[self._keyfy(x_nodes, z_nodes)] = x_resid
# Return these residuals
return x_resid
def get_significance(self, val, array, xyz, T, dim, sig_override=None):
"""
Returns the p-value from whichever significance function is specified
for this test. If an override is used, then it will call a different
function then specified by self.significance
Parameters
----------
val : float
Test statistic value.
array : array-like
data array with X, Y, Z in rows and observations in columns
xyz : array of ints
XYZ identifier array of shape (dim,).
T : int
Sample length
dim : int
Dimensionality, ie, number of features.
sig_override : string
Must be in 'analytic', 'shuffle_test', 'fixed_thres'
Returns
-------
pval : float or numpy.nan
P-value.
"""
# Defaults to the self.significance member value
use_sig = self.significance
if sig_override is not None:
use_sig = sig_override
# Check if we are using the analytic significance
if use_sig == 'analytic':
pval = self.get_analytic_significance(value=val, T=T, dim=dim)
# Check if we are using the shuffle significance
elif use_sig == 'shuffle_test':
pval = self.get_shuffle_significance(array=array,
xyz=xyz,
value=val)
# Check if we are using the fixed_thres significance
elif use_sig == 'fixed_thres':
pval = self.get_fixed_thres_significance(
value=val,
fixed_thres=self.fixed_thres)
else:
raise ValueError("%s not known." % self.significance)
# Return the calculated value
return pval
def get_measure(self, X, Y, Z=None, tau_max=0):
"""Estimate dependence measure.
Calls the dependence measure function. The child classes must specify
a function get_dependence_measure.
Parameters
----------
X, Y [, Z] : list of tuples
X,Y,Z are of the form [(var, -tau)], where var specifies the
variable index and tau the time lag.
tau_max : int, optional (default: 0)
Maximum time lag. This may be used to make sure that estimates for
different lags in X, Z, all have the same sample size.
Returns
-------
val : float
The test statistic value.
"""
# Make the array
array, xyz, (X, Y, Z) = self._get_array(X, Y, Z, tau_max)
D, T = array.shape
# Check it is valid
if np.isnan(array).sum() != 0:
raise ValueError("nans in the array!")
# Return the dependence measure
return self._get_dependence_measure_recycle(X, Y, Z, xyz, array)
def get_confidence(self, X, Y, Z=None, tau_max=0):
"""Perform confidence interval estimation.
Calls the dependence measure and confidence test functions. The child
classes can specify a function get_dependence_measure and
get_analytic_confidence or get_bootstrap_confidence. If confidence is
False, (numpy.nan, numpy.nan) is returned.
Parameters
----------
X, Y, Z : list of tuples
X,Y,Z are of the form [(var, -tau)], where var specifies the
variable index and tau the time lag.
tau_max : int, optional (default: 0)
Maximum time lag. This may be used to make sure that estimates for
different lags in X, Z, all have the same sample size.
Returns
-------
(conf_lower, conf_upper) : Tuple of floats
Upper and lower confidence bound of confidence interval.
"""
# Check if a confidence type has been defined
if self.confidence:
# Ensure the confidence level given makes sense
if self.conf_lev < .5 or self.conf_lev >= 1.:
raise ValueError("conf_lev = %.2f, " % self.conf_lev +
"but must be between 0.5 and 1")
half_conf = self.conf_samples * (1. - self.conf_lev)/2.
if self.confidence == 'bootstrap' and half_conf < 1.:
raise ValueError("conf_samples*(1.-conf_lev)/2 is %.2f"
% half_conf + ", must be >> 1")
# Make and check the array
array, xyz, _ = self._get_array(X, Y, Z, tau_max, verbosity=0)
dim, T = array.shape
if np.isnan(array).sum() != 0:
raise ValueError("nans in the array!")
# Check if we are using analytic confidence or bootstrapping it
if self.confidence == 'analytic':
val = self.get_dependence_measure(array, xyz)
(conf_lower, conf_upper) = \
self.get_analytic_confidence(df=T-dim,
value=val,
conf_lev=self.conf_lev)
elif self.confidence == 'bootstrap':
# Overwrite analytic values
(conf_lower, conf_upper) = \
self.get_bootstrap_confidence(
array, xyz,
conf_samples=self.conf_samples,
conf_blocklength=self.conf_blocklength,
conf_lev=self.conf_lev, verbosity=self.verbosity)
elif not self.confidence:
return None
else:
raise ValueError("%s confidence estimation not implemented"
% self.confidence)
# Cache the confidence interval
self.conf = (conf_lower, conf_upper)
# Return the confidence interval
return (conf_lower, conf_upper)
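    # A hedged configuration sketch: bootstrap confidence intervals are
    # requested at construction time, e.g. with a hypothetical concrete test
    #
    #     ci_test = ParCorr(confidence='bootstrap', conf_lev=0.9,
    #                       conf_samples=200)
    #     conf_lower, conf_upper = ci_test.get_confidence(X=[(0, -1)],
    #                                                     Y=[(1, 0)], tau_max=1)
    #
    # Here conf_samples * (1 - conf_lev) / 2 = 10, which satisfies the sanity
    # check above.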
def _print_cond_ind_results(self, val, pval=None, cached=None, conf=None):
"""Print results from conditional independence test.
Parameters
----------
val : float
            Test statistic value.
pval : float, optional (default: None)
p-value
conf : tuple of floats, optional (default: None)
Confidence bounds.
"""
printstr = " val = % .3f" % (val)
if pval is not None:
printstr += " | pval = %.5f" % (pval)
if conf is not None:
printstr += " | conf bounds = (%.3f, %.3f)" % (
conf[0], conf[1])
if cached is not None:
printstr += " %s" % ({0:"", 1:"[cached]"}[cached])
print(printstr)
def get_bootstrap_confidence(self, array, xyz, dependence_measure=None,
conf_samples=100, conf_blocklength=None,
conf_lev=.95, verbosity=0):
"""Perform bootstrap confidence interval estimation.
With conf_blocklength > 1 or None a block-bootstrap is performed.
Parameters
----------
array : array-like
data array with X, Y, Z in rows and observations in columns
xyz : array of ints
XYZ identifier array of shape (dim,).
dependence_measure : function (default = self.get_dependence_measure)
Dependence measure function must be of form
dependence_measure(array, xyz) and return a numeric value
conf_lev : float, optional (default: 0.9)
Two-sided confidence interval.
conf_samples : int, optional (default: 100)
Number of samples for bootstrap.
conf_blocklength : int, optional (default: None)
Block length for block-bootstrap. If None, the block length is
determined from the decay of the autocovariance as explained in
[1]_.
verbosity : int, optional (default: 0)
Level of verbosity.
Returns
-------
(conf_lower, conf_upper) : Tuple of floats
Upper and lower confidence bound of confidence interval.
"""
        # Check if a dependence measure is provided, otherwise use the default
if not dependence_measure:
dependence_measure = self.get_dependence_measure
# confidence interval is two-sided
c_int = 1. - (1. - conf_lev)/2.
dim, T = array.shape
        # If no block length is given, determine the optimal block length.
# This has a maximum of 10% of the time sample length
if conf_blocklength is None:
conf_blocklength = \
self._get_block_length(array, xyz, mode='confidence')
# Determine the number of blocks total, rounding up for non-integer
# amounts
n_blks = int(math.ceil(float(T)/conf_blocklength))
# Print some information
if verbosity > 2:
print(" block_bootstrap confidence intervals"
" with block-length = %d ..." % conf_blocklength)
# Generate the block bootstrapped distribution
bootdist = np.zeros(conf_samples)
for smpl in range(conf_samples):
            # Get the starting indices for the blocks
blk_strt = self.random_state.integers(0, T - conf_blocklength + 1, n_blks)
# Get the empty array of block resampled values
array_bootstrap = \
np.zeros((dim, n_blks*conf_blocklength), dtype=array.dtype)
# Fill the array of block resamples
for i in range(conf_blocklength):
array_bootstrap[:, i::conf_blocklength] = array[:, blk_strt + i]
# Cut to proper length
array_bootstrap = array_bootstrap[:, :T]
bootdist[smpl] = dependence_measure(array_bootstrap, xyz)
# Sort and get quantile
bootdist.sort()
conf_lower = bootdist[int((1. - c_int) * conf_samples)]
conf_upper = bootdist[int(c_int * conf_samples)]
        # Return the confidence limits as a tuple
return (conf_lower, conf_upper)
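    # Numerical illustration of the block bootstrap above: with T = 100
    # samples, conf_blocklength = 5 and conf_samples = 100 at conf_lev = 0.9,
    # each bootstrap replicate concatenates n_blks = ceil(100 / 5) = 20
    # randomly chosen blocks (then cut back to length T), and the interval
    # bounds are read off near the 5% and 95% quantiles of the sorted
    # bootstrap distribution.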
def _get_acf(self, series, max_lag=None):
"""Returns autocorrelation function.
Parameters
----------
series : 1D-array
data series to compute autocorrelation from
max_lag : int, optional (default: None)
maximum lag for autocorrelation function. If None is passed, 10% of
the data series length are used.
Returns
-------
autocorr : array of shape (max_lag + 1,)
Autocorrelation function.
"""
# Set the default max lag
if max_lag is None:
max_lag = int(max(5, 0.1*len(series)))
# Initialize the result
autocorr = np.ones(max_lag + 1)
# Iterate over possible lags
for lag in range(1, max_lag + 1):
# Set the values
y1_vals = series[lag:]
y2_vals = series[:len(series) - lag]
# Calculate the autocorrelation
autocorr[lag] = np.corrcoef(y1_vals, y2_vals, ddof=0)[0, 1]
return autocorr
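    # For intuition: applied to a (hypothetical) AR(1) series
    # x_t = 0.7 * x_{t-1} + noise of length 1000, max_lag defaults to
    # int(max(5, 0.1 * 1000)) = 100 and the returned autocorrelation decays
    # roughly like 0.7 ** lag, with autocorr[0] = 1 by construction.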
def _get_block_length(self, array, xyz, mode):
"""Returns optimal block length for significance and confidence tests.
        Determine block length using the approach in Mader (2013) [Eq. (6)],
        which improves the method of Pfeifer (2005) with non-overlapping
        blocks. In case of multidimensional X, the max is used. Further
        details in [1]_.
Two modes are available. For mode='significance', only the indices
corresponding to X are shuffled in array. For mode='confidence' all
variables are jointly shuffled. If the autocorrelation curve fit fails,
a block length of 5% of T is used. The block length is limited to a
maximum of 10% of T.
Parameters
----------
array : array-like
data array with X, Y, Z in rows and observations in columns
xyz : array of ints
XYZ identifier array of shape (dim,).
mode : str
Which mode to use.
Returns
-------
block_len : int
Optimal block length.
"""
        # Inject a dependency on signal, optimize
from scipy import signal, optimize
# Get the shape of the array
dim, T = array.shape
        # Initialize the indices
indices = range(dim)
if mode == 'significance':
indices = np.where(xyz == 0)[0]
# Maximum lag for autocov estimation
max_lag = int(0.1*T)
# Define the function to optimize against
def func(x_vals, a_const, decay):
return a_const * decay**x_vals
# Calculate the block length
block_len = 1
for i in indices:
# Get decay rate of envelope of autocorrelation functions
# via hilbert trafo
autocov = self._get_acf(series=array[i], max_lag=max_lag)
autocov[0] = 1.
hilbert = np.abs(signal.hilbert(autocov))
# Try to fit the curve
try:
popt, _ = optimize.curve_fit(
f=func,
xdata=np.arange(0, max_lag+1),
ydata=hilbert,
)
phi = popt[1]
# Formula of Pfeifer (2005) assuming non-overlapping blocks
l_opt = (4. * T * (phi / (1. - phi) + phi**2 / (1. - phi)**2)**2
/ (1. + 2. * phi / (1. - phi))**2)**(1. / 3.)
block_len = max(block_len, int(l_opt))
except RuntimeError:
print("Error - curve_fit failed in block_shuffle, using"
" block_len = %d" % (int(.05 * T)))
block_len = max(int(.05 * T), 2)
# Limit block length to a maximum of 10% of T
block_len = min(block_len, int(0.1 * T))
return block_len
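    # Worked example of the block-length formula above (values only
    # illustrative): for a fitted decay rate phi = 0.5 and T = 1000,
    #     l_opt = (4 * 1000 * (1 + 1)**2 / (1 + 2)**2)**(1/3)
    #           = (16000 / 9)**(1/3) ≈ 12.1,
    # so block_len becomes 12, well below the cap of int(0.1 * T) = 100.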
def _get_shuffle_dist(self, array, xyz, dependence_measure,
sig_samples, sig_blocklength=None,
verbosity=0):
"""Returns shuffle distribution of test statistic.
The rows in array corresponding to the X-variable are shuffled using
a block-shuffle approach.
Parameters
----------
array : array-like
data array with X, Y, Z in rows and observations in columns
xyz : array of ints
XYZ identifier array of shape (dim,).
dependence_measure : object
Dependence measure function must be of form
dependence_measure(array, xyz) and return a numeric value
sig_samples : int, optional (default: 100)
Number of samples for shuffle significance test.
sig_blocklength : int, optional (default: None)
Block length for block-shuffle significance test. If None, the
block length is determined from the decay of the autocovariance as
explained in [1]_.
verbosity : int, optional (default: 0)
Level of verbosity.
Returns
-------
null_dist : array of shape (sig_samples,)
Contains the sorted test statistic values estimated from the
shuffled arrays.
"""
dim, T = array.shape
x_indices = np.where(xyz == 0)[0]
dim_x = len(x_indices)
if sig_blocklength is None:
sig_blocklength = self._get_block_length(array, xyz,
mode='significance')
n_blks = int(math.floor(float(T)/sig_blocklength))
# print 'n_blks ', n_blks
if verbosity > 2:
print(" Significance test with block-length = %d "
"..." % (sig_blocklength))
array_shuffled = np.copy(array)
block_starts = np.arange(0, T - sig_blocklength + 1, sig_blocklength)
# Dividing the array up into n_blks of length sig_blocklength may
# leave a tail. This tail is later randomly inserted
tail = array[x_indices, n_blks*sig_blocklength:]
null_dist = np.zeros(sig_samples)
for sam in range(sig_samples):
blk_starts = self.random_state.permutation(block_starts)[:n_blks]
x_shuffled = np.zeros((dim_x, n_blks*sig_blocklength),
dtype=array.dtype)
for i, index in enumerate(x_indices):
for blk in range(sig_blocklength):
x_shuffled[i, blk::sig_blocklength] = \
array[index, blk_starts + blk]
# Insert tail randomly somewhere
if tail.shape[1] > 0:
insert_tail_at = self.random_state.choice(block_starts)
x_shuffled = np.insert(x_shuffled, insert_tail_at,
tail.T, axis=1)
for i, index in enumerate(x_indices):
array_shuffled[index] = x_shuffled[i]
null_dist[sam] = dependence_measure(array=array_shuffled,
xyz=xyz)
null_dist.sort()
return null_dist
def get_fixed_thres_significance(self, value, fixed_thres):
"""Returns signficance for thresholding test.
Returns 0 if numpy.abs(value) is smaller than fixed_thres and 1 else.
Parameters
----------
value : number
Value of test statistic for unshuffled estimate.
fixed_thres : number
Fixed threshold, is made positive.
Returns
-------
pval : float
Returns 1 if numpy.abs(value) is smaller than fixed_thres and 0
otherwise.
"""
if np.abs(value) < np.abs(fixed_thres):
pval = 1.
else:
pval = 0.
return pval
def _trafo2uniform(self, x):
"""Transforms input array to uniform marginals.
Assumes x.shape = (dim, T)
Parameters
----------
x : array-like
Input array.
Returns
-------
u : array-like
array with uniform marginals.
"""
def trafo(xi):
xisorted = np.sort(xi)
yi = np.linspace(1. / len(xi), 1, len(xi))
return np.interp(xi, xisorted, yi)
if np.ndim(x) == 1:
u = trafo(x)
else:
u = np.empty(x.shape)
for i in range(x.shape[0]):
u[i] = trafo(x[i])
return u
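# Illustrative, standalone sketch mirroring the rank-based transform above: each value
# is mapped to its empirical CDF position, so the result lies in (0, 1]. Seed and
# sample size are assumptions chosen only for illustration.
def _uniform_marginals_sketch(seed=0):
    import numpy as np
    xi = np.random.default_rng(seed).standard_normal(100)
    u = np.interp(xi, np.sort(xi), np.linspace(1. / len(xi), 1, len(xi)))
    return float(u.min()), float(u.max())  # approximately (0.01, 1.0)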
| 38,744 | 36.616505 | 151 |
py
|
correlate
|
correlate-master/intervention_proposal/test_get_intervention.py
|
import pickle
import numpy as np
from config import checkpoint_path
from intervention_proposal.get_intervention import find_optimistic_intervention, \
drop_redundant_information_due_to_symmetry, get_ambiguous_graph_locations, create_all_graph_combinations, \
graph_to_scm, lin_f, make_redundant_information_with_symmetry, get_intervention_ignoring_directionalities
class TestGetIntervention:
def test_get_highest_abs_corr_of_var_except_auto_corr(self):
vals = np.array([
[[99.0, 99.0], [1.0, 4.0], [77, 77.0]],
[[5.0, -6.0], [-99.0, -99.0], [66.0, 66.0]],
[[88.0, -88.0], [-88.0, -88.0], [88.0, 88.0]]
])
var_name_as_str = '0'
labels_as_str = ['0', '2', '3']
external_independencies_wrt_target = ['3']
most_extreme_val, most_extreme_var = get_intervention_ignoring_directionalities(vals, var_name_as_str,
labels_as_str,
external_independencies_wrt_target,
ignore_external_independencies=False)
assert most_extreme_val == -6.0
assert most_extreme_var == '2'
def test_drop_redundant_information_due_to_symmetry(self):
# Given
original_graph = np.array([[['', '0->'], ['-->', '-->']], [['<--', '<->'], ['', '-->']]])
# When
modified_graph = drop_redundant_information_due_to_symmetry(original_graph)
# Then
true_graph = np.array([[['', '0->'], ['', '-->']], [['<--', '<->'], ['', '-->']]])
assert np.array_equal(true_graph, modified_graph)
def test_make_redundant_information_with_symmetry(self):
# 2. test
# Given
original_graph = np.array([[['', '0->'], ['', '<->']], [['<--', ''], ['', '-->']]])
val = np.array([[[0.0, 2.0], [0.0, 4.0]], [[5.0, 6.0], [0.0, 8.0]]])
# When
modified_graph, modified_val = make_redundant_information_with_symmetry(original_graph, val)
# Then
true_graph = np.array([[['', '0->'], ['-->', '<->']], [['<--', ''], ['', '-->']]])
true_val = np.array([[[0.0, 2.0], [5.0, 4.0]], [[5.0, 6.0], [0.0, 8.0]]])
assert np.array_equal(true_graph, modified_graph)
assert np.array_equal(true_val, modified_val)
def test_get_ambiguous_graph_locations(self):
# Given
my_graph = np.array([[['', 'o->'], ['', '-->']], [['x-x', '<->'], ['', '-->']]])
# When
ambiguous_locations = get_ambiguous_graph_locations(my_graph)
# Then
true_ambiguous_locations = [
[0, 0, 1, 'o->', ["-->", "<->"]],
[1, 0, 0, 'x-x', ["-->", "<->", "<--"]],
]
assert np.array_equal(true_ambiguous_locations, ambiguous_locations)
# 2. test empty
# Given
my_graph = np.array([[['', '-->'], ['', '-->']], [['<--', '<->'], ['', '-->']]])
# When
ambiguous_locations = get_ambiguous_graph_locations(my_graph)
# Then
true_ambiguous_locations = []
assert np.array_equal(true_ambiguous_locations, ambiguous_locations)
def test_create_all_graph_combinations(self):
# normal
# given
my_graph = np.array([[['', 'o->'], ['', '-->']], [['x->', '<->'], ['', '-->']]])
ambiguous_locations = [
[0, 0, 1, 'o->', ["-->", "<->"]],
[1, 0, 0, 'x->', ["-->", "<->"]],
]
# when
all_graph_combinations = create_all_graph_combinations(my_graph, ambiguous_locations)
# then
true_all_graph_combinations = [
np.array([[['', '-->'], ['', '-->']], [['-->', '<->'], ['', '-->']]]),
np.array([[['', '-->'], ['', '-->']], [['<->', '<->'], ['', '-->']]]),
np.array([[['', '<->'], ['', '-->']], [['-->', '<->'], ['', '-->']]]),
np.array([[['', '<->'], ['', '-->']], [['<->', '<->'], ['', '-->']]]),
]
assert np.array_equal(true_all_graph_combinations, all_graph_combinations)
def test_graph_to_scm(self):
# Given
my_graph = np.array([[['', '-->'], ['', '-->']], [['-->', '<->'], ['', '-->']]])
val = np.array([[[0.0, 2.0], [0.0, 4.0]], [[5.0, 6.0], [0.0, 8.0]]])
# When
scm = graph_to_scm(my_graph, val)
# Then
true_scm = {
0: [((0, -1), 2.0, lin_f), ((1, 0), 5.0, lin_f)],
1: [((0, -1), 4.0, lin_f), ((1, -1), 8.0, lin_f)],
}
assert np.array_equal(true_scm, scm)
def test_find_optimistic_intervention(self):
# Given
# load from file
with open(checkpoint_path + '{}.pkl'.format('true_scm'), 'rb') as f:
my_graph, val, var_names, ts, unintervenable_vars, random_seed, old_intervention, label, external_independencies = pickle.load(
f)
# When
ans = find_optimistic_intervention(my_graph, val, ts, unintervenable_vars, random_seed,
label, external_independencies=external_independencies,
eps=None)
# Then
solution = ('3', -2.1165126341215634)
assert ans == solution
| 5,350 | 45.12931 | 139 |
py
|
correlate
|
correlate-master/intervention_proposal/simulate.py
| 3 | 0 | 0 |
py
|
|
correlate
|
correlate-master/intervention_proposal/propose_from_eq.py
|
import numpy as np
from config import target_label, verbosity_thesis
def drop_unintervenable_variables(target_eq, measured_labels):
"""
drop variables from equations which can't be intervened upon
"""
# names of unintervenable vars
# targetlabel
unintervenable_vars = ['u_'+str(target_label)]
# measured vars
for measured_label_idx in range(len(measured_labels)):
measured_labels[measured_label_idx] = 'u_' + measured_labels[measured_label_idx]
# loop through equations
for eq_idx in range(len(target_eq)):
eq = target_eq[eq_idx]
# get var names
var_names_in_eq = eq.free_symbols
# make var_names_in_eq a list of strings
var_names_in_eq = [str(var_name) for var_name in var_names_in_eq]
# loop through var names in eq
for var_name_in_eq in var_names_in_eq:
# check if var_name_in_eq is unintervenable
if var_name_in_eq not in measured_labels: # todo remove after check
print('todo check')
if var_name_in_eq in unintervenable_vars or var_name_in_eq not in measured_labels:
# if unintervenable, drop var name from eq
target_eq[eq_idx] = eq.subs(var_name_in_eq, 0)
return target_eq
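# Illustrative sympy sketch of the substitution used above (the symbols x and u_target
# are assumptions for readability, not variables of this project): zeroing out a term
# that cannot be intervened on.
def _subs_sketch():
    import sympy
    x, u_target = sympy.symbols('x u_target')
    eq = 2 * x + 3 * u_target
    return eq.subs(u_target, 0)  # -> 2*x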
def find_most_optimistic_intervention(target_eqs):
"""
find variable name with the largest absolute coefficient in target_eq
input: target_eqs
output: most_optimistic_intervention
"""
largest_abs_coeff = 0
largest_coeff = 0
best_intervention_var_name = None
most_optimistic_graph_idx = None
for equation_idx in range(len(target_eqs)):
# get var names
var_names = [str(var_name) for var_name in target_eqs[equation_idx].free_symbols]
# get coefficients
coeffs = [target_eqs[equation_idx].coeff(var_name) for var_name in var_names]
# get most extreme coeff
abs_coeffs = [np.abs(coeff) for coeff in coeffs]
if len(abs_coeffs) > 0:
largest_abs_coeff_in_one_graph = np.max(abs_coeffs)
largest_coeff_in_one_graph = np.max(coeffs)
# if better coeff is found
if np.abs(largest_abs_coeff) < np.abs(largest_abs_coeff_in_one_graph):
# update value of most optimistic intervention
largest_abs_coeff = largest_abs_coeff_in_one_graph
largest_coeff = largest_coeff_in_one_graph
# update most optimistic intervention
best_intervention_var_name = var_names[np.argmax(np.abs(coeffs))]
most_optimistic_graph_idx = equation_idx
if verbosity_thesis >0:
print('largest_abs_coeff: ' + str(largest_abs_coeff))
print('best_intervention: ' + str(best_intervention_var_name))
print('most_optimistic_graph_idx: ' + str(most_optimistic_graph_idx))
return largest_abs_coeff, best_intervention_var_name, most_optimistic_graph_idx, largest_coeff
# # load target_ans_per_graph_dict from file via pickle
# with open(checkpoint_path+'target_eq_chr.pkl', 'rb') as f:
# target_eq = pickle.load(f)
#
# target_eq = drop_unintervenable_variables(target_eq, measured_labels)
#
# largest_abs_coeff, best_intervention, most_optimistic_graph_idx = find_most_optimistic_intervention(target_eq)
# print()
| 3,319 | 34.319149 | 112 |
py
|
correlate
|
correlate-master/intervention_proposal/get_intervention.py
|
import itertools
from multiprocessing import Pool
from matplotlib import pyplot as plt
from scipy.stats import norm
from tigramite import plotting as tp
from tqdm import tqdm
from config import private_folder_path, show_plots
import numpy as np
import pandas as pd
from config import verbosity_thesis, target_label, tau_max, percentile, n_samples_simulation
from data_generation import data_generator
def load_results(name_extension):
val_min = np.load(str(private_folder_path) + 'val_min_' + str(name_extension) + '.npy', allow_pickle=True)
graph = np.load(str(private_folder_path) + 'graph_' + str(name_extension) + '.npy', allow_pickle=True)
var_names = np.load(str(private_folder_path) + 'var_names_' + str(name_extension) + '.npy', allow_pickle=True)
print('Attention: val_min, graph, var_names loaded from file')
return val_min, graph, var_names
def lin_f(x):
return x
def graph_to_scm(my_graph, val):
"""
input: graph, val
output: scm
"""
scm = {}
for effect in range(len(my_graph[0])):
scm.update({effect: []})
effect_list = []
for cause in range(len(my_graph)):
# scm.update({cause: []})
for tau in range(len(my_graph[cause][effect])):
if my_graph[cause][effect][tau] in ['', '<--']:
continue
elif my_graph[cause][effect][tau] == '-->':
effect_list.append(((cause, -tau), val[cause][effect][tau], lin_f))
else:
raise ValueError('graph[cause][effect][tau] not in ["", "-->", "<--"]')
scm[effect] = effect_list
return scm
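# Illustrative usage sketch for graph_to_scm; the 2-variable graph and coefficients
# below are assumptions chosen for readability, not values from this project.
def _graph_to_scm_sketch():
    import numpy as np
    graph = np.array([[['', '-->'], ['-->', '']],
                      [['', ''], ['', '-->']]])
    val = np.array([[[0.0, 0.5], [0.4, 0.0]],
                    [[0.0, 0.0], [0.0, 0.9]]])
    # expected: {0: [((0, -1), 0.5, lin_f)],
    #            1: [((0, 0), 0.4, lin_f), ((1, -1), 0.9, lin_f)]}
    return graph_to_scm(graph, val)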
def drop_edges_for_cycle_detection(my_graph):
""" set lagged edgemarks to "".
and set contemporaneous edgemarks to "" """
my_graph_without_lagged_variables = my_graph.copy()
for cause in range(len(my_graph_without_lagged_variables)):
for effect in range(len(my_graph_without_lagged_variables[cause])):
for tau in range(len(my_graph_without_lagged_variables[cause][effect])):
if tau > 0:
my_graph_without_lagged_variables[cause][effect][tau] = ""
if tau == 0:
if my_graph_without_lagged_variables[cause][effect][tau] == '<->':
my_graph_without_lagged_variables[cause][effect][tau] = ''
""" remove node if they don't have at least one incoming and outgoing edgemark, as they then cant be in the cycle"""
# for cause in range(len(my_graph_without_lagged_variables)):
# if len(my_graph_without_lagged_variables[cause]) == 0:
# my_graph_without_lagged_variables.pop(cause)
return my_graph_without_lagged_variables
def make_redundant_information_with_symmetry(graph, val):
"""
make redundant link information of a graph with diagonal symmetry in matrix representation.
e.g. A-->B = B<--A
"""
# if arrow is forward pointing insert symmetric backward arrow
for i in range(graph.shape[0]):
for j in range(graph.shape[1]):
if i != j:
# only write in empty cells
if graph[j, i, 0] == '':
pass
# pass if original cell also already empty
original_arrow = graph[i, j, 0]
if original_arrow == '':
pass
# insert symmetric arrow
elif original_arrow == '-->':
graph[j, i, 0] = '<--'
elif original_arrow == '<--':
graph[j, i, 0] = '-->'
elif original_arrow == 'x-x':
graph[j, i, 0] = 'x-x'
elif original_arrow == 'o-o':
graph[j, i, 0] = 'o-o'
elif original_arrow == '<->':
graph[j, i, 0] = '<->'
elif original_arrow == 'o->':
graph[j, i, 0] = '<-o'
elif original_arrow == '<-o':
graph[j, i, 0] = 'o->'
else: # if arrow is not forward pointing, error
raise ValueError('Error: graph[i, j, tau] is not an arrow')
val[j, i, 0] = val[i, j, 0]
return graph, val
def plot_graph(val_min, pag, my_var_names, link_colorbar_label, make_redundant):
if make_redundant:
graph_redun, val_redun = make_redundant_information_with_symmetry(pag.copy(), val_min.copy())
else:
graph_redun = pag.copy()
val_redun = val_min.copy()
tp.plot_graph(
val_matrix=val_redun,
link_matrix=graph_redun,
var_names=my_var_names,
link_colorbar_label=link_colorbar_label,
figsize=(10, 6),
)
plt.show()
def get_all_tau_external_independencies_wrt_target(external_independencies, var_names):
"""
if a var is independent of target_label in the interventional data for all taus, return it so it can be treated as unintervenable
"""
if external_independencies is None or len(external_independencies) == 0:
return []
# external_independencies to list without lag
external_independencies_str = []
for external_independency in external_independencies:
external_independencies_str.append(list(external_independency[0:-1])) # todo why is that needed?
# external_independencies to string labels
for external_independency_idx in range(len(external_independencies_str)):
external_independency = external_independencies_str[external_independency_idx]
for var_idx in range(len(external_independency)):
var = external_independency[var_idx]
external_independencies_str[external_independency_idx][var_idx] = var_names[var]
#
all_tau_external_independencies_wrt_target = []
for external_independency in external_independencies_str:
# count how often the external independency in external_independencies_str
if external_independency[1] == target_label:
if external_independencies_str.count(external_independency) > tau_max:
all_tau_external_independencies_wrt_target.append(external_independency[0])
# remove duplicates
all_tau_external_independencies_wrt_target = list(set(all_tau_external_independencies_wrt_target))
return all_tau_external_independencies_wrt_target
def drop_redundant_information_due_to_symmetry(graph):
"""
sometimes there is a redundant diagonal symmetry in matrix representation.
if so, dropping values above diagonal
"""
# iterate through 3ed dimension (tau) of graph
for tau in range(graph.shape[2]):
# drop all values of upper right triangle
for i in range(graph.shape[0]):
for j in range(graph.shape[1]):
if i < j:
edge = graph[i, j, tau]
correspoding_edge = graph[j, i, tau]
if edge == '' and correspoding_edge == '':
pass
elif edge == '-->' and correspoding_edge == '<--':
graph[i, j, tau] = ''
elif edge == '<--' and correspoding_edge == '-->':
graph[i, j, tau] = ''
elif edge == 'o->' and correspoding_edge == '<-o':
graph[i, j, tau] = ''
elif edge == '<-o' and correspoding_edge == 'o->':
graph[i, j, tau] = ''
elif edge == '<->' and correspoding_edge == '<->':
graph[i, j, tau] = ''
elif edge == 'x->' and correspoding_edge == '<-x':
graph[i, j, tau] = ''
elif edge == '<-x' and correspoding_edge == 'x->':
graph[i, j, tau] = ''
elif edge == 'o-o' and correspoding_edge == 'o-o':
graph[i, j, tau] = ''
elif edge == 'x-x' and correspoding_edge == 'x-x':
graph[i, j, tau] = ''
elif edge == 'x-o' and correspoding_edge == 'o-x':
graph[i, j, tau] = ''
elif edge == 'o-x' and correspoding_edge == 'x-o':
graph[i, j, tau] = ''
else:
pass
return graph
def get_ambiguous_graph_locations(graph):
"""
1. Locate ambiguous edgemarks of a graph by storing their i, j, k matrix indices.
2. store their ambiguous original link.
3. store their new possible unambiguous links.
return:
- [i, j, k, [original_links (ambiguous)], [[new_links (unambiguous)]]]
- e.g. [0, 1, 0, ['o-o'], [["-->", " <->", "<--"]]]
"""
# ambigious edgemark list
ambiguous_edgemark_list = [
"o->", # -->, <->
"x->", # -->, <->
"<-o", # <--, <->
"<-x", # <--, <->
"o-o", # -->, <->, <--
"x-x", # -->, <->, <--
"x-o", # -->, <->, <--
"o-x"] # -->, <->, <--
new_links_combinations = [
['-->', '<->'],
['-->', '<->'],
['<--', '<->'],
['<--', '<->'],
['-->', '<->', '<--'],
['-->', '<->', '<--'],
['-->', '<->', '<--'],
['-->', '<->', '<--']]
ambiguous_locations = [] # [i, j, k, original_link, new_links
# loop through all chars in graph:
for i in range(graph.shape[0]):
for j in range(graph.shape[1]):
for k in range(graph.shape[2]):
original_link = graph[i, j, k]
# for each char in string check if it is ambiguous
if original_link in ambiguous_edgemark_list:
# get index of ambiguous edgemark
index = ambiguous_edgemark_list.index(original_link)
# append ambiguous location
ambiguous_locations.append([i, j, k, original_link, new_links_combinations[index]])
return ambiguous_locations
def get_number_of_graph_combinations(ambiguous_locations):
"""
get number of graph combinations
input: ambiguous_locations
- [i, j, k, original_link, new_links]
- e.g. [0, 1, 0, ['o-o'], [["-->", " <->", "<--"]]]
"""
number_of_graph_combinations = 1
for ambiguous_location in ambiguous_locations:
number_of_graph_combinations = number_of_graph_combinations * len(ambiguous_location[4])
return number_of_graph_combinations
def get_unambiguous_links(ambiguous_locations):
"""
for each ambiguous location get the list of new possible links
return: list of lists of new links
input: ambiguous_locations
"""
# of every list in ambiguous_locations, get 4th element (new_links) in a new list
unambiguous_links = [] # [i, j, k, new_links]
for ambiguous_location in ambiguous_locations:
unambiguous_links.append([ambiguous_location[4]])
return unambiguous_links
def get_links_permutations(corresponding_unambiguous_links_list):
"""
input: corresponding_unambiguous_links_list. each item is a list of unambiguous links corresponding to an ambiguous
link.
output: permutations_of_uniquified_links contains all links Permutations of possible links.
"""
corresponding_unambiguous_links_list = [item for sublist in corresponding_unambiguous_links_list for item in
sublist]
permutations_of_uniquified_links = list(
itertools.product(*corresponding_unambiguous_links_list)) # https://stackoverflow.com/a/2853239/7762867
# if not links_permutations:
# links_permutations = np.transpose(np.asarray(corresponding_unambiguous_links_list[0]))
return permutations_of_uniquified_links
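# Illustrative sketch: with two ambiguous links offering 2 and 3 candidate edgemarks,
# itertools.product enumerates all 2 * 3 = 6 permutations (the marks are assumptions).
def _links_permutations_sketch():
    import itertools
    candidate_marks = [['-->', '<->'], ['-->', '<->', '<--']]
    return list(itertools.product(*candidate_marks))  # 6 tuples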
def make_links_point_forward(graph):
graph_forward = np.copy(graph)
# iterate through 3ed dimension (tau) of graph
for tau in range(graph.shape[2]):
# if value == '<--', then switch i and j and change value to '-->'
for i in range(graph.shape[0]):
for j in range(graph.shape[1]):
if graph[i, j, tau] == '<--':
graph_forward[i, j, tau] = ''
if graph[j, i, tau] != '' and i != j:
raise ValueError('graph[j, i, tau] != ''')
graph_forward[j, i, tau] = '-->'
return graph_forward
def get_one_graph_combination(ambiguous_locations, links_permutations, graph_combinations, graph_idx):
for ambiguous_location_idx in range(len(ambiguous_locations)):
ambiguous_location = ambiguous_locations[ambiguous_location_idx]
# get original link
original_link = ambiguous_location[3]
# get new links
# new_links = ambiguous_location[4]
# get i, j, k location
i = ambiguous_location[0]
j = ambiguous_location[1]
k = ambiguous_location[2]
new_link = links_permutations[graph_idx][ambiguous_location_idx]
# get old link string
old_link = graph_combinations[graph_idx][i, j, k]
if i == j and new_link == '<--':
raise ValueError('i == j and new_link == <--')
# replace graph_combinations[graph_idx][i, j, k] with new_link string
graph_combinations[graph_idx][i, j, k] = old_link.replace(original_link, new_link)
# make links point forward
graph_combinations[graph_idx] = make_links_point_forward(graph_combinations[graph_idx])
return graph_combinations[graph_idx]
def create_all_graph_combinations(graph, ambiguous_locations):
"""
input: ambiguous_locations
- [i, j, k, original_link, new_links]
- e.g. [0, 1, 0, ['o-o'], [["-->", " <->", "<--"]]]
"""
# if not ambiguous_locations:
if ambiguous_locations is None or len(ambiguous_locations) == 0:
return [graph]
# if ambiguous_locations:
else:
# create number_of_graph_combinations original graphs
number_of_graph_combinations = get_number_of_graph_combinations(ambiguous_locations)
# initialize graph_combinations
graph_combinations = []
for combi_idx in range(number_of_graph_combinations):
graph_combinations.append(np.copy(graph))
corresponding_unambiguous_links_list = get_unambiguous_links(ambiguous_locations)
links_permutations = get_links_permutations(corresponding_unambiguous_links_list)
# write graph combi
for graph_idx in range(number_of_graph_combinations):
graph_combinations[graph_idx] = get_one_graph_combination(ambiguous_locations, links_permutations,
graph_combinations, graph_idx)
return graph_combinations
def compute_coeffs(multiprocessing_input):
unique_graph, unique_graph_idx, val, ts, unintervenable_vars, intervention_value_low, my_graph, random_seed_day, n_half_samples, intervention_value_high= multiprocessing_input
model = graph_to_scm(unique_graph, val)
most_extreme_coeff = 0
most_extreme_interv_var = None
# for all measured vars except unintervenable intervention_vars
for intervention_var in list(set(ts.columns) - set(unintervenable_vars)):
# intervene on intervention_var with low and high values
simulated_low_interv, health = data_generator(
scm=model,
intervention_variable=intervention_var,
intervention_value=intervention_value_low[intervention_var],
ts_old=ts,
random_seed=random_seed_day,
n_samples=n_half_samples,
labels=ts.columns,
noise_type='without'
)
# skip: cyclic contemporaneous graph (none) and 'max_lag == 0'
if health == 'good':
simulated_low_interv = pd.DataFrame(simulated_low_interv, columns=ts.columns)
target_low_interv = simulated_low_interv[target_label]
# same for high intervention value
simulated_high_interv, health = data_generator(
scm=model,
intervention_variable=intervention_var,
intervention_value=intervention_value_high[intervention_var],
ts_old=ts,
random_seed=random_seed_day,
n_samples=n_half_samples,
labels=ts.columns,
noise_type='without',
)
simulated_high_interv = pd.DataFrame(simulated_high_interv, columns=ts.columns)
target_high_interv = simulated_high_interv[target_label]
# get relative difference between low and high intervention
coeff = (target_high_interv - target_low_interv).mean()
if abs(coeff) > abs(most_extreme_coeff):
most_extreme_coeff = coeff
most_extreme_interv_var = intervention_var
elif health == 'cyclic contemporaneous scm':
mygraph_without_lagged = drop_edges_for_cycle_detection(my_graph)
if verbosity_thesis > 1 and show_plots:
print('cyclic contemporaneous scm detected for graph: ' + str(mygraph_without_lagged))
plot_graph(val, mygraph_without_lagged, ts.columns, 'contemp graph for cycle detection',
make_redundant=True)
print('skipped because cyclic contemporaneous scm')
else:
raise ValueError('health must be either good or cyclic contemporaneous scm')
return most_extreme_coeff, most_extreme_interv_var, unique_graph_idx
def find_optimistic_intervention(my_graph, val, ts, unintervenable_vars, random_seed_day,
label, external_independencies, eps
):
"""
Optimal control to find the most optimistic intervention.
"""
# don't intervene on variables that were independent of the target var in the interventional data for all taus,
# by adding them to unintervenable_vars # todo: is this essential? try without it
external_independencies_wrt_target = get_all_tau_external_independencies_wrt_target(external_independencies,
ts.columns)
if len(external_independencies_wrt_target) > 0:
unintervenable_vars = unintervenable_vars + external_independencies_wrt_target
# drop redundant info in graph
my_graph = drop_redundant_information_due_to_symmetry(my_graph)
# find ambiguous link locations
ambiguous_locations = get_ambiguous_graph_locations(my_graph)
# create a list of all unique graph combinations
graph_combinations = create_all_graph_combinations(my_graph, ambiguous_locations)
n_half_samples = int(n_samples_simulation / 2)
# get intervention value from fitted gaussian percentile
intervention_value_low = {}
intervention_value_high = {}
intervention_value_low_mid = {}
intervention_value_high_mid = {}
# intervention_value_median = {}
for var_name in ts.columns:
# Fit a normal distribution to the data:
mu, std = norm.fit(ts[var_name])
# get 95th percentile from normal distribution
intervention_value_low[var_name] = norm.ppf(1 - percentile / 100, loc=mu, scale=std)
intervention_value_high[var_name] = norm.ppf(percentile / 100, loc=mu, scale=std)
intervention_value_low_mid[var_name] = norm.ppf(0.5, loc=mu, scale=std)  # (1 - percentile / 100),
intervention_value_high_mid[var_name] = norm.ppf(0.5, loc=mu, scale=std)  # (percentile / 100),
# intervention_value_median[var_name] = norm.ppf(0.5, loc=mu, scale=std)
multiprocessing_inputs = []
for graph_idx in range(len(graph_combinations)):
multiprocessing_inputs.append([graph_combinations[graph_idx], graph_idx, val, ts, unintervenable_vars, intervention_value_low, my_graph, random_seed_day, n_half_samples, intervention_value_high])
with Pool() as pool:
results = pool.map(compute_coeffs, multiprocessing_inputs)#, position=0, leave=True, delay=0)
# results = []
# for input_m in multiprocessing_inputs:
# results.append(compute_coeffs(input_m))
# for unique_graph_idx, unique_graph in enumerate(tqdm(graph_combinations, position=0, leave=True, delay=10)):
best_intervention_var_name = None
most_optimistic_graph = my_graph
most_extreme_coeff = 0
for result in results:
coeff, interv_var, unique_graph_idx = result
unique_graph = graph_combinations[unique_graph_idx]
# if abs_coeff > largest_abs_coeff:
if np.abs(coeff) > abs(most_extreme_coeff):
most_extreme_coeff = coeff
best_intervention_var_name = interv_var
most_optimistic_graph = unique_graph
# no intervention found
if best_intervention_var_name is None:
assert label == 'actual_data'
# ignoring directions
print('no intervention found. trying again ignoring scm directions')
most_extreme_coeff, best_intervention_var_name = get_intervention_ignoring_directionalities(val.copy(),
target_label,
labels_as_str=ts.columns,
external_independencies_wrt_target=external_independencies_wrt_target,
ignore_external_independencies=False)
# still no intervention found: ignore external independencies
if best_intervention_var_name is None:
print('again no intervention found. now also ignore_external_independencies')
most_extreme_coeff, best_intervention_var_name = get_intervention_ignoring_directionalities(val.copy(),
target_label,
labels_as_str=ts.columns,
external_independencies_wrt_target=external_independencies_wrt_target,
ignore_external_independencies=True)
# cant find intervention:
if best_intervention_var_name is None:
return None, None
# intervention found
# plot most optimistic graph
if verbosity_thesis > 5 and show_plots:
plot_graph(val, most_optimistic_graph, ts.columns, label+': most optimistic', make_redundant=True)
if label == 'actual_data':
# get intervention value
# take_alternative = np.random.RandomState(random_seed_day).choice([True, False])
take_alternative = bool(np.random.binomial(1, 1-eps, 1))
print('take_alternative: check that not const', take_alternative)
elif label == 'true_scm':
take_alternative = False
else:
raise ValueError('label must be either true_scm or actual_data')
# print('median intervention = ', take_alternative, 'extreme =', not take_alternative)
if take_alternative:
intervention_value = intervention_value_high_mid[best_intervention_var_name]
else: # take opt
if most_extreme_coeff > 0:
intervention_value = intervention_value_high[best_intervention_var_name]
elif most_extreme_coeff < 0:
intervention_value = intervention_value_low[best_intervention_var_name]
else:
raise ValueError('most_extreme_coeff is 0')
return best_intervention_var_name, intervention_value
def get_intervention_ignoring_directionalities(vals, var_name_as_str, labels_as_str,
external_independencies_wrt_target, ignore_external_independencies):
# fix int vs str format
var = list(labels_as_str).index(var_name_as_str)
if ignore_external_independencies:
unintervenables_without_var = []
else:
unintervenables_without_var = [list(labels_as_str).index(var) for var in external_independencies_wrt_target]
highest_abs_corr = 0
most_extreme_val = None
most_extreme_var = []
# set vals to zero if auto corr or non-target adjacent var or unintervenable var
for i in range(vals.shape[0]):
for j in range(vals.shape[1]):
if i == j or (
i != var and j != var) or i in unintervenables_without_var or j in unintervenables_without_var:
for tau in range(vals.shape[2]):
vals[i, j, tau] = 0
# find max val in vals
for i in range(vals.shape[0]):
for j in range(vals.shape[1]):
for tau in range(vals.shape[2]):
if np.abs(vals[i, j, tau]) > highest_abs_corr:
highest_abs_corr = np.abs(vals[i, j, tau])
most_extreme_val = vals[i, j, tau]
most_extreme_var = [i, j]
# remove target from most extreme var
for i in range(len(most_extreme_var)):
if most_extreme_var[i] == var:
most_extreme_var.pop(i)
break
if len(most_extreme_var) > 1:
raise Exception('len(most_extreme_var) > 1')
elif len(most_extreme_var) == 0:
print('no intervention candidate left after zeroing out auto-correlations and unintervenable variables')
return None, None
else: # len(most_extreme_var) == 1
most_extreme_var = most_extreme_var[0]
return most_extreme_val, labels_as_str[most_extreme_var]
| 25,836 | 42.496633 | 203 |
py
|
correlate
|
correlate-master/causal_discovery/preprocessing.py
|
# Imports
## use `%matplotlib notebook` for interactive figures
# plt.style.use('ggplot')
from datetime import timedelta
import numpy as np
import pandas as pd
def remove_nan_seq_from_top_and_bot(df):
for column in df:
# reset index
df = df.set_index('Date')
df = df.reset_index()
# array with indices of NaN entries
indices_of_nans = df.loc[pd.isna(df[column]), :].index.values
# remove unbroken sequence of nans from beginning of list
sequence_number = -1
for i in indices_of_nans:
sequence_number += 1
if i == sequence_number:
df = df.drop([i], axis=0)
else:
break
# remove unbroken sequence of nans from end of list
# reset index
df = df.set_index('Date')
df = df.reset_index()
indices_of_nans = df.loc[pd.isna(df[column]), :].index.values
indices_of_nans = np.flip(indices_of_nans)
len_df = len(df)
sequence_number = len_df
for i in indices_of_nans:
sequence_number -= 1
if i == sequence_number:
df = df.drop([i], axis=0)
else:
break
# print remaining nans
remaining_nans = df.loc[pd.isna(df[column]), :].index.values
if len(remaining_nans) > 0:
print('remaining Nans in ' + str(column) + ': ', remaining_nans)
return df
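# Illustrative usage sketch (dates and values are assumptions): the leading and trailing
# NaN rows are dropped, while the interior NaN in row 2 is kept and reported.
def _trim_sketch():
    import numpy as np
    import pandas as pd
    df = pd.DataFrame({'Date': pd.date_range('2022-01-01', periods=6),
                       'x': [np.nan, 1.0, np.nan, 2.0, 3.0, np.nan]})
    return remove_nan_seq_from_top_and_bot(df)  # 4 rows remain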
def non_contemporary_time_series_generation(df1):
"""
so far works only when var 1 happens before var 2, e.g. sleep before exercise
Parameters
----------
df1[Date, var happening first, var happening second]: dataframe with format: one row per day
Returns
-------
dataframe with format: 2 rows per day
"""
# insert blanc row after every row
df1['Date'] = pd.to_datetime(df1['Date'], format='%Y-%m-%d %H:%M')
# df1['Date'] = datetime.strptime(df1['Date'], '%Y-%m-%dT% H:%M:%S.%f')
df1.index = range(1, 2 * len(df1) + 1, 2)
df = df1.reindex(index=range(2 * len(df1)))
# modify df:
# 1. add morning date time
# 2. write heart points
# 3. write sleep eff
# 4. write evening date time
for i, row in df.iterrows():
if i % 2 == 0:
if i != 0:
df.loc[i, df.columns[0]] = df.loc[i - 1, df.columns[0]] + timedelta(hours=7, minutes=0)
df.loc[i, df.columns[2]] = df.loc[i - 1, df.columns[2]]
if i < len(df):
df.loc[i, df.columns[1]] = df.loc[i + 1, df.columns[1]]
else: # i % 2 == 1:
# df.loc[i, 'SleepEfficiency'] = df.loc[i+1, 'SleepEfficiency']
df.loc[i, df.columns[0]] = df.loc[i, df.columns[0]] + timedelta(hours=23, minutes=0)
# df.loc[i, 'HeartPoints'] = 1.0#df.loc[i-1, 'HeartPoints']
# df.loc[i, 'SleepEfficiency'] = 1.0#df.loc[i+1, 'SleepEfficiency']
df = df.iloc[1:] # drop first row as it's missing data
return df
| 2,986 | 30.442105 | 103 |
py
|
correlate
|
correlate-master/causal_discovery/gen_configs.py
|
import numpy as np
from config import n_scms
def define_settings():
"""
generate settings for simulation study
"""
settings_default_and_list = {
# n obs before first intervention
'n_ini_obs': [50, [10, 50, 100]],
# n measured vars
'n_vars_measured': [5, np.arange(5, 11, 2)],
# number of additional latents
'n_latents': [2, np.arange(0, 4, 1)],
# significance threshold to keep an adjacency
'alpha': [0.5, [0.01,0.05,0.1,0.2,0.4]],
# how often in a row should the same intervention be applied?
'n_samples_per_generation': [1, np.arange(0, 5, 2)]
}
#
# # settings_default_and_list = {
# # # n obs before first intervention
# # 'n_ini_obs': [200, [200]],
# #
# # # n measured vars
# # 'n_vars_measured': [5, [5]],
# #
# # # fraction of additional latents
# ## 'frac_latents': [0.3, [0.3]],
# #
# # # significance threshold to keep an adjacency
# # 'alpha': [0.5, [0.5]],
# #
# # # how often in a row should the same intervention be applied?
# # 'n_samples_per_generation': [1, [1]]
# # }
#
# # check if settings are valid
# if max(settings_default_and_list['n_vars_measured'][0], max(settings_default_and_list['n_vars_measured'][1])) > 99:
# raise ValueError(
# 'Config error. n_vars_measured must have <3 digits. or change len(intervention_variable)>2: in data_generator')
#
# # get default settings
# default_settings = []
# for key, value in settings_default_and_list.items():
# default_settings.append(value[0])
#
# # generate settings
# all_param_study_settings = [[default_settings]]
# total_scms = 1
# for var in settings_default_and_list.keys():
# # add default setting
# # all_param_study_settings.append([for var in settings_default_and_list.keys()])
# this_setting_study_list = settings_default_and_list[var][1]
# one_param_study_settings = []
# for setting in this_setting_study_list:
# one_param_setting = []
# total_scms += 1
# for var2 in settings_default_and_list.keys():
# if var2 != var:
# one_param_setting.append(settings_default_and_list[var2][0])
# else:
# one_param_setting.append(setting)
# one_param_study_settings.append(np.array(one_param_setting, dtype=object))
# all_param_study_settings.append(np.array(one_param_study_settings, dtype=object))
# print('total_scms in settings:', total_scms * n_scms)
# save_str, ini obs, #vars, n_latents, obs_alpha, interv_alpha, n_samples_per_generation,nth
# alpha
all_param_study_settings = [[
np.array(['alpha',50, 5, 2, 0.01, 0.01, 1, 4], dtype=object),
np.array(['alpha',50, 5, 2, 0.05, 0.05, 1, 4], dtype=object),
np.array(['alpha',50, 5, 2, 0.1, 0.1, 1, 4], dtype=object),
# np.array(['alpha',50, 5, 2, 0.2, 0.2, 1, 4], dtype=object),
# np.array(['alpha',50, 5, 2, 0.4, 0.4, 1, 4], dtype=object),
]]
# # intervene very nth time
# all_param_study_settings = [[
# np.array(['nth',50, 5, 2, 0.05, 0.05, 1, 1], dtype=object),
# np.array(['nth',50, 5, 2, 0.05, 0.05, 1, 2], dtype=object),
# np.array(['nth',50, 5, 2, 0.05, 0.05, 1, 4], dtype=object),
# np.array(['nth',50, 5, 2, 0.05, 0.05, 1, np.inf], dtype=object),
# ]]
# # n ini obs
# all_param_study_settings = [[
# np.array(['n_ini_obs',5, 5, 2, 0.05, 0.05, 1, 4], dtype=object),
# np.array(['n_ini_obs',25, 5, 2, 0.05, 0.05, 1, 4], dtype=object),
# np.array(['n_ini_obs',125, 5, 2, 0.05, 0.05, 1, 4], dtype=object),
# np.array(['n_ini_obs',625, 5, 2, 0.05, 0.05, 1, 4], dtype=object),
# ]]
#
# n latents
# all_param_study_settings = [[
# np.array(['latents',50, 5, 0, 0.05, 0.05, 1, 4], dtype=object),
# np.array(['latents',50, 5, 2, 0.05, 0.05, 1, 4], dtype=object),
# np.array(['latents',50, 5, 4, 0.05, 0.05, 1, 4], dtype=object),
# np.array(['latents',50, 5, 6, 0.05, 0.05, 1, 4], dtype=object),
# ]]
# n observables
# all_param_study_settings = [[
# np.array(['n_obs_vars',50, 2, 2, 0.05, 0.05, 1, 4], dtype=object),
# np.array(['n_obs_vars',50, 4, 2, 0.05, 0.05, 1, 4], dtype=object),
# np.array(['n_obs_vars',50, 6, 2, 0.05, 0.05, 1, 4], dtype=object),
# np.array(['n_obs_vars',50, 8, 2, 0.05, 0.05, 1, 4], dtype=object),
# np.array(['n_obs_vars', 50, 10, 2, 0.05, 0.05, 1, 4], dtype=object),
# np.array(['n_obs_vars', 50, 12, 2, 0.05, 0.05, 1, 4], dtype=object),
# ]]
# default
all_param_study_settings = [[
np.array(['no-interv-discov1',50, 5, 2, 0.05, 0.5, 1, 4], dtype=object),
]]
# all_param_study_settings = [[
# np.array(['default3',50, 5, 2, 0.05, 0.5, 1, 4], dtype=object),
# ]]
return all_param_study_settings
| 5,113 | 38.643411 | 125 |
py
|
correlate
|
correlate-master/causal_discovery/interventional_discovery.py
|
import numpy as np
import pandas as pd
import pingouin as pg
from scipy.stats import pearsonr
from config import verbosity_thesis, tau_max, target_label, interventional_discovery_on
from data_generation import labels_to_ints
def interventional_pass_filter(ts, was_intervened, tau):
"""
return only data with interventions
"""
# collect, per row, only the samples in which at least one variable was intervened on
# initialize empty arrays with one column per variable
interventional_data = np.empty((0, was_intervened.shape[1]))
where_intervened = np.empty((0, was_intervened.shape[1]), dtype=bool)
for row in range(len(was_intervened)):
# drop row if all its values are False
if True in was_intervened.iloc[row].array:
# add was_intervened_new to where_intervened as new row
where_intervened = np.vstack((where_intervened, was_intervened.iloc[row]))
# add ts_new to interventional_data as new row
interventional_data = np.vstack((interventional_data, ts.iloc[row]))
# to df
# where_intervened to dataframe with columns of was_intervened
where_intervened = pd.DataFrame(where_intervened, columns=was_intervened.columns)
# interventional_data to dataframe with columns of ts
interventional_data = pd.DataFrame(interventional_data, columns=ts.columns)
return interventional_data, where_intervened
def get_interventional_data_per_var(df, was_intervened, tau):
# drop observational samples
df, was_intervened = interventional_pass_filter(df, was_intervened, tau)
# ini dict with 2d array for each variable
interventional_data_per_var = {}
for var in was_intervened.columns:
interventional_data_per_var[var] = np.empty((0, was_intervened.shape[1]))
# fill dict with data
for row in range(len(was_intervened)):
for var in was_intervened.columns:
if was_intervened.iloc[row][var]:
interventional_data_per_var[var] = np.vstack((interventional_data_per_var[var], df.iloc[row]))
# make dict of arrays to dict of dataframes
interventional_dict_of_dfs = {}
for var in interventional_data_per_var:
interventional_dict_of_dfs[var] = pd.DataFrame(interventional_data_per_var[var], columns=df.columns)
return interventional_dict_of_dfs
def add_median_non_interventional_data(cause_and_effect_tau_shifted, df, cause, effect, n_ini_obs):
"""add median non-interventional data to interventional data which will allow to see if there is a difference"""
# get median of un-intervened data of the first n_ini_obs samples
median_unintervened_data = df.iloc[:n_ini_obs].median()
interventional_samples = len(cause_and_effect_tau_shifted)
median_non_intervened_cause_effect = np.array(
[[median_unintervened_data[cause], median_unintervened_data[effect]] for i in range(interventional_samples)])
# v-stack cause_and_effect_tau_shifted with median_non_intervened_cause_effect then to df with headers
cause_and_effect_tau_shifted = pd.DataFrame(
np.vstack((cause_and_effect_tau_shifted, median_non_intervened_cause_effect)), columns=['cause', 'effect'])
return cause_and_effect_tau_shifted
def get_probable_parents(effect, pag_edgemarks, measured_labels):
"""
get probable parents of effect variable
output: list of ['var', 'tau']
probable parent has edgemark in ['-->', 'o->', 'x->']
"""
probable_parents = []
effect_int = labels_to_ints(measured_labels, effect)
# iterate over tau
for tau in range(0, tau_max + 1):
# search for causes is column of effect var
effect_col = pag_edgemarks[:, effect_int, tau]
for item_idx in range(len(effect_col)):
item = effect_col[item_idx]
if item in ['-->', 'o->', 'x->']:
probable_parents.append([measured_labels[item_idx], str(tau)])
# search for causes is row of effect var
effect_row = pag_edgemarks[effect_int, :, tau]
for item_idx in range(len(effect_row)):
item = effect_row[item_idx]
if item in ['<--', '<-o', '<-x']:
probable_parents.append([measured_labels[item_idx], str(tau)])
# remove duplicates
if len(probable_parents) > 0:
found_duplicate = True
while found_duplicate:
found_duplicate = False
for parents_idx in range(len(probable_parents)):
parents = probable_parents[parents_idx]
# count how often parents is in probable_parents
count = 0
for parents_idx2 in range(len(probable_parents)):
if parents == probable_parents[parents_idx2]:
count += 1
if count > 1:
found_duplicate = True
probable_parents.pop(parents_idx)
break
return probable_parents
def remove_cause_tau_var(probable_parents, cause, tau):
# in probable_parents remove item if item == [cause, tau]
probable_parents = list(probable_parents)
tau=str(tau)
for item_idx in range(len(probable_parents)):
item = probable_parents[item_idx]
if item[0] == cause and item[1] == tau:
# remove item from probable_parents
probable_parents.pop(item_idx)
break
return probable_parents
def get_conditioning_df(probable_parents, df, measured_labels):
"""
get probable_parents' data and shift by tau
"""
if len(probable_parents) <1:
# return empty df
return pd.DataFrame()
else:
# get conditioning set
conditioning_df = []
column_names = []
for probable_parent in probable_parents:
to_add = df.loc[:, probable_parent[0]].copy().shift(periods=int(probable_parent[1]))
conditioning_df.append(to_add)
# column names in format 'cause_tau'
column_names.append(probable_parent[0] + '_' + probable_parent[1])
# convert to dataframe
conditioning_df = pd.DataFrame(np.array(conditioning_df).T, columns=column_names)
return conditioning_df
def align_cause_effect_due_to_lag(cause_and_effect, tau):
# ini tau shifted var
cause_and_effect_tau_shifted = cause_and_effect.copy()
if tau == 0:
return cause_and_effect_tau_shifted
else:
# shift cause down by tau, to emulate contemporaneous cause
cause_and_effect_tau_shifted['cause'] = cause_and_effect_tau_shifted[
'cause'].copy().shift(periods=tau)
return cause_and_effect_tau_shifted
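# Illustrative sketch (toy values): shifting the cause column down by tau aligns a
# lagged cause with its effect, so the pair can be treated as contemporaneous; the
# first tau entries of 'cause' become NaN.
def _lag_align_sketch():
    import pandas as pd
    cause_and_effect = pd.DataFrame({'cause': [1.0, 2.0, 3.0],
                                     'effect': [10.0, 20.0, 30.0]})
    return align_cause_effect_due_to_lag(cause_and_effect, tau=1)  # cause -> [NaN, 1.0, 2.0]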
def remove_weaker_links_of_contempt_cycles(dependencies_from_interv_data):
# get contemporaneous links
contemporaneous_links = []
for var in dependencies_from_interv_data:
if var[2] == 0:
contemporaneous_links.append(var)
# for all cont links
removed_link = True
while removed_link:
removed_link = False
for contemporaneous_link in contemporaneous_links:
# remove current link and check if there is a reverse link
cont_links_wo_this_link = [link for link in contemporaneous_links if link != contemporaneous_link]
for cont_link_wo_this_link in cont_links_wo_this_link:
if cont_link_wo_this_link[0] == contemporaneous_link[1] and cont_link_wo_this_link[1] == contemporaneous_link[0]:
# remove link with higher p-value
if cont_link_wo_this_link[3] > contemporaneous_link[3]:
contemporaneous_links.remove(cont_link_wo_this_link)
dependencies_from_interv_data.remove(cont_link_wo_this_link)
cont_links_wo_this_link.remove(cont_link_wo_this_link)
else:
contemporaneous_links.remove(contemporaneous_link)
dependencies_from_interv_data.remove(contemporaneous_link)
removed_link = True
break
if removed_link:
break
return dependencies_from_interv_data
def get_interv_tau_aligned_cause_eff_df(d, w, cause, eff, tau):
"""
input:
d: dataframe with cause and effect
w: was intervened df
cause: cause variable
eff: effect variable
tau: time delay
"""
# ignore contemporaneous auto dependencies: return empty df if eff == cause and tau == 0
if (eff == cause and tau == 0) or (w[cause]==False).all():
return pd.DataFrame([], columns=[cause, eff+'_'+str(tau)])
# get indices where w[cause] == True
interv_cause_dates = d.loc[:, cause].copy().loc[w[cause]].index
cause_eff_df = []
for date in interv_cause_dates:
# skip if date + tau is beyond the data, or if the effect at date + tau was itself intervened on
if date + tau > d.index[-1]:
continue
if w.loc[date + tau, eff]:
continue
# get cause and effect data for date + tau
cause_eff_df.append([d.loc[date, cause].copy(),d.loc[date + tau, eff].copy()])
# cause_eff_df to df with columns cause and effect
cause_eff_df = pd.DataFrame(cause_eff_df, columns=[cause, eff+'_'+str(tau)])
return cause_eff_df
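# Illustrative sketch (toy data; the column names '0' and '1' are assumptions):
# interventions on '0' happen at t=1 and t=3; only t=1 can be paired with '1' at t=2,
# because t=3 has no observation at t=4.
def _interv_aligned_pairs_sketch():
    import pandas as pd
    d = pd.DataFrame({'0': [1.0, 2.0, 3.0, 4.0], '1': [5.0, 6.0, 7.0, 8.0]})
    w = pd.DataFrame({'0': [False, True, False, True], '1': [False] * 4})
    return get_interv_tau_aligned_cause_eff_df(d, w, cause='0', eff='1', tau=1)  # one row: (2.0, 7.0)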
def get_independencies_from_interv_data(df, was_intervened, pc_alpha):
"""
orient links with interventional data.
test conditional independence for each var to each other var for all taus.
output: (effect, cause or intervened, tau, p-val)
"""
if interventional_discovery_on == False:
return [], []
independencies_from_interv_data = []
dependencies_from_interv_data = []
# iterate over all taus
for tau in range(tau_max + 1):
# # get interventional data per variable
# interventional_dict = get_interventional_data_per_var(df, was_intervened, tau)
# ini dependencies list
# iterate over causes/interventions
for cause in df.columns:
# # stop if less than 3 samples, as corr coeff is not defined
# if len(interventional_dict[cause]) > 2:
# get data where on one specific var was intervened on
# df_with_intervention_on_one_cause = interventional_dict[cause]
# get values of cause var
# cause_values = df_with_intervention_on_one_cause[cause]
# skip if cause var is const, as corr is not defined
# if len(np.unique(cause_values)) > 1:
# iterate over all other (potentially effect) variables
for effect in df.columns:
# probable_parents = get_probable_parents(effect, pag_edgemarks, measured_labels)
# get values of effect var
# effect_values = df_with_intervention_on_one_cause[effect]
# cause and effect series as columns in df
# cause_and_effect = pd.DataFrame(dict(cause=cause_values, effect=effect_values))
# add median non-interventional data to interventional data which will allow to see if there is a difference
# cause_and_effect_non_interv_added = add_median_non_interventional_data(cause_and_effect.copy(), df, # todo probably not correct
# cause, effect, n_ini_obs)
interv_tau_aligned_cause_eff_df = get_interv_tau_aligned_cause_eff_df(df, was_intervened, cause, effect, tau)
# skip if there are too few aligned samples for a meaningful correlation estimate
if len(interv_tau_aligned_cause_eff_df) < 5:
continue
# get conditioning variables
# conditioning_vars = remove_cause_tau_var(probable_parents, cause, tau)
# returns probable edgemarks of effect variable with format [probable parent, tau]
# conditioning_df = get_conditioning_df(conditioning_vars, df_with_intervention_on_one_cause,
# measured_labels)
# emulate contemporaneous by shifting cause down by tau
# cause_and_effect_tau_shifted = align_cause_effect_due_to_lag(cause_and_effect, tau)
# add conditioning_set to cause_and_effect_tau_shifted as columns
# cause_and_effect_condition_tau_shifted = pd.concat(
# [cause_and_effect_tau_shifted.copy(), conditioning_df], axis=1)
# cause_and_effect_condition_tau_shifted = cause_and_effect_condition_tau_shifted.dropna()
# if len(cause_and_effect_condition_tau_shifted) > 2:
# get p_val
# ans = pg.partial_corr(data=cause_and_effect_condition_tau_shifted, x='cause', y='effect',
# covar=list(conditioning_df.columns)).round(3)
# p_val = ans['p-val'].values[0] # probability of independence
# r = ans['r'].values[0] # correlation coefficient
# statistical test
r, p_val = pearsonr(interv_tau_aligned_cause_eff_df[cause],
interv_tau_aligned_cause_eff_df[effect+'_'+str(tau)])
# if significantly independent:
if p_val > 1-pc_alpha: #interv_alpha:
# save independency information # (effect == '4' and tau == 0) or (effect == '2' and tau == 1) or (effect == '0' and tau == 0) or (effect == '3' and tau == 1)
independencies_from_interv_data.append((effect, cause, tau, p_val.round(4)))
if verbosity_thesis > 2:
print("interv discovery:", cause,
" -X>", effect, "with lag=", tau, ", p-value=",
p_val)
elif verbosity_thesis > 0:
if effect == target_label:
print("interv discovery: ", cause,
" -X> target with lag", tau, "\t, p-value=",
p_val)
# if significantly dependent:
elif p_val < pc_alpha:#(1-interv_alpha)*3:
dependencies_from_interv_data.append((effect, cause, tau, p_val.round(4)))
# if a contemporaneous cycle is in dependencies_from_interv_data, remove the link with the weaker p-value
dependencies_from_interv_data = remove_weaker_links_of_contempt_cycles(dependencies_from_interv_data)
# print
for dependency in dependencies_from_interv_data:
if verbosity_thesis > 2:
print("interv discovery: intervened var ", dependency[1],
" -->", dependency[0], "with lag=", dependency[2], ", p-value=",
dependency[3])
elif verbosity_thesis > 0:
if dependency[1] == target_label:
print("interv discovery: ", dependency[1],
" --> target with lag", dependency[2], "\t, p-value=",
dependency[3])
return independencies_from_interv_data, dependencies_from_interv_data
# # load ts dataframe from file
# import os
#
# filename = os.path.abspath("LPCMCI/tmp_test.dat")
# ts = pd.read_csv(filename)
#
# # get last row of ts and append to ts, and continue with index
# last_row = ts.iloc[-1]
# # ts = ts.append(last_row)
# ts = ts.append(ts.iloc[-1], ignore_index=True)
# ts.iloc[-2, 0] = 9
# ts.iloc[-3, 0] = 8
# ts.iloc[-1, 5] = 9
# ts.iloc[-4, 0] = 9
# ts.iloc[-5, 0] = 8
# ts.iloc[-6, 0] = 9
#
# ## load was_intervened dataframe from file
# filename = os.path.abspath("LPCMCI/tmp_was_intervened.dat")
# was_intervened = pd.read_csv(filename)
# was_intervened.iloc[-2, 0] = True
# was_intervened.iloc[-3, 0] = True
# was_intervened.iloc[-1, 5] = True
# was_intervened.iloc[-4, 0] = True
# was_intervened.iloc[-5, 0] = True
# was_intervened.iloc[-6, 0] = True
#
# pag_edgemarks = np.array([[['', '-->'], ['-->', '-->'], ['', '-->'], ['', '<->'], ['', '']],
# [['<--', '-->'], ['', '-->'], ['-->', '-->'], ['<->', ''], ['-->', '']],
# [['', '-->'], ['<--', '-->'], ['', '-->'], ['', ''], ['<->', '-->']],
# [['', 'o->'], ['<->', ''], ['', '<->'], ['', '<->'], ['', '']],
# [['', '<->'], ['<--', '<->'], ['<->', '-->'], ['', ''], ['', '-->']]])
# measured_labels=['0', '2', '3', '4', '5']
# independencies_from_interv_data = get_independencies_from_interv_data(
# df=ts,
# was_intervened=was_intervened,
# interv_alpha=0.1,
# n_ini_obs=500,
# pag_edgemarks=pag_edgemarks,
# measured_labels=measured_labels)
# print()
| 16,762 | 42.427461 | 178 |
py
|
correlate
|
correlate-master/causal_discovery/helper.py
|
import numpy as np
def parabola(x, a, b, c):
x = np.array(x)
return a * x ** 2 + b * x + c
def arg_closest(lst, x):
lst = np.subtract(lst, x)
return np.where(lst == min(lst, key=abs))[0][0]
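# Illustrative sketch (values are assumptions): index of the element closest to x.
def _arg_closest_sketch():
    return arg_closest([0.1, 0.4, 0.8], 0.5)  # -> 1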
# def reduce_tau_max(correlations):
# # 3d-> 2D via reshape, 2D->1D via amax, abs
# abs_max_corr_coeff = np.absolute(np.amax(correlations.reshape(correlations.shape[0] ** 2, -1), axis=0))
#
# abs_max_corr_coeff = np.delete(abs_max_corr_coeff, 0) # remove idx 0. idk what it was for
# time_lag = list(range(0, len(abs_max_corr_coeff))) # array of time lags
# parabola_params, _ = scipy.optimize.curve_fit(parabola, time_lag, abs_max_corr_coeff) # parabola_params
# y_parabola = parabola(time_lag, *parabola_params) # y values of fitted parabola
# parabola_first_half = y_parabola[:np.argmin(y_parabola)] # keep only part of parabola which is before argmin
# tau_max = arg_closest(parabola_first_half, corr_threshold)
# print('reduced tau_max=', tau_max)
#
# # plotting
# plt.plot(abs_max_corr_coeff, label='max correlation coefficient', color='black')
# plt.plot(time_lag, y_parabola, label='quadratic fit', color='blue')
# plt.fill_between([0, len(abs_max_corr_coeff)], 0, corr_threshold,
# facecolor='red', alpha=0.3, label='below corr threshold')
# plt.axvline(tau_max, 0, 30, label='tau_max', color='red')
# plt.title('Computation of tau_max=' + str(tau_max))
# plt.ylabel('max correlation coefficient')
# plt.ylabel('time lag')
# plt.xlim([0, len(abs_max_corr_coeff)])
# plt.ylim([0, max(abs_max_corr_coeff)])
# plt.legend(loc='best')
# plt.show()
# return tau_max
| 1,695 | 41.4 | 115 |
py
|
correlate
|
correlate-master/causal_discovery/scratch_pad.py
| 0 | 0 | 0 |
py
|
|
correlate
|
correlate-master/causal_discovery/read_results.py
|
import math
import pickle
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
from config import checkpoint_path, n_scms, plots_path
def boxplot_from_df(regret_of_setting_df, x_label, y_label, save_name):
n_settings = regret_of_setting_df.shape[1]
data_values = np.zeros((n_settings * n_scms))
for column_idx, column in enumerate(regret_of_setting_df.columns):
data_values[column_idx * n_scms:(1 + column_idx) * n_scms] = regret_of_setting_df[column]
data_settings = np.zeros((n_settings * n_scms), dtype=object)
for i, setting in enumerate(regret_of_setting_df.columns):
data_settings[i * n_scms:(1 + i) * n_scms] = [setting for i in range(n_scms)] # setting * np.ones(n_scms)
if x_label == 'fraction of interventions':
data_settings = 1/data_settings
data_all = np.hstack((data_settings.reshape(-1, 1), data_values.reshape(-1, 1)))
data_all = pd.DataFrame(data_all, columns=[x_label, y_label])
# ax = sns.boxenplot(x=x_label, y=y_label, data=data_all)
ax = sns.boxplot(x=x_label, y=y_label, data=data_all, showfliers=False)
plt.tight_layout()
# ave to file
plot_path = plots_path + save_name + '.png'
plt.savefig(plot_path)
print('saved to:', plot_path)
plt.show()
plt.close()
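# Illustrative sketch of the wide-to-long reshape performed above, without the plotting;
# the settings, values, and the use of melt() are assumptions for illustration only.
def _long_format_sketch():
    import pandas as pd
    wide = pd.DataFrame({0.01: [1.0, 2.0, 3.0], 0.05: [4.0, 5.0, 6.0]})
    return wide.melt(var_name='alpha', value_name='regret')  # 6 rows, one per (setting, SCM)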
def get_regret_of_setting_df(file_name):
# file_name = 'wo-causality'
x_label = 'Regret'
y_label = 'average regret per timestep'
save_name = file_name
setting_loc = 1
# file_name = 'nth'
# x_label = 'fraction of interventions'
# y_label = 'average daily regret'
# setting_loc = 7
# save_name = file_name
#
# file_name = 'latents'
# x_label = 'number of latents'
# y_label = 'average daily regret'
# setting_loc = 3
# save_name = file_name
#
# file_name = 'n_ini_obs'
# x_label = 'number initial observations'
# y_label = 'average daily regret'
# setting_loc = 1
# save_name = file_name
#
#
# file_name = 'n_obs_vars'
# x_label = 'number of observed variables'
# y_label = 'average daily regret'
# setting_loc = 2
# save_name = file_name
# load
# regret_list_over_simulation_study 'archive2/' +
with open(checkpoint_path+ str(0) + file_name + '_regret_list_over_simulation_study.pickle', 'rb') as f:
regret_list_over_simulation_studies, simulation_studies = pickle.load(f)
# for regret_loss, setting in zip(regret_list_over_simulation_studies, simulation_studies):
# regret_loss = np.array(regret_loss)
# regret = regret_loss[:, 0]
# loss = regret_loss[:, 1]
# mean_regret = np.mean(regret)
# mean_loss = np.mean(loss)
# print('\nsetting:', setting,
# '\nmean_regret:', mean_regret,
# '\nmean_loss', mean_loss,
# '\nregret', regret,
# '\nloss', loss, '\n')
""" plot regret vs setting"""
# iterate over setting of one var
settings = []
regret_of_setting_df = []
regret_of_setting_95ile = []
cost_of_setting = []
correct_interv_vars_mean_setting = []
for simulation_study, regret_list_over_simulation_studies in zip(simulation_studies,
regret_list_over_simulation_studies):
settings.append(simulation_study[setting_loc])
regret_over_scm = np.zeros(n_scms)
cost_over_scm = np.zeros(n_scms)
n_correct_interv_vars_mean = np.zeros(n_scms)
for scm_i in range(n_scms):
regret_over_scm[scm_i] = np.mean(regret_list_over_simulation_studies[scm_i][0])
cost_over_scm[scm_i] = regret_list_over_simulation_studies[scm_i][1]
n_correct_interv_vars_mean[scm_i] = np.mean(regret_list_over_simulation_studies[scm_i][2])
regret_of_setting_df.append(regret_over_scm)
# regret_of_setting_95ile.append(np.percentile(a=regret_over_scm, q=95))
cost_of_setting.append(cost_over_scm)
correct_interv_vars_mean_setting.append(n_correct_interv_vars_mean)
regret_of_setting_df = pd.DataFrame(np.array(regret_of_setting_df).T, columns=settings)
# plot regret_list_over_simulation_studies[0][0] as timeline
# r = np.zeros((0, 200))
# for i in range(len(regret_list_over_simulation_studies)):
# plt.plot(regret_list_over_simulation_studies[i][0])
# # save to file
# plot_path = plots_path + 'regret_timeline/regret_timeline_scm_' + str(i) + '.png'
# plt.savefig(plot_path)
# # plt.close()
# # vstack regret_list_over_simulation_studies[0][i][0]
# r = np.vstack((r, regret_list_over_simulation_studies[i][0]))
# # mean across axis 0
# r = np.mean(r, axis=0)
# plt.plot(r)
# plt.show()
# get mean of each column in regret_of_setting_df
print('mean_regret_of_setting_df:', regret_of_setting_df.mean(axis=0))
print('mean_cost_of_setting:', np.mean(cost_of_setting, axis=1))
print('mean_correct_interv_vars_mean:', np.mean(correct_interv_vars_mean_setting, axis=1))
boxplot_from_df(
regret_of_setting_df,
x_label=x_label,
y_label=y_label,
save_name=save_name
)
return regret_of_setting_df, regret_list_over_simulation_studies, correct_interv_vars_mean_setting
regret_of_setting_df_1, regret_list_over_simulation_studies_1, correct_interv_vars_mean_setting_1 = get_regret_of_setting_df('default3')
regret_of_setting_df_2, regret_list_over_simulation_studies_2, correct_interv_vars_mean_setting_2 = get_regret_of_setting_df('no-interv-discov1')
# get all indices where correct_interv_vars_mean_setting_1 > correct_interv_vars_mean_setting_2
indices = np.where(correct_interv_vars_mean_setting_1[0] < correct_interv_vars_mean_setting_2[0])
print('indices:', indices)
# add regret_of_setting_df_2 to regret_of_setting_df_1 as new column with name 'no-interv-discov'
regret_of_setting_df_1['without interventional discovery'] = regret_of_setting_df_2.mean(axis=1)
# rename the first column (setting key 50) to 'default'
regret_of_setting_df_1.rename(columns={50: 'default'}, inplace=True)
boxplot_from_df(
regret_of_setting_df_1,
x_label = 'extended LPCMCI vs original LPCMCI ',
y_label = 'average regret per timestep',
save_name='w-wo-interv-discov'
)
# # regret_of_setting_df_3, regret_list_over_simulation_studies_3 = get_regret_of_setting_df('w-interv')
#
diff_1_2 = regret_of_setting_df_1 - regret_of_setting_df_2
# diff_2_3 = regret_of_setting_df_2 - regret_of_setting_df_3
#
#
#
# # elementwise addition of diff_1_2 and diff_2_3
# diff_1_2_3 = diff_1_2 + diff_2_3
#
# # argmax of diff_2_3
print('argmax of diff_1_2:', np.array(diff_1_2).argmax())
# print('argmax of diff_2_3:', np.array(diff_2_3).argmax())
# print('argmax of diff_1_2_3:', np.array(diff_1_2_3).argmax())
#
#
# def cumulator(input_list):
# return np.cumsum(input_list)
#
#
# cum_1 = cumulator(regret_list_over_simulation_studies_1[37][0])
# cum_2 = cumulator(regret_list_over_simulation_studies_2[37][0])
# cum_3 = cumulator(regret_list_over_simulation_studies_3[37][0])
#
# # plot regret_of_setting_df_1[0], regret_of_setting_df_2[0], regret_of_setting_df_3[0]
# plt.plot(cum_1, label='default')
# plt.plot(cum_2, label='no-interv-discov')
# # plt.plot(cum_3, label='w-interv')
# plt.legend()
#
# plt.show()
# pass
| 7,413 | 37.414508 | 145 |
py
|
correlate
|
correlate-master/causal_discovery/test_interventional_discovery.py
|
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal
from causal_discovery import interventional_discovery
from causal_discovery.interventional_discovery import remove_weaker_links_of_contempt_cycles, \
get_interv_tau_aligned_cause_eff_df
from config import checkpoint_path
class TestInterventionalDiscovery:
def test_get_interv_tau_aligned_cause_eff_df(self):
d = pd.DataFrame([
[1.0, 4.0, 9.0],
[2.0, 5.0, 9.0],
[3.0, 6.0, 9.0],
],columns=['0', '1', '2'])
w =pd.DataFrame([
[False, True, False],
[False, True, False],
[False, False, True],
],columns=['0', '1', '2'])
cause, eff, tau = '1', '0', 0
assert_frame_equal(get_interv_tau_aligned_cause_eff_df(d, w, cause, eff, tau),(pd.DataFrame([
[4.0, 1.0],
[5.0, 2.0],
],columns=[cause, eff+'_'+str(tau)])))
cause, eff, tau = '1', '0', 1
assert_frame_equal(get_interv_tau_aligned_cause_eff_df(d, w, cause, eff, tau), (pd.DataFrame([
[4.0, 2.0],
[5.0, 3.0],
],columns=[cause, eff+'_'+str(tau)])))
cause, eff, tau = '1', '2', 0
assert_frame_equal(get_interv_tau_aligned_cause_eff_df(d, w, cause, eff, tau), (pd.DataFrame([
[4.0, 9.0],
[5.0, 9.0],
],columns=[cause, eff+'_'+str(tau)])))
cause, eff, tau = '1', '2', 1
assert_frame_equal(get_interv_tau_aligned_cause_eff_df(d, w, cause, eff, tau), (pd.DataFrame([
[4.0, 9.0],
],columns=[cause, eff+'_'+str(tau)])))
cause, eff, tau = '2', '0', 0
assert_frame_equal(get_interv_tau_aligned_cause_eff_df(d, w, cause, eff, tau), (pd.DataFrame([
[9.0, 3.0],
],columns=[cause, eff+'_'+str(tau)])))
cause, eff, tau = '2', '0', 1
res=get_interv_tau_aligned_cause_eff_df(d, w, cause, eff, tau)
sol = pd.DataFrame([],columns=[cause, eff+'_'+str(tau)])
assert_frame_equal(res, sol)
cause, eff, tau = '2', '1', 0
res=get_interv_tau_aligned_cause_eff_df(d, w, cause, eff, tau)
sol = pd.DataFrame([
[9.0, 6.0],
],columns=[cause, eff+'_'+str(tau)])
assert_frame_equal(res, sol)
cause, eff, tau = '2', '1', 1
res=get_interv_tau_aligned_cause_eff_df(d, w, cause, eff, tau)
sol = pd.DataFrame([
],columns=[cause, eff+'_'+str(tau)])
assert_frame_equal(res, sol)
cause, eff, tau = '1', '1', 0
res=get_interv_tau_aligned_cause_eff_df(d, w, cause, eff, tau)
sol = pd.DataFrame([
],columns=[cause, eff+'_'+str(tau)])
assert_frame_equal(res, sol)
cause, eff, tau = '1', '1', 1
res=get_interv_tau_aligned_cause_eff_df(d, w, cause, eff, tau)
sol = pd.DataFrame([
[5.0,6.0],
],columns=[cause, eff+'_'+str(tau)])
assert_frame_equal(res, sol)
cause, eff, tau = '2', '2', 0
res=get_interv_tau_aligned_cause_eff_df(d, w, cause, eff, tau)
sol = pd.DataFrame([
],columns=[cause, eff+'_'+str(tau)])
assert_frame_equal(res, sol)
cause, eff, tau = '2', '2', 1
res=get_interv_tau_aligned_cause_eff_df(d, w, cause, eff, tau)
sol = pd.DataFrame([
],columns=[cause, eff+'_'+str(tau)])
assert_frame_equal(res, sol)
def test_get_probable_parents(self):
# Given
effect = '0'
measured_labels = ['0', '1', '2', '3', '4', '5', '6', '7']
pag_edgemarks = np.array([[['', '-->'], ['-->', '-->'], ['', '-->'], ['', '<->'], ['', '']],
[['<--', '-->'], ['', '-->'], ['-->', '-->'], ['<->', ''], ['-->', '']],
[['', '-->'], ['<--', '-->'], ['', '-->'], ['', ''], ['<->', '-->']],
[['', 'o->'], ['<->', ''], ['', '<->'], ['', '<->'], ['', '']],
[['', '<->'], ['<--', '<->'], ['<->', '-->'], ['', ''], ['', '-->']]]) # When
probable_parents = interventional_discovery.get_probable_parents(effect, pag_edgemarks,
measured_labels)
# Then
true_probable_parents = np.array([['0', '1'], ['1', '1'], ['2', '1'], ['3', '1']])
assert np.array_equal(true_probable_parents, probable_parents)
# 2. test
effect = '2'
pag_edgemarks = np.load(checkpoint_path + 'pag_edgemarks.npy', allow_pickle=True)
probable_parents = interventional_discovery.get_probable_parents(effect, pag_edgemarks, measured_labels)
true_probable_parents = np.array([['0', '0'], ['1', '1'], ['2', '1'], ['3', '1']])
assert np.array_equal(true_probable_parents, probable_parents)
# 3. test
effect = '3'
probable_parents = interventional_discovery.get_probable_parents(effect, pag_edgemarks,
measured_labels)
true_probable_parents = np.array([['3', '1']])
assert np.array_equal(true_probable_parents, probable_parents)
def test_remove_cause_tau_var(self):
# Given
cause = '0'
tau = 1
probable_parents = np.array([['0', '1'], ['1', '1'], ['2', '1'], ['3', '1']])
# When
conditioning_vars = interventional_discovery.remove_cause_tau_var(probable_parents, cause, tau)
# Then
true_conditioning_vars = [['1', '1'], ['2', '1'], ['3', '1']]
assert np.array_equal(true_conditioning_vars, conditioning_vars)
def test_get_conditioning_df(self):
conditioning_vars = [['1', '1'], ['2', '1'], ['3', '1']]
measured_labels = ['0', '1', '2', '3', '4', '5', '6', '7']
df_with_intervention_on_one_cause = pd.DataFrame(
[[9.0, 0.9490806, 0.23790693, -1.0366672, -2.7219908, -0.86635816, -0.54072285, -1.4470586],
[8.0, 1.2169158, -0.8612138, 0.6158505, -0.7142994, 0.62477016, -2.4664948, 0.90347844],
[9.0, -2.7442846, -0.01076746, 1.4087411, -0.66136897, 1.0595483, -2.3066196, -2.6307123],
[8.0, -1.9110425, -2.1331735, 0.91157717, -1.5807517, 2.6822305, -1.3860753, -2.2419975],
[9.0, -0.85678804, -2.2561557, -1.0304446, -1.3044108, 1.3641999, -0.4040094, 0.6902417]],
columns=['0', '1', '2', '3', '4', '5', '6', '7'])
solution = pd.DataFrame(
[
[np.NaN, np.NaN, np.NaN],
[0.94908, 0.23791, -1.03667],
[1.21692, -0.86121, 0.61585],
[-2.74428, -0.01077, 1.40874],
[-1.91104, -2.13317, 0.91158]
], columns=['1_1', '2_1', '3_1']).round(5)
res = interventional_discovery.get_conditioning_df(conditioning_vars, df_with_intervention_on_one_cause,
measured_labels).round(5)
assert_frame_equal(res, solution)
def test_align_cause_effect_due_to_lag(self):
# Given
cause_and_effect_tau_shifted = pd.DataFrame([[5.0, -1.0], [6.0, 0.0], [7.0, 1.0], [8.0, 2.0]],
columns=['cause', 'effect'])
# When
aligned_cause_and_effect_tau_shifted = interventional_discovery.align_cause_effect_due_to_lag(
cause_and_effect_tau_shifted, 1)
# Then
true_aligned_cause_and_effect_tau_shifted = pd.DataFrame([[np.NaN, -1.0], [5.0, 0.0], [6.0, 1.0], [7.0, 2.0]],
columns=['cause', 'effect'])
assert aligned_cause_and_effect_tau_shifted.equals(true_aligned_cause_and_effect_tau_shifted)
# for tau=0
# When
aligned_cause_and_effect_tau_shifted = interventional_discovery.align_cause_effect_due_to_lag(
cause_and_effect_tau_shifted, 0)
# Then
true_aligned_cause_and_effect_tau_shifted = cause_and_effect_tau_shifted
assert aligned_cause_and_effect_tau_shifted.equals(true_aligned_cause_and_effect_tau_shifted)
def test_get_independencies_from_interv_data(self):
# Given
df = pd.read_pickle(checkpoint_path + 'df.pkl')
was_intervened = pd.read_pickle(checkpoint_path + 'was_intervened.pkl')
interv_alpha = 0.7 / 3
n_ini_obs = 500
pag_edgemarks = np.load(checkpoint_path + 'pag_edgemarks.npy', allow_pickle=True)
measured_labels = ['0', '2', '3', '4', '5']
# When
        independencies, dependencies = interventional_discovery.get_independencies_from_interv_data(df, was_intervened,
                                                                                                     interv_alpha
                                                                                                     )
        # Then
        true_independencies = [('0', '3', 1, 0.404), ('4', '3', 0, 0.817), ('4', '3', 1, 0.993), ('5', '3', 0, 0.664),
                               ('5', '3', 1, 0.87)]
        assert np.array_equal(true_independencies, independencies)
def test_remove_weaker_links_of_contempt_cycles(self):
dependencies_from_interv_data = [
('0', '2', 0, 0.118),
('3', '2', 0, 0.145),
('0', '3', 1, 0.009),
('2', '3', 0, 0.012),
('5', '3', 0, 0.001)
]
dependencies_from_interv_data = remove_weaker_links_of_contempt_cycles(dependencies_from_interv_data)
assert dependencies_from_interv_data == [
('0', '2', 0, 0.118),
('0', '3', 1, 0.009),
('2', '3', 0, 0.012),
('5', '3', 0, 0.001)
]
dependencies_from_interv_data = [
('3', '0', 1, 0.1), ('0', '3', 1, 0.2),
('3', '1', 0, 0.1), ('1', '3', 0, 0.2),
('4', '0', 1, 0.1), ('0', '4', 1, 0.2),
('4', '1', 0, 0.1), ('1', '4', 0, 0.2),
]
dependencies_from_interv_data = remove_weaker_links_of_contempt_cycles(dependencies_from_interv_data)
assert dependencies_from_interv_data == [
('3', '0', 1, 0.1), ('0', '3', 1, 0.2),
('3', '1', 0, 0.1),
('4', '0', 1, 0.1), ('0', '4', 1, 0.2),
('4', '1', 0, 0.1)
]
| 10,390 | 45.388393 | 120 |
py
|
correlate
|
correlate-master/causal_discovery/LPCMCI/compute_experiments.py
|
# """
# research module causal discovery:
# simulate data, causal discovery, evaluate
#
# """
# import math
# import os
# import pickle
# import random as rd
# import time
#
# import numpy as np
# import tigramite.data_processing as pp
# from matplotlib import pyplot as plt
# from tigramite import plotting as tp
# from tigramite.independence_tests import ParCorr
#
# # Imports from code inside directory
# from causal_discovery.LPCMCI import generate_data_mod as mod
# from causal_discovery.LPCMCI import utilities as utilities
# from causal_discovery.LPCMCI import metrics_mod
# from causal_discovery.LPCMCI.lpcmci import LPCMCI
#
# # Directory to save results
# from data_generation import get_edgemarks_and_effect_sizes
#
# folder_name = "results/"
#
# samples = 1 # int number of time series realizations to generate
# verbosity = 0 # verbosity
# config_list = ['random_lineargaussian-8-8-0.2-0.5-0.5-0.6-0.3-1-500-par_corr-lpcmci_nprelim4-0.26-1'] # string that identifies a particular experiment consisting of a model and method.
# num_configs = len(config_list)
#
# time_start = time.time()
#
# if verbosity > 0:
# plot_data = True
# else:
# plot_data = False
#
# def generate_data(random_state, links, noise_types, noise_sigma, model, T):
# """
# input: links of SCM
# output: time series data (might be non-stationary)
# """
# class NoiseModel:
# def __init__(self, sigma=1):
# self.sigma = sigma
#
# def gaussian(self, T):
# # Get zero-mean unit variance gaussian distribution
# return self.sigma * random_state.randn(T)
#
# noises = []
# for j in links:
# noise_type = random_state.choice(noise_types) # gaussian
# sigma = noise_sigma[0] + (noise_sigma[1] - noise_sigma[0]) * random_state.rand() # 2,1.2,1,7
# noises.append(getattr(NoiseModel(sigma), noise_type))
#
# data_all_check = mod.generate_nonlinear_contemp_timeseries(links=links, T=T + 10000, noises=noises,
# random_state=random_state)
# nonstationary = mod.check_stationarity_chr(data_all_check, links)
# return nonstationary, data_all_check
#
#
# def generate_fixed_data():
# seed = 7
# auto_coeff = 0.95
# coeff = 0.4
# T = 500
#
# def lin(x): return x
#
# links = {0: [((0, -1), auto_coeff, lin),
# ((1, -1), -coeff, lin)
# ],
# 1: [((1, -1), auto_coeff, lin),
# ],
# }
#
# # Specify dynamical noise term distributions, here unit variance Gaussians
# random_state = np.random.RandomState(seed)
#     noises = [random_state.randn for j in links.keys()]
#
# data, nonstationarity_indicator = pp.structural_causal_process(
# links=links, T=T, noises=noises, seed=seed)
# T, N = data.shape
#
# # Initialize dataframe object, specify variable names
# var_names = [j for j in range(N)]
# dataframe = pp.DataFrame(data, var_names=var_names)
#
# filename = os.path.abspath("test.dat")
# fileobj = open(filename, mode='wb')
# off = np.array(data, dtype=np.float32)
# off.tofile(fileobj)
# fileobj.close()
#
# return dataframe, links, var_names
#
#
# def generate_dataframe(model, coeff, min_coeff, auto, sam, N, frac_unobserved, n_links, max_true_lag, T,
# contemp_fraction):
# """
# Generate dataframe and links of SCM
# 1. generates links from input data about model (mod.generate_random_contemp_model(...))
# 2. generates df from links (generate_data)
# 3. drops non-stationary data
# :param model:
# :param coeff:
# :param min_coeff:
# :param auto:
# :param sam:
# :param N:
# :param frac_unobserved:
# :param n_links:
# :param max_true_lag:
# :param T:
# :param contemp_fraction:
# :return: dataframe, links, observed_vars, original_graph
#
# """
# def lin_f(x):
# return x
#
# def f2(x):
# return x + 5. * x ** 2 * np.exp(-x ** 2 / 20.)
#
# # noise
# coupling_funcs = [lin_f]
# noise_types = ['gaussian'] # , 'weibull', 'uniform']
# noise_sigma = (0.5, 2)
#
#
# couplings = list(np.arange(min_coeff, coeff + 0.1, 0.1)) # coupling strength
# couplings += [-c for c in couplings] # add negative coupling strength
#
# auto_deps = list(np.arange(0.3, 0.6, 0.05)) # auto-correlations
#
# # Models may be non-stationary. Hence, we iterate over a number of seeds
# # to find a stationary one regarding network topology, noises, etc
# if verbosity > 999:
# model_seed = verbosity - 1000
# else:
# model_seed = sam
#
# for ir in range(1000):
# random_state = np.random.RandomState(0)# todo (model_seed)
#
# N_all = math.floor((N / (1. - frac_unobserved))) # 4
# n_links_all = math.ceil(n_links / N * N_all) # 4
# observed_vars = np.sort(random_state.choice(range(N_all), # [1,2,3]
# size=math.ceil((1. - frac_unobserved) * N_all),
# replace=False)).tolist()
#
# links = mod.generate_random_contemp_model(
# N=N_all,
# L=n_links_all,
# coupling_coeffs=couplings,
# coupling_funcs=coupling_funcs,
# auto_coeffs=auto_deps,
# tau_max=max_true_lag,
# contemp_fraction=contemp_fraction,
# # num_trials=1000,
# random_state=random_state)
#
# # generate data from links
# nonstationary, data_all_check = generate_data(random_state, links, noise_types, noise_sigma, model, T)
#
# # If the model is stationary, break the loop
# if not nonstationary:
# data_all = data_all_check[:T]
# dataframe_all = pp.DataFrame(data_all)
# data = data_all[:, observed_vars]
# original_graph, original_vals = get_edgemarks_and_effect_sizes(links)
# dataframe = pp.DataFrame(data)
#
# # save data to file
# # filename = os.path.abspath("./../../../test.dat")
# # fileobj = open(filename, mode='wb')
# # off = np.array(data, dtype=np.float32)
# # off.tofile(fileobj)
# # fileobj.close()
#
# # plot data
# if plot_data:
# tp.plot_timeseries(dataframe_all, figsize=(15, 5));
# plt.show()
#
# # plot original DAG
# if verbosity > 0:
# tp.plot_graph(
# val_matrix=original_vals, # original_vals None
# link_matrix=original_graph,
# var_names=range(N_all),
# link_colorbar_label='cross-MCI',
# node_colorbar_label='auto-MCI',
# figsize=(10, 6),
# )
# plt.show()
# # Plot time series graph
# # tp.plot_time_series_graph(
# # figsize=(12, 8),
# # val_matrix=original_vals, # original_vals None
# # link_matrix=original_graph,
# # var_names=range(N_all),
# # link_colorbar_label='MCI',
# # )
# # plt.show()
# return dataframe, links, observed_vars, original_graph
#
#
# else:
# print("Trial %d: Not a stationary model" % ir)
# model_seed += 10000
# if ir > 998:
# raise ValueError('datagenerating process unstable')
#
#
# def compute_oracle_pag(links, observed_vars, tau_max):
# """
# Compute the oracle PAG, given links and observed_vars and tau_max
# returns: oracle_pag
# """
# oracle_graph = utilities.get_oracle_pag_from_dag(links, observed_vars=observed_vars, tau_max=tau_max,
# verbosity=verbosity)[1]
# if verbosity > 0:
# # plot oracle PAG
#
#         # plot oracle PAG
# tp.plot_graph(
# val_matrix=None,
# link_matrix=oracle_graph,
# var_names=observed_vars,
# link_colorbar_label='cross-MCI',
# node_colorbar_label='auto-MCI',
# figsize=(10, 6),
# )
# plt.show()
# # Plot time series graph
# tp.plot_time_series_graph(
# figsize=(12, 8),
# val_matrix=None,
# link_matrix=oracle_graph,
# var_names=observed_vars,
# link_colorbar_label='MCI',
# )
# plt.show()
#
# print("True Links")
# for j in links:
# print(j, links[j])
# print("observed_vars = ", observed_vars)
# print("True PAG")
# if tau_max > 0:
# for lag in range(tau_max + 1):
# print(oracle_graph[:, :, lag])
# else:
# print(oracle_graph.squeeze())
# return oracle_graph
#
#
# def calculate(para_setup):
# """
# Main function to run the experiment, given para_setup
#
# returns: original_graph, oracle_graph, val_min, max_cardinality,
#
# calls:
# 1. parses input parameters
# 2. calls generate_dataframe
# 3. calls compute_oracle_pag
# 4. calls LPCMCI to get graph and values
#
# """
# para_setup_string, sam = para_setup
#
# paras = para_setup_string.split('-')
# paras = [w.replace("'", "") for w in paras]
#
# model = 'random_lineargaussian'
# N = 8
# n_links = 8
# min_coeff = 0.2
# coeff = 0.5
# auto = 0.5 # auto-dependency (auto-correlation) 0.5
# contemp_fraction = 0.6
# frac_unobserved = 0.3
# max_true_lag = 1
# T = 500
#
# ci_test = 'parr_corr'
# method = 'lpcmci_nprelim4'
# pc_alpha = 0.26
# tau_max = 1
#
# #############################################
# ## Data
# #############################################
#
# dataframe, links, observed_vars, original_graph = generate_dataframe(model, coeff, min_coeff, auto, sam, N,
# frac_unobserved,
# n_links, max_true_lag, T, contemp_fraction)
#
# # dataframe, links, observed_vars = generate_fixed_data()
#
# #############################################
# ## Methods
# #############################################
# oracle_graph = compute_oracle_pag(links, observed_vars, tau_max)
#
# computation_time_start = time.time()
#
# lpcmci = LPCMCI(
# dataframe=dataframe,
# cond_ind_test=ParCorr(significance='analytic', recycle_residuals=True)
# )
#
# lpcmci.run_lpcmci(
# tau_max=tau_max,
# pc_alpha=pc_alpha,
# max_p_non_ancestral=3, # max cardinality of conditioning set, in the second removal phase
# n_preliminary_iterations=10,
# verbosity=verbosity)
#
# graph = lpcmci.graph
# l = ['-->', '', '<--', '', 'o->', '', '<-o', '', '<->', '']
# random_edgemark_graph = [[[rd.choice(l), rd.choice(l)], [rd.choice(l), rd.choice(l)], [rd.choice(l), rd.choice(l)],
# [rd.choice(l), rd.choice(l)], [rd.choice(l), rd.choice(l)], [rd.choice(l), rd.choice(l)],
# [rd.choice(l), rd.choice(l)], [rd.choice(l), rd.choice(l)]],
# [[rd.choice(l), rd.choice(l)], [rd.choice(l), rd.choice(l)], [rd.choice(l), rd.choice(l)],
# [rd.choice(l), rd.choice(l)], [rd.choice(l), rd.choice(l)], [rd.choice(l), rd.choice(l)],
# [rd.choice(l), rd.choice(l)], [rd.choice(l), rd.choice(l)]],
# [[rd.choice(l), rd.choice(l)], [rd.choice(l), rd.choice(l)], [rd.choice(l), rd.choice(l)],
# [rd.choice(l), rd.choice(l)], [rd.choice(l), rd.choice(l)], [rd.choice(l), rd.choice(l)],
# [rd.choice(l), rd.choice(l)], [rd.choice(l), rd.choice(l)]],
# [[rd.choice(l), rd.choice(l)], [rd.choice(l), rd.choice(l)], [rd.choice(l), rd.choice(l)],
# [rd.choice(l), rd.choice(l)], [rd.choice(l), rd.choice(l)], [rd.choice(l), rd.choice(l)],
# [rd.choice(l), rd.choice(l)], [rd.choice(l), rd.choice(l)]],
# [[rd.choice(l), rd.choice(l)], [rd.choice(l), rd.choice(l)], [rd.choice(l), rd.choice(l)],
# [rd.choice(l), rd.choice(l)], [rd.choice(l), rd.choice(l)], [rd.choice(l), rd.choice(l)],
# [rd.choice(l), rd.choice(l)], [rd.choice(l), rd.choice(l)]],
# [[rd.choice(l), rd.choice(l)], [rd.choice(l), rd.choice(l)], [rd.choice(l), rd.choice(l)],
# [rd.choice(l), rd.choice(l)], [rd.choice(l), rd.choice(l)], [rd.choice(l), rd.choice(l)],
# [rd.choice(l), rd.choice(l)], [rd.choice(l), rd.choice(l)]],
# [[rd.choice(l), rd.choice(l)], [rd.choice(l), rd.choice(l)], [rd.choice(l), rd.choice(l)],
# [rd.choice(l), rd.choice(l)], [rd.choice(l), rd.choice(l)], [rd.choice(l), rd.choice(l)],
# [rd.choice(l), rd.choice(l)], [rd.choice(l), rd.choice(l)]],
# [[rd.choice(l), rd.choice(l)], [rd.choice(l), rd.choice(l)], [rd.choice(l), rd.choice(l)],
# [rd.choice(l), rd.choice(l)], [rd.choice(l), rd.choice(l)], [rd.choice(l), rd.choice(l)],
# [rd.choice(l), rd.choice(l)], [rd.choice(l), rd.choice(l)]]]
# # graph = np.asarray(random_edgemark_graph)
# # print('\ngraph!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n', graph, '\n\n')
#
# val_min = lpcmci.val_min_matrix
# max_cardinality = lpcmci.cardinality_matrix
#
# # pcmci = PCMCI(
# # dataframe=dataframe,
# # cond_ind_test=ParCorr(significance='analytic'),
# # verbosity=1)
# #
# # results = pcmci.run_pcmciplus(tau_min=0, tau_max=tau_max, pc_alpha=pc_alpha)
# # q_matrix = pcmci.get_corrected_pvalues(p_matrix=results['p_matrix'], fdr_method='fdr_bh',
# # exclude_contemporaneous=False)
# # link_matrix = results['graph']
# #
# # graph = link_matrix
# # val_min = results['val_matrix']
# # max_cardinality = None
#
# computation_time_end = time.time()
# computation_time = computation_time_end - computation_time_start
#
# # plot predicted PAG
# if verbosity > 0:
# tp.plot_graph(
# val_matrix=val_min,
# link_matrix=graph,
# var_names=observed_vars,
# link_colorbar_label='cross-MCI',
# node_colorbar_label='auto-MCI',
# figsize=(10, 6),
# )
# plt.show()
# # Plot time series graph
# tp.plot_time_series_graph(
# figsize=(12, 8),
# val_matrix=val_min,
# link_matrix=graph,
# var_names=observed_vars,
# link_colorbar_label='MCI',
# )
# plt.show()
#
# # reduced links
# # reduced_val_min = val_min
# # reduced_graph = graph
# # reduced_val_min[abs(reduced_val_min) < remove_link_threshold] = 0 # set values below threshold to zero
#     # reduced_graph[abs(reduced_val_min) < remove_link_threshold] = "" # set edgemarks below threshold to empty
# # tp.plot_graph(
# # val_matrix=reduced_val_min,
# # link_matrix=reduced_graph,
# # var_names=observed_vars,
# # link_colorbar_label='cross-MCI',
# # node_colorbar_label='auto-MCI',
# # figsize=(10, 6),
# # )
# # plt.show()
# # # Plot time series graph
# # tp.plot_time_series_graph(
# # figsize=(12, 8),
# # val_matrix=reduced_val_min,
# # link_matrix=reduced_graph,
# # var_names=observed_vars,
# # link_colorbar_label='MCI',
# # )
# # plt.show()
#
# return {
# 'original_graph': original_graph,
# 'oracle_graph': oracle_graph,
# 'val_min': val_min,
# 'max_cardinality': max_cardinality,
#
# # Method results
# 'computation_time': computation_time,
# 'graph': graph,
# }
#
#
# if __name__ == '__main__':
# """
# calls calcualte()
#
# """
#
# all_configs = dict([(conf, {'results': {},
# "graphs": {},
# "val_min": {},
# "max_cardinality": {},
#
# "oracle_graph": {},
# "original_graph": {},
# "computation_time": {}, }) for conf in config_list])
#
# job_list = [(conf, i) for i in range(samples) for conf in config_list]
#
# num_tasks = len(job_list)
#
# for config_sam in job_list:
# config, sample = config_sam
# print("Experiment %s - Realization %d" % (config, sample))
# ##################
# ### calculate ###
# ##################
# all_configs[config]['results'][sample] = calculate(config_sam)
#
# print("\nsaving all configs...")
#
# for conf in list(all_configs.keys()):
# all_configs[conf]['graphs'] = np.zeros((samples,) + all_configs[conf]['results'][0]['graph'].shape, dtype='<U3')
# all_configs[conf]['oracle_graphs'] = np.zeros(
# (samples,) + all_configs[conf]['results'][0]['oracle_graph'].shape,
# dtype='<U3')
# all_configs[conf]['original_graphs'] = np.zeros(
# (samples,) + all_configs[conf]['results'][0]['original_graph'].shape,
# dtype='<U3')
# all_configs[conf]['val_min'] = np.zeros((samples,) + all_configs[conf]['results'][0]['val_min'].shape)
# all_configs[conf]['max_cardinality'] = np.zeros(
# (samples,) + all_configs[conf]['results'][0]['max_cardinality'].shape)
# all_configs[conf]['computation_time'] = []
#
# for i in list(all_configs[conf]['results'].keys()):
# all_configs[conf]['graphs'][i] = all_configs[conf]['results'][i]['graph']
# all_configs[conf]['original_graphs'][i] = all_configs[conf]['results'][i]['original_graph']
# all_configs[conf]['oracle_graphs'][i] = all_configs[conf]['results'][i]['oracle_graph']
# all_configs[conf]['val_min'][i] = all_configs[conf]['results'][i]['val_min']
# all_configs[conf]['max_cardinality'][i] = all_configs[conf]['results'][i]['max_cardinality']
#
# all_configs[conf]['computation_time'].append(all_configs[conf]['results'][i]['computation_time'])
#
# # Save all results
# file_name = folder_name + '%s' % (conf)
#
# # Compute and save metrics in separate (smaller) file
# metrics = metrics_mod.get_evaluation(results=all_configs[conf])
# for metric in metrics:
# if metric != 'computation_time':
# print(f"{metric:30s} {metrics[metric][0]: 1.2f} +/-{metrics[metric][1]: 1.2f} ")
# else:
# print(
# f"{metric:30s} {metrics[metric][0]: 1.2f} +/-[{metrics[metric][1][0]: 1.2f}, {metrics[metric][1][1]: 1.2f}]")
# # chr:
# f1_score_adjacency = utilities.compute_f1_score(metrics['adj_anylink_precision'][0],
# metrics['adj_anylink_recall'][0])
# f1_score_edgemark = utilities.compute_f1_score(metrics['edgemarks_anylink_precision'][0],
# metrics['edgemarks_anylink_recall'][0])
# print('f1_score_adjacency:', f1_score_adjacency, '\nf1_score_edgemark:', f1_score_edgemark)
#
# print("Metrics dump ", file_name.replace("'", "").replace('"', '') + '_metrics.dat')
# file = open(file_name.replace("'", "").replace('"', '') + '_metrics.dat', 'wb')
# pickle.dump(metrics, file, protocol=-1)
# file.close()
#
# del all_configs[conf]['results']
#
# # Also save raw results
# print("dump ", file_name.replace("'", "").replace('"', '') + '.dat')
# file = open(file_name.replace("'", "").replace('"', '') + '.dat', 'wb')
# pickle.dump(all_configs[conf], file, protocol=-1)
# file.close()
#
# time_end = time.time()
# print('Run time in hours ', (time_end - time_start) / 3600.)
| 20,531 | 38.790698 | 187 |
py
|
correlate
|
correlate-master/causal_discovery/LPCMCI/plan.py
|
import pickle
import numpy as np
import pandas as pd
from tqdm import tqdm
from causal_discovery.LPCMCI.observational_discovery import observational_causal_discovery
from causal_discovery.gen_configs import define_settings
from causal_discovery.interventional_discovery import get_independencies_from_interv_data
from config import target_label, coeff, min_coeff, n_days, checkpoint_path, n_scms, overwrite_scm
from data_generation import data_generator, generate_stationary_scm, measure
from intervention_proposal.get_intervention import find_optimistic_intervention
from regret import compute_regret, cost_function
"""
B)
3. Paul's list from meeting
Plots: colorscale, latents?
Anmeldung
Ask Paul for template or suggest one
Write key words
3. 5 set up simulation study
4. 5 interpret results 27. july
4.5 opt stochastic intervention? / multiple interventions?
5. 40 write 5. oct
-> 27 coding days + 40 writing days = 67 days = 11.5 weeks = 3 months (optimistic guess)
=> 3.75 with phinc => end of september
-> 75 coding days + 60 writing days = 135 days = 22 weeks = 5.5 months (guess delay factor: 2.8x coding, 1.5x writing)
=> 7 with phinc => end of end of year
opportunities for computational speedup:
X parallelize
- recycle residuals from lpcmci
x don't run lpcmci when there is no intervention coming
- orient ambiguities towards target var to reduce number of possible graphs
x prune weak links
- instead of append and r_ ini array and fill
"""
def is_debug():
import sys
gettrace = getattr(sys, 'gettrace', None)
if gettrace is None:
return False
else:
v = gettrace()
if v is None:
return False
else:
return True
def ensure_0_in_measured_labels(measured_labels):
if 0 not in measured_labels:
# remove last element of measured_labels
measured_labels = measured_labels[:-1]
# add 0 to measured_labels
measured_labels.append(0)
measured_labels = np.sort(measured_labels).tolist()
return measured_labels
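# Illustrative sketch (made-up inputs): ensure_0_in_measured_labels keeps the target index 0
# in the measured set by swapping it in for the last sampled label, e.g.
#   ensure_0_in_measured_labels([1, 3, 4])  ->  [0, 1, 3]
#   ensure_0_in_measured_labels([0, 2, 5])  ->  [0, 2, 5]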
def has_measured_cross_dependencies_on_target_var(scm, unmeasured_labels_ints):
cross_dependencies_on_target_var = []
for i in range(len(scm[int(target_label)])):
cross_dependencies_on_target_var.append(scm[int(target_label)][i][0][0])
# iterate through target_causes and drop if value is 0
cross_dependencies_on_target_var = [x for x in cross_dependencies_on_target_var if x != 0]
cross_dependencies_on_target_var = [x for x in cross_dependencies_on_target_var if x not in unmeasured_labels_ints]
if len(cross_dependencies_on_target_var) < 1:
return False
else:
return True
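# Illustrative sketch (toy SCM, assuming target_label == '0' as the `x != 0` filter implies):
# the parents of variable 0 below are 0 (autocorrelation, dropped) and 2 (latent, dropped),
# so no measured cross-dependency remains and the check returns False:
#   toy_scm = {0: [((0, -1), 0.5, lin_f), ((2, 0), 0.5, lin_f)]}
#   has_measured_cross_dependencies_on_target_var(toy_scm, unmeasured_labels_ints=[2])  # -> False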
def get_measured_labels(n_vars_all, random_state, n_latents, scm):
""" get measured and unmeasured vars. if there is no measured cross-dependency on target var, resample"""
unmeasured_labels_ints = None # ini
measured_labels = None # ini
all_labels_ints = range(n_vars_all)
cross_dependencies_on_target_var = False
while not cross_dependencies_on_target_var:
measured_labels = np.sort(random_state.choice(all_labels_ints, # e.g. [1,4,5,...]
size=n_vars_all - n_latents,
replace=False)).tolist()
measured_labels = ensure_0_in_measured_labels(measured_labels)
# get unmeasured labels
unmeasured_labels_ints = []
for x in all_labels_ints:
if x not in measured_labels:
unmeasured_labels_ints.append(x)
# if there is no cross dependency on target var, resample latents
cross_dependencies_on_target_var = has_measured_cross_dependencies_on_target_var(scm, unmeasured_labels_ints)
if not cross_dependencies_on_target_var:
            print("no cross dependency on target var, resampling latents") # todo remove after check
unmeasured_labels_strs = [str(x) for x in unmeasured_labels_ints]
# measured_labels to strings
measured_labels = [str(x) for x in measured_labels]
""" key value map of label to index """
measured_label_as_idx = {label: idx for idx, label in enumerate(measured_labels)}
# variables that can't be intervened upon. They are the target var and the unobserved vars
unintervenable_vars = [target_label] + unmeasured_labels_strs
return measured_labels, measured_label_as_idx, unmeasured_labels_strs, unintervenable_vars
def obs_or_intervene(nth):
"""
first n_ini_obs samples are observational
    then for n_days samples, every nth sample is an intervention
false: observation
true: intervention
"""
is_mixed = np.zeros(n_days).astype(bool)
for i in range(1, len(is_mixed) + 1):
if i % nth == 0:
is_mixed[i - 1] = True
else:
is_mixed[i - 1] = False
# is_intervention_list = np.append(is_obs, is_mixed)
return is_mixed
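# Illustrative sketch: with nth = 4 and, say, n_days = 8 in config, every 4th day is marked as
# an intervention day, so obs_or_intervene(4) would start with
#   [False, False, False, True, False, False, False, True]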
def store_interv(was_intervened, intervention_variable, n_samples_per_generation, n_vars_measured):
"""
add data to boolean array of measured variables indicating if they were intervened upon
input: requires that intervention_variable is a string of the form 'char int' e.g. 'u_0'
"""
new_series = pd.Series(np.zeros(n_vars_measured, dtype=bool), index=was_intervened.columns)
# if intervened
if intervention_variable is not None:
# get ind
if len(intervention_variable) > 2:
intervention_idx = intervention_variable[2:]
else:
intervention_idx = intervention_variable
# mark intervened var
new_series[intervention_idx] = True
# concat new_series to was_intervened
tmp_data = []
for i in range(n_samples_per_generation):
tmp_data.append(new_series)
was_intervened = pd.concat([was_intervened, pd.DataFrame(tmp_data)], axis=0, ignore_index=True)
# # save was_intervened dataframe to file
# import os
# filename = os.path.abspath("./tmp_was_intervened.dat")
# was_intervened.to_csv(filename, index=False)
return was_intervened
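# Illustrative sketch (hypothetical values): intervening on variable '3' with
# n_samples_per_generation = 2 appends two rows in which only column '3' is True:
#   wi = pd.DataFrame(np.zeros((1, 4), dtype=bool), columns=['0', '1', '2', '3'])
#   wi = store_interv(wi, 'u_3', n_samples_per_generation=2, n_vars_measured=4)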
def calculate_parameters(n_vars_measured, n_latents, n_ini_obs):
n_measured_links = n_vars_measured
n_vars_all = n_vars_measured + n_latents # 11
labels_strs = [str(i) for i in range(n_vars_all)]
n_ini_obs = int(n_ini_obs)
return n_measured_links, n_vars_all, labels_strs, n_ini_obs
def simulation_study_with_one_scm(sim_study_input):
_, n_ini_obs, n_vars_measured, n_latents, pc_alpha, interv_alpha, n_samples_per_generation, nth = sim_study_input[0]
random_seed = sim_study_input[1]
print('setting:', sim_study_input[0], 'random_seed:', sim_study_input[1])
n_measured_links, n_vars_all, labels_strs, n_ini_obs = calculate_parameters(n_vars_measured,
n_latents,
n_ini_obs)
random_state = np.random.RandomState(random_seed)
# generate stationary scm
scm, edgemarks_true, effect_sizes_true, last_of_ts = generate_stationary_scm(coeff, min_coeff, random_seed,
random_state,
n_measured_links, n_vars_measured,
n_vars_all,
labels_strs)
if overwrite_scm:
def lin_f(x):
return x
scm = {
0: [((0, -1), 0.5, lin_f), ((1, 0), 0.5, lin_f)],
1: [((1, -1), 0.5, lin_f)],
2: [((2, -1), 0.5, lin_f), ((0, 0), 0.5, lin_f)]}
n_latents = 0
n_vars_all = 3
n_vars_measured = 3
n_measured_links = 3
edgemarks_true = np.array([[['', '-->'], ['<--', ''], ['-->', '']],
[['-->', ''], ['', '-->'], ['', '']],
[['<--', ''], ['', ''], ['', '-->']]])
effect_sizes_true = np.array([[[0, .5], [.5, 0], [1.0, 0]],
[[.5, 0], [0, .5], [0, 0]],
[[2.0, 0], [0, 0], [0, .5]]])
labels_strs = ['0', '1', '2']
last_of_ts = pd.DataFrame([[4, 1, -1]], columns=['0', '1', '2'])
# variable randomly decide which variables are measured vs latent
measured_labels, measured_label_as_idx, unmeasured_labels_strs, unintervenable_vars = get_measured_labels(
n_vars_all, random_state, n_latents, scm)
# ini var that keeps track of where the intervention is
was_intervened = pd.DataFrame(np.zeros((n_ini_obs, n_vars_measured), dtype=bool), columns=measured_labels)
# ini regret
regret_list = []
# ini
pag_edgemarks, independencies_from_interv_data, dependencies_from_interv_data, eps = None, None, None, 0.5
# schedule when to intervene
is_intervention_list = obs_or_intervene(nth) # 500 obs + 500 with every 4th intervention
""" generate first n_ini_obs samples without intervention"""
# generate observational data
ts_generated_actual, _ = data_generator(
scm=scm,
intervention_variable=None,
intervention_value=None,
ts_old=last_of_ts,
random_seed=2 ** 10,
n_samples=n_ini_obs + 100,
labels=labels_strs,
noise_type='gaussian',
)
ts_generated_actual = ts_generated_actual[-n_ini_obs:]
ts_generated_optimal = ts_generated_actual
interv_var_correct_list = []
# measure new data (hide latents)
ts_measured_actual = measure(ts_generated_actual, obs_vars=measured_labels)
""" loop: causal discovery, planning, intervention """
# intervene, observe, observe, observe, ...
for day, is_intervention in enumerate(is_intervention_list):
# safe all local variables file
# filename = checkpoint_path + 'global_save1.pkl'
# with open(filename, 'wb') as f:
# pickle.dump([day, is_intervention, ts_generated_actual, regret_list,
# interv_val, ts_measured_actual, ts_generated_optimal, regret_list, was_intervened,
# pag_edgemarks, interv_var, is_intervention_list], f)
# load
# with open(filename, 'rb') as f:
# day, is_intervention, ts_generated_actual, regret_list, interv_val, ts_measured_actual, ts_generated_optimal, regret_list, was_intervened, pag_edgemarks, interv_var, is_intervention_list = pickle.load(
# f)
# intervene or observe var?
is_intervention = is_intervention_list[day]
# if intervention is scheduled
if is_intervention:
"""
causal discovery
"""
# interventional discovery
independencies_from_interv_data, dependencies_from_interv_data = get_independencies_from_interv_data(
ts_measured_actual.copy(),
was_intervened,
pc_alpha
)
# observational discovery
pag_effect_sizes, pag_edgemarks = observational_causal_discovery(
df=ts_measured_actual.copy(),
was_intervened=was_intervened.copy(),
external_independencies=independencies_from_interv_data.copy(),
external_dependencies=dependencies_from_interv_data.copy(),
measured_label_to_idx=measured_label_as_idx,
pc_alpha=pc_alpha,
)
# pag_effect_sizes, pag_edgemarks, var_names = load_results(name_extension='simulated')
"""
propose intervention
"""
# from measured data
interv_var, interv_val = find_optimistic_intervention(
my_graph=pag_edgemarks.copy(),
val=pag_effect_sizes.copy(),
ts=ts_measured_actual, # only first n_ini_obs samples, to have the same ts as optimal
unintervenable_vars=unintervenable_vars,
random_seed_day=day,
label='actual_data',
external_independencies=independencies_from_interv_data,
eps=eps * 0.99,
)
# from true SCM
interv_var_opti, interv_val_opti = find_optimistic_intervention(
edgemarks_true.copy(),
effect_sizes_true.copy(),
ts=pd.DataFrame(ts_generated_actual, columns=labels_strs),
# needed for 1. percentile from mu, std 2. simulation start 3. labels
unintervenable_vars=unintervenable_vars,
random_seed_day=day,
label='true_scm',
external_independencies=None,
eps=None,
)
# if no intervention is scheduled
else:
interv_var, interv_val = None, None
interv_var_opti, interv_val_opti = None, None
# keep track of if and where in the ts the intervention is
was_intervened = store_interv(was_intervened, interv_var, n_samples_per_generation, n_vars_measured)
"""
intervene as proposed and generate new data.
Interv might be none
"""
# actual
ts_new_actual, _ = data_generator(
scm=scm,
intervention_variable=interv_var,
intervention_value=interv_val,
ts_old=ts_generated_actual,
random_seed=day,
n_samples=n_samples_per_generation,
labels=labels_strs,
noise_type='gaussian',
)
# optimal
ts_new_optimal, _ = data_generator(
scm=scm,
intervention_variable=interv_var_opti,
intervention_value=interv_val_opti,
ts_old=ts_generated_optimal,
random_seed=day,
n_samples=n_samples_per_generation,
labels=labels_strs,
noise_type='gaussian',
)
# append new (measured) data
ts_generated_actual = np.r_[ts_generated_actual, ts_new_actual] # append actual generated data
new_measurements = measure(ts_new_actual, obs_vars=measured_labels) # measure new data (drop latent data)
ts_measured_actual = pd.DataFrame(np.r_[ts_measured_actual, new_measurements], columns=measured_labels)
# optimal
ts_generated_optimal = pd.DataFrame(np.r_[ts_generated_optimal, ts_new_optimal], columns=labels_strs)
"""
regret
"""
# only if it was an intervention
if is_intervention and interv_var is not None:
regret_list, outcome_actual, interv_var_correct_list = compute_regret(ts_measured_actual,
ts_generated_optimal,
regret_list,
n_samples_per_generation,
interv_var_opti,
interv_var,
interv_var_correct_list)
if interv_val_opti is not None and interv_val is not None:
print('rdms:', random_seed, '\tday:', day + n_ini_obs, '\to.cme', format(outcome_actual, ".3f"), '\tr',
format(regret_list[-1], ".3f"), '\to var',
interv_var_opti, '\to val', format(interv_val_opti, ".3f"), '\ta var',
interv_var, '\ta val', format(interv_val, ".3f"), '\tind', independencies_from_interv_data,
'\tdep',
dependencies_from_interv_data)
elif interv_val_opti is not None and interv_val is None:
print('rdms:', random_seed, '\tday:', day + n_ini_obs, '\to.cme', format(outcome_actual, ".3f"), '\tr',
format(regret_list[-1], ".3f"), '\to var',
interv_var_opti, '\to val', format(interv_val_opti, ".3f"), '\ta var',
interv_var, '\ta val', interv_val)
elif interv_val_opti is None and interv_val is not None:
print('rdms:', random_seed, '\tday:', day + n_ini_obs, '\to.cme', format(outcome_actual, ".3f"), '\tr',
format(regret_list[-1], ".3f"), '\to var',
interv_var_opti, '\to val', interv_val_opti, '\ta var',
interv_var, '\ta val', interv_val)
regret_sum = sum(regret_list)
cost = cost_function(regret_list, was_intervened, n_ini_obs)
print('regret_sum:', regret_sum)
print('interv_var_correct_list_sum:', sum(interv_var_correct_list), '\n\n')
return [regret_list, cost, interv_var_correct_list]
def run_all_experiments():
# get settings
all_param_study_settings = define_settings()
# run parameter studies
for simulation_study_idx, simulation_study in enumerate(all_param_study_settings):
regret_list_over_simulation_study = []
study_name = simulation_study[0][0]
# run one parameter setting
for one_param_setting in simulation_study:
regret_list_over_scms = []
# repeat each parameter setting for 100 randomly sampled scms
for i_th_scm in tqdm(range(0, n_scms)): # n people or scms todo 0, n_scms
## run experiment ###
regret_list_over_scms.append(
simulation_study_with_one_scm((one_param_setting, i_th_scm)))
######################
regret_list_over_simulation_study.append(regret_list_over_scms)
# save results of one parameter setting
with open(
checkpoint_path + str(
simulation_study_idx) + study_name + '_regret_list_over_simulation_study.pickle',
'wb') as f:
pickle.dump([regret_list_over_simulation_study, simulation_study], f)
print('saved')
print('all done')
run_all_experiments()
| 18,416 | 41.144165 | 215 |
py
|
correlate
|
correlate-master/causal_discovery/LPCMCI/svarrfci.py
|
# import numpy as np
# from itertools import product, combinations
# import os
#
# class SVARRFCI():
# r"""
#     This class implements an adaptation of the RFCI algorithm to stationary time series with the assumption of no selection variables. The RFCI algorithm was introduced in:
#
# Colombo, D., Maathuis, M. H., Kalisch, M., and Richardson, T. S. (2012). Learning high-dimensional directed acyclic graphs with latent and selection variables. The Annals of Statistics, 40:294–321.
#
# We note the following:
#     1) The algorithm is fully order-independent. This is achieved by two things. First, we use the majority rule to decide whether a given node is in a given separating set. Since the unshielded triple rule (given in Lemma 3.1 in the above reference) demands minimal separating sets, the majority vote is restricted to separating sets of minimal cardinality (this implies minimality). Second, we apply an orientation rule to the entire graph and resolve potential conflicts among the proposed orientations by means of the conflict mark 'x' before modifying the graph. This also applies to the discriminating path rule (given in Lemma 3.2 in the above reference)
# 2) Several control parameters apply modifications, see below.
#
# Parameters passed to the constructor:
# - dataframe:
# Tigramite dataframe object that contains the the time series dataset \bold{X}
# - cond_ind_test:
# A conditional independence test object that specifies which conditional independence test CI is to be used
#
# Parameters passed to self.run_svarrfci():
# - tau_max:
# The maximum considered time lag tau_max
# - pc_alpha:
# The significance level \alpha of conditional independence tests
# - max_cond_px:
# Consider a pair of variables (X^i_{t-\tau}, X^j_t) with \tau > 0. In the edge removal phase (here this is self._run_pc_removal_phase()), the algorithm does not test for conditional independence given subsets of X^i_{t-\tau} of cardinality higher than max_cond_px.
# - max_p_global:
# Restricts all conditional independence tests to conditioning sets with cardinality smaller or equal to max_p_global
# - max_q_global:
# For each ordered pair (X^i_{t-\tau}, X^j_t) of adjacent variables and for each cardinality of the conditioning sets test at most max_q_global many conditioning sets (when summing over all tested cardinalities more than max_q_global tests may be made)
# - fix_all_edges_before_final_orientation (will be removed)
# - verbosity:
# Controls the verbose output self.run_svarrfci() and the function it calls.
#
# Return value of self.run_svarrfci():
#     The estimated graph in the form of a link matrix. This is a numpy array of shape (self.N, self.N, self.tau_max + 1), where the entry array[i, j, \tau] is a string that visualizes the estimated link from X^i_{t-\tau} to X^j_t. For example, if array[0, 2, 1] = 'o->', then the estimated graph contains the link X^i_{t-1} o-> X^j_t. This numpy array is also saved as instance attribute self.graph. Note that self.N is the number of observed time series and self.tau_max the maximal considered time lag.
#
# A note on middle marks:
# Even if both max_p_global and max_q_global are np.inf, RFCI does not guarantee that all edges that remain in its graph after convergence (this is an RFCI-PAG) are also in the PAG. However, it does guarantee this for all edges that have a tail. We use the middle marks that we introduced for LPCMCI to explicate this distinction. In particular, we use the middle marks '?' and '' (empty). For convenience (to have strings of the same lengths) we here internally denote the empty middle mark by '-'. For post-processing purposes all middle marks are nevertheless set to the empty middle mark (here '-') in line 80, but if verbosity >= 1 a graph with the middle marks will be printed out before.
#
# A note on wildcards:
# The middle mark wildcard \ast and the edge mark wildcard are here represented as *, the edge mark wildcard \star as +
# """
#
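#     # Hedged usage sketch (not from the original repo; kept commented out like the rest of
#     # this file). `data` stands for a (T, N) numpy array; ParCorr and pp.DataFrame are the
#     # tigramite imports used elsewhere in this repo, e.g. in compute_experiments.py:
#     #
#     #   import tigramite.data_processing as pp
#     #   from tigramite.independence_tests import ParCorr
#     #   svarrfci = SVARRFCI(dataframe=pp.DataFrame(data), cond_ind_test=ParCorr())
#     #   graph = svarrfci.run_svarrfci(tau_max=1, pc_alpha=0.05)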
# def __init__(self, dataframe, cond_ind_test):
# """Class constructor. Store:
# i) data
# ii) conditional independence test object
# iii) some instance attributes"""
#
# # Save the time series data that the algorithm operates on
# self.dataframe = dataframe
#
# # Set the conditional independence test to be used
# self.cond_ind_test = cond_ind_test
# self.cond_ind_test.set_dataframe(self.dataframe)
#
# # Store the shape of the data in the T and N variables
# self.T, self.N = self.dataframe.values.shape
#
#
# def run_svarrfci(self,
# tau_max = 1,
# pc_alpha = 0.05,
# max_cond_px = 0,
# max_p_global = np.inf,
# max_q_global = np.inf,
# fix_all_edges_before_final_orientation = True,
# verbosity = 0):
# """Run an adaption of the RFCI algorithm to stationary time series without selection variables on the dataset and with the conditional independence test passed to the class constructor and with the options passed to this function."""
#
# # Step 0: Intializations
# self._initialize(tau_max, pc_alpha, max_cond_px, max_p_global, max_q_global, fix_all_edges_before_final_orientation, verbosity)
#
# # Step 1: PC removal phase
# self._run_pc_removal_phase()
#
# # Step 2: RFCI orientation phase
# self._run_rfci_orientation_phase()
#
# # Post processing
# self._fix_all_edges()
# self.graph = self._dict2graph()
# self.val_min_matrix = self._dict_to_matrix(self.val_min, self.tau_max, self.N, default = 0)
# self.cardinality_matrix = self._dict_to_matrix(self.max_cardinality, self.tau_max, self.N, default = 0)
#
# # Return the estimated graph
# return self.graph
#
#
# def _initialize(self,
# tau_max,
# pc_alpha,
# max_cond_px,
# max_p_global,
# max_q_global,
# fix_all_edges_before_final_orientation,
# verbosity):
# """Function for
# i) saving the arguments passed to self.run_svarrfci() as instance attributes
# ii) initializing various memory variables for storing the current graph, sepsets etc.
# """
#
# # Save the arguments passed to self.run_svarrfci()
# self.tau_max = tau_max
# self.pc_alpha = pc_alpha
# self.max_cond_px = max_cond_px
# self.max_p_global = max_p_global
# self.max_q_global = max_q_global
# self.fix_all_edges_before_final_orientation = fix_all_edges_before_final_orientation
# self.verbosity = verbosity
#
# # Initialize the nested dictionary for storing the current graph.
# # Syntax: self.graph_dict[j][(i, -tau)] gives the string representing the link from X^i_{t-tau} to X^j_t
# self.graph_dict = {}
# for j in range(self.N):
# self.graph_dict[j] = {(i, 0): "o?o" for i in range(self.N) if j != i}
# self.graph_dict[j].update({(i, -tau): "o?>" for i in range(self.N) for tau in range(1, self.tau_max + 1)})
#
# # Initialize the nested dictionary for storing separating sets
# # Syntax: self.sepsets[j][(i, -tau)] stores separating sets of X^i_{t-tau} to X^j_t. For tau = 0, i < j.
# self.sepsets = {j: {(i, -tau): set() for i in range(self.N) for tau in range(self.tau_max + 1) if (tau > 0 or i < j)} for j in range(self.N)}
#
# # Initialize dictionaries for storing known ancestorships, non-ancestorships, and ambiguous ancestorships
# # Syntax: self.def_ancs[j] contains the set of all known ancestors of X^j_t. Equivalently for the others
# self.def_ancs = {j: set() for j in range(self.N)}
# self.def_non_ancs = {j: set() for j in range(self.N)}
# self.ambiguous_ancestorships = {j: set() for j in range(self.N)}
#
# # Initialize nested dictionaries for saving the minimum test statistic among all conditional independence tests of a given pair of variables, the maximum p-values, as well as the maximal cardinality of the known separating sets.
# # Syntax: As for self.sepsets
# self.val_min = {j: {(i, -tau): float("inf") for i in range(self.N) for tau in
# range(self.tau_max + 1) if (tau > 0 or i < j)} for j in range(self.N)}
# self.pval_max = {j: {(i, -tau): 0 for i in range(self.N) for tau in
# range(self.tau_max + 1) if (tau > 0 or i < j)} for j in range(self.N)}
# self.max_cardinality = {j: {(i, -tau): 0 for i in range(self.N) for tau in
# range(self.tau_max + 1) if (tau > 0 or i < j)} for j in range(self.N)}
#
# # Return
# return True
#
#
# def _run_pc_removal_phase(self):
# """Run the removal phase of the RFCI algorithm adapted to time series. This is essentially the skeleton phase of the PC algorithm"""
#
# # Verbose output
# if self.verbosity >= 1:
# print("\n=======================================================")
# print("=======================================================")
# print("Starting removal phase")
#
# # Remember all edges that are fully tested, even for finite max_p_global and max_q_global. Remember all edges that have not been fully tested
# self._can_fix = set()
# self._cannot_fix = set()
#
# # Iterate until convergence
# # p_pc is the cardinality of the conditioning set
# p_pc = 0
# while True:
#
# ##########################################################################################################
# ### Run the next removal iteration #######################################################################
#
# # Verbose output
# if self.verbosity >= 1:
# if p_pc == 0:
# print("\nStarting test phase\n")
# print("p = {}".format(p_pc))
#
# # Variable to check for convergence
# has_converged = True
#
# # Variable for keeping track of edges marked for removal
# to_remove = {j: {} for j in range(self.N)}
#
# # Iterate through all links
# for (i, j, lag_i) in product(range(self.N), range(self.N), range(-self.tau_max, 1)):
#
# # Decode the triple (i, j, lag_i) into pairs of variables (X, Y)
# X = (i, lag_i)
# Y = (j, 0)
#
# ######################################################################################################
# ### Exclusion of links ###############################################################################
#
# # Exclude the current link if ...
# # ... X = Y
# if lag_i == 0 and i == j:
# continue
# # ... X > Y (so, in fact, we don't distinguish between both directions of the same edge)
# if self._is_smaller(Y, X):
# continue
#
# # Get the current link from X to Y
# link = self._get_link(X, Y)
#
# # Also exclude the current link if ...
# # ... X and Y are not adjacent anymore
# if link == "":
# continue
#
# ######################################################################################################
# ### Preparation of PC search sets ####################################################################
#
# # Search for separating sets in the non-future adjacencies of X, without X and Y themselves
# S_search_YX = self._get_non_future_adj([Y]).difference({X, Y})
#
# # Search for separating sets in the non-future adjacencies of Y, without X and Y themselves, always if X and Y are contemporaneous or if specified by self.max_cond_px
# test_X = True if (lag_i == 0 or (self.max_cond_px > 0 and self.max_cond_px >= p_pc)) else False
# if test_X:
#
# S_search_XY = self._get_non_future_adj([X]).difference({X, Y})
#
# ######################################################################################################
# ### Check whether the link needs testing #############################################################
#
# # If there are less than p_pc elements in both search sets, the link does not need further testing. If the pair (X, Y) has been fully tested, i.e., it has not been added to self._cannot_fix, we add it to self._can_fix. Then, later, in case one edge mark is set to a tail, we know that the link is part of the True MAG
# if len(S_search_YX) < p_pc and (not test_X or len(S_search_XY) < p_pc):
# if (X, Y) not in self._cannot_fix:
# self._can_fix.add((X, Y))
# continue
#
#                 # Force-quit the while loop when p_pc exceeds the specified limits
# if p_pc > self.max_p_global:
# continue
#
# # Since this link does need testing, the algorithm has not converged yet
# has_converged = False
#
# ######################################################################################################
# ### Tests for conditional independence ###############################################################
#
# # If self.max_q_global is finite, the below for loop may be broken earlier. To still guarantee order independence, the set from which the potential separating sets are created is ordered in an order independent way. Here, the elements of S_search_YX are ordered according to their minimal test statistic with Y
# if not np.isinf(self.max_q_global):
# S_search_YX = self._sort_search_set(S_search_YX, Y)
#
# # q_count counts the number of conditional independence tests made for subsets of S_search_YX
# q_count = 0
#
# # Run through all cardinality p_pc subsets of S_search_YX
# for Z in combinations(S_search_YX, p_pc):
#
# # Stop testing if the number of tests exceeds the bound specified by self.max_q_global
# q_count = q_count + 1
# if q_count > self.max_q_global:
# self._cannot_fix.add((X, Y))
# break
#
#                     # Test conditional independence of X and Y given Z. Correspondingly update self.val_min, self.pval_max, and self.cardinality
# val, pval = self.cond_ind_test.run_test(X = [X], Y = [Y], Z = list(Z), tau_max = self.tau_max)
#
# if self.verbosity >= 2:
# print(" %s _|_ %s | S_pc = %s: val = %.2f / pval = % .4f" %
# (X, Y, ' '.join([str(z) for z in list(Z)]), val, pval))
#
# self._update_val_min(X, Y, val)
# self._update_pval_max(X, Y, pval)
# self._update_cardinality(X, Y, len(Z))
#
# # Check whether the test result was significant
# if pval > self.pc_alpha:
#
# # Mark the edge from X to Y for removal, save Z as minimal separating set
# to_remove[Y[0]][X] = True
# self._save_sepset(X, Y, (frozenset(Z), ""))
#
# # Verbose output
# if self.verbosity >= 1:
# print("({},{:2}) {:11} {} given {}".format(X[0], X[1], "independent", Y, Z))
#
# # Break the for loop
# break
#
# # Run through all cardinality p_pc subsets of S_search_XY
# if test_X:
#
# if not np.isinf(self.max_q_global):
# S_search_XY = self._sort_search_set(S_search_XY, X)
#
# q_count = 0
# for Z in combinations(S_search_XY, p_pc):
#
# q_count = q_count + 1
# if q_count > self.max_q_global:
# self._cannot_fix.add((X, Y))
# break
#
# val, pval = self.cond_ind_test.run_test(X = [X], Y = [Y], Z = list(Z), tau_max = self.tau_max)
#
# if self.verbosity >= 2:
# print(" %s _|_ %s | S_pc = %s: val = %.2f / pval = % .4f" %
# (X, Y, ' '.join([str(z) for z in list(Z)]), val, pval))
#
# self._update_val_min(X, Y, val)
# self._update_pval_max(X, Y, pval)
# self._update_cardinality(X, Y, len(Z))
#
# if pval > self.pc_alpha:
#
# to_remove[Y[0]][X] = True
# self._save_sepset(X, Y, (frozenset(Z), ""))
#
# if self.verbosity >= 1:
# print("({},{:2}) {:11} {} given {}".format(X[0], X[1], "independent", Y, Z))
#
# break
#
# # end for (i, j, lag_i) in product(range(self.N), range(self.N), range(-self.tau_max, 1))
#
# ##########################################################################################################
# ### Remove edges marked for removal in to_remove #########################################################
#
# # Remove edges
# for j in range(self.N):
# for (i, lag_i) in to_remove[j].keys():
#
# self._write_link((i, lag_i), (j, 0), "", verbosity = self.verbosity)
#
# # Verbose output
# if self.verbosity >= 1:
# print("\nTest phase complete")
#
# ##########################################################################################################
# ### Check for convergence ################################################################################
#
# if has_converged:
#             # If no link needed testing, this algorithm has converged. Therefore, break the while loop
# break
# else:
# # At least one link needed testing, this algorithm has not yet converged. Therefore, increase p_pc
# p_pc = p_pc + 1
#
# # end while True
#
# # Verbose output
# if self.verbosity >= 1:
# print("\nRemoval phase complete")
# print("\nGraph:\n--------------------------------")
# self._print_graph_dict()
# print("--------------------------------")
#
# # Return
# return True
#
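# # A minimal, self-contained sketch of the bounded subset search used in the removal phase
# # above: for a fixed cardinality p_pc, at most max_q_global candidate conditioning sets Z
# # are drawn from the search set before the search is cut off. The helper and its names
# # are illustrative only and not part of the original algorithm.
# from itertools import combinations
#
# def _sketch_bounded_subsets(search_set, p_pc, max_q_global):
#     """Return the first max_q_global subsets of cardinality p_pc, in combinations order."""
#     out = []
#     for q_count, Z in enumerate(combinations(search_set, p_pc), start=1):
#         if q_count > max_q_global:
#             break
#         out.append(Z)
#     return out
#
# # Example: _sketch_bounded_subsets([1, 2, 3, 4], 2, 3) == [(1, 2), (1, 3), (1, 4)]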
#
# def _run_rfci_orientation_phase(self):
# """Run the orientation phase of the RFCI algorithm: Steps 2 and 3 of algorithm 3.2 in the RFCI paper"""
#
# # Verbose output
# if self.verbosity >= 1:
# print("\n=======================================================")
# print("=======================================================")
# print("Starting RFCI orientation phase")
#
# # Run the RFCI unshielded triple rule
# M = set(self._find_triples(pattern_ij='***', pattern_jk='***', pattern_ik=''))
# self._run_rfci_utr_rule(M)
#
#         # Remember whether the middle marks of all links are put to '-' by force. This is done once in the last iteration of the while loop in case self.fix_all_edges_before_final_orientation is True
# fixed_all = False
#
# # Run until convergence
# changed = True
# while changed:
#
# # Remember the current graph
# old_graph_dict = {}
# for j in range(self.N):
# old_graph_dict[j] = {k: v for (k, v) in self.graph_dict[j].items()}
#
# # Run Rules 1 - 3
# self._run_orientation_phase(rule_list = [["R-01"], ["R-02"], ["R-03"]])
#
# # Run the RFCI discriminating path rule
# self._run_rfci_dpr_rule()
#
# # Run Rules 8 - 10
# self._run_orientation_phase(rule_list = [["R-08"], ["R-09"], ["R-10"]])
#
# # Check whether there was a change
# changed = False
# for j in range(self.N):
#                 for (k, v) in self.graph_dict[j].items():
# if v != old_graph_dict[j][k]:
# changed = True
#
#             # In case the corresponding option is chosen and the graph does not change anymore, set all middle marks to '-'
# if not changed and self.fix_all_edges_before_final_orientation and not fixed_all:
#
# self._fix_all_edges()
# changed = True
# fixed_all = True
#
# # Fix all edges that have a tail
# self._fix_edges_with_tail()
#
# # Verbose output
# if self.verbosity >= 1:
# print("\nRFCI orientation phase complete")
# print("\nFinal graph:\n--------------------------------")
# print("--------------------------------")
# self._print_graph_dict()
# print("--------------------------------")
# print("--------------------------------\n")
#
# # Return True
# return True
#
#
# def _run_rfci_utr_rule(self, M):
# """Run the RFCI unshielded triple rule: Algorithm 4.4 of the RFCI supplement paper"""
#
# # Verbose output
# if self.verbosity >= 1:
# print("\nStarting RFCI UTR-Rule:")
#
# # Take care that not both (A, B, C) and (C, B, A) appear in M
# M_unique = set()
# for (A, B, C) in M:
# if not (C, B, A) in M_unique:
# M_unique.add((A, B, C))
# M = M_unique
#
#         # Make a list of triples that will be tested for orientation ('L' in RFCI paper)
# L = set()
#
# # Run as long as there are unshielded triples in M
# while len(M) > 0:
#
# # Remember all unshielded triples
# old_unshielded_triples = set(self._find_triples(pattern_ij='***', pattern_jk='***', pattern_ik=''))
#
# # Make a list of edges that are marked for removal
# to_remove = set()
#
# # Run through all unshielded triples in M
# for (A, B, C) in M:
#
# # Unpack A, B, C
# (i, lag_i) = A
# (j, lag_j) = B
# (k, lag_k) = C
#
# # Get all minimal separating sets in SepSet(A, C)
# sepsets = self._get_sepsets(A, C)
# sepsets = {Z for (Z, status) in sepsets if status == "m"}
#
# ###############################################################################################################
# ###############################################################################################################
#
# remove_AB = False
# link_AB = self._get_link(A, B)
#
# # Test A indep B given union(SepSet(A, C), intersection(def-anc(B), adj(B))) setminus{A, B} setminus{future of both A and B}
#
# # Shift the lags appropriately
# if lag_i <= lag_j:
# X = (i, lag_i - lag_j) # A shifted
# Y = (j, 0) # B shifted
# delta_lag = lag_j
#
# else:
# X = (j, lag_j - lag_i) # B shifted
# Y = (i, 0) # A shifted
# delta_lag = lag_i
#
# # Run through all minimal separating sets of A and C
# for Z in sepsets:
#
# # Construct the conditioning set to test
# # Take out future elements
# Z_test = {(var, lag - delta_lag) for (var, lag) in Z if lag - delta_lag <= 0 and lag - delta_lag >= -self.tau_max}
#
# # Test conditional independence of X and Y given Z,
# val, pval = self.cond_ind_test.run_test(X = [X], Y = [Y], Z = list(Z_test), tau_max = self.tau_max)
#
# if self.verbosity >= 2:
# print("UTR: %s _|_ %s | Z_test = %s: val = %.2f / pval = % .4f" %
# (X, Y, ' '.join([str(z) for z in list(Z_test)]), val, pval))
#
# # Update val_min and pval_max
# self._update_val_min(X, Y, val)
# self._update_pval_max(X, Y, pval)
# self._update_cardinality(X, Y, len(Z_test))
#
# # Check whether the test result was significant
# if pval > self.pc_alpha:
# # Mark the edge from X to Y for removal
# remove_AB = True
# # Store Z as a non-weakly-minimal separating set of X and Y
# self._save_sepset(X, Y, (frozenset(Z_test), "nm"))
#
# if remove_AB:
# # Remember the edge for removal
# pair_key, new_link = self._get_pair_key_and_new_link(A, B, "")
# to_remove.add((X, Y))
#
# ###############################################################################################################
# ###############################################################################################################
#
# remove_CB = False
# link_CB = self._get_link(C, B)
#
# # Test C indep B given union(SepSet(A, C), intersection(def-anc(B), adj(B))) setminus{A, B} setminus{future of both C and B}
#
# # Shift the lags appropriately
# if lag_k <= lag_j:
# X = (k, lag_k - lag_j)
# Y = (j, 0)
# delta_lag = lag_j
# else:
# X = (j, lag_j - lag_k)
# Y = (k, 0)
# delta_lag = lag_k
#
# # Run through all minimal separating sets of A and C
# for Z in sepsets:
#
# # Construct the conditioning set to test
# # Take out future elements
# Z_test = {(var, lag - delta_lag) for (var, lag) in Z if lag - delta_lag <= 0 and lag - delta_lag >= -self.tau_max}
#
# # Test conditional independence of X and Y given Z,
# val, pval = self.cond_ind_test.run_test(X = [X], Y = [Y], Z = list(Z_test), tau_max = self.tau_max)
#
# if self.verbosity >= 2:
# print("UTR: %s _|_ %s | Z_test = %s: val = %.2f / pval = % .4f" %
# (X, Y, ' '.join([str(z) for z in list(Z_test)]), val, pval))
#
# # Update val_min and pval_max
# self._update_val_min(X, Y, val)
# self._update_pval_max(X, Y, pval)
# self._update_cardinality(X, Y, len(Z_test))
#
# # Check whether the test result was significant
# if pval > self.pc_alpha:
# # Mark the edge from X to Y for removal
# remove_CB = True
# # Store Z as a non-weakly-minimal separating set of X and Y
# self._save_sepset(X, Y, (frozenset(Z_test), "nm"))
#
# if remove_CB:
# # Remember the edge for removal
# pair_key, new_link = self._get_pair_key_and_new_link(C, B, "")
# to_remove.add((X, Y))
#
# ###############################################################################################################
# ###############################################################################################################
#
# if not remove_AB and not remove_CB and not link_AB[2] in ["-", "x"] and not link_CB[2] in ["-", "x"] and not (link_AB[2] == ">" and link_CB[2] == ">"):
#
# L.add((A, B, C))
#
# # end for (A, B, C) in M
#
# ###################################################################################################################
# ###################################################################################################################
#
# # Remove edges marked for removal
# for (X, Y) in to_remove:
# self._write_link(X, Y, "", verbosity = self.verbosity)
#
# # Make sepsets minimal (here, this agrees with minimal)
# for (X, Y) in to_remove:
#
# # Read out all separating sets that were found in the rule phase, then consider only those of minimal cardinality
# old_sepsets_all = {Z for (Z, _) in self._get_sepsets(X, Y)}
# min_size = min({len(Z) for Z in old_sepsets_all})
# old_sepsets_smallest = {Z for Z in old_sepsets_all if len(Z) == min_size}
#
# # For all separating sets of minimal cardinality, find minimal separating subsets in an order independent way
# self._delete_sepsets(X, Y)
# self._make_sepset_minimal(X, Y, old_sepsets_smallest)
#
# # Find new unshielded triples and determine the new "M"
# new_unshielded_triples = set(self._find_triples(pattern_ij='***', pattern_jk='***', pattern_ik=''))
# M = new_unshielded_triples.difference(old_unshielded_triples)
#
# # Take care that not both (A, B, C) and (C, B, A) appear in M
# M_unique = set()
# for (A, B, C) in M:
# if not (C, B, A) in M_unique:
# M_unique.add((A, B, C))
# M = M_unique
#
# # end while len(M) > 0
#
# #######################################################################################################################
# #######################################################################################################################
#
#         # Remove all elements from L that are no longer part of an unshielded triple
# L_final = {(A, B, C) for (A, B, C) in L if self._get_link(A, B) != "" and self._get_link(C, B) != ""}
#
# # Run through all these triples and test for orientation as collider
# to_orient = []
# for (A, B, C) in L_final:
#
# if self._B_not_in_SepSet_AC(A, B, C):
#
# link_AB = self._get_link(A, B)
# link_CB = self._get_link(C, B)
#
# # Prepare the new links and save them to the output
# if link_AB[2] != ">":
# new_link_AB = link_AB[0] + link_AB[1] + ">"
# to_orient.append(self._get_pair_key_and_new_link(A, B, new_link_AB))
#
# new_link_CB = link_CB[0] + link_CB[1] + ">"
# if link_CB[2] != ">":
# to_orient.append(self._get_pair_key_and_new_link(C, B, new_link_CB))
#
# # Verbose output
# if self.verbosity >= 1:
# print("\nUTR")
# for ((i, j, lag_i), new_link) in set(to_orient):
# print("{:10} ({},{:2}) {:3} ({},{:2}) ==> ({},{:2}) {:3} ({},{:2}) ".format("Marked:", i, lag_i, self._get_link((i, lag_i), (j, 0)), j, 0,i, lag_i, new_link, j, 0))
# if len(to_orient) == 0:
# print("Found nothing")
#
# # Return if no orientations were found
# if len(to_orient) == 0:
# return False
#
#         # Aggregate orientations
# new_ancs = {j: set() for j in range(self.N)}
# new_non_ancs = {j: set() for j in range(self.N)}
#
# for ((i, j, lag_i), new_link) in to_orient:
#
# # The old link
# old_link = self._get_link((i, lag_i), (j, 0))
#
#             # Assert that no preceding variable is marked as an ancestor of a later variable
# assert not (lag_i > 0 and new_link[2] == "-")
#
# # New ancestral relation of (i, lag_i) to (j, 0)
# if new_link[0] == "-" and old_link[0] != "-":
# new_ancs[j].add((i, lag_i))
# elif new_link[0] == "<" and old_link[0] != "<":
# new_non_ancs[j].add((i, lag_i))
#
# # New ancestral relation of (j, 0) to (i, lag_i == 0)
# if lag_i == 0:
# if new_link[2] == "-" and old_link[2] != "-":
# new_ancs[i].add((j, 0))
# elif new_link[2] == ">" and old_link[2] != ">":
# new_non_ancs[i].add((j, 0))
#
# # Make the orientations
# self._apply_new_ancestral_information(new_non_ancs, new_ancs)
#
# # Return True
# return True
#
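# # A minimal sketch of the triple deduplication used twice in _run_rfci_utr_rule above:
# # an unshielded triple (A, B, C) and its mirror image (C, B, A) describe the same
# # structure, so only one representative of each pair is kept. The helper and the node
# # labels below are illustrative only.
# def _sketch_deduplicate_triples(M):
#     M_unique = set()
#     for (A, B, C) in M:
#         if (C, B, A) not in M_unique:
#             M_unique.add((A, B, C))
#     return M_unique
#
# # Example: the mirrored pair collapses to a single triple
# # len(_sketch_deduplicate_triples({((0, 0), (1, 0), (2, 0)), ((2, 0), (1, 0), (0, 0))})) == 1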
#
# def _run_rfci_dpr_rule(self):
# """Run the RFCI discriminating path rule: Lines 3 - 29 in algorithm 4.5 of the RFCI supplement paper"""
#
# # Verbose output
# if self.verbosity >= 1:
# print("\nStarting RFCI DPR-Rule:")
#
# # Find all relevant triangles W-V-Y
# triangles = set(self._find_triples(pattern_ij='<**', pattern_jk='o*+', pattern_ik='-*>'))
#
# # Verbose output
# if self.verbosity >= 1 and len(triangles) == 0:
# print("\nFound no suitable triangles")
#
# # Run through all triangles
# while len(triangles) > 0:
#
# # Remember all paths that qualify for the orientation test
# paths_to_test_for_orientation = dict()
#
# # Remember edges marked for removal
# to_remove = set()
#
# # Run through all of these triangles
# for (W, V, Y_path) in triangles:
#
# # Find all discriminating paths for this triangle, then consider only the shortest paths
# discriminating_paths = self._get_R4_discriminating_paths_rfci((W, V, Y_path), max_length = np.inf)
#
# # If there is no discriminating path, continue with the next triple
# if len(discriminating_paths) == 0:
# continue
#
#                 # Only consider the shortest discriminating paths
# min_len = min([len(path) for path in discriminating_paths])
# shortest_discriminating_paths = [path for path in discriminating_paths if len(path) == min_len]
#
#                 # Run through all shortest discriminating paths
# for path in shortest_discriminating_paths:
#
# path_disqualified = False
#
# # Get the separating set between the end points
# X_1 = path[-1]
# all_sepsets = {Z for (Z, _) in self._get_sepsets(X_1, Y_path)}
#
#                     # Run through all pairs of adjacent variables on the path
# for i in range(min_len - 1):
#
# # Read out the current pair of adjacent variables
# (var_A, lag_A) = path[i]
# (var_B, lag_B) = path[i + 1]
#
# # Time shift accordingly
# if lag_A <= lag_B:
# X = (var_A, lag_A - lag_B) # A shifted
# Y = (var_B, 0) # B shifted
# delta_lag = lag_B
#
# else:
# X = (var_B, lag_B - lag_A) # B shifted
# Y = (var_A, 0) # A shifted
# delta_lag = lag_A
#
# # Run through all sepsets
# for S_ik in all_sepsets:
#
# # Time shift the separating set
# S_ik_shift = {(var, lag - delta_lag) for (var, lag) in S_ik if lag - delta_lag <= 0 and lag - delta_lag >= -self.tau_max}
#
# # Run through all subsets of S_ik
# for p in range(len(S_ik) + 1):
# for Z in combinations(S_ik_shift, p):
#
# # HACK
# val, pval = self.cond_ind_test.run_test(X = [X], Y = [Y], Z = list(Z), tau_max = self.tau_max)
#
# if self.verbosity >= 2:
# print("DPR: %s _|_ %s | Z = %s: val = %.2f / pval = % .4f" %
# (X, Y, ' '.join([str(z) for z in list(Z)]), val, pval))
#
# # Update val_min and pval_max
# self._update_val_min(X, Y, val)
# self._update_pval_max(X, Y, pval)
# self._update_cardinality(X, Y, len(Z))
#
# # Check whether the test result was significant
# if pval > self.pc_alpha:
# # Mark the edge from X to Y for removal and store Z as a weakly-minimal separating set of X and Y
# to_remove.add((X, Y))
# self._save_sepset(X, Y, (frozenset(Z), "m"))
#
# # Break the inner most for loops
# path_disqualified = True
# break
#
# if path_disqualified:
# break
#
# # end for Z in combinations(S_ik_shift, p)
# # end for p in range(len(S_ik) + 1)
# # end for S_ik in all_sepsets
#
#                     # If the path has been disqualified, break the for loop through adjacent pairs on the path
# if path_disqualified:
# break
#
# # end for i in range(min_len - 1)
# if not path_disqualified:
# if (W, V, Y_path) in paths_to_test_for_orientation.keys():
# paths_to_test_for_orientation[(W, V, Y_path)].append(path)
# else:
# paths_to_test_for_orientation[(W, V, Y_path)] = [path]
#
# # end for path in shortest_discriminating_paths
# # end for (W, V, Y) in triangles
#
# # Remember unshielded triples at this point
# old_unshielded_triples = set(self._find_triples(pattern_ij='***', pattern_jk='***', pattern_ik=''))
#
# # Delete all edges that are marked for removal
# for (X, Y) in to_remove:
# self._write_link(X, Y, "", verbosity = self.verbosity)
#
# # Determine the unshielded triples
# new_unshielded_triples = set(self._find_triples(pattern_ij='***', pattern_jk='***', pattern_ik=''))
# new_unshielded_triples = new_unshielded_triples.difference(old_unshielded_triples)
#
# # Run the RFCI unshielded triple rule on the new unshielded triples
# restart = self._run_rfci_utr_rule(new_unshielded_triples)
#
#             # Keep only those qualified paths that are still paths
# final_paths = dict()
# for (key, path_list) in paths_to_test_for_orientation.items():
#
# disqualifed = False
#
# for path in path_list:
#
# for i in range(len(path) - 1):
# if self._get_link(path[i], path[i+1]) == "":
# disqualifed = True
# break
# if disqualifed:
# continue
#
# if key in final_paths.keys():
# final_paths[key].append(path)
# else:
# final_paths[key] = [path]
#
# # Subject the surviving paths to the orientation test
# to_orient = []
# for (key, path_list) in final_paths.items():
# for path in path_list:
#
# # Get the initial triangle
# Y = path[0]
# V = path[1]
# W = path[2]
#
# # Get the end point node
# X_1 = path[-1]
#
# # Get the current link from W to V, which we will need below
# link_WV = self._get_link(W, V)
#
# # Check which of the two cases of the rule we are in, then append the appropriate new links to the output list
# if self._B_in_SepSet_AC(X_1, V, Y):
# # New link from V to Y
# to_orient.append(self._get_pair_key_and_new_link(V, Y, "-->"))
#
# elif link_WV != "<-x" and self._B_not_in_SepSet_AC(X_1, V, Y):
# # New link from V to Y
# to_orient.append(self._get_pair_key_and_new_link(V, Y, "<->"))
#
# # If needed, also the new link from W to V
# if link_WV != "<->":
# to_orient.append(self._get_pair_key_and_new_link(W, V, "<->"))
#
# # Verbose output
# if self.verbosity >= 1:
# print("\nDPR")
# for ((i, j, lag_i), new_link) in set(to_orient):
# print("{:10} ({},{:2}) {:3} ({},{:2}) ==> ({},{:2}) {:3} ({},{:2}) ".format("Marked:", i, lag_i, self._get_link((i, lag_i), (j, 0)), j, 0,i, lag_i, new_link, j, 0))
# if len(to_orient) == 0:
# print("Found nothing")
#
# # Return if neither UTR nor DPR found anything
# if not restart and len(to_orient) == 0:
# return True
#
#         # Aggregate orientations
# new_ancs = {j: set() for j in range(self.N)}
# new_non_ancs = {j: set() for j in range(self.N)}
#
# for ((i, j, lag_i), new_link) in to_orient:
#
# # The old link
# old_link = self._get_link((i, lag_i), (j, 0))
#
#             # Assert that no preceding variable is marked as an ancestor of a later variable
# assert not (lag_i > 0 and new_link[2] == "-")
#
# # New ancestral relation of (i, lag_i) to (j, 0)
# if new_link[0] == "-" and old_link[0] != "-":
# new_ancs[j].add((i, lag_i))
# elif new_link[0] == "<" and old_link[0] != "<":
# new_non_ancs[j].add((i, lag_i))
#
# # New ancestral relation of (j, 0) to (i, lag_i == 0)
# if lag_i == 0:
# if new_link[2] == "-" and old_link[2] != "-":
# new_ancs[i].add((j, 0))
# elif new_link[2] == ">" and old_link[2] != ">":
# new_non_ancs[i].add((j, 0))
#
# # Make the orientations
# self._apply_new_ancestral_information(new_non_ancs, new_ancs)
#
# # Check for the new relevant triangles
# new_triangles = set(self._find_triples(pattern_ij='<**', pattern_jk='o*+', pattern_ik='-*>'))
# triangles = new_triangles.difference(triangles)
#
# # end while len(triangles) > 0
#
#
# def _make_sepset_minimal(self, X, Y, Z_list):
# """
#         X and Y are conditionally independent given each Z in Z_list. However, it is not yet clear whether any of these Z is a minimal separating set.
#
# This function finds minimal separating subsets in an order independent way and writes them to the self.sepsets dictionary. Only those sets which are minimal separating sets are kept.
# """
#
# # Base Case 1:
# # Z in Z_list is minimal if len(Z) <= 1 or Z \subset ancs
# any_minimal = False
#
# for Z in Z_list:
#
# if len(Z) <=1:
# self._save_sepset(X, Y, (frozenset(Z), "m"))
# any_minimal = True
#
# if any_minimal:
# return None
#
# # If not Base Case 1, we need to search for separating subsets. We do this for all Z in Z_list, and build a set sepsets_next_call that contains all separating sets for the next recursive call
# sepsets_next_call = set()
#
# for Z in Z_list:
#
#             # Test for removal of each node A in Z
# new_sepsets = []
# val_values = []
#
# for A in Z:
#
# Z_A = [node for node in Z if node != A]
#
# # Run the conditional independence test
# val, pval = self.cond_ind_test.run_test(X = [X], Y = [Y], Z = Z_A, tau_max = self.tau_max)
#
# if self.verbosity >= 2:
# print("MakeMin: %s _|_ %s | Z_A = %s: val = %.2f / pval = % .4f" %
# (X, Y, ' '.join([str(z) for z in list(Z_A)]), val, pval))
#
# # Check whether the test result was significant
# if pval > self.pc_alpha:
# new_sepsets.append(frozenset(Z_A))
# val_values.append(val)
#
# # If new_sepsets is empty, then Z is already minimal
# if len(new_sepsets) == 0:
# self._save_sepset(X, Y, (frozenset(Z), "m"))
# any_minimal = True
#
# # If we did not yet find a minimal separating set
# if not any_minimal:
#
#                 # Sort all separating sets in new_sepsets by their test statistic, then append those separating sets with maximal statistic to sepsets_next_call. This i) guarantees order independence while ii) continuing to test as few separating sets as possible
#                 new_sepsets = [node for _, node in sorted(zip(val_values, new_sepsets), key = lambda x: x[0], reverse = True)]
#                 val_values = sorted(val_values, reverse = True)
#
#                 i = -1
#                 while i <= len(val_values) - 2 and val_values[i + 1] == val_values[0]:
#                     sepsets_next_call.add(new_sepsets[i + 1])
#                     i = i + 1
#
#                 assert i >= 0
#
# # If we did not yet find a minimal separating set, make a recursive call
# if not any_minimal:
# self._make_sepset_minimal(X, Y, sepsets_next_call)
# else:
# return None
#
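# # A minimal sketch of the recursion idea behind _make_sepset_minimal above, with the
# # conditional independence test replaced by a caller-supplied oracle is_indep(Z) so the
# # example is self-contained. Assuming the oracle declares Z itself separating, it returns
# # one minimal separating subset of Z; it does not reproduce the order-independence
# # bookkeeping of the method above. All names are illustrative only.
# def _sketch_minimal_sepset(Z, is_indep):
#     for A in Z:
#         Z_A = frozenset(node for node in Z if node != A)
#         if is_indep(Z_A):
#             return _sketch_minimal_sepset(Z_A, is_indep)
#     return frozenset(Z)
#
# # Example oracle: any set containing (2, 0) separates, hence {(2, 0)} is minimal
# # _sketch_minimal_sepset({(1, -1), (2, 0)}, lambda Z: (2, 0) in Z) == frozenset({(2, 0)})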
# ########################################################################################################################
# ########################################################################################################################
# ########################################################################################################################
#
# def _run_orientation_phase(self, rule_list):
# """Function for exhaustive application of the orientation rules specified by rule_list."""
#
# # Verbose output
# if self.verbosity >= 1:
# print("\nStarting orientation phase")
# print("with rule list: ", rule_list)
#
# # Run through all priority levels of rule_list
# idx = 0
# while idx <= len(rule_list) - 1:
#
#             # Some rules require that self.graph_full_dict is updated. Therefore, initialize this variable once the while loop (re)-starts at the first priority level
# if idx == 0:
# self._initialize_full_graph()
#
# ###########################################################################################################
# ### Rule application ######################################################################################
#
# # Get the current rules
# current_rules = rule_list[idx]
#
# # Prepare a list to remember marked orientations
# to_orient = []
#
# # Run through all current rules
# for rule in current_rules:
#
# # Verbose output
# if self.verbosity >= 1:
# print("\n{}:".format(rule))
#
# # Exhaustively apply the rule to the graph...
# orientations = self._apply_rule(rule)
#
# # Verbose output
# if self.verbosity >= 1:
# for ((i, j, lag_i), new_link) in set(orientations):
# print("{:10} ({},{:2}) {:3} ({},{:2}) ==> ({},{:2}) {:3} ({},{:2}) ".format("Marked:", i, lag_i, self._get_link((i, lag_i), (j, 0)), j, 0,i, lag_i, new_link, j, 0))
# if len(orientations) == 0:
# print("Found nothing")
#
# # ... and stage the results for orientation and removal
# to_orient.extend(orientations)
#
# ###########################################################################################################
# ### Aggregation of marked orientations ####################################################################
#
# new_ancs = {j: set() for j in range(self.N)}
# new_non_ancs = {j: set() for j in range(self.N)}
#
#             # Run through all marked orientations
# for ((i, j, lag_i), new_link) in to_orient:
#
# # The old link
# old_link = self._get_link((i, lag_i), (j, 0))
#
#             # Assert that no preceding variable is marked as an ancestor of a later variable
# assert not (lag_i > 0 and new_link[2] == "-")
#
# # New ancestral relation of (i, lag_i) to (j, 0)
# if new_link[0] == "-" and old_link[0] != "-":
# new_ancs[j].add((i, lag_i))
# elif new_link[0] == "<" and old_link[0] != "<":
# new_non_ancs[j].add((i, lag_i))
#
# # New ancestral relation of (j, 0) to (i, lag_i == 0)
# if lag_i == 0:
# if new_link[2] == "-" and old_link[2] != "-":
# new_ancs[i].add((j, 0))
# elif new_link[2] == ">" and old_link[2] != ">":
# new_non_ancs[i].add((j, 0))
#
# ###########################################################################################################
# ### Update ancestral information and determine next step ##################################################
#
# # Update ancestral information. The function called includes conflict resolution
# restart = self._apply_new_ancestral_information(new_non_ancs, new_ancs)
#
# # If any useful new information was found, go back to idx = 0, else increase idx by 1
# idx = 0 if restart == True else idx + 1
#
#         # end while idx <= len(rule_list) - 1
# # The algorithm has converged
#
# # Verbose output
# if self.verbosity >= 1:
# print("\nOrientation phase complete")
#
# # Return
# return True
#
# def _fix_all_edges(self):
# """Set the middle mark of all links to '-'"""
#
# for j in range(self.N):
# for (i, lag_i) in self.graph_dict[j].keys():
#
# link = self._get_link((i, lag_i), (j, 0))
# if len(link) > 0:
# new_link = link[0] + "-" + link[2]
# self.graph_dict[j][(i, lag_i)] = new_link
#
#
# def _fix_edges_with_tail(self):
# """Set the middle mark of all edges with a tail to '-', provided they are in self._can_fix. For an explanation of self._can_fix see _run_pc_removal_phase()"""
#
# for j in range(self.N):
# for (i, lag_i) in self.graph_dict[j].keys():
#
# link = self._get_link((i, lag_i), (j, 0))
# if len(link) > 0 and (link[0] == "-" or link[2] == "-") and (((i, lag_i), (j, 0)) in self._can_fix or ((j, 0), (i, lag_i)) in self._can_fix):
# new_link = link[0] + "-" + link[2]
# self.graph_dict[j][(i, lag_i)] = new_link
#
#
# def _apply_new_ancestral_information(self, new_non_ancs, new_ancs):
# """Apply the new ancestorships and non-ancestorships specified by new_non_ancs and new_ancs to the current graph. Conflicts are resolved by marking. Returns True if any circle mark was turned into a head or tail, else False."""
#
# #######################################################################################################
# ### Preprocessing #####################################################################################
#
# # Memory variables
# add_to_def_non_ancs = {j: set() for j in range(self.N)}
# add_to_def_ancs = {j: set() for j in range(self.N)}
# add_to_ambiguous_ancestorships = {j: set() for j in range(self.N)}
# put_head_or_tail = False
#
# # Default values
# if new_non_ancs is None:
# new_non_ancs = {j: set() for j in range(self.N)}
#
# if new_ancs is None:
# new_ancs = {j: set() for j in range(self.N)}
#
#         # Marking A as an ancestor of B implies that B is marked as a non-ancestor of A. This is only non-trivial when A and B are contemporaneous
# for j in range(self.N):
# for (i, lag_i) in new_ancs[j]:
# if lag_i == 0:
# new_non_ancs[i].add((j, 0))
#
# #######################################################################################################
# ### Conflict resolution ###############################################################################
#
# # Iterate through new_non_ancs
# for j in range(self.N):
# for (i, lag_i) in new_non_ancs[j]:
# # X = (i, lag_i), Y = (j, 0)
# # X is marked as non-ancestor for Y
#
# # Conflict resolution
# if (i, lag_i) in self.ambiguous_ancestorships[j]:
# # There is a conflict, since it is already marked as ambiguous whether X is an ancestor of Y
# if self.verbosity >= 1:
# print("{:10} ({}, {:2}) marked as non-anc of {} but saved as ambiguous".format("Conflict:", i, lag_i, (j, 0)))
#
# elif (i, lag_i) in self.def_ancs[j]:
# # There is a conflict, since X is already marked as ancestor of Y
# add_to_ambiguous_ancestorships[j].add((i, lag_i))
#
# if self.verbosity >= 1:
# print("{:10} ({}, {:2}) marked as non-anc of {} but saved as anc".format("Conflict:", i, lag_i, (j, 0)))
#
# elif (i, lag_i) in new_ancs[j]:
# # There is a conflict, since X is also marked as a new ancestor of Y
# add_to_ambiguous_ancestorships[j].add((i, lag_i))
#
# if self.verbosity >= 1:
# print("{:10} ({}, {:2}) marked as both anc- and non-anc of {}".format("Conflict:", i, lag_i, (j, 0)))
#
# else:
# # There is no conflict
# add_to_def_non_ancs[j].add((i, lag_i))
#
# # Iterate through new_ancs
# for j in range(self.N):
# for (i, lag_i) in new_ancs[j]:
# # X = (i, lag_i), Y = (j, 0)
# # X is marked as ancestor for Y
#
# # Conflict resolution
# if (i, lag_i) in self.ambiguous_ancestorships[j]:
# # There is a conflict, since it is already marked as ambiguous whether X is an ancestor of Y
# if self.verbosity >= 1:
# print("{:10} ({}, {:2}) marked as anc of {} but saved as ambiguous".format("Conflict:", i, lag_i, (j, 0)))
#
# elif lag_i == 0 and (j, 0) in self.ambiguous_ancestorships[i]:
#                 # There is a conflict, since X and Y are contemporaneous and it is already marked as ambiguous whether Y is an ancestor of X
# # Note: This is required here, because X being an ancestor of Y implies that Y is not an ancestor of X. This ambiguity cannot exist when X is before Y
# if self.verbosity >= 1:
# print("{:10} ({}, {:2}) marked as anc of {} but saved as ambiguous".format("Conflict:", i, lag_i, (j, 0)))
#
# elif (i, lag_i) in self.def_non_ancs[j]:
# # There is a conflict, since X is already marked as non-ancestor of Y
# add_to_ambiguous_ancestorships[j].add((i, lag_i))
#
# if self.verbosity >= 1:
# print("{:10} ({}, {:2}) marked as anc of {} but saved as non-anc".format("Conflict:", i, lag_i, (j, 0)))
#
# elif (i, lag_i) in new_non_ancs[j]:
# # There is a conflict, since X is also marked as a new non-ancestor of Y
# add_to_ambiguous_ancestorships[j].add((i, lag_i))
#
# if self.verbosity >= 1:
# print("{:10} ({}, {:2}) marked as both anc- and non-anc of {}".format("Conflict:", i, lag_i, (j, 0)))
#
# else:
# # There is no conflict
# add_to_def_ancs[j].add((i, lag_i))
#
# #######################################################################################################
#
# #######################################################################################################
# ### Apply the ambiguous information ###################################################################
#
# for j in range(self.N):
#
# for (i, lag_i) in add_to_ambiguous_ancestorships[j]:
#
# old_link = self._get_link((i, lag_i), (j, 0))
# if len(old_link) > 0 and old_link[0] != "x":
#
# new_link = "x" + old_link[1] + old_link[2]
# self._write_link((i, lag_i), (j, 0), new_link, verbosity = self.verbosity)
#
# if self.verbosity >= 1:
# if (i, lag_i) in self.def_ancs[j]:
# print("{:10} Removing ({}, {:2}) as anc of {}".format("Update:", i, lag_i, (j, 0)))
# if (i, lag_i) in self.def_non_ancs[j]:
# print("{:10} Removing ({}, {:2}) as non-anc of {}".format("Update:", i, lag_i, (j, 0)))
#
# self.def_ancs[j].discard((i, lag_i))
# self.def_non_ancs[j].discard((i, lag_i))
#
# if lag_i == 0:
#
# if self.verbosity >= 1 and (j, 0) in self.def_ancs[i]:
#                             print("{:10} Removing {} as anc of {}".format("Update:", (j, 0), (i, 0)))
#
# self.def_ancs[i].discard((j, 0))
# # Do we also need the following?
# # self.def_non_ancs[i].discard((j, 0))
#
# if self.verbosity >= 1 and (i, lag_i) not in self.ambiguous_ancestorships[j]:
# print("{:10} Marking ancestorship of ({}, {:2}) to {} as ambiguous".format("Update:", i, lag_i, (j, 0)))
#
# self.ambiguous_ancestorships[j].add((i, lag_i))
#
# #######################################################################################################
# ### Apply the unambiguous information #################################################################
#
# for j in range(self.N):
#
# for (i, lag_i) in add_to_def_non_ancs[j]:
#
# old_link = self._get_link((i, lag_i), (j, 0))
# if len(old_link) > 0 and old_link[0] != "<":
# new_link = "<" + old_link[1] + old_link[2]
# self._write_link((i, lag_i), (j, 0), new_link, verbosity = self.verbosity)
# put_head_or_tail = True
#
# if self.verbosity >= 1 and (i, lag_i) not in self.def_non_ancs[j]:
# print("{:10} Marking ({}, {:2}) as non-anc of {}".format("Update:", i, lag_i, (j, 0)))
#
# self.def_non_ancs[j].add((i, lag_i))
#
#
# for (i, lag_i) in add_to_def_ancs[j]:
#
# old_link = self._get_link((i, lag_i), (j, 0))
# if len(old_link) > 0 and (old_link[0] != "-" or old_link[2] != ">"):
# new_link = "-" + old_link[1] + ">"
# self._write_link((i, lag_i), (j, 0), new_link, verbosity = self.verbosity)
# put_head_or_tail = True
#
# if self.verbosity >= 1 and (i, lag_i) not in self.def_ancs[j]:
# print("{:10} Marking ({}, {:2}) as anc of {}".format("Update:", i, lag_i, (j, 0)))
#
# self.def_ancs[j].add((i, lag_i))
#
# if lag_i == 0:
#
# if self.verbosity >= 1 and (j, 0) not in self.def_non_ancs[i]:
# print("{:10} Marking {} as non-anc of {}".format("Update:",(j, 0), (i, 0)))
#
# self.def_non_ancs[i].add((j, 0))
#
# #######################################################################################################
#
# return put_head_or_tail
#
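# # A minimal sketch of the conflict-resolution idea in _apply_new_ancestral_information
# # above: a node that is simultaneously proposed as ancestor and as non-ancestor is added
# # to neither definite set but marked as ambiguous instead. The set-based helper below is
# # illustrative only and ignores the lag handling and verbose output of the method above.
# def _sketch_resolve_ancestral_conflicts(proposed_ancs, proposed_non_ancs):
#     ambiguous = proposed_ancs & proposed_non_ancs
#     return proposed_ancs - ambiguous, proposed_non_ancs - ambiguous, ambiguous
#
# # Example: _sketch_resolve_ancestral_conflicts({(0, -1), (1, 0)}, {(1, 0)})
# # == ({(0, -1)}, set(), {(1, 0)})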
#
# def _apply_rule(self, rule):
# """Call the orientation-removal-rule specified by the string argument rule"""
#
# if rule == "R-01":
# return self._apply_R01()
# elif rule == "R-02":
# return self._apply_R02()
# elif rule == "R-03":
# return self._apply_R03()
# elif rule == "R-08":
# return self._apply_R08()
# elif rule == "R-09":
# return self._apply_R09()
# elif rule == "R-10":
# return self._apply_R10()
#
#
# def _B_not_in_SepSet_AC(self, A, B, C):
# """Return True if B is not in the separating set of A and C according to the standard majority rule."""
#
# # Treat A - B - C as the same triple as C - B - A
# # Convention: A is before C or, if they are contemporaneous, the index of A is smaller than that of C
# if C[1] < A[1] or (C[1] == A[1] and C[0] < A[0]):
# return self._B_not_in_SepSet_AC(C, B, A)
#
# # Remember all separating sets that we will find
# all_sepsets = set()
#
# # Test for independence given all subsets of non-future adjacencies of A
# adj_A = self._get_non_future_adj([A]).difference({A, C})
# adj_C = self._get_non_future_adj([C]).difference({A, C})
#
# # Depending on the self.max_cond_px and self.max_p_global, determine the maximal cardinality of subsets of adj_A that are tested
# if A[1] < C[1]:
# max_p_A = min([len(adj_A), self.max_cond_px, self.max_p_global]) + 1
# else:
# max_p_A = min([len(adj_A), self.max_p_global]) + 1
#
# # If self.max_q_global is finite, order adj_A and adj_C according to self.val_min to guarantee order independence
# if not np.isinf(self.max_q_global):
# adj_A = self._sort_search_set(adj_A, A)
# adj_C = self._sort_search_set(adj_C, C)
#
# # Shift lags
# adj_A = [(var, lag - C[1]) for (var, lag) in adj_A]
# adj_C = [(var, lag - C[1]) for (var, lag) in adj_C]
# X = (A[0], A[1] - C[1])
# Y = (C[0], 0)
#
# # Test for independence given subsets of non-future adjacencies of A
# for p in range(max_p_A):
#
# # Count the number of tests made at this value of p
# q_count = 0
#
# for Z_raw in combinations(adj_A, p):
#
# # Break if the maximal number of tests specified by self.max_q_global has been exceeded
# q_count = q_count + 1
# if q_count > self.max_q_global:
# break
#
# # Prepare the conditioning set
# Z = {node for node in Z_raw if node != X and node != Y}
#
# # Test for conditional independence of X and Y given Z
# val, pval = self.cond_ind_test.run_test(X = [X], Y = [Y], Z = list(Z), tau_max = self.tau_max)
#
# if self.verbosity >= 2:
# print("BnotinSepSetAC(A): %s _|_ %s | Z = %s: val = %.2f / pval = % .4f" %
# (X, Y, ' '.join([str(z) for z in list(Z)]), val, pval))
#
# # Check whether the test result was significant. If yes, remember Z as separating set
# if pval > self.pc_alpha:
# all_sepsets.add(frozenset(Z))
#
# # Test for independence given subsets of non-future adjacencies of C
# for p in range(min(len(adj_C), self.max_p_global) + 1):
#
# q_count = 0
# for Z_raw in combinations(adj_C, p):
#
# q_count = q_count + 1
# if q_count > self.max_q_global:
# break
#
# Z = {node for node in Z_raw if node != X and node != Y}
#
# val, pval = self.cond_ind_test.run_test(X = [X], Y = [Y], Z = list(Z), tau_max = self.tau_max)
#
# if self.verbosity >= 2:
# print("BnotinSepSetAC(C): %s _|_ %s | Z = %s: val = %.2f / pval = % .4f" %
# (X, Y, ' '.join([str(z) for z in list(Z)]), val, pval))
#
# if pval > self.pc_alpha:
# all_sepsets.add(frozenset(Z))
#
# # Count number of sepsets and number of sepsets that contain B
# n_sepsets = len(all_sepsets)
# n_sepsets_with_B = len([1 for Z in all_sepsets if (B[0], B[1] - C[1]) in Z])
#
# # Return True if any separating set was found and B is in less than half of them
# return True if n_sepsets > 0 and 2*n_sepsets_with_B < n_sepsets else False
#
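# # A minimal sketch of the majority-rule decision at the end of _B_not_in_SepSet_AC above:
# # once all candidate separating sets for (A, C) have been collected, B counts as
# # "not in SepSet(A, C)" if it appears in strictly fewer than half of them. The helper is
# # illustrative only; B_shifted stands for B after the lag shift applied in the method.
# def _sketch_majority_rule_not_in(all_sepsets, B_shifted):
#     n_sepsets = len(all_sepsets)
#     n_sepsets_with_B = sum(1 for Z in all_sepsets if B_shifted in Z)
#     return n_sepsets > 0 and 2 * n_sepsets_with_B < n_sepsets
#
# # Example: B appears in one of three separating sets, so the majority rule excludes it
# # _sketch_majority_rule_not_in([frozenset({(1, 0)}), frozenset(), frozenset({(2, -1)})], (1, 0)) == True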
#
# def _B_in_SepSet_AC(self, A, B, C):
#         """Return True if B is in the separating set of A and C according to the standard majority rule on minimal separating sets"""
#
# # Treat A - B - C as the same triple as C - B - A
# # Convention: A is before C or, if they are contemporaneous, the index of A is smaller than that of C
# if C[1] < A[1] or (C[1] == A[1] and C[0] < A[0]):
#             return self._B_in_SepSet_AC(C, B, A)
#
# # Remember all separating sets that we will find
# all_sepsets = set()
#
# # Test for independence given all subsets of non-future adjacencies of A
# adj_A = self._get_non_future_adj([A]).difference({A, C})
# adj_C = self._get_non_future_adj([C]).difference({A, C})
#
# # Depending on the self.max_cond_px and self.max_p_global, determine the maximal cardinality of subsets of adj_A that are tested
# if A[1] < C[1]:
# max_p_A = min([len(adj_A), self.max_cond_px, self.max_p_global]) + 1
# else:
# max_p_A = min([len(adj_A), self.max_p_global]) + 1
#
# # If self.max_q_global is finite, order adj_A and adj_C according to self.val_min to guarantee order independence
# if not np.isinf(self.max_q_global):
# adj_A = self._sort_search_set(adj_A, A)
# adj_C = self._sort_search_set(adj_C, C)
#
# # Shift lags
# adj_A = [(var, lag - C[1]) for (var, lag) in adj_A]
# adj_C = [(var, lag - C[1]) for (var, lag) in adj_C]
# X = (A[0], A[1] - C[1])
# Y = (C[0], 0)
#
# # Remember whether any separating set is found in the below for loop
# sepset_found = False
#
# # Test for independence given subsets of non-future adjacencies of A
# for p in range(max_p_A):
#
# # Count the number of tests made at this value of p
# q_count = 0
#
# for Z_raw in combinations(adj_A, p):
#
# # Break if the maximal number of tests specified by self.max_q_global has been exceeded
# q_count = q_count + 1
# if q_count > self.max_q_global:
# break
#
# # Prepare the conditioning set
# Z = {node for node in Z_raw if node != X and node != Y}
#
# # Test for conditional independence of X and Y given Z
# val, pval = self.cond_ind_test.run_test(X = [X], Y = [Y], Z = list(Z), tau_max = self.tau_max)
#
# if self.verbosity >= 2:
# print("BinSepSetAC(A): %s _|_ %s | Z = %s: val = %.2f / pval = % .4f" %
# (X, Y, ' '.join([str(z) for z in list(Z)]), val, pval))
#
# # Check whether the test result was significant. If yes, remember Z as separating set
# if pval > self.pc_alpha:
# all_sepsets.add(frozenset(Z))
#
# # To guarantee minimality of all separating sets, the for loop needs to be broken after this iteration
# sepset_found = True
#
#             # If a separating set has already been found, break the for loop
# if sepset_found:
# break
#
# # Remember whether any separating set is found in the below for loop
# sepset_found = False
#
#         # Test for independence given subsets of non-future adjacencies of C
# for p in range(min(len(adj_C), self.max_p_global) + 1):
#
# q_count = 0
# for Z_raw in combinations(adj_C, p):
#
# q_count = q_count + 1
# if q_count > self.max_q_global:
# break
#
# Z = {node for node in Z_raw if node != X and node != Y}
#
# val, pval = self.cond_ind_test.run_test(X = [X], Y = [Y], Z = list(Z), tau_max = self.tau_max)
#
# if self.verbosity >= 2:
# print("BinSepSetAC(C): %s _|_ %s | Z = %s: val = %.2f / pval = % .4f" %
# (X, Y, ' '.join([str(z) for z in list(Z)]), val, pval))
#
# if pval > self.pc_alpha:
# all_sepsets.add(frozenset(Z))
# sepset_found = True
#
# if sepset_found:
# break
#
# # Count number of sepsets and number of sepsets that contain B
# n_sepsets = len(all_sepsets)
# n_sepsets_with_B = len([1 for Z in all_sepsets if (B[0], B[1] - C[1]) in Z])
#
# # Return True if any separating set was found and B is in more than half of them
# return True if n_sepsets > 0 and 2*n_sepsets_with_B > n_sepsets else False
#
# ########################################################################################################################
# ########################################################################################################################
# ########################################################################################################################
#
# def _apply_R01(self):
# """Return all orientations implied by orientation rule R-01"""
#
# # Build the output list
# out = []
#
# # Find all graphical structures that the rule applies to
# all_appropriate_triples = self._find_triples(pattern_ij='*?>', pattern_jk='o?+', pattern_ik='')
#
# # Run through all appropriate graphical structures
# for (A, B, C) in all_appropriate_triples:
#
# # Check whether the rule applies
# if self._B_in_SepSet_AC(A, B, C):
#
# # Prepare the new link from B to C and append it to the output list
# link_BC = self._get_link(B, C)
# new_link_BC = "-" + link_BC[1] + ">"
# out.append(self._get_pair_key_and_new_link(B, C, new_link_BC))
#
# return out
#
#
# def _apply_R02(self):
# """Return all orientations implied by orientation rule R-02"""
#
# # Build the output list
# out = []
#
# # Find all graphical structures that the rule applies to
# all_appropriate_triples = set(self._find_triples(pattern_ij='-?>', pattern_jk='*?>', pattern_ik='+?o'))
# all_appropriate_triples = all_appropriate_triples.union(set(self._find_triples(pattern_ij='*?>', pattern_jk='-?>', pattern_ik='+?o')))
#
# # Run through all appropriate graphical structures
# for (A, B, C) in all_appropriate_triples:
#
# # The rule applies to all relevant graphical structures. Therefore, prepare the new link and append it to the output list
# link_AC = self._get_link(A, C)
# new_link_AC = link_AC[0] + link_AC[1] + ">"
# out.append(self._get_pair_key_and_new_link(A, C, new_link_AC))
#
# # Return the output list
# return out
#
#
# def _apply_R03(self):
# """Return all orientations implied by orientation rule R-03"""
#
# # Build the output list
# out = []
#
# # Find all graphical structures that the rule applies to
# all_appropriate_quadruples = self._find_quadruples(pattern_ij='*?>', pattern_jk='<?*', pattern_ik='',
# pattern_il='+?o', pattern_jl='o?+', pattern_kl='+?o')
#
# # Run through all appropriate graphical structures
# for (A, B, C, D) in all_appropriate_quadruples:
#
# # Check whether the rule applies
# if self._B_in_SepSet_AC(A, D, C):
#
# # Prepare the new link from D to B and append it to the output list
# link_DB = self._get_link(D, B)
# new_link_DB = link_DB[0] + link_DB[1] + ">"
# out.append(self._get_pair_key_and_new_link(D, B, new_link_DB))
#
# # Return the output list
# return out
#
#
# def _apply_R08(self):
# """Return all orientations implied by orientation rule R-08"""
#
# # Build the output list
# out = []
#
# # Find all graphical structures that the rule applies to
# all_appropriate_triples = self._find_triples(pattern_ij='-?>', pattern_jk='-?>', pattern_ik='o?+')
#
# # Run through all appropriate graphical structures
# for (A, B, C) in all_appropriate_triples:
#
# # The rule applies to all relevant graphical structures. Therefore, prepare the new link and append it to the output list
# link_AC = self._get_link(A, C)
# new_link_AC = "-" + link_AC[1] + ">"
# out.append(self._get_pair_key_and_new_link(A, C, new_link_AC))
#
# # Return the output list
# return out
#
#
# def _apply_R09(self):
# """Return all orientations implied by orientation rule R-09"""
#
# # Build the output list
# out = []
#
# # Find unshielded triples B_1 o--*--o A o--*--> C or B_1 <--*--o A o--*--> C or B_1 <--*-- A o--*--> C
# all_appropriate_triples = set(self._find_triples(pattern_ij='o?o', pattern_jk='o?>', pattern_ik=''))
# all_appropriate_triples = all_appropriate_triples.union(set(self._find_triples(pattern_ij='<?o', pattern_jk='o?>', pattern_ik='')))
# all_appropriate_triples = all_appropriate_triples.union(set(self._find_triples(pattern_ij='<?-', pattern_jk='o?>', pattern_ik='')))
#
# # Run through all these triples
# for (B_1, A, C) in all_appropriate_triples:
#
# # Check whether A is in SepSet(B_1, C), else the rule does not apply
# if not self._B_in_SepSet_AC(B_1, A, C):
# continue
#
# # Although we do not yet know whether the rule applies, we here determine the new form of the link from A to C if the rule does apply
# link_AC = self._get_link(A, C)
# new_link_AC = "-" + link_AC[1] + ">"
# pair_key, new_link = self._get_pair_key_and_new_link(A, C, new_link_AC)
#
# # For the search of uncovered potentially directed paths from B_1 to C, determine the initial pattern as dictated by the link from A to B_1
# first_link = self._get_link(A, B_1)
# if self._match_link(pattern='o?o', link=first_link):
# initial_allowed_patterns = ['-?>', 'o?>', 'o?o']
# elif self._match_link(pattern='o?>', link=first_link) or self._match_link(pattern='-?>', link=first_link):
# initial_allowed_patterns = ['-?>']
#
# # Find all uncovered potentially directed paths from B_1 to C
# uncovered_pd_paths = self._get_potentially_directed_uncovered_paths_rfci(B_1, C, initial_allowed_patterns)
#
# # Run through all of these paths and check i) whether the node adjacent to B_1 is non-adjacent to A, ii) whether condition iv) of the rule antecedent is true. If there is any such path, then the link can be oriented
# for upd_path in uncovered_pd_paths:
#
# # Is the node adjacent to B_1 non-adjacent to A (this implies that there are at least three nodes on the path, because else the node adjacent to B_1 is C) and is A not part of the path?
# if len(upd_path) < 3 or A in upd_path or self._get_link(A, upd_path[1]) != "":
# continue
#
# # If the link from A to B_1 is into B_1, condition iv) is true
# if first_link[2] == ">":
# # Mark the link from A to C for orientation, break the for loop to continue with the next triple
# out.append((pair_key, new_link))
# break
#
#                 # If the link from A to B_1 is not into B_1, we need to check whether B_1 is in SepSet(A, X) where X is the node on upd_path next to B_1
# if not self._B_in_SepSet_AC(A, B_1, upd_path[1]):
# # Continue with the next upd_path
# continue
#
#                 # Now check whether condition iv) holds for all triples on upd_path
# path_qualifies = True
# for i in range(len(upd_path) - 2):
# # We consider the unshielded triples upd_path[i] - upd_path[i+1] - upd_path[i+2]
#
# # If the link between upd_path[i] and upd_path[i+1] is into the latter, condition iv) is true
# left_link = self._get_link(upd_path[i], upd_path[i+1])
# if left_link[2] == ">":
# # The path qualifies, break the inner for loop
# break
#
#                     # If not, then we need to continue with checking whether upd_path[i+1] is in SepSet(upd_path[i], upd_path[i+2])
# if not self._B_in_SepSet_AC(upd_path[i], upd_path[i+1], upd_path[i+2]):
#                         # The path does not qualify, break the inner for loop
# path_qualifies = False
# break
#
# # The path qualifies, mark the edge from A to C for orientation and break the outer for loop to continue with the next triple
# if path_qualifies:
# out.append((pair_key, new_link))
# break
#
# # The path does not qualify, continue with the next upd_path
#
# # end for upd_path in uncovered_pd_paths
# # end for (B_1, A, C) in all_appropriate_triples
#
# # Return the output list
# return out
#
#
# def _apply_R10(self):
# """Return all orientations implied by orientation rule R-10"""
#
# # Build the output list
# out = []
#
# # Find all triples A o--> C <-- P_C
# all_appropriate_triples = set(self._find_triples(pattern_ij='o?>', pattern_jk='<?-', pattern_ik=''))
# all_appropriate_triples = all_appropriate_triples.union(set(self._find_triples(pattern_ij='o?>', pattern_jk='<?-', pattern_ik='***')))
#
# # Collect all triples for the given pair (A, C)
# triple_sorting_dict = {}
# for (A, C, P_C) in all_appropriate_triples:
# if triple_sorting_dict.get((A, C)) is None:
# triple_sorting_dict[(A, C)] = [P_C]
# else:
# triple_sorting_dict[(A, C)].append(P_C)
#
# # Run through all (A, C) pairs
# for (A, C) in triple_sorting_dict.keys():
#
# # Find all uncovered potentially directed paths from A to C through any of the P_C nodes
# relevant_paths = []
# for P_C in triple_sorting_dict[(A, C)]:
# for upd_path in self._get_potentially_directed_uncovered_paths_rfci(A, P_C, ['-?>', 'o?>', 'o?o']):
#
#                 # Run through all of these paths and check i) whether the second to last element is not adjacent to C (this requires the path to have at least three nodes, because otherwise the second to last element would be A), ii) whether the left edge of any 3-node sub-path is into the middle node or, if not, whether the middle node is in the separating set of the two end-point nodes of the 3-node sub-path, and iii) whether C is not an element of the path. If the path meets these conditions, add its second node (the one adjacent to A) to the set second_nodes
#
# if len(upd_path) < 3 or C in upd_path or self._get_link(upd_path[-2], C) != "":
# continue
#
# upd_path.append(C)
#
# path_qualifies = True
# for i in range(len(upd_path) - 2):
# # We consider the unshielded triples upd_path[i] - upd_path[i+1] - upd_path[i+2]
#
# # If the link between upd_path[i] and upd_path[i+1] is into the latter, the path qualifies
# left_link = self._get_link(upd_path[i], upd_path[i+1])
# if left_link[2] == ">":
# # The path qualifies, break the inner for loop
# break
#
#                     # If not, then we need to continue with checking whether upd_path[i+1] is in SepSet(upd_path[i], upd_path[i+2])
# if not self._B_in_SepSet_AC(upd_path[i], upd_path[i+1], upd_path[i+2]):
# # The path does not qualify, break the inner for loop
# path_qualifies = False
# break
#
# # The path qualifies, add upd_path[i] to second_nodes and continue with the next upd_path
# if path_qualifies:
# relevant_paths.append(upd_path)
#
# # The path does not qualify, continue with the next upd_path
#
#                 # end for upd_path in self._get_potentially_directed_uncovered_paths_rfci(A, P_C, ['-?>', 'o?>', 'o?o'])
# # end for P_C in triple_sorting_dict[(A, C)]
#
# # Find all second nodes on the relevant paths
# second_nodes = list({path[1] for path in relevant_paths})
#
# # Check whether there is any pair of non-adjacent nodes in second_nodes, such that A is in their separating set. If yes, mark the link from A to C for orientation
# for i, j in product(range(len(second_nodes)), range(len(second_nodes))):
#
# if i < j and self._get_link(second_nodes[i], second_nodes[j]) == "" and self._B_in_SepSet_AC(second_nodes[i], A, second_nodes[j]):
# # Append new link and break the for loop
# link_AC = self._get_link(A, C)
# new_link_AC = "-" + link_AC[1] + ">"
# out.append(self._get_pair_key_and_new_link(A, C, new_link_AC))
# break
#
# # end for (A, C) in triple_sorting_dict.keys()
#
# # Return the output list
# return out
#
# ########################################################################################################################
# ########################################################################################################################
# ########################################################################################################################
#
# def _print_graph_dict(self):
# """Print all links in graph_dict"""
#
# for j in range(self.N):
# for ((i, lag_i), link) in self.graph_dict[j].items():
# if len(link) > 0 and (lag_i < 0 or i < j):
# print("({},{:2}) {} {}".format(i, lag_i, link, (j, 0)))
#
#
# def _is_smaller(self, X, Y):
# """
# A node X is said to be smaller than node Y if
# i) X is before Y or
# ii) X and Y are contemporaneous and the variable index of X is smaller than that of Y.
#
# Return True if X is smaller than Y, else return False
# """
#
#         return (X[1] < Y[1]) or (X[1] == Y[1] and X[0] < Y[0])
#
#
# def _get_link(self, A, B):
# """Get the current link from node A to B"""
#
# (var_A, lag_A) = A
# (var_B, lag_B) = B
#
# if abs(lag_A - lag_B) > self.tau_max:
# return ""
# elif lag_A <= lag_B:
# return self.graph_dict[var_B][(var_A, lag_A - lag_B)]
# else:
# return self._reverse_link(self.graph_dict[var_A][(var_B, lag_B - lag_A)])
#
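# # A minimal sketch of the lag convention behind _get_link above: graph_dict[j][(i, lag)]
# # stores the link from (i, lag) into (j, 0) with lag <= 0, so a query between nodes at
# # arbitrary lags is answered by shifting both nodes so that the later one sits at lag 0
# # and, if the query runs against the stored direction, reversing the link string. The
# # two-variable dictionary below is illustrative only (tau_max is assumed to be >= 1).
# _sketch_graph_dict = {
#     0: {(0, -1): "-->", (1, -1): "o?>", (1, 0): "o?o"},
#     1: {(1, -1): "-->", (0, -1): "",    (0, 0): "o?o"},
# }
# # Query (1, -1) -> (0, 0): read directly, _sketch_graph_dict[0][(1, -1)] == "o?>"
# # Query (0, 0) -> (1, -1): stored the other way round, so the result is the reverse "<?o"
# # Query (1, 2) -> (0, 3): same lag difference as (1, -1) -> (0, 0), so again "o?>"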
#
# def _get_non_future_adj(self, node_list):
# """Return all non-future adjacencies of all nodes in node_list"""
#
# # Build the output starting from an empty set
# out = set()
#
# # For each node W in node_list ...
# for A in node_list:
# # Unpack A
# (var_A, lag_A) = A
# # Add all (current) non-future adjacencies of A to the set out
# out = out.union({(var, lag + lag_A) for ((var, lag), link) in self.graph_dict[var_A].items() if len(link) > 0 and lag + lag_A >= -self.tau_max})
#
# # Return the desired set
# return out
#
#
# def _update_val_min(self, X, Y, val):
# """Some conditional independence test for X and Y has given the test statistic value val. Update the val_min dictionary accordingly"""
#
# if X[1] < 0 or X[0] < Y[0]:
# self.val_min[Y[0]][X] = min(self.val_min[Y[0]][X], np.abs(val))
# else:
# self.val_min[X[0]][Y] = min(self.val_min[X[0]][Y], np.abs(val))
#
#
# def _get_val_min(self, X, Y):
# """Return the value stored in self.val_min for the variable pair (X, Y)"""
#
# if X[1] < 0 or X[0] < Y[0]:
# return self.val_min[Y[0]][X]
# else:
# return self.val_min[X[0]][Y]
#
#
# def _update_cardinality(self, X, Y, cardinality):
#         """X and Y were found conditionally independent given a separating set of cardinality cardinality. Update self.max_cardinality accordingly"""
#
# if X[1] < 0 or X[0] < Y[0]:
# self.max_cardinality[Y[0]][X] = max(self.max_cardinality[Y[0]][X], cardinality)
# else:
# self.max_cardinality[X[0]][Y] = max(self.max_cardinality[X[0]][Y], cardinality)
#
#
# def _update_pval_max(self, X, Y, pval):
#         """Some conditional independence test for X and Y has given the p-value pval. Update the pval_max dictionary accordingly"""
#
# if X[1] < 0 or X[0] < Y[0]:
# self.pval_max[Y[0]][X] = max(self.pval_max[Y[0]][X], pval)
# else:
# self.pval_max[X[0]][Y] = max(self.pval_max[X[0]][Y], pval)
#
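# # A minimal sketch of the storage convention shared by _update_val_min, _get_val_min,
# # _update_cardinality and _update_pval_max above: the statistic for a pair (X, Y), with
# # Y at lag 0, is filed under the lag-0 node (for lagged pairs) or under the node with the
# # larger variable index (for contemporaneous pairs), keyed by the other node. The helper
# # below is illustrative only.
# def _sketch_storage_slot(X, Y):
#     """Return (outer_index, inner_key) of the dictionary slot used for the pair (X, Y)."""
#     if X[1] < 0 or X[0] < Y[0]:
#         return (Y[0], X)
#     else:
#         return (X[0], Y)
#
# # Example: _sketch_storage_slot((0, -2), (1, 0)) == (1, (0, -2))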
#
# def _sort_search_set(self, search_set, reference_node):
# """Sort the nodes in search_set by their val_min value with respect to the reference_node. Nodes with higher values appear earlier"""
#
# sort_by = [self._get_val_min(reference_node, node) for node in search_set]
# return [x for _, x in sorted(zip(sort_by, search_set), reverse = True)]
#
#
# def _save_sepset(self, X, Y, Z):
#         """Save Z as a separating set of X and Y. Y is assumed to be at lag 0"""
#
# # Unpack X and Y
# (i, lag_i) = X
# (j, lag_j) = Y
#
# assert lag_j == 0
#
# # Save the sepset
# if lag_i < 0 or i < j:
# self.sepsets[j][X].add(Z)
# else:
# self.sepsets[i][Y].add(Z)
#
#
# def _delete_sepsets(self, X, Y):
# """Delete all separating sets of X and Y. Y is assumed to be at lag 0"""
#
# # Unpack X and Y
# (i, lag_i) = X
# (j, lag_j) = Y
#
# assert lag_j == 0
#
#         # Delete the sepsets
# if lag_i < 0 or i < j:
# self.sepsets[j][X] = set()
# else:
# self.sepsets[i][Y] = set()
#
#
# def _reverse_link(self, link):
# """Reverse a given link, taking care to replace > with < and vice versa"""
#
# if link == "":
# return ""
#
# if link[2] == ">":
# left_mark = "<"
# else:
# left_mark = link[2]
#
# if link[0] == "<":
# right_mark = ">"
# else:
# right_mark = link[0]
#
# return left_mark + link[1] + right_mark
#
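# # A few illustrative examples of _reverse_link above: the two outer marks swap sides, an
# # arrow head '>' on the right becomes '<' on the left (and vice versa), while circles and
# # tails simply change sides and the middle mark stays put.
# #     _reverse_link("-->") == "<--"
# #     _reverse_link("o?>") == "<?o"
# #     _reverse_link("o-o") == "o-o"
# #     _reverse_link("<->") == "<->"
# #     _reverse_link("")    == ""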
#
# def _write_link(self, A, B, new_link, verbosity = 0):
#         """Write the information that the link from node A to node B takes the form of new_link into self.graph_dict. It is neither assumed that at least one of the nodes is at lag 0, nor that A is before B. If A and B are contemporaneous, the link from B to A is also written as the reverse of new_link"""
#
# # Unpack A and B
# (var_A, lag_A) = A
# (var_B, lag_B) = B
#
# # Write the link from A to B
# if lag_A < lag_B:
#
# if verbosity >= 1:
# print("{:10} ({},{:2}) {:3} ({},{:2}) ==> ({},{:2}) {:3} ({},{:2}) ".format("Writing:", var_A, lag_A - lag_B, self.graph_dict[var_B][(var_A, lag_A - lag_B)], var_B, 0, var_A, lag_A - lag_B, new_link, var_B, 0))
#
# self.graph_dict[var_B][(var_A, lag_A - lag_B)] = new_link
#
#
# elif lag_A == lag_B:
#
# if verbosity >= 1:
# print("{:10} ({},{:2}) {:3} ({},{:2}) ==> ({},{:2}) {:3} ({},{:2}) ".format("Writing:", var_A, lag_A - lag_B, self.graph_dict[var_B][(var_A, 0)], var_B, 0, var_A, 0, new_link, var_B, 0))
#
# print("{:10} ({},{:2}) {:3} ({},{:2}) ==> ({},{:2}) {:3} ({},{:2}) ".format("Writing:", var_B, 0, self.graph_dict[var_A][(var_B, 0)], var_A, 0, var_B, 0, self._reverse_link(new_link), var_A, 0))
#
# self.graph_dict[var_B][(var_A, 0)] = new_link
# self.graph_dict[var_A][(var_B, 0)] = self._reverse_link(new_link)
#
# else:
#
# if verbosity >= 1:
# print("{:10} ({},{:2}) {:3} ({},{:2}) ==> ({},{:2}) {:3} ({},{:2}) ".format("Writing:", var_B, lag_B - lag_A, self.graph_dict[var_A][(var_B, lag_B - lag_A)], var_A, 0, var_B, lag_B - lag_A, self._reverse_link(new_link), var_A, 0))
#
# self.graph_dict[var_A][(var_B, lag_B - lag_A)] = self._reverse_link(new_link)
#
# def _get_sepsets(self, A, B):
#         """For two non-adjacent nodes, get their separating sets stored in self.sepsets"""
#
# (var_A, lag_A) = A
# (var_B, lag_B) = B
#
# def _shift(Z, lag_B):
# return frozenset([(var, lag + lag_B) for (var, lag) in Z])
#
# if lag_A < lag_B:
# out = {(_shift(Z, lag_B), status) for (Z, status) in self.sepsets[var_B][(var_A, lag_A - lag_B)]}
# elif lag_A > lag_B:
# out = {(_shift(Z, lag_A), status) for (Z, status) in self.sepsets[var_A][(var_B, lag_B - lag_A)]}
# else:
# out = {(_shift(Z, lag_A), status) for (Z, status) in self.sepsets[max(var_A, var_B)][(min(var_A, var_B), 0)]}
#
# return out
#
#
# def _initialize_full_graph(self):
#         """Initialize self.graph_full_dict. This nested dictionary represents the graph and, as opposed to self.graph_dict, also contains forward links"""
#
# # Build from an empty nested dictionary
# self.graph_full_dict = {j: {} for j in range(self.N)}
#
# # Run through the entire nested dictionary self.graph_dict
# for j in range(self.N):
# for ((var, lag), link) in self.graph_dict[j].items():
#
# if link != "":
# # Add non-future adjacencies
# self.graph_full_dict[j][(var, lag)] = link
#
# # Add the future adjacencies
# if lag < 0:
# self.graph_full_dict[var][(j, -lag)] = self._reverse_link(link)
#
# # Return nothing
# return None
#
#
# def _get_pair_key_and_new_link(self, A, B, link_AB):
# """The link from A to B takes the form link_AB. Bring this information into a form appropriate for the output of rule applications"""
#
# (var_A, lag_A) = A
# (var_B, lag_B) = B
#
# if lag_A <= lag_B:
# return ((var_A, var_B, lag_A - lag_B), link_AB)
# elif lag_A > lag_B:
# return ((var_B, var_A, lag_B - lag_A), self._reverse_link(link_AB))
#
#
# def _match_link(self, pattern, link):
# """Matches pattern including wildcards with link."""
#
# if pattern == '' or link == '':
# return True if pattern == link else False
# else:
# left_mark, middle_mark, right_mark = pattern
# if left_mark != '*':
# if left_mark == '+':
# if link[0] not in ['<', 'o']: return False
# else:
# if link[0] != left_mark: return False
#
# if right_mark != '*':
# if right_mark == '+':
# if link[2] not in ['>', 'o']: return False
# else:
# if link[2] != right_mark: return False
#
# if middle_mark != '*' and link[1] != middle_mark: return False
#
# return True
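#         # Illustrative examples (added sketch; hypothetical calls, not part of the original code):
#         #   _match_link("*?>", "-?>") is True  ('*' matches any left mark)
#         #   _match_link("+?>", "-?>") is False ('+' only accepts '<' or 'o' as the left mark)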
#
#
# def _dict2graph(self):
# """Convert self.graph_dict to graph array of shape (N, N, self.tau_max + 1)."""
#
# graph = np.zeros((self.N, self.N, self.tau_max + 1), dtype='U3')
# for j in range(self.N):
# for adj in self.graph_dict[j]:
# (i, lag_i) = adj
# graph[i, j, abs(lag_i)] = self.graph_dict[j][adj]
#
# return graph
#
#
# def _find_adj(self, graph, node, patterns, exclude=None, ignore_time_bounds=True):
# """Find adjacencies of node matching patterns."""
#
# # Setup
# i, lag_i = node
# if exclude is None: exclude = []
# if type(patterns) == str:
# patterns = [patterns]
#
# # Init
# adj = []
# # Find adjacencies going forward/contemp
# for k, lag_ik in zip(*np.where(graph[i,:,:])):
# matches = [self._match_link(patt, graph[i, k, lag_ik]) for patt in patterns]
# if np.any(matches):
# match = (k, lag_i + lag_ik)
# if match not in adj and (k, lag_i + lag_ik) not in exclude and (-self.tau_max <= lag_i + lag_ik <= 0 or ignore_time_bounds):
# adj.append(match)
#
# # Find adjacencies going backward/contemp
# for k, lag_ki in zip(*np.where(graph[:,i,:])):
# matches = [self._match_link(self._reverse_link(patt), graph[k, i, lag_ki]) for patt in patterns]
# if np.any(matches):
# match = (k, lag_i - lag_ki)
# if match not in adj and (k, lag_i - lag_ki) not in exclude and (-self.tau_max <= lag_i - lag_ki <= 0 or ignore_time_bounds):
# adj.append(match)
#
# return adj
#
#
# def _is_match(self, graph, X, Y, pattern_ij):
# """Check whether the link between X and Y agrees with pattern_ij"""
#
# (i, lag_i) = X
# (j, lag_j) = Y
# tauij = lag_j - lag_i
# if abs(tauij) >= graph.shape[2]:
# return False
# return ((tauij >= 0 and self._match_link(pattern_ij, graph[i, j, tauij])) or
# (tauij < 0 and self._match_link(self._reverse_link(pattern_ij), graph[j, i, abs(tauij)])))
#
#
# def _find_triples(self, pattern_ij, pattern_jk, pattern_ik):
# """Find triples (i, lag_i), (j, lag_j), (k, lag_k) that match patterns."""
#
# # Graph as array makes it easier to search forward AND backward in time
# graph = self._dict2graph()
#
# # print(graph[:,:,0])
# # print(graph[:,:,1])
# # print("matching ", pattern_ij, pattern_jk, pattern_ik)
#
# matched_triples = []
#
# for i in range(self.N):
# # Set lag_i = 0 without loss of generality, will be adjusted at end
# lag_i = 0
# adjacencies_i = self._find_adj(graph, (i, lag_i), pattern_ij)
# # print(i, adjacencies_i)
# for (j, lag_j) in adjacencies_i:
#
# adjacencies_j = self._find_adj(graph, (j, lag_j), pattern_jk,
# exclude=[(i, lag_i)])
# # print(j, adjacencies_j)
# for (k, lag_k) in adjacencies_j:
# if self._is_match(graph, (i, lag_i), (k, lag_k), pattern_ik):
# # Now use stationarity and shift triple such that the right-most
# # node (on a line t=..., -2, -1, 0, 1, 2, ...) is at lag 0
# righmost_lag = max(lag_i, lag_j, lag_k)
# match = ((i, lag_i - righmost_lag),
# (j, lag_j - righmost_lag),
# (k, lag_k - righmost_lag))
# largest_lag = min(lag_i - righmost_lag, lag_j - righmost_lag, lag_k - righmost_lag)
# if match not in matched_triples and \
# -self.tau_max <= largest_lag <= 0:
# matched_triples.append(match)
#
# return matched_triples
#
#
# def _find_quadruples(self, pattern_ij, pattern_jk, pattern_ik,
# pattern_il, pattern_jl, pattern_kl):
# """Find quadruples (i, lag_i), (j, lag_j), (k, lag_k), (l, lag_l) that match patterns."""
#
# # We assume this later
# assert pattern_il != ''
#
# # Graph as array makes it easier to search forward AND backward in time
# graph = self._dict2graph()
#
# matched_quadruples = []
#
# # First get triple ijk
# ijk_triples = self._find_triples(pattern_ij, pattern_jk, pattern_ik)
#
# for triple in ijk_triples:
# # Unpack triple
# (i, lag_i), (j, lag_j), (k, lag_k) = triple
#
# # Search through adjacencies
# adjacencies = set(self._find_adj(graph, (i, lag_i), pattern_il,
# exclude=[(j, lag_j), (k, lag_k)]))
# if pattern_jl != '':
# adjacencies = adjacencies.intersection(set(
# self._find_adj(graph, (j, lag_j), pattern_jl,
# exclude=[(i, lag_i), (k, lag_k)])))
# else:
# adjacencies = set([adj for adj in adjacencies
# if self._is_match(graph, (j, lag_j), adj, '')])
#
# if pattern_kl != '':
# adjacencies = adjacencies.intersection(set(
# self._find_adj(graph, (k, lag_k), pattern_kl,
# exclude=[(i, lag_i), (j, lag_j)])))
# else:
# adjacencies = set([adj for adj in adjacencies
# if self._is_match(graph, (k, lag_k), adj, '')])
#
# for adj in adjacencies:
# (l, lag_l) = adj
#
# # Now use stationarity and shift quadruple such that the right-most
# # node (on a line t=..., -2, -1, 0, 1, 2, ...) is at lag 0
# righmost_lag = max(lag_i, lag_j, lag_k, lag_l)
# match = ((i, lag_i - righmost_lag),
# (j, lag_j - righmost_lag),
# (k, lag_k - righmost_lag),
# (l, lag_l - righmost_lag),
# )
# largest_lag = min(lag_i - righmost_lag,
# lag_j - righmost_lag,
# lag_k - righmost_lag,
# lag_l - righmost_lag,
# )
# if match not in matched_quadruples and \
# -self.tau_max <= largest_lag <= 0:
# matched_quadruples.append(match)
#
# return matched_quadruples
#
#
# def _get_R4_discriminating_paths_rfci(self, triple, max_length = np.inf):
# """Find all discriminating paths starting from triple"""
#
# def _search(path_taken, max_length):
#
# # Get the last visited node and its link to Y
# last_node = path_taken[-1]
# link_to_Y = self._get_link(last_node, path_taken[0])
#
# # Base Case: If the current path is a discriminating path, return it as single entry of a list
# if len(path_taken) > 3 and link_to_Y == "":
# return [path_taken]
#
# # If the current path is not a discriminating path, continue the path
# paths = []
#
# if self._get_link(last_node, path_taken[-2])[0] == "<" and link_to_Y == "-?>" and len(path_taken) < max_length:
#
# # Search through all adjacencies of the last node
# for (var, lag) in self.graph_full_dict[last_node[0]].keys():
#
# # Build the next node and get its link to the previous
# next_node = (var, lag + last_node[1])
# next_link = self._get_link(next_node, last_node)
#
# # Check whether this node can be visited
# if next_node[1] <= 0 and next_node[1] >= -self.tau_max and next_node not in path_taken and self._match_link("*?>", next_link):
#
# # Recursive call
# paths.extend(_search(path_taken[:] + [next_node], max_length))
#
# # Return the list of discriminating paths
# return paths
#
# # Unpack the triple
# (W, V, Y) = triple
#
# # Return all discriminating paths starting at this triple
# return _search([Y, V, W], max_length)
#
#
# def _get_potentially_directed_uncovered_paths_rfci(self, start_node, end_node, initial_allowed_patterns):
#         """Find all potentially directed uncovered paths from start_node to end_node whose first link takes one of the forms specified by initial_allowed_patterns"""
#
# assert start_node != end_node
#
# # Function for recursive search of potentially directed uncovered paths
# def _search(end_node, path_taken, allowed_patterns):
#
# # List for outputting potentially directed uncovered paths
# paths = []
#
# # The last visited note becomes the new start_node
# start_node = path_taken[-1]
#
# # Base case: End node has been reached
# if start_node == end_node:
# paths.append(path_taken)
#
# # Recursive build case
# else:
# # Run through the adjacencies of start_node
# #for next_node in self.graph_full_dict[start_node[0]]:
# for (var, lag) in self.graph_full_dict[start_node[0]].keys():
#
# next_node = (var, lag + start_node[1])
#
# # Consider only nodes that ...
# # ... are within the allowed time frame
# if next_node[1] < -self.tau_max or next_node[1] > 0:
# continue
# # ... have not been visited yet
# if next_node in path_taken:
# continue
# # ... are non-adjacent to the node before start_node
# if len(path_taken) >= 2 and self._get_link(path_taken[-2], next_node) != "":
# continue
#                 # ... whose link with start_node matches one of the allowed patterns
# link = self._get_link(start_node, next_node)
# if not any([self._match_link(pattern = pattern, link = link) for pattern in allowed_patterns]):
# continue
#
#                 # Determine the allowed patterns for the next recursive call
# if self._match_link(pattern='o?o', link=link):
# new_allowed_patters = ["o?o", "o?>", "-?>"]
# elif self._match_link(pattern='o?>', link=link) or self._match_link(pattern='-?>', link=link):
# new_allowed_patters = ["-?>"]
#
# # Determine the new path taken
# new_path_taken = path_taken[:] + [next_node]
#
# # Recursive call
# paths.extend(_search(end_node, new_path_taken, new_allowed_patters))
#
# # Output list of potentially directed uncovered paths
# return paths
#
# # end def _search(end_node, path_taken, allowed_patterns)
#
# # Output potentially directed uncovered paths
# paths = _search(end_node, [start_node], initial_allowed_patterns)
# return [path for path in paths if len(path) > 2]
#
#
# def _dict_to_matrix(self, val_dict, tau_max, n_vars, default=1):
# """Convert a dictionary to matrix format"""
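#         # Illustrative example (added sketch; hypothetical input): for
#         #   val_dict = {0: {(1, -1): 0.5}, 1: {}} with tau_max=1, n_vars=2, default=1,
#         # the result has matrix[1, 0, 1] == 0.5 and every other entry equal to 1.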
#
# matrix = np.ones((n_vars, n_vars, tau_max + 1))
# matrix *= default
#
# for j in val_dict.keys():
# for link in val_dict[j].keys():
# k, tau = link
# if tau == 0:
# matrix[k, j, 0] = matrix[j, k, 0] = val_dict[j][link]
# else:
# matrix[k, j, abs(tau)] = val_dict[j][link]
# return matrix
| 106,886 | 46.696118 | 702 |
py
|
correlate
|
correlate-master/causal_discovery/LPCMCI/plot_experiments.py
|
import matplotlib as mpl
# print([key for key in list(mpl.rcParams.keys()) if 'pad' in key])
params = { 'figure.figsize': (8, 10),
'legend.fontsize': 8,
# 'title.fontsize': 8,
'lines.color':'black',
'lines.linewidth':1,
'xtick.labelsize':4,
'xtick.major.pad' : 3,
'xtick.major.size' : 2,
'ytick.major.pad' : 3,
'ytick.major.size' : 2,
'ytick.labelsize':7,
'axes.labelsize':8,
'font.size':8,
'axes.labelpad':2,
# 'text.usetex' : True,
# 'legend.labelsep': 0.0005
}
import collections
from matplotlib.ticker import ScalarFormatter, NullFormatter
from matplotlib import gridspec
import matplotlib.cm as cm
mpl.rcParams.update(params)
import sys
import pickle
import matplotlib.pyplot as plt
arg = sys.argv
ci_test = str(arg[1])
variant = str(arg[2])
def method_label(method):
# return method
# if not 'paper' in variant:
# return method
if method == 'svarfci':
return 'SVAR-FCI'
elif method == 'svarrfci':
return 'SVAR-RFCI'
elif 'lpcmci' in method:
if 'prelimonly' in method and 'prelim1' in method:
return r'LPCMCI$(l=0)$'
elif 'prelim0' in method:
return r'LPCMCI$(k=0)$'
elif 'prelim1' in method:
return r'LPCMCI$(k=1)$'
elif 'prelim2' in method:
return r'LPCMCI$(k=2)$'
elif 'prelim3' in method:
return r'LPCMCI$(k=3)$'
elif 'prelim4' in method:
return r'LPCMCI$(k=4)$'
else:
return method
name = {'par_corr':r'ParCorr', 'gp_dc':r'GPDC', 'cmi_knn':r'CMIknn'}
def get_metrics_from_file(para_setup):
name_string = '%s-'*len(para_setup) # % para_setup
name_string = name_string[:-1]
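    # Illustrative sketch (added; hypothetical values): para_setup = ('random_lineargaussian', 5, 0.05)
    # yields name_string = '%s-%s-%s', so the file loaded below would be
    # "results/random_lineargaussian-5-0.05_metrics.dat" (with folder_name = "results/" as set in __main__).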
try:
print("load from metrics file %s_metrics.dat " % (folder_name + name_string % tuple(para_setup)))
results = pickle.load(open(folder_name + name_string % tuple(para_setup) + '_metrics.dat', 'rb'), encoding='latin1')
except:
print('failed from metrics file ' , tuple(para_setup))
return None
return results
def print_time(seconds, precision=1):
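    # Illustrative examples (added sketch): print_time(90) -> "1.5min",
    # print_time(7200, precision=0) -> "2h", print_time(45.3) -> "45.3s".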
if precision == 0:
if seconds > 60*60.:
return "%.0fh" % (seconds/3600.)
elif seconds > 60.:
return "%.0fmin" % (seconds/60.)
else:
return "%.0fs" % (seconds)
else:
if seconds > 60*60.:
return "%.1fh" % (seconds/3600.)
elif seconds > 60.:
return "%.1fmin" % (seconds/60.)
else:
return "%.1fs" % (seconds)
def print_time_std(time, precision=1):
mean = time.mean()
std = time.std()
if precision == 0:
if mean > 60*60.:
return r"%.0f$\pm$%.0fh" % (mean/3600., std/3600.)
elif mean > 60.:
return r"%.0f$\pm$%.0fmin" % (mean/60., std/60.)
# return "%.0fmin" % (mean/60.)
else:
return r"%.0f$\pm$%.0fs" % (mean, std)
# return "%.0fs" % (mean)
else:
if mean > 60*60.:
return r"%.1f$\pm$%.1fh" % (mean/3600., std/3600.)
elif mean > 60.:
return r"%.1f$\pm$%.1fmin" % (mean/60., std/60.)
# return "%.0fmin" % (mean/60.)
else:
return r"%.1f$\pm$%.1fs" % (mean, std)
def draw_it(paras, which):
figsize = (4, 2.5) #(4, 2.5)
capsize = .5
marker1 = 'o'
marker2 = 's'
marker3 = '+'
alpha_marker = 1.
params = {
'legend.fontsize': 5,
'legend.handletextpad': .05,
# 'title.fontsize': 8,
'lines.color':'black',
'lines.linewidth':.5,
'lines.markersize':2,
# 'lines.capsize':4,
'xtick.labelsize':4,
'xtick.major.pad' : 1,
'xtick.major.size' : 2,
'ytick.major.pad' : 1,
'ytick.major.size' : 2,
'ytick.labelsize':4,
'axes.labelsize':8,
'font.size':8,
'axes.labelpad':2,
# 'axes.grid': True,
'axes.spines.right' : False,
'axes.spines.top' : False,
# 'lines.clip_on':False,
# 'axes.spines.left.outward' : 4,
# 'text.usetex' : True,
# 'legend.labelsep': 0.0005
}
mpl.rcParams.update(params)
fig = plt.figure(figsize=figsize)
gs = fig.add_gridspec(2, 4)
ax1a = fig.add_subplot(gs[0, 0])
ax1b = fig.add_subplot(gs[1, 0])
ax2a = fig.add_subplot(gs[0, 1])
ax2b = fig.add_subplot(gs[1, 1])
ax3a = fig.add_subplot(gs[0, 2])
ax3b = fig.add_subplot(gs[1, 2])
# ax4 = fig.add_subplot(gs[:, 3])
ax4a = fig.add_subplot(gs[0, 3])
ax4b = fig.add_subplot(gs[1, 3])
if fpr_precision == 'fpr':
if which == 'pc_alpha':
print(paras)
ax1b.plot(paras, paras, color='grey', linewidth=2.)
else:
ax1b.axhline(pc_alpha, color='grey', linewidth=2.)
for method in methods:
for para in paras:
# para_plot = para + 0.04*np.random.rand()*abs(paras[-1]-paras[0])
para_plot = paras.index(para) + methods.index(method)/float(len(methods))*.6
if which == 'auto':
auto_here = para
N_here = N
tau_max_here = tau_max
frac_unobserved_here = frac_unobserved
pc_alpha_here = pc_alpha
T_here = T
elif which == 'N':
N_here = para
auto_here = auto
tau_max_here = tau_max
frac_unobserved_here = frac_unobserved
pc_alpha_here = pc_alpha
T_here = T
elif which == 'tau_max':
N_here = N
auto_here = auto
tau_max_here = para
frac_unobserved_here = frac_unobserved
pc_alpha_here = pc_alpha
T_here = T
elif which == 'sample_size':
N_here = N
auto_here = auto
tau_max_here = tau_max
frac_unobserved_here = frac_unobserved
pc_alpha_here = pc_alpha
T_here = para
elif which == 'unobserved':
N_here = N
auto_here = auto
tau_max_here = tau_max
frac_unobserved_here = para
pc_alpha_here = pc_alpha
T_here = T
if N_here == 2:
n_links_here = 1
else:
n_links_here = links_from_N(N_here)
para_setup = (model, N_here, n_links_here, min_coeff, coeff, auto_here, contemp_fraction, frac_unobserved_here,
max_true_lag, T_here, ci_test, method, pc_alpha_here, tau_max_here)
metrics_dict = get_metrics_from_file(para_setup)
if metrics_dict is not None:
ax1a.errorbar(para_plot, *metrics_dict['adj_lagged_recall'], capsize=capsize, alpha=alpha_marker,
color=color_picker(method), marker=marker1, linestyle='solid')
ax1a.errorbar(para_plot, *metrics_dict['adj_auto_recall'], capsize=capsize, alpha=alpha_marker,
color=color_picker(method), marker=marker3, linestyle='solid')
ax1a.errorbar(para_plot, *metrics_dict['adj_contemp_recall'], capsize=capsize, alpha=alpha_marker,
color=color_picker(method), marker=marker2, linestyle='dashed')
ax1b.errorbar(para_plot, *metrics_dict['adj_lagged_%s' % fpr_precision], capsize=capsize, alpha=alpha_marker,
color=color_picker(method), marker=marker1, linestyle='solid')
ax1b.errorbar(para_plot, *metrics_dict['adj_auto_%s' % fpr_precision], capsize=capsize, alpha=alpha_marker,
color=color_picker(method), marker=marker3, linestyle='solid')
ax1b.errorbar(para_plot, *metrics_dict['adj_contemp_%s' % fpr_precision], capsize=capsize, alpha=alpha_marker,
color=color_picker(method), marker=marker2, linestyle='dashed')
ax2a.errorbar(para_plot, *metrics_dict['edgemarks_lagged_recall'], capsize=capsize, alpha=alpha_marker,
color=color_picker(method), marker=marker1, linestyle='solid')
ax2a.errorbar(para_plot, *metrics_dict['edgemarks_auto_recall'], capsize=capsize, alpha=alpha_marker,
color=color_picker(method), marker=marker3, linestyle='solid')
ax2a.errorbar(para_plot, *metrics_dict['edgemarks_contemp_recall'], capsize=capsize, alpha=alpha_marker,
color=color_picker(method), marker=marker2, linestyle='dashed')
ax2b.errorbar(para_plot, *metrics_dict['edgemarks_lagged_precision'], capsize=capsize, alpha=alpha_marker,
color=color_picker(method), marker=marker1, linestyle='solid')
ax2b.errorbar(para_plot, *metrics_dict['edgemarks_auto_precision'], capsize=capsize, alpha=alpha_marker,
color=color_picker(method), marker=marker3, linestyle='solid')
ax2b.errorbar(para_plot, *metrics_dict['edgemarks_contemp_precision'], capsize=capsize, alpha=alpha_marker,
color=color_picker(method), marker=marker2, linestyle='dashed')
ax3a.errorbar(para_plot, *metrics_dict['valmin_lagged'], capsize=capsize, alpha=alpha_marker,
color=color_picker(method), marker=marker1)
ax3a.errorbar(para_plot, *metrics_dict['valmin_auto'], capsize=capsize, alpha=alpha_marker,
color=color_picker(method), marker=marker3)
ax3a.errorbar(para_plot, *metrics_dict['valmin_contemp'], capsize=capsize, alpha=alpha_marker,
color=color_picker(method), marker=marker2)
ax3b.errorbar(para_plot, *metrics_dict['cardinality_lagged'], capsize=capsize, alpha=alpha_marker,
color=color_picker(method), marker=marker1)
ax3b.errorbar(para_plot, *metrics_dict['cardinality_auto'], capsize=capsize, alpha=alpha_marker,
color=color_picker(method), marker=marker3)
ax3b.errorbar(para_plot, *metrics_dict['cardinality_contemp'], capsize=capsize, alpha=alpha_marker,
color=color_picker(method), marker=marker2)
ax4a.errorbar(para_plot, metrics_dict['computation_time'][0], metrics_dict['computation_time'][1].reshape(2, 1), capsize=capsize, alpha=alpha_marker,
color=color_picker(method), marker='p', linestyle='solid')
if method == methods[0]:
ax4b.plot(para_plot, metrics_dict['directed_anylink'][0], alpha=alpha_marker,
color='black', marker='>')
ax4b.plot(para_plot, metrics_dict['bidirected_anylink'][0], alpha=alpha_marker,
color='black', marker='D')
unoriented = 1. - metrics_dict['directed_anylink'][0] - metrics_dict['bidirected_anylink'][0]
ax4b.plot(para_plot, unoriented, alpha=alpha_marker,
color='black', marker='o', fillstyle='none')
# print(axes)
axes = {'ax1a':ax1a, 'ax1b':ax1b, 'ax2a':ax2a, 'ax2b':ax2b, 'ax3a':ax3a, 'ax3b':ax3b, 'ax4a':ax4a, 'ax4b':ax4b}
for axname in axes:
ax = axes[axname]
if which == 'N':
# print(ax)
# print(axes)
ax.set_xlim(-0.5, len(paras))
if ci_test == 'par_corr':
ax.xaxis.set_ticks([paras.index(p) for p in paras] )
ax.xaxis.set_ticklabels([str(p) for p in paras] )
else:
ax.xaxis.set_ticks([paras.index(p) for p in paras] )
ax.xaxis.set_ticklabels([str(p) for p in paras] )
elif which == 'auto':
ax.set_xlim(0, len(paras))
ax.xaxis.set_ticks([paras.index(p) for p in paras] )
ax.xaxis.set_ticklabels([str(p) for p in paras] )
elif which == 'tau_max':
ax.set_xlim(-0.5, len(paras))
ax.xaxis.set_ticks([paras.index(p) for p in paras] )
ax.xaxis.set_ticklabels([str(p) for p in paras] )
elif which == 'unobserved':
ax.set_xlim(0, len(paras))
ax.xaxis.set_ticks([paras.index(p) for p in paras] )
ax.xaxis.set_ticklabels([str(p) for p in paras] )
elif which == 'sample_size':
ax.set_xlim(0, len(paras))
ax.xaxis.set_ticks([paras.index(p) for p in paras] )
ax.xaxis.set_ticklabels([str(p) for p in paras] )
# ax.set_xlabel(xlabel, fontsize=8)
for line in ax.get_lines():
line.set_clip_on(False)
# line.set_capsize(3)
# Disable spines.
if not 'ax4' in axname:
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.spines['left'].set_position(('outward', 3))
ax.spines['bottom'].set_position(('outward', 3))
else:
ax.yaxis.set_ticks_position('right')
ax.spines['right'].set_position(('outward', 3))
ax.spines['left'].set_color('none')
ax.spines['right'].set_color('black')
ax.spines['right'].set_visible(True)
ax.spines['bottom'].set_position(('outward', 3))
ax.spines['left'].set_position(('outward', 3))
ax.grid(axis='y', linewidth=0.3)
pad = 2
if axname == 'ax1b':
label_1 = "Lagged"
label_2 = "Contemp."
label_3 = "Auto"
ax.errorbar([], [], linestyle='',
capsize=capsize, label=label_1,
color='black', marker=marker1)
ax.errorbar([], [], linestyle='',
capsize=capsize, label=label_2,
color='black', marker=marker2)
ax.errorbar([], [], linestyle='',
capsize=capsize, label=label_3,
color='black', marker=marker3)
ax.legend(ncol=2,
columnspacing=0.,
# bbox_to_anchor=(0., 1.02, 1., .03), borderaxespad=0, mode="expand",
loc='upper right', fontsize=5, framealpha=0.3
) #.draw_frame(False)
if axname == 'ax1a':
ax.set_title('Adj. TPR', fontsize=6, pad=pad)
ax.set_ylim(0., 1.)
# ax.spines['left'].set_position(('outward', 3))
# ax.grid(axis='y')
ax.tick_params(labelbottom=False)
elif axname == 'ax1b':
if fpr_precision == 'precision':
ax.set_title('Adj. precision', fontsize=6, pad=pad)
ax.set_ylim(0., 1.)
else:
# ax.tick_params(labelleft=False)
ax.set_title('Adj. FPR', fontsize=6, pad=pad)
if which != 'pc_alpha':
ax.set_yscale('symlog', linthreshy=pc_alpha*2)
ax.yaxis.set_major_formatter(ScalarFormatter())
ax.set_ylim(0., .1)
elif axname == 'ax2a':
ax.set_title('Orient. recall', fontsize=6, pad=pad)
ax.set_ylim(0., 1.)
ax.tick_params(labelbottom=False)
# ax.tick_params(labelleft=False)
elif axname == 'ax2b':
ax.set_title('Orient. precision', fontsize=6, pad=pad)
ax.set_ylim(0., 1.)
# ax.tick_params(labelleft=False)
elif axname == 'ax3a':
ax.set_title('Effect size', fontsize=6, pad=pad)
ax.set_ylim(0., 0.5)
ax.tick_params(labelbottom=False)
elif axname == 'ax3b':
ax.set_title('Cardinality', fontsize=6, pad=pad)
elif axname == 'ax4a':
ax.set_title('Runtime [s]', fontsize=6, pad=pad)
# ax.set_ylim(0., 1.)
ax.tick_params(labelbottom=False)
elif axname == 'ax4b':
ax.set_title('True PAG', fontsize=6, pad=pad)
ax.set_ylim(0., 1.)
label_1 = "Lagged"
label_2 = "Contemp."
label_3 = "Auto"
ax.plot([], [], linestyle='',
label=r'directed', #$\rightarrow$',
color='black', marker='>')
ax.plot([], [], linestyle='',
label=r'bidirected', #$\leftrightarrow$',
color='black', marker='D')
ax.plot([], [], linestyle='',
label=r'unoriented', #$\rightarrow$',
color='black', marker='o', fillstyle='none')
ax.legend(ncol=1,
columnspacing=.5,
# bbox_to_anchor=(0., 1.02, 1., .03), borderaxespad=0, mode="expand",
loc='upper left', fontsize=5, framealpha=0.3
)
axlegend = fig.add_axes([0.05, .89, 1., .05])
axlegend.axis('off')
for method in methods:
axlegend.errorbar([], [], linestyle='',
capsize=capsize, label=method_label(method),
color=color_picker(method), marker='s')
# if not 'paper' in variant:
# ncol = 1
# fontsize = 5
# else:
ncol = 3 #len(methods)
fontsize = 6
axlegend.legend(ncol=ncol,
# bbox_to_anchor=(0., 1.0, 1., .03),
loc='lower left',
# borderaxespad=0, mode="expand",
markerscale=3,
columnspacing=.75,
labelspacing=.01,
fontsize=fontsize, framealpha=.5
) #.draw_frame(False)
# if 'paper' in variant and SM is False:
# if 'autocorrfinalphases' in variant: # and ci_test == 'par_corr':
# plt.figtext(0., 1., "A", fontsize=12, fontweight='bold',
# ha='left', va='top')
# elif 'autocorr' in variant:
# plt.figtext(0., 1., "B", fontsize=12, fontweight='bold',
# ha='left', va='top')
# elif 'highdim' in variant:
# plt.figtext(0., 1., "C", fontsize=12, fontweight='bold',
# ha='left', va='top')
# elif 'tau_max' in variant:
# plt.figtext(0., 1., "D", fontsize=12, fontweight='bold',
# ha='left', va='top')
if which == 'N':
plt.figtext(0.5, 0., r"Number of variables $N$", fontsize=8,
horizontalalignment='center', va='bottom')
plt.figtext(1., 1., r"$T=%d, a=%s, \tau_{\max}=%d, \lambda=%s$" %(T, auto, tau_max, frac_unobserved)
+"\n" + r"%s, $\alpha=%s$" %(name[ci_test],pc_alpha),
fontsize=6, ha='right', va='top')
elif which == 'auto':
plt.figtext(0.5, 0., r"Autocorrelation $a$", fontsize=8,
horizontalalignment='center', va='bottom')
plt.figtext(1., 1., r"$N=%d, T=%d, \tau_{\max}=%d, \lambda=%s$" %(N, T, tau_max, frac_unobserved)
+"\n" + r"%s, $\alpha=%s$" %(name[ci_test], pc_alpha),
fontsize=6, ha='right', va='top')
elif which == 'tau_max':
plt.figtext(0.5, 0., r"Time lag $\tau_{\max}$", fontsize=8,
horizontalalignment='center', va='bottom')
plt.figtext(1., 1., r"$N=%d, T=%d, a=%s, \lambda=%s$" %(N, T, auto, frac_unobserved)
+"\n" + r"%s, $\alpha=%s$" %(name[ci_test], pc_alpha),
fontsize=6, ha='right', va='top')
elif which == 'unobserved':
plt.figtext(0.5, 0., r"Frac. unobserved", fontsize=8,
horizontalalignment='center', va='bottom')
# plt.figtext(1., 1., r"$N=%d, a=%s, T=%d, \alpha=%s$" %(N, auto, T, pc_alpha),)
# fontsize=6, ha='right', va='top')
plt.figtext(1., 1., r"$N=%d, T=%d, a=%s, \tau_{\max}=%d$" %(N, T, auto, tau_max)
+"\n" + r"%s, $\alpha=%s$" %(name[ci_test], pc_alpha),
fontsize=6, ha='right', va='top')
elif which == 'sample_size':
plt.figtext(0.5, 0., r"Sample size $T$", fontsize=8,
horizontalalignment='center', va='bottom')
plt.figtext(1., 1., r"$N=%d, a=%s, \tau_{\max}=%d, \lambda=%s$" %(N, auto, tau_max, frac_unobserved)
+"\n" + r"%s, $\alpha=%s$" %(name[ci_test], pc_alpha),
fontsize=6, ha='right', va='top')
fig.subplots_adjust(left=0.06, right=0.93, hspace=.3, bottom=0.12, top=0.85, wspace=.3)
fig.savefig(save_folder + '%s.%s' %(save_suffix, save_type))
plot_files.append(save_folder + '%s.%s' %(save_suffix, save_type))
def color_picker(method):
# if not 'paper' in variant:
# colors = ['orange', 'red', 'green', 'blue', 'grey', 'lightgreen']
# return colors[methods.index(method)]
if method == 'svarfci':
return 'magenta'
elif method == 'svarrfci':
return 'orange'
elif 'lpcmci' in method:
cmap = plt.get_cmap('Greens')
if 'prelim0' in method:
return cmap(0.3)
elif 'prelim1' in method:
return cmap(0.4)
elif 'prelim2' in method:
return cmap(0.5)
elif 'prelim3' in method:
return cmap(0.6)
elif 'prelim4' in method:
return cmap(0.7)
else:
return 'grey'
def links_from_N(num_nodes):
if 'highdim' in variant:
return num_nodes
else:
return num_nodes
if __name__ == '__main__':
save_type = 'pdf'
plot_files = []
paper = False
SM = True
fpr_precision = 'fpr'
# Directory to save figures
folder_name = "results/"
save_folder = "figures/"
methods = [
"lpcmci_nprelim0",
"lpcmci_nprelim4",
"svarfci",
"svarrfci",
]
if variant == 'autocorr':
if ci_test == 'par_corr':
model = 'random_lineargaussian' # random_lineargaussian random_nonlinearmixed
T_here = [200, 500, 1000]
N_here = [5] #, 10] #[3, 5, 10]
num_rows = 3
else:
model = 'random_nonlinearmixed'
T_here = [200, 400] # [200, 500, 1000]
N_here = [3, 5, 10]
num_rows = 3
tau_max = 5
vary_auto = [0., 0.5, 0.9, 0.95, 0.99] # [0., 0.3, 0.5, 0.7, 0.9, 0.95, 0.99]
pc_alpha_here = [0.01, 0.05]
min_coeff = 0.2
coeff = 0.8
frac_unobserved = 0.3
contemp_fraction = 0.3
max_true_lag = 3
for T in T_here:
for N in N_here:
if N == 2: n_links = 1
else: n_links = N
for pc_alpha in pc_alpha_here:
para_setup_name = (variant, N, n_links, min_coeff, coeff, contemp_fraction, frac_unobserved,
max_true_lag, T, ci_test, pc_alpha, tau_max)
save_suffix = '%s-'*len(para_setup_name) % para_setup_name
save_suffix = save_suffix[:-1]
print(save_suffix)
draw_it(paras=vary_auto, which='auto')
elif variant == 'highdim':
if ci_test == 'par_corr':
model = 'random_lineargaussian' # random_lineargaussian random_nonlinearmixed
T_here = [200, 500, 1000]
vary_N = [3, 5, 7, 10, 15]
auto_here = [0., 0.5, 0.95, 0.99]
num_rows = 4
else:
model = 'random_nonlinearmixed'
T_here = [200] #, 500, 1000]
vary_N = [3, 5]
num_rows = 2
contemp_fraction = .3
frac_unobserved = 0.3
max_true_lag = 3
tau_max = 5
min_coeff = 0.2
coeff = 0.8
for T in T_here:
for auto in auto_here: #, 0.5, 0.9]:
for pc_alpha in [0.01, 0.05]: #, 0.1]:
para_setup_name = (variant, min_coeff, coeff, auto, contemp_fraction, frac_unobserved, max_true_lag, T, ci_test, pc_alpha, tau_max)
save_suffix = '%s-'*len(para_setup_name) % para_setup_name
save_suffix = save_suffix[:-1]
print(save_suffix)
draw_it(paras=vary_N, which='N')
elif variant == 'sample_size':
if ci_test == 'par_corr':
model = 'random_lineargaussian' # random_lineargaussian random_nonlinearmixed
vary_T = [200, 500, 1000]
N_here = [3, 5, 10]
auto_here = [0., 0.5, 0.95, 0.99]
num_rows = 4
else:
model = 'random_nonlinearmixed'
vary_T = [200, 500, 1000]
N_here = [5]
auto_here = [0., 0.5, 0.95, 0.99]
num_rows = 2
min_coeff = 0.2
coeff = 0.8
contemp_fraction = 0.3
frac_unobserved = 0.3
max_true_lag = 3
tau_max = 5
for N in N_here:
if N == 2: n_links = 1
else: n_links = N
for auto in auto_here:
for pc_alpha in [0.01, 0.05]: #, 0.1]:
para_setup_name = (variant, N, n_links, min_coeff, coeff, contemp_fraction, frac_unobserved, max_true_lag, auto, ci_test, pc_alpha, tau_max)
save_suffix = '%s-'*len(para_setup_name) % para_setup_name
save_suffix = save_suffix[:-1]
print(save_suffix)
draw_it(paras=vary_T, which='sample_size')
elif variant == 'unobserved':
if ci_test == 'par_corr':
model = 'random_lineargaussian' # random_lineargaussian random_nonlinearmixed
T_here = [200, 500, 1000]
N_here = [5, 10]
auto_here = [0., 0.5, 0.95, 0.99]
num_rows = 4
else:
model = 'random_nonlinearmixed'
T_here = [200, 500, 1000]
N_here = [5]
auto_here = [0., 0.5, 0.95, 0.99]
num_rows = 2
min_coeff = 0.2
coeff = 0.8
contemp_fraction = 0.3
vary_frac_unobserved = [0., 0.3, 0.5]
max_true_lag = 3
tau_max = 5
for N in N_here:
if N == 2: n_links = 1
else: n_links = N
for T in T_here:
for auto in auto_here:
for pc_alpha in [0.01, 0.05]: #, 0.1]:
para_setup_name = (variant, N, n_links, min_coeff, coeff, contemp_fraction, max_true_lag, auto, T, ci_test, pc_alpha, tau_max)
save_suffix = '%s-'*len(para_setup_name) % para_setup_name
save_suffix = save_suffix[:-1]
print(save_suffix)
draw_it(paras=vary_frac_unobserved, which='unobserved')
if variant == 'tau_max':
if ci_test == 'par_corr':
model = 'random_lineargaussian' # random_lineargaussian random_nonlinearmixed
T_here = [200, 500, 1000]
N_here = [5]
auto_here = [0., 0.5, 0.95, 0.99]
num_rows = 4
else:
model = 'random_nonlinearmixed'
T_here = [200, 500, 1000]
N_here = [5]
auto_here = [0., 0.5, 0.95, 0.99]
num_rows = 2
min_coeff = 0.2
coeff = 0.8
contemp_fraction = 0.3
frac_unobserved = 0.3
max_true_lag = 3
vary_tau_max = [3, 5, 7, 10]
for T in T_here:
for N in N_here:
if N == 2: n_links = 1
else: n_links = N
for auto in auto_here:
for pc_alpha in [0.01, 0.05]: #, 0.1]:
para_setup_name = (variant, N, n_links, min_coeff, coeff, contemp_fraction, frac_unobserved, max_true_lag, auto, T, ci_test, pc_alpha)
save_suffix = '%s-'*len(para_setup_name) % para_setup_name
save_suffix = save_suffix[:-1]
print(save_suffix)
draw_it(paras=vary_tau_max, which='tau_max')
| 28,125 | 35.814136 | 165 |
py
|
correlate
|
correlate-master/causal_discovery/LPCMCI/test_observational_discovery.py
|
import pickle
from matplotlib import pyplot as plt
from tigramite import plotting as tp
from tigramite.independence_tests import ParCorr
from causal_discovery.LPCMCI.lpcmci import LPCMCI
from config import checkpoint_path
class TestLPCMCI:
def test_orient_with_interv_data(self):
interv_independencies = [(0, 3, 0), (0, 3, 1), (1, 3, 0), (1, 3, 1), (2, 3, 0), (2, 3, 1)]
interv_dependencies = [(4, 2, 0)]
self.graph_dict = {
0: {(1, 0): 'o?o', (2, 0): 'o?o', (3, 0): 'o?o', (4, 0): 'o?o', (0, -1): 'oL>', (1, -1): 'oL>',(2, -1): 'oL>', (3, -1): 'oL>', (4, -1): 'oL>'},
1: {(0, 0): 'o?o', (2, 0): 'o?o', (3, 0): 'o?o', (4, 0): 'o?o', (0, -1): 'oL>', (1, -1): 'oL>',(2, -1): 'oL>', (3, -1): 'oL>', (4, -1): 'oL>'},
2: {(0, 0): 'o?o', (1, 0): 'o?o', (3, 0): 'o?o', (4, 0): 'o?o', (0, -1): 'oL>', (1, -1): 'oL>',(2, -1): 'oL>', (3, -1): 'oL>', (4, -1): 'oL>'},
3: {(0, 0): 'o?o', (1, 0): 'o?o', (2, 0): 'o?o', (4, 0): 'o?o', (0, -1): 'oL>', (1, -1): 'oL>',(2, -1): 'oL>', (3, -1): 'oL>', (4, -1): 'oL>'},
4: {(0, 0): 'o?o', (1, 0): 'o?o', (2, 0): 'o?o', (3, 0): 'o?o', (0, -1): 'oL>', (1, -1): 'oL>',(2, -1): 'oL>', (3, -1): 'oL>', (4, -1): 'oL>'}
}
# independencies
if interv_independencies is not None and len(interv_independencies) > 0:
for independency in interv_independencies:
eff = (independency[0], independency[2])
cause = (independency[1], 0)
(var_cause, lag_cause) = cause
(var_eff, lag_eff) = eff
# if self.graph_dict[var_eff][(var_cause, lag_cause - lag_eff)] != "":
if self.graph_dict[var_eff][(var_cause, lag_cause - lag_eff)][0] in ["o"]:
self.graph_dict[var_eff][(var_cause, lag_cause - lag_eff)] = "<" + str(
self.graph_dict[var_eff][(var_cause, lag_cause - lag_eff)][1:])
# If A and B are contemporaneous, also the link from B to A is written as the reverse
if lag_eff == 0:
self.graph_dict[var_cause][(var_eff, 0)] = str(
self.graph_dict[var_cause][(var_eff, 0)][:2]) + ">"
else:
raise ValueError("orient with_interv_data: unexpected edgemark. expected o but is:",
self.graph_dict[var_eff][(var_cause, lag_cause - lag_eff)][0])
# dependencies
if interv_dependencies is not None and len(interv_dependencies) > 0:
for dependency in interv_dependencies:
eff = (dependency[0], dependency[2])
cause = (dependency[1], 0)
(var_cause, lag_cause) = cause
(var_eff, lag_eff) = eff
# if self.graph_dict[var_eff][(var_cause, lag_cause - lag_eff)] != "":
if self.graph_dict[var_eff][(var_cause, lag_cause - lag_eff)][0] in ["o"] and \
self.graph_dict[var_eff][(var_cause, lag_cause - lag_eff)][2] in ["o", ">"]:
self.graph_dict[var_eff][(var_cause, lag_cause - lag_eff)] = "-" + str(
self.graph_dict[var_eff][(var_cause, lag_cause - lag_eff)][1] + ">")
# If A and B are contemporaneous, also the link from B to A is written as the reverse
if lag_eff == 0:
self.graph_dict[var_cause][(var_eff, 0)] = "<"+ str(
self.graph_dict[var_cause][(var_eff, 0)][1]) + "-"
else:
raise ValueError("orient with_interv_data: unexpected edgemark. expected o but is:",
self.graph_dict[var_eff][(var_cause, lag_cause - lag_eff)][0])
solution_graph_dict = {
0: {(1, 0): 'o?o', (2, 0): 'o?o', (3, 0): '<?o', (4, 0): 'o?o', (0, -1): 'oL>', (1, -1): 'oL>',(2, -1): 'oL>', (3, -1): '<L>', (4, -1): 'oL>'},
1: {(0, 0): 'o?o', (2, 0): 'o?o', (3, 0): '<?o', (4, 0): 'o?o', (0, -1): 'oL>', (1, -1): 'oL>',(2, -1): 'oL>', (3, -1): '<L>', (4, -1): 'oL>'},
2: {(0, 0): 'o?o', (1, 0): 'o?o', (3, 0): '<?o', (4, 0): '<?-', (0, -1): 'oL>', (1, -1): 'oL>',(2, -1): 'oL>', (3, -1): '<L>', (4, -1): 'oL>'},
3: {(0, 0): 'o?>', (1, 0): 'o?>', (2, 0): 'o?>', (4, 0): 'o?o', (0, -1): 'oL>', (1, -1): 'oL>',(2, -1): 'oL>', (3, -1): 'oL>', (4, -1): 'oL>'},
4: {(0, 0): 'o?o', (1, 0): 'o?o', (2, 0): '-?>', (3, 0): 'o?o', (0, -1): 'oL>', (1, -1): 'oL>',(2, -1): 'oL>', (3, -1): 'oL>', (4, -1): 'oL>'}}
assert self.graph_dict == solution_graph_dict
def test_run_lpcmci(self):
# load
filename = checkpoint_path + 'test_run_lpcmci.pkl'
with open(filename, 'rb') as f:
df, _, _, _ = pickle.load(f)
pc_alpha = 0.05
tau_max = 1
# (effect, cause, tau, p-val)
external_independencies = [(0, 3, 0), (0, 3, 1), (1, 3, 0), (1, 3, 1), (2, 3, 0), (2, 3, 1)]
external_dependencies = [ (4, 2, 1)]
# run lpcmci
lpcmci = LPCMCI(
dataframe=df,
cond_ind_test=ParCorr(
significance='analytic',
recycle_residuals=True))
lpcmci.run_lpcmci(
external_independencies=external_independencies,
external_dependencies=external_dependencies,
tau_max=tau_max,
pc_alpha=pc_alpha,
max_p_non_ancestral=2, # todo 3
n_preliminary_iterations=1, # todo 4
prelim_only=False,
verbosity=0)
graph = lpcmci.graph
# tp.plot_graph(
# val_matrix=lpcmci.val_min_matrix,
# link_matrix=graph,
# var_names=["0", "2", "3", "4", "5"],
# link_colorbar_label='current LPCMCI estimate. day',
# node_colorbar_label='auto-MCI',
# figsize=(10, 6),
# )
# plt.show()
for exi in external_independencies:
exi = list(exi)
forward_arrow = graph[exi[1], exi[0], exi[2]]
assert forward_arrow == "" or forward_arrow[0] == "<"
# symmetric for contemporaneous links
if exi[2] == 0:
backward_arrow = graph[exi[0], exi[1], exi[2]]
assert backward_arrow == "" or backward_arrow[2] == ">"
for exi in external_dependencies:
exi = list(exi)
forward_arrow = graph[exi[1], exi[0], exi[2]]
assert forward_arrow == "" or forward_arrow[0] == "-"
assert forward_arrow == "" or forward_arrow[2] == ">"
# symmetric for contemporaneous links
if exi[2] == 0:
backward_arrow = graph[exi[0], exi[1], exi[2]]
assert backward_arrow == "" or backward_arrow[2] == "-"
assert backward_arrow == "" or backward_arrow[0] == "<"
| 6,955 | 52.507692 | 155 |
py
|
correlate
|
correlate-master/causal_discovery/LPCMCI/utilities.py
|
from collections import OrderedDict
from itertools import product
import numpy as np
import tigramite.data_processing as pp
from causal_discovery.LPCMCI.svarfci import SVARFCI
class OracleCI:
r"""Oracle of conditional independence test X _|_ Y | Z given a graph.
    Class around a link_coeffs causal ground truth. X _|_ Y | Z is based on
assessing whether X and Y are d-separated given Z in the graph.
Class can be used like a Tigramite conditional independence class
(e.g., ParCorr).
Parameters
----------
link_coeffs : dict
Dictionary of form {0:[((0, -1), coeff, func), ...], 1:[...], ...}.
verbosity : int, optional (default: 0)
Level of verbosity.
"""
# documentation
@property
def measure(self):
"""
Concrete property to return the measure of the independence test
"""
return self._measure
def __init__(self,
link_coeffs,
observed_vars=None,
verbosity=0):
self.verbosity = verbosity
self._measure = 'oracle_ci'
self.confidence = None
self.link_coeffs = link_coeffs
self.N = len(link_coeffs)
# Initialize already computed dsepsets of X, Y, Z
self.dsepsets = {}
# Initialize observed vars
self.observed_vars = observed_vars
if self.observed_vars is None:
self.observed_vars = range(self.N)
else:
if not set(self.observed_vars).issubset(set(range(self.N))):
raise ValueError("observed_vars must be subset of range(N).")
if self.observed_vars != sorted(self.observed_vars):
raise ValueError("observed_vars must ordered.")
if len(self.observed_vars) != len(set(self.observed_vars)):
raise ValueError("observed_vars must not contain duplicates.")
def set_dataframe(self, dataframe):
"""Dummy function."""
pass
def _check_XYZ(self, X, Y, Z):
"""Checks variables X, Y, Z.
Parameters
----------
X, Y, Z : list of tuples
For a dependence measure I(X;Y|Z), Y is of the form [(varY, 0)],
where var specifies the variable index. X typically is of the form
[(varX, -tau)] with tau denoting the time lag and Z can be
multivariate [(var1, -lag), (var2, -lag), ...] .
Returns
-------
X, Y, Z : tuple
Cleaned X, Y, Z.
"""
# Get the length in time and the number of nodes
N = self.N
# Remove duplicates in X, Y, Z
X = list(OrderedDict.fromkeys(X))
Y = list(OrderedDict.fromkeys(Y))
Z = list(OrderedDict.fromkeys(Z))
# If a node in Z occurs already in X or Y, remove it from Z
Z = [node for node in Z if (node not in X) and (node not in Y)]
# Check that all lags are non-positive and indices are in [0,N-1]
XYZ = X + Y + Z
dim = len(XYZ)
# Ensure that XYZ makes sense
if np.array(XYZ).shape != (dim, 2):
raise ValueError("X, Y, Z must be lists of tuples in format"
" [(var, -lag),...], eg., [(2, -2), (1, 0), ...]")
if np.any(np.array(XYZ)[:, 1] > 0):
raise ValueError("nodes are %s, " % str(XYZ) +
"but all lags must be non-positive")
if (np.any(np.array(XYZ)[:, 0] >= N)
or np.any(np.array(XYZ)[:, 0] < 0)):
raise ValueError("var indices %s," % str(np.array(XYZ)[:, 0]) +
" but must be in [0, %d]" % (N - 1))
if np.all(np.array(Y)[:, 1] != 0):
raise ValueError("Y-nodes are %s, " % str(Y) +
"but one of the Y-nodes must have zero lag")
return (X, Y, Z)
def _get_lagged_parents(self, var_lag, exclude_contemp=False):
"""Helper function to yield lagged parents for var_lag from
        self.link_coeffs.
Parameters
----------
var_lag : tuple
Tuple of variable and lag which is assumed <= 0.
exclude_contemp : bool
            Whether contemporaneous links should be excluded.
Yields
------
Next lagged parent.
"""
var, lag = var_lag
for link_props in self.link_coeffs[var]:
i, tau = link_props[0]
coeff = link_props[1]
if coeff != 0.:
if not (exclude_contemp and lag == 0):
yield (i, lag + tau)
def _get_children(self):
"""Helper function to get children from links.
Note that for children the lag is positive.
Returns
-------
children : dict
Dictionary of form {0:[(0, 1), (3, 0), ...], 1:[], ...}.
"""
N = len(self.link_coeffs)
children = dict([(j, []) for j in range(N)])
for j in range(N):
for link_props in self.link_coeffs[j]:
i, tau = link_props[0]
coeff = link_props[1]
if coeff != 0.:
children[i].append((j, abs(tau)))
return children
def _get_lagged_children(self, var_lag, children, exclude_contemp=False):
"""Helper function to yield lagged children for var_lag from children.
Parameters
----------
var_lag : tuple
Tuple of variable and lag which is assumed <= 0.
children : dict
Dictionary of form {0:[(0, 1), (3, 0), ...], 1:[], ...}.
exclude_contemp : bool
            Whether contemporaneous links should be excluded.
Yields
------
Next lagged child.
"""
var, lag = var_lag
# lagged_parents = []
for child in children[var]:
k, tau = child
if not (exclude_contemp and tau == 0):
# lagged_parents.append((i, lag + tau))
yield (k, lag + tau)
def _get_non_blocked_ancestors(self, Y, conds=None, mode='non_repeating',
max_lag=None):
"""Helper function to return the non-blocked ancestors of variables Y.
Returns a dictionary of ancestors for every y in Y. y is a tuple (
var, lag) where lag <= 0. All ancestors with directed paths towards y
that are not blocked by conditions in conds are included. In mode
'non_repeating' an ancestor X^i_{t-\tau_i} with link X^i_{t-\tau_i}
--> X^j_{ t-\tau_j} is only included if X^i_{t'-\tau_i} --> X^j_{
t'-\tau_j} is not already part of the ancestors. The most lagged
ancestor for every variable X^i defines the maximum ancestral time
lag, which is also returned. In mode 'max_lag' ancestors are included
up to the maximum time lag max_lag.
        Its main use is to return the maximum ancestral time lag max_lag of
        y in Y for every variable in self.link_coeffs.
Parameters
----------
Y : list of tuples
Of the form [(var, -tau)], where var specifies the variable
index and tau the time lag.
conds : list of tuples
Of the form [(var, -tau)], where var specifies the variable
index and tau the time lag.
mode : {'non_repeating', 'max_lag'}
Whether repeating links should be excluded or ancestors should be
followed up to max_lag.
max_lag : int
Maximum time lag to include ancestors.
Returns
-------
ancestors : dict
Includes ancestors for every y in Y.
max_lag : int
Maximum time lag to include ancestors.
"""
def _repeating(link, seen_links):
"""Returns True if a link or its time-shifted version is already
included in seen_links."""
i, taui = link[0]
j, tauj = link[1]
for seen_link in seen_links:
seen_i, seen_taui = seen_link[0]
seen_j, seen_tauj = seen_link[1]
if (i == seen_i and j == seen_j
and abs(tauj - taui) == abs(seen_tauj - seen_taui)):
return True
return False
if conds is None:
conds = []
conds = [z for z in conds if z not in Y]
N = len(self.link_coeffs)
# Initialize max. ancestral time lag for every N
if mode == 'non_repeating':
max_lag = 0
else:
if max_lag is None:
raise ValueError("max_lag must be set in mode = 'max_lag'")
ancestors = dict([(y, []) for y in Y])
for y in Y:
j, tau = y # tau <= 0
if mode == 'non_repeating':
max_lag = max(max_lag, abs(tau))
seen_links = []
this_level = [y]
while len(this_level) > 0:
next_level = []
for varlag in this_level:
for par in self._get_lagged_parents(varlag):
i, tau = par
if par not in conds and par not in ancestors[y]:
if ((mode == 'non_repeating' and
not _repeating((par, varlag), seen_links)) or
(mode == 'max_lag' and
abs(tau) <= abs(max_lag))):
ancestors[y].append(par)
if mode == 'non_repeating':
max_lag = max(max_lag,
abs(tau))
next_level.append(par)
seen_links.append((par, varlag))
this_level = next_level
return ancestors, max_lag
def _has_any_path(self, X, Y, conds, max_lag):
"""Returns True if X and Y are d-connected by any open path.
Does breadth-first search from both X and Y and meets in the middle.
Paths are walked according to the d-separation rules where paths can
only traverse motifs <-- v <-- or <-- v --> or --> v --> or
--> [v] <-- where [.] indicates that v is conditioned on.
Furthermore, paths nodes (v, t) need to fulfill max_lag <= t <= 0
and links cannot be traversed backwards.
Parameters
----------
X, Y : lists of tuples
Of the form [(var, -tau)], where var specifies the variable
index and tau the time lag.
conds : list of tuples
Of the form [(var, -tau)], where var specifies the variable
index and tau the time lag.
max_lag : int
Maximum time lag.
"""
def _walk_to_parents(v, fringe, this_path, other_path):
"""Helper function to update paths when walking to parents."""
found_path = False
for w in self._get_lagged_parents(v):
# Cannot walk into conditioned parents and
# cannot walk beyond t or max_lag
i, t = w
if (w not in conds and
# (w, v) not in seen_links and
t <= 0 and abs(t) <= max_lag):
if ((w, 'tail') not in this_path and
(w, None) not in this_path):
if self.verbosity > 1:
print("Walk parent: %s --> %s " % (v, w))
fringe.append((w, 'tail'))
this_path[(w, 'tail')] = (v, 'arrowhead')
# seen_links.append((v, w))
# Determine whether X and Y are connected
# (w, None) indicates the start or end node X/Y
if ((w, 'tail') in other_path
or (w, 'arrowhead') in other_path
or (w, None) in other_path):
if self.verbosity > 1:
print("Found connection: ", w)
found_path = True
break
return found_path, fringe, this_path
def _walk_to_children(v, fringe, this_path, other_path):
"""Helper function to update paths when walking to children."""
found_path = False
for w in self._get_lagged_children(v, children):
# You can also walk into conditioned children,
# but cannot walk beyond t or max_lag
i, t = w
if (
# (w, v) not in seen_links and
t <= 0 and abs(t) <= max_lag):
if ((w, 'arrowhead') not in this_path and
(w, None) not in this_path):
if self.verbosity > 1:
print("Walk child: %s --> %s " % (v, w))
fringe.append((w, 'arrowhead'))
this_path[(w, 'arrowhead')] = (v, 'tail')
# seen_links.append((v, w))
# Determine whether X and Y are connected
# If the other_path contains w with a tail, then w must
# NOT be conditioned on. Alternatively, if the other_path
# contains w with an arrowhead, then w must be
# conditioned on.
if (((w, 'tail') in other_path and w not in conds)
or ((w, 'arrowhead') in other_path and w in conds)
or (w, None) in other_path):
if self.verbosity > 1:
print("Found connection: ", w)
found_path = True
break
return found_path, fringe, this_path
def _walk_fringe(this_level, fringe, this_path, other_path):
"""Helper function to walk each fringe, i.e., the path from X and Y,
respectively."""
found_path = False
for v, mark in this_level:
if v in conds:
if (mark == 'arrowhead' or mark == None):
# Motif: --> [v] <--
# If standing on a condition and coming from an
# arrowhead, you can only walk into parents
(found_path, fringe,
this_path) = _walk_to_parents(v, fringe,
this_path, other_path)
if found_path: break
else:
if (mark == 'tail' or mark == None):
# Motif: <-- v <-- or <-- v -->
# If NOT standing on a condition and coming from
# a tail mark, you can walk into parents or
# children
(found_path, fringe,
this_path) = _walk_to_parents(v, fringe,
this_path, other_path)
if found_path: break
(found_path, fringe,
this_path) = _walk_to_children(v, fringe,
this_path, other_path)
if found_path: break
elif mark == 'arrowhead':
# Motif: --> v -->
# If NOT standing on a condition and coming from
# an arrowhead mark, you can only walk into
# children
(found_path, fringe,
this_path) = _walk_to_children(v, fringe,
this_path, other_path)
if found_path: break
if self.verbosity > 1:
print("Updated fringe: ", fringe)
return found_path, fringe, this_path, other_path
if conds is None:
conds = []
conds = [z for z in conds if z not in Y and z not in X]
N = len(self.link_coeffs)
children = self._get_children()
# Iterate through nodes in X and Y
for x in X:
for y in Y:
seen_links = []
# predecessor and successors in search
# (x, None) where None indicates start/end nodes, later (v,
# 'tail') or (w, 'arrowhead') indicate how a link ends at a node
pred = {(x, None): None}
succ = {(y, None): None}
# initialize fringes, start with forward from X
forward_fringe = [(x, None)]
reverse_fringe = [(y, None)]
while forward_fringe and reverse_fringe:
if len(forward_fringe) <= len(reverse_fringe):
if self.verbosity > 1:
print("Walk from X since len(X_fringe)=%d "
"<= len(Y_fringe)=%d" % (len(forward_fringe),
len(reverse_fringe)))
this_level = forward_fringe
forward_fringe = []
(found_path, forward_fringe, pred,
succ) = _walk_fringe(this_level, forward_fringe, pred,
succ)
# print(pred)
if found_path: return True
else:
if self.verbosity > 1:
print("Walk from Y since len(X_fringe)=%d "
"> len(Y_fringe)=%d" % (len(forward_fringe),
len(reverse_fringe)))
this_level = reverse_fringe
reverse_fringe = []
(found_path, reverse_fringe, succ,
pred) = _walk_fringe(this_level, reverse_fringe, succ,
pred)
if found_path: return True
if self.verbosity > 1:
print("X_fringe = %s \n" % str(forward_fringe) +
"Y_fringe = %s" % str(reverse_fringe))
return False
def _is_dsep(self, X, Y, Z, max_lag=None, compute_ancestors=False):
"""Returns whether X and Y are d-separated given Z in the graph.
X, Y, Z are of the form (var, lag) for lag <= 0. D-separation is
based on:
1. Assessing maximum time lag max_lag of last ancestor of any X, Y, Z
with non-blocked (by Z), non-repeating directed path towards X, Y, Z
in the graph. 'non_repeating' means that an ancestor X^i_{ t-\tau_i}
with link X^i_{t-\tau_i} --> X^j_{ t-\tau_j} is only included if
X^i_{t'-\tau_i} --> X^j_{ t'-\tau_j} for t'!=t is not already part of
the ancestors.
2. Using the time series graph truncated at max_lag we then test
d-separation between X and Y conditional on Z using breadth-first
search of non-blocked paths according to d-separation rules.
Optionally makes available the ancestors up to max_lag of X, Y,
Z. This may take a very long time, however.
Parameters
----------
X, Y, Z : list of tuples
List of variables chosen for current independence test.
max_lag : int, optional (default: None)
Used here to constrain the _is_dsep function to the graph
truncated at max_lag instead of identifying the max_lag from
ancestral search.
compute_ancestors : bool
Whether to also make available the ancestors for X, Y, Z as
self.anc_all_x, self.anc_all_y, and self.anc_all_z, respectively.
Returns
-------
dseparated : bool
True if X and Y are d-separated given Z in the graph.
"""
N = len(self.link_coeffs)
if self.verbosity > 0:
print("Testing X=%s d-sep Y=%s given Z=%s in TSG" % (X, Y, Z))
if max_lag is not None:
# max_lags = dict([(j, max_lag) for j in range(N)])
if self.verbosity > 0:
print("Set max. time lag to: ", max_lag)
else:
# Get maximum non-repeated ancestral time lag
_, max_lag_X = self._get_non_blocked_ancestors(X, conds=Z,
mode='non_repeating')
_, max_lag_Y = self._get_non_blocked_ancestors(Y, conds=Z,
mode='non_repeating')
_, max_lag_Z = self._get_non_blocked_ancestors(Z, conds=Z,
mode='non_repeating')
# Get max time lag among the ancestors
max_lag = max(max_lag_X, max_lag_Y, max_lag_Z)
if self.verbosity > 0:
print("Max. non-repeated ancestral time lag: ", max_lag)
# Store overall max. lag
self.max_lag = max_lag
# _has_any_path is the main function that searches open paths
any_path = self._has_any_path(X, Y, conds=Z, max_lag=max_lag)
if self.verbosity > 0:
print("_has_any_path = ", any_path)
if any_path:
dseparated = False
else:
dseparated = True
if compute_ancestors:
if self.verbosity > 0:
print("Compute ancestors.")
# Get ancestors up to maximum ancestral time lag incl. repeated
# links
self.anc_all_x, _ = self._get_non_blocked_ancestors(X, conds=Z,
mode='max_lag', max_lag=max_lag)
self.anc_all_y, _ = self._get_non_blocked_ancestors(Y, conds=Z,
mode='max_lag', max_lag=max_lag)
self.anc_all_z, _ = self._get_non_blocked_ancestors(Z, conds=Z,
mode='max_lag', max_lag=max_lag)
return dseparated
def run_test(self, X, Y, Z=None, tau_max=0, cut_off='2xtau_max',
compute_ancestors=False,
verbosity=0):
"""Perform oracle conditional independence test.
Calls the d-separation function.
Parameters
----------
X, Y, Z : list of tuples
X,Y,Z are of the form [(var, -tau)], where var specifies the
variable index in the observed_vars and tau the time lag.
tau_max : int, optional (default: 0)
Not used here.
cut_off : {'2xtau_max', 'max_lag', 'max_lag_or_tau_max'}
Not used here.
Returns
-------
val, pval : Tuple of floats
The test statistic value and the p-value.
"""
# Translate from observed_vars index to full variable set index
X = [(self.observed_vars[x[0]], x[1]) for x in X]
Y = [(self.observed_vars[y[0]], y[1]) for y in Y]
Z = [(self.observed_vars[z[0]], z[1]) for z in Z]
# Get the array to test on
X, Y, Z = self._check_XYZ(X, Y, Z)
if not str((X, Y, Z)) in self.dsepsets:
self.dsepsets[str((X, Y, Z))] = self._is_dsep(X, Y, Z,
max_lag=None,
compute_ancestors=compute_ancestors)
if self.dsepsets[str((X, Y, Z))]:
val = 0.
pval = 1.
else:
val = 1.
pval = 0.
if verbosity > 1:
self._print_cond_ind_results(val=val, pval=pval, cached=False,
conf=None)
# Return the value and the pvalue
return val, pval
def get_measure(self, X, Y, Z=None, tau_max=0):
"""Returns dependence measure.
Returns 0 if X and Y are d-separated given Z in the graph and 1 else.
Parameters
----------
X, Y [, Z] : list of tuples
X,Y,Z are of the form [(var, -tau)], where var specifies the
variable index in the observed_vars and tau the time lag.
tau_max : int, optional (default: 0)
Maximum time lag. This may be used to make sure that estimates for
different lags in X, Z, all have the same sample size.
Returns
-------
val : float
The test statistic value.
"""
# Translate from observed_vars index to full variable set index
X = [(self.observed_vars[x[0]], x[1]) for x in X]
Y = [(self.observed_vars[y[0]], y[1]) for y in Y]
Z = [(self.observed_vars[z[0]], z[1]) for z in Z]
# Check XYZ
        X, Y, Z = self._check_XYZ(X, Y, Z)
if not str((X, Y, Z)) in self.dsepsets:
self.dsepsets[str((X, Y, Z))] = self._is_dsep(X, Y, Z,
max_lag=None)
if self.dsepsets[str((X, Y, Z))]:
return 0.
else:
return 1.
def _print_cond_ind_results(self, val, pval=None, cached=None, conf=None):
"""Print results from conditional independence test.
Parameters
----------
val : float
            Test statistic value.
pval : float, optional (default: None)
p-value
conf : tuple of floats, optional (default: None)
Confidence bounds.
"""
printstr = " val = %.3f" % (val)
if pval is not None:
printstr += " | pval = %.5f" % (pval)
if conf is not None:
printstr += " | conf bounds = (%.3f, %.3f)" % (
conf[0], conf[1])
if cached is not None:
printstr += " %s" % ({0: "", 1: "[cached]"}[cached])
print(printstr)
def get_model_selection_criterion(self, j, parents, tau_max=0):
"""
Base class assumption that this is not implemented. Concrete classes
should override when possible.
"""
raise NotImplementedError("Model selection not" + \
" implemented for %s" % self.measure)
def _get_minmax_lag(links):
"""Helper function to retrieve tau_min and tau_max from links
"""
N = len(links)
# Get maximum time lag
min_lag = np.inf
max_lag = 0
for j in range(N):
for link_props in links[j]:
var, lag = link_props[0]
coeff = link_props[1]
# func = link_props[2]
if coeff != 0.:
min_lag = min(min_lag, abs(lag))
max_lag = max(max_lag, abs(lag))
return min_lag, max_lag
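# Hedged illustration, added for clarity (not part of the original module): a minimal
# sketch of the links format that _get_minmax_lag expects, i.e.
# {j: [((i, -tau), coeff, func), ...]}. The helper name _example_minmax_lag is hypothetical.
def _example_minmax_lag():
    links = {0: [((0, -1), 0.5, lambda x: x)],
             1: [((0, -2), 0.3, lambda x: x), ((1, -1), 0.4, lambda x: x)]}
    min_lag, max_lag = _get_minmax_lag(links)
    assert (min_lag, max_lag) == (1, 2)
    return min_lag, max_lag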
def get_oracle_pag_from_dag(links_coeffs, observed_vars=None, tau_max=None, verbosity=0):
"""Computes PAG over observed variables from DAG on full variable set.
Uses OracleCI tests based on ancestors in DAG to obtain skeleton and sepsets.
Then applies FCI rules (including collider rule).
"""
if verbosity > 0:
print("Running _get_pag_from_dag:\n\n1. Ancestors search")
N_all = len(links_coeffs)
# If tau_max is None, compute from links_coeffs
_, max_lag_links = _get_minmax_lag(links_coeffs)
if tau_max is None:
tau_max = max_lag_links
else:
if max_lag_links > tau_max:
raise ValueError("tau_max must be >= maximum lag in links_coeffs; choose tau_max=None")
if observed_vars is None:
observed_vars = range(N_all)
else:
if not set(observed_vars).issubset(set(range(N_all))):
raise ValueError("observed_vars must be subset of range(N_all).")
N = len(observed_vars)
# Init cond_ind_test class
cond_ind_test = OracleCI(links_coeffs)
# Init graph and sepsets
graph_dict = {j: {(i, -tau): "o-o" for i in range(N) for tau in range(tau_max + 1) if tau > 0 or j != i} for j in
range(N)}
sepsets = {j: {(i, -tau): {} for i in range(N) for tau in range(tau_max + 1) if (tau > 0 or i < j)} for j in
range(N)}
sepset_answers = {}
# We will enumerate the observed variables with (i,j) which refers to the index in pag_graph
    # while x, y iterate through the observed variables in the underlying DAG
# Loop over the observed variables
for j, y in enumerate(observed_vars):
for i, x in enumerate(observed_vars):
for tau in range(0, tau_max + 1):
if (x, -tau) != (y, 0):
dag_anc_y, _ = cond_ind_test._get_non_blocked_ancestors(Y=[(y, 0)], conds=None,
mode='max_lag',
max_lag=tau_max)
# Only consider observed ancestors
pag_anc_y = [anc for anc in dag_anc_y[(y, 0)]
if anc[0] in observed_vars]
dag_anc_x, _ = cond_ind_test._get_non_blocked_ancestors(Y=[(x, -tau)],
conds=None, mode='max_lag',
max_lag=tau_max)
# Only consider observed ancestors
pag_anc_x = [anc for anc in dag_anc_x[(x, -tau)]
if anc[0] in observed_vars]
Z = list(set([z for z in pag_anc_y + pag_anc_x if z != (y, 0) and z != (x, -tau)]))
separated = cond_ind_test._is_dsep(X=[(x, -tau)], Y=[(y, 0)], Z=Z, max_lag=None)
# If X and Y are connected given Z, mark a link
if not separated and tau == 0:
graph_dict[j][(i, -tau)] = "o-o"
elif not separated and tau > 0:
graph_dict[j][(i, -tau)] = "o->"
# If X and Y are separated given Z, mark absence of links and store sepset
else:
graph_dict[j][(i, -tau)] = ""
# Translate sepset to (i,j)-space
S = frozenset((observed_vars.index(cond[0]), cond[1]) for cond in Z)
# sepsets[j][(i, -tau)] = {(S, "wm")}
sepsets[j][(i, -tau)] = {(S, "")}
if tau == 0:
# sepsets[i][(j, 0)] = {(S, "wm")}
sepsets[i][(j, 0)] = {(S, "")}
if tau > 0 or (tau == 0 and i < j):
X_type = (i, -tau)
Y_type = (j, 0)
else:
X_type = (j, 0)
Y_type = (i, 0)
for s in S:
sepset_answers[(X_type, s, Y_type)] = False
for k, tau in product(range(N), range(0, tau_max + 1)):
if sepset_answers.get((X_type, (k, -tau), Y_type)) is None:
sepset_answers[(X_type, (k, - tau), Y_type)] = True
if verbosity > 0:
print("2. FCI orientation rules")
# Initialize SVARFCI with dummy data
svarfci = SVARFCI(dataframe=pp.DataFrame(np.zeros((N + 1, N))), cond_ind_test=cond_ind_test)
svarfci._initialize(tau_max=tau_max, pc_alpha=0.01, max_cond_px=np.inf, max_p_global=np.inf, max_p_dsep=np.inf,
max_q_global=np.inf, max_pds_set=np.inf, fix_all_edges_before_final_orientation=False,
verbosity=verbosity)
svarfci._oracle = True
# Update graph_dict and sepsets
svarfci.graph_dict = graph_dict
svarfci.sepsets = sepsets
# Run *all* rules
svarfci._B_not_in_SepSet_AC_given_answers = sepset_answers
svarfci._run_fci_orientation_phase()
# Also return array version of pag graph
pag_graph = svarfci._dict2graph()
svarfci_graph_dict = svarfci.graph_dict
return svarfci_graph_dict, pag_graph
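# Hedged usage sketch, added for illustration (not part of the original module): how
# get_oracle_pag_from_dag might be called on a small linear ground-truth model in the
# links format used by _get_minmax_lag above, treating variable 2 as latent. The helper
# name _example_oracle_pag is hypothetical.
def _example_oracle_pag():
    def lin(x):
        return x
    links_coeffs = {0: [((0, -1), 0.5, lin)],
                    1: [((0, -1), 0.6, lin), ((2, 0), 0.4, lin)],
                    2: [((2, -1), 0.5, lin)]}
    graph_dict, pag_graph = get_oracle_pag_from_dag(links_coeffs,
                                                    observed_vars=[0, 1],
                                                    tau_max=1)
    return graph_dict, pag_graph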
def compute_f1_score(precision, recall):
f1 = 2 * (precision * recall) / (precision + recall)
return f1
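# Hedged illustration, added for clarity (not part of the original module): the harmonic
# mean above, e.g. precision 0.8 and recall 0.5 give roughly 0.615. The helper name
# _example_f1 is hypothetical.
def _example_f1():
    f1 = compute_f1_score(0.8, 0.5)
    assert abs(f1 - 2 * 0.8 * 0.5 / (0.8 + 0.5)) < 1e-12
    return f1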
| 32,514 | 38.700855 | 117 |
py
|
correlate
|
correlate-master/causal_discovery/LPCMCI/generate_data_mod.py
|
from collections import defaultdict
import numpy as np
def check_stationarity(links):
"""Returns stationarity according to a unit root test
Assuming a Gaussian Vector autoregressive process
Three conditions are necessary for stationarity of the VAR(p) model:
- Absence of mean shifts;
- The noise vectors are identically distributed;
- Stability condition on Phi(t-1) coupling matrix (stabmat) of VAR(1)-version of VAR(p).
"""
N = len(links)
# Check parameters
max_lag = 0
for j in range(N):
for link_props in links[j]:
var, lag = link_props[0]
# coeff = link_props[1]
# coupling = link_props[2]
max_lag = max(max_lag, abs(lag))
graph = np.zeros((N, N, max_lag))
couplings = []
for j in range(N):
for link_props in links[j]:
var, lag = link_props[0]
coeff = link_props[1]
coupling = link_props[2]
if abs(lag) > 0:
graph[j, var, abs(lag) - 1] = coeff
couplings.append(coupling)
stabmat = np.zeros((N * max_lag, N * max_lag))
index = 0
for i in range(0, N * max_lag, N):
stabmat[:N, i:i + N] = graph[:, :, index]
if index < max_lag - 1:
stabmat[i + N:i + 2 * N, i:i + N] = np.identity(N)
index += 1
eig = np.linalg.eig(stabmat)[0]
# print "----> maxeig = ", np.abs(eig).max()
if np.all(np.abs(eig) < 1.):
stationary = True
else:
stationary = False
if len(eig) == 0:
return stationary, 0.
else:
return stationary, np.abs(eig).max()
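# Hedged illustration, added for clarity (not part of the original module): for a single
# AR(1) process x_t = a * x_{t-1} + noise the stability matrix reduces to [a], so
# check_stationarity returns True exactly when |a| < 1. The helper name is hypothetical.
def _example_check_stationarity():
    def lin(x):
        return x
    assert check_stationarity({0: [((0, -1), 0.7, lin)]})[0] is True
    assert check_stationarity({0: [((0, -1), 1.1, lin)]})[0] is False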
class Graph():
def __init__(self, vertices):
self.graph = defaultdict(list)
self.V = vertices
def addEdge(self, u, v):
self.graph[u].append(v)
def isCyclicUtil(self, v, visited, recStack):
# Mark current node as visited and
# adds to recursion stack
visited[v] = True
recStack[v] = True
# Recur for all neighbours
# if any neighbour is visited and in
# recStack then graph is cyclic
for neighbour in self.graph[v]:
if not visited[neighbour]:
if self.isCyclicUtil(neighbour, visited, recStack):
return True
elif recStack[neighbour]:
return True
        # The node needs to be popped from the
# recursion stack before function ends
recStack[v] = False
return False
# Returns true if graph is cyclic else false
def isCyclic(self):
visited = [False] * self.V
recStack = [False] * self.V
for node in range(self.V):
if visited[node] == False:
if self.isCyclicUtil(node, visited, recStack) == True:
return True
return False
# Returns true if graph is cyclic else false
def get_cycle_nodes(self):
cycle_nodes = []
visited = [False] * self.V
recStack = [False] * self.V
for node in range(self.V):
if not visited[node]:
if self.isCyclicUtil(node, visited, recStack):
cycle_nodes.append(node)
return cycle_nodes
# A recursive function used by topologicalSort
def topologicalSortUtil(self, v, visited, stack):
# Mark the current node as visited.
visited[v] = True
# Recur for all the vertices adjacent to this vertex
for i in self.graph[v]:
if visited[i] == False:
self.topologicalSortUtil(i, visited, stack)
# Push current vertex to stack which stores result
stack.insert(0, v)
# The function to do Topological Sort. It uses recursive
# topologicalSortUtil()
def topologicalSort(self):
# Mark all the vertices as not visited
visited = [False] * self.V
stack = []
# Call the recursive helper function to store Topological
# Sort starting from all vertices one by one
for i in range(self.V):
if visited[i] == False:
self.topologicalSortUtil(i, visited, stack)
return stack
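# Hedged illustration, added for clarity (not part of the original module): the Graph
# helper above is used to reject cyclic contemporaneous structures and to obtain a causal
# order. The helper name _example_contemp_dag is hypothetical.
def _example_contemp_dag():
    dag = Graph(3)
    dag.addEdge(0, 1)  # 0 -> 1
    dag.addEdge(1, 2)  # 1 -> 2
    assert dag.isCyclic() is False
    return dag.topologicalSort()  # [0, 1, 2]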
def generate_nonlinear_contemp_timeseries(links, T, noises=None, random_state=None, ts_old=None,
intervention_variable=None,
intervention_value=None):
# chrei if ts_old not specified (i.e. during stationarity check),
    # behave like it's the first time and don't initialize with last values
if ts_old is None:
ts_old = []
# random_state
if random_state is None:
random_state = np.random
    # links must be {j: [((i, -tau), coeff, func), ...], ...}
# coeff is coefficient
# func is a function f(x) that becomes linear ~x in limit
# noises is a random_state.___ function
N = len(links.keys())
if noises is None:
noises = [random_state.randn for j in range(N)]
if noises != 'without' and (N != max(links.keys()) + 1 or N != len(noises)):
raise ValueError("links and noises keys must match N.")
# Check parameters
max_lag = 0
contemp_dag = Graph(N)
for j in range(N):
for link_props in links[j]:
var, lag = link_props[0]
coeff = link_props[1]
func = link_props[2]
if lag == 0:
contemp = True
if var not in range(N):
raise ValueError("var must be in 0..{}.".format(N - 1))
if 'float' not in str(type(coeff)):
raise ValueError("coeff must be float.")
if lag > 0 or type(lag) != int:
raise ValueError("lag must be non-positive int.")
max_lag = max(max_lag, abs(lag))
# Create contemp DAG
if var != j and lag == 0:
contemp_dag.addEdge(var, j)
if contemp_dag.isCyclic() == 1:
# raise ValueError("Contemporaneous links must not contain cycle.")
return None # chrei
causal_order = contemp_dag.topologicalSort()
len_ts_old = len(ts_old)
# zeros ini
if noises != 'without':
X = np.zeros((T + max_lag, N), dtype='float32')
else:
X = np.ones((T + max_lag, N), dtype='float32')
# add noises
if noises != 'without':
for j in range(N):
X[:, j] = noises[j](T + max_lag)
    # chrei: replace the first max_lag rows of X with the last max_lag rows of ts_old
if len_ts_old > 0 and max_lag > 0:
X[:max_lag] = ts_old[-max_lag:]
for t in range(max_lag, T + max_lag): # for all time steps
for j in causal_order: # for all affected variables j ( in causal order)
# if j is intervened, set value to intervention_value
if j == intervention_variable:
X[t, j] = intervention_value
# else, j is not intervened, and compute value
else:
for link_props in links[j]: # for links affecting j
var, lag = link_props[0] # var name, lag
# if abs(lag) > 0:
coeff = link_props[1]
func = link_props[2]
base_value = X[t + lag, var]
val_to_add = coeff * func(base_value)
old_val = X[t, j]
new_value = old_val + val_to_add
X[t, j] = new_value # add value on noise for var j and time t
    # chrei: remove the first max_lag rows because they were only added for initialization
X = X[max_lag:]
return X
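# Hedged usage sketch, added for illustration (not part of the original module): generate
# a short series from the links format {j: [((i, -tau), coeff, func), ...]} while holding
# variable 1 fixed via the intervention arguments. The helper name _example_generate_ts
# is hypothetical.
def _example_generate_ts():
    def lin(x):
        return x
    links = {0: [((0, -1), 0.5, lin)],
             1: [((0, 0), 0.4, lin), ((1, -1), 0.3, lin)]}
    random_state = np.random.RandomState(0)
    noises = [random_state.randn for _ in range(2)]
    X = generate_nonlinear_contemp_timeseries(links, T=50, noises=noises,
                                              random_state=random_state,
                                              intervention_variable=1,
                                              intervention_value=2.0)
    return X  # shape (50, 2); column 1 is held at the intervention value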
def check_stationarity_chr(X, links):
if X is None:
return True # nonstationary = True
if (check_stationarity(links)[0] == False or
np.any(np.isnan(X)) or
np.any(np.isinf(X)) or
# np.max(np.abs(X)) > 1.e4 or
np.any(np.abs(np.triu(np.corrcoef(X, rowvar=0), 1)) > 0.999)):
nonstationary = True
else:
nonstationary = False
return nonstationary
def generate_random_contemp_model(N, L,
coupling_coeffs,
coupling_funcs,
auto_coeffs,
tau_max,
contemp_fraction=0.,
# num_trials=1000,
random_state=None):
def lin(x):
return x
if random_state is None:
random_state = np.random
# print links
a_len = len(auto_coeffs)
if type(coupling_coeffs) == float:
coupling_coeffs = [coupling_coeffs]
c_len = len(coupling_coeffs)
func_len = len(coupling_funcs)
if tau_max == 0:
contemp_fraction = 1.
if contemp_fraction > 0.:
contemp = True
L_lagged = int((1. - contemp_fraction) * L)
L_contemp = L - L_lagged
if L == 1:
# Randomly assign a lagged or contemp link
L_lagged = random_state.randint(0, 2)
L_contemp = int(L_lagged == False)
else:
contemp = False
L_lagged = L
L_contemp = 0
# for ir in range(num_trials):
# Random order
causal_order = list(random_state.permutation(N))
links = dict([(i, []) for i in range(N)])
# Generate auto-dependencies at lag 1
if tau_max > 0:
for i in causal_order:
a = auto_coeffs[random_state.randint(0, a_len)]
if a != 0.:
links[i].append(((int(i), -1), float(a), lin))
chosen_links = []
# Create contemporaneous DAG
contemp_links = []
for l in range(L_contemp):
cause = random_state.choice(causal_order[:-1])
effect = random_state.choice(causal_order)
while (causal_order.index(cause) >= causal_order.index(effect)
or (cause, effect) in chosen_links):
cause = random_state.choice(causal_order[:-1])
effect = random_state.choice(causal_order)
contemp_links.append((cause, effect))
chosen_links.append((cause, effect))
# Create lagged links (can be cyclic)
lagged_links = []
for l in range(L_lagged):
cause = random_state.choice(causal_order)
effect = random_state.choice(causal_order)
while (cause, effect) in chosen_links or cause == effect:
cause = random_state.choice(causal_order)
effect = random_state.choice(causal_order)
lagged_links.append((cause, effect))
chosen_links.append((cause, effect))
# print(chosen_links)
# print(contemp_links)
for (i, j) in chosen_links:
# Choose lag
if (i, j) in contemp_links:
tau = 0
else:
tau = int(random_state.randint(1, tau_max + 1))
# print tau
        # Choose coupling
c = float(coupling_coeffs[random_state.randint(0, c_len)])
if c != 0:
func = coupling_funcs[random_state.randint(0, func_len)]
links[j].append(((int(i), -tau), c, func))
# # Stationarity check assuming model with linear dependencies at least for large x
# # if check_stationarity(links)[0]:
# # return links
# X, nonstat = generate_nonlinear_contemp_timeseries(links,
# T=10000, noises=None, random_state=None)
# if nonstat == False:
# return links
# else:
# print("Trial %d: Not a stationary model" % ir)
# print("No stationary models found in {} trials".format(num_trials))
return links
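# Hedged usage sketch, added for illustration (not part of the original module): draw a
# random ground-truth model with 5 variables and 5 cross-links, where contemp_fraction
# requests roughly a quarter of them to be contemporaneous. The helper name
# _example_random_model is hypothetical.
def _example_random_model():
    links = generate_random_contemp_model(N=5, L=5,
                                          coupling_coeffs=[0.3, -0.3],
                                          coupling_funcs=[lambda x: x],
                                          auto_coeffs=[0.4, 0.5],
                                          tau_max=2,
                                          contemp_fraction=0.25,
                                          random_state=np.random.RandomState(42))
    return links  # dict {j: [((i, -tau), coeff, func), ...]}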
# def generate_logistic_maps(N, T, links, noise_lev):
#
# # Check parameters
# # contemp = False
# max_lag = 0
# for j in range(N):
# for link_props in links[j]:
# var, lag = link_props[0]
# max_lag = max(max_lag, abs(lag))
#
# transient = int(.2*T)
#
# # Chaotic logistic map parameter
# r = 4.
#
# X = np.random.rand(T+transient, N)
#
# for t in range(max_lag, T+transient):
# for j in range(N):
# added_input = 0.
# for link_props in links[j]:
# var, lag = link_props[0]
# if var != j and abs(lag) > 0:
# coeff = link_props[1]
# coupling = link_props[2]
# added_input += coeff*X[t - abs(lag), var]
#
# X[t, j] = (X[t-1, j] * (r - r*X[t-1, j] - added_input + noise_lev*np.random.rand())) % 1
# #func(coeff, X[t+lag, var], coupling)
#
# X = X[transient:]
#
# if np.any(np.abs(X) == np.inf) or np.any(X == np.nan):
# raise ValueError("Data divergent")
# return X
def weighted_avg_and_std(values, axis, weights):
"""Returns the weighted average and standard deviation.
Parameters
---------
values : array
Data array of shape (time, variables).
axis : int
Axis to average/std about
weights : array
Weight array of shape (time, variables).
Returns
-------
(average, std) : tuple of arrays
Tuple of weighted average and standard deviation along axis.
"""
values[np.isnan(values)] = 0.
average = np.ma.average(values, axis=axis, weights=weights)
variance = np.sum(weights * (values - np.expand_dims(average, axis)
) ** 2, axis=axis) / weights.sum(axis=axis)
return (average, np.sqrt(variance))
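# Hedged illustration, added for clarity (not part of the original module): weights that
# zero out the last sample give a weighted mean of 1.5 for [1, 2, 3]. The helper name
# _example_weighted_avg is hypothetical.
def _example_weighted_avg():
    values = np.array([[1.], [2.], [3.]])
    weights = np.array([[1.], [1.], [0.]])
    avg, std = weighted_avg_and_std(values, axis=0, weights=weights)
    assert np.isclose(avg[0], 1.5)
    return avg, std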
def time_bin_with_mask(data, time_bin_length, sample_selector=None):
"""Returns time binned data where only about non-masked values is averaged.
Parameters
----------
data : array
Data array of shape (time, variables).
time_bin_length : int
Length of time bin.
    sample_selector : array, optional (default: None)
        Selector array of the same shape as data, used as weights; samples with
        zero weight are excluded from the bin averages.
Returns
-------
(bindata, T) : tuple of array and int
Tuple of time-binned data array and new length of array.
"""
T = len(data)
time_bin_length = int(time_bin_length)
if sample_selector is None:
sample_selector = np.ones(data.shape)
if np.ndim(data) == 1.:
data.shape = (T, 1)
sample_selector.shape = (T, 1)
bindata = np.zeros(
(T // time_bin_length,) + data.shape[1:], dtype="float32")
for index, i in enumerate(range(0, T - time_bin_length + 1,
time_bin_length)):
# print weighted_avg_and_std(fulldata[i:i+time_bin_length], axis=0,
# weights=sample_selector[i:i+time_bin_length])[0]
bindata[index] = weighted_avg_and_std(data[i:i + time_bin_length],
axis=0,
weights=sample_selector[i:i +
time_bin_length])[0]
T, grid_size = bindata.shape
return (bindata.squeeze(), T)
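# Hedged illustration, added for clarity (not part of the original module): averaging a
# series of length 6 into bins of length 2 yields the 3 bin means [0.5, 2.5, 4.5]. The
# helper name _example_time_bin is hypothetical.
def _example_time_bin():
    data = np.arange(6, dtype="float32").reshape(6, 1)
    binned, T_new = time_bin_with_mask(data, time_bin_length=2)
    assert T_new == 3
    return binned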
| 15,012 | 30.606316 | 124 |
py
|
correlate
|
correlate-master/causal_discovery/LPCMCI/observational_discovery.py
|
from time import time
import numpy as np
from tigramite import data_processing as pp
from tigramite.independence_tests import ParCorr
from tigramite.pcmci import PCMCI
from tigramite import plotting as tp
import matplotlib.pyplot as plt
from causal_discovery.LPCMCI.lpcmci import LPCMCI
from config import causal_discovery_on, tau_max, private_folder_path, LPCMCI_or_PCMCI, remove_link_threshold, verbosity, \
verbosity_thesis, show_plots
# function that saves val_min, graph, and var_names to a file
from intervention_proposal.get_intervention import plot_graph
def save_results(val_min, graph, var_names, name_extension):
np.save(str(private_folder_path) + 'val_min_' + str(name_extension), val_min)
np.save(str(private_folder_path) + 'graph_' + str(name_extension), graph)
np.save(str(private_folder_path) + 'var_names_' + str(name_extension), var_names)
def if_intervened_replace_with_nan(ts, was_intervened):
# iterate over all rows and columns of ts
for i in range(len(ts)):
for j in range(len(ts.columns)):
if was_intervened.iloc[i, j]:
ts.iloc[i, j] = np.NaN
return ts
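# Hedged illustration, added for clarity (not part of the original module): the one
# intervened sample is replaced by NaN so that it is skipped by the conditional
# independence tests. The helper name _example_mask_interventions is hypothetical.
def _example_mask_interventions():
    import pandas as pd
    ts = pd.DataFrame({'0': [1.0, 2.0, 3.0], '1': [4.0, 5.0, 6.0]})
    was_intervened = pd.DataFrame({'0': [False, True, False], '1': [False, False, False]})
    masked = if_intervened_replace_with_nan(ts.copy(), was_intervened)
    assert np.isnan(masked.iloc[1, 0])
    return masked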
def external_independencies_var_names_to_int(external_independencies, measured_label_to_idx):
if external_independencies is not None and len(external_independencies) > 0:
for independency_idx in range(len(external_independencies)):
lst = list(external_independencies[independency_idx])
lst[0] = measured_label_to_idx[
external_independencies[independency_idx][0]]
lst[1] = measured_label_to_idx[
external_independencies[independency_idx][1]]
external_independencies[independency_idx] = tuple(lst)
return external_independencies
def observational_causal_discovery(df, was_intervened, external_independencies, external_dependencies, measured_label_to_idx, pc_alpha):
"""
1. get observational ts
    2. initialize graph with previous pag_edgemarks and pag_effect_sizes
    3. reduce pag_edgemarks with observational data and update pag_effect_sizes
return: pag_edgemarks, pag_effect_sizes
"""
if causal_discovery_on:
""" below code is only needed for real world data"""
"""get non_zero_indices"""
"""
# non_zero_inices = pd.read_csv(str(private_folder_path) + 'results.csv', index_col=0)
# # of non_zero_inices get column called 'ref_coeff_widestk=5'
# non_zero_inices = non_zero_inices.loc[:, 'reg_coeff_widestk=5']
# # drop all rows with 0 in non_zero_inices
# non_zero_inices = non_zero_inices[non_zero_inices != 0]
# # detete all rows with nans in non_zero_inices
# non_zero_inices = non_zero_inices.dropna().index
# TODO: automatic non_zero_indices don't work yet below is hardcoded
# non_zero_inices = ['Mood', 'HumidInMax()', 'NoiseMax()', 'HeartPoints', 'Steps']
# select columns
# df = df[non_zero_inices]
# df.reset_index(level=0, inplace=True)
# df = remove_nan_seq_from_top_and_bot(df)
# df = non_contemporary_time_series_generation(df) # todo, how to automate on and off
# df = df.drop(['Date'], axis=1) # drop date col
"""
# measure how long observational_causal_discovery takes
start_time = time()
# handle interventions: in df set value to NaN if it was intervened
# during CI tests nans are then excluded
df = if_intervened_replace_with_nan(df, was_intervened)
# # standardize data
df -= df.mean(axis=0)
df /= df.std(axis=0)
var_names = df.columns
dataframe = pp.DataFrame(df.values, datatime=np.arange(len(df)),
var_names=var_names)
external_independencies = external_independencies_var_names_to_int(external_independencies,
measured_label_to_idx)
external_dependencies = external_independencies_var_names_to_int(external_dependencies,
measured_label_to_idx)
if LPCMCI_or_PCMCI:
lpcmci = LPCMCI(
dataframe=dataframe,
cond_ind_test=ParCorr(
significance='analytic',
recycle_residuals=True))
lpcmci.run_lpcmci(
external_independencies=external_independencies,
external_dependencies=external_dependencies,
tau_max=tau_max,
pc_alpha=pc_alpha,
max_p_non_ancestral=2, # todo 3
n_preliminary_iterations=1, # todo 4
prelim_only=False,
verbosity=verbosity)
graph = lpcmci.graph
val_min = lpcmci.val_min_matrix
"""test if works as expected""" # todo test for dependencies
if external_independencies is not None and len(external_independencies) > 0:
for exi in external_independencies:
exi = list(exi)
forward_arrow = graph[exi[1], exi[0], exi[2]]
assert forward_arrow == "" or forward_arrow[0] == "<"
# symmetric for contemporaneous links
if exi[2] == 0:
backward_arrow = graph[exi[0], exi[1], exi[2]]
assert backward_arrow == "" or backward_arrow[2] == ">"
if external_dependencies is not None and len(external_dependencies) > 0:
for exi in external_dependencies:
exi = list(exi)
forward_arrow = graph[exi[1], exi[0], exi[2]]
assert forward_arrow == "" or forward_arrow[0] == "-"
assert forward_arrow == "" or forward_arrow[2] == ">"
# symmetric for contemporaneous links
if exi[2] == 0:
backward_arrow = graph[exi[0], exi[1], exi[2]]
assert backward_arrow == "" or backward_arrow[2] == "-"
assert backward_arrow == "" or backward_arrow[0] == "<"
else:
"""pcmci"""
pcmci = PCMCI(
dataframe=dataframe,
cond_ind_test=ParCorr(significance='analytic'),
verbosity=verbosity)
results = pcmci.run_pcmciplus(tau_min=0, tau_max=tau_max, pc_alpha=pc_alpha)
# q_matrix = pcmci.get_corrected_pvalues(p_matrix=results['p_matrix'], fdr_method='fdr_bh',
# exclude_contemporaneous=False)
graph = results['graph']
val_min = results['val_matrix']
# remove links if are below threshold
graph[abs(val_min) < remove_link_threshold] = ""
val_min[graph == ""] = 0
# plot predicted PAG
if verbosity_thesis > 0 and show_plots == True:
tp.plot_graph(
val_matrix=val_min,
link_matrix=graph,
var_names=var_names,
link_colorbar_label='current LPCMCI estimate. day'+str(df.shape[0]),
node_colorbar_label='auto-MCI',
figsize=(10, 6),
)
plt.show()
# # save results
# save_results(val_min, graph, var_names, 'simulated')
        # measure how long observational_causal_discovery takes
end_time = time()
if verbosity_thesis > 9:
print('observational_causal_discovery took: ', end_time - start_time)
return val_min, graph
# load ts dataframe from file
#
# filename = os.path.abspath("./tmp_test.dat")
# ts = pd.read_csv(filename, index_col=0)
#
# # get last row of ts and append to ts
# ts = ts.append(ts.iloc[-1])
#
# ## load was_intervened dataframe from file
# filename = os.path.abspath("./tmp_was_intervened.dat")
# was_intervened = pd.read_csv(filename, index_col=0)
# measured_labels, measured_label_to_idx, unmeasured_labels_strs = get_measured_labels(n_vars_all, random_state, frac_latents)
# external_independencies = [('2', '0', 0), ('2', '1', 0), ('2', '6', 0)]
#
# pag_effect_sizes, pag_edgemarks = observational_causal_discovery(
# external_independencies=external_independencies,
# df=ts,
# was_intervened=was_intervened.copy(),
# measured_label_to_idx=measured_label_to_idx)
#
# print()
| 8,472 | 41.365 | 136 |
py
|
correlate
|
correlate-master/causal_discovery/LPCMCI/metrics_mod.py
|
import numpy as np
def get_masks(true_graphs):
n_realizations, N, N, taumaxplusone = true_graphs.shape
tau_max = taumaxplusone - 1
cross_mask = np.repeat(np.identity(N).reshape(N, N, 1) == False, tau_max + 1, axis=2).astype('bool')
cross_mask[range(N), range(N), 0] = False
contemp_cross_mask_tril = np.zeros((N, N, tau_max + 1)).astype('bool')
contemp_cross_mask_tril[:, :, 0] = np.tril(np.ones((N, N)), k=-1).astype('bool')
lagged_mask = np.ones((N, N, tau_max + 1)).astype('bool')
lagged_mask[:, :, 0] = 0
# auto_mask = np.ones((N,N,tau_max + 1)).astype('bool')
auto_mask = lagged_mask * (cross_mask == False)
any_mask = np.ones((N, N, tau_max + 1)).astype('bool')
any_mask[:, :, 0] = contemp_cross_mask_tril[:, :, 0]
cross_mask = np.repeat(cross_mask.reshape(1, N, N, tau_max + 1), n_realizations, axis=0)
contemp_cross_mask_tril = np.repeat(contemp_cross_mask_tril.reshape(1, N, N, tau_max + 1), n_realizations, axis=0)
lagged_mask = np.repeat(lagged_mask.reshape(1, N, N, tau_max + 1), n_realizations, axis=0)
auto_mask = np.repeat(auto_mask.reshape(1, N, N, tau_max + 1), n_realizations, axis=0)
any_mask = np.repeat(any_mask.reshape(1, N, N, tau_max + 1), n_realizations, axis=0)
return cross_mask, contemp_cross_mask_tril, lagged_mask, auto_mask, any_mask, tau_max
def _get_match_score(true_link, pred_link):
if true_link == "" or pred_link == "": return 0
count = 0
# If left edgemark is correct add 1
if true_link[0] == pred_link[0]:
count += 1
# If right edgemark is correct add 1
if true_link[2] == pred_link[2]:
count += 1
return count
match_func = np.vectorize(_get_match_score, otypes=[int])
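# Hedged illustration, added for clarity (not part of the original module): match_func
# compares the two edge marks of a link string, so '-->' vs '-->' scores 2, '-->' vs
# 'o->' scores 1, and an absent link scores 0. The helper name _example_match_score is
# hypothetical.
def _example_match_score():
    scores = match_func(np.array(['-->', '-->', '']), np.array(['-->', 'o->', '-->']))
    assert list(scores) == [2, 1, 0]
    return scores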
def _get_conflicts(pred_link):
if pred_link == "": return 0
count = 0
# If left edgemark is conflict add 1
if pred_link[0] == 'x':
count += 1
# If right edgemark is conflict add 1
if pred_link[2] == 'x':
count += 1
return count
conflict_func = np.vectorize(_get_conflicts, otypes=[int])
def _get_unoriented(true_link):
if true_link == "": return 0
count = 0
# If left edgemark is unoriented add 1
if true_link[0] == 'o':
count += 1
# If right edgemark is unoriented add 1
if true_link[2] == 'o':
count += 1
return count
unoriented_func = np.vectorize(_get_unoriented, otypes=[int])
def get_numbers(metrics, orig_true_graphs, orig_pred_graphs, val_min, cardinality, computation_time, boot_samples=200):
cross_mask, contemp_cross_mask_tril, lagged_mask, auto_mask, any_mask, tau_max = get_masks(orig_true_graphs)
n_realizations = len(orig_pred_graphs)
metrics_dict = {}
pred_graphs = orig_pred_graphs
true_graphs = orig_true_graphs
metrics_dict['valmin_lagged'] = (
((true_graphs != "") * np.abs(val_min) * cross_mask * lagged_mask).sum(axis=(1, 2, 3)),
((true_graphs != "") * cross_mask * lagged_mask).sum(axis=(1, 2, 3)))
metrics_dict['valmin_auto'] = (((true_graphs != "") * np.abs(val_min) * auto_mask).sum(axis=(1, 2, 3)),
((true_graphs != "") * auto_mask).sum(axis=(1, 2, 3)))
metrics_dict['valmin_contemp'] = (
((true_graphs != "") * np.abs(val_min) * contemp_cross_mask_tril).sum(axis=(1, 2, 3)),
((true_graphs != "") * contemp_cross_mask_tril).sum(axis=(1, 2, 3)))
metrics_dict['valmin_anylink'] = (((true_graphs != "") * np.abs(val_min) * any_mask).sum(axis=(1, 2, 3)),
((true_graphs != "") * any_mask).sum(axis=(1, 2, 3)))
metrics_dict['cardinality_lagged'] = (
((true_graphs != "") * cardinality * cross_mask * lagged_mask).sum(axis=(1, 2, 3)),
((true_graphs != "") * cross_mask * lagged_mask).sum(axis=(1, 2, 3)))
metrics_dict['cardinality_auto'] = (((true_graphs != "") * cardinality * auto_mask).sum(axis=(1, 2, 3)),
((true_graphs != "") * auto_mask).sum(axis=(1, 2, 3)))
metrics_dict['cardinality_contemp'] = (
((true_graphs != "") * cardinality * contemp_cross_mask_tril).sum(axis=(1, 2, 3)),
((true_graphs != "") * contemp_cross_mask_tril).sum(axis=(1, 2, 3)))
metrics_dict['cardinality_anylink'] = (((true_graphs != "") * cardinality * any_mask).sum(axis=(1, 2, 3)),
((true_graphs != "") * any_mask).sum(axis=(1, 2, 3)))
metrics_dict['num_links_lagged'] = (((true_graphs != "") * cross_mask * lagged_mask).sum(axis=(1, 2, 3)),
(cross_mask * lagged_mask).sum(axis=(1, 2, 3)))
metrics_dict['num_links_auto'] = (((true_graphs != "") * auto_mask).sum(axis=(1, 2, 3)),
(auto_mask).sum(axis=(1, 2, 3)))
metrics_dict['num_links_contemp'] = (((true_graphs != "") * contemp_cross_mask_tril).sum(axis=(1, 2, 3)),
(contemp_cross_mask_tril).sum(axis=(1, 2, 3)))
metrics_dict['num_links_anylink'] = (((true_graphs != "") * any_mask).sum(axis=(1, 2, 3)),
(any_mask).sum(axis=(1, 2, 3)))
metrics_dict['directed_lagged'] = (((true_graphs == "-->") * cross_mask * lagged_mask).sum(axis=(1, 2, 3)),
((true_graphs != "") * cross_mask * lagged_mask).sum(axis=(1, 2, 3)))
metrics_dict['directed_auto'] = (((true_graphs == "-->") * auto_mask).sum(axis=(1, 2, 3)),
((true_graphs != "") * auto_mask).sum(axis=(1, 2, 3)))
metrics_dict['directed_contemp'] = (
(np.logical_or(true_graphs == "-->", true_graphs == "<--") * contemp_cross_mask_tril).sum(axis=(1, 2, 3)),
((true_graphs != "") * contemp_cross_mask_tril).sum(axis=(1, 2, 3)))
metrics_dict['directed_anylink'] = (
(np.logical_or(true_graphs == "-->", true_graphs == "<--") * any_mask).sum(axis=(1, 2, 3)),
((true_graphs != "") * any_mask).sum(axis=(1, 2, 3)))
metrics_dict['bidirected_lagged'] = (((true_graphs == "<->") * cross_mask * lagged_mask).sum(axis=(1, 2, 3)),
((true_graphs != "") * cross_mask * lagged_mask).sum(axis=(1, 2, 3)))
metrics_dict['bidirected_auto'] = (((true_graphs == "<->") * auto_mask).sum(axis=(1, 2, 3)),
((true_graphs != "") * auto_mask).sum(axis=(1, 2, 3)))
metrics_dict['bidirected_contemp'] = (((true_graphs == "<->") * contemp_cross_mask_tril).sum(axis=(1, 2, 3)),
((true_graphs != "") * contemp_cross_mask_tril).sum(axis=(1, 2, 3)))
metrics_dict['bidirected_anylink'] = (((true_graphs == "<->") * any_mask).sum(axis=(1, 2, 3)),
((true_graphs != "") * any_mask).sum(axis=(1, 2, 3)))
# Adjacency true/false positives and precision/recall, separated by lagged/auto/contemp
metrics_dict['adj_lagged_fpr'] = (
((true_graphs == "") * (pred_graphs != "") * cross_mask * lagged_mask).sum(axis=(1, 2, 3)),
((true_graphs == "") * cross_mask * lagged_mask).sum(axis=(1, 2, 3)))
metrics_dict['adj_lagged_tpr'] = (
((true_graphs != "") * (pred_graphs != "") * cross_mask * lagged_mask).sum(axis=(1, 2, 3)),
((true_graphs != "") * cross_mask * lagged_mask).sum(axis=(1, 2, 3)))
metrics_dict['adj_auto_fpr'] = (((true_graphs == "") * (pred_graphs != "") * auto_mask).sum(axis=(1, 2, 3)),
((true_graphs == "") * auto_mask).sum(axis=(1, 2, 3)))
metrics_dict['adj_auto_tpr'] = (((true_graphs != "") * (pred_graphs != "") * auto_mask).sum(axis=(1, 2, 3)),
((true_graphs != "") * auto_mask).sum(axis=(1, 2, 3)))
metrics_dict['adj_contemp_fpr'] = (
((true_graphs == "") * (pred_graphs != "") * contemp_cross_mask_tril).sum(axis=(1, 2, 3)),
((true_graphs == "") * contemp_cross_mask_tril).sum(axis=(1, 2, 3)))
metrics_dict['adj_contemp_tpr'] = (
((true_graphs != "") * (pred_graphs != "") * contemp_cross_mask_tril).sum(axis=(1, 2, 3)),
((true_graphs != "") * contemp_cross_mask_tril).sum(axis=(1, 2, 3)))
metrics_dict['adj_anylink_fpr'] = (((true_graphs == "") * (pred_graphs != "") * any_mask).sum(axis=(1, 2, 3)),
((true_graphs == "") * any_mask).sum(axis=(1, 2, 3)))
metrics_dict['adj_anylink_tpr'] = (((true_graphs != "") * (pred_graphs != "") * any_mask).sum(axis=(1, 2, 3)),
((true_graphs != "") * any_mask).sum(axis=(1, 2, 3)))
# adj_lagged_precision = oracle * predicted / pred = tp/(tp+fp) = precision
    metrics_dict['adj_lagged_precision'] = (
        ((true_graphs != "") * (pred_graphs != "") * cross_mask * lagged_mask).sum(axis=(1, 2, 3)),
        ((pred_graphs != "") * cross_mask * lagged_mask).sum(axis=(1, 2, 3)))
metrics_dict['adj_lagged_recall'] = ( # tp/(tp+fn) = recall
((true_graphs != "") * (pred_graphs != "") * cross_mask * lagged_mask).sum(axis=(1, 2, 3)),
((true_graphs != "") * cross_mask * lagged_mask).sum(axis=(1, 2, 3)))
metrics_dict['adj_auto_precision'] = (((true_graphs != "") * (pred_graphs != "") * auto_mask).sum(axis=(1, 2, 3)),
((pred_graphs != "") * auto_mask).sum(axis=(1, 2, 3)))
metrics_dict['adj_auto_recall'] = (((true_graphs != "") * (pred_graphs != "") * auto_mask).sum(axis=(1, 2, 3)),
((true_graphs != "") * auto_mask).sum(axis=(1, 2, 3)))
metrics_dict['adj_contemp_precision'] = (
((true_graphs != "") * (pred_graphs != "") * contemp_cross_mask_tril).sum(axis=(1, 2, 3)),
((pred_graphs != "") * contemp_cross_mask_tril).sum(axis=(1, 2, 3)))
metrics_dict['adj_contemp_recall'] = (
((true_graphs != "") * (pred_graphs != "") * contemp_cross_mask_tril).sum(axis=(1, 2, 3)),
((true_graphs != "") * contemp_cross_mask_tril).sum(axis=(1, 2, 3)))
metrics_dict['adj_anylink_precision'] = (((true_graphs != "") * (pred_graphs != "") * any_mask).sum(axis=(1, 2, 3)),
((pred_graphs != "") * any_mask).sum(axis=(1, 2, 3)))
metrics_dict['adj_anylink_recall'] = (((true_graphs != "") * (pred_graphs != "") * any_mask).sum(axis=(1, 2, 3)),
((true_graphs != "") * any_mask).sum(axis=(1, 2, 3)))
# chr f1 adj
# metrics_dict['adj_anylink_f1_of_recall_precision'] = 2 * metrics_dict['adj_anylink_precision'] * metrics_dict[
# 'adj_anylink_recall'] / (metrics_dict['adj_anylink_precision'] + metrics_dict['adj_anylink_recall'])
# Edge mark precision and recall
metrics_dict['edgemarks_lagged_precision'] = ((match_func(true_graphs,
pred_graphs) * (cross_mask * lagged_mask)).sum(
axis=(1, 2, 3)),
2. * ((pred_graphs != "") * cross_mask * lagged_mask).sum(
axis=(1, 2, 3)))
first = (match_func(true_graphs, pred_graphs) * (cross_mask * lagged_mask)).sum(axis=(1, 2, 3))
second = 2. * ((true_graphs != "") * cross_mask * lagged_mask).sum(axis=(1, 2, 3))
metrics_dict['edgemarks_lagged_recall'] = (first,
second)
metrics_dict['edgemarks_auto_precision'] = ((match_func(true_graphs, pred_graphs) * auto_mask).sum(axis=(1, 2, 3)),
2. * ((pred_graphs != "") * auto_mask).sum(axis=(1, 2, 3)))
metrics_dict['edgemarks_auto_recall'] = ((match_func(true_graphs, pred_graphs) * auto_mask).sum(axis=(1, 2, 3)),
2. * ((true_graphs != "") * auto_mask).sum(axis=(1, 2, 3)))
metrics_dict['edgemarks_contemp_precision'] = (
(match_func(true_graphs, pred_graphs) * contemp_cross_mask_tril).sum(axis=(1, 2, 3)),
2. * ((pred_graphs != "") * contemp_cross_mask_tril).sum(axis=(1, 2, 3)))
metrics_dict['edgemarks_contemp_recall'] = (
(match_func(true_graphs, pred_graphs) * contemp_cross_mask_tril).sum(axis=(1, 2, 3)),
2. * ((true_graphs != "") * contemp_cross_mask_tril).sum(axis=(1, 2, 3)))
metrics_dict['edgemarks_anylink_precision'] = (
(match_func(true_graphs, pred_graphs) * any_mask).sum(axis=(1, 2, 3)),
2. * ((pred_graphs != "") * any_mask).sum(axis=(1, 2, 3)))
metrics_dict['edgemarks_anylink_recall'] = ((match_func(true_graphs, pred_graphs) * any_mask).sum(axis=(1, 2, 3)),
2. * ((true_graphs != "") * any_mask).sum(axis=(1, 2, 3)))
# chr f1 edgemark
# metrics_dict['edgemarks_anylink_f1_of_recall_precision'] = 2 * metrics_dict['edgemarks_anylink_precision'] * metrics_dict[
# 'edgemarks_anylink_recall'] / (metrics_dict['edgemarks_anylink_precision'] + metrics_dict['edgemarks_anylink_recall'])
# Unoriented marks in true_graph and conflicts in pred_graph
metrics_dict['unoriented_lagged'] = (
(unoriented_func(pred_graphs) * (cross_mask * lagged_mask)).sum(axis=(1, 2, 3)),
2. * ((pred_graphs != "") * cross_mask * lagged_mask).sum(axis=(1, 2, 3)))
# metrics_dict['unoriented_lagged'] = (
# (unoriented_func(true_graphs) * (cross_mask * lagged_mask)).sum(axis=(1, 2, 3)),
# 2. * ((true_graphs != "") * cross_mask * lagged_mask).sum(axis=(1, 2, 3)))
metrics_dict['conflicts_lagged'] = ((conflict_func(pred_graphs) * (cross_mask * lagged_mask)).sum(axis=(1, 2, 3)),
2. * ((pred_graphs != "") * cross_mask * lagged_mask).sum(axis=(1, 2, 3)))
metrics_dict['unoriented_auto'] = ((unoriented_func(pred_graphs) * (auto_mask)).sum(axis=(1, 2, 3)),
2. * ((pred_graphs != "") * auto_mask).sum(axis=(1, 2, 3)))
# metrics_dict['unoriented_auto'] = ((unoriented_func(true_graphs) * (auto_mask)).sum(axis=(1, 2, 3)),
# 2. * ((true_graphs != "") * auto_mask).sum(axis=(1, 2, 3)))
metrics_dict['conflicts_auto'] = ((conflict_func(pred_graphs) * (auto_mask)).sum(axis=(1, 2, 3)),
2. * ((pred_graphs != "") * auto_mask).sum(axis=(1, 2, 3)))
metrics_dict['unoriented_contemp'] = (
(unoriented_func(pred_graphs) * (contemp_cross_mask_tril)).sum(axis=(1, 2, 3)),
2. * ((pred_graphs != "") * contemp_cross_mask_tril).sum(axis=(1, 2, 3)))
# metrics_dict['unoriented_contemp'] = (
# (unoriented_func(true_graphs) * (contemp_cross_mask_tril)).sum(axis=(1, 2, 3)),
# 2. * ((true_graphs != "") * contemp_cross_mask_tril).sum(axis=(1, 2, 3)))
metrics_dict['conflicts_contemp'] = ((conflict_func(pred_graphs) * (contemp_cross_mask_tril)).sum(axis=(1, 2, 3)),
2. * ((pred_graphs != "") * contemp_cross_mask_tril).sum(axis=(1, 2, 3)))
metrics_dict['unoriented_anylink'] = ((unoriented_func(pred_graphs) * (any_mask)).sum(axis=(1, 2, 3)),
2. * ((pred_graphs != "") * any_mask).sum(axis=(1, 2, 3)))
# metrics_dict['unoriented_anylink'] = ((unoriented_func(true_graphs) * (any_mask)).sum(axis=(1, 2, 3)),
# 2. * ((true_graphs != "") * any_mask).sum(axis=(1, 2, 3)))
print('unoriented_anylink_oracle', (((unoriented_func(true_graphs) * (any_mask)).sum(axis=(1, 2, 3)))/
(2. * ((true_graphs != "") * any_mask).sum(axis=(1, 2, 3)))))
metrics_dict['conflicts_anylink'] = ((conflict_func(pred_graphs) * (any_mask)).sum(axis=(1, 2, 3)),
2. * ((pred_graphs != "") * any_mask).sum(axis=(1, 2, 3)))
for metric in metrics_dict.keys():
numerator, denominator = metrics_dict[metric]
metric_boot = np.zeros(boot_samples)
for b in range(boot_samples):
            # Draw a bootstrap resample of the realizations
rand = np.random.randint(0, n_realizations, n_realizations)
metric_boot[b] = numerator[rand].sum() / denominator[rand].sum()
metrics_dict[metric] = (numerator.sum() / denominator.sum(), metric_boot.std())
metrics_dict['computation_time'] = (
np.mean(np.array(computation_time)), np.percentile(np.array(computation_time), [5, 95]))
return metrics_dict
def get_evaluation(results, from_file=False):
metrics = ['adj_' + link_type + "_" + metric_type for link_type in ['lagged', 'auto', 'contemp', 'anylink']
for metric_type in ['fpr', 'tpr']]
metrics += ['adj_' + link_type + "_" + metric_type for link_type in ['lagged', 'auto', 'contemp', 'anylink']
for metric_type in ['precision', 'recall']]
metrics += ['edgemarks_' + link_type + "_" + metric_type for link_type in ['lagged', 'auto', 'contemp', 'anylink']
for metric_type in ['precision', 'recall']]
metrics += [metric_type + "_" + link_type for link_type in ['lagged', 'auto', 'contemp', 'anylink']
for metric_type in ['unoriented', 'conflicts', 'num_links', 'directed', 'bidirected']]
metrics += ['valmin_' + link_type for link_type in ['lagged', 'auto', 'contemp', 'anylink']]
metrics += ['cardinality_' + link_type for link_type in ['lagged', 'auto', 'contemp', 'anylink']]
metrics += ['computation_time']
if results is not None:
# all_configs[conf]['graphs'][i] = all_configs[conf]['results'][i]['graph']
# all_configs[conf]['true_graphs'][i] = all_configs[conf]['results'][i]['true_graph']
# all_configs[conf]['computation_time'].append(all_configs[conf]['results'][i]['computation_time'])
# Same tau_max for all trials
orig_true_graphs = results['oracle_graphs']
# Minimum effect size for each link
val_min = results['val_min']
# Maximum condition cardinality for each link
cardinality = results['max_cardinality']
# Pred graphs also contain 2's for conflicting links...
orig_pred_graphs = results['graphs']
computation_time = results['computation_time']
# print(true_graphs.shape, pred_graphs.shape, contemp_cross_mask.shape, cross_mask.shape, lagged_mask.shape, (cross_mask*lagged_mask).shape )
metrics_dict = get_numbers(metrics, orig_true_graphs, orig_pred_graphs, val_min, cardinality, computation_time)
return metrics_dict
else:
return None
| 18,678 | 58.677316 | 149 |
py
|
correlate
|
correlate-master/causal_discovery/LPCMCI/simulate_discrete_scm.py
|
# import numpy as np
# from numpy.random import binomial
# from scipy.special import expit
# from tigramite.data_processing import Graph
#
# def binomial_scp(links, T, n_binom, random_state = None, extremity = 4/5, scale = 1/2,
# centralize = True, cut_off = True):
#
# if random_state is None:
# random_state = np.random.RandomState(None)
#
# N = len(links.keys())
#
# # Check parameters
# if type(n_binom) != int or n_binom < 2 or n_binom % 2 != 0:
# raise ValueError("n_binom must be a positive even integer")
#
# max_lag = 0
# contemp_dag = Graph(N)
# for j in range(N):
# for ((var, lag), coeff, func) in links[j]:
# if lag == 0:
# contemp = True
# if var not in range(N):
# raise ValueError("var must be in 0..{}.".format(N-1))
# if 'float' not in str(type(coeff)):
# raise ValueError("coeff must be float.")
# if lag > 0 or type(lag) != int:
# raise ValueError("lag must be non-positive int.")
# max_lag = max(max_lag, abs(lag))
#
# # Create contemp DAG
# if var != j and lag == 0:
# contemp_dag.addEdge(var, j)
#
# if contemp_dag.isCyclic() == 1:
# raise ValueError("Contemporaneous links must not contain cycle.")
#
# causal_order = contemp_dag.topologicalSort()
#
# transient = int(.2*T)
#
# data = np.zeros((T+transient, N), dtype='int')
# cut_off_value = n_binom/2
#
# for t in range(max_lag):
# for j in causal_order:
#
# p_add_logit_half = sum([coeff*func(data[t + lag, var]) for ((var, lag), coeff, func) in links[j] if t + lag >= 0])
# p_binom = 1/2 + (expit(p_add_logit_half*scale*4/N) - 1/2)*extremity
#
# if centralize:
# data[t, j] = np.rint(random_state.binomial(n_binom, p_binom) - n_binom*p_binom)
# else:
# data[t, j] = random_state.binomial(n_binom, p_binom)
#
# if cut_off and abs(data[t, j]) > cut_off_value:
# data[t, j] = np.sign(data[t, j])*cut_off_value
#
# for t in range(max_lag, T + transient):
# for j in causal_order:
#
# p_add_logit_half = sum([coeff*func(data[t + lag, var]) for ((var, lag), coeff, func) in links[j]])
# p_binom = 1/2 + (expit(p_add_logit_half*scale*4/N) - 1/2)*extremity
#
# if centralize:
# data[t, j] = np.rint(random_state.binomial(n_binom, p_binom) - n_binom*p_binom)
# else:
# data[t, j] = random_state.binomial(n_binom, p_binom)
#
# if cut_off and abs(data[t, j]) > cut_off_value:
# data[t, j] = np.sign(data[t, j])*cut_off_value
#
# data = data[transient:]
#
# return data, False
#
# def discretized_scp(links, T, n_binom, random_state = None, centralize = True, cut_off = True):
#
# if random_state is None:
# random_state = np.random.RandomState(None)
#
# N = len(links.keys())
#
# # Check parameters
# if type(n_binom) != int or n_binom < 2 or n_binom % 2 != 0:
# raise ValueError("n_binom must be a positive even integer")
#
# # Prepare noise functions
# if centralize:
# noises = [lambda n_samples: random_state.binomial(n_binom, 0.5, n_samples) - n_binom*0.5 for k in range(N)]
# else:
# noises = [lambda n_samples: random_state.binomial(n_binom, 0.5, n_samples) for k in range(N)]
#
# # Check parameters
# max_lag = 0
# contemp_dag = Graph(N)
# for j in range(N):
# for ((var, lag), coeff, func) in links[j]:
# if lag == 0:
# contemp = True
# if var not in range(N):
# raise ValueError("var must be in 0..{}.".format(N-1))
# if 'float' not in str(type(coeff)):
# raise ValueError("coeff must be float.")
# if lag > 0 or type(lag) != int:
# raise ValueError("lag must be non-positive int.")
# max_lag = max(max_lag, abs(lag))
#
# # Create contemp DAG
# if var != j and lag == 0:
# contemp_dag.addEdge(var, j)
#
# if contemp_dag.isCyclic() == 1:
# raise ValueError("Contemporaneous links must not contain cycle.")
#
# causal_order = contemp_dag.topologicalSort()
#
# transient = int(.2*T)
#
# cut_off_value = n_binom/2
#
# data = np.zeros((T+transient, N), dtype='int')
# for j in range(N):
# data[:, j] = noises[j](T+transient)
#
# for t in range(max_lag, T+transient):
# for j in causal_order:
# increment = np.rint(sum([coeff*func(data[t + lag, var]) for ((var, lag), coeff, func) in links[j]]))
# data[t, j] += increment
#
# if cut_off and abs(data[t, j]) > cut_off_value:
# data[t, j] = np.sign(data[t, j])*cut_off_value
#
# data = data[transient:]
#
# nonstationary = (np.any(np.isnan(data)) or np.any(np.isinf(data)))
#
# return data, nonstationary
| 5,084 | 35.321429 | 128 |
py
|
correlate
|
correlate-master/causal_discovery/LPCMCI/discG2.py
|
# import numpy as np
# from scipy.stats import chi2
# from scipy.special import xlogy
# from tigramite.independence_tests import CondIndTest
#
# class DiscG2(CondIndTest):
#
# @property
# def measure(self):
# """
# Concrete property to return the measure of the independence test
# """
# return self._measure
#
# def __init__(self,
# **kwargs):
#
# # Specification of test
# self._measure = 'DiscG2'
#
# # Set general properties
# self.two_sided = False
# self.residual_based = False
# self.recycle_residuals = False
# CondIndTest.__init__(self, **kwargs)
#
# def get_dependence_measure(self, array, xyz):
# """Returns G2 test statistic"""
#
# # Determine the rows that correspond to the variables X, Y, and the conditions Z
# #print("Array shape:", array.shape)
# n_vars, T = array.shape
# var_index_X = [i for i in range(n_vars) if xyz[i] == 0]
# var_index_Y = [i for i in range(n_vars) if xyz[i] == 1]
# var_indices_Z = [i for i in range(n_vars) if xyz[i] == 2]
#
# # Determine the unique values collectively taken by the conditions Z and remember
# # which columns of 'array' correspond to each of these unique values
# uniques_Z = {}
# for sample_index, sample in enumerate(np.transpose(array)):
#
# sample_Z_only = tuple(sample[tuple([var_indices_Z])])
#
# if sample_Z_only in uniques_Z.keys():
# uniques_Z[sample_Z_only].append(sample_index)
# else:
# uniques_Z[sample_Z_only] = [sample_index]
#
# #######################################################################################
# # Run through each of the unique values assumed by Z and sum up the G2 test statistic
# # and the degrees of freedom obtained from the corresponding subset of samples
#
# # Variables test statistic and degrees of freedom
# G2, dof = 0, 0
#
# # Run through all subsets (corresponding to the unique values of Z) of the samples
# for sample_indices in uniques_Z.values():
#
# # Restrict to samples with the same value of Z
# restricted_array = array[:, sample_indices]
#
# # Determine the unique values assumed by X and Y in this subset
# uniques_X = np.unique(restricted_array[var_index_X, :])
# uniques_Y = np.unique(restricted_array[var_index_Y, :])
# n_uniques_X = len(uniques_X)
# n_uniques_Y = len(uniques_Y)
#
# # Build a function that maps a value (x, y) of (X, Y) to its index the contingency
# # table
# x_to_cont_idx_X = {x: cont_idx_X for (cont_idx_X, x) in enumerate(uniques_X)}
# y_to_cont_idx_Y = {y: cont_idx_Y for (cont_idx_Y, y) in enumerate(uniques_Y)}
#
# _xy_to_cont_idx = lambda x, y: (x_to_cont_idx_X[x], y_to_cont_idx_Y[y])
#
# # Make the contingency table (here: s_xy) of X and Y in this subset of samples
# # as well as its marginal counts
# s_xy = np.zeros((n_uniques_X, n_uniques_Y))
# s_x = np.zeros((n_uniques_X, 1))
# s_y = np.zeros((1, n_uniques_Y))
# s = np.zeros((1, 1))
# for sample in np.transpose(restricted_array):
# x_idx, y_idx = _xy_to_cont_idx(sample[var_index_X][0], sample[var_index_Y][0])
# s_xy[x_idx, y_idx] += 1
# s_x[x_idx, 0] += 1
# s_y[0, y_idx] += 1
# s[0, 0] += 1
#
# # Degrees of freedom for this subset of samples
# dof_add = (n_uniques_X - 1)*(n_uniques_Y - 1)
#
# if dof_add > 0:
#
# # Add the G2 test statistic value for this subset of samples
# G2_subset = np.sum(2*xlogy(s_xy, s_xy*s) - 2*xlogy(s_xy, s_x*s_y))
# G2 += G2_subset
#
# # Add the degrees of freedom for this subset of samples
# dof += dof_add
#
# #######################################################################################
#
# # Write the degrees of freedom to a (temporary) instance attribute in order to pass it
# # to the signifiance functions
# self._temp_dof = dof
#
# # Return the test statistic
# return G2
#
# def get_analytic_significance(self, value, T, dim):
# """Return the p_value of test statistic value 'value', according to a chi-square
# distribution with 'self._temp_dof' degrees of freedom"""
#
# # Calculate the p_value and delete the temporary instance attribute containing
# # the degrees of freedom, which was passed from self.get_dependence_measure
# p_value = chi2.sf(value, self._temp_dof)
# del self._temp_dof
#
# # Return p_value
# return p_value
| 4,975 | 40.815126 | 97 |
py
|
correlate
|
correlate-master/causal_discovery/LPCMCI/svarfci.py
|
import numpy as np
from itertools import product, combinations
import os
class SVARFCI():
r"""
This class implements the SVAR-FCI algorithm introduced in:
Malinsky, D. and Spirtes, P. (2018). Causal Structure Learning from Multivariate Time Series in Settings with Unmeasured Confounding. In Le, T. D., Zhang, K., Kıcıman, E., Hyvärinen, A., and Liu, L., editors, Proceedings of 2018 ACM SIGKDD Workshop on Causal Disocvery, volume 92 of Proceedings of Machine Learning Research, pages 23–47, London, UK. PMLR.
Our implementation applies several modifications:
1) It assumes the absence of selection variables.
    2) It guarantees order-independence by i) using the majority rule to decide whether a given node is in a given separating set and ii) applying a rule to the entire graph and resolving potential conflicts among the proposed orientations by means of the conflict mark 'x' before modifying the graph.
    3) It allows for the following conclusion: If X^i_{t-\tau} and X^j_t for \tau > 0 are not m-separated by any subset of D-Sep(X^j_t, X^i_{t-\tau}, \mathcal{M}(\mathcal{G})) then these variables are adjacent in \mathcal{M}(\mathcal{G}). In particular, this conclusion does not require that X^i_{t-\tau} and X^j_t are moreover not m-separated by any subset of D-Sep(X^i_{t-\tau}, X^j_t, \mathcal{M}(\mathcal{G})).
4) Several control parameters apply further modifications, see below.
Parameters passed to the constructor:
- dataframe:
Tigramite dataframe object that contains the the time series dataset \bold{X}
- cond_ind_test:
A conditional independence test object that specifies which conditional independence test CI is to be used
Parameters passed to self.run_svarfci():
- tau_max:
The maximum considered time lag tau_max
- pc_alpha:
The significance level \alpha of conditional independence tests
- max_cond_px:
Consider a pair of variables (X^i_{t-\tau}, X^j_t) with \tau > 0. In the first removal phase (here this is self._run_pc_removal_phase()), the algorithm does not test for conditional independence given subsets of X^i_{t-\tau} of cardinality higher than max_cond_px. In the second removal phase (here this is self._run_dsep_removal_phase()), the algorithm does not test for conditional independence given subsets of pds_t(X^i_{t-\tau}, X^j_t) of cardinality higher than max_cond_px.
- max_p_global:
Restricts all conditional independence tests to conditioning sets with cardinality smaller or equal to max_p_global
- max_p_dsep:
Restricts all conditional independence tests in the second removal phase (here this is self._run_dsep_removal_phase()) to conditioning sets with cardinality smaller or equal to max_p_global
- max_q_global:
For each ordered pair (X^i_{t-\tau}, X^j_t) of adjacent variables and for each cardinality of the conditioning sets test at most max_q_global many conditioning sets (when summing over all tested cardinalities more than max_q_global tests may be made)
- max_pds_set:
        In the second removal phase (here this is self._run_dsep_removal_phase()), the algorithm tests for conditional independence given subsets of the pds_t sets defined in the above reference. If for a given link the set pds_t(X^j_t, X^i_{t-\tau}) has more than max_pds_set many elements (or, if the link is also tested in the opposite direction, if pds_t(X^i_{t-\tau}, X^j_t) has more than max_pds_set elements), this link is not tested.
- fix_all_edges_before_final_orientation:
When one of the four previous parameters is not np.inf, the edge removals may terminate before we can be sure that all remaining edges are indeed part of the true PAG. However, soundness of the FCI orientation rules requires that they be applied only once the correct skeleton has been found. Therefore, the rules are only applied to those edges for which we are sure that they are part of the PAG. This can lead to quite uninformative results. If fix_all_edges_before_final_orientation is True, this precaution is overruled and the orientation rules are nevertheless applied to all edges.
- verbosity:
Controls the verbose output self.run_svarfci() and the function it calls.
Return value of self.run_svarfci():
The estimated graph in form of a link matrix. This is a numpy array of shape (self.N, self.N, self.tau_max + 1), where the entry array[i, j, \tau] is a string that visualizes the estimated link from X^i_{i-\tau} to X^j_t. For example, if array[0, 2, 1] = 'o->', then the estimated graph contains the link X^i_{t-1} o-> X^j_t. This numpy array is also saved as instance attribute self.graph. Note that self.N is the number of observed time series and self.tau_max the maximal considered time lag.
A note on middle marks:
In order to distinguish edges that are in the PAG for sure from edges that may not be in the PAG, we use the notion of middle marks that we introduced for LPCMCI. This becomes useful for the precaution discussed in the explanation of the parameter 'fix_all_edges_before_final_orientation', see above. In particular, we use the middle marks '?' and '' (empty). For convenience (to have strings of the same lengths) we here internally denote the empty middle mark by '-'. For post-processing purposes all middle marks are nevertheless set to the empty middle mark (here '-') in line 99, but if verbosity >= 1 a graph with the middle marks will be printed out before.
A note on wildcards:
The middle mark wildcard \ast and the edge mark wildcard are here represented as *, the edge mark wildcard \star as +
"""
def __init__(self, dataframe, cond_ind_test):
"""Class constructor. Store:
i) data
ii) conditional independence test object
iii) some instance attributes"""
# Save the time series data that the algorithm operates on
self.dataframe = dataframe
# Set the conditional independence test to be used
self.cond_ind_test = cond_ind_test
self.cond_ind_test.set_dataframe(self.dataframe)
# Store the shape of the data in the T and N variables
self.T, self.N = self.dataframe.values.shape
def run_svarfci(self,
tau_max = 1,
pc_alpha = 0.05,
max_cond_px = 0,
max_p_global = np.inf,
max_p_dsep = np.inf,
max_q_global = np.inf,
max_pds_set = np.inf,
fix_all_edges_before_final_orientation = True,
verbosity = 0):
"""Run the SVAR-FCI algorithm on the dataset and with the conditional independence test passed to the class constructor and with the options passed to this function."""
# Step 0: Initializations
self._initialize(tau_max, pc_alpha, max_cond_px, max_p_global, max_p_dsep, max_q_global, max_pds_set, fix_all_edges_before_final_orientation, verbosity)
# Step 1: PC removal phase
self._run_pc_removal_phase()
# Step 2: D-Sep removal phase (including preliminary collider orientation phase)
self._run_dsep_removal_phase()
# Step 3: FCI orientation phase
if self.fix_all_edges_before_final_orientation:
self._fix_all_edges()
self._run_fci_orientation_phase()
# Post processing
if self.verbosity >= 1:
print("Ambiguous triples", self.ambiguous_triples)
print("Max pds set: {}\n".format(self.max_pds_set_found))
self._fix_all_edges()
self.graph = self._dict2graph()
self.val_min_matrix = self._dict_to_matrix(self.val_min, self.tau_max, self.N, default = 0)
self.cardinality_matrix = self._dict_to_matrix(self.max_cardinality, self.tau_max, self.N, default = 0)
# Return the estimated graph
return self.graph
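    # Hedged usage sketch, added for illustration and kept as a comment because it is not
    # part of the original class. Assuming tigramite's data_processing module and ParCorr
    # test are available:
    #
    # import numpy as np
    # from tigramite import data_processing as pp
    # from tigramite.independence_tests import ParCorr
    # data = np.random.randn(500, 3)
    # svarfci = SVARFCI(dataframe=pp.DataFrame(data), cond_ind_test=ParCorr())
    # pag = svarfci.run_svarfci(tau_max=1, pc_alpha=0.05)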
def _initialize(self,
tau_max,
pc_alpha,
max_cond_px,
max_p_global,
max_p_dsep,
max_q_global,
max_pds_set,
fix_all_edges_before_final_orientation,
verbosity):
"""Function for
i) saving the arguments passed to self.run_svarfci() as instance attributes
ii) initializing various memory variables for storing the current graph, sepsets etc.
"""
# Save the arguments passed to self.run_svarfci()
self.tau_max = tau_max
self.pc_alpha = pc_alpha
self.max_cond_px = max_cond_px
self.max_p_global = max_p_global
self.max_p_dsep = max_p_dsep
self.max_q_global = max_q_global
self.max_pds_set = max_pds_set
self.fix_all_edges_before_final_orientation = fix_all_edges_before_final_orientation
self.verbosity = verbosity
# Initialize the nested dictionary for storing the current graph.
# Syntax: self.graph_dict[j][(i, -tau)] gives the string representing the link from X^i_{t-tau} to X^j_t
self.graph_dict = {}
for j in range(self.N):
self.graph_dict[j] = {(i, 0): "o?o" for i in range(self.N) if j != i}
self.graph_dict[j].update({(i, -tau): "o?>" for i in range(self.N) for tau in range(1, self.tau_max + 1)})
# Initialize the nested dictionary for storing separating sets
# Syntax: self.sepsets[j][(i, -tau)] stores separating sets of X^i_{t-tau} to X^j_t. For tau = 0, i < j.
self.sepsets = {j: {(i, -tau): set() for i in range(self.N) for tau in range(self.tau_max + 1) if (tau > 0 or i < j)} for j in range(self.N)}
# Initialize dictionaries for storing known ancestorships, non-ancestorships, and ambiguous ancestorships
# Syntax: self.def_ancs[j] contains the set of all known ancestors of X^j_t. Equivalently for the others
self.def_ancs = {j: set() for j in range(self.N)}
self.def_non_ancs = {j: set() for j in range(self.N)}
self.ambiguous_ancestorships = {j: set() for j in range(self.N)}
# Initialize nested dictionaries for saving the minimum test statistic among all conditional independence tests of a given pair of variables, the maximum p-values, as well as the maximal cardinality of the known separating sets.
# Syntax: As for self.sepsets
self.val_min = {j: {(i, -tau): float("inf") for i in range(self.N) for tau in
range(self.tau_max + 1) if (tau > 0 or i < j)} for j in range(self.N)}
self.pval_max = {j: {(i, -tau): 0 for i in range(self.N) for tau in
range(self.tau_max + 1) if (tau > 0 or i < j)} for j in range(self.N)}
self.max_cardinality = {j: {(i, -tau): 0 for i in range(self.N) for tau in
range(self.tau_max + 1) if (tau > 0 or i < j)} for j in range(self.N)}
# Initialize a nested dictionary for caching pds-sets
# Syntax: As for self.sepsets
self._pds_t = {(j, -tau_j): {} for j in range(self.N) for tau_j in range(self.tau_max + 1)}
# Initialize a set for memorizing ambiguous triples
self.ambiguous_triples = set()
# Initialize a variable for remembering the maximal cardinality among all calculated pds-sets
self.max_pds_set_found = -1
################################################################################################
# Only relevant for use with oracle CI
self._oracle = False
################################################################################################
# Return
return True
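# Illustrative note (sketch): for N = 2 and tau_max = 1 the initialization above gives, e.g.,
# self.graph_dict[1] == {(0, 0): "o?o", (0, -1): "o?>", (1, -1): "o?>"}, i.e. every
# contemporaneous link starts as "o?o" and every lagged link as "o?>", where the middle mark
# "?" marks links that have not yet been confirmed (it is set to "-" by self._fix_all_edges()).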
def _run_pc_removal_phase(self):
"""Run the first removal phase of the FCI algorithm adapted to stationary time series. This is essentially the skeleton phase of the PC algorithm"""
# Verbose output
if self.verbosity >= 1:
print("\n=======================================================")
print("=======================================================")
print("Starting preliminary removal phase")
# Iterate until convergence
# p_pc is the cardinality of the conditioning set
p_pc = 0
while True:
##########################################################################################################
### Run the next removal iteration #######################################################################
# Verbose output
if self.verbosity >= 1:
if p_pc == 0:
print("\nStarting test phase\n")
print("p = {}".format(p_pc))
# Variable to check for convergence
has_converged = True
# Variable for keeping track of edges marked for removal
to_remove = {j: {} for j in range(self.N)}
# Iterate through all links
for (i, j, lag_i) in product(range(self.N), range(self.N), range(-self.tau_max, 1)):
# Decode the triple (i, j, lag_i) into pairs of variables (X, Y)
X = (i, lag_i)
Y = (j, 0)
######################################################################################################
### Exclusion of links ###############################################################################
# Exclude the current link if ...
# ... X = Y
if lag_i == 0 and i == j:
continue
# ... X > Y (so, in fact, we don't distinguish between both directions of the same edge)
if self._is_smaller(Y, X):
continue
# Get the current link from X to Y
link = self._get_link(X, Y)
# Also exclude the current link if ...
# ... X and Y are not adjacent anymore
if link == "":
continue
######################################################################################################
### Preparation of PC search sets ####################################################################
# Search for separating sets in the non-future adjacencies of Y, without X and Y themselves
S_search_YX = self._get_non_future_adj([Y]).difference({X, Y})
# Also search for separating sets in the non-future adjacencies of X, without X and Y themselves, always if X and Y are contemporaneous or if specified by self.max_cond_px
test_X = True if (lag_i == 0 or (self.max_cond_px > 0 and self.max_cond_px >= p_pc)) else False
if test_X:
S_search_XY = self._get_non_future_adj([X]).difference({X, Y})
######################################################################################################
### Check whether the link needs testing #############################################################
# If there are fewer than p_pc elements in the search sets, the link does not need further testing
if len(S_search_YX) < p_pc and (not test_X or len(S_search_XY) < p_pc):
continue
# Force-quit the while loop when p_pc exceeds the specified limits
if p_pc > self.max_p_global:
continue
# This link does need testing. Therefore, the algorithm has not converged yet
has_converged = False
######################################################################################################
### Tests for conditional independence ###############################################################
# If self.max_q_global is finite, the below for loop may be broken earlier. To still guarantee order independence, the set from which the potential separating sets are created is ordered in an order independent way. Here, the elements of S_search_YX are ordered according to their minimal test statistic with Y
if not np.isinf(self.max_q_global):
S_search_YX = self._sort_search_set(S_search_YX, Y)
# q_count counts the number of conditional independence tests made for subsets of S_search_YX
q_count = 0
# Run through all cardinality p_pc subsets of S_search_YX
for Z in combinations(S_search_YX, p_pc):
# Stop testing if the number of tests exceeds the bound specified by self.max_q_global
q_count = q_count + 1
if q_count > self.max_q_global:
break
# Test conditional independence of X and Y given Z. Correspondingly update self.val_min, self.pval_max, and self.max_cardinality
val, pval = self.cond_ind_test.run_test(X = [X], Y = [Y], Z = list(Z), tau_max = self.tau_max)
if self.verbosity >= 2:
print(" %s _|_ %s | S_pc = %s: val = %.2f / pval = % .4f" %
(X, Y, ' '.join([str(z) for z in list(Z)]), val, pval))
self._update_val_min(X, Y, val)
self._update_pval_max(X, Y, pval)
self._update_cardinality(X, Y, len(Z))
# Check whether the test result was significant
if pval > self.pc_alpha:
# Mark the edge from X to Y for removal, save Z as separating set
to_remove[Y[0]][X] = True
self._save_sepset(X, Y, (frozenset(Z), ""))
# Verbose output
if self.verbosity >= 1:
print("({},{:2}) {:11} {} given {}".format(X[0], X[1], "independent", Y, Z))
# Break the for loop
break
# Run through all cardinality p_pc subsets of S_search_XY
if test_X:
if not np.isinf(self.max_q_global):
S_search_XY = self._sort_search_set(S_search_XY, X)
q_count = 0
for Z in combinations(S_search_XY, p_pc):
q_count = q_count + 1
if q_count > self.max_q_global:
break
val, pval = self.cond_ind_test.run_test(X = [X], Y = [Y], Z = list(Z), tau_max = self.tau_max)
if self.verbosity >= 2:
print(" %s _|_ %s | S_pc = %s: val = %.2f / pval = % .4f" %
(X, Y, ' '.join([str(z) for z in list(Z)]), val, pval))
self._update_val_min(X, Y, val)
self._update_pval_max(X, Y, pval)
self._update_cardinality(X, Y, len(Z))
if pval > self.pc_alpha:
to_remove[Y[0]][X] = True
self._save_sepset(X, Y, (frozenset(Z), ""))
if self.verbosity >= 1:
print("({},{:2}) {:11} {} given {}".format(X[0], X[1], "independent", Y, Z))
break
# end for (i, j, lag_i) in product(range(self.N), range(self.N), range(-self.tau_max, 1))
##########################################################################################################
### Remove edges marked for removal in to_remove #########################################################
# Remove edges
for j in range(self.N):
for (i, lag_i) in to_remove[j].keys():
self._write_link((i, lag_i), (j, 0), "", verbosity = self.verbosity)
# Verbose output
if self.verbosity >= 1:
print("\nTest phase complete")
##########################################################################################################
### Check for convergence ################################################################################
if has_converged:
# If no link needed testing, this algorithm has converged. Therefore, break the while loop
break
else:
# At least one link needed testing, this algorithm has not yet converged. Therefore, increase p_pc
p_pc = p_pc + 1
# end while True
# Verbose output
if self.verbosity >= 1:
print("\nPreliminary removal phase complete")
print("\nGraph:\n--------------------------------")
self._print_graph_dict()
print("--------------------------------")
# Return
return True
def _run_dsep_removal_phase(self):
"""Run the second removal phase of the FCI algorithm, including the preliminary collider orientation that is necessary for determining pds-sets"""
# Verbose output
if self.verbosity >= 1:
print("\n=======================================================")
print("=======================================================")
print("Starting final removal phase")
# Make the preliminary orientations that are necessary for determining pds_t sets
self._run_orientation_phase(rule_list = [["R-00-d"]], voting = "Majority-Preliminary")
# Remember all edges that have not been fully tested due to self.max_pds_set, self.max_q_global or self.max_p_global
self._cannot_fix = set()
# Iterate until convergence
# p_pc is the cardinality of the conditioning set
p_pc = 0
while True:
##########################################################################################################
### Run the next removal iteration #######################################################################
# Verbose output
if self.verbosity >= 1:
if p_pc == 0:
print("\nStarting test phase\n")
print("p = {}".format(p_pc))
# Variable to check for convergence
has_converged = True
# Variable for keeping track of edges marked for removal
to_remove = {j: {} for j in range(self.N)}
# Iterate through all links
for (i, j, lag_i) in product(range(self.N), range(self.N), range(-self.tau_max, 1)):
# Decode the triple (i, j, lag_i) into pairs of variables (X, Y)
X = (i, lag_i)
Y = (j, 0)
######################################################################################################
### Exclusion of links ###############################################################################
# Exclude the current link if ...
# ... X = Y
if lag_i == 0 and i == j:
continue
# ... X > Y
if self._is_smaller(Y, X):
continue
# Get the current link
link = self._get_link(X, Y)
# Also exclude the current link if ...
# ... X and Y are not adjacent anymore
if link == "":
continue
# ... X and Y are adjacent in the true MAG
if link[1] == "-":
continue
######################################################################################################
### Preparation of PC search sets ####################################################################
# Verbose output
if self.verbosity >= 2:
print("_get_pds_t ")
# Search for separating sets in pds_t(Y, X)
S_search_YX = self._get_pds_t(Y, X)
# Search for separating sets in pds_t(X, Y) always if X and Y are contemporaneous or if specified by self.max_cond_px
test_X = True if (lag_i == 0 or (self.max_cond_px > 0 and self.max_cond_px >= p_pc)) else False
if test_X:
S_search_XY = self._get_pds_t(X, Y)
# If the pds_t sets exceed the specified bounds, do not test this link. Remember that the link has not been fully tested
if len(S_search_YX) > self.max_pds_set or (test_X and len(S_search_XY) > self.max_pds_set):
self._cannot_fix.add((X, Y))
continue
######################################################################################################
### Check whether the link needs testing #############################################################
# If there are fewer than p_pc elements in the search set(s), the link does not need further testing. X and Y are then adjacent in the true MAG, unless the link has not been fully tested
if len(S_search_YX) < p_pc and (not test_X or len(S_search_XY) < p_pc):
if (X, Y) not in self._cannot_fix:
self._write_link(X, Y, link[0] + "-" + link[2], verbosity = self.verbosity)
continue
# Force-quit the while loop when p_pc exceeds the specified limits
if p_pc > self.max_p_global or p_pc > self.max_p_dsep:
continue
# Since this link does need testing, the algorithm has not converged yet
has_converged = False
######################################################################################################
### Tests for conditional independence ###############################################################
# Verbose output
if self.verbosity >= 1:
print("for S_pc in combinations(S_search_YX, p_pc)")
# If self.max_q_global is finite, the below for loop may be broken earlier. To still guarantee order independence, the set from which the potential separating sets are created is ordered in an order independent way. Here, the elements of S_search_YX are ordered according to their minimal test statistic with Y
if not np.isinf(self.max_q_global):
S_search_YX = self._sort_search_set(S_search_YX, Y)
# q_count counts the number of conditional independence tests made for subsets of S_search_YX
q_count = 0
# Run through all cardinality p_pc subsets of S_search_YX
for Z in combinations(S_search_YX, p_pc):
# Stop testing if the number of tests exceeds the bound specified by self.max_q_global. Remember that the link has not been fully tested
q_count = q_count + 1
if q_count > self.max_q_global:
self._cannot_fix.add((X, Y))
break
# Test conditional independence of X and Y given Z. Correspondingly update self.val_min, self.pval_max, and self.max_cardinality
val, pval = self.cond_ind_test.run_test(X = [X], Y = [Y], Z = list(Z), tau_max = self.tau_max)
if self.verbosity >= 2:
print(" %s _|_ %s | S_pc = %s: val = %.2f / pval = % .4f" %
(X, Y, ' '.join([str(z) for z in list(Z)]), val, pval))
self._update_val_min(X, Y, val)
self._update_pval_max(X, Y, pval)
self._update_cardinality(X, Y, len(Z))
# Check whether the test result was significant
if pval > self.pc_alpha:
# Mark the edge from X to Y for removal and save sepset
to_remove[Y[0]][X] = True
self._save_sepset(X, Y, (frozenset(Z), ""))
# Verbose output
if self.verbosity >= 1:
print("({},{:2}) {:11} {} given {}".format(X[0], X[1], "independent", Y, Z))
# Break the for loop
break
if test_X:
if self.verbosity >= 1:
print("for S_pc in combinations(S_search_XY, p_pc)")
if not np.isinf(self.max_q_global):
S_search_XY = self._sort_search_set(S_search_XY, X)
q_count = 0
for Z in combinations(S_search_XY, p_pc):
q_count = q_count + 1
if q_count > self.max_q_global:
self._cannot_fix.add((X, Y))
break
val, pval = self.cond_ind_test.run_test(X = [X], Y = [Y], Z = list(Z), tau_max = self.tau_max)
if self.verbosity >= 2:
print(" %s _|_ %s | S_pc = %s: val = %.2f / pval = % .4f" %
(X, Y, ' '.join([str(z) for z in list(Z)]), val, pval))
# Update val_min and pval_max
self._update_val_min(X, Y, val)
self._update_pval_max(X, Y, pval)
self._update_cardinality(X, Y, len(Z))
if pval > self.pc_alpha:
to_remove[Y[0]][X] = True
self._save_sepset(X, Y, (frozenset(Z), ""))
if self.verbosity >= 1:
print("({},{:2}) {:11} {} given {}".format(X[0], X[1], "independent", Y, Z))
break
# end for (i, j, lag_i) in product(range(self.N), range(self.N), range(-self.tau_max, 1))
##########################################################################################################
### Remove edges marked for removal in to_remove #########################################################
# Remove edges
for j in range(self.N):
for (i, lag_i) in to_remove[j].keys():
self._write_link((i, lag_i), (j, 0), "", verbosity = self.verbosity)
# Verbose output
if self.verbosity >= 1:
print("\nTest phase complete")
##########################################################################################################
### Check for convergence ################################################################################
if has_converged:
# If no link needed testing, this algorithm has converged. Therefore, break the while loop
break
else:
# At least one link needed testing, this algorithm has not yet converged. Therefore, increase p_pc
p_pc = p_pc + 1
# end while True
# Undo all preliminary collider orientations
self._unorient_all_edges()
self.def_non_ancs = {j: set() for j in range(self.N)}
# Verbose output
if self.verbosity >= 1:
print("\nFinal removal phase complete")
print("\nGraph:\n--------------------------------")
self._print_graph_dict()
print("--------------------------------")
# Return
return True
def _run_fci_orientation_phase(self):
"""Run the final orientation phase the FCI algorithm"""
# Verbose output
if self.verbosity >= 1:
print("\n=======================================================")
print("=======================================================")
print("Starting FCI orientation phase")
# Orient colliders
self._run_orientation_phase(rule_list = [["R-00-d"]], voting = "Majority-Final")
# Exhaustively apply the other relevant orientation rules.
# Rules 5, 6 and 7 are not relevant because by assumption there are no selection variables
self._run_orientation_phase(rule_list = [["R-01"], ["R-02"], ["R-03"], ["R-04"], ["R-08"], ["R-09"], ["R-10"]], voting = "Majority-Final")
# Verbose output
if self.verbosity >= 1:
print("\nFCI orientation phase complete")
print("\nFinal graph:\n--------------------------------")
print("--------------------------------")
self._print_graph_dict()
print("--------------------------------")
print("--------------------------------\n")
# Return
return True
########################################################################################################################
########################################################################################################################
########################################################################################################################
def _run_orientation_phase(self, rule_list, voting):
"""Function for exhaustive application of the orientation rules specified by rule_list. The argument voting specifies the rule with which it is decided whether B is in the separating set of A and C, where A - B - C is an unshielded triple"""
# Verbose output
if self.verbosity >= 1:
print("\nStarting orientation phase")
print("with rule list: ", rule_list)
# Run through all priority levels of rule_list
idx = 0
while idx <= len(rule_list) - 1:
# Some rules require that self.graph_full_dict is updated. Therefore, initialize this variable whenever the while loop (re)-starts at the first priority level
if idx == 0:
self._initialize_full_graph()
###########################################################################################################
### Rule application ######################################################################################
# Get the current rules
current_rules = rule_list[idx]
# Prepare a list to remember marked orientations
to_orient = []
# Run through all current rules
for rule in current_rules:
# Verbose output
if self.verbosity >= 1:
print("\n{}:".format(rule))
# Exhaustively apply the rule to the graph...
orientations = self._apply_rule(rule, voting)
# Verbose output
if self.verbosity >= 1:
for ((i, j, lag_i), new_link) in set(orientations):
print("{:10} ({},{:2}) {:3} ({},{:2}) ==> ({},{:2}) {:3} ({},{:2}) ".format("Marked:", i, lag_i, self._get_link((i, lag_i), (j, 0)), j, 0,i, lag_i, new_link, j, 0))
if len(orientations) == 0:
print("Found nothing")
# ... and stage the results for orientation and removal
to_orient.extend(orientations)
###########################################################################################################
### Aggregation of marked orientations ####################################################################
new_ancs = {j: set() for j in range(self.N)}
new_non_ancs = {j: set() for j in range(self.N)}
# Run through all marked orientations
for ((i, j, lag_i), new_link) in to_orient:
# The old link
old_link = self._get_link((i, lag_i), (j, 0))
# Assert that no preceding variable is marked as an ancestor of a later variable
assert not (lag_i > 0 and new_link[2] == "-")
# New ancestral relation of (i, lag_i) to (j, 0)
if new_link[0] == "-" and old_link[0] != "-":
new_ancs[j].add((i, lag_i))
elif new_link[0] == "<" and old_link[0] != "<":
new_non_ancs[j].add((i, lag_i))
# New ancestral relation of (j, 0) to (i, 0), relevant only for contemporaneous links (lag_i == 0)
if lag_i == 0:
if new_link[2] == "-" and old_link[2] != "-":
new_ancs[i].add((j, 0))
elif new_link[2] == ">" and old_link[2] != ">":
new_non_ancs[i].add((j, 0))
###########################################################################################################
### Update ancestral information and determine next step ##################################################
# Update ancestral information. The function called includes conflict resolution
restart = self._apply_new_ancestral_information(new_non_ancs, new_ancs)
# If any useful new information was found, go back to idx = 0, else increase idx by 1
idx = 0 if restart == True else idx + 1
# end while idx <= len(rule_list) - 1
# The algorithm has converged
# Verbose output
if self.verbosity >= 1:
print("\nOrientation phase complete")
# Return
return True
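# Illustrative note (sketch): rule_list is a list of priority levels, each itself a list of
# rule names, e.g. [["R-01"], ["R-02"], ["R-03"]]; whenever a level yields new (non-)ancestral
# information, the loop restarts at the first level (idx = 0).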
def _get_pds_t(self, A, B):
"""Return pds_t(A, B) according to the current graph"""
# Unpack A and B, then assert that at least one of them is at lag 0
var_A, lag_A = A
var_B, lag_B = B
assert lag_A == 0 or lag_B == 0
# If pds_t(A, B) is in memory, return from memory
memo = self._pds_t[A].get(B)
if memo is not None:
return memo
# Else, re-compute it with breadth-first search according to the current graph
visited = set()
start_from = {((var, lag + lag_A), A) for (var, lag) in self.graph_full_dict[var_A].keys() if lag + lag_A >= -self.tau_max and lag + lag_A <= 0}
while start_from:
new_start_from = set()
for (current_node, previous_node) in start_from:
visited.add((current_node, previous_node))
for (var, lag) in self.graph_full_dict[current_node[0]]:
next_node = (var, lag + current_node[1])
if next_node[1] < -self.tau_max:
continue
if next_node[1] > 0:
continue
if (next_node, current_node) in visited:
continue
if next_node == previous_node:
continue
if self._get_link(next_node, previous_node) == "" and (self._get_link(previous_node, current_node)[2] == "o" or self._get_link(next_node, current_node)[2] == "o"):
continue
new_start_from.add((next_node, current_node))
start_from = new_start_from
# Cache results and return
res = {node for (node, _) in visited if node != A and node != B}
self.max_pds_set_found = max(self.max_pds_set_found, len(res))
self._pds_t[A][B] = res
return self._pds_t[A][B]
def _unorient_all_edges(self):
"""Remove all orientations, except the non-ancestorships implied by time order"""
for j in range(self.N):
for (i, lag_i) in self.graph_dict[j].keys():
link = self._get_link((i, lag_i), (j, 0))
if len(link) > 0:
if lag_i == 0:
new_link = "o" + link[1] + "o"
else:
new_link = "o" + link[1] + ">"
self.graph_dict[j][(i, lag_i)] = new_link
def _fix_all_edges(self):
"""Set the middle mark of all links to '-'"""
for j in range(self.N):
for (i, lag_i) in self.graph_dict[j].keys():
link = self._get_link((i, lag_i), (j, 0))
if len(link) > 0:
new_link = link[0] + "-" + link[2]
self.graph_dict[j][(i, lag_i)] = new_link
def _apply_new_ancestral_information(self, new_non_ancs, new_ancs):
"""Apply the new ancestorships and non-ancestorships specified by new_non_ancs and new_ancs to the current graph. Conflicts are resolved by marking. Returns True if any circle mark was turned into a head or tail, else False."""
#######################################################################################################
### Preprocessing #####################################################################################
# Memory variables
add_to_def_non_ancs = {j: set() for j in range(self.N)}
add_to_def_ancs = {j: set() for j in range(self.N)}
add_to_ambiguous_ancestorships = {j: set() for j in range(self.N)}
put_head_or_tail = False
# Default values
if new_non_ancs is None:
new_non_ancs = {j: set() for j in range(self.N)}
if new_ancs is None:
new_ancs = {j: set() for j in range(self.N)}
# Marking A as ancestor of B implies that B is marked as a non-ancestor of A. This is only non-trivial for A before B
for j in range(self.N):
for (i, lag_i) in new_ancs[j]:
if lag_i == 0:
new_non_ancs[i].add((j, 0))
#######################################################################################################
### Conflict resolution ###############################################################################
# Iterate through new_non_ancs
for j in range(self.N):
for (i, lag_i) in new_non_ancs[j]:
# X = (i, lag_i), Y = (j, 0)
# X is marked as non-ancestor for Y
# Conflict resolution
if (i, lag_i) in self.ambiguous_ancestorships[j]:
# There is a conflict, since it is already marked as ambiguous whether X is an ancestor of Y
if self.verbosity >= 1:
print("{:10} ({}, {:2}) marked as non-anc of {} but saved as ambiguous".format("Conflict:", i, lag_i, (j, 0)))
elif (i, lag_i) in self.def_ancs[j]:
# There is a conflict, since X is already marked as ancestor of Y
add_to_ambiguous_ancestorships[j].add((i, lag_i))
if self.verbosity >= 1:
print("{:10} ({}, {:2}) marked as non-anc of {} but saved as anc".format("Conflict:", i, lag_i, (j, 0)))
elif (i, lag_i) in new_ancs[j]:
# There is a conflict, since X is also marked as a new ancestor of Y
add_to_ambiguous_ancestorships[j].add((i, lag_i))
if self.verbosity >= 1:
print("{:10} ({}, {:2}) marked as both anc- and non-anc of {}".format("Conflict:", i, lag_i, (j, 0)))
else:
# There is no conflict
add_to_def_non_ancs[j].add((i, lag_i))
# Iterate through new_ancs
for j in range(self.N):
for (i, lag_i) in new_ancs[j]:
# X = (i, lag_i), Y = (j, 0)
# X is marked as ancestor for Y
# Conflict resolution
if (i, lag_i) in self.ambiguous_ancestorships[j]:
# There is a conflict, since it is already marked as ambiguous whether X is an ancestor of Y
if self.verbosity >= 1:
print("{:10} ({}, {:2}) marked as anc of {} but saved as ambiguous".format("Conflict:", i, lag_i, (j, 0)))
elif lag_i == 0 and (j, 0) in self.ambiguous_ancestorships[i]:
# There is a conflict, since X and Y are contemporaneous and it is already marked as ambiguous whether Y is an ancestor of X
# Note: This is required here, because X being an ancestor of Y implies that Y is not an ancestor of X. This ambiguity cannot exist when X is before Y
if self.verbosity >= 1:
print("{:10} ({}, {:2}) marked as anc of {} but saved as ambiguous".format("Conflict:", i, lag_i, (j, 0)))
elif (i, lag_i) in self.def_non_ancs[j]:
# There is a conflict, since X is already marked as non-ancestor of Y
add_to_ambiguous_ancestorships[j].add((i, lag_i))
if self.verbosity >= 1:
print("{:10} ({}, {:2}) marked as anc of {} but saved as non-anc".format("Conflict:", i, lag_i, (j, 0)))
elif (i, lag_i) in new_non_ancs[j]:
# There is a conflict, since X is also marked as a new non-ancestor of Y
add_to_ambiguous_ancestorships[j].add((i, lag_i))
if self.verbosity >= 1:
print("{:10} ({}, {:2}) marked as both anc- and non-anc of {}".format("Conflict:", i, lag_i, (j, 0)))
else:
# There is no conflict
add_to_def_ancs[j].add((i, lag_i))
#######################################################################################################
#######################################################################################################
### Apply the ambiguous information ###################################################################
for j in range(self.N):
for (i, lag_i) in add_to_ambiguous_ancestorships[j]:
old_link = self._get_link((i, lag_i), (j, 0))
if len(old_link) > 0 and old_link[0] != "x":
new_link = "x" + old_link[1] + old_link[2]
self._write_link((i, lag_i), (j, 0), new_link, verbosity = self.verbosity)
if self.verbosity >= 1:
if (i, lag_i) in self.def_ancs[j]:
print("{:10} Removing ({}, {:2}) as anc of {}".format("Update:", i, lag_i, (j, 0)))
if (i, lag_i) in self.def_non_ancs[j]:
print("{:10} Removing ({}, {:2}) as non-anc of {}".format("Update:", i, lag_i, (j, 0)))
self.def_ancs[j].discard((i, lag_i))
self.def_non_ancs[j].discard((i, lag_i))
if lag_i == 0:
if self.verbosity >= 1 and (j, 0) in self.def_ancs[i]:
print("{:10} Removing {} as anc of {}".format("Update:", i, lag_i, (j, 0)))
self.def_ancs[i].discard((j, 0))
# Do we also need the following?
# self.def_non_ancs[i].discard((j, 0))
if self.verbosity >= 1 and (i, lag_i) not in self.ambiguous_ancestorships[j]:
print("{:10} Marking ancestorship of ({}, {:2}) to {} as ambiguous".format("Update:", i, lag_i, (j, 0)))
self.ambiguous_ancestorships[j].add((i, lag_i))
#######################################################################################################
### Apply the unambiguous information #################################################################
for j in range(self.N):
for (i, lag_i) in add_to_def_non_ancs[j]:
old_link = self._get_link((i, lag_i), (j, 0))
if len(old_link) > 0 and old_link[0] != "<":
new_link = "<" + old_link[1] + old_link[2]
self._write_link((i, lag_i), (j, 0), new_link, verbosity = self.verbosity)
put_head_or_tail = True
if self.verbosity >= 1 and (i, lag_i) not in self.def_non_ancs[j]:
print("{:10} Marking ({}, {:2}) as non-anc of {}".format("Update:", i, lag_i, (j, 0)))
self.def_non_ancs[j].add((i, lag_i))
for (i, lag_i) in add_to_def_ancs[j]:
old_link = self._get_link((i, lag_i), (j, 0))
if len(old_link) > 0 and (old_link[0] != "-" or old_link[2] != ">"):
new_link = "-" + old_link[1] + ">"
self._write_link((i, lag_i), (j, 0), new_link, verbosity = self.verbosity)
put_head_or_tail = True
if self.verbosity >= 1 and (i, lag_i) not in self.def_ancs[j]:
print("{:10} Marking ({}, {:2}) as anc of {}".format("Update:", i, lag_i, (j, 0)))
self.def_ancs[j].add((i, lag_i))
if lag_i == 0:
if self.verbosity >= 1 and (j, 0) not in self.def_non_ancs[i]:
print("{:10} Marking {} as non-anc of {}".format("Update:",(j, 0), (i, 0)))
self.def_non_ancs[i].add((j, 0))
#######################################################################################################
return put_head_or_tail
def _apply_rule(self, rule, voting):
"""Call the orientation-removal-rule specified by the string argument rule. Pass on voting."""
if rule == "R-00-d":
return self._apply_R00(voting)
elif rule == "R-01":
return self._apply_R01(voting)
elif rule == "R-02":
return self._apply_R02()
elif rule == "R-03":
return self._apply_R03(voting)
elif rule == "R-04":
return self._apply_R04(voting)
elif rule == "R-08":
return self._apply_R08()
elif rule == "R-09":
return self._apply_R09(voting)
elif rule == "R-10":
return self._apply_R10(voting)
def _B_not_in_SepSet_AC(self, A, B, C, voting):
"""Return True if B is not in the separating set of A and C. If voting = 'Majority-Final', this is done according to the standard majority rule. If voting = 'Majority-Final', for A-B-C that would be marked as ambiguous triples by 'Majority-Final', also return True."""
# Treat A - B - C as the same triple as C - B - A
# Convention: A is before C or, if they are contemporaneous, the index of A is smaller than that of C
if C[1] < A[1] or (C[1] == A[1] and C[0] < A[0]):
return self._B_not_in_SepSet_AC(C, B, A, voting)
################################################################################################
# Only relevant for use with oracle CI
if self._oracle:
return self._B_not_in_SepSet_AC_given_answers[((A[0], A[1] - C[1]), (B[0], B[1] - C[1]), (C[0], 0))]
################################################################################################
# If the triple is ambiguous, immediately return False
if (A, B, C) in self.ambiguous_triples or (C, B, A) in self.ambiguous_triples:
return False
# Remember all separating sets that we will find
all_sepsets = set()
# Test for independence given all subsets of non-future adjacencies of A
adj_A = self._get_non_future_adj([A]).difference({A, C})
adj_C = self._get_non_future_adj([C]).difference({A, C})
# Depending on the self.max_cond_px and self.max_p_global, determine the maximal cardinality of subsets of adj_A that are tested
if A[1] < C[1]:
max_p_A = min([len(adj_A), self.max_cond_px, self.max_p_global]) + 1
else:
max_p_A = min([len(adj_A), self.max_p_global]) + 1
# If self.max_q_global is finite, order adj_A and adj_C according to self.val_min to guarantee order independence
if not np.isinf(self.max_q_global):
adj_A = self._sort_search_set(adj_A, A)
adj_C = self._sort_search_set(adj_C, C)
# Shift lags
adj_A = [(var, lag - C[1]) for (var, lag) in adj_A]
adj_C = [(var, lag - C[1]) for (var, lag) in adj_C]
X = (A[0], A[1] - C[1])
Y = (C[0], 0)
# Test for independence given subsets of non-future adjacencies of A
for p in range(max_p_A):
# Count the number of tests made at this value of p
q_count = 0
for Z_raw in combinations(adj_A, p):
# Break if the maximal number of tests specified by self.max_q_global has been exceeded
q_count = q_count + 1
if q_count > self.max_q_global:
break
# Prepare the conditioning set
Z = {node for node in Z_raw if node != X and node != Y}
# Test for conditional independence of X and Y given Z
val, pval = self.cond_ind_test.run_test(X = [X], Y = [Y], Z = list(Z), tau_max = self.tau_max)
if self.verbosity >= 2:
print("BnotinSepSetAC(A): %s _|_ %s | Z = %s: val = %.2f / pval = % .4f" %
(X, Y, ' '.join([str(z) for z in list(Z)]), val, pval))
# Check whether the test result was significant. If yes, remember Z as separating set
if pval > self.pc_alpha:
all_sepsets.add(frozenset(Z))
# Test for independence given subsets of non-future adjacencies of C
for p in range(min(len(adj_C), self.max_p_global) + 1):
q_count = 0
for Z_raw in combinations(adj_C, p):
q_count = q_count + 1
if q_count > self.max_q_global:
break
Z = {node for node in Z_raw if node != X and node != Y}
val, pval = self.cond_ind_test.run_test(X = [X], Y = [Y], Z = list(Z), tau_max = self.tau_max)
if self.verbosity >= 2:
print("BnotinSepSetAC(C): %s _|_ %s | Z = %s: val = %.2f / pval = % .4f" %
(X, Y, ' '.join([str(z) for z in list(Z)]), val, pval))
if pval > self.pc_alpha:
all_sepsets.add(frozenset(Z))
# Count number of sepsets and number of sepsets that contain B
n_sepsets = len(all_sepsets)
n_sepsets_with_B = len([1 for Z in all_sepsets if (B[0], B[1] - C[1]) in Z])
# Determine the answer
if voting == "Majority-Preliminary":
# Return True if no separating set was found or if at least one separating set was found and B is in less than half of them, else False
return True if (n_sepsets == 0 or 2*n_sepsets_with_B < n_sepsets) else False
elif voting == "Majority-Final":
# Return True if at least one separating set was found and B is in less than half of them, False if at least one separating set has been found and B is in more than half of them, else mark the triple as ambiguous
if n_sepsets == 0 or 2*n_sepsets_with_B == n_sepsets:
#######################################################
#for (Z, _) in self._get_sepsets(A, C):
# return False if B in Z else True
#######################################################
self.ambiguous_triples.add((A, B, C))
return False
elif 2*n_sepsets_with_B < n_sepsets:
return True
else:
return False
else:
assert False
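# Worked example (sketch) of the majority vote above: with n_sepsets = 3 and
# n_sepsets_with_B = 1 we have 2*1 < 3, so B is declared not to be in SepSet(A, C); with a tie
# such as n_sepsets = 2 and n_sepsets_with_B = 1, the 'Majority-Final' voting marks the triple
# (A, B, C) as ambiguous and returns False instead.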
def _B_in_SepSet_AC(self, A, B, C, voting):
"""Return True if B is in the separating set of A and C. This is done according to the standard majority rule"""
# Treat A - B - C as the same triple as C - B - A
# Convention: A is before C or, if they are contemporaneous, the index of A is smaller than that of C
if C[1] < A[1] or (C[1] == A[1] and C[0] < A[0]):
return self._B_in_SepSet_AC(C, B, A, voting)
################################################################################################
# Only relevant for use with oracle CI
if self._oracle:
return not self._B_not_in_SepSet_AC_given_answers[((A[0], A[1] - C[1]), (B[0], B[1] - C[1]), (C[0], 0))]
################################################################################################
if (A, B, C) in self.ambiguous_triples or (C, B, A) in self.ambiguous_triples:
return False
# This function must only be called from the final orientation phase
if voting != "Majority-Final":
assert False
# Remember all separating sets that we will find
all_sepsets = set()
# Get the non-future adjacencies of A and C
adj_A = self._get_non_future_adj([A]).difference({A, C})
adj_C = self._get_non_future_adj([C]).difference({A, C})
# Depending on the self.max_cond_px and self.max_p_global, determine the maximal cardinality of subsets of adj_A that are tested
if A[1] < C[1]:
max_p_A = min([len(adj_A), self.max_cond_px, self.max_p_global]) + 1
else:
max_p_A = min([len(adj_A), self.max_p_global]) + 1
if not np.isinf(self.max_q_global):
adj_A = self._sort_search_set(adj_A, A)
adj_C = self._sort_search_set(adj_C, C)
# Shift lags
adj_A = [(var, lag - C[1]) for (var, lag) in adj_A]
adj_C = [(var, lag - C[1]) for (var, lag) in adj_C]
X = (A[0], A[1] - C[1])
Y = (C[0], 0)
# Test for independence given subsets of non-future adjacencies of A
for p in range(max_p_A):
# Count the number of tests made at this value of p
q_count = 0
for Z_raw in combinations(adj_A, p):
# Break if the maximal number of tests specified by self.max_q_global has been exceeded
q_count = q_count + 1
if q_count > self.max_q_global:
break
# Prepare the conditioning set
Z = {node for node in Z_raw if node != X and node != Y}
# Test conditional independence of X and Y given Z
val, pval = self.cond_ind_test.run_test(X = [X], Y = [Y], Z = list(Z), tau_max = self.tau_max)
if self.verbosity >= 2:
print("BinSepSetAC(A): %s _|_ %s | Z = %s: val = %.2f / pval = % .4f" %
(X, Y, ' '.join([str(z) for z in list(Z)]), val, pval))
# Check whether the test result was significant. If yes, remember Z as separating set
if pval > self.pc_alpha:
all_sepsets.add(frozenset(Z))
# Test for independence given subsets of non-future adjacencies of C
for p in range(min(len(adj_C), self.max_p_global) + 1):
q_count = 0
for Z_raw in combinations(adj_C, p):
q_count = q_count + 1
if q_count > self.max_q_global:
break
Z = {node for node in Z_raw if node != X and node != Y}
val, pval = self.cond_ind_test.run_test(X = [X], Y = [Y], Z = list(Z), tau_max = self.tau_max)
if self.verbosity >= 2:
print("BinSepSetAC(C): %s _|_ %s | Z = %s: val = %.2f / pval = % .4f" %
(X, Y, ' '.join([str(z) for z in list(Z)]), val, pval))
if pval > self.pc_alpha:
all_sepsets.add(frozenset(Z))
# Count number of sepsets and number of sepsets that contain B
n_sepsets = len(all_sepsets)
n_sepsets_with_B = len([1 for Z in all_sepsets if (B[0], B[1] - C[1]) in Z])
# Return False if at least one separating set was found and B is in less than half of them, True if at least one separating set has been found and B is in more than half of them, else mark the triple as ambiguous
if n_sepsets == 0 or 2*n_sepsets_with_B == n_sepsets:
#######################################################
#for (Z, _) in self._get_sepsets(A, C):
# return True if B in Z else False
#######################################################
self.ambiguous_triples.add((A, B, C))
return False
elif 2*n_sepsets_with_B < n_sepsets:
return False
else:
return True
########################################################################################################################
########################################################################################################################
########################################################################################################################
def _apply_R00(self, voting):
"""Return all orientations implied by orientation rule R-00-d"""
# Build the output list
out = []
# Find all graphical structures that the rule applies to
if voting == "Majority-Preliminary":
triples_1 = self._find_triples(pattern_ij='**o', pattern_jk='o**', pattern_ik='')
triples_2 = self._find_triples(pattern_ij='**>', pattern_jk='o**', pattern_ik='')
all_appropriate_triples = set(triples_1).union(set(triples_2))
else:
triples_1 = self._find_triples(pattern_ij='*-o', pattern_jk='o-*', pattern_ik='')
triples_2 = self._find_triples(pattern_ij='*->', pattern_jk='o-*', pattern_ik='')
all_appropriate_triples = set(triples_1).union(set(triples_2))
# Run through all appropriate graphical structures
for (A, B, C) in all_appropriate_triples:
if self.verbosity >= 2:
print("R00: ", (A, B, C))
# Check whether the rule applies
if self._B_not_in_SepSet_AC(A, B, C, voting):
# Prepare the new links and append them to the output
if self.verbosity >= 2:
print(" --> not in sepset ")
# From C to B
link_CB = self._get_link(C, B)
new_link_CB = link_CB[0] + link_CB[1] + ">"
out.append(self._get_pair_key_and_new_link(C, B, new_link_CB))
# If needed, also from A to B
link_AB = self._get_link(A, B)
if link_AB[2] == "o":
new_link_AB = link_AB[0] + link_AB[1] + ">"
out.append(self._get_pair_key_and_new_link(A, B, new_link_AB))
# Return the output list
return out
def _apply_R01(self, voting):
"""Return all orientations implied by orientation rule R-01"""
# Build the output list
out = []
# Find all graphical structures that the rule applies to
all_appropriate_triples = self._find_triples(pattern_ij='*->', pattern_jk='o-+', pattern_ik='')
# Run through all appropriate graphical structures
for (A, B, C) in all_appropriate_triples:
if self.verbosity >= 2:
print("R01: ", (A, B, C))
# Check whether the rule applies
if self._B_in_SepSet_AC(A, B, C, voting):
if self.verbosity >= 2:
print(" --> in sepset ")
# Prepare the new link from B to C and append it to the output list
link_BC = self._get_link(B, C)
new_link_BC = "-" + link_BC[1] + ">"
out.append(self._get_pair_key_and_new_link(B, C, new_link_BC))
# Return the output list
return out
def _apply_R02(self):
"""Return all orientations implied by orientation rule R-02"""
# Build the output list
out = []
# Find all graphical structures that the rule applies to
all_appropriate_triples = set(self._find_triples(pattern_ij='-->', pattern_jk='*->', pattern_ik='+-o'))
all_appropriate_triples = all_appropriate_triples.union(set(self._find_triples(pattern_ij='*->', pattern_jk='-->', pattern_ik='+-o')))
# Run through all appropriate graphical structures
for (A, B, C) in all_appropriate_triples:
# The rule applies to all relevant graphical structures. Therefore, prepare the new link and append it to the output list
link_AC = self._get_link(A, C)
new_link_AC = link_AC[0] + link_AC[1] + ">"
out.append(self._get_pair_key_and_new_link(A, C, new_link_AC))
# Return the output list
return out
def _apply_R03(self, voting):
"""Return all orientations implied by orientation rule R-03"""
# Build the output list
out = []
# Find all graphical structures that the rule applies to
all_appropriate_quadruples = self._find_quadruples(pattern_ij='*->', pattern_jk='<-*', pattern_ik='',
pattern_il='+-o', pattern_jl='o-+', pattern_kl='+-o')
# Run through all appropriate graphical structures
for (A, B, C, D) in all_appropriate_quadruples:
# Check whether the rule applies
if self._B_in_SepSet_AC(A, D, C, voting):
# Prepare the new link from D to B and append it to the output list
link_DB = self._get_link(D, B)
new_link_DB = link_DB[0] + link_DB[1] + ">"
out.append(self._get_pair_key_and_new_link(D, B, new_link_DB))
# Return the output list
return out
def _apply_R04(self, voting):
"""Return all orientations implied by orientation rule R-04"""
# Build the output list
out = []
# Find all relevant triangles W-V-Y
all_appropriate_triples = self._find_triples(pattern_ij='<-*', pattern_jk='o-+', pattern_ik='-->')
# Run through all of these triangles
for triple in all_appropriate_triples:
(W, V, Y) = triple
# Get the current link from W to V, which we will need below
link_WV = self._get_link(W, V)
# Find all discriminating paths for this triangle
# Note: To guarantee order independence, we check all discriminating paths. Alternatively, we could check the rule for all shortest such paths
discriminating_paths = self._get_R4_discriminating_paths(triple, max_length = np.inf)
# Run through all discriminating paths
for path in discriminating_paths:
# Get the end point node
X_1 = path[-1]
# Check which of the two cases of the rule we are in, then append the appropriate new links to the output list
if self._B_in_SepSet_AC(X_1, V, Y, voting):
# New link from V to Y
out.append(self._get_pair_key_and_new_link(V, Y, "-->"))
elif link_WV != "<-x" and self._B_not_in_SepSet_AC(X_1, V, Y, voting):
# New link from V to Y
out.append(self._get_pair_key_and_new_link(V, Y, "<->"))
# If needed, also the new link from W to V
if link_WV != "<->":
out.append(self._get_pair_key_and_new_link(W, V, "<->"))
# Return the output list
return out
def _apply_R08(self):
"""Return all orientations implied by orientation rule R-08"""
# Build the output list
out = []
# Find all graphical structures that the rule applies to
all_appropriate_triples = self._find_triples(pattern_ij='-->', pattern_jk='-->', pattern_ik='o-+')
# Run through all appropriate graphical structures
for (A, B, C) in all_appropriate_triples:
# The rule applies to all relevant graphical structures. Therefore, prepare the new link and append it to the output list
link_AC = self._get_link(A, C)
new_link_AC = "-" + link_AC[1] + ">"
out.append(self._get_pair_key_and_new_link(A, C, new_link_AC))
# Return the output list
return out
def _apply_R09(self, voting):
"""Return all orientations implied by orientation rule R-09"""
# Build the output list
out = []
# Find unshielded triples B_1 o--*--o A o--*--> C or B_1 <--*--o A o--*--> C or B_1 <--*-- A o--*--> C
all_appropriate_triples = set(self._find_triples(pattern_ij='o-o', pattern_jk='o->', pattern_ik=''))
all_appropriate_triples = all_appropriate_triples.union(set(self._find_triples(pattern_ij='<-o', pattern_jk='o->', pattern_ik='')))
all_appropriate_triples = all_appropriate_triples.union(set(self._find_triples(pattern_ij='<--', pattern_jk='o->', pattern_ik='')))
# Run through all these triples
for (B_1, A, C) in all_appropriate_triples:
# Check whether A is in SepSet(B_1, C), else the rule does not apply
if not self._B_in_SepSet_AC(B_1, A, C, voting):
continue
# Although we do not yet know whether the rule applies, we here determine the new form of the link from A to C if the rule does apply
link_AC = self._get_link(A, C)
new_link_AC = "-" + link_AC[1] + ">"
pair_key, new_link = self._get_pair_key_and_new_link(A, C, new_link_AC)
# For the search of uncovered potentially directed paths from B_1 to C, determine the initial pattern as dictated by the link from A to B_1
first_link = self._get_link(A, B_1)
if self._match_link(pattern='o*o', link=first_link):
initial_allowed_patterns = ['-->', 'o->', 'o-o']
elif self._match_link(pattern='o->', link=first_link) or self._match_link(pattern='-->', link=first_link):
initial_allowed_patterns = ['-->']
# Find all uncovered potentially directed paths from B_1 to C
uncovered_pd_paths = self._get_potentially_directed_uncovered_paths_fci(B_1, C, initial_allowed_patterns)
# Run through all of these paths and check i) whether the node adjacent to B_1 is non-adjacent to A, ii) whether condition iv) of the rule antecedent is true. If there is any such path, then the link can be oriented
for upd_path in uncovered_pd_paths:
# Is the node adjacent to B_1 non-adjacent to A (this implies that there are at least three nodes on the path, because else the node adjacent to B_1 is C) and is A not part of the path?
if len(upd_path) < 3 or A in upd_path or self._get_link(A, upd_path[1]) != "":
continue
# If the link from A to B_1 is into B_1, condition iv) is true
if first_link[2] == ">":
# Mark the link from A to C for orientation, break the for loop to continue with the next triple
out.append((pair_key, new_link))
break
# If the link from A to B_1 is not into B_1, we need to check whether B_1 is in SepSet(A, X) where X is the node on upd_path next to B_1
if not self._B_in_SepSet_AC(A, B_1, upd_path[1], voting):
# Continue with the next upd_path
continue
# Now check whether condition iv) holds for all triples on upd_path
path_qualifies = True
for i in range(len(upd_path) - 2):
# We consider the unshielded triples upd_path[i] - upd_path[i+1] - upd_path[i+2]
# If the link between upd_path[i] and upd_path[i+1] is into the latter, condition iv) is true
left_link = self._get_link(upd_path[i], upd_path[i+1])
if left_link[2] == ">":
# The path qualifies, break the inner for loop
break
# If not, then we need to continue with checking whether upd_path[i+1] in SepSet(upd_path[i+1], upd_path[i+2])
if not self._B_in_SepSet_AC(upd_path[i], upd_path[i+1], upd_path[i+2], voting):
# The path does not qualify, break the inner for loop
path_qualifies = False
break
# The path qualifies, mark the edge from A to C for orientation and break the outer for loop to continue with the next triple
if path_qualifies:
out.append((pair_key, new_link))
break
# The path does not qualify, continue with the next upd_path
# end for upd_path in uncovered_pd_paths
# end for (B_1, A, C) in all_appropriate_triples
# Return the output list
return out
def _apply_R10(self, voting):
"""Return all orientations implied by orientation rule R-10"""
# Build the output list
out = []
# Find all triples A o--> C <-- P_C
all_appropriate_triples = set(self._find_triples(pattern_ij='o->', pattern_jk='<--', pattern_ik=''))
all_appropriate_triples = all_appropriate_triples.union(set(self._find_triples(pattern_ij='o->', pattern_jk='<--', pattern_ik='***')))
# Collect all triples for the given pair (A, C)
triple_sorting_dict = {}
for (A, C, P_C) in all_appropriate_triples:
if triple_sorting_dict.get((A, C)) is None:
triple_sorting_dict[(A, C)] = [P_C]
else:
triple_sorting_dict[(A, C)].append(P_C)
# Run through all (A, C) pairs
for (A, C) in triple_sorting_dict.keys():
# Find all uncovered potentially directed paths from A to C through any of the P_C nodes
relevant_paths = []
for P_C in triple_sorting_dict[(A, C)]:
for upd_path in self._get_potentially_directed_uncovered_paths_fci(A, P_C, ['-->', 'o->', 'o-o']):
# Run through all of these paths and check i) whether the second to last element is not adjacent to C (this requires the path to have at least three nodes, because else the second to last element would be A), ii) whether the left edge of every 3-node sub-path is into the middle node or, if not, whether the middle node is in the separating set of the two end-point nodes of the 3-node sub-path, and iii) whether C is not an element of the path. If the path meets these conditions, add its second node (the one adjacent to A) to the set second_nodes
if len(upd_path) < 3 or C in upd_path or self._get_link(upd_path[-2], C) != "":
continue
upd_path.append(C)
path_qualifies = True
for i in range(len(upd_path) - 2):
# We consider the unshielded triples upd_path[i] - upd_path[i+1] - upd_path[i+2]
# If the link between upd_path[i] and upd_path[i+1] is into the latter, the path qualifies
left_link = self._get_link(upd_path[i], upd_path[i+1])
if left_link[2] == ">":
# The path qualifies, break the inner for loop
break
# If not, then we need to continue with checking whether upd_path[i+1] in SepSet(upd_path[i+1], upd_path[i+2])
if not self._B_in_SepSet_AC(upd_path[i], upd_path[i+1], upd_path[i+2], voting):
# The path does not qualify, break the inner for loop
path_qualifies = False
break
# The path qualifies, add it to relevant_paths and continue with the next upd_path
if path_qualifies:
relevant_paths.append(upd_path)
# The path does not qualify, continue with the next upd_path
# end for upd_path in self._get_potentially_directed_uncovered_paths_fci(A, P_C, ['-->', 'o->', 'o-o'])
# end for P_C in triple_sorting_dict[(A, C)]
# Find all second nodes on the relevant paths
second_nodes = list({path[1] for path in relevant_paths})
# Check whether there is any pair of non-adjacent nodes in second_nodes, such that A is in their separating set. If yes, mark the link from A to C for orientation
for i, j in product(range(len(second_nodes)), range(len(second_nodes))):
if i < j and self._get_link(second_nodes[i], second_nodes[j]) == "" and self._B_in_SepSet_AC(second_nodes[i], A, second_nodes[j], voting):
# Append new link and break the for loop
link_AC = self._get_link(A, C)
new_link_AC = "-" + link_AC[1] + ">"
out.append(self._get_pair_key_and_new_link(A, C, new_link_AC))
break
# end for (A, C) in triple_sorting_dict.keys()
# Return the output list
return out
########################################################################################################################
########################################################################################################################
########################################################################################################################
def _print_graph_dict(self):
"""Print all links in graph_dict"""
for j in range(self.N):
for ((i, lag_i), link) in self.graph_dict[j].items():
if len(link) > 0 and (lag_i < 0 or i < j):
print("({},{:2}) {} {}".format(i, lag_i, link, (j, 0)))
def _is_smaller(self, X, Y):
"""
A node X is said to be smaller than node Y if
i) X is before Y or
ii) X and Y are contemporaneous and the variable index of X is smaller than that of Y.
Return True if X is smaller than Y, else return False
"""
return (X[1] < Y[1]) or (X[1] == Y[1] and X[0] < Y[0])
def _get_link(self, A, B):
"""Get the current link from node A to B"""
(var_A, lag_A) = A
(var_B, lag_B) = B
if abs(lag_A - lag_B) > self.tau_max:
return ""
elif lag_A <= lag_B:
return self.graph_dict[var_B][(var_A, lag_A - lag_B)]
else:
return self._reverse_link(self.graph_dict[var_A][(var_B, lag_B - lag_A)])
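# Illustrative note (sketch): _get_link((0, -1), (1, 0)) returns self.graph_dict[1][(0, -1)],
# whereas _get_link((1, 0), (0, -1)) returns the reversed string of that same entry; node
# pairs more than tau_max lags apart return ''.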
def _get_non_future_adj(self, node_list):
"""Return all non-future adjacencies of all nodes in node_list"""
# Build the output starting from an empty set
out = set()
# For each node W in node_list ...
for A in node_list:
# Unpack A
(var_A, lag_A) = A
# Add all (current) non-future adjacencies of A to the set out
out = out.union({(var, lag + lag_A) for ((var, lag), link) in self.graph_dict[var_A].items() if len(link) > 0 and lag + lag_A >= -self.tau_max})
# Return the desired set
return out
def _update_val_min(self, X, Y, val):
"""Some conditional independence test for X and Y has given the test statistic value val. Update the val_min dictionary accordingly"""
if X[1] < 0 or X[0] < Y[0]:
self.val_min[Y[0]][X] = min(self.val_min[Y[0]][X], np.abs(val))
else:
self.val_min[X[0]][Y] = min(self.val_min[X[0]][Y], np.abs(val))
def _get_val_min(self, X, Y):
"""Return the value stored in self.val_min for the variable pair (X, Y)"""
if X[1] < 0 or X[0] < Y[0]:
return self.val_min[Y[0]][X]
else:
return self.val_min[X[0]][Y]
def _update_cardinality(self, X, Y, cardinality):
"""X and Y were found conditionally independent given a separating set of cardinality cardinality. Update the self.cardinality accordingly"""
if X[1] < 0 or X[0] < Y[0]:
self.max_cardinality[Y[0]][X] = max(self.max_cardinality[Y[0]][X], cardinality)
else:
self.max_cardinality[X[0]][Y] = max(self.max_cardinality[X[0]][Y], cardinality)
def _update_pval_max(self, X, Y, pval):
"""Some conditional independence test for X and Y has given the p-value val. Update the pval_max dictionary accordingly"""
if X[1] < 0 or X[0] < Y[0]:
self.pval_max[Y[0]][X] = max(self.pval_max[Y[0]][X], pval)
else:
self.pval_max[X[0]][Y] = max(self.pval_max[X[0]][Y], pval)
def _sort_search_set(self, search_set, reference_node):
"""Sort the nodes in search_set by their val_min value with respect to the reference_node. Nodes with higher values appear earlier"""
sort_by = [self._get_val_min(reference_node, node) for node in search_set]
return [x for _, x in sorted(zip(sort_by, search_set), reverse = True)]
def _save_sepset(self, X, Y, Z):
"""Save Z as separating sets of X and Y. Y is assumed to be at lag 0"""
# Unpack X and Y
(i, lag_i) = X
(j, lag_j) = Y
assert lag_j == 0
# Save the sepset
if lag_i < 0 or i < j:
self.sepsets[j][X].add(Z)
else:
self.sepsets[i][Y].add(Z)
def _reverse_link(self, link):
"""Reverse a given link, taking care to replace > with < and vice versa"""
if link == "":
return ""
if link[2] == ">":
left_mark = "<"
else:
left_mark = link[2]
if link[0] == "<":
right_mark = ">"
else:
right_mark = link[0]
return left_mark + link[1] + right_mark
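# Illustrative note (sketch): _reverse_link("o->") == "<-o", _reverse_link("-->") == "<--",
# and _reverse_link("") == "" (the middle mark is kept unchanged).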
def _write_link(self, A, B, new_link, verbosity = 0):
"""Write the information that the link from node A to node B takes the form of new_link into self.graph_dict. Neither is it assumed that at least of the nodes is at lag 0, nor must A be before B. If A and B are contemporaneous, also the link from B to A is written as the reverse of new_link"""
# Unpack A and B
(var_A, lag_A) = A
(var_B, lag_B) = B
# Write the link from A to B
if lag_A < lag_B:
if verbosity >= 1:
print("{:10} ({},{:2}) {:3} ({},{:2}) ==> ({},{:2}) {:3} ({},{:2}) ".format("Writing:", var_A, lag_A - lag_B, self.graph_dict[var_B][(var_A, lag_A - lag_B)], var_B, 0, var_A, lag_A - lag_B, new_link, var_B, 0))
self.graph_dict[var_B][(var_A, lag_A - lag_B)] = new_link
elif lag_A == lag_B:
if verbosity >= 1:
print("{:10} ({},{:2}) {:3} ({},{:2}) ==> ({},{:2}) {:3} ({},{:2}) ".format("Writing:", var_A, lag_A - lag_B, self.graph_dict[var_B][(var_A, 0)], var_B, 0, var_A, 0, new_link, var_B, 0))
print("{:10} ({},{:2}) {:3} ({},{:2}) ==> ({},{:2}) {:3} ({},{:2}) ".format("Writing:", var_B, 0, self.graph_dict[var_A][(var_B, 0)], var_A, 0, var_B, 0, self._reverse_link(new_link), var_A, 0))
self.graph_dict[var_B][(var_A, 0)] = new_link
self.graph_dict[var_A][(var_B, 0)] = self._reverse_link(new_link)
else:
if verbosity >= 1:
print("{:10} ({},{:2}) {:3} ({},{:2}) ==> ({},{:2}) {:3} ({},{:2}) ".format("Writing:", var_B, lag_B - lag_A, self.graph_dict[var_A][(var_B, lag_B - lag_A)], var_A, 0, var_B, lag_B - lag_A, self._reverse_link(new_link), var_A, 0))
self.graph_dict[var_A][(var_B, lag_B - lag_A)] = self._reverse_link(new_link)
def _get_sepsets(self, A, B):
"""For two non-adjacent nodes, get the their separating stored in self.sepsets"""
(var_A, lag_A) = A
(var_B, lag_B) = B
def _shift(Z, lag_B):
return frozenset([(var, lag + lag_B) for (var, lag) in Z])
if lag_A < lag_B:
out = {(_shift(Z, lag_B), status) for (Z, status) in self.sepsets[var_B][(var_A, lag_A - lag_B)]}
elif lag_A > lag_B:
out = {(_shift(Z, lag_A), status) for (Z, status) in self.sepsets[var_A][(var_B, lag_B - lag_A)]}
else:
out = {(_shift(Z, lag_A), status) for (Z, status) in self.sepsets[max(var_A, var_B)][(min(var_A, var_B), 0)]}
return out
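# Illustrative sketch (added for clarity, not part of the original code): separating sets
# are stored relative to a lag-0 node, so _get_sepsets shifts the stored lags back to the
# queried pair. If self.sepsets[2][(0, -1)] contains (frozenset({(1, 0)}), "wm"), then
# querying A = (0, -2), B = (2, -1) would be expected to return {(frozenset({(1, -1)}), "wm")},
# i.e. every lag in the stored set is shifted by the lag of the later of the two nodes.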
def _initialize_full_graph(self):
"""Initialize self.graph_full_dict. This nested dictionary represents the graph and as opposed to self.graph_dict also contains forward links"""
# Build from an empty nested dictionary
self.graph_full_dict = {j: {} for j in range(self.N)}
# Run through the entire nested dictionary self.graph_dict
for j in range(self.N):
for ((var, lag), link) in self.graph_dict[j].items():
if link != "":
# Add non-future adjacencies
self.graph_full_dict[j][(var, lag)] = link
# Add the future adjacencies
if lag < 0:
self.graph_full_dict[var][(j, -lag)] = self._reverse_link(link)
# Return nothing
return None
def _get_pair_key_and_new_link(self, A, B, link_AB):
"""The link from A to B takes the form link_AB. Bring this information into a form appropriate for the output of rule applications"""
(var_A, lag_A) = A
(var_B, lag_B) = B
if lag_A <= lag_B:
return ((var_A, var_B, lag_A - lag_B), link_AB)
elif lag_A > lag_B:
return ((var_B, var_A, lag_B - lag_A), self._reverse_link(link_AB))
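# Illustrative sketch (added for clarity, not part of the original code): the returned pair
# key always lists the earlier node first and expresses the lag relative to the later node.
# One would therefore expect, for example:
#   self._get_pair_key_and_new_link((0, -1), (2, 0), "o->")  ==  ((0, 2, -1), "o->")
#   self._get_pair_key_and_new_link((2, 0), (0, -1), "o->")  ==  ((0, 2, -1), "<-o")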
def _match_link(self, pattern, link):
"""Matches pattern including wildcards with link."""
if pattern == '' or link == '':
return True if pattern == link else False
else:
left_mark, middle_mark, right_mark = pattern
if left_mark != '*':
if left_mark == '+':
if link[0] not in ['<', 'o']: return False
else:
if link[0] != left_mark: return False
if right_mark != '*':
if right_mark == '+':
if link[2] not in ['>', 'o']: return False
else:
if link[2] != right_mark: return False
if middle_mark != '*' and link[1] != middle_mark: return False
return True
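# Illustrative sketch (added for clarity, not part of the original code): '*' matches any
# mark, while '+' matches '<' or 'o' on the left and '>' or 'o' on the right. Expected
# behaviour under this convention:
#   self._match_link("*->", "o->")  ==  True    # left wildcard
#   self._match_link("+->", "o->")  ==  True    # 'o' is allowed by '+'
#   self._match_link("+->", "-->")  ==  False   # '-' is not allowed by '+'
#   self._match_link("o?>", "o->")  ==  False   # middle marks differ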
def _dict2graph(self):
"""Convert self.graph_dict to graph array of shape (N, N, self.tau_max + 1)."""
graph = np.zeros((self.N, self.N, self.tau_max + 1), dtype='U3')
for j in range(self.N):
for adj in self.graph_dict[j]:
(i, lag_i) = adj
graph[i, j, abs(lag_i)] = self.graph_dict[j][adj]
return graph
def _find_adj(self, graph, node, patterns, exclude=None, ignore_time_bounds=True):
"""Find adjacencies of node matching patterns."""
# Setup
i, lag_i = node
if exclude is None: exclude = []
if type(patterns) == str:
patterns = [patterns]
# Init
adj = []
# Find adjacencies going forward/contemp
for k, lag_ik in zip(*np.where(graph[i,:,:])):
matches = [self._match_link(patt, graph[i, k, lag_ik]) for patt in patterns]
if np.any(matches):
match = (k, lag_i + lag_ik)
if match not in adj and (k, lag_i + lag_ik) not in exclude and (-self.tau_max <= lag_i + lag_ik <= 0 or ignore_time_bounds):
adj.append(match)
# Find adjacencies going backward/contemp
for k, lag_ki in zip(*np.where(graph[:,i,:])):
matches = [self._match_link(self._reverse_link(patt), graph[k, i, lag_ki]) for patt in patterns]
if np.any(matches):
match = (k, lag_i - lag_ki)
if match not in adj and (k, lag_i - lag_ki) not in exclude and (-self.tau_max <= lag_i - lag_ki <= 0 or ignore_time_bounds):
adj.append(match)
return adj
def _is_match(self, graph, X, Y, pattern_ij):
"""Check whether the link between X and Y agrees with pattern_ij"""
(i, lag_i) = X
(j, lag_j) = Y
tauij = lag_j - lag_i
if abs(tauij) >= graph.shape[2]:
return False
return ((tauij >= 0 and self._match_link(pattern_ij, graph[i, j, tauij])) or
(tauij < 0 and self._match_link(self._reverse_link(pattern_ij), graph[j, i, abs(tauij)])))
def _find_triples(self, pattern_ij, pattern_jk, pattern_ik):
"""Find triples (i, lag_i), (j, lag_j), (k, lag_k) that match patterns."""
# Graph as array makes it easier to search forward AND backward in time
graph = self._dict2graph()
# print(graph[:,:,0])
# print(graph[:,:,1])
# print("matching ", pattern_ij, pattern_jk, pattern_ik)
matched_triples = []
for i in range(self.N):
# Set lag_i = 0 without loss of generality, will be adjusted at end
lag_i = 0
adjacencies_i = self._find_adj(graph, (i, lag_i), pattern_ij)
# print(i, adjacencies_i)
for (j, lag_j) in adjacencies_i:
adjacencies_j = self._find_adj(graph, (j, lag_j), pattern_jk,
exclude=[(i, lag_i)])
# print(j, adjacencies_j)
for (k, lag_k) in adjacencies_j:
if self._is_match(graph, (i, lag_i), (k, lag_k), pattern_ik):
# Now use stationarity and shift triple such that the right-most
# node (on a line t=..., -2, -1, 0, 1, 2, ...) is at lag 0
righmost_lag = max(lag_i, lag_j, lag_k)
match = ((i, lag_i - righmost_lag),
(j, lag_j - righmost_lag),
(k, lag_k - righmost_lag))
largest_lag = min(lag_i - righmost_lag, lag_j - righmost_lag, lag_k - righmost_lag)
if match not in matched_triples and \
-self.tau_max <= largest_lag <= 0:
matched_triples.append(match)
return matched_triples
def _find_quadruples(self, pattern_ij, pattern_jk, pattern_ik,
pattern_il, pattern_jl, pattern_kl):
"""Find quadruples (i, lag_i), (j, lag_j), (k, lag_k), (l, lag_l) that match patterns."""
# We assume this later
assert pattern_il != ''
# Graph as array makes it easier to search forward AND backward in time
graph = self._dict2graph()
matched_quadruples = []
# First get triple ijk
ijk_triples = self._find_triples(pattern_ij, pattern_jk, pattern_ik)
for triple in ijk_triples:
# Unpack triple
(i, lag_i), (j, lag_j), (k, lag_k) = triple
# Search through adjacencies
adjacencies = set(self._find_adj(graph, (i, lag_i), pattern_il,
exclude=[(j, lag_j), (k, lag_k)]))
if pattern_jl != '':
adjacencies = adjacencies.intersection(set(
self._find_adj(graph, (j, lag_j), pattern_jl,
exclude=[(i, lag_i), (k, lag_k)])))
else:
adjacencies = set([adj for adj in adjacencies
if self._is_match(graph, (j, lag_j), adj, '')])
if pattern_kl != '':
adjacencies = adjacencies.intersection(set(
self._find_adj(graph, (k, lag_k), pattern_kl,
exclude=[(i, lag_i), (j, lag_j)])))
else:
adjacencies = set([adj for adj in adjacencies
if self._is_match(graph, (k, lag_k), adj, '')])
for adj in adjacencies:
(l, lag_l) = adj
# Now use stationarity and shift quadruple such that the right-most
# node (on a line t=..., -2, -1, 0, 1, 2, ...) is at lag 0
righmost_lag = max(lag_i, lag_j, lag_k, lag_l)
match = ((i, lag_i - righmost_lag),
(j, lag_j - righmost_lag),
(k, lag_k - righmost_lag),
(l, lag_l - righmost_lag),
)
largest_lag = min(lag_i - righmost_lag,
lag_j - righmost_lag,
lag_k - righmost_lag,
lag_l - righmost_lag,
)
if match not in matched_quadruples and \
-self.tau_max <= largest_lag <= 0:
matched_quadruples.append(match)
return matched_quadruples
def _get_R4_discriminating_paths(self, triple, max_length = np.inf):
"""Find all discriminating paths starting from triple"""
def _search(path_taken, max_length):
# Get the last visited node and its link to Y
last_node = path_taken[-1]
link_to_Y = self._get_link(last_node, path_taken[0])
# Base Case: If the current path is a discriminating path, return it as single entry of a list
if len(path_taken) > 3 and link_to_Y == "":
return [path_taken]
# If the current path is not a discriminating path, continue the path
paths = []
if self._get_link(last_node, path_taken[-2])[0] == "<" and link_to_Y == "-->" and len(path_taken) < max_length:
# Search through all adjacencies of the last node
for (var, lag) in self.graph_full_dict[last_node[0]].keys():
# Build the next node and get its link to the previous
next_node = (var, lag + last_node[1])
next_link = self._get_link(next_node, last_node)
# Check whether this node can be visited
if next_node[1] <= 0 and next_node[1] >= -self.tau_max and next_node not in path_taken and self._match_link("*->", next_link):
# Recursive call
paths.extend(_search(path_taken[:] + [next_node], max_length))
# Return the list of discriminating paths
return paths
# Unpack the triple
(W, V, Y) = triple
# Return all discriminating paths starting at this triple
return _search([Y, V, W], max_length)
def _get_potentially_directed_uncovered_paths_fci(self, start_node, end_node, initial_allowed_patterns):
"""Find all potentiall directed uncoverged paths from start_node to end_node whose first link takes one the forms specified by initial_allowed_patters"""
assert start_node != end_node
# Function for recursive search of potentially directed uncovered paths
def _search(end_node, path_taken, allowed_patterns):
# print(path_taken)
# List for outputting potentially directed uncovered paths
paths = []
# The last visited node becomes the new start_node
start_node = path_taken[-1]
# Base case: End node has been reached
if start_node == end_node:
paths.append(path_taken)
# Recursive build case
else:
# Run through the adjacencies of start_node
#for next_node in self.graph_full_dict[start_node[0]]:
for (var, lag) in self.graph_full_dict[start_node[0]].keys():
next_node = (var, lag + start_node[1])
# Consider only nodes that ...
# ... are within the allowed time frame
if next_node[1] < -self.tau_max or next_node[1] > 0:
continue
# ... have not been visited yet
if next_node in path_taken:
continue
# ... are non-adjacent to the node before start_node
if len(path_taken) >= 2 and self._get_link(path_taken[-2], next_node) != "":
continue
# ... are not part of an ambiguous triple
if len(path_taken) >= 2 and ((path_taken[-2], start_node, next_node) in self.ambiguous_triples or (next_node, start_node, path_taken[-2]) in self.ambiguous_triples):
continue
# ... whose link with start_node matches one of the allowed patterns
link = self._get_link(start_node, next_node)
if not any([self._match_link(pattern = pattern, link = link) for pattern in allowed_patterns]):
continue
# Determine the allowed patterns for the next recursive call
if self._match_link(pattern='o-o', link=link):
new_allowed_patters = ["o-o", "o->", "-->"]
elif self._match_link(pattern='o->', link=link) or self._match_link(pattern='-->', link=link):
new_allowed_patters = ["-->"]
# Determine the new path taken
new_path_taken = path_taken[:] + [next_node]
# Recursive call
paths.extend(_search(end_node, new_path_taken, new_allowed_patters))
# Output list of potentially directed uncovered paths
return paths
# end def _search(end_node, path_taken, allowed_patterns)
# Output potentially directed uncovered paths
paths = _search(end_node, [start_node], initial_allowed_patterns)
return [path for path in paths if len(path) > 2]
def _dict_to_matrix(self, val_dict, tau_max, n_vars, default=1):
"""Convert a dictionary to matrix format"""
matrix = np.ones((n_vars, n_vars, tau_max + 1))
matrix *= default
for j in val_dict.keys():
for link in val_dict[j].keys():
k, tau = link
if tau == 0:
matrix[k, j, 0] = matrix[j, k, 0] = val_dict[j][link]
else:
matrix[k, j, abs(tau)] = val_dict[j][link]
return matrix
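# Illustrative sketch (added for clarity, not part of the original code): contemporaneous
# entries are written symmetrically, lagged entries only one way. For n_vars = 2 and
# tau_max = 1 one would expect, for example:
#   val_dict = {0: {}, 1: {(0, 0): 0.5, (0, -1): 0.3}}
#   m = self._dict_to_matrix(val_dict, tau_max=1, n_vars=2, default=0)
#   # m[0, 1, 0] == m[1, 0, 0] == 0.5   and   m[0, 1, 1] == 0.3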
| 98,763 | 45.985728 | 672 | py |
| correlate | correlate-master/causal_discovery/LPCMCI/lpcmci.py |
from itertools import product, combinations
import numpy as np
class LPCMCI():
r"""
This class implements the LPCMCI algorithm for constraint-based causal discovery on stationary times series with
latent confounders and without selection variables, which we introduce in the main text of this submission.
Parameters passed to the constructor:
- dataframe:
Tigramite dataframe object that contains the time series dataset \bold{X}
- cond_ind_test:
A conditional independence test object that specifies which conditional independence test CI is to be used
Parameters passed to self.run_lpcmci():
Note: Not all parameters are used for the simulation studies.
Some are temporary and might be removed in future versions
- tau_max:
The maximum considered time lag tau_max
- pc_alpha:
The significance level \alpha of conditional independence tests
- n_preliminary_iterations:
Determines the number of iterations in the preliminary phase of LPCMCI. In the paper this corresponds to the 'k' in LPCMCI(k)
- max_cond_px:
Consider a pair of variables (X^i_{t-\tau}, X^j_t) with \tau > 0. In Algorithm S2 (here this is self._run_ancestral_removal_phase()), the algorithm does not test for conditional independence given subsets of apds_t(X^i_{t-\tau}, X^j_t, C(G)) of cardinality higher than max_cond_px. In Algorithm S3 (here this is self._run_non_ancestral_removal_phase()), the algorithm does not test for conditional independence given subsets of napds_t(X^i_{t-\tau}, X^j_t, C(G)) of cardinality higher than max_cond_px.
- max_p_global:
Restricts all conditional independence tests to conditioning sets with cardinality smaller or equal to max_p_global
- max_p_non_ancestral:
Restricts all conditional independence tests in the second removal phase
(here this is self._run_dsep_removal_phase())
to conditioning sets with cardinality smaller or equal to max_p_global
- max_q_global:
For each ordered pair (X^i_{t-\tau}, X^j_t) of adjacent variables and for each cardinality of the conditioning sets test at most max_q_global many conditioning sets (when summing over all tested cardinalities more than max_q_global tests may be made)
- max_pds_set:
In Algorithm S3 (here this is self._run_non_ancestral_removal_phase()), the algorithm tests for conditional independence given subsets of the relevant napds_t sets. If for a given link the set napds_t(X^j_t, X^i_{t-\tau}, C(G)) has more than max_pds_set many elements (or, if the link is also tested in the opposite direction, if napds_t(X^i_{t-\tau}, X^j_t, C(G)) has more than max_pds_set elements), this link is not tested.
- prelim_with_collider_rules:
If True: As in pseudocode
If False: Line 22 of Algorithm S2 is replaced by line 18 of Algorithm S2 when Algorithm S2 is called from the preliminary phase (not in the last application of Algorithm S2 directly before Algorithm S3 is applied)
- parents_of_lagged:
If True: As in pseudocode
If False: The default conditioning set is pa(X^j_t, C(G)) rather than pa({X^j_t, X^i_{t-\tau}, C(G)) for tau > 0
- prelim_only:
If True, stop after the preliminary phase. Can be used for detailed performance analysis
- break_once_separated:
If True: As in pseudocode
If False: The break commands are removed from Algorithms S2 and S3
- no_non_ancestral_phase:
If True, do not execute Algorithm S3. Can be used for detailed performance analysis
- use_a_pds_t_for_majority:
If True: As in pseudocode
If False: The search for separating sets instructed by the majority rule is made given subsets adj(X^j_t, C(G)) rather than subsets of apds_t(X^j_t, X^i_{t-\tau}, C(G))
- orient_contemp:
If orient_contemp == 1: As in pseudocode of Algorithm S2
If orient_contemp == 2: Also orient contemporaneous links in line 18 of Algorithm S2
If orient_contemp == 0: Do not orient contemporaneous links, not even in line 22 of Algorithm S2
- update_middle_marks:
If True: As in pseudocode of Algorithms S2 and S3
If False: The MMR rule is not applied
- prelim_rules:
If prelim_rules == 1: As in pseudocode of Algorithm S2
If prelim_rules == 0: Exclude rules R9^prime and R10^\prime from line 18 in Algorithm S2
- fix_all_edges_before_final_orientation:
When one of max_p_global, max_p_non_ancestral, max_q_global or max_pds_set is not np.inf, the algorithm may terminate although not all middle marks are empty. All orientation rules are nevertheless sound, since the rules always check for the appropriate middle marks. If fix_all_edges_before_final_orientation is True, all middle marks are set to the empty middle mark by force, followed by another application of the rules.
- auto_first:
If True: As in pseudocode of Algorithms S2 and S3
If False: Autodependency links are not prioritized even before contemporaneous links
- remember_only_parents:
If True: As in pseudocode of Algorithm 1
If False: If X^i_{t-\tau} has been marked as ancestor of X^j_t at any point of a preliminary iteration but the link between X^i_{t-\tau} and X^j_t was removed later, the link is nevertheless initialized with a tail at X^i_{t-\tau} in the re-initialization
- no_apr:
If no_apr == 0: As in pseudocode of Algorithms S2 and S3
If no_apr == 1: The APR is not applied by Algorithm S2, except in line 22 of its last call directly before the call of Algorithm S3
If no_apr == 2: The APR is never applied
- verbosity:
Controls the verbose output self.run_lpcmci() and the function it calls.
Return value of self.run_lpcmci():
The estimated graph in the form of a link matrix. This is a numpy array of shape (self.N, self.N, self.tau_max + 1), where the entry array[i, j, \tau] is a string that visualizes the estimated link from X^i_{t-\tau} to X^j_t. For example, if array[0, 2, 1] = 'o->', then the estimated graph contains the link X^0_{t-1} o-> X^2_t. This numpy array is also saved as instance attribute self.graph. Note that self.N is the number of observed time series and self.tau_max the maximal considered time lag.
A note on middle marks:
For convenience (to have strings of the same lengths) we here internally denote the empty middle mark by '-'. For post-processing purposes all middle marks are set to the empty middle mark (here '-') in line 224 (there can be non-empty middle marks only when one of max_p_global, max_p_non_ancestral, max_q_global or max_pds_set is not np.inf), but if verbosity >= 1 a graph with the middle marks will be printed out before.
A note on wildcards:
The middle mark wildcard \ast and the edge mark wildcard are here represented as *, the edge mark wildcard \star as +
"""
def __init__(self, dataframe, cond_ind_test):
"""Class constructor. Store:
i) data
ii) conditional independence test object
iii) some instance attributes"""
# Save the time series data that the algorithm operates on
self.dataframe = dataframe
# Set the conditional independence test to be used
self.cond_ind_test = cond_ind_test
self.cond_ind_test.set_dataframe(self.dataframe)
# Store the shape of the data in the T and N variables
self.T, self.N = self.dataframe.values.shape
def run_lpcmci(self,
external_independencies,
external_dependencies,
tau_max=1,
pc_alpha=0.05,
n_preliminary_iterations=1,
max_cond_px=0,
max_p_global=np.inf,
max_p_non_ancestral=np.inf,
max_q_global=np.inf,
max_pds_set=np.inf,
prelim_with_collider_rules=True,
parents_of_lagged=True,
prelim_only=False,
break_once_separated=True,
no_non_ancestral_phase=False,
use_a_pds_t_for_majority=True,
orient_contemp=1,
update_middle_marks=True,
prelim_rules=1,
fix_all_edges_before_final_orientation=True,
auto_first=True,
remember_only_parents=True,
no_apr=0,
verbosity=0,
):
"""Run LPCMCI on the dataset and with the conditional independence test passed to the class constructor and with the options passed to this function."""
#######################################################################################################################
#######################################################################################################################
# Step 0: Initializations
self._initialize(external_independencies, external_dependencies, tau_max, pc_alpha, n_preliminary_iterations,
max_cond_px,
max_p_global, max_p_non_ancestral,
max_q_global, max_pds_set, prelim_with_collider_rules, parents_of_lagged, prelim_only,
break_once_separated, no_non_ancestral_phase, use_a_pds_t_for_majority, orient_contemp,
update_middle_marks, prelim_rules, fix_all_edges_before_final_orientation, auto_first,
remember_only_parents, no_apr, verbosity)
#######################################################################################################################
#######################################################################################################################
# Step 1: Preliminary phases
for i in range(self.n_preliminary_iterations):
# Verbose output
if self.verbosity >= 1:
print("\n=======================================================")
print("=======================================================")
print("Starting preliminary phase {:2}".format(i + 1))
# In the preliminary phases, auto-lag links are tested with first priority. Among the auto-lag links,
# different lags are not distinguished. All other links have lower priority, among which those with shorter
# lags have higher priority
self._run_ancestral_removal_phase(prelim=True)
# Verbose output
if self.verbosity >= 1:
print("\nPreliminary phase {:2} complete".format(i + 1))
print("\nGraph:\n--------------------------------")
self._print_graph_dict()
print("--------------------------------")
# When the option self.prelim_only is chosen, do not re-initialize in the last iteration
if i == self.n_preliminary_iterations - 1 and self.prelim_only:
break
# Remember ancestorships, re-initialize and re-apply the remembered ancestorships
def_ancs = self.def_ancs
if self.remember_only_parents:
smaller_def_ancs = dict()
for j in range(self.N):
smaller_def_ancs[j] = {(i, lag_i) for (i, lag_i) in def_ancs[j] if
self._get_link((i, lag_i), (j, 0)) != ""}
def_ancs = smaller_def_ancs
self._initialize_run_memory(external_independencies=external_independencies,
external_dependencies=external_dependencies)
self._apply_new_ancestral_information(None, def_ancs)
#######################################################################################################################
#######################################################################################################################
# Step 2: Full ancestral phase
if not self.prelim_only:
# Verbose output
if self.verbosity >= 1:
print("\n=======================================================")
print("=======================================================")
print("Starting final ancestral phase")
# In the standard ancestral phase, links are prioritized in the same way as in the preliminary phases
self._run_ancestral_removal_phase()
# Verbose output
if self.verbosity >= 1:
print("\nFinal ancestral phase complete")
print("\nGraph:\n--------------------------------")
self._print_graph_dict()
print("--------------------------------")
#######################################################################################################################
#######################################################################################################################
# Step 3: Non-ancestral phase
if (not self.prelim_only) and (not self.no_non_ancestral_phase):
# Verbose output
if self.verbosity >= 1:
print("\n=======================================================")
print("=======================================================")
print("Starting non-ancestral phase")
# In the non-ancestral phase, large lags are prioritized
self._run_non_ancestral_removal_phase()
# Verbose output
if self.verbosity >= 1:
print("\nNon-ancestral phase complete")
print("\nGraph:\n--------------------------------")
self._print_graph_dict()
print("--------------------------------")
if self.fix_all_edges_before_final_orientation:
self._fix_all_edges()
self._run_orientation_phase(rule_list=self._rules_all, only_lagged=False)
#######################################################################################################################
#######################################################################################################################
# Verbose output
if self.verbosity >= 1:
print("\nLPCMCI has converged")
print("\nFinal graph:\n--------------------------------")
print("--------------------------------")
self._print_graph_dict()
print("--------------------------------")
print("--------------------------------\n")
print("Max search set: {}".format(self.max_na_search_set_found))
print("Max na-pds set: {}\n".format(self.max_na_pds_set_found))
# Post-processing
self._fix_all_edges()
self.graph = self._dict2graph()
self.val_min_matrix = self._dict_to_matrix(self.val_min, self.tau_max, self.N, default=0)
self.cardinality_matrix = self._dict_to_matrix(self.max_cardinality, self.tau_max, self.N, default=0)
# Return the estimated graph
return self.graph
def _initialize(self, external_independencies, external_dependencies, tau_max, pc_alpha, n_preliminary_iterations,
max_cond_px,
max_p_global, max_p_non_ancestral,
max_q_global, max_pds_set, prelim_with_collider_rules, parents_of_lagged, prelim_only,
break_once_separated, no_non_ancestral_phase, use_a_pds_t_for_majority, orient_contemp,
update_middle_marks, prelim_rules, fix_all_edges_before_final_orientation, auto_first,
remember_only_parents, no_apr, verbosity):
"""Function for
i) saving the arguments passed to self.run_lpcmci() as instance attributes
ii) initializing various memory variables for storing the current graph, sepsets etc.
"""
# Save the arguments passed to self.run_lpcmci()
self.tau_max = tau_max
self.pc_alpha = pc_alpha
self.n_preliminary_iterations = n_preliminary_iterations
self.max_cond_px = max_cond_px
self.max_p_global = max_p_global
self.max_p_non_ancestral = max_p_non_ancestral
self.max_q_global = max_q_global
self.max_pds_set = max_pds_set
self.prelim_with_collider_rules = prelim_with_collider_rules
self.parents_of_lagged = parents_of_lagged
self.prelim_only = prelim_only
self.break_once_separated = break_once_separated
self.no_non_ancestral_phase = no_non_ancestral_phase
self.use_a_pds_t_for_majority = use_a_pds_t_for_majority
self.orient_contemp = orient_contemp
self.update_middle_marks = update_middle_marks
self.prelim_rules = prelim_rules
self.fix_all_edges_before_final_orientation = fix_all_edges_before_final_orientation
self.auto_first = auto_first
self.remember_only_parents = remember_only_parents
self.no_apr = no_apr
self.verbosity = verbosity
# Rules to be executed at the end of a preliminary phase
self._rules_prelim_final = [["APR"], ["ER-08"], ["ER-02"], ["ER-01"], ["ER-09"], ["ER-10"]]
# Rules to be executed within the while loop of a preliminary phase
self._rules_prelim = [["APR"], ["ER-08"], ["ER-02"],
["ER-01"]] if self.prelim_rules == 0 else self._rules_prelim_final
# Full list of all rules
self._rules_all = [["APR"], ["ER-08"], ["ER-02"], ["ER-01"], ["ER-00-d"], ["ER-00-c"], ["ER-03"], ["R-04"],
["ER-09"], ["ER-10"], ["ER-00-b"], ["ER-00-a"]]
# Initialize various memory variables for storing the current graph, sepsets etc.
self._initialize_run_memory(external_independencies=external_independencies,
external_dependencies=external_dependencies)
# Return
return True
def orient_with_interv_data(self, interv_independencies, interv_dependencies):
"""
chrei:
For every item in interv_independencies, mark the cause as a non-ancestor of the effect on the corresponding link;
for every item in interv_dependencies, orient the corresponding link as a directed edge from cause to effect.
If A and B are contemporaneous, the link from B to A is also written as the reverse.
"""
# independencies
if interv_independencies is not None and len(interv_independencies) > 0:
for independency in interv_independencies:
eff = (independency[0], independency[2])
cause = (independency[1], 0)
(var_cause, lag_cause) = cause
(var_eff, lag_eff) = eff
# if self.graph_dict[var_eff][(var_cause, lag_cause - lag_eff)] != "":
if self.graph_dict[var_eff][(var_cause, lag_cause - lag_eff)][0] in ["o"]:
self.graph_dict[var_eff][(var_cause, lag_cause - lag_eff)] = "<" + str(
self.graph_dict[var_eff][(var_cause, lag_cause - lag_eff)][1:])
# If A and B are contemporaneous, also the link from B to A is written as the reverse
if lag_eff == 0:
self.graph_dict[var_cause][(var_eff, 0)] = str(
self.graph_dict[var_cause][(var_eff, 0)][:2]) + ">"
else:
raise ValueError("orient with_interv_data: unexpected edgemark. expected o but is:",
self.graph_dict[var_eff][(var_cause, lag_cause - lag_eff)][0])
# dependencies
if interv_dependencies is not None and len(interv_dependencies) > 0:
for dependency in interv_dependencies:
eff = (dependency[0], dependency[2])
cause = (dependency[1], 0)
(var_cause, lag_cause) = cause
(var_eff, lag_eff) = eff
# if self.graph_dict[var_eff][(var_cause, lag_cause - lag_eff)] != "":
if self.graph_dict[var_eff][(var_cause, lag_cause - lag_eff)][0] in ["o"] and \
self.graph_dict[var_eff][(var_cause, lag_cause - lag_eff)][2] in ["o", ">"]:
self.graph_dict[var_eff][(var_cause, lag_cause - lag_eff)] = "-" + str(
self.graph_dict[var_eff][(var_cause, lag_cause - lag_eff)][1] + ">")
# If A and B are contemporaneous, also the link from B to A is written as the reverse
if lag_eff == 0:
self.graph_dict[var_cause][(var_eff, 0)] = "<"+ str(
self.graph_dict[var_cause][(var_eff, 0)][1]) + "-"
else:
raise ValueError("orient with_interv_data: unexpected edgemark. expected o but is:",
self.graph_dict[var_eff][(var_cause, lag_cause - lag_eff)][0])
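# Illustrative sketch (added for clarity, not part of the original code): with the
# convention graph_dict[j][(i, -tau)] = link from X^i_{t-tau} to X^j_t, an external
# independency entry (2, 0, 1) refers to the link from variable 0 at lag 1 into variable 2
# and would be expected to turn an initial 'o?>' into '<?>' (variable 0 is marked as a
# non-ancestor of variable 2). A contemporaneous external dependency entry (2, 0, 0)
# would be expected to turn 'o?o' into '-?>' and the mirror entry graph_dict[0][(2, 0)]
# into '<?-'.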
def _initialize_run_memory(self, external_independencies, external_dependencies):
"""Function for initializing various memory variables for storing the current graph, sepsets etc."""
# Initialize the nested dictionary for storing the current graph.
# Syntax: self.graph_dict[j][(i, -tau)] gives the string representing the link from X^i_{t-tau} to X^j_t
self.graph_dict = {}
for j in range(self.N):
self.graph_dict[j] = {(i, 0): "o?o" for i in range(self.N) if j != i}
if self.max_cond_px == 0 and self.update_middle_marks:
self.graph_dict[j].update(
{(i, -tau): "oL>" for i in range(self.N) for tau in range(1, self.tau_max + 1)})
else:
self.graph_dict[j].update(
{(i, -tau): "o?>" for i in range(self.N) for tau in range(1, self.tau_max + 1)})
# chrei:
self.orient_with_interv_data(external_independencies, external_dependencies)
# Initialize the nested dictionary for storing separating sets
# Syntax: self.sepsets[j][(i, -tau)] stores separating sets of X^i_{t-tau} to X^j_t. For tau = 0, i < j.
self.sepsets = {
j: {(i, -tau): set() for i in range(self.N) for tau in range(self.tau_max + 1) if (tau > 0 or i < j)} for j
in range(self.N)}
# Initialize dictionaries for storing known ancestorships, non-ancestorships, and ambiguous ancestorships
# Syntax: self.def_ancs[j] contains the set of all known ancestors of X^j_t. Equivalently for the others
self.def_ancs = {j: set() for j in range(self.N)}
self.def_non_ancs = {j: set() for j in range(self.N)}
self.ambiguous_ancestorships = {j: set() for j in range(self.N)}
# Initialize nested dictionaries for saving the minimum test statistic among all conditional independence tests
# of a given pair of variables, the maximum p-values, as well as the maximal cardinality of the known separating
# sets.
# Syntax: As for self.sepsets
self.val_min = {
j: {(i, -tau): float("inf") for i in range(self.N) for tau in range(self.tau_max + 1) if (tau > 0 or i < j)}
for j in range(self.N)}
self.pval_max = {
j: {(i, -tau): 0 for i in range(self.N) for tau in range(self.tau_max + 1) if (tau > 0 or i < j)} for j in
range(self.N)}
self.max_cardinality = {
j: {(i, -tau): 0 for i in range(self.N) for tau in range(self.tau_max + 1) if (tau > 0 or i < j)} for j in
range(self.N)}
# Initialize a nested dictionary for caching na-pds-sets
# Syntax: As for self.sepsets
self._na_pds_t = {(j, -tau_j): {} for j in range(self.N) for tau_j in range(self.tau_max + 1)}
# Initialize a variable for remembering the maximal cardinality among all calculated na-pds-sets, as well as the maximal cardinality of any search set in the non-ancestral phase
self.max_na_search_set_found = -1
self.max_na_pds_set_found = -1
# Return
return True
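# Illustrative sketch (added for clarity, not part of the original code): for N = 2,
# tau_max = 1 and the default options (max_cond_px = 0, update_middle_marks = True),
# the initial graph dictionary, before orient_with_interv_data is applied, would be
#   self.graph_dict[0] == {(1, 0): "o?o", (0, -1): "oL>", (1, -1): "oL>"}
#   self.graph_dict[1] == {(0, 0): "o?o", (0, -1): "oL>", (1, -1): "oL>"}
# i.e. contemporaneous links start as 'o?o' and lagged links as 'oL>'.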
def _run_ancestral_removal_phase(self, prelim=False):
"""Run an ancestral edge removal phase, this is Algorithm S2"""
# Iterate until convergence
# p_pc is the cardinality of the non-default part of the conditioning sets.
# The full conditioning sets may have higher cardinality due to default conditioning on known parents
p_pc = 0
while_broken = False
while True:
##########################################################################################################
### Run the next removal iteration #######################################################################
# Force-quit the while loop when p_pc exceeds the limit set by self.max_p_global
if p_pc > self.max_p_global:
while_broken = True
break
# Verbose output
if self.verbosity >= 1:
if p_pc == 0:
print("\nStarting test phase\n")
print("p = {}".format(p_pc))
# Variables to memorize the occurrence and absence of certain events in the below edge removal phase
has_converged = True
any_removal = False
# Remember edges for which the separating set search is aborted due to max_q_global
self._cannot_mark = set()
# Generate the prioritized link list
if self.auto_first:
link_list = [product(range(self.N), range(-self.tau_max, 0))]
link_list = link_list + [product(range(self.N), range(self.N), range(-lag, -lag + 1)) for lag in
range(0, self.tau_max + 1)]
else:
link_list = [product(range(self.N), range(self.N), range(-lag, -lag + 1)) for lag in
range(0, self.tau_max + 1)]
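# Illustrative sketch (added for clarity, not part of the original code): with
# auto_first = True, N = 2 and tau_max = 1, the prioritized link_list unrolls to the
# auto-lag pairs first, followed by the lag-0 and then the lag-1 triples:
#   [(0, -1), (1, -1)]                                  # decoded as X = (i, -1), Y = (i, 0)
#   [(0, 0, 0), (0, 1, 0), (1, 0, 0), (1, 1, 0)]        # contemporaneous pairs
#   [(0, 0, -1), (0, 1, -1), (1, 0, -1), (1, 1, -1)]    # lag-1 pairs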
# Run through all elements of link_list. Each element of link_list specifies ordered pairs of variables
# whose connecting edges are then subjected to conditional independence tests
for links in link_list:
# Memory variables for storing edges that are marked for removal
to_remove = {j: {} for j in range(self.N)}
# Iterate through all edges specified by links.
# Note that since the variable pairs are ordered, (A, B) and (B, A) are seen as different pairs.
for pair in links:
# Decode the elements of links into pairs of variables (X, Y)
if len(pair) == 2:
X = (pair[0], pair[1])
Y = (pair[0], 0)
else:
X = (pair[0], pair[2])
Y = (pair[1], 0)
# Do not test auto-links twice
if self.auto_first and X[0] == Y[0]:
continue
######################################################################################################
### Exclusion of links ###############################################################################
# Exclude the current link if ...
# ... X = Y
if X[1] == 0 and X[0] == Y[0]:
continue
# ... X > Y
if self._is_smaller(Y, X):
continue
# Get the current link
link = self._get_link(X, Y) # dict lookup e.g. from (0,1,1) to 'oL>'
# Moreover exclude the current link if ...
# ... X and Y are not adjacent anymore
if link == "":
continue
# ... the link is definitely part of G
if link[1] == "-":
continue
######################################################################################################
### Determine which tests the link will be subjected to ###########################################
# Depending on the middle mark on the link between X and Y as well as on some global options,
# we may not need to search for separating set among the potential parents of Y and/or X.
test_Y = True if link[1] not in ["R", "!"] else False
test_X = True if (link[1] not in ["L", "!"] and (
X[1] == 0 or (self.max_cond_px > 0 and self.max_cond_px >= p_pc))) else False
######################################################################################################
### Preparation PC search set and default conditioning set ###########################################
if test_Y:
S_default_YX, S_search_YX = self._get_default_and_search_sets(Y, X, "ancestral")
if test_X:
S_default_XY, S_search_XY = self._get_default_and_search_sets(X, Y, "ancestral")
######################################################################################################
### Middle mark updates ##############################################################################
any_middle_mark_update = False
# Note: Updating the middle marks here, within the for-loop, does not spoil order independence. In fact, this update does not influence the flow of the for-loop at all
if test_Y:
if len(S_search_YX) < p_pc:
# Note that X is smaller than Y. If S_search_YX exists and has fewer than p elements, X and Y are not d-separated by S \subset Par(Y). Therefore, the middle mark on the edge between X and Y can be updated with 'R'
if (X, Y) not in self._cannot_mark:
# if X == (0, 0) and Y == (3, 0):
# print()
self._apply_middle_mark(X, Y, "R")
else:
# Since S_search_YX exists and has at least p_pc elements, the link between X and Y will be subjected to conditional independence tests. Therefore, the algorithm has not converged yet.
has_converged = False
if test_X:
if len(S_search_XY) < p_pc:
# Note that X is smaller than Y. If S_search_XY exists and has fewer than p elements, X and Y are not d-separated by S \subset Par(X). Therefore, the middle mark on the edge between X and Y can be updated with 'L'
if (X, Y) not in self._cannot_mark:
self._apply_middle_mark(X, Y, "L")
else:
# Since S_search_XY exists and has at least p_pc elements, the link between X and Y will be subjected to conditional independence tests. Therefore, the algorithm has not converged yet.
has_converged = False
######################################################################################################
######################################################################################################
### Tests for conditional independence ###############################################################
# If option self.break_once_separated is True, the below for-loops will be broken immediately once a separating set has been found. In conjunction with the modified majority rule employed for orienting links, order independence (with respect to the index 'i' on X^i_t) then requires that the tested conditioning sets are ordered in an order independent way. Here, the minimal effect size of previous conditional independence tests serve as an order independent order criterion.
if self.break_once_separated or not np.isinf(self.max_q_global):
if test_Y:
S_search_YX = self._sort_search_set(S_search_YX, Y)
if test_X:
S_search_XY = self._sort_search_set(S_search_XY, X)
# Run through all cardinality p_pc subsets of S_search_YX
if test_Y:
q_count = 0
for S_pc in combinations(S_search_YX, p_pc):
q_count = q_count + 1
if q_count > self.max_q_global:
self._cannot_mark.add((X, Y))
break
# Build the full conditioning set
Z = set(S_pc)
Z = Z.union(S_default_YX)
# Test conditional independence of X and Y given Z
val, pval = self.cond_ind_test.run_test(X=[X], Y=[Y], Z=list(Z), tau_max=self.tau_max)
if self.verbosity >= 2:
print("ANC(Y): %s _|_ %s | S_def = %s, S_pc = %s: val = %.2f / pval = % .4f" %
(X, Y, ' '.join([str(z) for z in S_default_YX]), ' '.join([str(z) for z in S_pc]),
val, pval))
# Accordingly update dictionaries that keep track of the test statistic, the corresponding
# p-value and the cardinality of conditioning sets
self._update_val_min(X, Y, val)
self._update_pval_max(X, Y, pval)
self._update_cardinality(X, Y, len(Z))
# Check whether test result was significant
if pval > self.pc_alpha:
# Mark the edge from X to Y for removal and save sepset
to_remove[Y[0]][X] = True
self._save_sepset(X, Y, (frozenset(Z), "wm"))
# Verbose output
if self.verbosity >= 1:
print("({},{:2}) {:11} {} given {} union {}".format(X[0], X[1], "independent", Y,
S_pc, S_default_YX))
if self.break_once_separated:
break
# Run through all cardinality p_pc subsets of S_search_XY
if test_X:
q_count = 0
for S_pc in combinations(S_search_XY, p_pc):
q_count = q_count + 1
if q_count > self.max_q_global:
self._cannot_mark.add((X, Y))
break
# Build the full conditioning set
Z = set(S_pc)
Z = Z.union(S_default_XY)
# Test conditional independence of X and Y given Z
val, pval = self.cond_ind_test.run_test(X=[X], Y=[Y], Z=list(Z), tau_max=self.tau_max)
if self.verbosity >= 2:
print("ANC(X): %s _|_ %s | S_def = %s, S_pc = %s: val = %.2f / pval = % .4f" %
(X, Y, ' '.join([str(z) for z in S_default_XY]), ' '.join([str(z) for z in S_pc]),
val, pval))
# Accordingly update dictionaries that keep track of the test statistic, the corresponding p-value and the cardinality of conditioning sets
self._update_val_min(X, Y, val)
self._update_pval_max(X, Y, pval)
self._update_cardinality(X, Y, len(Z))
# Check whether test result was significant
if pval > self.pc_alpha:
# Mark the edge from X to Y for removal and save sepset
to_remove[Y[0]][X] = True
self._save_sepset(X, Y, (frozenset(Z), "wm"))
# Verbose output
if self.verbosity >= 1:
print("({},{:2}) {:11} {} given {} union {}".format(X[0], X[1], "independent", Y,
S_pc, S_default_XY))
if self.break_once_separated:
break
# for pair in links
##########################################################################################################
### Remove edges marked for removal in to_remove #########################################################
# Run through all of the nested dictionary
for j in range(self.N):
for (i, lag_i) in to_remove[j].keys():
# Remember that at least one edge has been removed, remove the edge
any_removal = True
self._write_link((i, lag_i), (j, 0), "", verbosity=self.verbosity)
# end for links in link_list
# Verbose output
if self.verbosity >= 1:
print("\nTest phase complete")
##############################################################################################################
### Orientations and next step ###############################################################################
if any_removal:
# At least one edge was removed or at least one middle mark has been updated. Therefore:
# i) apply the restricted set of orientation rules,
# ii) restart the while loop at p_pc = 0, unless all edges have converged, then break the while loop
only_lagged = False if self.orient_contemp == 2 else True
any_update = self._run_orientation_phase(rule_list=self._rules_prelim, only_lagged=only_lagged)
# If the orientation phase made a non-trivial update, then restart the while loop. Else increase p_pc by one
if any_update:
if self.max_cond_px == 0 and self.update_middle_marks:
self._update_middle_marks()
p_pc = 0
else:
p_pc = p_pc + 1
else:
# The graph has not changed at all in this iteration of the while loop.
# Therefore, if all edges have converged, break the while loop.
# If at least one edge has not yet converged, increase p_pc by one.
if has_converged:
break
else:
p_pc = p_pc + 1
# end while True
##################################################################################################################
### Consistency test and middle mark update ######################################################################
# Run through the entire graph
for j in range(self.N):
for (i, lag_i) in self.graph_dict[j].keys():
X = (i, lag_i)
Y = (j, 0)
if self._is_smaller(Y, X):
continue
# Consider only those links that are still part of G
link = self._get_link((i, lag_i), (j, 0))
if len(link) > 0:
# Consistency check
if not while_broken and (X, Y) not in self._cannot_mark:
assert link[1] != "?"
assert link[1] != "R" or (lag_i < 0 and (self.max_cond_px > 0 or not self.update_middle_marks))
# Update all middle marks to '!'
if link[1] not in ["-", "!"]:
self._write_link((i, lag_i), (j, 0), link[0] + "!" + link[2])
##################################################################################################################
### Final rule applications ######################################################################################
if not prelim or self.prelim_with_collider_rules:
if not prelim:
self.no_apr = self.no_apr - 1
any_update = self._run_orientation_phase(rule_list=self._rules_all, only_lagged=False)
if self.max_cond_px == 0 and self.update_middle_marks and any_update:
self._update_middle_marks()
else:
only_lagged = False if self.orient_contemp >= 1 else True
any_update = self._run_orientation_phase(rule_list=self._rules_prelim_final, only_lagged=only_lagged)
if self.max_cond_px == 0 and self.update_middle_marks and any_update:
self._update_middle_marks()
# Return
return True
def _run_non_ancestral_removal_phase(self):
"""Run the non-ancestral edge removal phase, this is Algorithm S3"""
# Update of middle marks
self._update_middle_marks()
# This function initializes self.graph_full_dict, a nested dictionary representing the graph including links that are forward in time. This will make the calculation of na-pds-t sets easier.
self._initialize_full_graph()
# Iterate until convergence. Here, p_pc is the cardinality of the non-default part of the conditioning sets. The full conditioning sets may have higher cardinality due to default conditioning on known parents
p_pc = 0
while True:
##########################################################################################################
### Run the next removal iteration #######################################################################
# Force-quit the while loop when p_pc exceeds the limit set by self.max_p_global or self.max_p_non_ancestral
if p_pc > self.max_p_global or p_pc > self.max_p_non_ancestral:
break
# Verbose output
if self.verbosity >= 1:
if p_pc == 0:
print("\nStarting test phase\n")
print("p = {}".format(p_pc))
# Variables to memorize the occurrence and absence of certain events in the below edge removal phase
has_converged = True
any_removal = False
# Remember edges for which the separating set search is aborted due to max_q_global
self._cannot_mark = set()
# Generate the prioritized link list
if self.auto_first:
link_list = [product(range(self.N), range(-self.tau_max, 0))]
link_list = link_list + [product(range(self.N), range(self.N), range(-lag, -lag + 1)) for lag in
range(0, self.tau_max + 1)]
else:
link_list = [product(range(self.N), range(self.N), range(-lag, -lag + 1)) for lag in
range(0, self.tau_max + 1)]
# Run through all elements of link_list. Each element of link_list specifies ordered pairs of variables
# whose connecting edges are then subjected to conditional independence tests
for links in link_list:
# Memory variables for storing edges that are marked for removal
to_remove = {j: {} for j in range(self.N)}
# Iterate through all edges specified by links. Note that since the variable pairs are ordered, (A, B) and (B, A) are seen as different pairs.
for pair in links:
if len(pair) == 2:
X = (pair[0], pair[1])
Y = (pair[0], 0)
else:
X = (pair[0], pair[2])
Y = (pair[1], 0)
# Do not test auto-links twice
if self.auto_first and X[0] == Y[0]:
continue
######################################################################################################
### Exclusion of links ###############################################################################
# Exclude the current link if ...
# ... X = Y
if X[1] == 0 and X[0] == Y[0]:
continue
# ... X > Y
if self._is_smaller(Y, X):
continue
# Get the current link
link = self._get_link(X, Y)
# Exclude the current link if ...
if link == "":
continue
# ... the link is definitely part of G
if link[1] == "-":
continue
######################################################################################################
### Determine which tests the link will be subjected to #############################################
# The algorithm always searches for separating sets in na-pds-t(Y, X). Depending on whether X and Y are contemporaneous and on some global options, the algorithm may also search for separating sets in na-pds-t(X, Y)
test_X = True if (X[1] == 0 or (self.max_cond_px > 0 and self.max_cond_px >= p_pc)) else False
######################################################################################################
### Preparation of default conditioning sets and PC search sets ######################################
# Verbose output
if self.verbosity >= 2:
print("_get_na_pds_t ")
S_default_YX, S_search_YX = self._get_default_and_search_sets(Y, X, "non-ancestral")
self.max_na_search_set_found = max(self.max_na_search_set_found, len(S_search_YX))
if test_X:
S_default_XY, S_search_XY = self._get_default_and_search_sets(X, Y, "non-ancestral")
self.max_na_search_set_found = max(self.max_na_search_set_found, len(S_search_XY))
# If the search set exceeds the specified bounds, do not test this link
if len(S_search_YX) > self.max_pds_set or (test_X and len(S_search_XY) > self.max_pds_set):
self._cannot_mark.add((X, Y))
continue
######################################################################################################
######################################################################################################
### Middle mark updates ##############################################################################
# Note: Updating the middle marks here, within the for-loop, does not spoil order independence. In fact, this update does not influence the flow of the for-loop at all
if link[1] == "!":
if len(S_search_YX) < p_pc or (test_X and len(S_search_XY) < p_pc):
# Mark the link from X to Y as converged, remember the fixation, then continue
if (X, Y) not in self._cannot_mark:
self._write_link(X, Y, link[0] + "-" + link[2], verbosity=self.verbosity)
continue
else:
has_converged = False
elif link[1] == "R":
if len(S_search_YX) < p_pc:
# Mark the link from X to Y as converged, remember the fixation, then continue
if (X, Y) not in self._cannot_mark:
self._write_link(X, Y, link[0] + "-" + link[2], verbosity=self.verbosity)
continue
elif (test_X and len(S_search_XY) >= p_pc):
has_converged = False
elif link[1] == "L":
if test_X and len(S_search_XY) < p_pc:
# Mark the link from X to Y as converged, remember the fixation, then continue
if (X, Y) not in self._cannot_mark:
self._write_link(X, Y, link[0] + "-" + link[2], verbosity=self.verbosity)
continue
elif len(S_search_YX) >= p_pc:
has_converged = False
else:
if len(S_search_YX) < p_pc and (not test_X or len(S_search_XY) < p_pc):
# Mark the link from X to Y as converged, remember the fixation, then continue
if (X, Y) not in self._cannot_mark:
self._write_link(X, Y, link[0] + "-" + link[2], verbosity=self.verbosity)
continue
else:
has_converged = False
######################################################################################################
### Tests for conditional independence ###############################################################
# If option self.break_once_separated is True, the below for-loops will be broken immediately once a separating set has been found. In conjunction with the modified majority rule employed for orienting links, order independence (with respect to the index 'i' on X^i_t) then requires that the tested conditioning sets are ordered in an order independent way. Here, the minimal effect size of previous conditional independence tests serve as an order independent order criterion.
if self.break_once_separated or not np.isinf(self.max_q_global):
S_search_YX = self._sort_search_set(S_search_YX, Y)
if test_X:
S_search_XY = self._sort_search_set(S_search_XY, X)
# Verbose output
if self.verbosity >= 2:
print("for S_pc in combinations(S_search_YX, p_pc)")
# Run through all cardinality p_pc subsets of S_search_YX
q_count = 0
for S_pc in combinations(S_search_YX, p_pc):
q_count = q_count + 1
if q_count > self.max_q_global:
self._cannot_mark.add((X, Y))
break
# Build the full conditioning set
Z = set(S_pc)
Z = Z.union(S_default_YX)
# Test conditional independence of X and Y given Z
val, pval = self.cond_ind_test.run_test(X=[X], Y=[Y], Z=list(Z), tau_max=self.tau_max)
if self.verbosity >= 2:
print("Non-ANC(Y): %s _|_ %s | S_def = %s, S_pc = %s: val = %.2f / pval = % .4f" %
(
X, Y, ' '.join([str(z) for z in S_default_YX]), ' '.join([str(z) for z in S_pc]),
val,
pval))
# Accordingly update dictionaries that keep track of the test statistic, the corresponding p-value and the cardinality of conditioning sets
self._update_val_min(X, Y, val)
self._update_pval_max(X, Y, pval)
self._update_cardinality(X, Y, len(Z))
# Check whether test result was significant
if pval > self.pc_alpha:
# Mark the edge from X to Y for removal and save sepset
to_remove[Y[0]][X] = True
self._save_sepset(X, Y, (frozenset(Z), "wm"))
# Verbose output
if self.verbosity >= 1:
print("({},{:2}) {:11} {} given {} union {}".format(X[0], X[1], "independent", Y, S_pc,
S_default_YX))
if self.break_once_separated:
break
if test_X:
# Verbose output
if self.verbosity >= 2:
print("for S_pc in combinations(S_search_XY, p_pc)")
# Run through all cardinality p_pc subsets of S_search_XY
q_count = 0
for S_pc in combinations(S_search_XY, p_pc):
q_count = q_count + 1
if q_count > self.max_q_global:
self._cannot_mark.add((X, Y))
break
# Build the full conditioning set
Z = set(S_pc)
Z = Z.union(S_default_XY)
# Test conditional independence of X and Y given Z
val, pval = self.cond_ind_test.run_test(X=[X], Y=[Y], Z=list(Z), tau_max=self.tau_max)
if self.verbosity >= 2:
print("Non-ANC(X): %s _|_ %s | S_def = %s, S_pc = %s: val = %.2f / pval = % .4f" %
(X, Y, ' '.join([str(z) for z in S_default_XY]), ' '.join([str(z) for z in S_pc]),
val, pval))
# Accordingly update dictionaries that keep track of the test statistic, the corresponding p-value and the cardinality of conditioning sets
self._update_val_min(X, Y, val)
self._update_pval_max(X, Y, pval)
self._update_cardinality(X, Y, len(Z))
# Check whether test result was significant
if pval > self.pc_alpha:
# Mark the edge from X to Y for removal and save sepset
to_remove[Y[0]][X] = True
self._save_sepset(X, Y, (frozenset(Z), "wm"))
# Verbose output
if self.verbosity >= 1:
print("({},{:2}) {:11} {} given {} union {}".format(X[0], X[1], "independent", Y,
S_pc, S_default_XY))
if self.break_once_separated:
break
# end for pair in links
##########################################################################################################
### Remove edges marked for removal in to_remove #########################################################
# Check whether there is any removal at all
any_removal_this = False
# Run through all of the nested dictionary
for j in range(self.N):
for (i, lag_i) in to_remove[j].keys():
# Remember that at least one edge has been removed, remove the edge
any_removal = True
any_removal_this = True
self._write_link((i, lag_i), (j, 0), "", verbosity=self.verbosity)
# If any_removal_this = True, we need to recalculate full graph dict
if any_removal_this:
self._initialize_full_graph()
self._na_pds_t = {(j, -tau_j): {} for j in range(self.N) for tau_j in range(self.tau_max + 1)}
# end for links in link_list
# Verbose output
if self.verbosity >= 1:
print("\nTest phase complete")
##############################################################################################################
### Orientations and next step ###############################################################################
if any_removal:
# At least one edge was removed or at least one middle mark has been updated. Therefore: i) apply the full set of orientation rules, ii) restart the while loop at p_pc = 0, unless all edges have converged, then break the while loop
any_update = self._run_orientation_phase(rule_list=self._rules_all, only_lagged=False)
if any_update:
self._initialize_full_graph()
self._na_pds_t = {(j, -tau_j): {} for j in range(self.N) for tau_j in range(self.tau_max + 1)}
p_pc = 0
else:
p_pc = p_pc + 1
else:
# The graph has not changed at all in this iteration of the while loop. Therefore, if all edges have converged, break the while loop. If at least one edge has not yet converged, increase p_pc by one.
if has_converged:
break
else:
p_pc = p_pc + 1
# end while True
##################################################################################################################
### Final rule applications ######################################################################################
self._run_orientation_phase(rule_list=self._rules_all, only_lagged=False)
# Return
return True
def _run_orientation_phase(self, rule_list, only_lagged=False):
"""Exhaustively apply the rules specified by rule_list, this is Algorithm S4"""
# Verbose output
if self.verbosity >= 1:
print("\nStarting orientation phase")
print("with rule list: ", rule_list)
# Remember whether this call to _run_orientation_phase has made any update to G
restarted_once = False
# Run through all priority levels of rule_list
idx = 0
while idx <= len(rule_list) - 1:
# Some rules require self.graph_full_dict. Therefore, it is initialized once the while loop (re)starts at the first priority level
if idx == 0:
self._initialize_full_graph()
# Remember whether G will be updated with new useful information ('x' marks are considered not useful)
restart = False
###########################################################################################################
### Rule application ######################################################################################
# Get the current rules
current_rules = rule_list[idx]
# Prepare a list to remember marked orientations
to_orient = []
# Run through all current rules
for rule in current_rules:
# Verbose output
if self.verbosity >= 1:
print("\n{}:".format(rule))
# Exhaustively apply the rule to the graph...
orientations = self._apply_rule(rule, only_lagged)
# Verbose output
if self.verbosity >= 1:
for ((i, j, lag_i), new_link) in set(orientations):
print("{:10} ({},{:2}) {:3} ({},{:2}) ==> ({},{:2}) {:3} ({},{:2}) ".format("Marked:", i, lag_i,
self._get_link(
(i, lag_i),
(j, 0)), j, 0,
i, lag_i, new_link,
j, 0))
if len(orientations) == 0:
print("Found nothing")
# ... and stage the results for orientation and removal
to_orient.extend(orientations)
###########################################################################################################
### Aggregation of marked orientations ####################################################################
links_to_remove = set()
links_to_fix = set()
new_ancs = {j: set() for j in range(self.N)}
new_non_ancs = {j: set() for j in range(self.N)}
            # Run through all marked orientations
for ((i, j, lag_i), new_link) in to_orient:
# The old link
old_link = self._get_link((i, lag_i), (j, 0))
# Is the link marked for removal?
if new_link == "" and len(old_link) > 0:
links_to_remove.add((i, j, lag_i))
continue
                # Assert that no preceding variable is marked as an ancestor of a later variable
assert not (lag_i > 0 and new_link[2] == "-")
# Is the link marked for fixation?
if new_link[1] == "-" and old_link[1] != "-":
links_to_fix.add((i, j, lag_i))
# New ancestral relation of (i, lag_i) to (j, 0)
if new_link[0] == "-" and old_link[0] != "-":
new_ancs[j].add((i, lag_i))
elif new_link[0] == "<" and old_link[0] != "<":
new_non_ancs[j].add((i, lag_i))
# New ancestral relation of (j, 0) to (i, lag_i == 0)
if lag_i == 0:
if new_link[2] == "-" and old_link[2] != "-":
new_ancs[i].add((j, 0))
elif new_link[2] == ">" and old_link[2] != ">":
new_non_ancs[i].add((j, 0))
# Resolve conflicts about removal and fixation
ambiguous_links = links_to_fix.intersection(links_to_remove)
links_to_fix = links_to_fix.difference(ambiguous_links)
links_to_remove = links_to_remove.difference(ambiguous_links)
###########################################################################################################
### Removals, update middle marks, update ancestral information ###########################################
# Remove links
for (i, j, lag_i) in links_to_remove:
self._write_link((i, lag_i), (j, 0), "", verbosity=self.verbosity)
restart = True
# Fix links
for (i, j, lag_i) in links_to_fix:
old_link = self._get_link((i, lag_i), (j, 0))
new_link = old_link[0] + "-" + old_link[2]
self._write_link((i, lag_i), (j, 0), new_link, verbosity=self.verbosity)
restart = True
# Mark links as ambiguous
for (i, j, lag_i) in ambiguous_links:
old_link = self._get_link((i, lag_i), (j, 0))
new_link = old_link[0] + "x" + old_link[2]
self._write_link((i, lag_i), (j, 0), new_link, verbosity=self.verbosity)
# Update ancestral information. The function called includes conflict resolution
restart = restart or self._apply_new_ancestral_information(new_non_ancs, new_ancs)
###########################################################################################################
### Make separating sets of removed links weakly minimal ##################################################
if len(links_to_remove) > 0:
# Verbose output
if self.verbosity >= 1:
print("\nLinks were removed by rules\n")
new_ancs = {j: set() for j in range(self.N)}
new_non_ancs = {j: set() for j in range(self.N)}
# Run through all links that have been removed
for (i, j, lag_i) in links_to_remove:
X = (i, lag_i)
Y = (j, 0)
# Get ancestors of X and Y
ancs_XY = self._get_ancs([X, Y]).difference({X, Y})
# Read out all separating sets that were found in the rule phase, then consider only those of minimal cardinality
old_sepsets_all = {Z for (Z, _) in self._get_sepsets(X, Y)}
min_size = min({len(Z) for Z in old_sepsets_all})
old_sepsets_smallest = {Z for Z in old_sepsets_all if len(Z) == min_size}
# For all separating sets of minimal cardinality, find weakly minimal separating subsets
self._delete_sepsets(X, Y)
self._make_sepset_weakly_minimal(X, Y, old_sepsets_smallest, ancs_XY)
new_sepsets = self._get_sepsets(X, Y)
# end for (i, j, lag_i) in links_to_remove
# end if len(links_to_remove) > 0
# If any useful new information was found, go back to idx = 0, else increase idx by 1
if restart:
idx = 0
restarted_once = True
else:
idx = idx + 1
# end while idx <= len(rule_list) - 1
# Verbose output
if self.verbosity >= 1:
print("\nOrientation phase complete")
        # Return whether the rule application was restarted at least once
return restarted_once
########################################################################################################################
########################################################################################################################
########################################################################################################################
def _get_default_and_search_sets(self, A, B, phase):
"""Return the default conditioning set and PC search set"""
if phase == "ancestral":
# This is a-pds-t(A, B)
S_raw = self._get_a_pds_t(A, B)
# Determine the default conditioning set
S_default = self._get_parents(A, B).difference({A, B})
# Determine the PC search set
S_search = S_raw.difference(S_default)
elif phase == "non-ancestral":
# This is na-pds-t(A, B)
S_raw = self._get_na_pds_t(A, B)
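            # Keep track of the size of the largest na-pds-t set encountered so far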
self.max_na_pds_set_found = max(self.max_na_pds_set_found, len(S_raw))
# Determine the default conditioning set
S_default = S_raw.intersection(self._get_ancs([A, B]))
S_default = S_default.union(self._get_parents(A, B))
S_default = S_default.difference({A, B})
# Determine the PC search set
S_search = S_raw.difference(S_default)
# Return
return S_default, S_search
def _apply_new_ancestral_information(self, new_non_ancs, new_ancs):
"""Apply the new ancestorships and non-ancestorships specified by new_non_ancs and new_ancs to the current graph. Conflicts are resolved by marking. Returns True if any circle mark was turned into a head or tail, else False."""
#######################################################################################################
### Preprocessing #####################################################################################
# Memory variables
add_to_def_non_ancs = {j: set() for j in range(self.N)}
add_to_def_ancs = {j: set() for j in range(self.N)}
add_to_ambiguous_ancestorships = {j: set() for j in range(self.N)}
put_head_or_tail = False
# Default values
if new_non_ancs is None:
new_non_ancs = {j: set() for j in range(self.N)}
if new_ancs is None:
new_ancs = {j: set() for j in range(self.N)}
        # Marking A as an ancestor of B implies that B is marked as a non-ancestor of A. This is only non-trivial for contemporaneous A and B
for j in range(self.N):
for (i, lag_i) in new_ancs[j]:
if lag_i == 0:
new_non_ancs[i].add((j, 0))
#######################################################################################################
### Conflict resolution ###############################################################################
# Iterate through new_non_ancs
for j in range(self.N):
for (i, lag_i) in new_non_ancs[j]:
# X = (i, lag_i), Y = (j, 0)
# X is marked as non-ancestor for Y
# Conflict resolution
if (i, lag_i) in self.ambiguous_ancestorships[j]:
# There is a conflict, since it is already marked as ambiguous whether X is an ancestor of Y
if self.verbosity >= 1:
print("{:10} ({}, {:2}) marked as non-anc of {} but saved as ambiguous".format("Conflict:", i,
lag_i, (j, 0)))
elif (i, lag_i) in self.def_ancs[j]:
# There is a conflict, since X is already marked as ancestor of Y
add_to_ambiguous_ancestorships[j].add((i, lag_i))
if self.verbosity >= 1:
print("{:10} ({}, {:2}) marked as non-anc of {} but saved as anc".format("Conflict:", i, lag_i,
(j, 0)))
elif (i, lag_i) in new_ancs[j]:
# There is a conflict, since X is also marked as a new ancestor of Y
add_to_ambiguous_ancestorships[j].add((i, lag_i))
if self.verbosity >= 1:
print("{:10} ({}, {:2}) marked as both anc- and non-anc of {}".format("Conflict:", i, lag_i,
(j, 0)))
else:
# There is no conflict
add_to_def_non_ancs[j].add((i, lag_i))
# Iterate through new_ancs
for j in range(self.N):
for (i, lag_i) in new_ancs[j]:
# X = (i, lag_i), Y = (j, 0)
# X is marked as ancestor for Y
# Conflict resolution
if (i, lag_i) in self.ambiguous_ancestorships[j]:
# There is a conflict, since it is already marked as ambiguous whether X is an ancestor of Y
if self.verbosity >= 1:
print(
"{:10} ({}, {:2}) marked as anc of {} but saved as ambiguous".format("Conflict:", i, lag_i,
(j, 0)))
elif lag_i == 0 and (j, 0) in self.ambiguous_ancestorships[i]:
                    # There is a conflict, since X and Y are contemporaneous and it is already marked as ambiguous whether Y is an ancestor of X
# Note: This is required here, because X being an ancestor of Y implies that Y is not an ancestor of X. This ambiguity cannot exist when X is before Y
if self.verbosity >= 1:
print(
"{:10} ({}, {:2}) marked as anc of {} but saved as ambiguous".format("Conflict:", i, lag_i,
(j, 0)))
elif (i, lag_i) in self.def_non_ancs[j]:
# There is a conflict, since X is already marked as non-ancestor of Y
add_to_ambiguous_ancestorships[j].add((i, lag_i))
if self.verbosity >= 1:
print("{:10} ({}, {:2}) marked as anc of {} but saved as non-anc".format("Conflict:", i, lag_i,
(j, 0)))
elif (i, lag_i) in new_non_ancs[j]:
# There is a conflict, since X is also marked as a new non-ancestor of Y
add_to_ambiguous_ancestorships[j].add((i, lag_i))
if self.verbosity >= 1:
print("{:10} ({}, {:2}) marked as both anc- and non-anc of {}".format("Conflict:", i, lag_i,
(j, 0)))
else:
# There is no conflict
add_to_def_ancs[j].add((i, lag_i))
#######################################################################################################
#######################################################################################################
### Apply the ambiguous information ###################################################################
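        # For every pairing whose ancestral status has become ambiguous, set the left end mark of the link to 'x',
        # withdraw any definite (non-)ancestorship records, and store the pairing in ambiguous_ancestorships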
for j in range(self.N):
for (i, lag_i) in add_to_ambiguous_ancestorships[j]:
old_link = self._get_link((i, lag_i), (j, 0))
if len(old_link) > 0 and old_link[0] != "x":
new_link = "x" + old_link[1] + old_link[2]
self._write_link((i, lag_i), (j, 0), new_link, verbosity=self.verbosity)
if self.verbosity >= 1:
if (i, lag_i) in self.def_ancs[j]:
print("{:10} Removing ({}, {:2}) as anc of {}".format("Update:", i, lag_i, (j, 0)))
if (i, lag_i) in self.def_non_ancs[j]:
print("{:10} Removing ({}, {:2}) as non-anc of {}".format("Update:", i, lag_i, (j, 0)))
self.def_ancs[j].discard((i, lag_i))
self.def_non_ancs[j].discard((i, lag_i))
if lag_i == 0:
if self.verbosity >= 1 and (j, 0) in self.def_ancs[i]:
print("{:10} Removing {} as anc of {}".format("Update:", i, lag_i, (j, 0)))
self.def_ancs[i].discard((j, 0))
# Do we also need the following?
# self.def_non_ancs[i].discard((j, 0))
if self.verbosity >= 1 and (i, lag_i) not in self.ambiguous_ancestorships[j]:
print("{:10} Marking ancestorship of ({}, {:2}) to {} as ambiguous".format("Update:", i, lag_i,
(j, 0)))
self.ambiguous_ancestorships[j].add((i, lag_i))
#######################################################################################################
### Apply the unambiguous information #################################################################
for j in range(self.N):
for (i, lag_i) in add_to_def_non_ancs[j]:
old_link = self._get_link((i, lag_i), (j, 0))
if len(old_link) > 0 and old_link[0] != "<":
new_link = "<" + old_link[1] + old_link[2]
self._write_link((i, lag_i), (j, 0), new_link, verbosity=self.verbosity)
put_head_or_tail = True
if self.verbosity >= 1 and (i, lag_i) not in self.def_non_ancs[j]:
print("{:10} Marking ({}, {:2}) as non-anc of {}".format("Update:", i, lag_i, (j, 0)))
self.def_non_ancs[j].add((i, lag_i))
for (i, lag_i) in add_to_def_ancs[j]:
old_link = self._get_link((i, lag_i), (j, 0))
if len(old_link) > 0 and (old_link[0] != "-" or old_link[2] != ">"):
new_link = "-" + old_link[1] + ">"
self._write_link((i, lag_i), (j, 0), new_link, verbosity=self.verbosity)
put_head_or_tail = True
if self.verbosity >= 1 and (i, lag_i) not in self.def_ancs[j]:
print("{:10} Marking ({}, {:2}) as anc of {}".format("Update:", i, lag_i, (j, 0)))
self.def_ancs[j].add((i, lag_i))
if lag_i == 0:
if self.verbosity >= 1 and (j, 0) not in self.def_non_ancs[i]:
print("{:10} Marking {} as non-anc of {}".format("Update:", (j, 0), (i, 0)))
self.def_non_ancs[i].add((j, 0))
#######################################################################################################
return put_head_or_tail
def _apply_rule(self, rule, only_lagged):
"""Call the orientation-removal-rule specified by the string argument rule."""
if rule == "APR":
return self._apply_APR(only_lagged)
elif rule == "ER-00-a":
return self._apply_ER00a(only_lagged)
elif rule == "ER-00-b":
return self._apply_ER00b(only_lagged)
elif rule == "ER-00-c":
return self._apply_ER00c(only_lagged)
elif rule == "ER-00-d":
return self._apply_ER00d(only_lagged)
elif rule == "ER-01":
return self._apply_ER01(only_lagged)
elif rule == "ER-02":
return self._apply_ER02(only_lagged)
elif rule == "ER-03":
return self._apply_ER03(only_lagged)
elif rule == "R-04":
return self._apply_R04(only_lagged)
elif rule == "ER-08":
return self._apply_ER08(only_lagged)
elif rule == "ER-09":
return self._apply_ER09(only_lagged)
elif rule == "ER-10":
return self._apply_ER10(only_lagged)
def _get_na_pds_t(self, A, B):
"""Return the set na_pds_t(A, B), with at least one of them at lag 0"""
# Unpack A and B, then assert that at least one of them is at lag 0
var_A, lag_A = A
var_B, lag_B = B
assert lag_A == 0 or lag_B == 0
# If na_pds_t(A, B) is in memory, return immediately
memo = self._na_pds_t[A].get(B)
if memo is not None:
return memo
        # Else, re-compute na_pds_t(A, B) according to the current graph and cache it.
# Re-compute na_pds_t_1(A, B) according to the current graph
na_pds_t_1 = {(var, lag + lag_A)
# W = (var, lag + lag_A) is in na_pds_t_1(A, B) if ...
for ((var, lag), link) in self.graph_dict[var_A].items()
# ... it is a non-future adjacency of A
if len(link) > 0
# ... and is not B
and (var, lag + lag_A) != B
# ... and is not before t - tau_max
and (lag + lag_A) >= -self.tau_max
# ... and is not after both A and B
# ... (i.e. is not after time t)
and (lag + lag_A) <= 0
# ... and is not a definite non-ancestor of A,
# which implies that it is not a definite descendant of A,
and link[0] != "<"
# ... and is not a definite descendant of B
# (i.e., B is not a definite ancestor of W)
and (var_B, lag_B - (lag + lag_A)) not in self.def_ancs[var]
}
# Compute na_pds_t_2(A, B)
# Find all potential C_1 nodes
C1_list = set()
for ((var, lag), link) in self.graph_full_dict[var_A].items():
node = (var, lag + lag_A)
# node is added to C1_list if, in addition to being adjacent to A, ...
# ... it is not B
if (var, lag + lag_A) == B:
continue
# ... it is not before t - tau_max
if (lag + lag_A) < -self.tau_max:
continue
# ... it is not after B
if (lag + lag_A) > lag_B:
continue
# ... it is not a definite ancestor of A
if link[0] == "-":
continue
# ... it is not a definite descendant of A
if link[2] == "-":
continue
# ... it is not a definite non-ancestor of B,
# which implies that it is not a definite descendant of B
if (var, (lag + lag_A) - lag_B) in self.def_non_ancs[var_B]:
continue
# If all tests are passed, node is added to C1_list
C1_list.add(node)
# end for ((var, lag), link) in self.graph_full_dict[var_A].items()
        # Breadth-first search to find (a superset of) na_pds_t_2(A, B)
visited = set()
start_from = {(C1, A) for C1 in C1_list}
while start_from:
new_start_from = set()
new_do_not_visit = set()
for (current_node, previous_node) in start_from:
visited.add((current_node, previous_node))
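                # Consider all neighbors of current_node as candidate next nodes on the path.
                # A candidate is skipped if it lies outside the time window [t - tau_max, t],
                # coincides with A, B, or the previous node, was already visited in this
                # direction, or fails the collider and ancestral conditions checked below.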
for (var, lag) in self.graph_full_dict[current_node[0]]:
next_node = (var, lag + current_node[1])
if next_node[1] < -self.tau_max:
continue
if next_node[1] > 0:
continue
if (next_node, current_node) in visited:
continue
if next_node == previous_node:
continue
if next_node == B:
continue
if next_node == A:
continue
link_l = self._get_link(next_node, current_node)
link_r = self._get_link(previous_node, current_node)
if link_l[2] == "-" or link_r[2] == "-":
continue
if self._get_link(next_node, previous_node) == "" and (link_l[2] == "o" or link_r[2] == "o"):
continue
if (var_A, lag_A - next_node[1]) in self.def_ancs[next_node[0]] or (var_B, lag_B - next_node[1]) in \
self.def_ancs[next_node[0]]:
continue
if ((next_node[1] - lag_A > 0) or (next_node[0], next_node[1] - lag_A) in self.def_non_ancs[
var_A]) and (
(next_node[1] - lag_B > 0) or (next_node[0], next_node[1] - lag_B) in self.def_non_ancs[
var_B]):
continue
new_start_from.add((next_node, current_node))
start_from = new_start_from
# end while start_from
na_pds_t_2 = {node for (node, _) in visited}
self._na_pds_t[A][B] = na_pds_t_1.union(na_pds_t_2).difference({A, B})
return self._na_pds_t[A][B]
def _make_sepset_weakly_minimal(self, X, Y, Z_list, ancs):
"""
        X and Y are conditionally independent given every Z in Z_list. However, it is not yet clear whether any of these Z is a weakly minimal separating set.
        This function finds weakly minimal separating subsets in an order-independent way and writes them to the self.sepsets dictionary. Only certainly weakly minimal separating subsets are retained.
"""
# Assert that all Z in Z_list have the same cardinality
assert len({len(Z) for Z in Z_list}) == 1
# Base Case 1:
# Z in Z_list is weakly minimal if len(Z) <= 1 or Z \subset ancs
any_weakly_minimal = False
for Z in Z_list:
if len(Z) <= 1 or Z.issubset(ancs):
self._save_sepset(X, Y, (frozenset(Z), "wm"))
any_weakly_minimal = True
if any_weakly_minimal:
return None
# If not Base Case 1, we need to search for separating subsets. We do this for all Z in Z_list, and build a set sepsets_next_call that contains all separating sets for the next recursive call
sepsets_next_call = set()
for Z in Z_list:
# Find all nodes A in Z that are not in ancs
removable = Z.difference(ancs)
# Test for removal of all nodes in removable
new_sepsets = []
val_values = []
for A in removable:
Z_A = [node for node in Z if node != A]
# Run the conditional independence test
val, pval = self.cond_ind_test.run_test(X=[X], Y=[Y], Z=Z_A, tau_max=self.tau_max)
if self.verbosity >= 2:
print("MakeMin: %s _|_ %s | Z_A = %s: val = %.2f / pval = % .4f" %
(X, Y, ' '.join([str(z) for z in list(Z_A)]), val, pval))
# Check whether the test result was significant
if pval > self.pc_alpha:
new_sepsets.append(frozenset(Z_A))
val_values.append(val)
# If new_sepsets is empty, then Z is already weakly minimal
if len(new_sepsets) == 0:
self._save_sepset(X, Y, (frozenset(Z), "wm"))
any_weakly_minimal = True
# If we did not yet find a weakly minimal separating set
if not any_weakly_minimal:
                # Sort all separating sets in new_sepsets by their test statistic, then append those separating sets with maximal statistic to sepsets_next_call. This i) guarantees order independence while ii) keeping the number of separating sets to test as small as possible
new_sepsets = [node for _, node in sorted(zip(val_values, new_sepsets), reverse=True)]
i = -1
while i <= len(val_values) - 2 and val_values[i + 1] == val_values[0]:
sepsets_next_call.add(new_sepsets[i])
i = i + 1
assert i >= 0
# If we did not yet find a weakly minimal separating set, make a recursive call
if not any_weakly_minimal:
self._make_sepset_weakly_minimal(X, Y, sepsets_next_call, ancs)
else:
return None
def _B_not_in_SepSet_AC(self, A, B, C):
"""Is B in less than half of the sets in SepSets(A, C)?"""
# Treat A - B - C as the same triple as C - B - A
# Convention: A is before C or, if they are contemporaneous, the index of A is smaller than that of C
if C[1] < A[1] or (C[1] == A[1] and C[0] < A[0]):
return self._B_not_in_SepSet_AC(C, B, A)
# Remember all separating sets that we will find
all_sepsets = set()
# Get the non-future adjacencies of A and C
if not self.use_a_pds_t_for_majority:
adj_A = self._get_non_future_adj([A]).difference({A, C})
adj_C = self._get_non_future_adj([C]).difference({A, C})
else:
adj_A = self._get_a_pds_t(A, C).difference({A, C})
adj_C = self._get_a_pds_t(C, A).difference({A, C})
Z_add = self._get_parents(A, C).difference({A, C})
search_A = adj_A.difference(Z_add)
search_C = adj_C.difference(Z_add)
if not np.isinf(self.max_q_global):
search_A = self._sort_search_set(search_A, A)
search_C = self._sort_search_set(search_C, C)
# Test for independence given all subsets of non-future adjacencies of A
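        # If A is strictly before C, the size of conditioning sets drawn from the search set of A is
        # additionally capped by self.max_cond_px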
if A[1] < C[1]:
max_p_A = min([len(search_A), self.max_cond_px, self.max_p_global]) + 1
else:
max_p_A = min([len(search_A), self.max_p_global]) + 1
# Shift lags
search_A = [(var, lag - C[1]) for (var, lag) in search_A]
search_C = [(var, lag - C[1]) for (var, lag) in search_C]
Z_add = {(var, lag - C[1]) for (var, lag) in Z_add}
X = (A[0], A[1] - C[1])
Y = (C[0], 0)
for p in range(max_p_A):
q_count = 0
for Z_raw in combinations(search_A, p):
q_count = q_count + 1
if q_count > self.max_q_global:
break
# Prepare the conditioning set
Z = {node for node in Z_raw if node != X and node != Y}
Z = Z.union(Z_add)
# Test conditional independence of X and Y given Z
val, pval = self.cond_ind_test.run_test(X=[X], Y=[Y], Z=list(Z), tau_max=self.tau_max)
if self.verbosity >= 2:
print("BnotinSepSetAC(A): %s _|_ %s | Z_add = %s, Z = %s: val = %.2f / pval = % .4f" %
(X, Y, ' '.join([str(z) for z in Z_add]),
' '.join([str(z) for z in {node for node in Z_raw if node != X and node != Y}]), val, pval))
# Check whether test result was significant
if pval > self.pc_alpha:
all_sepsets.add(frozenset(Z))
# Test for independence given all subsets of non-future adjacencies of C
for p in range(min(len(search_C), self.max_p_global) + 1):
q_count = 0
for Z_raw in combinations(search_C, p):
q_count = q_count + 1
if q_count > self.max_q_global:
break
# Prepare the conditioning set
Z = {node for node in Z_raw if node != X and node != Y}
Z = Z.union(Z_add)
# Test conditional independence of X and Y given Z
val, pval = self.cond_ind_test.run_test(X=[X], Y=[Y], Z=list(Z), tau_max=self.tau_max)
if self.verbosity >= 2:
# print("BnotinSepSetAC(C): %s _|_ %s | Z = %s: val = %.2f / pval = % .4f" %
# (X, Y, ' '.join([str(z) for z in list(Z)]), val, pval))
print("BnotinSepSetAC(C): %s _|_ %s | Z_add = %s, Z = %s: val = %.2f / pval = % .4f" %
(X, Y, ' '.join([str(z) for z in Z_add]),
' '.join([str(z) for z in {node for node in Z_raw if node != X and node != Y}]), val, pval))
# Check whether test result was significant
if pval > self.pc_alpha:
all_sepsets.add(frozenset(Z))
# Append the already known sepset
all_sepsets = all_sepsets.union({Z for (Z, _) in self._get_sepsets(X, Y)})
# Count number of sepsets and number of sepsets that contain B
n_sepsets = len(all_sepsets)
n_sepsets_with_B = len([1 for Z in all_sepsets if (B[0], B[1] - C[1]) in Z])
return True if 2 * n_sepsets_with_B < n_sepsets else False
def _B_in_SepSet_AC(self, A, B, C):
"""Is B in more than half of the sets in SepSets(A, C)?"""
# Treat A - B - C as the same triple as C - B - A
# Convention: A is before C or, if they are contemporaneous, the index of A is smaller than that of C
if C[1] < A[1] or (C[1] == A[1] and C[0] < A[0]):
return self._B_in_SepSet_AC(C, B, A)
link_AB = self._get_link(A, B)
link_CB = self._get_link(C, B)
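        # If the link A-B or C-B is absent or does not carry middle mark '-', base the majority vote only on the
        # separating sets that are already stored; otherwise search for additional separating sets below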
if link_AB == "" or link_CB == "" or link_AB[1] != "-" or link_CB[1] != "-":
            # The vote is based on those separating sets that were found already
all_sepsets = {Z for (Z, _) in self._get_sepsets(A, C)}
# Count number of sepsets and number of sepsets that contain B
n_sepsets = len(all_sepsets)
n_sepsets_with_B = len([1 for Z in all_sepsets if B in Z])
return True if 2 * n_sepsets_with_B > n_sepsets else False
else:
# Remember all separating sets that we will find
all_sepsets = set()
# Get the non-future adjacencies of A and C
if not self.use_a_pds_t_for_majority:
adj_A = self._get_non_future_adj([A]).difference({A, C})
adj_C = self._get_non_future_adj([C]).difference({A, C})
else:
adj_A = self._get_a_pds_t(A, C).difference({A, C})
adj_C = self._get_a_pds_t(C, A).difference({A, C})
Z_add = self._get_parents(A, C).difference({A, C})
search_A = adj_A.difference(Z_add)
search_C = adj_C.difference(Z_add)
if not np.isinf(self.max_q_global):
search_A = self._sort_search_set(search_A, A)
search_C = self._sort_search_set(search_C, C)
# Test for independence given all subsets of non-future adjacencies of A
if A[1] < C[1]:
max_p_A = min([len(search_A), self.max_cond_px, self.max_p_global]) + 1
else:
max_p_A = min([len(search_A), self.max_p_global]) + 1
# Shift lags
search_A = [(var, lag - C[1]) for (var, lag) in search_A]
search_C = [(var, lag - C[1]) for (var, lag) in search_C]
Z_add = {(var, lag - C[1]) for (var, lag) in Z_add}
X = (A[0], A[1] - C[1])
Y = (C[0], 0)
for p in range(max_p_A):
q_count = 0
for Z_raw in combinations(search_A, p):
q_count = q_count + 1
if q_count > self.max_q_global:
break
# Prepare the conditioning set
Z = {node for node in Z_raw if node != X and node != Y}
Z = Z.union(Z_add)
# Test conditional independence of X and Y given Z
val, pval = self.cond_ind_test.run_test(X=[X], Y=[Y], Z=list(Z), tau_max=self.tau_max)
if self.verbosity >= 2:
# print("BinSepSetAC(A): %s _|_ %s | Z = %s: val = %.2f / pval = % .4f" %
# (X, Y, ' '.join([str(z) for z in list(Z)]), val, pval))
print("BinSepSetAC(A): %s _|_ %s | Z_add = %s, Z = %s: val = %.2f / pval = % .4f" %
(X, Y, ' '.join([str(z) for z in Z_add]),
' '.join([str(z) for z in {node for node in Z_raw if node != X and node != Y}]), val,
pval))
# Check whether test result was significant
if pval > self.pc_alpha:
all_sepsets.add(frozenset(Z))
# Test for independence given all subsets of non-future adjacencies of C
for p in range(min(len(search_C), self.max_p_global) + 1):
q_count = 0
for Z_raw in combinations(search_C, p):
q_count = q_count + 1
if q_count > self.max_q_global:
break
# Prepare the conditioning set
Z = {node for node in Z_raw if node != X and node != Y}
Z = Z.union(Z_add)
# Test conditional independence of X and Y given Z
val, pval = self.cond_ind_test.run_test(X=[X], Y=[Y], Z=list(Z), tau_max=self.tau_max)
if self.verbosity >= 2:
# print("BinSepSetAC(C): %s _|_ %s | Z = %s: val = %.2f / pval = % .4f" %
# (X, Y, ' '.join([str(z) for z in list(Z)]), val, pval))
print("BinSepSetAC(C): %s _|_ %s | Z_add = %s, Z = %s: val = %.2f / pval = % .4f" %
(X, Y, ' '.join([str(z) for z in Z_add]),
' '.join([str(z) for z in {node for node in Z_raw if node != X and node != Y}]), val,
pval))
# Check whether test result was significant
if pval > self.pc_alpha:
all_sepsets.add(frozenset(Z))
# Append the already known sepset
all_sepsets = all_sepsets.union({Z for (Z, _) in self._get_sepsets(X, Y)})
# Count number of sepsets and number of sepsets that contain B
n_sepsets = len(all_sepsets)
n_sepsets_with_B = len([1 for Z in all_sepsets if (B[0], B[1] - C[1]) in Z])
return True if 2 * n_sepsets_with_B > n_sepsets else False
    def _get_parents(self, A, B):
        """Return all known parents of the nodes A and B"""
        if self.parents_of_lagged or A[1] == B[1]:
            out = {(var, lag + A[1]) for ((var, lag), link) in self.graph_dict[A[0]].items() if
                   len(link) > 0 and link[0] == "-" and lag + A[1] >= -self.tau_max}
            return out.union({(var, lag + B[1]) for ((var, lag), link) in self.graph_dict[B[0]].items() if
                              len(link) > 0 and link[0] == "-" and lag + B[1] >= -self.tau_max})
        else:
            if A[1] < B[1]:
                return {(var, lag + B[1]) for ((var, lag), link) in self.graph_dict[B[0]].items() if
                        len(link) > 0 and link[0] == "-" and lag + B[1] >= -self.tau_max}
            else:
                return {(var, lag + A[1]) for ((var, lag), link) in self.graph_dict[A[0]].items() if
                        len(link) > 0 and link[0] == "-" and lag + A[1] >= -self.tau_max}
def _apply_middle_mark(self, X, Y, char):
"""Update the middle mark on the link between X and Y with the character char"""
# Get the old link
old_link = self._get_link(X, Y)
# Determine the new link
if old_link[1] == "?":
new_link = old_link[0] + char + old_link[2]
elif (old_link[1] == "L" and char == "R") or (old_link[1] == "R" and char == "L"):
new_link = old_link[0] + "!" + old_link[2]
else:
assert False
# Write the new link
self._write_link(X, Y, new_link, verbosity=self.verbosity)
# Return
return True
def _update_middle_marks(self):
"""Apply rule MMR"""
if self.verbosity >= 1:
print("\nMiddle mark updates\n")
# Run through all links
for j in range(self.N):
for ((i, lag_i), link) in self.graph_dict[j].items():
if link == "":
continue
X = (i, lag_i)
Y = (j, 0)
# Apply above rule for A = X and B = Y
link_XY = self._get_link(X, Y)
smaller_XY = self._is_smaller(X, Y)
if link_XY[2] == ">":
if link_XY[1] == "?":
if smaller_XY:
new_link = link_XY[0] + "L>"
else:
new_link = link_XY[0] + "R>"
self._write_link(X, Y, new_link, verbosity=self.verbosity)
elif (link_XY[1] == "R" and smaller_XY) or (link_XY[1] == "L" and not smaller_XY):
new_link = link_XY[0] + "!>"
self._write_link(X, Y, new_link, verbosity=self.verbosity)
# Apply above rule for A = Y and B = X
link_YX = self._get_link(Y, X)
smaller_YX = self._is_smaller(Y, X)
if link_YX[2] == ">":
if link_YX[1] == "?":
if smaller_YX:
new_link = link_YX[0] + "L>"
else:
new_link = link_YX[0] + "R>"
self._write_link(Y, X, new_link, verbosity=self.verbosity)
elif (link_YX[1] == "R" and smaller_YX) or (link_YX[1] == "L" and not smaller_YX):
new_link = link_YX[0] + "!>"
self._write_link(Y, X, new_link, verbosity=self.verbosity)
def _is_smaller(self, X, Y):
"""
A node X is said to be smaller than node Y if
i) X is before Y or
ii) X and Y are contemporaneous and the variable index of X is smaller than that of Y.
Return True if X is smaller than Y, else return False
"""
return (X[1] < Y[1]) or (X[1] == Y[1] and X[0] < Y[0])
def _get_a_pds_t(self, A, B):
"""Return the set a_pds_t(A, B)"""
        # Unpack A
var_A, lag_A = A
# Compute a_pds_t(A, B) according to the current graph
return {(var, lag + lag_A)
# W = (var, lag) is in a_pds_t(A, B) if ...
for ((var, lag), link) in self.graph_dict[var_A].items()
# ... it is a non-future adjacency of A
if len(link) > 0
# ... and it is not B
and (var, lag + lag_A) != B
# ... it is not before t - self.tau_max
and lag + lag_A >= -self.tau_max
# ... and it is not a definite non-ancestor of A
and link[0] != "<"
}
def _get_ancs(self, node_list):
"""Return the currently known set of ancestors of all nodes in the list node_list. The nodes are not required to be at lag 0"""
# Build the output set
out = set()
# Run through all nodes
for A in node_list:
# Unpack the node
(var_A, lag_A) = A
            # Add the ancestors of A to out
out = out.union(
{(var, lag + lag_A) for (var, lag) in self.def_ancs[var_A] if lag + lag_A >= - self.tau_max})
# Return
return out
def _get_non_ancs(self, node_list):
"""Return the currently known set of non-ancestors of all nodes in the list node_list. The nodes are not required to be at lag 0"""
# Build the output set
out = set()
# Run through all nodes
for A in node_list:
# Unpack the node
(var_A, lag_A) = A
            # Add the non-ancestors of A to out
out = out.union(
{(var, lag + lag_A) for (var, lag) in self.def_non_ancs[var_A] if lag + lag_A >= - self.tau_max})
# Return
return out
def _fix_all_edges(self):
"""Remove all non-trivial orientations"""
for j in range(self.N):
for (i, lag_i) in self.graph_dict[j].keys():
link = self._get_link((i, lag_i), (j, 0))
if len(link) > 0:
new_link = link[0] + "-" + link[2]
self.graph_dict[j][(i, lag_i)] = new_link
########################################################################################################################
########################################################################################################################
########################################################################################################################
def _apply_APR(self, only_lagged):
"""Return all orientations implied by orientation rule APR"""
# Build the output list
out = []
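        # While self.no_apr is positive, the APR rule is skipped and no orientations are returned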
if self.no_apr > 0:
return out
# Get and run through all relevant graphical structures
for j in range(self.N):
for (i, lag_i) in self.graph_dict[j]:
A = (i, lag_i)
B = (j, 0)
if only_lagged and lag_i == 0:
continue
# Get the link from A to B
link_AB = self._get_link(A, B)
if self._match_link(pattern='-!>', link=link_AB) \
or (self._match_link(pattern='-R>', link=link_AB) and self._is_smaller(A, B)) \
or (self._match_link(pattern='-L>', link=link_AB) and self._is_smaller(B, A)):
# Write the new link from A to B to the output list
out.append(self._get_pair_key_and_new_link(A, B, "-->"))
# Return the output list
return out
def _apply_ER01(self, only_lagged):
"""Return all orientations implied by orientation rule R1^prime"""
# Build the output list
out = []
# Find all graphical structures that the rule applies to
all_appropriate_triples = self._find_triples(pattern_ij='**>', pattern_jk='o*+', pattern_ik='')
# Run through all appropriate graphical structures
for (A, B, C) in all_appropriate_triples:
if only_lagged and B[1] == C[1]:
continue
if self.verbosity >= 2:
print("ER01: ", (A, B, C))
# Check whether the rule applies
if self._B_in_SepSet_AC(A, B, C):
if self.verbosity >= 2:
print(" --> in sepset ")
# Prepare the new link from B to C and append it to the output list
link_BC = self._get_link(B, C)
new_link_BC = "-" + link_BC[1] + ">"
out.append(self._get_pair_key_and_new_link(B, C, new_link_BC))
# Return the output list
return out
def _apply_ER02(self, only_lagged):
"""Return all orientations implied by orientation rule R2^prime"""
# Build the output list
out = []
# Find all graphical structures that the rule applies to
all_appropriate_triples = set(self._find_triples(pattern_ij='-*>', pattern_jk='**>', pattern_ik='+*o'))
all_appropriate_triples = all_appropriate_triples.union(
set(self._find_triples(pattern_ij='**>', pattern_jk='-*>', pattern_ik='+*o')))
# Run through all appropriate graphical structures
for (A, B, C) in all_appropriate_triples:
if only_lagged and A[1] == C[1]:
continue
# The rule applies to all relevant graphical structures. Therefore, prepare the new link and append it to the output list
link_AC = self._get_link(A, C)
new_link_AC = link_AC[0] + link_AC[1] + ">"
out.append(self._get_pair_key_and_new_link(A, C, new_link_AC))
# print("Rule 2", A, self._get_link(A, B), B, self._get_link(B, C), C, self._get_link(A, C), new_link_AC)
# Return the output list
return out
def _apply_ER03(self, only_lagged):
"""Return all orientations implied by orientation rule R3^prime"""
# Build the output list
out = []
# Find all graphical structures that the rule applies to
all_appropriate_quadruples = self._find_quadruples(pattern_ij='**>', pattern_jk='<**', pattern_ik='',
pattern_il='+*o', pattern_jl='o*+', pattern_kl='+*o')
# Run through all appropriate graphical structures
for (A, B, C, D) in all_appropriate_quadruples:
if only_lagged and B[1] == D[1]:
continue
# Check whether the rule applies
if self._B_in_SepSet_AC(A, D, C):
# Prepare the new link from D to B and append it to the output list
link_DB = self._get_link(D, B)
new_link_DB = link_DB[0] + link_DB[1] + ">"
out.append(self._get_pair_key_and_new_link(D, B, new_link_DB))
# Return the output list
return out
def _apply_R04(self, only_lagged):
"""Return all orientations implied by orientation rule R4 (standard FCI rule)"""
# Build the output list
out = []
# Find all relevant triangles W-V-Y
all_appropriate_triples = self._find_triples(pattern_ij='<-*', pattern_jk='o-+', pattern_ik='-->')
# Run through all of these triangles
for triple in all_appropriate_triples:
(W, V, Y) = triple
if only_lagged and (V[1] == Y[1] and W[1] == V[1]):
continue
# Get the current link from W to V, which we will need below
link_WV = self._get_link(W, V)
# Find all discriminating paths for this triangle
# Note: To guarantee order independence, we check all discriminating paths. Alternatively, we could check the rule for all shortest such paths
discriminating_paths = self._get_R4_discriminating_paths(triple, max_length=np.inf)
# Run through all discriminating paths
for path in discriminating_paths:
# Get the end point node
X_1 = path[-1]
# Check which of the two cases of the rule we are in, then append the appropriate new links to the output list
if self._B_in_SepSet_AC(X_1, V, Y):
# New link from V to Y
out.append(self._get_pair_key_and_new_link(V, Y, "-->"))
elif link_WV != "<-x" and self._B_not_in_SepSet_AC(X_1, V, Y):
# New link from V to Y
out.append(self._get_pair_key_and_new_link(V, Y, "<->"))
# If needed, also the new link from W to V
if link_WV != "<->":
out.append(self._get_pair_key_and_new_link(W, V, "<->"))
# Return the output list
return out
def _apply_ER08(self, only_lagged):
"""Return all orientations implied by orientation rule R8^prime"""
# Build the output list
out = []
# Find all graphical structures that the rule applies to
all_appropriate_triples = self._find_triples(pattern_ij='-*>', pattern_jk='-*>', pattern_ik='o*+')
# Run through all appropriate graphical structures
for (A, B, C) in all_appropriate_triples:
if only_lagged and A[1] == C[1]:
continue
# The rule applies to all relevant graphical structures. Therefore, prepare the new link and append it to the output list
link_AC = self._get_link(A, C)
new_link_AC = "-" + link_AC[1] + ">"
out.append(self._get_pair_key_and_new_link(A, C, new_link_AC))
# print("Rule 8:", A, self._get_link(A, B), B, self._get_link(B, C), C, link_AC, new_link_AC)
# Return the output list
return out
def _apply_ER09(self, only_lagged):
"""Return all orientations implied by orientation rule R9^prime"""
# Build the output list
out = []
# Find unshielded triples B_1 o--*--o A o--*--> C or B_1 <--*--o A o--*--> C or B_1 <--*-- A o--*--> C
all_appropriate_triples = set(self._find_triples(pattern_ij='o*o', pattern_jk='o*>', pattern_ik=''))
all_appropriate_triples = all_appropriate_triples.union(
set(self._find_triples(pattern_ij='<*o', pattern_jk='o*>', pattern_ik='')))
all_appropriate_triples = all_appropriate_triples.union(
set(self._find_triples(pattern_ij='<*-', pattern_jk='o*>', pattern_ik='')))
# Run through all these triples
for (B_1, A, C) in all_appropriate_triples:
if only_lagged and A[1] == C[1]:
continue
# Check whether A is in SepSet(B_1, C), else the rule does not apply
if not self._B_in_SepSet_AC(B_1, A, C):
continue
# Although we do not yet know whether the rule applies, we here determine the new form of the link from A to C if the rule does apply
link_AC = self._get_link(A, C)
new_link_AC = "-" + link_AC[1] + ">"
pair_key, new_link = self._get_pair_key_and_new_link(A, C, new_link_AC)
# For the search of uncovered potentially directed paths from B_1 to C, determine the initial pattern as dictated by the link from A to B_1
first_link = self._get_link(A, B_1)
if self._match_link(pattern='o*o', link=first_link):
initial_allowed_patterns = ['-*>', 'o*>', 'o*o']
elif self._match_link(pattern='o*>', link=first_link) or self._match_link(pattern='-*>', link=first_link):
initial_allowed_patterns = ['-*>']
# Return all uncovered potentially directed paths from B_1 to C
# uncovered_pd_paths = self._find_potentially_directed_paths(B_1, C, initial_allowed_patterns, return_if_any_path_found = False, uncovered=True, reduce_allowed_patterns=True, max_length = np.inf)
# Find all uncovered potentially directed paths from B_1 to C
uncovered_pd_paths = self._get_potentially_directed_uncovered_paths(B_1, C, initial_allowed_patterns)
# Run through all of these paths and check i) whether the node adjacent to B_1 is non-adjacent to A, ii) whether condition iv) of the rule antecedent is true. If there is any such path, then the link can be oriented
for upd_path in uncovered_pd_paths:
# Is the node adjacent to B_1 non-adjacent to A (this implies that there are at least three nodes on the path, because else the node adjacent to B_1 is C) and is A not part of the path?
if len(upd_path) < 3 or A in upd_path or self._get_link(A, upd_path[1]) != "":
continue
# If the link from A to B_1 is into B_1, condition iv) is true
if first_link[2] == ">":
# Mark the link from A to C for orientation, break the for loop to continue with the next triple
out.append((pair_key, new_link))
break
                # If the link from A to B_1 is not into B_1, we need to check whether B_1 is in SepSet(A, X) where X is the node on upd_path next to B_1
if not self._B_in_SepSet_AC(A, B_1, upd_path[1]):
# Continue with the next upd_path
continue
# Now check whether rule iv) for all triples on upd_path
path_qualifies = True
for i in range(len(upd_path) - 2):
# We consider the unshielded triples upd_path[i] - upd_path[i+1] - upd_path[i+2]
# If the link between upd_path[i] and upd_path[i+1] is into the latter, condition iv) is true
left_link = self._get_link(upd_path[i], upd_path[i + 1])
if left_link[2] == ">":
# The path qualifies, break the inner for loop
break
# If not, then we need to continue with checking whether upd_path[i+1] in SepSet(upd_path[i+1], upd_path[i+2])
if not self._B_in_SepSet_AC(upd_path[i], upd_path[i + 1], upd_path[i + 2]):
                        # The path does not qualify, break the inner for loop
path_qualifies = False
break
# The path qualifies, mark the edge from A to C for orientation and break the outer for loop to continue with the next triple
if path_qualifies:
out.append((pair_key, new_link))
break
# The path does not qualify, continue with the next upd_path
# end for upd_path in uncovered_pd_paths
# end for (B_1, A, C) in all_appropriate_triples
# Return the output list
return out
def _apply_ER10(self, only_lagged):
"""Return all orientations implied by orientation rule R10^prime"""
# Build the output list
out = []
# Find all triples A o--> C <-- P_C
all_appropriate_triples = set(self._find_triples(pattern_ij='o*>', pattern_jk='<*-', pattern_ik=''))
all_appropriate_triples = all_appropriate_triples.union(
set(self._find_triples(pattern_ij='o*>', pattern_jk='<*-', pattern_ik='***')))
# Collect all triples for the given pair (A, C)
triple_sorting_dict = {}
for (A, C, P_C) in all_appropriate_triples:
if triple_sorting_dict.get((A, C)) is None:
triple_sorting_dict[(A, C)] = [P_C]
else:
triple_sorting_dict[(A, C)].append(P_C)
# Run through all (A, C) pairs
for (A, C) in triple_sorting_dict.keys():
if only_lagged and A[1] == C[1]:
continue
# Find all uncovered potentially directed paths from A to C through any of the P_C nodes
relevant_paths = []
for P_C in triple_sorting_dict[(A, C)]:
for upd_path in self._get_potentially_directed_uncovered_paths(A, P_C, ['-*>', 'o*>', 'o*o']):
                    # Run through all of these paths and check i) whether the second to last element is not adjacent
                    # to C (this requires the path to have at least three nodes, because else the second to last
                    # element would be A), ii) whether the left edge of every 3-node sub-path is into the middle node
                    # or, if not, whether the middle node is in the separating set of the two end-point nodes of that
                    # sub-path, and iii) whether C is not an element of the path. Paths that meet these conditions are
                    # collected in relevant_paths; their second nodes (the ones adjacent to A) form second_nodes below
if len(upd_path) < 3 or C in upd_path or self._get_link(upd_path[-2], C) != "":
continue
upd_path.append(C)
path_qualifies = True
for i in range(len(upd_path) - 2):
# We consider the unshielded triples upd_path[i] - upd_path[i+1] - upd_path[i+2]
# If the link between upd_path[i] and upd_path[i+1] is into the latter, the path qualifies
left_link = self._get_link(upd_path[i], upd_path[i + 1])
if left_link[2] == ">":
# The path qualifies, break the inner for loop
break
# If not, then we need to continue with checking whether upd_path[i+1] in SepSet(upd_path[i+1], upd_path[i+2])
if not self._B_in_SepSet_AC(upd_path[i], upd_path[i + 1], upd_path[i + 2]):
# The path does not qualify, break the inner for loop
path_qualifies = False
break
                        # The path qualifies, add it to relevant_paths and continue with the next upd_path
if path_qualifies:
relevant_paths.append(upd_path)
# The path does not qualify, continue with the next upd_path
# end for path in self._get_potentially_directed_uncovered_paths(A, P_C, ['-*>', 'o*>', 'o*o'])
# end for P_C in triple_sorting_dict[(A, C)]
# Find all second nodes on the relevant paths
second_nodes = list({path[1] for path in relevant_paths})
# Check whether there is any pair of non-adjacent nodes in second_nodes, such that A is in their separating set. If yes, mark the link from A to C for orientation
for i, j in product(range(len(second_nodes)), range(len(second_nodes))):
if i < j and self._get_link(second_nodes[i], second_nodes[j]) == "" and self._B_in_SepSet_AC(
second_nodes[i], A, second_nodes[j]):
# Append new link and break the for loop
link_AC = self._get_link(A, C)
new_link_AC = "-" + link_AC[1] + ">"
out.append(self._get_pair_key_and_new_link(A, C, new_link_AC))
break
# end for (A, C) in triple_sorting_dict.keys()
# Return the output list
return out
def _apply_ER00a(self, only_lagged):
"""Return all orientations implied by orientation rule R0^prime a"""
# Build the output list
out = []
# Find all graphical structures that the rule applies to
all_appropriate_triples = self._find_triples(pattern_ij='***', pattern_jk='***', pattern_ik='')
# Run through all appropriate graphical structures
for (A, B, C) in all_appropriate_triples:
# Unpack A, B, C
(i, lag_i) = A
(j, lag_j) = B
(k, lag_k) = C
if only_lagged and (A[1] == B[1] or B[1] == C[1]):
continue
# Get all weakly minimal separating sets in SepSet(A, C)
# Remark: The non weakly minimal separating sets may be larger, that's why we disfavor them
sepsets = self._get_sepsets(A, C)
sepsets = {Z for (Z, status) in sepsets if status == "wm"}
###################################################################################
### Part 1) of the rule ###########################################################
remove_AB = False
link_AB = self._get_link(A, B)
# i) Middle mark must not be "x" or "-"
if link_AB[1] not in ['-', 'x']:
# Test A indep B given union(SepSet(A, C), intersection(def-anc(B), adj(B))) setminus{A, B} setminus{future of both A and B}
# Conditioning on parents
Z_add = self._get_parents(A, B).difference({A, B})
# Shift the lags appropriately
if lag_i <= lag_j:
X = (i, lag_i - lag_j) # A shifted
Y = (j, 0) # B shifted
delta_lag = lag_j
else:
X = (j, lag_j - lag_i) # B shifted
Y = (i, 0) # A shifted
delta_lag = lag_i
# Run through all weakly minimal separating sets of A and C
for Z in sepsets:
# Construct the conditioning set to test
Z_test = Z.union(Z_add).difference({A, B})
Z_test = {(var, lag - delta_lag) for (var, lag) in Z_test if
lag - delta_lag <= 0 and lag - delta_lag >= -self.tau_max}
Z_add2 = {(var, lag - delta_lag) for (var, lag) in Z_add.difference({A, B}) if
lag - delta_lag <= 0 and lag - delta_lag >= -self.tau_max}
# Test conditional independence of X and Y given Z
val, pval = self.cond_ind_test.run_test(X=[X], Y=[Y], Z=list(Z_test), tau_max=self.tau_max)
if self.verbosity >= 2:
# print("ER00a(part1): %s _|_ %s | Z_test = %s: val = %.2f / pval = % .4f" %
# (X, Y, ' '.join([str(z) for z in list(Z_test)]), val, pval))
print("ER00a(part1): %s _|_ %s | Z_add = %s, Z = %s: val = %.2f / pval = % .4f" %
(X, Y, ' '.join([str(z) for z in Z_add2]), ' '.join([str(z) for z in Z_test]), val, pval))
# Accordingly update dictionaries that keep track of the test statistic, the corresponding p-value and the cardinality of conditioning sets
self._update_val_min(X, Y, val)
self._update_pval_max(X, Y, pval)
self._update_cardinality(X, Y, len(Z_test))
# Check whether test result was significant
if pval > self.pc_alpha:
# Mark the edge from X to Y for removal and save sepset
remove_AB = True
self._save_sepset(X, Y, (frozenset(Z_test), "nwm"))
if remove_AB:
# Remember the edge for removal
pair_key, new_link = self._get_pair_key_and_new_link(A, B, "")
out.append((pair_key, new_link))
###################################################################################
### Part 2) of the rule ###########################################################
remove_CB = False
link_CB = self._get_link(C, B)
# i) Middle mark must not be "x" or "-"
if link_CB[1] not in ['-', 'x']:
# Test C indep B given union(SepSet(A, C), intersection(def-anc(B), adj(B))) setminus{A, B} setminus{future of both C and B}
# Conditioning on parents
Z_add = self._get_parents(C, B).difference({C, B})
# Shift the lags appropriately
if lag_k <= lag_j:
X = (k, lag_k - lag_j)
Y = (j, 0)
delta_lag = lag_j
else:
X = (j, lag_j - lag_k)
Y = (k, 0)
delta_lag = lag_k
# Run through all weakly minimal separating sets of A and C
for Z in sepsets:
# Construct the conditioning set to test
Z_test = Z.union(Z_add).difference({C, B})
Z_test = {(var, lag - delta_lag) for (var, lag) in Z_test if
lag - delta_lag <= 0 and lag - delta_lag >= -self.tau_max}
Z_add2 = {(var, lag - delta_lag) for (var, lag) in Z_add.difference({A, B}) if
lag - delta_lag <= 0 and lag - delta_lag >= -self.tau_max}
# Test conditional independence of X and Y given Z
val, pval = self.cond_ind_test.run_test(X=[X], Y=[Y], Z=list(Z_test), tau_max=self.tau_max)
if self.verbosity >= 2:
# print("ER00a(part2): %s _|_ %s | Z_test = %s: val = %.2f / pval = % .4f" %
# (X, Y, ' '.join([str(z) for z in list(Z_test)]), val, pval))
print("ER00a(part2): %s _|_ %s | Z_add = %s, Z = %s: val = %.2f / pval = % .4f" %
(X, Y, ' '.join([str(z) for z in Z_add2]), ' '.join([str(z) for z in Z_test]), val, pval))
# Accordingly update dictionaries that keep track of the test statistic, the corresponding p-value and the cardinality of conditioning sets
self._update_val_min(X, Y, val)
self._update_pval_max(X, Y, pval)
self._update_cardinality(X, Y, len(Z_test))
# Check whether test result was significant
if pval > self.pc_alpha:
# Mark the edge from X to Y for removal and save sepset
remove_CB = True
self._save_sepset(X, Y, (frozenset(Z_test), "nwm"))
if remove_CB:
# Remember the edge for removal
pair_key, new_link = self._get_pair_key_and_new_link(C, B, "")
out.append((pair_key, new_link))
###################################################################################
### Part 3) of the rule ###########################################################
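            # Part 3 is skipped if either edge was marked for removal above, if either mark at B is '-' or 'x',
            # if either middle mark is 'x', or if both links already point into B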
if remove_AB or remove_CB or link_AB[2] in ["-", "x"] or link_CB[2] in ["-", "x"] or link_AB[1] == "x" or \
link_CB[1] == "x" or (link_AB[2] == ">" and link_CB[2] == ">"):
continue
if self._B_not_in_SepSet_AC(A, B, C):
# Prepare the new links and save them to the output
if link_AB[2] != ">":
new_link_AB = link_AB[0] + link_AB[1] + ">"
out.append(self._get_pair_key_and_new_link(A, B, new_link_AB))
new_link_CB = link_CB[0] + link_CB[1] + ">"
if link_CB[2] != ">":
out.append(self._get_pair_key_and_new_link(C, B, new_link_CB))
# end for (A, B, C) in all_appropriate_triples
# Return the output list
return out
def _apply_ER00b(self, only_lagged):
"""Return all orientations implied by orientation rule R0^prime b"""
# Build the output list
out = []
# Find all graphical structures that the rule applies to
triples_1 = self._find_triples(pattern_ij='**>', pattern_jk='o!+', pattern_ik='')
triples_2 = [trip for trip in self._find_triples(pattern_ij='**>', pattern_jk='oR+', pattern_ik='') if
self._is_smaller(trip[1], trip[2])]
triples_3 = [trip for trip in self._find_triples(pattern_ij='**>', pattern_jk='oL+', pattern_ik='') if
self._is_smaller(trip[2], trip[1])]
all_appropriate_triples = set(triples_1).union(set(triples_2), set(triples_3))
# Run through all appropriate graphical structures
for (A, B, C) in all_appropriate_triples:
# Unpack A, B, C
(i, lag_i) = A
(j, lag_j) = B
(k, lag_k) = C
if only_lagged and A[1] == B[1]:
continue
# Get all weakly minimal separating sets in SepSet(A, C)
# Remark: The non weakly minimal separating sets may be larger, that's why we disfavor them
sepsets = self._get_sepsets(A, C)
sepsets = {Z for (Z, status) in sepsets if status == "wm"}
###################################################################################
### Part 1) of the rule ###########################################################
remove_AB = False
link_AB = self._get_link(A, B)
# i) Middle mark must not be "x" or "-"
if link_AB[1] not in ['-', 'x']:
# Test A indep B given union(SepSet(A, C), intersection(def-anc(B), adj(B))) setminus{A, B} setminus{future of both A and B}
# Conditioning on parents
Z_add = self._get_parents(A, B).difference({A, B})
# Shift the lags appropriately
if lag_i <= lag_j:
X = (i, lag_i - lag_j)
Y = (j, 0)
delta_lag = lag_j
else:
X = (j, lag_j - lag_i)
Y = (i, 0)
delta_lag = lag_i
# Run through all weakly minimal separating sets of A and C
for Z in sepsets:
# Construct the conditioning set to test
Z_test = Z.union(Z_add).difference({A, B})
Z_test = {(var, lag - delta_lag) for (var, lag) in Z_test if
lag - delta_lag <= 0 and lag - delta_lag >= -self.tau_max}
Z_add2 = {(var, lag - delta_lag) for (var, lag) in Z_add.difference({A, B}) if
lag - delta_lag <= 0 and lag - delta_lag >= -self.tau_max}
# Test conditional independence of X and Y given Z
val, pval = self.cond_ind_test.run_test(X=[X], Y=[Y], Z=list(Z_test), tau_max=self.tau_max)
if self.verbosity >= 2:
# print("ER00b: %s _|_ %s | Z_test = %s: val = %.2f / pval = % .4f" %
# (X, Y, ' '.join([str(z) for z in list(Z_test)]), val, pval))
print("ER00b: %s _|_ %s | Z_add = %s, Z = %s: val = %.2f / pval = % .4f" %
(X, Y, ' '.join([str(z) for z in Z_add2]), ' '.join([str(z) for z in Z_test]), val, pval))
# Accordingly update dictionaries that keep track of the test statistic, the corresponding p-value and the cardinality of conditioning sets
self._update_val_min(X, Y, val)
self._update_pval_max(X, Y, pval)
self._update_cardinality(X, Y, len(Z_test))
# Check whether test result was significant
if pval > self.pc_alpha:
# Mark the edge from X to Y for removal and save sepset
remove_AB = True
self._save_sepset(X, Y, (frozenset(Z_test), "nwm"))
if remove_AB:
# Remember the edge for removal
pair_key, new_link = self._get_pair_key_and_new_link(A, B, "")
out.append((pair_key, new_link))
###################################################################################
### Part 2) of the rule ###########################################################
if only_lagged and B[1] == C[1]:
continue
if remove_AB or link_AB[1] == "x":
continue
if self._B_not_in_SepSet_AC(A, B, C):
# Prepare the new link and save it to the output
link_CB = self._get_link(C, B)
new_link_CB = link_CB[0] + link_CB[1] + ">"
out.append(self._get_pair_key_and_new_link(C, B, new_link_CB))
# end for (A, B, C) in all_appropriate_triples
# Return the output list
return out
def _apply_ER00c(self, only_lagged):
"""Return all orientations implied by orientation rule R0^prime c"""
# Build the output list
out = []
# Find all graphical structures that the rule applies to
triples_1 = self._find_triples(pattern_ij='*-*', pattern_jk='o!+', pattern_ik='')
triples_2 = [trip for trip in self._find_triples(pattern_ij='*-*', pattern_jk='oR+', pattern_ik='') if
self._is_smaller(trip[1], trip[2])]
triples_3 = [trip for trip in self._find_triples(pattern_ij='*-*', pattern_jk='oL+', pattern_ik='')
if self._is_smaller(trip[2], trip[1])]
all_appropriate_triples = set(triples_1).union(set(triples_2), set(triples_3))
# Run through all appropriate graphical structures
for (A, B, C) in all_appropriate_triples:
if only_lagged and B[1] == C[1]:
continue
# Check whether the rule applies
if self._B_not_in_SepSet_AC(A, B, C):
# Prepare the new link and append it to the output
link_CB = self._get_link(C, B)
new_link_CB = link_CB[0] + link_CB[1] + ">"
out.append(self._get_pair_key_and_new_link(C, B, new_link_CB))
# end for (A, B, C) in all_appropriate_triples
# Return the output list
return out
def _apply_ER00d(self, only_lagged):
"""Return all orientations implied by orientation rule R0^prime d"""
# Build the output list
out = []
# Find all graphical structures that the rule applies to
triples_1 = self._find_triples(pattern_ij='*-o', pattern_jk='o-*', pattern_ik='')
triples_2 = self._find_triples(pattern_ij='*->', pattern_jk='o-*', pattern_ik='')
all_appropriate_triples = set(triples_1).union(set(triples_2))
# Run through all appropriate graphical structures
for (A, B, C) in all_appropriate_triples:
if only_lagged and (A[1] == B[1] and B[1] == C[1]):
continue
# Check whether the rule applies
if self._B_not_in_SepSet_AC(A, B, C):
# Prepare the new links and append them to the output
# From C to B
if not only_lagged or B[1] != C[1]:
link_CB = self._get_link(C, B)
new_link_CB = link_CB[0] + link_CB[1] + ">"
out.append(self._get_pair_key_and_new_link(C, B, new_link_CB))
                # If needed, also from A to B
link_AB = self._get_link(A, B)
if (not only_lagged or A[1] != B[1]) and link_AB[2] == "o":
new_link_AB = link_AB[0] + link_AB[1] + ">"
out.append(self._get_pair_key_and_new_link(A, B, new_link_AB))
# end for (A, B, C) in all_appropriate_triples
# Return the output list
return out
########################################################################################################################
########################################################################################################################
########################################################################################################################
def _print_graph_dict(self):
"""Print all links in graph_dict"""
for j in range(self.N):
for ((i, lag_i), link) in self.graph_dict[j].items():
if len(link) > 0 and (lag_i < 0 or i < j):
print("({},{:2}) {} {}".format(i, lag_i, link, (j, 0)))
def _get_link(self, A, B):
"""Get the current link from node A to B"""
(var_A, lag_A) = A
(var_B, lag_B) = B
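        # Three cases: the nodes are more than tau_max apart (no link exists), A is not after B
        # (read the stored link directly), or A is after B (read the link stored at A and reverse it)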
if abs(lag_A - lag_B) > self.tau_max:
return ""
elif lag_A <= lag_B:
return self.graph_dict[var_B][(var_A, lag_A - lag_B)]
else:
return self._reverse_link(self.graph_dict[var_A][(var_B, lag_B - lag_A)])
def _get_non_future_adj(self, node_list):
"""Return all non-future adjacencies of all nodes in node_list"""
# Build the output starting from an empty set
out = set()
# For each node W in node_list ...
for A in node_list:
# Unpack A
(var_A, lag_A) = A
# Add all (current) non-future adjacencies of A to the set out
out = out.union({(var, lag + lag_A) for ((var, lag), link) in self.graph_dict[var_A].items() if
len(link) > 0 and lag + lag_A >= -self.tau_max})
# Return the desired set
return out
    def _update_val_min(self, X, Y, val):
        """Some conditional independence test for X and Y has given the test statistic value val.
        Update the val_min dictionary accordingly, keeping the entry with the smallest absolute value.
        chrei: modified to allow negative link values."""
        if X[1] < 0 or X[0] < Y[0]:
            if np.abs(val) < np.abs(self.val_min[Y[0]][X]):
                self.val_min[Y[0]][X] = val
        else:
            if np.abs(val) < np.abs(self.val_min[X[0]][Y]):
                self.val_min[X[0]][Y] = val
        # self.val_min[X[0]][Y] = min(self.val_min[X[0]][Y], np.abs(val))
def _update_cardinality(self, X, Y, cardinality):
"""Some conditional independence test for X and Y has given the p-value val. Update the pval_max dictionary accordingly"""
if X[1] < 0 or X[0] < Y[0]:
self.max_cardinality[Y[0]][X] = max(self.max_cardinality[Y[0]][X], cardinality)
else:
self.max_cardinality[X[0]][Y] = max(self.max_cardinality[X[0]][Y], cardinality)
def _update_pval_max(self, X, Y, pval):
"""Some conditional independence test for X and Y has given the p-value val. Update the pval_max dictionary accordingly"""
if X[1] < 0 or X[0] < Y[0]:
self.pval_max[Y[0]][X] = max(self.pval_max[Y[0]][X], pval)
else:
self.pval_max[X[0]][Y] = max(self.pval_max[X[0]][Y], pval)
def _save_sepset(self, X, Y, Z):
"""Save Z as separating sets of X and Y. Y is assumed to be at lag 0"""
# Unpack X and Y
(i, lag_i) = X
(j, lag_j) = Y
assert lag_j == 0
# Save the sepset
if lag_i < 0 or i < j:
self.sepsets[j][X].add(Z)
else:
self.sepsets[i][Y].add(Z)
def _reverse_link(self, link):
"""Reverse a given link, taking care to replace > with < and vice versa"""
if link == "":
return ""
if link[2] == ">":
left_mark = "<"
else:
left_mark = link[2]
if link[0] == "<":
right_mark = ">"
else:
right_mark = link[0]
return left_mark + link[1] + right_mark
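    # Hedged illustration (traced from the logic above, not part of the original source):
    # _reverse_link("-->") returns "<--", _reverse_link("o->") returns "<-o",
    # and _reverse_link("<->") returns "<->" (symmetric links are unchanged).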
def _write_link(self, A, B, new_link, verbosity=0):
"""
Write the information that the link from node A to node B takes the form of new_link into self.graph_dict.
        It is not assumed that at least one of the nodes is at lag 0, nor that A comes before B.
        If A and B are contemporaneous, the link from B to A is also written as the reverse of new_link.
"""
# Unpack A and B
(var_A, lag_A) = A
(var_B, lag_B) = B
# Write the link from A to B
if lag_A < lag_B:
if verbosity >= 1:
print("{:10} ({},{:2}) {:3} ({},{:2}) ==> ({},{:2}) {:3} ({},{:2}) ".format("Writing:", var_A,
lag_A - lag_B,
self.graph_dict[var_B][
(var_A, lag_A - lag_B)],
var_B, 0, var_A,
lag_A - lag_B, new_link,
var_B, 0))
# print("Replacing {:3} from ({},{:2}) to {} with {:3}".format(self.graph_dict[var_B][(var_A, lag_A - lag_B)], var_A, lag_A - lag_B, (var_B, 0), new_link))
self.graph_dict[var_B][(var_A, lag_A - lag_B)] = new_link
elif lag_A == lag_B:
if verbosity >= 1:
print("{:10} ({},{:2}) {:3} ({},{:2}) ==> ({},{:2}) {:3} ({},{:2}) ".format("Writing:", var_A,
lag_A - lag_B,
self.graph_dict[var_B][
(var_A, 0)], var_B, 0,
var_A, 0, new_link, var_B,
0))
# print("Replacing {:3} from ({},{:2}) to {} with {:3}".format(self.graph_dict[var_B][(var_A, 0)], var_A, 0, (var_B, 0), new_link))
print("{:10} ({},{:2}) {:3} ({},{:2}) ==> ({},{:2}) {:3} ({},{:2}) ".format("Writing:", var_B, 0,
self.graph_dict[var_A][
(var_B, 0)], var_A, 0,
var_B, 0,
self._reverse_link(
new_link), var_A, 0))
# print("Replacing {:3} from ({},{:2}) to {} with {:3}".format(self.graph_dict[var_A][(var_B, 0)], var_B, 0, (var_A, 0), self._reverse_link(new_link)))
self.graph_dict[var_B][(var_A, 0)] = new_link
self.graph_dict[var_A][(var_B, 0)] = self._reverse_link(new_link)
else:
if verbosity >= 1:
print("{:10} ({},{:2}) {:3} ({},{:2}) ==> ({},{:2}) {:3} ({},{:2}) ".format("Writing:", var_B,
lag_B - lag_A,
self.graph_dict[var_A][
(var_B, lag_B - lag_A)],
var_A, 0, var_B,
lag_B - lag_A,
self._reverse_link(
new_link), var_A, 0))
# print("Replacing {:3} from ({},{:2}) to {} with {:3}".format(self.graph_dict[var_A][(var_B, lag_B - lag_A)], var_B, lag_B - lag_A, (var_A, 0), self._reverse_link(new_link)))
self.graph_dict[var_A][(var_B, lag_B - lag_A)] = self._reverse_link(new_link)
def _get_sepsets(self, A, B):
"""For two non-adjacent nodes, get the their separating stored in self.sepsets."""
(var_A, lag_A) = A
(var_B, lag_B) = B
def _shift(Z, lag_B):
return frozenset([(var, lag + lag_B) for (var, lag) in Z])
if lag_A < lag_B:
out = {(_shift(Z, lag_B), status) for (Z, status) in self.sepsets[var_B][(var_A, lag_A - lag_B)]}
elif lag_A > lag_B:
out = {(_shift(Z, lag_A), status) for (Z, status) in self.sepsets[var_A][(var_B, lag_B - lag_A)]}
else:
out = {(_shift(Z, lag_A), status) for (Z, status) in
self.sepsets[max(var_A, var_B)][(min(var_A, var_B), 0)]}
return out
def _initialize_full_graph(self):
"""
The function _get_na_pds_t() needs to know the future adjacencies of a given node, not only the non-future adjacencies that are stored in self.graph_dict. To aid this, this function initializes the dictionary graph_full_dict:
self.graph_full_dict[j][(i, -tau_i)] contains all adjacencies of (j, 0), in particular those for which tau_i < 0.
"""
# Build from an empty nested dictionary
self.graph_full_dict = {j: {} for j in range(self.N)}
# Run through the entire nested dictionary self.graph_dict
for j in range(self.N):
for ((var, lag), link) in self.graph_dict[j].items():
if link != "":
# Add non-future adjacencies
self.graph_full_dict[j][(var, lag)] = link
# Add the future adjacencies
if lag < 0:
self.graph_full_dict[var][(j, -lag)] = self._reverse_link(link)
# Return nothing
return None
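    # Hedged illustration (assumed toy values, not from the original source): with N = 2 and
    # graph_dict = {0: {}, 1: {(0, -1): "-->"}}, this builds
    # graph_full_dict = {0: {(1, 1): "<--"}, 1: {(0, -1): "-->"}},
    # i.e. variable 0 additionally stores its future adjacency to (1, 0) at lag +1.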
def _get_pair_key_and_new_link(self, A, B, link_AB):
"""The link from A to B takes the form link_AB. Bring this information into a form appropriate for the output of rule applications"""
(var_A, lag_A) = A
(var_B, lag_B) = B
if lag_A <= lag_B:
return ((var_A, var_B, lag_A - lag_B), link_AB)
elif lag_A > lag_B:
return ((var_B, var_A, lag_B - lag_A), self._reverse_link(link_AB))
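    # Hedged illustration (assumed toy nodes): for A = (2, -1), B = (0, 0) and link_AB = "o->",
    # the output is ((2, 0, -1), "o->"); for A = (2, 0), B = (0, -1) the pair is flipped and the
    # link reversed, giving ((0, 2, -1), "<-o").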
def _match_link(self, pattern, link):
"""Matches pattern including wildcards with link."""
if pattern == '' or link == '':
            return pattern == link
else:
left_mark, middle_mark, right_mark = pattern
if left_mark != '*':
if left_mark == '+':
if link[0] not in ['<', 'o']: return False
else:
if link[0] != left_mark: return False
if right_mark != '*':
if right_mark == '+':
if link[2] not in ['>', 'o']: return False
else:
if link[2] != right_mark: return False
if middle_mark != '*' and link[1] != middle_mark: return False
return True
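    # Hedged illustration (derived from the matching rules above): '*' matches any mark, and
    # '+' matches a non-tail mark ('<' or 'o' on the left, '>' or 'o' on the right). For example,
    # _match_link('o*o', 'o-o') is True, _match_link('+*>', '<->') is True, and
    # _match_link('-*>', 'o->') is False because the left marks differ.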
def _dict2graph(self):
"""Convert self.graph_dict to graph array of shape (N, N, self.tau_max + 1)."""
graph = np.zeros((self.N, self.N, self.tau_max + 1), dtype='U3')
for j in range(self.N):
for adj in self.graph_dict[j]:
(i, lag_i) = adj
graph[i, j, abs(lag_i)] = self.graph_dict[j][adj]
return graph
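    # Hedged illustration (assumed toy values): if graph_dict[1] contains {(0, -1): "-->"}, the
    # returned array satisfies graph[0, 1, 1] == "-->", i.e. entry [i, j, tau] holds the link
    # from (i, -tau) to (j, 0).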
def _find_adj(self, graph, node, patterns, exclude=None, ignore_time_bounds=True):
"""Find adjacencies of node matching patterns."""
# Setup
i, lag_i = node
if exclude is None: exclude = []
if type(patterns) == str:
patterns = [patterns]
# Init
adj = []
# Find adjacencies going forward/contemp
for k, lag_ik in zip(*np.where(graph[i, :, :])):
matches = [self._match_link(patt, graph[i, k, lag_ik]) for patt in patterns]
if np.any(matches):
match = (k, lag_i + lag_ik)
if match not in adj and (k, lag_i + lag_ik) not in exclude and (
-self.tau_max <= lag_i + lag_ik <= 0 or ignore_time_bounds):
adj.append(match)
# Find adjacencies going backward/contemp
for k, lag_ki in zip(*np.where(graph[:, i, :])):
matches = [self._match_link(self._reverse_link(patt), graph[k, i, lag_ki]) for patt in patterns]
if np.any(matches):
match = (k, lag_i - lag_ki)
if match not in adj and (k, lag_i - lag_ki) not in exclude and (
-self.tau_max <= lag_i - lag_ki <= 0 or ignore_time_bounds):
adj.append(match)
return adj
def _is_match(self, graph, X, Y, pattern_ij):
"""Check whether the link between X and Y agrees with pattern_ij"""
(i, lag_i) = X
(j, lag_j) = Y
tauij = lag_j - lag_i
if abs(tauij) >= graph.shape[2]:
return False
return ((tauij >= 0 and self._match_link(pattern_ij, graph[i, j, tauij])) or
(tauij < 0 and self._match_link(self._reverse_link(pattern_ij), graph[j, i, abs(tauij)])))
def _find_triples(self, pattern_ij, pattern_jk, pattern_ik):
"""Find triples (i, lag_i), (j, lag_j), (k, lag_k) that match patterns."""
# Graph as array makes it easier to search forward AND backward in time
graph = self._dict2graph()
# print(graph[:,:,0])
# print(graph[:,:,1])
# print("matching ", pattern_ij, pattern_jk, pattern_ik)
matched_triples = []
for i in range(self.N):
# Set lag_i = 0 without loss of generality, will be adjusted at end
lag_i = 0
adjacencies_i = self._find_adj(graph, (i, lag_i), pattern_ij)
# print(i, adjacencies_i)
for (j, lag_j) in adjacencies_i:
adjacencies_j = self._find_adj(graph, (j, lag_j), pattern_jk,
exclude=[(i, lag_i)])
# print(j, adjacencies_j)
for (k, lag_k) in adjacencies_j:
if self._is_match(graph, (i, lag_i), (k, lag_k), pattern_ik):
# Now use stationarity and shift triple such that the right-most
# node (on a line t=..., -2, -1, 0, 1, 2, ...) is at lag 0
righmost_lag = max(lag_i, lag_j, lag_k)
match = ((i, lag_i - righmost_lag),
(j, lag_j - righmost_lag),
(k, lag_k - righmost_lag))
largest_lag = min(lag_i - righmost_lag, lag_j - righmost_lag, lag_k - righmost_lag)
if match not in matched_triples and \
-self.tau_max <= largest_lag <= 0:
matched_triples.append(match)
return matched_triples
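    # Hedged illustration of the stationarity shift used above (assumed toy lags): a matched
    # triple with lags (0, 1, 1) is shifted by the right-most lag and stored as
    # ((i, -1), (j, 0), (k, 0)); it is kept only if the left-most shifted lag is >= -tau_max.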
def _find_quadruples(self, pattern_ij, pattern_jk, pattern_ik,
pattern_il, pattern_jl, pattern_kl):
"""Find quadruples (i, lag_i), (j, lag_j), (k, lag_k), (l, lag_l) that match patterns."""
# We assume this later
assert pattern_il != ''
# Graph as array makes it easier to search forward AND backward in time
graph = self._dict2graph()
matched_quadruples = []
# First get triple ijk
ijk_triples = self._find_triples(pattern_ij, pattern_jk, pattern_ik)
for triple in ijk_triples:
# Unpack triple
(i, lag_i), (j, lag_j), (k, lag_k) = triple
# Search through adjacencies
adjacencies = set(self._find_adj(graph, (i, lag_i), pattern_il,
exclude=[(j, lag_j), (k, lag_k)]))
if pattern_jl != '':
adjacencies = adjacencies.intersection(set(
self._find_adj(graph, (j, lag_j), pattern_jl,
exclude=[(i, lag_i), (k, lag_k)])))
else:
adjacencies = set([adj for adj in adjacencies
if self._is_match(graph, (j, lag_j), adj, '')])
if pattern_kl != '':
adjacencies = adjacencies.intersection(set(
self._find_adj(graph, (k, lag_k), pattern_kl,
exclude=[(i, lag_i), (j, lag_j)])))
else:
adjacencies = set([adj for adj in adjacencies
if self._is_match(graph, (k, lag_k), adj, '')])
for adj in adjacencies:
(l, lag_l) = adj
# Now use stationarity and shift quadruple such that the right-most
# node (on a line t=..., -2, -1, 0, 1, 2, ...) is at lag 0
righmost_lag = max(lag_i, lag_j, lag_k, lag_l)
match = ((i, lag_i - righmost_lag),
(j, lag_j - righmost_lag),
(k, lag_k - righmost_lag),
(l, lag_l - righmost_lag),
)
largest_lag = min(lag_i - righmost_lag,
lag_j - righmost_lag,
lag_k - righmost_lag,
lag_l - righmost_lag,
)
if match not in matched_quadruples and \
-self.tau_max <= largest_lag <= 0:
matched_quadruples.append(match)
return matched_quadruples
def _get_R4_discriminating_paths(self, triple, max_length=np.inf):
"""Find all discriminating paths starting from triple"""
def _search(path_taken, max_length):
# Get the last visited node and its link to Y
last_node = path_taken[-1]
link_to_Y = self._get_link(last_node, path_taken[0])
# Base Case: If the current path is a discriminating path, return it as single entry of a list
if len(path_taken) > 3 and link_to_Y == "":
return [path_taken]
# If the current path is not a discriminating path, continue the path
paths = []
if self._get_link(last_node, path_taken[-2])[0] == "<" and link_to_Y == "-->" and len(
path_taken) < max_length:
# Search through all adjacencies of the last node
for (var, lag) in self.graph_full_dict[last_node[0]].keys():
# Build the next node and get its link to the previous
next_node = (var, lag + last_node[1])
next_link = self._get_link(next_node, last_node)
# Check whether this node can be visited
if next_node[1] <= 0 and next_node[
1] >= -self.tau_max and next_node not in path_taken and self._match_link("*->", next_link):
# Recursive call
paths.extend(_search(path_taken[:] + [next_node], max_length))
# Return the list of discriminating paths
return paths
# Unpack the triple
(W, V, Y) = triple
# Return all discriminating paths starting at this triple
return _search([Y, V, W], max_length)
def _get_potentially_directed_uncovered_paths(self, start_node, end_node, initial_allowed_patterns):
"""Find all potentiall directed uncoverged paths from start_node to end_node whose first link takes one the forms specified by initial_allowed_patters"""
assert start_node != end_node
# Function for recursive search of potentially directed uncovered paths
def _search(end_node, path_taken, allowed_patterns):
# List for outputting potentially directed uncovered paths
paths = []
            # The last visited node becomes the new start_node
start_node = path_taken[-1]
# Base case: End node has been reached
if start_node == end_node:
paths.append(path_taken)
# Recursive build case
else:
# Run through the adjacencies of start_node
# for next_node in self.graph_full_dict[start_node[0]]:
for (var, lag) in self.graph_full_dict[start_node[0]].keys():
next_node = (var, lag + start_node[1])
# Consider only nodes that ...
# ... are within the allowed time frame
if next_node[1] < -self.tau_max or next_node[1] > 0:
continue
# ... have not been visited yet
if next_node in path_taken:
continue
# ... are non-adjacent to the node before start_node
if len(path_taken) >= 2 and self._get_link(path_taken[-2], next_node) != "":
continue
                # ... whose link with start_node matches one of the allowed patterns
link = self._get_link(start_node, next_node)
if not any([self._match_link(pattern=pattern, link=link) for pattern in allowed_patterns]):
continue
                # Determine the allowed patterns for the next recursive call
if self._match_link(pattern='o*o', link=link):
new_allowed_patters = ["o*o", "o*>", "-*>"]
elif self._match_link(pattern='o*>', link=link) or self._match_link(pattern='-*>', link=link):
new_allowed_patters = ["-*>"]
# Determine the new path taken
new_path_taken = path_taken[:] + [next_node]
# Recursive call
paths.extend(_search(end_node, new_path_taken, new_allowed_patters))
# Output list of potentially directed uncovered paths
return paths
# end def _search(end_node, path_taken, allowed_patterns)
# Output potentially directed uncovered paths
paths = _search(end_node, [start_node], initial_allowed_patterns)
return [path for path in paths if len(path) > 2]
def _sort_search_set(self, search_set, reference_node):
"""Sort the nodes in search_set by their val_min value with respect to the reference_node. Nodes with higher values appear earlier"""
sort_by = [self._get_val_min(reference_node, node) for node in search_set]
return [x for _, x in sorted(zip(sort_by, search_set), reverse=True)]
def _get_val_min(self, X, Y):
"""Some conditional independence test for X and Y has given the test statistic value val. Update the val_min dictionary accordingly"""
if X[1] < 0 or X[0] < Y[0]:
return self.val_min[Y[0]][X]
else:
return self.val_min[X[0]][Y]
def _get_pval_max(self, X, Y):
"""Some conditional independence test for X and Y has given the p-value val. Update the pval_max dictionary accordingly"""
if X[1] < 0 or X[0] < Y[0]:
return self.pval_max[Y[0]][X]
else:
return self.pval_max[X[0]][Y]
def _delete_sepsets(self, X, Y):
"""Delete all separating sets of X and Y. Y is assumed to be at lag 0"""
# Unpack X and Y
(i, lag_i) = X
(j, lag_j) = Y
assert lag_j == 0
# Save the sepset
if lag_i < 0 or i < j:
self.sepsets[j][X] = set()
else:
self.sepsets[i][Y] = set()
def _dict_to_matrix(self, val_dict, tau_max, n_vars, default=1):
"""Convert a dictionary to matrix format"""
matrix = np.ones((n_vars, n_vars, tau_max + 1))
matrix *= default
for j in val_dict.keys():
for link in val_dict[j].keys():
k, tau = link
if tau == 0:
matrix[k, j, 0] = matrix[j, k, 0] = val_dict[j][link]
else:
matrix[k, j, abs(tau)] = val_dict[j][link]
return matrix
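    # Hedged illustration (assumed toy values): _dict_to_matrix({0: {}, 1: {(0, -1): 0.5}},
    # tau_max=1, n_vars=2, default=1) returns an all-ones array of shape (2, 2, 2) except for
    # matrix[0, 1, 1] == 0.5; contemporaneous (tau == 0) entries are mirrored into [k, j] and [j, k].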
| 162,086 | 47.601799 | 552 |
py
|
correlate
|
correlate-master/1_data_extraction/weather.py
|
import os
from datetime import datetime
import pandas as pd
from helper import histograms
"""
go through csv files
create aggregation df for each file
select which aggregation is needed: max, min, mean, sum
append to data frame
write dataframe
"""
path_to_csv_files = '/home/chrei/code/quantifiedSelfData/2022/'
outputname = './weather_daily_summaries.csv'
verbose = False
excludedFiles = ['weather (another copy).csv', 'weather (copy).csv']
print('starting ...')
def csv_2_df(csv_root, csv_file):
"""
as name says
"""
feature = os.path.splitext(csv_file)[0]
if verbose:
print('feature:', feature)
print(csv_root, csv_file)
df = pd.read_csv(os.path.join(csv_root, csv_file)) # read csv to df
if verbose:
print('read df:', df)
df = df.drop(['dt', 'timezone', 'city_name', 'lat', 'lon', 'sea_level', 'grnd_level', 'weather_id', 'weather_main',
'weather_description', 'weather_icon'], axis=1)
print('change date format. takes a while...')
for i, row in df.iterrows():
date_time = datetime.strptime(str(row['dt_iso'][:-19]), '%Y-%m-%d')
df.loc[i, 'dt_iso'] = date_time
print('aggregating')
df_mean = df.groupby(df['dt_iso']).mean().round(1)
df_sum = df.groupby(df['dt_iso']).sum().round(1)
df_min = df.groupby(df['dt_iso']).min().round(1)
df_max = df.groupby(df['dt_iso']).max().round(1)
print('building df')
daily_aggregation_df = pd.DataFrame()
daily_aggregation_df['wOutTempMean'] = df_mean['temp']
daily_aggregation_df['wOutTempMin'] = df_min['temp_min']
daily_aggregation_df['wOutTempMax'] = df_max['temp_max']
daily_aggregation_df['wOutTempDelta'] = df_max['temp_max'] - df_min['temp_min']
daily_aggregation_df['wOutTempFeelMean'] = df_mean['feels_like']
daily_aggregation_df['wOutTempFeelMin'] = df_min['feels_like']
daily_aggregation_df['wOutTempFeelMax'] = df_max['feels_like']
daily_aggregation_df['wOutPressMean'] = df_mean['pressure']
daily_aggregation_df['wOutPressMin'] = df_min['pressure']
daily_aggregation_df['wOutPressMax'] = df_max['pressure']
daily_aggregation_df['wOutPressDelta'] = df_max['pressure'] - df_min['pressure']
daily_aggregation_df['wOutHumMean'] = df_mean['humidity']
daily_aggregation_df['wOutHumMin'] = df_min['humidity']
daily_aggregation_df['wOutHumMax'] = df_max['humidity']
daily_aggregation_df['wOutWindMean'] = df_mean['wind_speed']
daily_aggregation_df['wOutWindMin'] = df_min['wind_speed']
daily_aggregation_df['wOutWindMax'] = df_max['wind_speed']
daily_aggregation_df['wOutCloudMean'] = df_mean['clouds_all']
daily_aggregation_df['wOutCloudMin'] = df_min['clouds_all']
daily_aggregation_df['wOutCloudMax'] = df_max['clouds_all']
daily_aggregation_df['wOutPrecipit'] = df_sum['rain_1h'] + df_sum['rain_3h'] + df_sum['snow_1h'] + df_sum[
'snow_3h']
return daily_aggregation_df
for root, dirs, files in os.walk(path_to_csv_files):
for file in files: # go through all csv files
if file.endswith("weather.csv") and file.startswith('weather.csv') and file not in excludedFiles:
df = csv_2_df(root, file)
# histograms(df, '/home/chrei/PycharmProjects/correlate/plots/raw_distributions/')
# print file
print('writing...')
df.to_csv(outputname)
print('done! :)')
| 3,373 | 36.488889 | 119 |
py
|
correlate
|
correlate-master/1_data_extraction/netatmo.py
|
from datetime import datetime
from math import isnan
from tqdm import tqdm
from config import private_folder_path
from helper import histograms
import numpy as np
import pandas as pd
"""
1. export:
https://my.netatmo.com/app/station -> settings -> data management -> download
-> format csv -> download 3 months junks for indoor and outdoor
-> manually agregate indoors and outdoors
-> delete last line (has no data)
-> delete first two lines (have no data)
-> adjust path to input file
-> run script
-> check /home/chrei/PycharmProjects/correlate/plots/raw_distributions/
-> get data from /home/chrei/code/quantifiedSelfData/netatmo_daily_summaries.csv
"""
outputname = str(private_folder_path) + 'netatmo_daily_summaries.csv'
df = pd.read_csv('/home/chrei/code/quantifiedSelfData/2022/netatmo/Indoor_12_05_2022.csv') # , index_col=0
# histograms
histograms(df.drop(['Timestamp', 'Timezone : Europe/Berlin'], axis=1),
'/home/chrei/PycharmProjects/correlate/plots/raw_distributions/')
currentDay = datetime.strptime(df['Timezone : Europe/Berlin'][0], '%Y/%m/%d %H:%M:%S').strftime('%Y/%m/%d')
lastDay = ''
t_min5 = []
t_max95 = []
t_median = []
humidity_min5 = []
humidity_max95 = []
humidity_median = []
co2_min5 = []
co2_max95 = []
co2_median = []
noise_min5 = []
noise_max95 = []
noise_median = []
pressure_min5 = []
pressure_max95 = []
pressure_median = []
date_agg = []
t_min5_agg = []
t_max95_agg = []
t_median_agg = []
humidity_min5_agg = []
humidity_max95_agg = []
humidity_median_agg = []
co2_min5_agg = []
co2_max95_agg = []
co2_median_agg = []
noise_min5_agg = []
noise_max95_agg = []
noise_median_agg = []
pressure_min5_agg = []
pressure_max95_agg = []
pressure_median_agg = []
cold_start = True
for _, row in tqdm(df.iterrows()):
currentDay = datetime.strptime(row['Timezone : Europe/Berlin'], '%Y/%m/%d %H:%M:%S').strftime('%Y/%m/%d')
if currentDay != lastDay:
if not cold_start:
""" save daily aggs"""
date_agg.append(lastDay)
t_min5_agg.append(round(np.percentile(t_min5, 5), 1))
t_max95_agg.append(round(np.percentile(t_max95, 95), 1))
t_median_agg.append(round(np.percentile(t_median, 50), 1))
humidity_min5_agg.append(round(np.percentile(humidity_min5, 5), 1))
humidity_max95_agg.append(round(np.percentile(humidity_max95, 95), 1))
humidity_median_agg.append(round(np.percentile(humidity_median, 50), 1))
co2_min5_agg.append(round(np.percentile(co2_min5, 5), 1))
co2_max95_agg.append(round(np.percentile(co2_max95, 95), 1))
co2_median_agg.append(round(np.percentile(co2_median, 50), 1))
noise_min5_agg.append(round(np.percentile(noise_min5, 5), 1))
noise_max95_agg.append(round(np.percentile(noise_max95, 95), 1))
noise_median_agg.append(round(np.percentile(noise_median, 50), 1))
pressure_min5_agg.append(round(np.percentile(pressure_min5, 5), 1))
pressure_max95_agg.append(round(np.percentile(pressure_max95, 95), 1))
pressure_median_agg.append(round(np.percentile(pressure_median, 50), 1))
"""reset current """
t_min5 = []
t_max95 = []
t_median = []
humidity_min5 = []
humidity_max95 = []
humidity_median = []
co2_min5 = []
co2_max95 = []
co2_median = []
noise_min5 = []
noise_max95 = []
noise_median = []
pressure_min5 = []
pressure_max95 = []
pressure_median = []
cold_start = False
"""append current"""
if not isnan(row['Temperature']):
t_min5.append(float(row['Temperature']))
if not isnan(row['Temperature']):
t_max95.append(float(row['Temperature']))
    if not isnan(row['Temperature']):
        t_median.append(float(row['Temperature']))
if not isnan(row['Humidity']):
humidity_min5.append(float(row['Humidity']))
if not isnan(row['Humidity']):
humidity_max95.append(float(row['Humidity']))
if not isnan(row['Humidity']):
humidity_median.append(float(row['Humidity']))
if not isnan(row['CO2']):
co2_min5.append(float(row['CO2']))
if not isnan(row['CO2']):
co2_max95.append(float(row['CO2']))
if not isnan(row['CO2']):
co2_median.append(float(row['CO2']))
if not isnan(row['Noise']):
noise_min5.append(float(row['Noise']))
if not isnan(row['Noise']):
noise_max95.append(float(row['Noise']))
if not isnan(row['Noise']):
noise_median.append(float(row['Noise']))
if not isnan(row['Pressure']):
pressure_min5.append(float(row['Pressure']))
if not isnan(row['Pressure']):
pressure_max95.append(float(row['Pressure']))
if not isnan(row['Pressure']):
pressure_median.append(float(row['Pressure']))
lastDay = currentDay
""" save daily aggs"""
date_agg.append(lastDay)
t_min5_agg.append(round(np.percentile(t_min5, 5), 1))
t_max95_agg.append(round(np.percentile(t_max95, 95), 1))
t_median_agg.append(round(np.percentile(t_median, 50), 1))
# t_median_agg.append(round(np.percentile(t_median), 1))
humidity_min5_agg.append(round(np.percentile(humidity_min5, 5), 1))
humidity_max95_agg.append(round(np.percentile(humidity_max95, 95), 1))
humidity_median_agg.append(round(np.percentile(humidity_median, 50), 1))
co2_min5_agg.append(round(np.percentile(co2_min5, 5), 1))
co2_max95_agg.append(round(np.percentile(co2_max95, 95), 1))
co2_median_agg.append(round(np.percentile(co2_median, 50), 1))
noise_min5_agg.append(round(np.percentile(noise_min5, 5), 1))
noise_max95_agg.append(round(np.percentile(noise_max95, 95), 1))
noise_median_agg.append(round(np.percentile(noise_median, 50), 1))
pressure_min5_agg.append(round(np.percentile(pressure_min5, 5), 1))
pressure_max95_agg.append(round(np.percentile(pressure_max95, 95), 1))
pressure_median_agg.append(round(np.percentile(pressure_median, 50), 1))
df = pd.DataFrame(list(
zip(date_agg,
t_min5_agg, t_max95_agg, t_median_agg, humidity_min5_agg, humidity_max95_agg,
humidity_median_agg,
co2_min5_agg, co2_max95_agg, co2_median_agg, noise_min5_agg, noise_max95_agg, noise_median_agg,
pressure_min5_agg,
pressure_max95_agg, pressure_median_agg)),
columns=['wInDate',
'wInTMin5', 'wInTMax95', 'wInTMedian', 'wInHumidityMin5',
'wInHumidityMax95', 'wInHumidityMedian', 'wInCO2Min5', 'wInCO2Max95', 'wInCO2Median',
'wInNoiseMin5', 'wInNoiseMax95', 'wInNoiseMedian', 'wInPressureMin5', 'wInPressureMax95',
'wInPressureMedian'])
df.to_csv(outputname)
| 6,756 | 37.392045 | 109 |
py
|
correlate
|
correlate-master/1_data_extraction/mfp.py
|
from datetime import datetime
import numpy as np
import pandas as pd
from config import private_folder_path
output_name = str(private_folder_path)+'mfp_daily_summaries.csv'
df = pd.read_csv('/home/chrei/PycharmProjects/correlate/0_data_raw/MFP/meals.csv') # , index_col=0
currentDay = datetime.strptime(df.columns[0], '%B %d, %Y')
first_date = currentDay
df.columns = df.iloc[0]
sugar = []
Cholest = []
date = []
# get date
for _, row in (df.iterrows()):
try:
currentDay = datetime.strptime(row['FOODS'], '%B %d, %Y')
except:
pass
date.append(currentDay)
# remove units
cols_to_check = ['Calories', 'Cholest', 'Sugars', 'Protein', 'Fat', 'Carbs', 'Sodium', 'Fiber']
df[cols_to_check] = df[cols_to_check].replace({'--': 0}, regex=True)
df[cols_to_check] = df[cols_to_check].replace({',': ''}, regex=True)
df[cols_to_check] = df[cols_to_check].replace({np.nan: 0}, regex=True)
df[['Cholest', 'Sodium']] = df[['Cholest', 'Sodium']].replace({'mg': ''}, regex=True)
df[cols_to_check] = df[cols_to_check].replace({'g': ''}, regex=True)
# add date
df['Date'] = date
# remove unwanted headers inside of df
df = df[df.Calories != 'Calories']
# drop foods
# df.reset_index(level=0, inplace=True)
df = df.drop(['FOODS'], axis=1) # , 'Calories', 'Cholest', 'Protein', 'Fat', 'Carbs', 'Sodium', 'Fiber'
# to int conversion
df[['Calories', 'Cholest', 'Sugars', 'Protein', 'Fat', 'Carbs', 'Sodium', 'Fiber']] = df[
    ['Calories', 'Cholest', 'Sugars', 'Protein', 'Fat', 'Carbs', 'Sodium', 'Fiber']].astype('int32')
# aggregate
df = df.groupby(df['Date']).sum()
# fill missing dates with NaN
df.reset_index(level=0, inplace=True)
lastDate = currentDay
df = df.set_index('Date') # set date as index
idx = pd.date_range(first_date, lastDate)
df.index = pd.DatetimeIndex(df.index)
df = df.reindex(idx, fill_value=np.nan)
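# Hedged illustration of the date-filling step above (assumed toy dates): reindexing a frame
# indexed by [2021-01-01, 2021-01-03] against pd.date_range('2021-01-01', '2021-01-03') inserts
# a 2021-01-02 row whose values are all NaN, so gaps in tracking stay visible downstream.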
# save
df.to_csv(output_name)
print(str(output_name) + ' written')
| 1,912 | 29.854839 | 104 |
py
|
correlate
|
correlate-master/1_data_extraction/gFit_to_dailyAggrgations.py
|
import os
import pandas as pd
from tqdm import tqdm
from helper import histograms
"""
go through csv files
create aggregation df for each file
select which aggregation is needed: max, min, mean, sum
append to data frame
write dataframe
"""
path_to_csv_files = '/home/chrei/code/quantifiedSelfData/2022/takeout-20220502T163423Z-001/Takeout/Fit/Daily activity metrics/'
outputname = './google_output_2022_05_12_percentile.csv'
verbose = False
print('running ...')
def csv_2_df(csv_root, csv_file, total_agg):
"""
as name says
"""
feature = os.path.splitext(csv_file)[0]
if verbose:
print('feature:', feature)
print(csv_root, csv_file)
google_csv_as_df = pd.read_csv(os.path.join(csv_root, csv_file)) # read csv to df
if verbose:
print('read df:', google_csv_as_df)
# total_agg = total_agg.append(google_csv_as_df)
# pd.DataFrame.quantile(0.5)
# test = google_csv_as_df.agg(['median'], axis=0)
    aggregated = google_csv_as_df.agg(
        ['sum', 'min', 'max', 'mean', 'median'], axis=0)  # 'median' is required by the median heart rate branch below
if verbose:
print('column names (aggregated.columns):', aggregated.columns)
# create dictionary
daily_aggregation = {'Date': [feature]}
if verbose:
print('range(len(aggregated.columns)):', range(len(aggregated.columns)))
i = -1
for attribute_name in aggregated.columns:
i += 1
if aggregated.columns[i] == 'Start time':
if verbose:
print('skip Start time:')
elif aggregated.columns[i] == 'End time':
if verbose:
print('skip End time:')
elif aggregated.columns[i] == 'Move Minutes count':
daily_aggregation[attribute_name] = [
aggregated['Move Minutes count']['sum']]
elif aggregated.columns[i] == 'Calories (kcal)':
daily_aggregation[attribute_name] = [
aggregated['Calories (kcal)']['sum']]
elif aggregated.columns[i] == 'Distance (m)':
daily_aggregation[attribute_name] = [
aggregated['Distance (m)']['sum']]
elif aggregated.columns[i] == 'Heart Points':
daily_aggregation[attribute_name] = [
aggregated['Heart Points']['sum']]
elif aggregated.columns[i] == 'Heart Minutes':
daily_aggregation[attribute_name] = [
aggregated['Heart Minutes']['sum']]
elif aggregated.columns[i] == 'Average heart rate (bpm)':
daily_aggregation[attribute_name] = [
aggregated['Average heart rate (bpm)']['mean']]
elif aggregated.columns[i] == 'Max heart rate (bpm)':
daily_aggregation[attribute_name] = [
aggregated['Max heart rate (bpm)']['max']]
elif aggregated.columns[i] == 'Min heart rate (bpm)':
daily_aggregation[attribute_name] = [
aggregated['Min heart rate (bpm)']['min']]
elif aggregated.columns[i] == 'Median heart rate (bpm)':
daily_aggregation[attribute_name] = [
aggregated['Median heart rate (bpm)']['median']]
elif aggregated.columns[i] == 'Low latitude (deg)':
daily_aggregation[attribute_name] = [
aggregated['Low latitude (deg)']['min']]
elif aggregated.columns[i] == 'Low longitude (deg)':
daily_aggregation[attribute_name] = [
aggregated['Low longitude (deg)']['min']]
elif aggregated.columns[i] == 'High latitude (deg)':
daily_aggregation[attribute_name] = [
aggregated['High latitude (deg)']['max']]
elif aggregated.columns[i] == 'High longitude (deg)':
daily_aggregation[attribute_name] = [
aggregated['High longitude (deg)']['max']]
elif aggregated.columns[i] == 'Average speed (m/s)':
daily_aggregation[attribute_name] = [
aggregated['Average speed (m/s)']['mean']]
elif aggregated.columns[i] == 'Max speed (m/s)':
daily_aggregation[attribute_name] = [
aggregated['Max speed (m/s)']['max']]
elif aggregated.columns[i] == 'Min speed (m/s)':
daily_aggregation[attribute_name] = [
aggregated['Min speed (m/s)']['min']]
elif aggregated.columns[i] == 'Step count':
daily_aggregation[attribute_name] = [
aggregated['Step count']['sum']]
elif aggregated.columns[i] == 'Average weight (kg)':
daily_aggregation[attribute_name] = [
aggregated['Average weight (kg)']['mean']]
elif aggregated.columns[i] == 'Max weight (kg)':
daily_aggregation[attribute_name] = [
aggregated['Max weight (kg)']['max']]
elif aggregated.columns[i] == 'Min weight (kg)':
daily_aggregation[attribute_name] = [
aggregated['Min weight (kg)']['min']]
elif aggregated.columns[i] == 'Other duration (ms)':
daily_aggregation[attribute_name] = [
aggregated['Other duration (ms)']['sum']]
elif aggregated.columns[i] == 'Meditating duration (ms)':
daily_aggregation[attribute_name] = [
aggregated['Meditating duration (ms)']['sum']]
elif aggregated.columns[i] == 'Hiking duration (ms)':
daily_aggregation[attribute_name] = [
aggregated['Hiking duration (ms)']['sum']]
elif aggregated.columns[i] == 'Treadmill running duration (ms)':
daily_aggregation[attribute_name] = [
aggregated['Treadmill running duration (ms)']['sum']]
elif aggregated.columns[i] == 'Biking duration (ms)':
daily_aggregation[attribute_name] = [
aggregated['Biking duration (ms)']['sum']]
elif aggregated.columns[i] == 'Weight lifting duration (ms)':
daily_aggregation[attribute_name] = [
aggregated['Weight lifting duration (ms)']['sum']]
elif aggregated.columns[i] == 'Inactive duration (ms)':
daily_aggregation[attribute_name] = [
aggregated['Inactive duration (ms)']['sum']]
elif aggregated.columns[i] == 'Walking duration (ms)':
daily_aggregation[attribute_name] = [
aggregated['Walking duration (ms)']['sum']]
elif aggregated.columns[i] == 'Running duration (ms)':
daily_aggregation[attribute_name] = [
aggregated['Running duration (ms)']['sum']]
elif aggregated.columns[i] == 'Jogging duration (ms)':
daily_aggregation[attribute_name] = [
aggregated['Jogging duration (ms)']['sum']]
elif aggregated.columns[i] == 'Yoga duration (ms)':
daily_aggregation[attribute_name] = [
aggregated['Yoga duration (ms)']['sum']]
elif aggregated.columns[i] == 'Rowing machine duration (ms)':
daily_aggregation[attribute_name] = [
aggregated[attribute_name]['sum']]
elif aggregated.columns[i] == 'Strength training duration (ms)':
daily_aggregation[attribute_name] = [
aggregated[attribute_name]['sum']]
elif aggregated.columns[i] == 'Mountain biking duration (ms)':
daily_aggregation[attribute_name] = [
aggregated[attribute_name]['sum']]
elif aggregated.columns[i] == 'CrossFit duration (ms)':
daily_aggregation[attribute_name] = [
aggregated[attribute_name]['sum']]
elif aggregated.columns[i] == 'Elliptical duration (ms)':
daily_aggregation[attribute_name] = [
aggregated[attribute_name]['sum']]
elif aggregated.columns[i] == 'Roller skiing duration (ms)':
daily_aggregation[attribute_name] = [
aggregated[attribute_name]['sum']]
elif aggregated.columns[i] == 'Stair climbing duration (ms)':
daily_aggregation[attribute_name] = [
aggregated[attribute_name]['sum']]
elif aggregated.columns[i] == 'Stair climbing machine duration (ms)':
daily_aggregation[attribute_name] = [
aggregated[attribute_name]['sum']]
elif aggregated.columns[i] == 'Swimming duration (ms)':
daily_aggregation[attribute_name] = [
aggregated[attribute_name]['sum']]
elif aggregated.columns[i] == 'Sleep duration (ms)':
daily_aggregation[attribute_name] = [
aggregated['Sleep duration (ms)']['sum']]
elif aggregated.columns[i] == 'Light sleeping duration (ms)':
daily_aggregation[attribute_name] = [
aggregated['Light sleeping duration (ms)']['sum']]
elif aggregated.columns[i] == 'Deep sleeping duration (ms)':
daily_aggregation[attribute_name] = [
aggregated['Deep sleeping duration (ms)']['sum']]
elif aggregated.columns[i] == 'REM sleeping duration (ms)':
daily_aggregation[attribute_name] = [
aggregated['REM sleeping duration (ms)']['sum']]
elif aggregated.columns[i] == 'Awake mid-sleeping duration (ms)':
daily_aggregation[attribute_name] = [
aggregated['Awake mid-sleeping duration (ms)']['sum']]
elif aggregated.columns[i] == 'Average systolic blood pressure (mmHg)':
daily_aggregation['Average systolic blood pressure (mmHg)'] = [
aggregated['Average systolic blood pressure (mmHg)']['mean']]
elif aggregated.columns[i] == 'Body position':
daily_aggregation[attribute_name] = [
aggregated['Body position']['mean']]
elif aggregated.columns[i] == 'Max systolic blood pressure (mmHg)':
daily_aggregation[attribute_name] = [
aggregated['Max systolic blood pressure (mmHg)']['max']]
elif aggregated.columns[i] == 'Min systolic blood pressure (mmHg)':
daily_aggregation[attribute_name] = [
aggregated['Min systolic blood pressure (mmHg)']['min']]
elif aggregated.columns[i] == 'Average diastolic blood pressure (mmHg)':
daily_aggregation[
attribute_name] = [aggregated['Average diastolic blood pressure (mmHg)']['mean']]
elif aggregated.columns[i] == 'Max diastolic blood pressure (mmHg)':
daily_aggregation[attribute_name] = [
aggregated['Max diastolic blood pressure (mmHg)']['max']]
elif aggregated.columns[i] == 'Min diastolic blood pressure (mmHg)':
daily_aggregation[attribute_name] = [
aggregated['Min diastolic blood pressure (mmHg)']['min']]
elif aggregated.columns[i] == 'Average systolic blood pressure (mmHg)':
daily_aggregation[
attribute_name] = [aggregated['Average systolic blood pressure (mmHg)']['mean']]
elif aggregated.columns[i] == 'Blood pressure measurement location':
daily_aggregation[attribute_name] = [
aggregated['Blood pressure measurement location']['mean']]
else:
print('!!! UNKNOWN LABEL !!! \n', aggregated.columns[i])
if verbose:
print('daily_aggregation:', daily_aggregation)
daily_aggregation_df = pd.DataFrame(daily_aggregation)
return daily_aggregation_df, total_agg
# create df from csv files
if verbose:
print('path', path_to_csv_files)
columns = ['Date', 'Move Minutes count', 'Average systolic blood pressure (mmHg)', 'Max systolic blood pressure (mmHg)',
'Min systolic blood pressure (mmHg)', 'Average diastolic blood pressure (mmHg)',
'Max diastolic blood pressure (mmHg)', 'Min diastolic blood pressure (mmHg)', 'Body position',
'Blood pressure measurement location', 'Calories (kcal)', 'Distance (m)', 'Heart Points', 'Heart Minutes',
'Average heart rate (bpm)', 'Max heart rate (bpm)', 'Min heart rate (bpm)', 'Low latitude (deg)',
'Low longitude (deg)', 'High latitude (deg)', 'High longitude (deg)', 'Average speed (m/s)',
'Max speed (m/s)', 'Min speed (m/s)', 'Step count', 'Average weight (kg)', 'Max weight (kg)',
'Min weight (kg)', 'Inactive duration (ms)', 'Walking duration (ms)', 'Running duration (ms)',
'Light sleeping duration (ms)', 'Deep sleeping duration (ms)', 'REM sleeping duration (ms)',
'Awake mid-sleeping duration (ms)', 'Jogging duration (ms)', 'Sleep duration (ms)', 'Yoga duration (ms)',
'Other duration (ms)', 'Biking duration (ms)', 'Treadmill running duration (ms)',
'Weight lifting duration (ms)', 'Meditating duration (ms)', 'Rowing machine duration (ms)',
'Stair climbing duration (ms)', 'Strength training duration (ms)', 'CrossFit duration (ms)',
'Hiking duration (ms)', 'Mountain biking duration (ms)', 'Elliptical duration (ms)',
'Swimming duration (ms)', 'Stair climbing machine duration (ms)']
total_agg = pd.DataFrame(columns=columns)
for root, dirs, files in os.walk(path_to_csv_files):
print(len(files), 'files found')
print('aggregating...might take a while...')
i = 0
for file in tqdm(files): # go through all csv files
if verbose:
print('Filename=', file)
print('root:', root)
print('dirs:', dirs)
print('files:', files)
            print('csv_2_df(root, file, total_agg):', csv_2_df(root, file, total_agg))
if i == 0:
df_0, total_agg = csv_2_df(root, file, total_agg)
elif i == 1:
df_1, total_agg = csv_2_df(root, file, total_agg)
df = df_0.append(df_1)
else:
df_1, total_agg = csv_2_df(root, file, total_agg)
df = df.append(df_1)
if verbose:
print(df)
print('/n')
i += 1
# histograms(total_agg.drop(['Start time', 'End time','Date','Jogging duration (ms)'], axis=1).rename(
# columns={"Average speed (m/s)": "Average_speed", "Max speed (m/s)": "Max_speed",
# "Min speed (m/s)": "Min_speed"}),
# '/home/chrei/PycharmProjects/correlate/plots/raw_distributions/google')
# sort by Date
print('sorting...')
df = df.sort_values(by=['Date'])
# print file
print('writing...')
df.to_csv(outputname)
print('done! :)')
if verbose:
print(df)
| 14,365 | 48.367698 | 127 |
py
|
correlate
|
correlate-master/1_data_extraction/exist_to_csv_2021_06_15.py
|
import pandas as pd
import os
"""
correlate
load data form json, into df, correlation matrix, visualize
"""
path_to_json_files = '/home/chrei/code/quantifiedSelfData/2022/ChrisG_a7bdb31dddb7586bea95f752ca7883740c77ae2dde615633ca99b40c74ef9192'
verbose = False
excludedFiles = ['averages.json', 'correlations.json', 'weather_summary.json', 'twitter_username.json',
'weather_icon.json', 'mood_note.json', 'custom.json', 'location_name.json']
if verbose:
print('start running...')
def json_2_1_feature_df(json_root, json_file):
"""
as name says
"""
feature = os.path.splitext(json_file)[0]
if verbose:
print(json_root, json_file)
df = pd.read_json(os.path.join(json_root, json_file)) # read json to df
df = df.rename(columns={"value": feature}) # rename column-name value to feature
df = df.set_index('date') # set date as index
return df
# create df from json files
if verbose:
print('path', (path_to_json_files))
for root, dirs, files in os.walk(path_to_json_files):
i = 0
for file in files: # go through all json files
if verbose:
print('Filename=', file)
# take only .json files, exclude averages.json because of different format and
        # exclude small files to reduce noise
# exclude correlations.json
file_size = os.stat(os.path.join(path_to_json_files, file))[6]
if file.endswith("_2022.json") and file.startswith('data_') and file not in excludedFiles:
if verbose:
print('file-size=', file_size)
print('json_2_1_feature_df(root, file):', json_2_1_feature_df(root, file))
if i == 0:
df_0 = json_2_1_feature_df(root, file)
elif i == 1:
df_1 = json_2_1_feature_df(root, file)
df = df_0.join(df_1)
else:
df_1 = json_2_1_feature_df(root, file)
df = df.join(df_1)
if verbose:
print(df)
print('/n')
i += 1
df.to_csv('exist_output_2022.csv')
if verbose:
print(df)
print('exist_output_2022.csv written')
| 2,188 | 31.191176 | 135 |
py
|
correlate
|
correlate-master/1_data_extraction/google_meditation.py
|
import os
from datetime import datetime, timedelta
import json
import dateutil
import pandas as pd
from math import floor
path_to_json_files = '/home/chrei/PycharmProjects/correlate/0_data_raw/google/takeout-20210625T075514Z-001/Takeout/Fit/All Sessions'
output_filename = 'google_meditation.csv'
verbose = True
excludedFiles = ['']
if verbose:
print('start running...')
def round_time(dt=None, roundTo=60):
    if dt is None: dt = datetime.now()
seconds = (dt.replace(tzinfo=None) - dt.min).seconds
rounding = (seconds + roundTo / 2) // roundTo * roundTo
return dt + timedelta(0, rounding - seconds, -dt.microsecond)
coldStart = True
for root, dirs, files in os.walk(path_to_json_files):
for file in files: # go through all json files
# take only .json files, exclude averages.json because of different format and
        # exclude small files to reduce noise
# exclude correlations.json
if file.endswith("_MEDITATION.json") and file.startswith('2') and file not in excludedFiles:
print('file',file)
with open(os.path.join(root, file)) as json_data:
data = json.load(json_data)
data.pop('endTime', None)
data.pop('segment', None)
data.pop('aggregate', None)
data.pop('fitnessActivity', None)
data['duration_min'] = floor(float(data['duration'][:-1])/60)
data.pop('duration', None)
data['startTime'] = dateutil.parser.isoparse(data['startTime'])
df = pd.DataFrame.from_dict([data]) # read json to d
if coldStart:
all_df = df
coldStart = False
else:
all_df = all_df.append(df)
# aggregate when multiple per day
all_df['startTime'] = pd.to_datetime(all_df['startTime'])
all_df = all_df.groupby(all_df['startTime'].dt.date).sum()
# # round
# all_df.filteredRunVO2Max = round(all_df.filteredRunVO2Max, 1)
# fill missing dates with NaN
all_df.reset_index(level=0, inplace=True)
firstDate = datetime.strptime('2019/02/10', '%Y/%m/%d') # all_df['dateTime'][0]
lastDate = all_df['startTime'].iloc[-1]
all_df = all_df.set_index('startTime') # set date as index
idx = pd.date_range(firstDate, lastDate)
all_df.index = pd.DatetimeIndex(all_df.index)
all_df = all_df.reindex(idx, fill_value=float("0"))
# write file
all_df.to_csv(output_filename)
if verbose:
print(all_df)
print(str(output_filename) + '.csv written')
| 2,527 | 31.831169 | 132 |
py
|
correlate
|
correlate-master/1_data_extraction/fitbit_vo2max_json_to_csv.py
|
import os
from datetime import datetime, timedelta
import pandas as pd
path_to_json_files = '/home/chrei/code/quantifiedSelfData/walter_fitbit/2022/MyFitbitData/Walter/Physical Activity'
output_filename = 'walter_fitbit_vo2max.csv'
verbose = True
excludedFiles = ['']
if verbose:
print('start running...')
def round_time(dt=None, roundTo=60):
"""Round a datetime object to any time lapse in seconds
dt : datetime.datetime object, default now.
roundTo : Closest number of seconds to round to, default 1 minute.
Author: Thierry Husson 2012 - Use it as you want but don't blame me.
"""
    if dt is None: dt = datetime.now()
seconds = (dt.replace(tzinfo=None) - dt.min).seconds
rounding = (seconds + roundTo / 2) // roundTo * roundTo
return dt + timedelta(0, rounding - seconds, -dt.microsecond)
coldStart = True
for root, dirs, files in os.walk(path_to_json_files):
for file in files: # go through all json files
# take only .json files, exclude averages.json because of different format and
        # exclude small files to reduce noise
# exclude correlations.json
file_size = os.stat(os.path.join(path_to_json_files, file))[6]
if file.endswith(".json") and file.startswith('run_vo2_max-') and file not in excludedFiles:
df = pd.read_json(os.path.join(root, file)) # read json to d
if coldStart:
all_df = df
coldStart = False
else:
all_df = all_df.append(df)
filteredRunVO2MaxList = []
dateList = []
for i, row in all_df.iterrows():
filteredRunVO2MaxList.append(round(row.value['filteredRunVO2Max'], 5))
dateList.append(row.dateTime)
all_files_df = pd.DataFrame(dateList, columns=['dateTime'])
all_files_df['filteredRunVO2Max'] = filteredRunVO2MaxList
# aggregate when multiple per day
all_files_df['dateTime'] = pd.to_datetime(all_files_df['dateTime'])
all_files_df = all_files_df.groupby(all_files_df['dateTime'].dt.date).median()
# round
all_files_df.filteredRunVO2Max = round(all_files_df.filteredRunVO2Max, 1)
# fill missing dates with NaN
all_files_df.reset_index(level=0, inplace=True)
firstDate = datetime.strptime('2019/02/11', '%Y/%m/%d') # all_files_df['dateTime'][0]
lastDate = all_files_df['dateTime'].iloc[-1]
all_files_df = all_files_df.set_index('dateTime') # set date as index
idx = pd.date_range(firstDate, lastDate)
all_files_df.index = pd.DatetimeIndex(all_files_df.index)
all_files_df = all_files_df.reindex(idx, fill_value=float("NaN"))
# write file
all_files_df.to_csv(output_filename)
if verbose:
print(all_files_df)
print(str(output_filename) + '.csv written')
| 2,687 | 35.324324 | 115 |
py
|
correlate
|
correlate-master/1_data_extraction/fitbit_sleep_json_to_csv.py
|
import math
import os
from datetime import datetime, timedelta
import pandas as pd
"""
MyFitbitData/ChrisRe/Sleep/sleep_score.csv
reverse sorting: add column with numbers to sort -> add order numbers -> select all cells -> data -> sort... -> select column to sort
check for missing dates or zeros
responsiveness points, excertion points
fitbit -> stress score
check for missing dates or zeros
"""
path_to_json_files = '/home/chrei/code/quantifiedSelfData/walter_fitbit/2022/MyFitbitData/Walter/Sleep/'
output_filename = 'walter_fitbit_sleep.csv'
verbose = False
excludedFiles = ['']
if verbose:
print('start running...')
def round_time(dt=None, roundTo=60):
"""Round a datetime object to any time lapse in seconds
dt : datetime.datetime object, default now.
roundTo : Closest number of seconds to round to, default 1 minute.
Author: Thierry Husson 2012 - Use it as you want but don't blame me.
"""
    if dt is None: dt = datetime.now()
seconds = (dt.replace(tzinfo=None) - dt.min).seconds
rounding = (seconds + roundTo / 2) // roundTo * roundTo
return dt + timedelta(0, rounding - seconds, -dt.microsecond)
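# Hedged usage sketch (assumed toy timestamp): with roundTo=24*60*60, as used for the midnight
# rounding below, round_time(datetime(2021, 1, 1, 23, 10)) yields datetime(2021, 1, 2, 0, 0),
# i.e. the nearest midnight.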
def json_to_df(json_root, json_file):
"""
as name says
"""
feature = os.path.splitext(json_file)[0]
if verbose:
print(json_root, json_file)
df = pd.read_json(os.path.join(json_root, json_file)) # read json to d
if verbose:
print('df[levels]', df['levels'])
df = df.drop(['type', 'infoCode'], axis=1)
df = df.set_index('dateOfSleep') # set date as index
start_min_before_midnight = []
end_min_after_midnight = []
duration_min = []
deep = []
wake = []
light = []
rem = []
efficiency = []
nap_detected = []
for i, row in df.iterrows():
if verbose:
print("row['startTime']", row['startTime'])
# set nap sleep start and end to 0 so it doesn't affect sleep time after reduce sum
if row['mainSleep']:
start_time = datetime.strptime(row['startTime'], '%Y-%m-%dT%H:%M:%S.%f')
midnight_time = round_time(start_time, roundTo=24 * 60 * 60) # round to midnight
start_min_before_midnight.append(math.floor((midnight_time - start_time).total_seconds() / 60))
end_time = datetime.strptime(row['endTime'], '%Y-%m-%dT%H:%M:%S.%f')
midnight_time = round_time(end_time, roundTo=24 * 60 * 60) # round to midnight
end_min_after_midnight.append(math.floor((end_time - midnight_time).total_seconds() / 60))
efficiency.append(row['efficiency'])
nap_detected.append(0)
elif not row['mainSleep']:
start_min_before_midnight.append(0)
end_min_after_midnight.append(0)
efficiency.append(0)
nap_detected.append(1)
duration_min.append(math.floor(round(row['duration'] / 60000, 0)))
summary = row['levels']['summary']
if [*summary] == ['deep', 'wake', 'light', 'rem']:
deep.append(summary['deep']['minutes'])
wake.append(summary['wake']['minutes'])
light.append(summary['light']['minutes'])
rem.append(summary['rem']['minutes'])
else:
deep.append(float("NaN"))
wake.append(float("NaN"))
light.append(float("NaN"))
rem.append(float("NaN"))
# add new columns
df['startBeforeMidnight'] = start_min_before_midnight
df['endBeforeMidnight'] = end_min_after_midnight
df['duration'] = duration_min
df['deep'] = deep
df['wake'] = wake
df['light'] = light
df['rem'] = rem
df['m_efficiency'] = efficiency
df['nap_detected'] = nap_detected
# remove old replaced columns
df = df.drop(['startTime', 'endTime', 'duration', 'levels', 'efficiency'], axis=1) # ,'mainSleep'
if verbose:
print('df', df)
return df
# create df from json files
if verbose:
print('path', path_to_json_files)
for root, dirs, files in os.walk(path_to_json_files):
coldStart = True
for file in files: # go through all json files
if verbose:
print('Filename=', file)
# take only .json files, exclude averages.json because of different format and
        # exclude small files to reduce noise
# exclude correlations.json
file_size = os.stat(os.path.join(path_to_json_files, file))[6]
if file.endswith(".json") and file.startswith('sleep-') and file not in excludedFiles:
if verbose:
print('Filename target =', file)
print('file-size=', file_size)
print('json_2_1_feature_df(root, file):', json_to_df(root, file))
if coldStart:
all_files_df = json_to_df(root, file)
else:
all_files_df = all_files_df.append(json_to_df(root, file))
coldStart = False
# sort by date
all_files_df = all_files_df.sort_values(by='dateOfSleep')
# convert-index-of-a-pandas-dataframe-into-a-column
all_files_df.reset_index(level=0, inplace=True)
# drop duplicates
all_files_df = all_files_df.drop_duplicates(subset="logId")
# # aggregate when multiple sleep times per day
all_files_df['dateOfSleep'] = pd.to_datetime(all_files_df['dateOfSleep'])
all_files_df = all_files_df.groupby(all_files_df['dateOfSleep'].dt.date).sum()
all_files_df = all_files_df.drop(['logId', 'mainSleep', 'minutesAwake'], axis=1)
all_files_df.to_csv(output_filename)
if verbose:
print(all_files_df)
print(str(output_filename) + '.csv written')
| 5,576 | 34.75 | 133 |
py
|
correlate
|
correlate-master/1_data_extraction/moonIlluminationByDate.py
|
import datetime
import pylunar
"""
This script is used to extract the moon illumination data for all dates between "base" and "today".
data is printed and can be copy pasted in to csv.
uncomment printing date-list temporarily and check at https://www.timeanddate.com/moon/phases/germany/stuttgart if correct
"""
mi = pylunar.MoonInfo((42, 21, 30), (-71, 3, 35)) # stuttgart location
today = datetime.datetime.today()
base = datetime.datetime(2019,2,11,22,00,00)
timedelta = today - base
date_list = [datetime.timedelta(days=x) + base for x in range(int(timedelta.days+2))]
for i in range(len(date_list)):
mi.update(date_list[i])
# print(date_list[i])
print(round(mi.fractional_phase(),2))
| 710 | 27.44 | 122 |
py
|
correlate
|
correlate-master/prediction/fully_connected.py
|
import math
import numpy as np
import torch
import torch.utils.data as data_utils
from sklearn.preprocessing import MinMaxScaler
from torch import nn
from torch.utils.tensorboard import SummaryWriter
from config import target_label, fully_connected_nn_prediction_on
writer = SummaryWriter()
epochs = 4115
lr = 0.0001
torch.manual_seed(0)
weight_decay = 0.101
# Define model
class NeuralNetwork(nn.Module):
def __init__(self, num_features):
super(NeuralNetwork, self).__init__()
# self.flatten = nn.Flatten()
self.linear_relu_stack = nn.Sequential(
nn.Linear(num_features, 16),
nn.LeakyReLU(),
nn.Linear(16, 8),
# nn.LeakyReLU(),
# nn.Linear(16, 8),
# nn.LeakyReLU(),
# nn.Linear(8, 3),
nn.LeakyReLU(),
nn.Linear(8, 1),
)
def forward(self, x):
# x = self.flatten(x)
logits = self.linear_relu_stack(x)
return logits
def fully_connected_nn_prediction(df):
if fully_connected_nn_prediction_on:
# dataframes to tensors
target_tensor = torch.tensor(df[target_label].values.astype(np.float32))
target_tensor = torch.unsqueeze(target_tensor, 1) # due to one dim target tensor
# print('train_target', train_target)
input_df = df.drop([target_label], axis=1)
num_features = len(input_df.columns)
input_tensor = torch.tensor(input_df.values.astype(np.float32))
# input normalization
scaler = MinMaxScaler()
scaler.fit(input_tensor)
input_tensor = torch.tensor(scaler.transform(input_tensor).astype(np.float32))
tensor_dataset = data_utils.TensorDataset(input_tensor, target_tensor)
# train test split
print('dataset_size:', len(tensor_dataset))
train_size = int(0.9 * len(tensor_dataset))
test_size = len(tensor_dataset) - train_size
train_dataset, test_dataset = torch.utils.data.random_split(tensor_dataset, [train_size, test_size])
# load data
batch_size = math.floor(train_size)
train_dataloader = data_utils.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
test_dataloader = data_utils.DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=True)
for X, y in test_dataset:
print("Shape of X [BatchSize, #params]: ", X.shape)
print("Shape of y: ", y.shape, y.dtype)
break
# Get cpu or gpu device for training.
device = "cuda" if torch.cuda.is_available() else "cpu"
print("Using {} device".format(device))
model = NeuralNetwork(num_features).to(device)
print(model)
loss_fn = nn.MSELoss()
optimizer = torch.optim.AdamW(model.parameters(), lr=lr, weight_decay=weight_decay)
for epoch in range(epochs):
print(f"Epoch {epoch + 1}\n-------------------------------")
train(train_dataloader, model, loss_fn, optimizer, epoch + 1, device)
test(test_dataloader, model, loss_fn, epoch + 1, device)
writer.flush()
writer.close()
print("Done Training!")
# save model
torch.save(model.state_dict(), "model.pth")
print("Saved PyTorch Model State to model.pth")
# load model
model = NeuralNetwork(num_features)
model.load_state_dict(torch.load("model.pth"))
model.eval()
for day in range(len(test_dataset)):
x = test_dataset[day][0]
y = test_dataset[day][1]
with torch.no_grad():
pred = model(x)
predicted, actual = pred[0], y
# print(f'Predicted: {predicted}; Actual: {actual[0]}')
def train(dataloader, model, loss_fn, optimizer, epoch, device):
size = len(dataloader.dataset)
for batch, (X, y) in enumerate(dataloader):
X, y = X.to(device), y.to(device)
# Compute prediction error
pred = model(X)
loss = loss_fn(pred, y)
# Backpropagation
optimizer.zero_grad()
loss.backward()
optimizer.step()
if batch % 100 == 0:
loss, current = loss.item(), batch * len(X)
print(f"train loss: {loss:>7f} [{current:>5d}/{size:>5d}]")
writer.add_scalar("Loss/train", loss, epoch)
def test(dataloader, model, loss_fn, epoch, device):
num_batches = len(dataloader)
model.eval()
test_loss = 0
with torch.no_grad():
for X, y in dataloader:
X, y = X.to(device), y.to(device)
pred = model(X)
test_loss += loss_fn(pred, y).item()
test_loss /= num_batches
print(f"Avg test loss: {test_loss:>8f} \n")
writer.add_scalar("Loss/test", test_loss, epoch)
| 4,817 | 32.227586 | 108 |
py
|
correlate
|
correlate-master/prediction/linear_regression.py
|
import numpy as np
import pandas as pd
from sklearn.linear_model import ElasticNet
from sklearn.model_selection import TimeSeriesSplit
from config import target_label, ensemble_weights, multiple_linear_regression_ensemble_on, \
regularization_strengths, l1_ratios, private_folder_path
from helper import histograms, plot_prediction_w_ci_interval, drop_days_where_mood_was_tracked_irregularly, \
out_of_bound_correction, generate_sample_weights
from phone_io import write_csv_for_phone_visualization
def multiple_linear_regression_ensemble(df,
df_not_normalized,
df_longest,
df_2019_09_08,
df_widest,
results,
target_mean,
target_std,
target_scale_bounds_normalized,
min_max):
if multiple_linear_regression_ensemble_on:
# multiple linear regression on different datasets
prediction_results = df[target_label].to_frame()
prediction_results = multiple_regression(df=df_longest,
results=results,
dataset_name='longest',
prediction_results=prediction_results,
regularization_strength=regularization_strengths[0],
l1_ratio=l1_ratios[0],
target_scale_bounds_normalized=target_scale_bounds_normalized)
prediction_results = multiple_regression(df=df_2019_09_08,
results=results,
dataset_name='after2019_09_08',
prediction_results=prediction_results,
regularization_strength=regularization_strengths[1],
l1_ratio=l1_ratios[1],
target_scale_bounds_normalized=target_scale_bounds_normalized)
prediction_results = multiple_regression(df=df_widest,
results=results,
dataset_name='widest',
prediction_results=prediction_results,
regularization_strength=regularization_strengths[2],
l1_ratio=l1_ratios[2],
target_scale_bounds_normalized=target_scale_bounds_normalized)
        prediction_results['ensemble_prediction'] = (
                ensemble_weights[0] * prediction_results['longest k=5']
                + ensemble_weights[1] * prediction_results['after2019_09_08 k=5']
                + ensemble_weights[2] * prediction_results['widest k=5'])
# diff
prediction_results['ensemble_diff'] = prediction_results[target_label] - prediction_results[
'ensemble_prediction']
histograms(prediction_results['ensemble_diff'].to_frame(),
save_path='/home/chrei/PycharmProjects/correlate/plots/prediction_diff/')
# l1
prediction_results['ensemble_residuals'] = abs(prediction_results['ensemble_diff'])
histograms(prediction_results['ensemble_residuals'].to_frame(),
save_path='/home/chrei/PycharmProjects/correlate/plots/prediction_diff/')
ensemble_average_residual = prediction_results['ensemble_residuals'].mean()
print('ensemble_average_residual: ', ensemble_average_residual)
prediction_results['CI_low'] = prediction_results['ensemble_prediction'] - ensemble_average_residual
prediction_results['CI_high'] = prediction_results['ensemble_prediction'] + ensemble_average_residual
ci = np.percentile(prediction_results['ensemble_residuals'].dropna(), 95)
ci68 = np.percentile(prediction_results['ensemble_residuals'].dropna(), 68)
print('prediction 95% confidence interval: ', ci)
plot_prediction_w_ci_interval(prediction_results, ci, target_mean, target_std)
write_csv_for_phone_visualization(ci95=ci, ci68=ci68, target_mean=target_mean,
prediction=prediction_results['widest k=5'],
scale_bounds=min_max[target_label],
feature_weights_normalized=results['reg_coeff_widestk=5'],
feature_values_not_normalized=df_not_normalized,
feature_values_normalized=df,
target_std_dev=target_std,
min_max=min_max)
# l2
prediction_results['ensemble_loss'] = prediction_results['ensemble_diff'] ** 2
ensemble_average_loss = prediction_results['ensemble_loss'].mean()
print('ensemble_average_loss: ', ensemble_average_loss)
# save
prediction_results.to_csv(str(private_folder_path) + 'prediction_results.csv') # save to file
def multiple_regression(df, results, dataset_name, prediction_results, regularization_strength, l1_ratio,
target_scale_bounds_normalized):
df = drop_days_where_mood_was_tracked_irregularly(df)
# missing_value_check(df)
y = df[target_label]
X = df.drop([target_label], axis=1)
# time series split for cv
tscv = TimeSeriesSplit(gap=0, max_train_size=None, n_splits=5, test_size=None)
i = 0
cross_validation_loss_list = []
for train_index, test_index in tscv.split(X):
i += 1
        if i == 5:  # the early time-series folds leave too little training data, so only the final (largest) train/test split is used
# print("TRAIN:", train_index, "\nTEST:", test_index)
X_train = X.iloc[train_index]
X_test = X.iloc[test_index]
y_train = y.iloc[train_index]
y_test = y.iloc[test_index]
regression = ElasticNet(alpha=regularization_strength, l1_ratio=l1_ratio,
fit_intercept=True)
sample_weight = generate_sample_weights(y_train)
regression.fit(X_train, y_train, sample_weight=sample_weight)
# x_labels = X.columns
regression_coefficient_df = pd.DataFrame(index=X.columns, columns=['reg_coeff'])
regression_coefficient_df['reg_coeff'] = regression.coef_
# print('intercept:', regression.intercept_, dataset_name)
results['reg_coeff_' + str(dataset_name) + 'k=' + str(i)] = regression_coefficient_df
results.to_csv(str(private_folder_path) + 'results.csv') # save to file
predictions = regression.predict(X_test)
predictions = pd.DataFrame(list(zip(y_test.index, predictions)),
columns=['date', str(dataset_name) + ' k=' + str(i)])
predictions = predictions.set_index('date')
predictions = out_of_bound_correction(predictions, target_scale_bounds_normalized)
prediction_results = prediction_results.join(predictions)
l2_loss = (y_test - predictions[str(dataset_name) + ' k=' + str(i)]) ** 2
mean_l2_loss_for_one_fold = l2_loss.mean(axis=0)
# print('L2 loss ' + str(dataset_name) + 'k=' + str(i), ': ', mean_l2_loss_for_one_fold)
cross_validation_loss_list.append(mean_l2_loss_for_one_fold)
cross_validation_loss = np.mean(cross_validation_loss_list)
print('cross_validation_loss: ', cross_validation_loss)
return prediction_results
| 8,120 | 57.007143 | 111 |
py
|
ClariQ
|
ClariQ-master/src/clariq_eval_tool.py
|
import pandas as pd
import argparse
import pickle
from os import path
import json
from statistics import mean
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
def evaluate_clarification_need(experiment_type, data_dir, run_file, out_file, leaderboard):
if experiment_type in ['train', 'dev']:
label_file_path = path.join(data_dir, '{}.tsv'.format(experiment_type))
else:
label_file_path = path.join(data_dir, '{}.tsv'.format('test_with_labels'))
# raise FileNotFoundError # TODO: remove when test labels released.
clarification_labels_dict = \
pd.read_csv(label_file_path, sep='\t').drop_duplicates('topic_id').set_index('topic_id')[
'clarification_need'].to_dict()
run_dict = pd.read_csv(run_file, sep=' ', header=None).set_index(0)[1].to_dict()
y_true = []
y_pred = []
for topic_id in clarification_labels_dict:
y_true.append(clarification_labels_dict[topic_id])
try:
y_pred.append(run_dict[topic_id])
except KeyError: # no prediction provided in the run file, so we put a dummy label.
y_pred.append(0)
precision = precision_score(y_true, y_pred, average='weighted')
recall = recall_score(y_true, y_pred, average='weighted')
f1 = f1_score(y_true, y_pred, average='weighted')
if leaderboard:
print('<td>{:.4f}</td>\n<td>{:.4f}</td>\n<td>{:.4f}</td>'.format(precision, recall, f1))
else:
print('Precision: ', precision)
print('Recall: ', recall)
print('F1:', f1)
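# A minimal sketch of the run file this function parses: space-separated, with
# column 0 read as topic_id and column 1 as the predicted clarification_need
# label (the topic ids and labels below are made-up examples):
#
#   201 1
#   202 3
#   203 2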
def evaluate_document_relevance_single_turn(experiment_type, data_dir, run_file, out_file, leaderboard):
eval_file_path, topic_file_path = get_eval_topic_file_paths(data_dir, experiment_type)
eval_dict = load_eval_dict(eval_file_path, topic_file_path)
run_dict = load_run_dict_doc_relevance(run_file)
facet_to_topic_dict = load_facet_to_topic_dict(topic_file_path)
performance_dict = {}
for metric in eval_dict:
performance_dict[metric] = {}
get_document_relevance_for_metric(eval_dict, facet_to_topic_dict, metric, False, performance_dict,
run_dict)
if out_file != '':
with open(out_file, 'w') as fo:
json.dump(performance_dict, fo)
# compute the mean performance per metric and print
mean_performance = {}
for metric in performance_dict:
mean_performance[metric] = mean(performance_dict[metric][k] for k in performance_dict[metric])
if leaderboard:
print('| RANK | CREATOR | MODELNAME | {:.4f} | {:.4f} | {:.4f} | {:.4f} |'.format(mean_performance['MRR100'],
mean_performance['P1'],
mean_performance['NDCG3'],
mean_performance['NDCG5']))
else:
for metric in performance_dict:
print('{}: {}'.format(metric, mean_performance[metric]))
def evaluate_document_relevance_multi_turn(experiment_type, data_dir, run_file, out_file, leaderboard):
eval_file_path, synthetic_file_path = get_eval_topic_file_paths(data_dir, experiment_type, True)
eval_dict = load_eval_dict(eval_file_path, synthetic_file_path, True)
run_dict = load_run_dict_doc_relevance(run_file, data_dir, True)
performance_dict = {}
for metric in eval_dict:
performance_dict[metric] = {}
get_document_relevance_for_metric(eval_dict, None, metric, True, performance_dict,
run_dict)
if out_file != '':
with open(out_file, 'w') as fo:
json.dump(performance_dict, fo)
# compute the mean performance per metric and print
mean_performance = {}
for metric in performance_dict:
mean_performance[metric] = mean(performance_dict[metric][k] for k in performance_dict[metric])
if leaderboard:
print('| RANK | CREATOR | MODELNAME | {:.4f} | {:.4f} | {:.4f} | {:.4f} |'.format(mean_performance['MRR100'],
mean_performance['P1'],
mean_performance['NDCG3'],
mean_performance['NDCG5']))
else:
for metric in performance_dict:
print('{}: {}'.format(metric, mean_performance[metric]))
def evaluate_document_relevance(experiment_type, data_dir, run_file, out_file, multi_turn, leaderboard):
if multi_turn:
return evaluate_document_relevance_multi_turn(experiment_type, data_dir, run_file, out_file, leaderboard)
else:
return evaluate_document_relevance_single_turn(experiment_type, data_dir, run_file, out_file, leaderboard)
def get_document_relevance_for_metric(eval_dict, facet_to_topic_dict, metric, multi_turn, performance_dict, run_dict):
for context_id in eval_dict[metric]:
try:
selected_q = get_selected_question(context_id, facet_to_topic_dict, multi_turn, run_dict)
try:
performance_dict[metric][context_id] = eval_dict[metric][context_id][selected_q]['with_answer']
except KeyError: # if question is not among candidate question, we consider it equal to minimum performance.
performance_dict[metric][context_id] = eval_dict[metric][context_id]['MIN']['with_answer']
except KeyError: # if there is no prediction provided for a facet, we consider performance 0.
performance_dict[metric][context_id] = 0.
def get_selected_question(context_id, facet_to_topic_dict, multi_turn, run_dict):
if multi_turn:
selected_q = run_dict[context_id]
else:
selected_q = run_dict[facet_to_topic_dict[context_id]]
selected_q = 'MIN' if selected_q == 'MAX' else selected_q # to avoid submitting MAX results.
return selected_q
def get_eval_topic_file_paths(data_dir, experiment_type, multi_turn=False):
if experiment_type in ['train', 'dev']:
if multi_turn:
eval_file_path = path.join(data_dir, 'multi_turn_{}_eval.pkl'.format(experiment_type))
topic_file_path = path.join(data_dir, '{}_synthetic.pkl'.format(experiment_type))
else:
eval_file_path = path.join(data_dir, 'single_turn_train_eval.pkl')
topic_file_path = path.join(data_dir, '{}.tsv'.format(experiment_type))
else:
eval_file_path = path.join(data_dir, 'single_turn_test_eval.pkl')
topic_file_path = path.join(data_dir, 'test_with_labels.tsv')
# raise FileNotFoundError # TODO: remove when test eval released.
return eval_file_path, topic_file_path
def load_facet_to_topic_dict(topic_file_path):
topic_df = pd.read_csv(topic_file_path, sep='\t')
facet_to_topic_dict = topic_df.set_index('facet_id')['topic_id'].to_dict()
return facet_to_topic_dict
def load_eval_dict(eval_file_path, topic_file_path, multi_turn=False):
if multi_turn:
with open(eval_file_path, 'rb') as fi:
eval_dict = pickle.load(fi)
return eval_dict
else:
topic_df = pd.read_csv(topic_file_path, sep='\t')
context_array = topic_df['facet_id'].values
with open(eval_file_path, 'rb') as fi:
eval_dict = pickle.load(fi)
# we keep only the instances in the topic file.
new_eval_dict = {}
for metric in eval_dict:
new_eval_dict[metric] = {}
for fid in eval_dict[metric]:
if fid in context_array:
new_eval_dict[metric][fid] = eval_dict[metric][fid]
return new_eval_dict
def load_run_dict_doc_relevance(run_file, data_folder='', multi_turn=False):
run_df = pd.read_csv(run_file, sep=' ', header=None).fillna('')
run_df = run_df.sort_values(by=4).drop_duplicates(subset=[0], keep='last') # we only keep the top ranked question.
if multi_turn:
question_dict = pd.read_csv(path.join(data_folder, 'question_bank.tsv'), sep='\t').fillna('').set_index('question')[
'question_id'].to_dict()
run_df[2] = run_df[2].map(question_dict)
run_dict = run_df.set_index(0)[2].to_dict() # we convert the run dataframe to dict.
return run_dict
def evaluate_question_relevance(experiment_type, data_dir, run_file, out_file, leaderboard):
eval_file_path, topic_file_path = get_eval_topic_file_paths(data_dir, experiment_type)
topic_df = pd.read_csv(topic_file_path, sep='\t')
topic_question_set_dict = topic_df.groupby('topic_id')['question_id'].agg(set).to_dict()
run_df = pd.read_csv(run_file, sep=' ', header=None)
run_df = run_df.sort_values(by=[0, 4], ascending=False).drop_duplicates(subset=[0, 4], keep='first')
run_question_set_list = run_df.groupby(0)[2].agg(list).to_dict()
topk_list = [5, 10, 20, 30]
recall_score_dict = {}
for topk in topk_list:
metric_name = 'Recall{}'.format(topk)
recall_score_dict[metric_name] = {}
for tid in topic_question_set_dict:
try:
rec = len(set(run_question_set_list[tid][:topk]) & topic_question_set_dict[tid]) / len(
topic_question_set_dict[tid])
except KeyError: # in case a topic is not included in the predictions
rec = 0.
recall_score_dict[metric_name][tid] = rec
if out_file != '':
with open(out_file, 'w') as fo:
json.dump(recall_score_dict, fo)
mean_performance = {}
for metric in recall_score_dict:
mean_performance[metric] = mean(recall_score_dict[metric][k] for k in recall_score_dict[metric])
if leaderboard:
print('| RANK | CREATOR | MODELNAME | {:.4f} | {:.4f} | {:.4f} | {:.4f} |'.format(mean_performance['Recall5'],
mean_performance['Recall10'],
mean_performance['Recall20'],
mean_performance['Recall30']))
else:
for metric in recall_score_dict:
print('{}: {}'.format(metric, mean_performance[metric]))
def main():
parser = argparse.ArgumentParser(description='Input arguments for ClariQ eval tool.',
add_help=True)
parser.add_argument('--eval_task',
dest='eval_task',
type=str,
help='Defines the evaluation task. Possible values: '
'clarification_need|document_relevance|question_relevance',
required=True)
parser.add_argument('--experiment_type',
dest='experiment_type',
type=str,
help='Defines the experiment type. The run file will be evaluated on the data that you '
'specify here. Possible values: train|dev|test. Default value: dev',
default='dev')
parser.add_argument('--data_dir',
dest='data_dir',
type=str,
help='Path to the data directory.',
default='../data/',
)
parser.add_argument('--run_file',
dest='run_file',
type=str,
help='Path to the run file.',
required=True)
parser.add_argument('--out_file',
dest='out_file',
type=str,
help='Path to the evaluation output json file.',
required=False,
default='')
parser.add_argument('--multi_turn',
dest='multi_turn', action='store_true',
help='Determines if the results are on multi-turn conversations. Conversation is assumed to '
'be single-turn if not specified.',
required=False)
parser.add_argument('--leaderboard',
dest='leaderboard', action='store_true',
help='Determines if the results output must be in the format to be added to the leaderboard',
required=False)
parser.set_defaults(multi_turn=False)
input_args = parser.parse_args()
if input_args.eval_task == 'clarification_need':
evaluate_clarification_need(input_args.experiment_type, input_args.data_dir, input_args.run_file,
input_args.out_file, input_args.leaderboard)
elif input_args.eval_task == 'document_relevance':
evaluate_document_relevance(input_args.experiment_type, input_args.data_dir, input_args.run_file,
input_args.out_file, input_args.multi_turn, input_args.leaderboard)
elif input_args.eval_task == 'question_relevance':
evaluate_question_relevance(input_args.experiment_type, input_args.data_dir, input_args.run_file,
input_args.out_file, input_args.leaderboard)
if __name__ == '__main__':
main()
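# A hypothetical invocation, using only the flags defined in main() above; the
# run-file and output paths are placeholders:
#
#   python clariq_eval_tool.py --eval_task document_relevance \
#       --experiment_type dev \
#       --data_dir ../data/ \
#       --run_file runs/dev_document_relevance.run \
#       --out_file dev_document_relevance_scores.json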
| 13,590 | 48.421818 | 124 |
py
|
DeepOnto
|
DeepOnto-main/src/deeponto/__init__.py
|
# Copyright 2021 Yuan He. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# the following code is credited to the mOWL library under the BSD 3-Clause License:
# https://github.com/bio-ontology-research-group/mowl/blob/main/LICENSE
import jpype
import jpype.imports # very important for basic Java dependencies!
import os
import platform
def init_jvm(memory):
jars_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "lib/")
# jars_dir = os.path.join(os.path.dirname(os.path.realpath(mowl.__file__)), "lib/")
if not os.path.exists(jars_dir):
        raise FileNotFoundError("JAR files not found. Make sure that the lib directory exists "
                                "and contains the JAR dependencies.")
if (platform.system() == 'Windows'):
jars = f'{str.join(";", [jars_dir + name for name in os.listdir(jars_dir)])}'
else:
jars = f'{str.join(":", [jars_dir + name for name in os.listdir(jars_dir)])}'
if not jpype.isJVMStarted():
jpype.startJVM(
jpype.getDefaultJVMPath(), "-ea",
f"-Xmx{memory}",
"-Djava.class.path=" + jars,
convertStrings=False)
if jpype.isJVMStarted():
print(f"{memory} maximum memory allocated to JVM.")
print("JVM started successfully.")
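# A minimal usage sketch: the memory string is passed straight to the JVM's -Xmx
# option, so "8g" (an example value) caps the heap at 8 GB.
#
#   init_jvm("8g")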
| 1,805 | 38.26087 | 96 |
py
|
DeepOnto
|
DeepOnto-main/src/deeponto/probe/__init__.py
|
# Copyright 2021 Yuan He. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 589 | 44.384615 | 74 |
py
|
DeepOnto
|
DeepOnto-main/src/deeponto/probe/ontolama/inference.py
|
# Copyright 2021 Yuan He. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Credit to: https://github.com/thunlp/OpenPrompt/blob/main/experiments/cli.py
 
"""Script for running OpenPrompt models for OntoLAMA."""
import os
from typing import List
import random
import logging
from openprompt.trainer import ClassificationRunner, GenerationRunner
from openprompt.lm_bff_trainer import LMBFFClassificationRunner
from openprompt.protoverb_trainer import ProtoVerbClassificationRunner
from openprompt.pipeline_base import PromptForClassification, PromptForGeneration
from openprompt.utils.reproduciblity import set_seed
from openprompt.prompts import (
load_template,
load_verbalizer,
)
from openprompt.data_utils import FewShotSampler
from openprompt.utils.logging import config_experiment_dir, init_logger
from openprompt.config import get_config, save_config_to_yaml
from openprompt.plms import load_plm_from_config
from openprompt import PromptDataLoader
from openprompt.prompt_base import Template
from openprompt.plms.utils import TokenizerWrapper
from transformers.tokenization_utils import PreTrainedTokenizer
from yacs.config import CfgNode
from .data_processor import OntoLAMADataProcessor
CUR_TEMPLATE = None
CUR_VERBALIZER = None
def build_dataloader(
dataset: List,
template: Template,
tokenizer: PreTrainedTokenizer,
tokenizer_wrapper_class: TokenizerWrapper,
config: CfgNode,
split: str,
):
dataloader = PromptDataLoader(
dataset=dataset,
template=template,
tokenizer=tokenizer,
tokenizer_wrapper_class=tokenizer_wrapper_class,
batch_size=config[split].batch_size,
shuffle=config[split].shuffle_data,
teacher_forcing=config[split].teacher_forcing if hasattr(config[split], "teacher_forcing") else None,
predict_eos_token=True if config.task == "generation" else False,
**config.dataloader,
)
example = template.incorporate_text_example(random.choice(dataset))
logger = logging.getLogger()
logger.info(f"transformed example: {example}")
return dataloader
def run_inference(config, args):
"""Main entry for running the OpenPrompt script.
"""
global CUR_TEMPLATE, CUR_VERBALIZER
# exit()
# init logger, create log dir and set log level, etc.
if args.resume and args.test:
raise Exception("cannot use flag --resume and --test together")
if args.resume or args.test:
config.logging.path = EXP_PATH = args.resume or args.test
else:
EXP_PATH = config_experiment_dir(config)
init_logger(
os.path.join(EXP_PATH, "log.txt"),
config.logging.file_level,
config.logging.console_level,
)
# save config to the logger directory
save_config_to_yaml(config)
# load dataset. The valid_dataset can be None
train_dataset, valid_dataset, test_dataset, Processor = OntoLAMADataProcessor.load_inference_dataset(
config, test=args.test is not None or config.learning_setting == "zero_shot"
)
# main
if config.learning_setting == "full":
res = trainer(
EXP_PATH,
config,
Processor,
resume=args.resume,
test=args.test,
train_dataset=train_dataset,
valid_dataset=valid_dataset,
test_dataset=test_dataset,
)
elif config.learning_setting == "few_shot":
if config.few_shot.few_shot_sampling is None:
raise ValueError("use few_shot setting but config.few_shot.few_shot_sampling is not specified")
seeds = config.sampling_from_train.seed
res = 0
for seed in seeds:
if not args.test:
sampler = FewShotSampler(
num_examples_per_label=config.sampling_from_train.num_examples_per_label,
also_sample_dev=config.sampling_from_train.also_sample_dev,
num_examples_per_label_dev=config.sampling_from_train.num_examples_per_label_dev,
)
train_sampled_dataset, valid_sampled_dataset = sampler(
train_dataset=train_dataset, valid_dataset=valid_dataset, seed=seed
)
result = trainer(
os.path.join(EXP_PATH, f"seed-{seed}"),
config,
Processor,
resume=args.resume,
test=args.test,
train_dataset=train_sampled_dataset,
valid_dataset=valid_sampled_dataset,
test_dataset=test_dataset,
)
else:
result = trainer(
os.path.join(EXP_PATH, f"seed-{seed}"),
config,
Processor,
test=args.test,
test_dataset=test_dataset,
)
res += result
res /= len(seeds)
elif config.learning_setting == "zero_shot":
res = trainer(
EXP_PATH,
config,
Processor,
zero=True,
train_dataset=train_dataset,
valid_dataset=valid_dataset,
test_dataset=test_dataset,
)
return config, CUR_TEMPLATE, CUR_VERBALIZER
def trainer(
EXP_PATH,
config,
Processor,
train_dataset=None,
valid_dataset=None,
test_dataset=None,
resume=None,
test=None,
zero=False,
):
global CUR_TEMPLATE, CUR_VERBALIZER
if not os.path.exists(EXP_PATH):
os.mkdir(EXP_PATH)
config.logging.path = EXP_PATH
# set seed
set_seed(config.reproduce.seed)
# load the pretrained models, its model, tokenizer, and config.
plm_model, plm_tokenizer, plm_config, plm_wrapper_class = load_plm_from_config(config)
# define template and verbalizer
if config.task == "classification":
# define prompt
template = load_template(config=config, model=plm_model, tokenizer=plm_tokenizer, plm_config=plm_config)
verbalizer = load_verbalizer(
config=config,
model=plm_model,
tokenizer=plm_tokenizer,
plm_config=plm_config,
classes=Processor.labels,
)
# load prompt’s pipeline model
prompt_model = PromptForClassification(
plm_model, template, verbalizer, freeze_plm=config.plm.optimize.freeze_para
)
elif config.task == "generation":
template = load_template(config=config, model=plm_model, tokenizer=plm_tokenizer, plm_config=plm_config)
prompt_model = PromptForGeneration(
plm_model,
template,
freeze_plm=config.plm.optimize.freeze_para,
gen_config=config.generation,
)
else:
raise NotImplementedError(
f"config.task {config.task} is not implemented yet. Only classification and generation are supported."
)
# process data and get data_loader
train_dataloader = (
build_dataloader(train_dataset, template, plm_tokenizer, plm_wrapper_class, config, "train")
if train_dataset
else None
)
valid_dataloader = (
build_dataloader(valid_dataset, template, plm_tokenizer, plm_wrapper_class, config, "dev")
if valid_dataset
else None
)
test_dataloader = (
build_dataloader(test_dataset, template, plm_tokenizer, plm_wrapper_class, config, "test")
if test_dataset
else None
)
if config.task == "classification":
if config.classification.auto_t or config.classification.auto_v:
runner = LMBFFClassificationRunner(
train_dataset=train_dataset,
valid_dataset=valid_dataset,
test_dataset=test_dataset,
template=template,
verbalizer=verbalizer,
config=config,
)
elif config.verbalizer == "proto_verbalizer":
runner = ProtoVerbClassificationRunner(
model=prompt_model,
train_dataloader=train_dataloader,
valid_dataloader=valid_dataloader,
test_dataloader=test_dataloader,
id2label=Processor.id2label,
config=config,
)
else:
runner = ClassificationRunner(
model=prompt_model,
train_dataloader=train_dataloader,
valid_dataloader=valid_dataloader,
test_dataloader=test_dataloader,
id2label=Processor.id2label,
config=config,
)
elif config.task == "generation":
runner = GenerationRunner(
model=prompt_model,
train_dataloader=train_dataloader,
valid_dataloader=valid_dataloader,
test_dataloader=test_dataloader,
config=config,
)
    CUR_TEMPLATE = template
    # the verbalizer is only defined for the classification task
    CUR_VERBALIZER = verbalizer if config.task == "classification" else None
    logger = logging.getLogger()
    if config.task == "classification":
        logger.info(f"Label classes: {verbalizer.classes}")
        logger.info(f"Label words: {verbalizer.label_words}")
if zero:
res = runner.test()
elif test:
res = runner.test(ckpt="best")
elif resume:
res = runner.run(ckpt="last")
else:
res = runner.run()
return res
| 9,867 | 34.624549 | 114 |
py
|
DeepOnto
|
DeepOnto-main/src/deeponto/probe/ontolama/data_processor.py
|
# Copyright 2021 Yuan He. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from yacs.config import CfgNode
from datasets import load_dataset
from openprompt.data_utils import InputExample
from openprompt.data_utils.data_processor import DataProcessor
from openprompt.utils.logging import logger
class OntoLAMADataProcessor(DataProcessor):
"""Class for processing the OntoLAMA data points."""
def __init__(self):
super().__init__()
self.labels = ["negative", "positive"]
@staticmethod
def load_dataset(task_name: str, split: str):
"""Load a specific OntoLAMA dataset from huggingface dataset hub."""
        # TODO: remove use_auth_token after the dataset goes public
return load_dataset("krr-oxford/OntoLAMA", task_name, split=split, use_auth_token=True)
def get_examples(self, task_name, split):
"""Load a specific OntoLAMA dataset and transform the data points into
input examples for prompt-based inference.
"""
dataset = self.load_dataset(task_name, split)
premise_name = "v_sub_concept"
hypothesis_name = "v_super_concept"
# different data fields for the bimnli dataset
if "bimnli" in task_name:
premise_name = "premise"
hypothesis_name = "hypothesis"
prompt_samples = []
for samp in dataset:
inp = InputExample(text_a=samp[premise_name], text_b=samp[hypothesis_name], label=samp["label"])
prompt_samples.append(inp)
return prompt_samples
@classmethod
def load_inference_dataset(cls, config: CfgNode, return_class=True, test=False):
 
        r"""A dataset loader using the global config.
        It will load the train, valid, and test sets (if they exist) simultaneously.
Args:
config (CfgNode): The global config from the CfgNode.
return_class (bool): Whether return the data processor class for future usage.
Returns:
(Optional[List[InputExample]]): The train dataset.
(Optional[List[InputExample]]): The valid dataset.
(Optional[List[InputExample]]): The test dataset.
(Optional[OntoLAMADataProcessor]): The data processor object.
"""
dataset_config = config.dataset
processor = cls()
train_dataset = None
valid_dataset = None
if not test:
try:
train_dataset = processor.get_examples(dataset_config.task_name, "train")
except FileNotFoundError:
logger.warning(f"Has no training dataset in krr-oxford/OntoLAMA/{dataset_config.task_name}.")
try:
valid_dataset = processor.get_examples(dataset_config.task_name, "validation")
except FileNotFoundError:
logger.warning(f"Has no validation dataset in krr-oxford/OntoLAMA/{dataset_config.task_name}.")
test_dataset = None
try:
test_dataset = processor.get_examples(dataset_config.task_name, "test")
except FileNotFoundError:
logger.warning(f"Has no test dataset in krr-oxford/OntoLAMA/{dataset_config.task_name}.")
        # check whether any split was downloaded.
if (train_dataset is None) and (valid_dataset is None) and (test_dataset is None):
logger.error(
"Dataset is empty. Either there is no download or the path is wrong. "
+ "If not downloaded, please `cd datasets/` and `bash download_xxx.sh`"
)
exit()
if return_class:
return train_dataset, valid_dataset, test_dataset, processor
else:
return train_dataset, valid_dataset, test_dataset
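# A minimal usage sketch, assuming the named OntoLAMA subset and split are
# available on the hub:
#
#   processor = OntoLAMADataProcessor()
#   examples = processor.get_examples("bimnli", "validation")  # -> List[InputExample]
#   print(examples[0].text_a, examples[0].text_b, examples[0].label)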
| 4,208 | 39.864078 | 111 |
py
|
DeepOnto
|
DeepOnto-main/src/deeponto/probe/ontolama/__init__.py
|
# Copyright 2021 Yuan He. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .inference import run_inference
| 627 | 40.866667 | 74 |
py
|
DeepOnto
|
DeepOnto-main/src/deeponto/probe/ontolama/subsumption_sampler.py
|
# Copyright 2021 Yuan He. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import abstractmethod
import itertools
import random
from collections import defaultdict
from typing import Callable, Optional
import enlighten
import re
from org.semanticweb.owlapi.model import OWLAxiom # type: ignore
from deeponto.onto import Ontology
class SubsumptionSamplerBase:
"""Base Class for Sampling Subsumption Pairs."""
def __init__(self, onto: Ontology):
self.onto = onto
self.progress_manager = enlighten.get_manager()
# for faster sampling
self.concept_iris = list(self.onto.owl_classes.keys())
self.object_property_iris = list(self.onto.owl_object_properties.keys())
self.sibling_concept_groups = self.onto.sibling_class_groups
self.sibling_auxiliary_dict = defaultdict(list)
for i, sib_group in enumerate(self.sibling_concept_groups):
for sib in sib_group:
self.sibling_auxiliary_dict[sib].append(i)
def random_named_concept(self) -> str:
"""Randomly draw a named concept's IRI."""
return random.choice(self.concept_iris)
def random_object_property(self) -> str:
 
        """Randomly draw an object property's IRI."""
return random.choice(self.object_property_iris)
def get_siblings(self, concept_iri: str):
"""Get the sibling concepts of the given concept."""
sibling_group = self.sibling_auxiliary_dict[concept_iri]
sibling_group = [self.sibling_concept_groups[i] for i in sibling_group]
sibling_group = list(itertools.chain.from_iterable(sibling_group))
return sibling_group
def random_sibling(self, concept_iri: str) -> str:
"""Randomly draw a sibling concept for a given concept."""
sibling_group = self.get_siblings(concept_iri)
if sibling_group:
return random.choice(sibling_group)
else:
# not every concept has a sibling concept
return None
@abstractmethod
def positive_sampling(self, num_samples: Optional[int]):
raise NotImplementedError
@abstractmethod
def negative_sampling(self, num_samples: Optional[int]):
raise NotImplementedError
class AtomicSubsumptionSampler(SubsumptionSamplerBase):
r"""Sampler for constructing the Atomic Subsumption Inference (SI) dataset.
Positive samples come from the entailed subsumptions.
Soft negative samples come from the pairs of randomly selected concepts, subject to
passing the [assumed disjointness check][deeponto.onto.OntologyReasoner.check_assumed_disjoint].
Hard negative samples come from the pairs of randomly selected *sibling* concepts, subject to
passing the [assumed disjointness check][deeponto.onto.OntologyReasoner.check_assumed_disjoint].
"""
def __init__(self, onto: Ontology):
super().__init__(onto)
# compute the sibling concept pairs for faster hard negative sampling
self.sibling_pairs = []
for sib_group in self.sibling_concept_groups:
self.sibling_pairs += [(x, y) for x, y in itertools.product(sib_group, sib_group) if x != y]
self.maximum_num_hard_negatives = len(self.sibling_pairs)
def positive_sampling(self, num_samples: Optional[int] = None):
r"""Sample named concept pairs that are involved in a subsumption axiom.
An extracted pair $(C, D)$ indicates $\mathcal{O} \models C \sqsubseteq D$ where
$\mathcal{O}$ is the input ontology.
"""
pbar = self.progress_manager.counter(desc="Sample Positive Subsumptions", unit="pair")
positives = []
for concept_iri in self.concept_iris:
owl_concept = self.onto.owl_classes[concept_iri]
for subsumer_iri in self.onto.reasoner.get_inferred_super_entities(owl_concept, direct=False):
positives.append((concept_iri, subsumer_iri))
pbar.update()
positives = list(set(sorted(positives)))
if num_samples:
positives = random.sample(positives, num_samples)
print(f"Sample {len(positives)} unique positive subsumption pairs.")
return positives
def negative_sampling(
self,
negative_sample_type: str,
num_samples: int,
apply_assumed_disjointness_alternative: bool = True,
):
        r"""Sample named concept pairs that are involved in an (assumed) disjointness axiom, which then
        implies non-subsumption.
"""
if negative_sample_type == "soft":
draw_one = lambda: tuple(random.sample(self.concept_iris, k=2))
elif negative_sample_type == "hard":
draw_one = lambda: random.choice(self.sibling_pairs)
else:
raise RuntimeError(f"{negative_sample_type} not supported.")
negatives = []
max_iter = 2 * num_samples
# which method to validate the negative sample
valid_negative = self.onto.reasoner.check_assumed_disjoint
if apply_assumed_disjointness_alternative:
valid_negative = self.onto.reasoner.check_assumed_disjoint_alternative
print(f"Sample {negative_sample_type} negative subsumption pairs.")
# create two bars for process tracking
added_bar = self.progress_manager.counter(total=num_samples, desc="Sample Negative Subsumptions", unit="pair")
iter_bar = self.progress_manager.counter(total=max_iter, desc="#Iteration", unit="it")
i = 0
added = 0
while added < num_samples and i < max_iter:
sub_concept_iri, super_concept_iri = draw_one()
sub_concept = self.onto.get_owl_object_from_iri(sub_concept_iri)
super_concept = self.onto.get_owl_object_from_iri(super_concept_iri)
# collect class iri if accepted
if valid_negative(sub_concept, super_concept):
neg = (sub_concept_iri, super_concept_iri)
negatives.append(neg)
added += 1
added_bar.update(1)
if added == num_samples:
negatives = list(set(sorted(negatives)))
added = len(negatives)
added_bar.count = added
i += 1
iter_bar.update(1)
negatives = list(set(sorted(negatives)))
        print(f"Sample {len(negatives)} unique negative subsumption pairs.")
return negatives
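# A minimal usage sketch for the atomic SI sampler; the ontology path and sample
# sizes are placeholders:
#
#   onto = Ontology("example.owl")
#   sampler = AtomicSubsumptionSampler(onto)
#   positives = sampler.positive_sampling(num_samples=1000)
#   soft_negatives = sampler.negative_sampling("soft", num_samples=1000)
#   hard_negatives = sampler.negative_sampling("hard", num_samples=1000)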
IRI = "<https?:\\/\\/(?:www\\.)?[-a-zA-Z0-9@:%._\\+~#=]{1,256}\\.[a-zA-Z0-9()]{1,6}\\b(?:[-a-zA-Z0-9()@:%_\\+.~#?&\\/=]*)>"
class ComplexSubsumptionSampler(SubsumptionSamplerBase):
r"""Sampler for constructing the Complex Subsumption Inference (SI) dataset.
To obtain complex concept expressions on both sides of the subsumption relationship
(as a sub-concept or a super-concept), this sampler utilises the equivalence axioms
in the form of $C \equiv C_{comp}$ where $C$ is atomic and $C_{comp}$ is complex.
An equivalence axiom like $C \equiv C_{comp}$ is deemed as an **anchor axiom**.
Positive samples are in the form of $C_{sub} \sqsubseteq C_{comp}$ or $C_{comp} \sqsubseteq C_{super}$
where $C_{sub}$ is an entailed sub-concept of $C$ and $C_{comp}$, $C_{super}$ is an entailed super-concept
of $C$ and $C_{comp}$.
Negative samples are formed by replacing one of the named entities in the anchor axiom, the modified
sub-concept and super-concept need to pass the [assumed disjointness check][deeponto.onto.OntologyReasoner.check_assumed_disjoint]
to be accepted as a valid negative sample. Without loss of generality, suppose we choose $C \sqsubseteq C_{comp}$
and replace a named entity in $C_{comp}'$ to form $C \sqsubseteq C_{comp}'$, then $C$ and $C_{comp}'$ is a valid
negative only if they satisfy the assumed disjointness check.
"""
def __init__(self, onto: Ontology):
super().__init__(onto)
self.anchor_axioms = self.onto.get_equivalence_axioms("Classes")
def positive_sampling_from_anchor(self, anchor_axiom: OWLAxiom):
"""Returns all positive subsumption pairs extracted from an anchor equivalence axiom."""
sub_axiom = list(anchor_axiom.asOWLSubClassOfAxioms())[0]
atomic_concept, complex_concept = sub_axiom.getSubClass(), sub_axiom.getSuperClass()
# determine which is the atomic concept
if complex_concept.isClassExpressionLiteral():
atomic_concept, complex_concept = complex_concept, atomic_concept
        # initialise the positive samples from the anchor equivalence axiom
positives = list(anchor_axiom.asOWLSubClassOfAxioms())
for super_concept_iri in self.onto.reasoner.get_inferred_super_entities(atomic_concept, direct=False):
positives.append(
self.onto.owl_data_factory.getOWLSubClassOfAxiom(
complex_concept, self.onto.get_owl_object_from_iri(super_concept_iri)
)
)
for sub_concept_iri in self.onto.reasoner.get_inferred_sub_entities(atomic_concept, direct=False):
positives.append(
self.onto.owl_data_factory.getOWLSubClassOfAxiom(
self.onto.get_owl_object_from_iri(sub_concept_iri), complex_concept
)
)
# TESTING
# for p in positives:
# assert self.onto.reasoner.owl_reasoner.isEntailed(p)
return list(set(sorted(positives)))
def positive_sampling(self, num_samples_per_anchor: Optional[int] = 10):
r"""Sample positive subsumption axioms that involve one atomic and one complex concepts.
An extracted pair $(C, D)$ indicates $\mathcal{O} \models C \sqsubseteq D$ where
$\mathcal{O}$ is the input ontology.
"""
print(f"Maximum number of positive samples for each anchor is set to {num_samples_per_anchor}.")
pbar = self.progress_manager.counter(desc="Sample Positive Subsumptions from", unit="anchor axiom")
positives = dict()
for anchor in self.anchor_axioms:
positives_from_anchor = self.positive_sampling_from_anchor(anchor)
if num_samples_per_anchor and num_samples_per_anchor < len(positives_from_anchor):
positives_from_anchor = random.sample(positives_from_anchor, k = num_samples_per_anchor)
positives[str(anchor)] = positives_from_anchor
pbar.update()
# positives = list(set(sorted(positives)))
print(f"Sample {sum([len(v) for v in positives.values()])} unique positive subsumption pairs.")
return positives
def negative_sampling(self, num_samples_per_anchor: Optional[int] = 10):
r"""Sample negative subsumption axioms that involve one atomic and one complex concepts.
An extracted pair $(C, D)$ indicates $C$ and $D$ pass the [assumed disjointness check][deeponto.onto.OntologyReasoner.check_assumed_disjoint].
"""
print(f"Maximum number of negative samples for each anchor is set to {num_samples_per_anchor}.")
pbar = self.progress_manager.counter(desc="Sample Negative Subsumptions from", unit="anchor axiom")
negatives = dict()
for anchor in self.anchor_axioms:
negatives_from_anchor = []
i, max_iter = 0, num_samples_per_anchor + 2
while i < max_iter and len(negatives_from_anchor) < num_samples_per_anchor:
corrupted_anchor = self.random_corrupt(anchor)
corrupted_sub_axiom = random.choice(list(corrupted_anchor.asOWLSubClassOfAxioms()))
sub_concept, super_concept = corrupted_sub_axiom.getSubClass(), corrupted_sub_axiom.getSuperClass()
if self.onto.reasoner.check_assumed_disjoint_alternative(sub_concept, super_concept):
negatives_from_anchor.append(corrupted_sub_axiom)
i += 1
negatives[str(anchor)] = list(set(sorted(negatives_from_anchor)))
pbar.update()
# negatives = list(set(sorted(negatives)))
        print(f"Sample {sum([len(v) for v in negatives.values()])} unique negative subsumption pairs.")
return negatives
def random_corrupt(self, axiom: OWLAxiom):
"""Randomly change an IRI in the input axiom and return a new one.
"""
replaced_iri = random.choice(re.findall(IRI, str(axiom)))[1:-1]
replaced_entity = self.onto.get_owl_object_from_iri(replaced_iri)
replacement_iri = None
if self.onto.get_entity_type(replaced_entity) == "Classes":
replacement_iri = self.random_named_concept()
elif self.onto.get_entity_type(replaced_entity) == "ObjectProperties":
replacement_iri = self.random_object_property()
else:
# NOTE: to extend to other types of entities in future
raise RuntimeError("Unknown type of axiom.")
return self.onto.replace_entity(axiom, replaced_iri, replacement_iri)
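# A minimal usage sketch for the complex SI sampler, assuming `onto` is an
# already loaded Ontology; the per-anchor sample size is just an example:
#
#   sampler = ComplexSubsumptionSampler(onto)
#   positives = sampler.positive_sampling(num_samples_per_anchor=10)  # dict: anchor axiom -> axioms
#   negatives = sampler.negative_sampling(num_samples_per_anchor=10)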
| 13,498 | 47.383513 | 150 |
py
|
DeepOnto
|
DeepOnto-main/src/deeponto/subs/__init__.py
|
# Copyright 2021 Yuan He (KRR-Oxford). All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 602 | 45.384615 | 74 |
py
|
DeepOnto
|
DeepOnto-main/src/deeponto/subs/bertsubs/pipeline_inter.py
|
# Copyright 2023 Jiaoyan Chen. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# @paper(
# "Contextual Semantic Embeddings for Ontology Subsumption Prediction (World Wide Web Journal)",
# )
import os
import sys
import random
import datetime
import warnings
import math
from yacs.config import CfgNode
from typing import List
import numpy as np
import torch
from transformers import TrainingArguments
from deeponto.onto import Ontology
from .bert_classifier import BERTSubsumptionClassifierTrainer
from .text_semantics import SubsumptionSampler
from .pipeline_intra import BERTSubsIntraPipeline
DEFAULT_CONFIG_FILE_INTER = os.path.join(os.path.dirname(__file__), "default_config_inter.yaml")
class BERTSubsInterPipeline:
r"""Class for the model training and prediction/validation pipeline of inter-ontology subsumption of BERTSubs.
Attributes:
src_onto (Ontology): Source ontology (the sub-class side).
tgt_onto (Ontology): Target ontology (the super-class side).
config (CfgNode): Configuration.
src_sampler (SubsumptionSampler): Object for sampling-related functions of the source ontology.
tgt_sampler (SubsumptionSampler): Object for sampling-related functions of the target ontology.
"""
def __init__(self, src_onto: Ontology, tgt_onto: Ontology, config: CfgNode):
self.src_onto = src_onto
self.tgt_onto = tgt_onto
self.config = config
self.config.label_property = self.config.src_label_property
self.src_sampler = SubsumptionSampler(onto=self.src_onto, config=self.config)
self.config.label_property = self.config.tgt_label_property
self.tgt_sampler = SubsumptionSampler(onto=self.tgt_onto, config=self.config)
start_time = datetime.datetime.now()
read_subsumptions = lambda file_name: [line.strip().split(',') for line in open(file_name).readlines()]
test_subsumptions = None if config.test_subsumption_file is None or config.test_subsumption_file == 'None' \
else read_subsumptions(config.test_subsumption_file)
valid_subsumptions = None if config.valid_subsumption_file is None or config.valid_subsumption_file == 'None' \
else read_subsumptions(config.valid_subsumption_file)
if config.use_ontology_subsumptions_training:
src_subsumptions = BERTSubsIntraPipeline.extract_subsumptions_from_ontology(onto=self.src_onto,
subsumption_type=config.subsumption_type)
tgt_subsumptions = BERTSubsIntraPipeline.extract_subsumptions_from_ontology(onto=self.tgt_onto,
subsumption_type=config.subsumption_type)
src_subsumptions0, tgt_subsumptions0 = [], []
if config.subsumption_type == 'named_class':
for subs in src_subsumptions:
c1, c2 = subs.getSubClass(), subs.getSuperClass()
src_subsumptions0.append([str(c1.getIRI()), str(c2.getIRI())])
for subs in tgt_subsumptions:
c1, c2 = subs.getSubClass(), subs.getSuperClass()
tgt_subsumptions0.append([str(c1.getIRI()), str(c2.getIRI())])
elif config.subsumption_type == 'restriction':
for subs in src_subsumptions:
c1, c2 = subs.getSubClass(), subs.getSuperClass()
src_subsumptions0.append([str(c1.getIRI()), str(c2)])
for subs in tgt_subsumptions:
c1, c2 = subs.getSubClass(), subs.getSuperClass()
tgt_subsumptions0.append([str(c1.getIRI()), str(c2)])
restrictions = BERTSubsIntraPipeline.extract_restrictions_from_ontology(onto=self.tgt_onto)
print('restrictions in the target ontology: %d' % len(restrictions))
else:
warnings.warn('Unknown subsumption type %s' % config.subsumption_type)
sys.exit(0)
print('Positive train subsumptions from the source/target ontology: %d/%d' % (
len(src_subsumptions0), len(tgt_subsumptions0)))
src_tr = self.src_sampler.generate_samples(subsumptions=src_subsumptions0)
tgt_tr = self.tgt_sampler.generate_samples(subsumptions=tgt_subsumptions0)
else:
src_tr, tgt_tr = [], []
if config.train_subsumption_file is None or config.train_subsumption_file == 'None':
tr = src_tr + tgt_tr
else:
train_subsumptions = read_subsumptions(config.train_subsumption_file)
tr = self.inter_ontology_sampling(subsumptions=train_subsumptions, pos_dup=config.fine_tune.train_pos_dup,
neg_dup=config.fine_tune.train_neg_dup)
tr = tr + src_tr + tgt_tr
if len(tr) == 0:
warnings.warn('No training samples extracted')
if config.fine_tune.do_fine_tune:
sys.exit(0)
end_time = datetime.datetime.now()
print('data pre-processing costs %.1f minutes' % ((end_time - start_time).seconds / 60))
start_time = datetime.datetime.now()
torch.cuda.empty_cache()
bert_trainer = BERTSubsumptionClassifierTrainer(config.fine_tune.pretrained, train_data=tr,
val_data=tr[0:int(len(tr) / 5)],
max_length=config.prompt.max_length,
early_stop=config.fine_tune.early_stop)
epoch_steps = len(bert_trainer.tra) // config.fine_tune.batch_size # total steps of an epoch
logging_steps = int(epoch_steps * 0.02) if int(epoch_steps * 0.02) > 0 else 5
eval_steps = 5 * logging_steps
training_args = TrainingArguments(
output_dir=config.fine_tune.output_dir,
num_train_epochs=config.fine_tune.num_epochs,
per_device_train_batch_size=config.fine_tune.batch_size,
per_device_eval_batch_size=config.fine_tune.batch_size,
warmup_ratio=config.fine_tune.warm_up_ratio,
weight_decay=0.01,
logging_steps=logging_steps,
logging_dir=f"{config.fine_tune.output_dir}/tb",
eval_steps=eval_steps,
evaluation_strategy="steps",
do_train=True,
do_eval=True,
save_steps=eval_steps,
load_best_model_at_end=True,
save_total_limit=1,
metric_for_best_model="accuracy",
greater_is_better=True
)
if config.fine_tune.do_fine_tune and (config.prompt.prompt_type == 'traversal' or (
config.prompt.prompt_type == 'path' and config.prompt.use_sub_special_token)):
bert_trainer.add_special_tokens(['<SUB>'])
bert_trainer.train(train_args=training_args, do_fine_tune=config.fine_tune.do_fine_tune)
if config.fine_tune.do_fine_tune:
bert_trainer.trainer.save_model(
output_dir=os.path.join(config.fine_tune.output_dir, 'fine-tuned-checkpoint'))
print('fine-tuning done, fine-tuned model saved')
else:
print('pretrained or fine-tuned model loaded.')
end_time = datetime.datetime.now()
print('Fine-tuning costs %.1f minutes' % ((end_time - start_time).seconds / 60))
bert_trainer.model.eval()
self.device = torch.device(f"cuda") if torch.cuda.is_available() else torch.device("cpu")
bert_trainer.model.to(self.device)
self.tokenize = lambda x: bert_trainer.tokenizer(x, max_length=config.prompt.max_length, truncation=True,
padding=True, return_tensors="pt")
softmax = torch.nn.Softmax(dim=1)
self.classifier = lambda x: softmax(bert_trainer.model(**x).logits)[:, 1]
if valid_subsumptions is not None:
self.evaluate(target_subsumptions=valid_subsumptions, test_type='valid')
if test_subsumptions is not None:
if config.test_type == 'evaluation':
self.evaluate(target_subsumptions=test_subsumptions, test_type='test')
elif config.test_type == 'prediction':
self.predict(target_subsumptions=test_subsumptions)
else:
warnings.warn("Unknown test_type: %s" % config.test_type)
print('\n ------------------------- done! ---------------------------\n\n\n')
def inter_ontology_sampling(self, subsumptions: List[List], pos_dup: int = 1, neg_dup: int = 1):
        r"""Transform inter-ontology subsumptions into two-string training samples.
Args:
subsumptions (List[List]): A list of subsumptions; each subsumption is composed of two IRIs.
pos_dup (int): Positive sample duplication.
neg_dup (int): Negative sample duplication.
"""
pos_samples = list()
for subs in subsumptions:
sub_strs = self.src_sampler.subclass_to_strings(subcls=subs[0])
sup_strs = self.tgt_sampler.supclass_to_strings(supcls=subs[1],
subsumption_type=self.config.subsumption_type)
for sub_str in sub_strs:
for sup_str in sup_strs:
pos_samples.append([sub_str, sup_str, 1])
pos_samples = pos_dup * pos_samples
neg_subsumptions = list()
for subs in subsumptions:
for _ in range(neg_dup):
neg_c = self.tgt_sampler.get_negative_sample(subclass_iri=subs[1],
subsumption_type=self.config.subsumption_type)
neg_subsumptions.append([subs[0], neg_c])
neg_samples = list()
for subs in neg_subsumptions:
sub_strs = self.src_sampler.subclass_to_strings(subcls=subs[0])
sup_strs = self.tgt_sampler.supclass_to_strings(supcls=subs[1],
subsumption_type=self.config.subsumption_type)
for sub_str in sub_strs:
for sup_str in sup_strs:
neg_samples.append([sub_str, sup_str, 0])
if len(neg_samples) < len(pos_samples):
neg_samples = neg_samples + [random.choice(neg_samples) for _ in range(len(pos_samples) - len(neg_samples))]
if len(neg_samples) > len(pos_samples):
pos_samples = pos_samples + [random.choice(pos_samples) for _ in range(len(neg_samples) - len(pos_samples))]
print('training mappings, pos_samples: %d, neg_samples: %d' % (len(pos_samples), len(neg_samples)))
all_samples = [s for s in pos_samples + neg_samples if s[0] != '' and s[1] != '']
return all_samples
def inter_ontology_subsumption_to_sample(self, subsumption: List):
r"""Transform an inter ontology subsumption into a sample (a two-string list).
Args:
subsumption (List): a subsumption composed of two IRIs.
"""
subcls, supcls = subsumption[0], subsumption[1]
substrs = self.src_sampler.subclass_to_strings(subcls=subcls)
supstrs = self.tgt_sampler.supclass_to_strings(supcls=supcls, subsumption_type='named_class')
samples = list()
for substr in substrs:
for supstr in supstrs:
samples.append([substr, supstr])
return samples
def score(self, samples):
r"""Score the samples with the classifier.
Args:
samples (List[List]): Each item is a list with two strings (input).
"""
sample_size = len(samples)
scores = np.zeros(sample_size)
batch_num = math.ceil(sample_size / self.config.evaluation.batch_size)
for i in range(batch_num):
j = (i + 1) * self.config.evaluation.batch_size \
if (i + 1) * self.config.evaluation.batch_size <= sample_size else sample_size
inputs = self.tokenize(samples[i * self.config.evaluation.batch_size:j])
inputs.to(self.device)
with torch.no_grad():
batch_scores = self.classifier(inputs)
scores[i * self.config.evaluation.batch_size:j] = batch_scores.cpu().numpy()
return scores
def evaluate(self, target_subsumptions: List[List], test_type: str = 'test'):
r"""Test and calculate the metrics according to a given list of subsumptions.
Args:
            target_subsumptions (List[List]): A list of test cases; each is a list of the form
                `(subclass_iri, gt_superclass, candidate_2, ...)`, where the elements from index 1 onwards are the candidate super-classes to be ranked.
test_type (str): `"test"` or `"valid"`.
"""
MRR_sum, hits1_sum, hits5_sum, hits10_sum = 0, 0, 0, 0
MRR, Hits1, Hits5, Hits10 = 0, 0, 0, 0
size_sum, size_n = 0, 0
for k0, test in enumerate(target_subsumptions):
subcls, gt = test[0], test[1]
candidates = test[1:]
candidate_subsumptions = [[subcls, c] for c in candidates]
candidate_scores = np.zeros(len(candidate_subsumptions))
for k1, candidate_subsumption in enumerate(candidate_subsumptions):
samples = self.inter_ontology_subsumption_to_sample(subsumption=candidate_subsumption)
size_sum += len(samples)
size_n += 1
scores = self.score(samples=samples)
candidate_scores[k1] = np.average(scores)
sorted_indexes = np.argsort(candidate_scores)[::-1]
sorted_classes = [candidates[i] for i in sorted_indexes]
rank = sorted_classes.index(gt) + 1
MRR_sum += 1.0 / rank
hits1_sum += 1 if gt in sorted_classes[:1] else 0
hits5_sum += 1 if gt in sorted_classes[:5] else 0
hits10_sum += 1 if gt in sorted_classes[:10] else 0
num = k0 + 1
MRR, Hits1, Hits5, Hits10 = MRR_sum / num, hits1_sum / num, hits5_sum / num, hits10_sum / num
if num % 500 == 0:
print('\n%d tested, MRR: %.3f, Hits@1: %.3f, Hits@5: %.3f, Hits@10: %.3f\n' % (
num, MRR, Hits1, Hits5, Hits10))
print('\n[%s], MRR: %.3f, Hits@1: %.3f, Hits@5: %.3f, Hits@10: %.3f\n' % (test_type, MRR, Hits1, Hits5, Hits10))
print('%.2f samples per testing subsumption' % (size_sum / size_n))
def predict(self, target_subsumptions: List[List]):
r"""Predict a score for each given subsumption.
The scores will be saved in `test_subsumption_scores.csv`.
Args:
target_subsumptions (List[List]): Each item is a list with the first element as the sub-class,
and the remaining elements as n candidate super-classes.
"""
out_lines = []
for test in target_subsumptions:
subcls, candidates = test[0], test[1:]
candidate_subsumptions = [[subcls, c] for c in candidates]
candidate_scores = []
for candidate_subsumption in candidate_subsumptions:
samples = self.inter_ontology_subsumption_to_sample(subsumption=candidate_subsumption)
scores = self.score(samples=samples)
candidate_scores.append(np.average(scores))
out_lines.append(','.join([str(i) for i in candidate_scores]))
out_file = 'test_subsumption_scores.csv'
with open(out_file, 'w') as f:
for line in out_lines:
f.write('%s\n' % line)
print('Predicted subsumption scores are saved to %s' % out_file)
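# A sketch of the comma-separated subsumption files this pipeline reads (IRIs are
# placeholders):
#
#   train_subsumption_file:            sub_class_iri,super_class_iri
#   valid/test file (evaluation mode): sub_class_iri,gt_super_class,candidate_2,...
#       evaluate() ranks the candidates in test[1:] and records the rank of test[1].
#   test file (prediction mode):       sub_class_iri,candidate_1,candidate_2,...
#       predict() writes one comma-separated row of scores per line to
#       test_subsumption_scores.csv.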
| 16,303 | 50.432177 | 152 |
py
|
DeepOnto
|
DeepOnto-main/src/deeponto/subs/bertsubs/pipeline_intra.py
|
# Copyright 2023 Jiaoyan Chen. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# @paper(
# "Contextual Semantic Embeddings for Ontology Subsumption Prediction (World Wide Web Journal)",
# )
import os
import sys
import warnings
import random
import torch
import math
import datetime
import numpy as np
from typing import List
from transformers import TrainingArguments
from yacs.config import CfgNode
from deeponto.onto import Ontology
from .bert_classifier import BERTSubsumptionClassifierTrainer
from .text_semantics import SubsumptionSampler
DEFAULT_CONFIG_FILE_INTRA = os.path.join(os.path.dirname(__file__), "default_config_intra.yaml")
class BERTSubsIntraPipeline:
r"""Class for the intra-ontology subsumption prediction setting of BERTSubs.
Attributes:
onto (Ontology): The target ontology.
config (CfgNode): The configuration for BERTSubs.
        sampler (SubsumptionSampler): The subsumption sampler for BERTSubs.
"""
def __init__(self, onto: Ontology, config: CfgNode):
self.onto = onto
self.config = config
self.sampler = SubsumptionSampler(onto=onto, config=config)
start_time = datetime.datetime.now()
n = 0
for k in self.sampler.named_classes:
n += len(self.sampler.iri_label[k])
print(
"%d named classes, %.1f labels per class"
% (len(self.sampler.named_classes), n / len(self.sampler.named_classes))
)
read_subsumptions = lambda file_name: [line.strip().split(",") for line in open(file_name).readlines()]
test_subsumptions = (
None
if config.test_subsumption_file is None or config.test_subsumption_file == "None"
else read_subsumptions(config.test_subsumption_file)
)
# The train/valid subsumptions are not given. They will be extracted from the given ontology:
if config.train_subsumption_file is None or config.train_subsumption_file == "None":
subsumptions0 = self.extract_subsumptions_from_ontology(
onto=onto, subsumption_type=config.subsumption_type
)
random.shuffle(subsumptions0)
valid_size = int(len(subsumptions0) * config.valid.valid_ratio)
train_subsumptions0, valid_subsumptions0 = subsumptions0[valid_size:], subsumptions0[0:valid_size]
train_subsumptions, valid_subsumptions = [], []
if config.subsumption_type == "named_class":
for subs in train_subsumptions0:
c1, c2 = subs.getSubClass(), subs.getSuperClass()
train_subsumptions.append([str(c1.getIRI()), str(c2.getIRI())])
size_sum = 0
for subs in valid_subsumptions0:
c1, c2 = subs.getSubClass(), subs.getSuperClass()
neg_candidates = BERTSubsIntraPipeline.get_test_neg_candidates_named_class(
subclass=c1, gt=c2, max_neg_size=config.valid.max_neg_size, onto=onto
)
size = len(neg_candidates)
size_sum += size
if size > 0:
item = [str(c1.getIRI()), str(c2.getIRI())] + [str(c.getIRI()) for c in neg_candidates]
valid_subsumptions.append(item)
print("\t average neg candidate size in validation: %.2f" % (size_sum / len(valid_subsumptions)))
elif config.subsumption_type == "restriction":
for subs in train_subsumptions0:
c1, c2 = subs.getSubClass(), subs.getSuperClass()
train_subsumptions.append([str(c1.getIRI()), str(c2)])
restrictions = BERTSubsIntraPipeline.extract_restrictions_from_ontology(onto=onto)
print("restrictions: %d" % len(restrictions))
size_sum = 0
for subs in valid_subsumptions0:
c1, c2 = subs.getSubClass(), subs.getSuperClass()
c2_neg = BERTSubsIntraPipeline.get_test_neg_candidates_restriction(
subcls=c1, max_neg_size=config.valid.max_neg_size, restrictions=restrictions, onto=onto
)
size_sum += len(c2_neg)
item = [str(c1.getIRI()), str(c2)] + [str(r) for r in c2_neg]
valid_subsumptions.append(item)
print("valid candidate negative avg. size: %.1f" % (size_sum / len(valid_subsumptions)))
else:
warnings.warn("Unknown subsumption type %s" % config.subsumption_type)
sys.exit(0)
# The train/valid subsumptions are given:
else:
train_subsumptions = read_subsumptions(config.train_subsumption_file)
valid_subsumptions = read_subsumptions(config.valid_subsumption_file)
print("Positive train/valid subsumptions: %d/%d" % (len(train_subsumptions), len(valid_subsumptions)))
tr = self.sampler.generate_samples(subsumptions=train_subsumptions)
va = self.sampler.generate_samples(subsumptions=valid_subsumptions, duplicate=False)
end_time = datetime.datetime.now()
print("data pre-processing costs %.1f minutes" % ((end_time - start_time).seconds / 60))
start_time = datetime.datetime.now()
torch.cuda.empty_cache()
bert_trainer = BERTSubsumptionClassifierTrainer(
config.fine_tune.pretrained,
train_data=tr,
val_data=va,
max_length=config.prompt.max_length,
early_stop=config.fine_tune.early_stop,
)
epoch_steps = len(bert_trainer.tra) // config.fine_tune.batch_size # total steps of an epoch
logging_steps = int(epoch_steps * 0.02) if int(epoch_steps * 0.02) > 0 else 5
eval_steps = 5 * logging_steps
training_args = TrainingArguments(
output_dir=config.fine_tune.output_dir,
num_train_epochs=config.fine_tune.num_epochs,
per_device_train_batch_size=config.fine_tune.batch_size,
per_device_eval_batch_size=config.fine_tune.batch_size,
warmup_ratio=config.fine_tune.warm_up_ratio,
weight_decay=0.01,
logging_steps=logging_steps,
logging_dir=f"{config.fine_tune.output_dir}/tb",
eval_steps=eval_steps,
evaluation_strategy="steps",
do_train=True,
do_eval=True,
save_steps=eval_steps,
load_best_model_at_end=True,
save_total_limit=1,
metric_for_best_model="accuracy",
greater_is_better=True,
)
if config.fine_tune.do_fine_tune and (
config.prompt.prompt_type == "traversal"
or (config.prompt.prompt_type == "path" and config.prompt.use_sub_special_token)
):
bert_trainer.add_special_tokens(["<SUB>"])
bert_trainer.train(train_args=training_args, do_fine_tune=config.fine_tune.do_fine_tune)
if config.fine_tune.do_fine_tune:
bert_trainer.trainer.save_model(
output_dir=os.path.join(config.fine_tune.output_dir, "fine-tuned-checkpoint")
)
print("fine-tuning done, fine-tuned model saved")
else:
print("pretrained or fine-tuned model loaded.")
end_time = datetime.datetime.now()
print("Fine-tuning costs %.1f minutes" % ((end_time - start_time).seconds / 60))
bert_trainer.model.eval()
self.device = torch.device(f"cuda") if torch.cuda.is_available() else torch.device("cpu")
bert_trainer.model.to(self.device)
self.tokenize = lambda x: bert_trainer.tokenizer(
x, max_length=config.prompt.max_length, truncation=True, padding=True, return_tensors="pt"
)
softmax = torch.nn.Softmax(dim=1)
self.classifier = lambda x: softmax(bert_trainer.model(**x).logits)[:, 1]
self.evaluate(target_subsumptions=valid_subsumptions, test_type="valid")
if test_subsumptions is not None:
if config.test_type == "evaluation":
self.evaluate(target_subsumptions=test_subsumptions, test_type="test")
elif config.test_type == "prediction":
self.predict(target_subsumptions=test_subsumptions)
else:
warnings.warn("Unknown test_type: %s" % config.test_type)
print("\n ------------------------- done! ---------------------------\n\n\n")
def score(self, samples: List[List]):
r"""The scoring function based on the fine-tuned BERT classifier.
Args:
samples (List[Tuple]): A list of input sentence pairs to be scored.
"""
sample_size = len(samples)
scores = np.zeros(sample_size)
batch_num = math.ceil(sample_size / self.config.evaluation.batch_size)
for i in range(batch_num):
j = (
(i + 1) * self.config.evaluation.batch_size
if (i + 1) * self.config.evaluation.batch_size <= sample_size
else sample_size
)
inputs = self.tokenize(samples[i * self.config.evaluation.batch_size : j])
inputs.to(self.device)
with torch.no_grad():
batch_scores = self.classifier(inputs)
scores[i * self.config.evaluation.batch_size : j] = batch_scores.cpu().numpy()
return scores
def evaluate(self, target_subsumptions: List[List], test_type: str = "test"):
r"""Test and calculate the metrics for a given list of subsumption pairs.
Args:
            target_subsumptions (List[List]): A list of test subsumptions, each of which is a list whose first
                element is the sub-class, second element is the ground-truth super-class, and remaining
                elements are negative candidate super-classes.
            test_type (str): `"test"` for testing or `"valid"` for validation.
"""
MRR_sum, hits1_sum, hits5_sum, hits10_sum = 0, 0, 0, 0
MRR, Hits1, Hits5, Hits10 = 0, 0, 0, 0
size_sum, size_n = 0, 0
for k0, test in enumerate(target_subsumptions):
subcls, gt = test[0], test[1]
candidates = test[1:]
candidate_subsumptions = [[subcls, c] for c in candidates]
candidate_scores = np.zeros(len(candidate_subsumptions))
for k1, candidate_subsumption in enumerate(candidate_subsumptions):
samples = self.sampler.subsumptions_to_samples(subsumptions=[candidate_subsumption], sample_label=None)
size_sum += len(samples)
size_n += 1
scores = self.score(samples=samples)
candidate_scores[k1] = np.average(scores)
sorted_indexes = np.argsort(candidate_scores)[::-1]
sorted_classes = [candidates[i] for i in sorted_indexes]
rank = sorted_classes.index(gt) + 1
MRR_sum += 1.0 / rank
hits1_sum += 1 if gt in sorted_classes[:1] else 0
hits5_sum += 1 if gt in sorted_classes[:5] else 0
hits10_sum += 1 if gt in sorted_classes[:10] else 0
num = k0 + 1
MRR, Hits1, Hits5, Hits10 = MRR_sum / num, hits1_sum / num, hits5_sum / num, hits10_sum / num
if num % 500 == 0:
print(
"\n%d tested, MRR: %.3f, Hits@1: %.3f, Hits@5: %.3f, Hits@10: %.3f\n"
% (num, MRR, Hits1, Hits5, Hits10)
)
print(
"\n[%s], MRR: %.3f, Hits@1: %.3f, Hits@5: %.3f, Hits@10: %.3f\n" % (test_type, MRR, Hits1, Hits5, Hits10)
)
print("%.2f samples per testing subsumption" % (size_sum / size_n))
def predict(self, target_subsumptions: List[List]):
r"""Predict a score for each given subsumption in the list.
The scores will be saved in `test_subsumption_scores.csv`.
Args:
target_subsumptions (List[List]): Each item is a list where the first element is a fixed ontology class $C$,
and the remaining elements are potential (candidate) super-classes of $C$.
"""
out_lines = []
for test in target_subsumptions:
subcls, candidates = test[0], test[1:]
candidate_subsumptions = [[subcls, c] for c in candidates]
candidate_scores = []
for candidate_subsumption in candidate_subsumptions:
samples = self.sampler.subsumptions_to_samples(subsumptions=[candidate_subsumption], sample_label=None)
scores = self.score(samples=samples)
candidate_scores.append(np.average(scores))
out_lines.append(",".join([str(i) for i in candidate_scores]))
out_file = "test_subsumption_scores.csv"
with open(out_file, "w") as f:
for line in out_lines:
f.write("%s\n" % line)
print("Predicted subsumption scores are saved to %s" % out_file)
@staticmethod
def extract_subsumptions_from_ontology(onto: Ontology, subsumption_type: str):
r"""Extract target subsumptions from a given ontology.
Args:
onto (Ontology): The target ontology.
subsumption_type (str): the type of subsumptions, options are `"named_class"` or `"restriction"`.
"""
all_subsumptions = onto.get_subsumption_axioms(entity_type="Classes")
subsumptions = []
if subsumption_type == "restriction":
for subs in all_subsumptions:
if (
not onto.check_deprecated(owl_object=subs.getSubClass())
and not onto.check_named_entity(owl_object=subs.getSuperClass())
and SubsumptionSampler.is_basic_existential_restriction(
complex_class_str=str(subs.getSuperClass())
)
):
subsumptions.append(subs)
elif subsumption_type == "named_class":
for subs in all_subsumptions:
c1, c2 = subs.getSubClass(), subs.getSuperClass()
if (
onto.check_named_entity(owl_object=c1)
and not onto.check_deprecated(owl_object=c1)
and onto.check_named_entity(owl_object=c2)
and not onto.check_deprecated(owl_object=c2)
):
subsumptions.append(subs)
else:
warnings.warn("\nUnknown subsumption type: %s\n" % subsumption_type)
return subsumptions
@staticmethod
def extract_restrictions_from_ontology(onto: Ontology):
r"""Extract basic existential restriction from an ontology.
Args:
onto (Ontology): The target ontology.
Returns:
restrictions (List): a list of existential restrictions.
"""
restrictions = []
for complexC in onto.get_asserted_complex_classes():
if SubsumptionSampler.is_basic_existential_restriction(complex_class_str=str(complexC)):
restrictions.append(complexC)
return restrictions
@staticmethod
def get_test_neg_candidates_restriction(subcls, max_neg_size, restrictions, onto):
"""Get a list of negative candidate class restrictions for testing."""
neg_restrictions = list()
n = max_neg_size * 2 if max_neg_size * 2 <= len(restrictions) else len(restrictions)
for r in random.sample(restrictions, n):
if not onto.reasoner.check_subsumption(sub_entity=subcls, super_entity=r):
neg_restrictions.append(r)
if len(neg_restrictions) >= max_neg_size:
break
return neg_restrictions
@staticmethod
def get_test_neg_candidates_named_class(subclass, gt, max_neg_size, onto, max_depth=3, max_width=8):
"""Get a list of negative candidate named classes for testing."""
all_nebs, seeds = set(), [gt]
depth = 1
while depth <= max_depth:
new_seeds = set()
for seed in seeds:
nebs = set()
for nc_iri in onto.reasoner.get_inferred_sub_entities(
seed, direct=True
) + onto.reasoner.get_inferred_super_entities(seed, direct=True):
nc = onto.owl_classes[nc_iri]
if onto.check_named_entity(owl_object=nc) and not onto.check_deprecated(owl_object=nc):
nebs.add(nc)
new_seeds = new_seeds.union(nebs)
all_nebs = all_nebs.union(nebs)
depth += 1
seeds = random.sample(new_seeds, max_width) if len(new_seeds) > max_width else new_seeds
all_nebs = (
all_nebs
- {onto.owl_classes[iri] for iri in onto.reasoner.get_inferred_super_entities(subclass, direct=False)}
- {subclass}
)
if len(all_nebs) > max_neg_size:
return random.sample(all_nebs, max_neg_size)
else:
return list(all_nebs)
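# --- Hedged usage sketch (not part of the original module) ---
# A minimal sketch of how this intra-ontology pipeline might be driven, assuming a
# hypothetical local ontology file "foodon.owl" and that the default YAML can be
# merged into a fresh yacs CfgNode; adjust the path and config fields to your setup.
if __name__ == "__main__":
    config = CfgNode(new_allowed=True)
    config.merge_from_file(DEFAULT_CONFIG_FILE_INTRA)
    onto = Ontology("foodon.owl")  # hypothetical path to a local OWL file
    # training, validation and evaluation/prediction are all triggered inside __init__
    pipeline = BERTSubsIntraPipeline(onto=onto, config=config)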
| 17,435 | 44.76378 | 120 |
py
|
DeepOnto
|
DeepOnto-main/src/deeponto/subs/bertsubs/__init__.py
|
# Copyright 2023 Jiaoyan Chen. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .text_semantics import SubsumptionSampler
from .pipeline_intra import BERTSubsIntraPipeline, DEFAULT_CONFIG_FILE_INTRA
from .pipeline_inter import BERTSubsInterPipeline, DEFAULT_CONFIG_FILE_INTER
| 796 | 45.882353 | 76 |
py
|
DeepOnto
|
DeepOnto-main/src/deeponto/subs/bertsubs/text_semantics.py
|
# Copyright 2023 Jiaoyan Chen. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# @paper(
# "Contextual Semantic Embeddings for Ontology Subsumption Prediction (World Wide Web Journal)",
# )
import random
import sys
import re
import warnings
from typing import List, Union
from deeponto.onto import Ontology
from deeponto.onto import OntologyVerbaliser
from yacs.config import CfgNode
class SubsumptionSampler:
r"""Class for sampling functions for training the subsumption prediction model.
Attributes:
onto (Ontology): The target ontology.
config (CfgNode): The loaded configuration.
named_classes (Set[str]): IRIs of named classes that are not deprecated.
        iri_label (Dict[str, List]): key -- class IRIs from `named_classes`, value -- a list of labels.
        restrictionObjects (Set[OWLClassExpression]): Basic existential restrictions that appear in the ontology.
        restrictions (Set[str]): Strings of basic existential restrictions corresponding to `restrictionObjects`.
        restriction_label (Dict[str, List]): key -- existential restriction string, value -- a list of existential restriction labels.
        verb (OntologyVerbaliser): The object for verbalisation.
"""
def __init__(self, onto: Ontology, config: CfgNode):
self.onto = onto
self.config = config
self.named_classes = self.extract_named_classes(onto=onto)
self.iri_label = dict()
for iri in self.named_classes:
self.iri_label[iri] = []
for p in config.label_property:
strings = onto.get_owl_object_annotations(
owl_object=onto.get_owl_object_from_iri(iri),
annotation_property_iri=p,
annotation_language_tag=None,
apply_lowercasing=False,
normalise_identifiers=False,
)
for s in strings:
if s not in self.iri_label[iri]:
self.iri_label[iri].append(s)
self.restrictionObjects = set()
self.restrictions = set()
self.restriction_label = dict()
self.verb = OntologyVerbaliser(onto=onto)
for complexC in onto.get_asserted_complex_classes():
s = str(complexC)
self.restriction_label[s] = []
if self.is_basic_existential_restriction(complex_class_str=s):
self.restrictionObjects.add(complexC)
self.restrictions.add(s)
self.restriction_label[s].append(self.verb.verbalise_class_expression(complexC).verbal)
@staticmethod
def is_basic_existential_restriction(complex_class_str: str):
"""Determine if a complex class expression is a basic existential restriction."""
IRI = "<https?:\\/\\/(?:www\\.)?[-a-zA-Z0-9@:%._\\+~#=]{1,256}\\.[a-zA-Z0-9()]{1,6}\\b(?:[-a-zA-Z0-9()@:%_\\+.~#?&\\/=]*)>"
p = rf"ObjectSomeValuesFrom\({IRI}\s{IRI}\)"
if re.match(p, complex_class_str):
return True
else:
return False
@staticmethod
def extract_named_classes(onto: Ontology):
named_classes = set()
for iri in onto.owl_classes:
if not onto.check_deprecated(owl_object=onto.owl_classes[iri]):
named_classes.add(iri)
return named_classes
def generate_samples(self, subsumptions: List[List], duplicate: bool = True):
r"""Generate text samples from subsumptions.
Args:
            subsumptions (List[List]): A list of subsumptions, each of which is a two-component list `(sub_class_iri, super_class_iri_or_str)`.
duplicate (bool): `True` -- duplicate the positive and negative samples, `False` -- do not duplicate.
Returns:
(List[List]): A list of samples, each element is a triple
in the form of `(sub_class_string, super_class_string, label_index)`.
"""
if duplicate:
pos_dup, neg_dup = self.config.fine_tune.train_pos_dup, self.config.fine_tune.train_neg_dup
else:
pos_dup, neg_dup = 1, 1
neg_subsumptions = list()
for subs in subsumptions:
c1 = subs[0]
for _ in range(neg_dup):
neg_c = self.get_negative_sample(subclass_iri=c1, subsumption_type=self.config.subsumption_type)
if neg_c is not None:
neg_subsumptions.append([c1, neg_c])
pos_samples = self.subsumptions_to_samples(subsumptions=subsumptions, sample_label=1)
pos_samples = pos_dup * pos_samples
neg_samples = self.subsumptions_to_samples(subsumptions=neg_subsumptions, sample_label=0)
if len(neg_samples) < len(pos_samples):
neg_samples = neg_samples + [
random.choice(neg_samples) for _ in range(len(pos_samples) - len(neg_samples))
]
if len(neg_samples) > len(pos_samples):
pos_samples = pos_samples + [
random.choice(pos_samples) for _ in range(len(neg_samples) - len(pos_samples))
]
print("pos_samples: %d, neg_samples: %d" % (len(pos_samples), len(neg_samples)))
all_samples = [s for s in pos_samples + neg_samples if s[0] != "" and s[1] != ""]
random.shuffle(all_samples)
return all_samples
def subsumptions_to_samples(self, subsumptions: List[List], sample_label: Union[int, None]):
r"""Transform subsumptions into samples of strings.
Args:
subsumptions (List[List]): The given subsumptions.
sample_label (Union[int, None]): `1` (positive), `0` (negative), `None` (no label).
Returns:
(List[List]): A list of samples, each element is a triple
in the form of `(sub_class_string, super_class_string, label_index)`.
"""
local_samples = list()
for subs in subsumptions:
subcls, supcls = subs[0], subs[1]
substrs = self.iri_label[subcls] if subcls in self.iri_label and len(self.iri_label[subcls]) > 0 else [""]
if self.config.subsumption_type == "named_class":
supstrs = self.iri_label[supcls] if supcls in self.iri_label and len(self.iri_label[supcls]) else [""]
else:
if supcls in self.restriction_label and len(self.restriction_label[supcls]) > 0:
supstrs = self.restriction_label[supcls]
else:
supstrs = [self.verb.verbalise_class_expression(supcls).verbal]
if self.config.use_one_label:
substrs, supstrs = substrs[0:1], supstrs[0:1]
if self.config.prompt.prompt_type == "isolated":
for substr in substrs:
for supstr in supstrs:
local_samples.append([substr, supstr])
elif self.config.prompt.prompt_type == "traversal":
subs_list_strs = set()
for _ in range(self.config.prompt.context_dup):
context_sub, no_duplicate = self.traversal_subsumptions(
cls=subcls,
hop=self.config.prompt.prompt_hop,
direction="subclass",
max_subsumptions=self.config.prompt.prompt_max_subsumptions,
)
subs_list = [self.named_subsumption_to_str(subsum) for subsum in context_sub]
subs_list_str = " <SEP> ".join(subs_list)
subs_list_strs.add(subs_list_str)
if no_duplicate:
break
if self.config.subsumption_type == "named_class":
sups_list_strs = set()
for _ in range(self.config.prompt.context_dup):
context_sup, no_duplicate = self.traversal_subsumptions(
cls=supcls,
hop=self.config.prompt.prompt_hop,
direction="supclass",
max_subsumptions=self.config.prompt.prompt_max_subsumptions,
)
sups_list = [self.named_subsumption_to_str(subsum) for subsum in context_sup]
sups_list_str = " <SEP> ".join(sups_list)
sups_list_strs.add(sups_list_str)
if no_duplicate:
break
else:
sups_list_strs = set(supstrs)
for subs_list_str in subs_list_strs:
for substr in substrs:
s1 = substr + " <SEP> " + subs_list_str
for sups_list_str in sups_list_strs:
for supstr in supstrs:
s2 = supstr + " <SEP> " + sups_list_str
local_samples.append([s1, s2])
elif self.config.prompt.prompt_type == "path":
sep_token = "<SUB>" if self.config.prompt.use_sub_special_token else "<SEP>"
s1_set = set()
for _ in range(self.config.prompt.context_dup):
context_sub, no_duplicate = self.path_subsumptions(
cls=subcls, hop=self.config.prompt.prompt_hop, direction="subclass"
)
if len(context_sub) > 0:
s1 = ""
for i in range(len(context_sub)):
subsum = context_sub[len(context_sub) - i - 1]
subc = subsum[0]
s1 += "%s %s " % (
self.iri_label[subc][0]
if subc in self.iri_label and len(self.iri_label[subc]) > 0
else "",
sep_token,
)
for substr in substrs:
s1_set.add(s1 + substr)
else:
for substr in substrs:
s1_set.add("%s %s" % (sep_token, substr))
if no_duplicate:
break
if self.config.subsumption_type == "named_class":
s2_set = set()
for _ in range(self.config.prompt.context_dup):
context_sup, no_duplicate = self.path_subsumptions(
cls=supcls, hop=self.config.prompt.prompt_hop, direction="supclass"
)
if len(context_sup) > 0:
s2 = ""
for subsum in context_sup:
supc = subsum[1]
s2 += " %s %s" % (
sep_token,
self.iri_label[supc][0]
if supc in self.iri_label and len(self.iri_label[supc]) > 0
else "",
)
for supstr in supstrs:
s2_set.add(supstr + s2)
else:
for supstr in supstrs:
s2_set.add("%s %s" % (supstr, sep_token))
if no_duplicate:
break
else:
s2_set = set(supstrs)
for s1 in s1_set:
for s2 in s2_set:
local_samples.append([s1, s2])
else:
print(f"unknown context type {self.config.prompt.prompt_type}")
sys.exit(0)
if sample_label is not None:
for i in range(len(local_samples)):
local_samples[i].append(sample_label)
return local_samples
def get_negative_sample(self, subclass_iri: str, subsumption_type: str = "named_class"):
r"""Given a named subclass, get a negative class for a negative subsumption.
Args:
subclass_iri (str): IRI of a given sub-class.
subsumption_type (str): `named_class` or `restriction`.
"""
subclass = self.onto.get_owl_object_from_iri(iri=subclass_iri)
if subsumption_type == "named_class":
ancestors = set(self.onto.reasoner.get_inferred_super_entities(subclass, direct=False))
neg_c = random.sample(self.named_classes - ancestors, 1)[0]
return neg_c
else:
for neg_c in random.sample(self.restrictionObjects, 5):
if not self.onto.reasoner.check_subsumption(sub_entity=subclass, super_entity=neg_c):
return str(neg_c)
return None
def named_subsumption_to_str(self, subsum: List):
r"""Transform a named subsumption into string with `<SUB>` and classes' labels.
Args:
            subsum (List[str]): A subsumption pair in the form of `[sub_class_iri, super_class_iri]`.
"""
subc, supc = subsum[0], subsum[1]
subs = self.iri_label[subc][0] if subc in self.iri_label and len(self.iri_label[subc]) > 0 else ""
sups = self.iri_label[supc][0] if supc in self.iri_label and len(self.iri_label[supc]) > 0 else ""
return "%s <SUB> %s" % (subs, sups)
def subclass_to_strings(self, subcls):
r"""Transform a sub-class into strings (with the path or traversal context template).
Args:
subcls (str): IRI of the sub-class.
"""
substrs = self.iri_label[subcls] if subcls in self.iri_label and len(self.iri_label[subcls]) > 0 else [""]
if self.config.use_one_label:
substrs = substrs[0:1]
if self.config.prompt.prompt_type == "isolated":
return substrs
elif self.config.prompt.prompt_type == "traversal":
subs_list_strs = set()
for _ in range(self.config.prompt.context_dup):
context_sub, no_duplicate = self.traversal_subsumptions(
cls=subcls,
hop=self.config.prompt.prompt_hop,
direction="subclass",
max_subsumptions=self.config.prompt.prompt_max_subsumptions,
)
subs_list = [self.named_subsumption_to_str(subsum) for subsum in context_sub]
subs_list_str = " <SEP> ".join(subs_list)
subs_list_strs.add(subs_list_str)
if no_duplicate:
break
strs = list()
for subs_list_str in subs_list_strs:
for substr in substrs:
s1 = substr + " <SEP> " + subs_list_str
strs.append(s1)
return strs
elif self.config.prompt.prompt_type == "path":
sep_token = "<SUB>" if self.config.prompt.use_sub_special_token else "<SEP>"
s1_set = set()
for _ in range(self.config.prompt.context_dup):
context_sub, no_duplicate = self.path_subsumptions(
cls=subcls, hop=self.config.prompt.prompt_hop, direction="subclass"
)
if len(context_sub) > 0:
s1 = ""
for i in range(len(context_sub)):
subsum = context_sub[len(context_sub) - i - 1]
subc = subsum[0]
s1 += "%s %s " % (
self.iri_label[subc][0]
if subc in self.iri_label and len(self.iri_label[subc]) > 0
else "",
sep_token,
)
for substr in substrs:
s1_set.add(s1 + substr)
else:
for substr in substrs:
s1_set.add("%s %s" % (sep_token, substr))
if no_duplicate:
break
return list(s1_set)
def supclass_to_strings(self, supcls: str, subsumption_type: str = "named_class"):
r"""Transform a super-class into strings (with the path or traversal context template if the subsumption type is `"named_class"`).
Args:
supcls (str): IRI of the super-class.
subsumption_type (str): The type of the subsumption.
"""
if subsumption_type == "named_class":
supstrs = self.iri_label[supcls] if supcls in self.iri_label and len(self.iri_label[supcls]) else [""]
else:
if supcls in self.restriction_label and len(self.restriction_label[supcls]) > 0:
supstrs = self.restriction_label[supcls]
else:
warnings.warn("Warning: %s has no descriptions" % supcls)
supstrs = [""]
if self.config.use_one_label:
if subsumption_type == "named_class":
supstrs = supstrs[0:1]
if self.config.prompt.prompt_type == "isolated":
return supstrs
elif self.config.prompt.prompt_type == "traversal":
if subsumption_type == "named_class":
sups_list_strs = set()
for _ in range(self.config.prompt.context_dup):
context_sup, no_duplicate = self.traversal_subsumptions(
cls=supcls,
hop=self.config.prompt.prompt_hop,
direction="supclass",
max_subsumptions=self.config.prompt.prompt_max_subsumptions,
)
sups_list = [self.named_subsumption_to_str(subsum) for subsum in context_sup]
sups_list_str = " <SEP> ".join(sups_list)
sups_list_strs.add(sups_list_str)
if no_duplicate:
break
else:
sups_list_strs = set(supstrs)
strs = list()
for sups_list_str in sups_list_strs:
for supstr in supstrs:
s2 = supstr + " <SEP> " + sups_list_str
strs.append(s2)
return strs
elif self.config.prompt.prompt_type == "path":
sep_token = "<SUB>" if self.config.prompt.use_sub_special_token else "<SEP>"
if subsumption_type == "named_class":
s2_set = set()
for _ in range(self.config.prompt.context_dup):
context_sup, no_duplicate = self.path_subsumptions(
cls=supcls, hop=self.config.prompt.prompt_hop, direction="supclass"
)
if len(context_sup) > 0:
s2 = ""
for subsum in context_sup:
supc = subsum[1]
s2 += " %s %s" % (
sep_token,
self.iri_label[supc][0]
if supc in self.iri_label and len(self.iri_label[supc]) > 0
else "",
)
for supstr in supstrs:
s2_set.add(supstr + s2)
else:
for supstr in supstrs:
s2_set.add("%s %s" % (supstr, sep_token))
if no_duplicate:
break
else:
s2_set = set(supstrs)
return list(s2_set)
else:
print("unknown context type %s" % self.config.prompt.prompt_type)
sys.exit(0)
def traversal_subsumptions(self, cls: str, hop: int = 1, direction: str = "subclass", max_subsumptions: int = 5):
r"""Given a class, get its subsumptions by traversing the class hierarchy.
        If the class plays the sub-class role in the target subsumption, context subsumptions are collected downwards (towards its descendants).
        If the class plays the super-class role, context subsumptions are collected upwards (towards its ancestors).
Args:
cls (str): IRI of a named class.
hop (int): The depth of the path.
direction (str): `subclass` (downside path) or `supclass` (upside path).
max_subsumptions (int): The maximum number of subsumptions to consider.
"""
subsumptions = list()
seeds = [cls]
d = 1
no_duplicate = True
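        # `no_duplicate` stays True only if the traversal never branches, so callers can stop resampling early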
while d <= hop:
new_seeds = list()
for s in seeds:
if direction == "subclass":
tmp = self.onto.reasoner.get_inferred_sub_entities(
self.onto.get_owl_object_from_iri(iri=s), direct=True
)
if len(tmp) > 1:
no_duplicate = False
random.shuffle(tmp)
for c in tmp:
if not self.onto.check_deprecated(owl_object=self.onto.get_owl_object_from_iri(iri=c)):
subsumptions.append([c, s])
if c not in new_seeds:
new_seeds.append(c)
elif direction == "supclass":
tmp = self.onto.reasoner.get_inferred_super_entities(
self.onto.get_owl_object_from_iri(iri=s), direct=True
)
if len(tmp) > 1:
no_duplicate = False
random.shuffle(tmp)
for c in tmp:
if not self.onto.check_deprecated(owl_object=self.onto.get_owl_object_from_iri(iri=c)):
subsumptions.append([s, c])
if c not in new_seeds:
new_seeds.append(c)
else:
warnings.warn("Unknown direction: %s" % direction)
if len(subsumptions) >= max_subsumptions:
subsumptions = random.sample(subsumptions, max_subsumptions)
break
else:
seeds = new_seeds
random.shuffle(seeds)
d += 1
return subsumptions, no_duplicate
def path_subsumptions(self, cls: str, hop: int = 1, direction: str = "subclass"):
r"""Given a class, get its path subsumptions.
        If the class plays the sub-class role in the target subsumption, the path is built downwards (towards its descendants).
        If the class plays the super-class role, the path is built upwards (towards its ancestors).
Args:
cls (str): IRI of a named class.
hop (int): The depth of the path.
direction (str): `subclass` (downside path) or `supclass` (upside path).
"""
subsumptions = list()
seed = cls
d = 1
no_duplicate = True
while d <= hop:
if direction == "subclass":
tmp = self.onto.reasoner.get_inferred_sub_entities(
self.onto.get_owl_object_from_iri(iri=seed), direct=True
)
if len(tmp) > 1:
no_duplicate = False
end = True
if len(tmp) > 0:
random.shuffle(tmp)
for c in tmp:
if not self.onto.check_deprecated(owl_object=self.onto.get_owl_object_from_iri(iri=c)):
subsumptions.append([c, seed])
seed = c
end = False
break
if end:
break
elif direction == "supclass":
tmp = self.onto.reasoner.get_inferred_super_entities(
self.onto.get_owl_object_from_iri(iri=seed), direct=True
)
if len(tmp) > 1:
no_duplicate = False
end = True
if len(tmp) > 0:
random.shuffle(tmp)
for c in tmp:
if not self.onto.check_deprecated(owl_object=self.onto.get_owl_object_from_iri(iri=c)):
subsumptions.append([seed, c])
seed = c
end = False
break
if end:
break
else:
warnings.warn("Unknown direction: %s" % direction)
d += 1
return subsumptions, no_duplicate
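# --- Hedged illustration (not part of the original module) ---
# A quick, self-contained check of the `is_basic_existential_restriction` pattern
# defined above; the two IRIs are invented purely for the example.
if __name__ == "__main__":
    expr = "ObjectSomeValuesFrom(<http://example.org/hasPart> <http://example.org/Engine>)"
    print(SubsumptionSampler.is_basic_existential_restriction(expr))  # expected: True
    print(SubsumptionSampler.is_basic_existential_restriction("ObjectUnionOf(...)"))  # expected: False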
| 25,122 | 43.702847 | 146 |
py
|
DeepOnto
|
DeepOnto-main/src/deeponto/subs/bertsubs/bert_classifier.py
|
# Copyright 2023 Jiaoyan Chen. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# @paper(
# "Contextual Semantic Embeddings for Ontology Subsumption Prediction (World Wide Web Journal)",
# )
from typing import List
from datasets import Dataset
from sklearn.metrics import accuracy_score
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
EarlyStoppingCallback,
Trainer,
TrainingArguments,
)
class BERTSubsumptionClassifierTrainer:
def __init__(
self,
bert_checkpoint: str,
train_data: List,
val_data: List,
max_length: int = 128,
early_stop: bool = False,
early_stop_patience: int = 10,
):
print(f"initialize BERT for Binary Classification from the Pretrained BERT model at: {bert_checkpoint} ...")
# BERT
self.model = AutoModelForSequenceClassification.from_pretrained(bert_checkpoint)
self.tokenizer = AutoTokenizer.from_pretrained(bert_checkpoint)
self.trainer = None
self.max_length = max_length
self.tra = self.load_dataset(train_data, max_length=self.max_length, count_token_size=True)
self.val = self.load_dataset(val_data, max_length=self.max_length, count_token_size=True)
print(f"text max length: {self.max_length}")
print(f"data files loaded with sizes:")
print(f"\t[# Train]: {len(self.tra)}, [# Val]: {len(self.val)}")
# early stopping
self.early_stop = early_stop
self.early_stop_patience = early_stop_patience
def add_special_tokens(self, tokens: List):
r"""Add additional special tokens into the tokenizer's vocab.
Args:
tokens (List[str]): additional tokens to add, e.g., `["<SUB>","<EOA>","<EOC>"]`
"""
special_tokens_dict = {"additional_special_tokens": tokens}
self.tokenizer.add_special_tokens(special_tokens_dict)
self.model.resize_token_embeddings(len(self.tokenizer))
def train(self, train_args: TrainingArguments, do_fine_tune: bool = True):
r"""Initiate the Huggingface trainer with input arguments and start training.
Args:
train_args (TrainingArguments): Arguments for training.
do_fine_tune (bool): `False` means loading the checkpoint without training. Defaults to `True`.
"""
self.trainer = Trainer(
model=self.model,
args=train_args,
train_dataset=self.tra,
eval_dataset=self.val,
compute_metrics=self.compute_metrics,
tokenizer=self.tokenizer,
)
if self.early_stop:
self.trainer.add_callback(EarlyStoppingCallback(early_stopping_patience=self.early_stop_patience))
if do_fine_tune:
self.trainer.train()
@staticmethod
def compute_metrics(pred):
"""Auxiliary function to add accurate metric into evaluation.
"""
labels = pred.label_ids
preds = pred.predictions.argmax(-1)
acc = accuracy_score(labels, preds)
return {"accuracy": acc}
def load_dataset(self, data: List, max_length: int = 512, count_token_size: bool = False) -> Dataset:
r"""Load a Huggingface dataset from a list of samples.
Args:
data (List[Tuple]): Data samples in a list.
max_length (int): Maximum length of the input sequence.
count_token_size (bool): Whether or not to count the token sizes of the data. Defaults to `False`.
"""
# data_df = pd.DataFrame(data, columns=["sent1", "sent2", "labels"])
# dataset = Dataset.from_pandas(data_df)
def iterate():
for sample in data:
yield {"sent1": sample[0], "sent2": sample[1], "labels": sample[2]}
dataset = Dataset.from_generator(iterate)
if count_token_size:
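            # report token-length statistics so a suitable `max_length` can be chosen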
tokens = self.tokenizer(dataset["sent1"], dataset["sent2"])
l_sum, num_128, num_256, num_512, l_max = 0, 0, 0, 0, 0
for item in tokens["input_ids"]:
l = len(item)
l_sum += l
if l <= 128:
num_128 += 1
if l <= 256:
num_256 += 1
if l <= 512:
num_512 += 1
if l > l_max:
l_max = l
print("average token size: %.2f" % (l_sum / len(tokens["input_ids"])))
print("ratio of token size <= 128: %.3f" % (num_128 / len(tokens["input_ids"])))
print("ratio of token size <= 256: %.3f" % (num_256 / len(tokens["input_ids"])))
print("ratio of token size <= 512: %.3f" % (num_512 / len(tokens["input_ids"])))
print("max token size: %d" % l_max)
dataset = dataset.map(
lambda examples: self.tokenizer(
examples["sent1"], examples["sent2"], max_length=max_length, truncation=True
),
batched=True,
num_proc=1,
)
return dataset
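# --- Hedged usage sketch (not part of the original module) ---
# A minimal sketch of fine-tuning this classifier on toy (sent1, sent2, label) triples.
# The checkpoint name, output directory and the tiny data are assumptions; real
# training uses the samples produced by SubsumptionSampler and the settings in the
# BERTSubs pipelines.
if __name__ == "__main__":
    toy_train = [["aspirin", "drug", 1], ["aspirin", "organism", 0]] * 8
    toy_val = [["ibuprofen", "drug", 1], ["ibuprofen", "organism", 0]]
    trainer = BERTSubsumptionClassifierTrainer(
        "bert-base-uncased", train_data=toy_train, val_data=toy_val, max_length=64
    )
    args = TrainingArguments(
        output_dir="./toy_output",
        num_train_epochs=1,
        per_device_train_batch_size=4,
        per_device_eval_batch_size=4,
        logging_steps=2,
        eval_steps=2,
        save_steps=2,
        evaluation_strategy="steps",
    )
    trainer.train(train_args=args, do_fine_tune=True)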
| 5,560 | 38.721429 | 116 |
py
|
DeepOnto
|
DeepOnto-main/src/deeponto/onto/ontology.py
|
# Copyright 2021 Yuan He. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import os
from typing import Optional, List, Union
from collections import defaultdict
from yacs.config import CfgNode
import warnings
import itertools
import jpype
from deeponto.utils import TextUtils, Tokenizer, InvertedIndex, FileUtils, DataUtils
from deeponto import init_jvm
# initialise JVM for python-java interaction
import click
if not jpype.isJVMStarted():
memory = click.prompt("Please enter the maximum memory located to JVM", type=str, default="8g")
print()
init_jvm(memory)
from java.io import File # type: ignore
from java.util import Collections # type: ignore
from org.semanticweb.owlapi.apibinding import OWLManager # type: ignore
from org.semanticweb.owlapi.model import IRI, OWLObject, OWLClassExpression, OWLObjectPropertyExpression, OWLDataPropertyExpression, OWLNamedIndividual, OWLAxiom, AddAxiom, RemoveAxiom, AxiomType # type: ignore
from org.semanticweb.HermiT import ReasonerFactory # type: ignore
from org.semanticweb.owlapi.util import OWLObjectDuplicator, OWLEntityRemover # type: ignore
from org.semanticweb.owlapi.search import EntitySearcher # type: ignore
# IRIs for special entities
OWL_THING = "http://www.w3.org/2002/07/owl#Thing"
OWL_NOTHING = "http://www.w3.org/2002/07/owl#Nothing"
OWL_TOP_OBJECT_PROPERTY = "http://www.w3.org/2002/07/owl#topObjectProperty"
OWL_BOTTOM_OBJECT_PROPERTY = "http://www.w3.org/2002/07/owl#bottomObjectProperty"
OWL_TOP_DATA_PROPERTY = "http://www.w3.org/2002/07/owl#topDataProperty"
OWL_BOTTOM_DATA_PROPERTY = "http://www.w3.org/2002/07/owl#bottomDataProperty"
RDFS_LABEL = "http://www.w3.org/2000/01/rdf-schema#label"
OWL_DEPRECATED = "http://www.w3.org/2002/07/owl#deprecated"
TOP_BOTTOMS = CfgNode(
{
"Classes": {"TOP": OWL_THING, "BOTTOM": OWL_NOTHING},
"ObjectProperties": {"TOP": OWL_TOP_OBJECT_PROPERTY, "BOTTOM": OWL_BOTTOM_OBJECT_PROPERTY},
"DataProperties": {"TOP": OWL_TOP_DATA_PROPERTY, "BOTTOM": OWL_BOTTOM_DATA_PROPERTY},
}
)
class Ontology:
"""Ontology class that extends from the Java library OWLAPI.
!!! note "Typing from OWLAPI"
Types with `OWL` prefix are mostly imported from the OWLAPI library by, for example,
`from org.semanticweb.owlapi.model import OWLObject`.
Attributes:
owl_path (str): The path to the OWL ontology file.
        owl_manager (OWLOntologyManager): An ontology manager for creating `OWLOntology`.
        owl_onto (OWLOntology): An `OWLOntology` created by `owl_manager` from `owl_path`.
owl_iri (str): The IRI of the `owl_onto`.
owl_classes (Dict[str, OWLClass]): A dictionary that stores the `(iri, ontology_class)` pairs.
owl_object_properties (Dict[str, OWLObjectProperty]): A dictionary that stores the `(iri, ontology_object_property)` pairs.
owl_data_properties (Dict[str, OWLDataProperty]): A dictionary that stores the `(iri, ontology_data_property)` pairs.
owl_data_factory (OWLDataFactory): A data factory for manipulating axioms.
owl_annotation_properties (Dict[str, OWLAnnotationProperty]): A dictionary that stores the `(iri, ontology_annotation_property)` pairs.
reasoner (OntologyReasoner): A reasoner for ontology inference.
"""
def __init__(self, owl_path: str):
"""Initialise a new ontology.
Args:
owl_path (str): The path to the OWL ontology file.
"""
self.owl_path = os.path.abspath(owl_path)
self.owl_manager = OWLManager.createOWLOntologyManager()
self.owl_onto = self.owl_manager.loadOntologyFromOntologyDocument(IRI.create("file:///" + self.owl_path))
self.owl_iri = str(self.owl_onto.getOntologyID().getOntologyIRI().get())
self.owl_classes = self.get_owl_objects("Classes")
self.owl_object_properties = self.get_owl_objects("ObjectProperties")
# for some reason the top object property is included
if OWL_TOP_OBJECT_PROPERTY in self.owl_object_properties.keys():
del self.owl_object_properties[OWL_TOP_OBJECT_PROPERTY]
self.owl_data_properties = self.get_owl_objects("DataProperties")
self.owl_data_factory = self.owl_manager.getOWLDataFactory()
self.owl_annotation_properties = self.get_owl_objects("AnnotationProperties")
# reasoning
self.reasoner = OntologyReasoner(self)
# hidden attributes
self._multi_children_classes = None
self._sibling_class_groups = None
# self._equiv_axioms = None
# self._subsumption_axioms = None
# summary
self.info = {
type(self).__name__: {
"loaded_from": os.path.basename(self.owl_path),
"num_classes": len(self.owl_classes),
"num_object_properties": len(self.owl_object_properties),
"num_data_properties": len(self.owl_data_properties),
"num_annotation_properties": len(self.owl_annotation_properties),
}
}
@property
def name(self):
"""Return the name of the ontology file."""
return os.path.normpath(self.owl_path).split(os.path.sep)[-1]
@property
def OWLThing(self):
"""Return `OWLThing`."""
return self.owl_data_factory.getOWLThing()
@property
def OWLNothing(self):
"""Return `OWLNoThing`."""
return self.owl_data_factory.getOWLNothing()
@property
def OWLTopObjectProperty(self):
"""Return `OWLTopObjectProperty`."""
return self.owl_data_factory.getOWLTopObjectProperty()
@property
def OWLBottomObjectProperty(self):
"""Return `OWLBottomObjectProperty`."""
return self.owl_data_factory.getOWLBottomObjectProperty()
@property
def OWLTopDataProperty(self):
"""Return `OWLTopDataProperty`."""
return self.owl_data_factory.getOWLTopDataProperty()
@property
def OWLBottomDataProperty(self):
"""Return `OWLBottomDataProperty`."""
return self.owl_data_factory.getOWLBottomDataProperty()
@staticmethod
def get_entity_type(entity: OWLObject, is_singular: bool = False):
"""A handy method to get the `type` of an `OWLObject` entity."""
if isinstance(entity, OWLClassExpression):
return "Classes" if not is_singular else "Class"
elif isinstance(entity, OWLObjectPropertyExpression):
return "ObjectProperties" if not is_singular else "ObjectProperty"
elif isinstance(entity, OWLDataPropertyExpression):
return "DataProperties" if not is_singular else "DataProperty"
else:
# NOTE: add further options in future
pass
def __str__(self) -> str:
return FileUtils.print_dict(self.info)
def get_owl_objects(self, entity_type: str):
"""Get an index of `OWLObject` of certain type from the ontology.
Args:
entity_type (str): Options are `"Classes"`, `"ObjectProperties"`, `"DataProperties"`, etc.
Returns:
(dict): A dictionary that stores the `(iri, owl_object)` pairs
"""
owl_objects = dict()
source = getattr(self.owl_onto, f"get{entity_type}InSignature")
for cl in source():
owl_objects[str(cl.getIRI())] = cl
return owl_objects
def get_owl_object_from_iri(self, iri: str):
"""Get an `OWLObject` given its IRI."""
if iri in self.owl_classes.keys():
return self.owl_classes[iri]
elif iri in self.owl_object_properties.keys():
return self.owl_object_properties[iri]
elif iri in self.owl_data_properties.keys():
return self.owl_data_properties[iri]
elif iri in self.owl_annotation_properties.keys():
return self.owl_annotation_properties[iri]
else:
raise KeyError(f"Cannot retrieve unknown IRI: {iri}.")
def get_subsumption_axioms(self, entity_type: str = "Classes"):
"""Return subsumption axioms (subject to input entity type) asserted in the ontology.
Args:
entity_type (str, optional): The entity type to be considered. Defaults to `"Classes"`.
Options are `"Classes"`, `"ObjectProperties"`, `"DataProperties"`, and `"AnnotationProperties"`.
Returns:
            (List[OWLAxiom]): A list of subsumption axioms subject to input entity type.
"""
if entity_type == "Classes":
return list(self.owl_onto.getAxioms(AxiomType.SUBCLASS_OF))
elif entity_type == "ObjectProperties":
return list(self.owl_onto.getAxioms(AxiomType.SUB_OBJECT_PROPERTY))
elif entity_type == "DataProperties":
return list(self.owl_onto.getAxioms(AxiomType.SUB_DATA_PROPERTY))
elif entity_type == "AnnotationProperties":
return list(self.owl_onto.getAxioms(AxiomType.SUB_ANNOTATION_PROPERTY_OF))
else:
raise ValueError(f"Unknown entity type {entity_type}.")
def get_equivalence_axioms(self, entity_type: str = "Classes"):
"""Return equivalence axioms (subject to input entity type) asserted in the ontology.
Args:
entity_type (str, optional): The entity type to be considered. Defaults to `"Classes"`.
Options are `"Classes"`, `"ObjectProperties"`, and `"DataProperties"`.
Returns:
(List[OWLAxiom]): A list of equivalence axioms subject to input entity type.
"""
if entity_type == "Classes":
return list(self.owl_onto.getAxioms(AxiomType.EQUIVALENT_CLASSES))
elif entity_type == "ObjectProperties":
return list(self.owl_onto.getAxioms(AxiomType.EQUIVALENT_OBJECT_PROPERTIES))
elif entity_type == "DataProperties":
return list(self.owl_onto.getAxioms(AxiomType.EQUIVALENT_DATA_PROPERTIES))
else:
raise ValueError(f"Unknown entity type {entity_type}.")
def get_asserted_parents(self, owl_object: OWLObject, named_only: bool = False):
r"""Get all the asserted parents of a given owl object.
Args:
owl_object (OWLObject): An owl object that could have a parent.
named_only (bool): If `True`, return parents that are named classes.
Returns:
(Set[OWLObject]): The parent set of the given owl object.
"""
entity_type = self.get_entity_type(owl_object)
if entity_type == "Classes":
parents = set(EntitySearcher.getSuperClasses(owl_object, self.owl_onto))
elif entity_type.endswith("Properties"):
parents = set(EntitySearcher.getSuperProperties(owl_object, self.owl_onto))
else:
raise ValueError(f"Unsupported entity type {entity_type}.")
if named_only:
parents = set([p for p in parents if self.check_named_entity(p)])
return parents
def get_asserted_children(self, owl_object: OWLObject, named_only: bool = False):
r"""Get all the asserted children of a given owl object.
Args:
owl_object (OWLObject): An owl object that could have a child.
named_only (bool): If `True`, return children that are named classes.
Returns:
(Set[OWLObject]): The children set of the given owl object.
"""
entity_type = self.get_entity_type(owl_object)
if entity_type == "Classes":
children = set(EntitySearcher.getSubClasses(owl_object, self.owl_onto))
elif entity_type.endswith("Properties"):
children = set(EntitySearcher.getSubProperties(owl_object, self.owl_onto))
else:
raise ValueError(f"Unsupported entity type {entity_type}.")
if named_only:
children = set([c for c in children if self.check_named_entity(c)])
return children
def get_asserted_complex_classes(self, gci_only: bool = False):
"""Get complex classes that occur in at least one of the ontology axioms.
Args:
gci_only (bool): If `True`, consider complex classes that occur in GCIs only; otherwise consider
those that occur in equivalence axioms as well.
Returns:
(Set[OWLClassExpression]): A set of complex classes.
"""
complex_classes = []
for gci in self.get_subsumption_axioms("Classes"):
super_class = gci.getSuperClass()
sub_class = gci.getSubClass()
if not OntologyReasoner.has_iri(super_class):
complex_classes.append(super_class)
if not OntologyReasoner.has_iri(sub_class):
complex_classes.append(sub_class)
# also considering equivalence axioms
if not gci_only:
for eq in self.get_equivalence_axioms("Classes"):
gci = list(eq.asOWLSubClassOfAxioms())[0]
super_class = gci.getSuperClass()
sub_class = gci.getSubClass()
if not OntologyReasoner.has_iri(super_class):
complex_classes.append(super_class)
if not OntologyReasoner.has_iri(sub_class):
complex_classes.append(sub_class)
return set(complex_classes)
def get_owl_object_annotations(
self,
owl_object: Union[OWLObject, str],
annotation_property_iri: Optional[str] = None,
annotation_language_tag: Optional[str] = None,
apply_lowercasing: bool = True,
normalise_identifiers: bool = False,
):
"""Get the annotations of the given `OWLObject`.
Args:
owl_object (Union[OWLObject, str]): An `OWLObject` or its IRI.
annotation_property_iri (str, optional):
Any particular annotation property IRI of interest. Defaults to `None`.
annotation_language_tag (str, optional):
Any particular annotation language tag of interest; NOTE that not every
annotation has a language tag, in this case assume it is in English.
Defaults to `None`. Options are `"en"`, `"ge"` etc.
apply_lowercasing (bool): Whether or not to apply lowercasing to annotation literals.
Defaults to `True`.
normalise_identifiers (bool): Whether to normalise annotation text that is in the Java identifier format.
Defaults to `False`.
Returns:
(Set[str]): A set of annotation literals of the given `OWLObject`.
"""
if isinstance(owl_object, str):
owl_object = self.get_owl_object_from_iri(owl_object)
annotation_property = None
if annotation_property_iri:
# return an empty list if `annotation_property_iri` does not exist in this OWLOntology`
annotation_property = self.get_owl_object_from_iri(annotation_property_iri)
annotations = []
for annotation in EntitySearcher.getAnnotations(owl_object, self.owl_onto, annotation_property):
annotation = annotation.getValue()
# boolean that indicates whether the annotation's language is of interest
fit_language = False
if not annotation_language_tag:
# it is set to `True` if `annotation_langauge` is not specified
fit_language = True
else:
# restrict the annotations to a language if specified
try:
# NOTE: not every annotation has a language attribute
fit_language = annotation.getLang() == annotation_language_tag
except:
# in the case when this annotation has no language tag
# we assume it is in English
if annotation_language_tag == "en":
fit_language = True
if fit_language:
# only get annotations that have a literal value
if annotation.isLiteral():
annotations.append(
TextUtils.process_annotation_literal(
str(annotation.getLiteral()), apply_lowercasing, normalise_identifiers
)
)
return DataUtils.uniqify(annotations)
def check_named_entity(self, owl_object: OWLObject):
r"""Check if the input entity is a named atomic entity. That is,
it is not a complex entity, $\top$, or $\bot$.
"""
entity_type = self.get_entity_type(owl_object)
top = TOP_BOTTOMS[entity_type].TOP
bottom = TOP_BOTTOMS[entity_type].BOTTOM
if OntologyReasoner.has_iri(owl_object):
iri = str(owl_object.getIRI())
# check if the entity is TOP or BOTTOM
return iri != top and iri != bottom
return False
def check_deprecated(self, owl_object: OWLObject):
r"""Check if the given OWL object is marked as deprecated according to $\texttt{owl:deprecated}$.
NOTE: the string literal indicating deprecation is either `'true'` or `'True'`. Also, if $\texttt{owl:deprecated}$
is not defined in this ontology, return `False` by default.
"""
if not OWL_DEPRECATED in self.owl_annotation_properties.keys():
# return False if owl:deprecated is not defined in this ontology
return False
deprecated = self.get_owl_object_annotations(owl_object, annotation_property_iri=OWL_DEPRECATED)
if deprecated and (list(deprecated)[0] == "true" or list(deprecated)[0] == "True"):
return True
else:
return False
@property
def sibling_class_groups(self) -> List[List[str]]:
"""Return grouped sibling classes (with a common *direct* parent);
NOTE that only groups with size > 1 will be considered
"""
if not self._sibling_class_groups:
self._multi_children_classes = dict()
self._sibling_class_groups = []
all_class_iris = list(self.owl_classes.keys()) + [OWL_THING] # including the root node
for cl_iri in all_class_iris:
if cl_iri == OWL_THING:
cl = self.OWLThing
else:
cl = self.get_owl_object_from_iri(cl_iri)
children = self.get_asserted_children(cl)
children_iris = [str(child.getIRI()) for child in children if self.check_named_entity(child)]
self._multi_children_classes[cl_iri] = children_iris
if len(children_iris) > 1:
# classes that have siblings form a sibling group
if children_iris not in self._sibling_class_groups:
                        # it is possible that some groups appear more than once because they have
                        # multiple common parents
self._sibling_class_groups.append(children_iris)
return self._sibling_class_groups
def save_onto(self, save_path: str):
"""Save the ontology file to the given path."""
self.owl_onto.saveOntology(IRI.create(File(save_path).toURI()))
def build_annotation_index(
self,
annotation_property_iris: List[str] = [RDFS_LABEL],
entity_type: str = "Classes",
apply_lowercasing: bool = True,
normalise_identifiers: bool = False,
):
"""Build an annotation index for a given type of entities.
Args:
annotation_property_iris (List[str]): A list of annotation property IRIs (it is possible
that not every annotation property IRI is in use); if not provided, the built-in
`rdfs:label` is considered. Defaults to `[RDFS_LABEL]`.
entity_type (str, optional): The entity type to be considered. Defaults to `"Classes"`.
Options are `"Classes"`, `"ObjectProperties"`, `"DataProperties"`, etc.
apply_lowercasing (bool): Whether or not to apply lowercasing to annotation literals.
Defaults to `True`.
normalise_identifiers (bool): Whether to normalise annotation text that is in the Java identifier format.
Defaults to `False`.
Returns:
(Tuple[dict, List[str]]): The built annotation index, and the list of annotation property IRIs that are in use.
"""
annotation_index = defaultdict(set)
# example: Classes => owl_classes; ObjectProperties => owl_object_properties
entity_type = "owl_" + TextUtils.split_java_identifier(entity_type).replace(" ", "_").lower()
entity_index = getattr(self, entity_type)
# preserve available annotation properties
annotation_property_iris = [
airi for airi in annotation_property_iris if airi in self.owl_annotation_properties.keys()
]
# build the annotation index without duplicated literals
for airi in annotation_property_iris:
for iri, entity in entity_index.items():
annotation_index[iri].update(
self.get_owl_object_annotations(
owl_object=entity,
annotation_property_iri=airi,
annotation_language_tag=None,
apply_lowercasing=apply_lowercasing,
normalise_identifiers=normalise_identifiers,
)
)
return annotation_index, annotation_property_iris
@staticmethod
def build_inverted_annotation_index(annotation_index: dict, tokenizer: Tokenizer):
"""Build an inverted annotation index given an annotation index and a tokenizer."""
return InvertedIndex(annotation_index, tokenizer)
def add_axiom(self, owl_axiom: OWLAxiom, return_undo: bool = True):
"""Add an axiom into the current ontology.
Args:
owl_axiom (OWLAxiom): An axiom to be added.
return_undo (bool, optional): Returning the undo operation or not. Defaults to `True`.
"""
change = AddAxiom(self.owl_onto, owl_axiom)
result = self.owl_onto.applyChange(change)
print(f"[{str(result)}] Adding the axiom {str(owl_axiom)} into the ontology.")
if return_undo:
return change.reverseChange()
def remove_axiom(self, owl_axiom: OWLAxiom, return_undo: bool = True):
"""Remove an axiom from the current ontology.
Args:
owl_axiom (OWLAxiom): An axiom to be removed.
return_undo (bool, optional): Returning the undo operation or not. Defaults to `True`.
"""
change = RemoveAxiom(self.owl_onto, owl_axiom)
result = self.owl_onto.applyChange(change)
print(f"[{str(result)}] Removing the axiom {str(owl_axiom)} from the ontology.")
if return_undo:
return change.reverseChange()
def replace_entity(self, owl_object: OWLObject, entity_iri: str, replacement_iri: str):
"""Replace an entity in a class expression with another entity.
Args:
owl_object (OWLObject): An `OWLObject` entity to be manipulated.
entity_iri (str): IRI of the entity to be replaced.
replacement_iri (str): IRI of the entity to replace.
Returns:
(OWLObject): The changed `OWLObject` entity.
"""
iri_dict = {IRI.create(entity_iri): IRI.create(replacement_iri)}
replacer = OWLObjectDuplicator(self.owl_data_factory, iri_dict)
return replacer.duplicateObject(owl_object)
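# --- Hedged usage sketch (comments only, not part of the original module) ---
# A minimal illustration of the Ontology API defined above, assuming a hypothetical
# local OWL file "pizza.owl"; loading it uses the JVM initialised earlier:
#     onto = Ontology("pizza.owl")
#     margherita = onto.get_owl_object_from_iri(
#         "http://www.co-ode.org/ontologies/pizza/pizza.owl#Margherita")  # example IRI
#     labels = onto.get_owl_object_annotations(margherita, annotation_property_iri=RDFS_LABEL)
#     parents = onto.get_asserted_parents(margherita, named_only=True)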
class OntologyReasoner:
"""Ontology reasoner class that extends from the Java library OWLAPI.
Attributes:
onto (Ontology): The input `deeponto` ontology.
owl_reasoner_factory (OWLReasonerFactory): A reasoner factory for creating a reasoner.
owl_reasoner (OWLReasoner): The created reasoner.
owl_data_factory (OWLDataFactory): A data factory (inherited from `onto`) for manipulating axioms.
"""
def __init__(self, onto: Ontology):
"""Initialise an ontology reasoner.
Args:
onto (Ontology): The input ontology to conduct reasoning on.
"""
self.onto = onto
self.owl_reasoner_factory = ReasonerFactory()
self.owl_reasoner = self.owl_reasoner_factory.createReasoner(self.onto.owl_onto)
self.owl_data_factory = self.onto.owl_data_factory
def reload_reasoner(self):
"""Reload the reasoner for the current ontology (possibly changed)."""
# release the memory
self.owl_reasoner.dispose()
# conduct reasoning on the possibly changed ontology
self.owl_reasoner = self.owl_reasoner_factory.createReasoner(self.onto.owl_onto)
@staticmethod
def get_entity_type(entity: OWLObject, is_singular: bool = False):
"""A handy method to get the type of an entity (`OWLObject`).
NOTE: This method is inherited from the Ontology Class.
"""
return Ontology.get_entity_type(entity, is_singular)
@staticmethod
def has_iri(entity: OWLObject):
"""Check if an entity has an IRI."""
try:
entity.getIRI()
return True
except:
return False
def get_inferred_super_entities(self, entity: OWLObject, direct: bool = False):
r"""Return the IRIs of named super-entities of a given `OWLObject` according to the reasoner.
A mixture of `getSuperClasses`, `getSuperObjectProperties`, `getSuperDataProperties`
functions imported from the OWLAPI reasoner. The type of input entity will be
automatically determined. The top entity such as `owl:Thing` is ignored.
Args:
entity (OWLObject): An `OWLObject` entity of interest.
direct (bool, optional): Return parents (`direct=True`) or
ancestors (`direct=False`). Defaults to `False`.
Returns:
(List[str]): A list of IRIs of the super-entities of the given `OWLObject` entity.
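        Examples:
            A hedged sketch, assuming `onto` is an ontology loaded from the disease ontology file
            `doid.owl` (as in the other examples) and accessed through its attached reasoner:
            ```python
            >>> c = onto.get_owl_object_from_iri("http://purl.obolibrary.org/obo/DOID_0001816")
            >>> onto.reasoner.get_inferred_super_entities(c, direct=True)  # direct parents only
            ```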
"""
entity_type = self.get_entity_type(entity)
get_super = f"getSuper{entity_type}"
TOP = TOP_BOTTOMS[entity_type].TOP # get the corresponding TOP entity
super_entities = getattr(self.owl_reasoner, get_super)(entity, direct).getFlattened()
super_entity_iris = [str(s.getIRI()) for s in super_entities]
# the root node is owl#Thing
if TOP in super_entity_iris:
super_entity_iris.remove(TOP)
return super_entity_iris
def get_inferred_sub_entities(self, entity: OWLObject, direct: bool = False):
"""Return the IRIs of named sub-entities of a given `OWLObject` according to the reasoner.
A mixture of `getSubClasses`, `getSubObjectProperties`, `getSubDataProperties`
functions imported from the OWLAPI reasoner. The type of input entity will be
automatically determined. The bottom entity such as `owl:Nothing` is ignored.
Args:
entity (OWLObject): An `OWLObject` entity of interest.
            direct (bool, optional): Return children (`direct=True`) or
                descendants (`direct=False`). Defaults to `False`.
Returns:
(List[str]): A list of IRIs of the sub-entities of the given `OWLObject` entity.
"""
entity_type = self.get_entity_type(entity)
get_sub = f"getSub{entity_type}"
BOTTOM = TOP_BOTTOMS[entity_type].BOTTOM
sub_entities = getattr(self.owl_reasoner, get_sub)(entity, direct).getFlattened()
sub_entity_iris = [str(s.getIRI()) for s in sub_entities]
        # the bottom node is owl#Nothing
if BOTTOM in sub_entity_iris:
sub_entity_iris.remove(BOTTOM)
return sub_entity_iris
def check_subsumption(self, sub_entity: OWLObject, super_entity: OWLObject):
"""Check if the first entity is subsumed by the second entity according to the reasoner."""
entity_type = self.get_entity_type(sub_entity, is_singular=True)
assert entity_type == self.get_entity_type(super_entity, is_singular=True)
sub_axiom = getattr(self.owl_data_factory, f"getOWLSub{entity_type}OfAxiom")(sub_entity, super_entity)
return self.owl_reasoner.isEntailed(sub_axiom)
def check_disjoint(self, entity1: OWLObject, entity2: OWLObject):
"""Check if two entities are disjoint according to the reasoner."""
entity_type = self.get_entity_type(entity1)
assert entity_type == self.get_entity_type(entity2)
disjoint_axiom = getattr(self.owl_data_factory, f"getOWLDisjoint{entity_type}Axiom")([entity1, entity2])
return self.owl_reasoner.isEntailed(disjoint_axiom)
def check_common_descendants(self, entity1: OWLObject, entity2: OWLObject):
"""Check if two entities have a common decendant.
Entities can be **OWL class or property expressions**, and can be either **atomic
or complex**. It takes longer computation time for the complex ones. Complex
entities do not have an IRI. This method is optimised in the way that if
there exists an atomic entity `A`, we compute descendants for `A` and
compare them against the other entity which could be complex.
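        Examples:
            A hedged sketch using the same `doid.owl` setting as the other examples; the two DOID
            classes are only illustrative:
            ```python
            >>> c1 = onto.get_owl_object_from_iri("http://purl.obolibrary.org/obo/DOID_4058")
            >>> c2 = onto.get_owl_object_from_iri("http://purl.obolibrary.org/obo/DOID_0001816")
            >>> onto.reasoner.check_common_descendants(c1, c2)
            ```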
"""
entity_type = self.get_entity_type(entity1)
assert entity_type == self.get_entity_type(entity2)
if not self.has_iri(entity1) and not self.has_iri(entity2):
warnings.warn("Computing descendants for two complex entities is not efficient.")
# `computed` is the one we compute the descendants
# `compared` is the one we compare `computed`'s descendant one-by-one
# we set the atomic entity as `computed` for efficiency if there is one
computed, compared = entity1, entity2
if not self.has_iri(entity1) and self.has_iri(entity2):
computed, compared = entity2, entity1
        # for every inferred child of `computed`, check if it is subsumed by `compared`
for descendant_iri in self.get_inferred_sub_entities(computed, direct=False):
# print("check a subsumption")
if self.check_subsumption(self.onto.get_owl_object_from_iri(descendant_iri), compared):
return True
return False
def instances_of(self, owl_class: OWLClassExpression, direct: bool = False):
"""Return the list of named individuals that are instances of a given OWL class expression.
Args:
owl_class (OWLClassExpression): An ontology class of interest.
direct (bool, optional): Return direct instances (`direct=True`) or
also include the sub-classes' instances (`direct=False`). Defaults to `False`.
Returns:
(List[OWLNamedIndividual]): A list of named individuals that are instances of `owl_class`.
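        Examples:
            A hedged sketch; `c` is assumed to be a class obtained from a loaded `onto`, and the
            result may be empty for ontologies with a sparse ABox:
            ```python
            >>> onto.reasoner.instances_of(c, direct=False)
            ```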
"""
return list(self.owl_reasoner.getInstances(owl_class, direct).getFlattened())
def check_instance(self, owl_instance: OWLNamedIndividual, owl_class: OWLClassExpression):
"""Check if a named individual is an instance of an OWL class."""
assertion_axiom = self.owl_data_factory.getOWLClassAssertionAxiom(owl_class, owl_instance)
return self.owl_reasoner.isEntailed(assertion_axiom)
def check_common_instances(self, owl_class1: OWLClassExpression, owl_class2: OWLClassExpression):
"""Check if two OWL class expressions have a common instance.
Class expressions can be **atomic or complex**, and it takes longer computation time
for the complex ones. Complex classes do not have an IRI. This method is optimised
in the way that if there exists an atomic class `A`, we compute instances for `A` and
compare them against the other class which could be complex.
!!! note "Difference with [`check_common_descendants`][deeponto.onto.OntologyReasoner.check_common_descendants]"
The inputs of this function are restricted to **OWL class expressions**. This is because
`descendant` is related to hierarchy and both class and property expressions have a hierarchy,
but `instance` is restricted to classes.
"""
if not self.has_iri(owl_class1) and not self.has_iri(owl_class2):
warnings.warn("Computing instances for two complex classes is not efficient.")
# `computed` is the one we compute the instances
# `compared` is the one we compare `computed`'s descendant one-by-one
# we set the atomic entity as `computed` for efficiency if there is one
computed, compared = owl_class1, owl_class2
if not self.has_iri(owl_class1) and self.has_iri(owl_class2):
            computed, compared = owl_class2, owl_class1
        # for every inferred instance of `computed`, check if it is subsumed by `compared`
for instance in self.instances_of(computed, direct=False):
if self.check_instance(instance, compared):
return True
return False
def check_assumed_disjoint(self, owl_class1: OWLClassExpression, owl_class2: OWLClassExpression):
r"""Check if two OWL class expressions satisfy the Assumed Disjointness.
!!! credit "Paper"
The definition of **Assumed Disjointness** comes from the paper:
[Language Model Analysis for Ontology Subsumption Inference](https://arxiv.org/abs/2302.06761).
!!! note "Assumed Disjointness (Definition)"
Two class expressions $C$ and $D$ are assumed to be disjoint if they meet the followings:
1. By adding the disjointness axiom of them into the ontology, $C$ and $D$ are **still satisfiable**.
2. $C$ and $D$ **do not have a common descendant** (otherwise $C$ and $D$ can be satisfiable but their
common descendants become the bottom $\bot$.)
Note that the special case where $C$ and $D$ are already disjoint is covered by the first check.
The paper also proposed a practical alternative to decide Assumed Disjointness.
See [`check_assumed_disjoint_alternative`][deeponto.onto.OntologyReasoner.check_assumed_disjoint_alternative].
Examples:
            Suppose we pre-load an ontology `onto` from the disease ontology file `doid.owl`.
```python
>>> c1 = onto.get_owl_object_from_iri("http://purl.obolibrary.org/obo/DOID_4058")
>>> c2 = onto.get_owl_object_from_iri("http://purl.obolibrary.org/obo/DOID_0001816")
>>> onto.reasoner.check_assumed_disjoint(c1, c2)
[SUCCESSFULLY] Adding the axiom DisjointClasses(<http://purl.obolibrary.org/obo/DOID_0001816> <http://purl.obolibrary.org/obo/DOID_4058>) into the ontology.
[CHECK1 True] input classes are still satisfiable;
[SUCCESSFULLY] Removing the axiom from the ontology.
[CHECK2 False] input classes have NO common descendant.
[PASSED False] assumed disjointness check done.
False
```
"""
        # banner_message("Check Assumed Disjointness")
entity_type = self.get_entity_type(owl_class1)
assert entity_type == self.get_entity_type(owl_class2)
# adding the disjointness axiom of `class1`` and `class2``
disjoint_axiom = getattr(self.owl_data_factory, f"getOWLDisjoint{entity_type}Axiom")([owl_class1, owl_class2])
undo_change = self.onto.add_axiom(disjoint_axiom, return_undo=True)
self.reload_reasoner()
# check if they are still satisfiable
still_satisfiable = self.owl_reasoner.isSatisfiable(owl_class1)
still_satisfiable = still_satisfiable and self.owl_reasoner.isSatisfiable(owl_class2)
print(f"[CHECK1 {still_satisfiable}] input classes are still satisfiable;")
# remove the axiom and re-construct the reasoner
undo_change_result = self.onto.owl_onto.applyChange(undo_change)
print(f"[{str(undo_change_result)}] Removing the axiom from the ontology.")
self.reload_reasoner()
# failing first check, there is no need to do the second.
if not still_satisfiable:
print("Failed `satisfiability check`, skip the `common descendant` check.")
print(f"[PASSED {still_satisfiable}] assumed disjointness check done.")
return False
# otherwise, the classes are still satisfiable and we should conduct the second check
has_common_descendants = self.check_common_descendants(owl_class1, owl_class2)
print(f"[CHECK2 {not has_common_descendants}] input classes have NO common descendant.")
print(f"[PASSED {not has_common_descendants}] assumed disjointness check done.")
return not has_common_descendants
def check_assumed_disjoint_alternative(
self, owl_class1: OWLClassExpression, owl_class2: OWLClassExpression, verbose: bool = False
):
r"""Check if two OWL class expressions satisfy the Assumed Disjointness.
!!! credit "Paper"
The definition of **Assumed Disjointness** comes from the paper:
[Language Model Analysis for Ontology Subsumption Inference](https://arxiv.org/abs/2302.06761).
The practical alternative version of [`check_assumed_disjoint`][deeponto.onto.OntologyReasoner.check_assumed_disjoint]
with following conditions:
!!! note "Assumed Disjointness (Practical Alternative)"
Two class expressions $C$ and $D$ are assumed to be disjoint if they
1. **do not** have a **subsumption relationship** between them,
2. **do not** have a **common descendant** (in TBox),
3. **do not** have a **common instance** (in ABox).
If all the conditions have been met, then we assume `class1` and `class2` as disjoint.
Examples:
            Suppose we pre-load an ontology `onto` from the disease ontology file `doid.owl`.
```python
>>> c1 = onto.get_owl_object_from_iri("http://purl.obolibrary.org/obo/DOID_4058")
>>> c2 = onto.get_owl_object_from_iri("http://purl.obolibrary.org/obo/DOID_0001816")
            >>> onto.reasoner.check_assumed_disjoint_alternative(c1, c2, verbose=True)
[CHECK1 True] input classes have NO subsumption relationship;
[CHECK2 False] input classes have NO common descendant;
Failed the `common descendant check`, skip the `common instance` check.
[PASSED False] assumed disjointness check done.
False
```
        In this alternative implementation, we do not need to add and remove axioms, which saves time.
"""
        # banner_message("Check Assumed Disjointness (Alternative)")
# # Check for entailed disjointness (short-cut)
# if self.check_disjoint(owl_class1, owl_class2):
# print(f"Input classes are already entailed as disjoint.")
# return True
# Check for entailed subsumption,
# common descendants and common instances
has_subsumption = self.check_subsumption(owl_class1, owl_class2)
has_subsumption = has_subsumption or self.check_subsumption(owl_class2, owl_class1)
if verbose:
print(f"[CHECK1 {not has_subsumption}] input classes have NO subsumption relationship;")
if has_subsumption:
if verbose:
print("Failed the `subsumption check`, skip the `common descendant` check.")
print(f"[PASSED {not has_subsumption}] assumed disjointness check done.")
return False
has_common_descendants = self.check_common_descendants(owl_class1, owl_class2)
if verbose:
print(f"[CHECK2 {not has_common_descendants}] input classes have NO common descendant;")
if has_common_descendants:
if verbose:
print("Failed the `common descendant check`, skip the `common instance` check.")
print(f"[PASSED {not has_common_descendants}] assumed disjointness check done.")
return False
        # TODO: `check_common_instances` is still experimental because we have not tested it with ontologies that have a rich ABox.
has_common_instances = self.check_common_instances(owl_class1, owl_class2)
if verbose:
print(f"[CHECK3 {not has_common_instances}] input classes have NO common instance;")
print(f"[PASSED {not has_common_instances}] assumed disjointness check done.")
return not has_common_instances
| 41,129 | 46.384793 | 211 |
py
|
DeepOnto
|
DeepOnto-main/src/deeponto/onto/normalisation.py
|
# The original code is licensed under the following:
# BSD 3-Clause License
# Copyright (c) 2022, Bio-Ontology Research Group
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# The modified version is licensed under the following:
# Copyright 2021 Yuan He. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import logging
logging.basicConfig(level=logging.INFO)
from . import Ontology
from de.tudresden.inf.lat.jcel.ontology.normalization import OntologyNormalizer # type: ignore
from de.tudresden.inf.lat.jcel.ontology.axiom.extension import IntegerOntologyObjectFactoryImpl # type: ignore
from de.tudresden.inf.lat.jcel.owlapi.translator import ReverseAxiomTranslator # type: ignore
from de.tudresden.inf.lat.jcel.owlapi.translator import Translator # type: ignore
from org.semanticweb.owlapi.model.parameters import Imports # type: ignore
from java.util import HashSet # type: ignore
class OntologyNormaliser:
r"""Class for ontology normalisation.
!!! note "Credit"
The code of this class originates from the [mOWL library](https://mowl.readthedocs.io/en/latest/index.html),
which utilises the normalisation functionality from the Java library `Jcel`.
The normalisation process transforms ontology axioms into **normal forms** in the Description Logic $\mathcal{EL}$, including:
- $C \sqsubseteq D$
- $C \sqcap C' \sqsubseteq D$
- $C \sqsubseteq \exists r.D$
- $\exists r.C \sqsubseteq D$
where $C$ and $C'$ can be named concepts or $\top$, $D$ is a named concept or $\bot$, $r$ is a role (property).
Attributes:
onto (Ontology): The input ontology to be normalised.
temp_super_class_index (Dict[OWLCLassExpression, OWLClass]): A dictionary in the form of `{complex_sub_class: temp_super_class}`, which means
`temp_super_class` is created during the normalisation of a complex subsumption axiom that has `complex_sub_class` as the sub-class.
"""
def __init__(self):
return
def normalise(self, ontology: Ontology):
r"""Performs the $\mathcal{EL}$ normalisation.
Args:
ontology (Ontology): An ontology to be normalised.
Returns:
(List[OWLAxiom]): A list of normalised TBox axioms.
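        Examples:
            A hedged usage sketch, assuming `onto` is an already-loaded `Ontology`:
            ```python
            >>> normaliser = OntologyNormaliser()
            >>> normalised_axioms = normaliser.normalise(onto)
            ```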
"""
processed_owl_onto = self.preprocess_ontology(ontology)
root_ont = processed_owl_onto
translator = Translator(
processed_owl_onto.getOWLOntologyManager().getOWLDataFactory(), IntegerOntologyObjectFactoryImpl()
)
axioms = HashSet()
axioms.addAll(root_ont.getAxioms())
translator.getTranslationRepository().addAxiomEntities(root_ont)
for ont in root_ont.getImportsClosure():
axioms.addAll(ont.getAxioms())
translator.getTranslationRepository().addAxiomEntities(ont)
intAxioms = translator.translateSA(axioms)
normaliser = OntologyNormalizer()
factory = IntegerOntologyObjectFactoryImpl()
normalised_ontology = normaliser.normalize(intAxioms, factory)
self.rTranslator = ReverseAxiomTranslator(translator, processed_owl_onto)
normalised_axioms = []
# revert the jcel axioms to the original OWLAxioms
for ax in normalised_ontology:
try:
axiom = self.rTranslator.visit(ax)
normalised_axioms.append(axiom)
except Exception as e:
logging.info("Reverse translation. Ignoring axiom: %s", ax)
logging.info(e)
        return list(set(normalised_axioms))
def preprocess_ontology(self, ontology: Ontology):
"""Preprocess the ontology to remove axioms that are not supported by the normalisation process."""
tbox_axioms = ontology.owl_onto.getTBoxAxioms(Imports.fromBoolean(True))
new_tbox_axioms = HashSet()
        unsupported_keywords = (
            "UnionOf",
            "MinCardinality",
            "ComplementOf",
            "AllValuesFrom",
            "MaxCardinality",
            "ExactCardinality",
            "Annotation",
            "ObjectHasSelf",
            "urn:swrl",
            "EquivalentObjectProperties",
            "SymmetricObjectProperty",
            "AsymmetricObjectProperty",
            "ObjectOneOf",
        )
        for axiom in tbox_axioms:
            axiom_as_str = axiom.toString()
            # skip axioms whose constructs are not supported by the normalisation process
            if any(keyword in axiom_as_str for keyword in unsupported_keywords):
                continue
            new_tbox_axioms.add(axiom)
processed_owl_onto = ontology.owl_manager.createOntology(new_tbox_axioms)
# NOTE: the returned object is `owlapi.OWLOntology` not `deeponto.onto.Ontology`
return processed_owl_onto
| 7,223 | 40.28 | 149 |
py
|
DeepOnto
|
DeepOnto-main/src/deeponto/onto/verbalisation.py
|
# Copyright 2021 Yuan He. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import os
import spacy
from typing import List, Union
from collections import defaultdict
from anytree import NodeMixin, RenderTree
from IPython.display import Image
from anytree.dotexport import RenderTreeGraph
import math
from yacs.config import CfgNode
from . import Ontology
from org.semanticweb.owlapi.model import OWLObject, OWLClassExpression, OWLAxiom # type: ignore
ABBREVIATION_DICT = {
"ObjectComplementOf": "[NEG]", # negation
"ObjectSomeValuesFrom": "[EX.]", # existential restriction
"ObjectAllValuesFrom": "[ALL]", # universal restriction
"ObjectUnionOf": "[OR.]", # disjunction
"ObjectIntersectionOf": "[AND]", # conjunction
"EquivalentClasses": "[EQV]", # equivalence
"SubClassOf": "[SUB]", # subsumed by
"SuperClassOf": "[SUP]", # subsumes
}
RDFS_LABEL = "http://www.w3.org/2000/01/rdf-schema#label"
class OntologyVerbaliser:
r"""A recursive natural language verbaliser for the OWL logical expressions, e.g., [`OWLAxiom`](http://owlcs.github.io/owlapi/apidocs_5/org/semanticweb/owlapi/model/OWLAxiom.html)
and [`OWLClassExpression`](https://owlcs.github.io/owlapi/apidocs_4/org/semanticweb/owlapi/model/OWLClassExpression.html).
The concept patterns supported by this verbaliser are shown below:
| **Pattern** | **Verbalisation** ($\mathcal{V}$) |
|-----------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| $A$ (atomic) | the name ($\texttt{rdfs:label}$) of $A$ |
| $r$ (property) | the name ($\texttt{rdfs:label}$) of $r$; *"is"* is appended to the head if the name starts with a passive verb, noun, or adjective |
| $\neg C$ | *"not $\mathcal{V}(C)$"* |
| $\exists r.C$ | *"something that $\mathcal{V}(r)$ some $\mathcal{V}(C)$"* |
| $\forall r.C$ | *"something that $\mathcal{V}(r)$ only $\mathcal{V}(C)$"* |
| $C_1 \sqcap ... \sqcap C_n$ | if $C_i = \exists/\forall r.D_i$ and $C_j = \exists/\forall r.D_j$, they will be re-written into $\exists/\forall r.(D_i \sqcap D_j)$ before verbalisation; suppose after re-writing the new expression is $C_1 \sqcap ... \sqcap C_{n'}$ <p> **(a)** if **all** $C_i$s (for $i = 1, ..., n'$) are restrictions, in the form of $\exists/\forall r_i.D_i$: <br /> *"something that $\mathcal{V}(r_1)$ some/only $V(D_1)$ and ... and $\mathcal{V}(r_{n'})$ some/only $V(D_{n'})$"* <br /> **(b)** if **some** $C_i$s (for $i = m+1, ..., n'$) are restrictions, in the form of $\exists/\forall r_i.D_i$: <br /> *"$\mathcal{V}(C_{1})$ and ... and $\mathcal{V}(C_{m})$ that $\mathcal{V}(r_{m+1})$ some/only $V(D_{m+1})$ and ... and $\mathcal{V}(r_{n'})$ some/only $V(D_{n'})$"* <br /> **(c)** if **no** $C_i$ is a restriction: <br /> *"$\mathcal{V}(C_{1})$ and ... and $\mathcal{V}(C_{n'})$"* |
| $C_1 \sqcup ... \sqcup C_n$ | similar to verbalising $C_1 \sqcap ... \sqcap C_n$ except that *"and"* is replaced by *"or"* and case **(b)** uses the same verbalisation as case **(c)** |
!!! warning
This verbaliser utilises spacy for POS tagging used in the auto-correction of property names.
Automatic download of the rule-based library `en_core_web_sm` is available at the init function. However, if you
somehow cannot find it, please manually download it using `python -m spacy download en_core_web_sm`.
Attributes:
onto (Ontology): An ontology whose entities are to be verbalised.
parser (OntologySyntaxParser): A syntax parser for the string representation of an `OWLObject`.
vocab (Dict[str, List[str]]): A dictionary with `(entity_iri, entity_name)` pairs, by default
the names are retrieved from $\texttt{rdfs:label}$.
"""
def __init__(self, onto: Ontology, apply_lowercasing_to_vocab: bool = False):
self.onto = onto
self.parser = OntologySyntaxParser()
# download en_core_web_sm for object property
try:
spacy.load("en_core_web_sm")
except:
print("Download `en_core_web_sm` for pos tagger.")
os.system("python -m spacy download en_core_web_sm")
self.nlp = spacy.load("en_core_web_sm")
# build the default vocabulary for entities
self.apply_lowercasing_to_vocab = apply_lowercasing_to_vocab
self.vocab = dict()
for entity_type in ["Classes", "ObjectProperties", "DataProperties"]:
entity_annotations, _ = self.onto.build_annotation_index(entity_type=entity_type, apply_lowercasing=self.apply_lowercasing_to_vocab)
self.vocab.update(**entity_annotations)
literal_or_iri = lambda k, v: list(v)[0] if v else k # set vocab to IRI if no string available
self.vocab = {k: literal_or_iri(k, v) for k, v in self.vocab.items()} # only set one name for each entity
def update_entity_name(self, entity_iri: str, entity_name: str):
"""Update the name of an entity in `self.vocab`.
If you want to change the name of a specific entity, you should call this
function before applying verbalisation.
"""
self.vocab[entity_iri] = entity_name
def verbalise_class_expression(self, class_expression: Union[OWLClassExpression, str, RangeNode]):
r"""Verbalise a class expression (`OWLClassExpression`) or its parsed form (in `RangeNode`).
See currently supported types of class (or concept) expressions [here][deeponto.onto.verbalisation.OntologyVerbaliser].
Args:
class_expression (Union[OWLClassExpression, str, RangeNode]): A class expression to be verbalised.
Raises:
RuntimeError: Occurs when the class expression is not in one of the supported types.
Returns:
(CfgNode): A nested dictionary that presents the details of verbalisation. The verbalised string
can be accessed with the key `["verbal"]`.
"""
if not isinstance(class_expression, RangeNode):
parsed_class_expression = self.parser.parse(class_expression).children[0] # skip the root node
else:
parsed_class_expression = class_expression
# for a singleton IRI
if parsed_class_expression.is_iri:
iri = parsed_class_expression.text.lstrip("<").rstrip(">")
return CfgNode({"verbal": self.vocab[iri], "iri": iri, "type": "IRI"})
if parsed_class_expression.name.startswith("NEG"):
# negation only has one child
cl = self.verbalise_class_expression(parsed_class_expression.children[0])
return CfgNode({"verbal": "not " + cl.verbal, "class": cl, "type": "NEG"})
# for existential and universal restrictions
if parsed_class_expression.name.startswith("EX.") or parsed_class_expression.name.startswith("ALL"):
return self._verbalise_restriction(parsed_class_expression)
# for conjunction and disjunction
if parsed_class_expression.name.startswith("AND") or parsed_class_expression.name.startswith("OR"):
return self._verbalise_junction(parsed_class_expression)
raise RuntimeError("Input class expression is not in one of the supported types.")
def _verbalise_restriction(self, restriction_node: RangeNode, add_something: bool = True, add_quantifier_word: bool = False):
"""Verbalise a (parsed) class expression in the form of existential or universal restriction."""
try:
assert restriction_node.name.startswith("EX.") or restriction_node.name.startswith("ALL")
assert len(restriction_node.children) == 2
except:
raise RuntimeError("Input range node is not related to a existential or universal restriction statement.")
quantifier_word = "some" if restriction_node.name.startswith("EX.") else "only"
object_property = restriction_node.children[0]
assert object_property.is_iri
object_property = self.verbalise_class_expression(object_property)
# NOTE modify the object property's verbalisation with rules
doc = self.nlp(object_property.verbal)
# Rule 1. Add "is" if the object property starts with a NOUN, ADJ, or passive VERB
if doc[0].pos_ == 'NOUN' or doc[0].pos_ == 'ADJ' or (doc[0].pos_ == 'VERB' and doc[0].text.endswith("ed")):
object_property.verbal = "is " + object_property.verbal
class_expression = restriction_node.children[1]
class_expression = self.verbalise_class_expression(class_expression.text)
# adding quantifier word or not
if add_quantifier_word:
verbal = f"{object_property.verbal} {quantifier_word} {class_expression.verbal}"
else:
verbal = f"{object_property.verbal} {class_expression.verbal}"
verbal = verbal.lstrip()
if add_something:
verbal = f"something that " + verbal
return CfgNode(
{
"verbal": verbal,
"property": object_property,
"class": class_expression,
"type": restriction_node.name[:3],
}
)
def _verbalise_junction(self, junction_node: RangeNode):
"""Verbalise a (parsed) class expression in the form of conjunction or disjunction."""
try:
assert junction_node.name.startswith("AND") or junction_node.name.startswith("OR.")
except:
raise RuntimeError("Input range node is not related to a conjunction or disjunction statement.")
junction_word = "and" if junction_node.name.startswith("AND") else "or"
# collect restriction nodes for merging
existential_restriction_children = defaultdict(list)
universal_restriction_children = defaultdict(list)
other_children = []
for child in junction_node.children:
if child.name.startswith("EX."):
child = self._verbalise_restriction(child, add_something=False)
existential_restriction_children[child.property.verbal].append(child)
elif child.name.startswith("ALL"):
child = self._verbalise_restriction(child, add_something=False)
universal_restriction_children[child.property.verbal].append(child)
else:
other_children.append(self.verbalise_class_expression(child))
merged_children = []
for v in list(existential_restriction_children.values()) + list(universal_restriction_children.values()):
# restriction = v[0].type
if len(v) > 1:
merged_child = CfgNode(dict())
merged_child.update(v[0]) # initialised with the first one
merged_child["class"] = CfgNode(
{"verbal": v[0]["class"].verbal, "classes": [v[0]["class"]], "type": junction_node.name[:3]}
)
for i in range(1, len(v)):
# v 0.5.2 fix for len(v) > 1 case
merged_child.verbal += f" {junction_word} " + v[i]["class"].verbal # update grouped concepts with property
merged_child["class"].verbal += f" {junction_word} " + v[i]["class"].verbal # update grouped concepts
merged_child["class"].classes.append(v[i]["class"])
merged_children.append(merged_child)
# print(merged_children)
else:
merged_children.append(v[0])
results = CfgNode(
{
"verbal": "",
"classes": other_children + merged_children,
"type": junction_node.name[:3],
}
)
        # add the preceding "something that" if there are only restrictions
if not other_children:
results.verbal += " something that " + f" {junction_word} ".join(
c.verbal for c in merged_children
)
            results.verbal = results.verbal.lstrip()
else:
results.verbal += f" {junction_word} ".join(c.verbal for c in other_children)
if merged_children:
if junction_word == "and":
                # seafood and non-vegetarian product that derives from shark and goldfish
results.verbal += " that " + f" {junction_word} ".join(c.verbal for c in merged_children)
elif junction_word == "or":
                # seafood or non-vegetarian product or something that derives from shark or goldfish
results.verbal += " or something that " + f" {junction_word} ".join(c.verbal for c in merged_children)
return results
# def verbalise_equivalence_axiom(self, equivalence_axiom: OWLAxiom):
# #TODO
# pass
def verbalise_subsumption_axiom(self, subsumption_axiom: OWLAxiom):
r"""Verbalise a subsumption axiom.
The subsumption axiom can have two forms:
- $C \sqsubseteq D$, the `SubClassOf` axiom;
- $C \sqsupseteq D$, the `SuperClassOf` axiom.
Args:
subsumption_axiom (OWLAxiom): The subsumption axiom to be verbalised.
Returns:
(Tuple[CfgNode, CfgNode]): The verbalised sub-concept and super-concept (order matters).
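        Examples:
            A hedged sketch, assuming `onto` is a loaded `Ontology` and `axiom` is one of its
            `SubClassOf` axioms:
            ```python
            >>> verbaliser = OntologyVerbaliser(onto)
            >>> sub, sup = verbaliser.verbalise_subsumption_axiom(axiom)
            >>> sub.verbal, sup.verbal
            ```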
"""
parsed_subsumption_axiom = self.parser.parse(subsumption_axiom).children[0] # skip the root node
if str(subsumption_axiom).startswith("SubClassOf"):
parsed_sub_class, parsed_super_class = parsed_subsumption_axiom.children
elif str(subsumption_axiom).startswith("SuperClassOf"):
parsed_super_class, parsed_sub_class = parsed_subsumption_axiom.children
else:
raise RuntimeError(f"The input axiom is not a valid subsumption axiom.")
return self.verbalise_class_expression(parsed_sub_class), self.verbalise_class_expression(parsed_super_class)
class OntologySyntaxParser:
r"""A syntax parser for the OWL logical expressions, e.g., [`OWLAxiom`](http://owlcs.github.io/owlapi/apidocs_5/org/semanticweb/owlapi/model/OWLAxiom.html)
and [`OWLClassExpression`](https://owlcs.github.io/owlapi/apidocs_4/org/semanticweb/owlapi/model/OWLClassExpression.html).
It makes use of the string representation (based on Manchester Syntax) defined in the OWLAPI. In Python,
such string can be accessed by simply using `#!python str(some_owl_object)`.
To keep the Java import in the main [`Ontology`][deeponto.onto.Ontology] class,
this parser does not deal with `OWLAxiom` directly but instead its **string representation**.
Due to the `OWLObject` syntax, this parser relies on two components:
1. Parentheses matching;
2. Tree construction ([`RangeNode`][deeponto.onto.verbalisation.RangeNode]).
As a result, it will return a [`RangeNode`][deeponto.onto.verbalisation.RangeNode] that
specifies the sub-formulas (and their respective **positions in the string representation**)
in a tree structure.
Examples:
Suppose the input is an `OWLAxiom` that has the string representation:
```python
>>> str(owl_axiom)
>>> 'EquivalentClasses(<http://purl.obolibrary.org/obo/FOODON_00001707> ObjectIntersectionOf(<http://purl.obolibrary.org/obo/FOODON_00002044> ObjectSomeValuesFrom(<http://purl.obolibrary.org/obo/RO_0001000> <http://purl.obolibrary.org/obo/FOODON_03412116>)) )'
```
This corresponds to the following logical expression:
$$
CephalopodFoodProduct \equiv MolluskFoodProduct \sqcap \exists derivesFrom.Cephalopod
$$
        After applying the parser, a [`RangeNode`][deeponto.onto.verbalisation.RangeNode] will be returned, which can be rendered as:
```python
axiom_parser = OntologySyntaxParser()
print(axiom_parser.parse(str(owl_axiom)).render_tree())
```
`#!console Output:`
:  
```python
Root@[0:inf]
└── EQV@[0:212]
├── FOODON_00001707@[6:54]
└── AND@[55:210]
├── FOODON_00002044@[61:109]
└── EX.@[110:209]
├── RO_0001000@[116:159]
└── FOODON_03412116@[160:208]
```
Or, if `graphviz` (installed by e.g., `sudo apt install graphviz`) is available,
you can visualise the tree as an image by:
```python
axiom_parser.parse(str(owl_axiom)).render_image()
```
`#!console Output:`
<p align="center">
<img alt="range_node" src="../../../assets/example_range_node.png" style="padding: 30px 50px">
</p>
The name for each node has the form `{node_type}@[{start}:{end}]`, which means a node of the type `{node_type}` is
located at the range `[{start}:{end}]` in the **abbreviated** expression (see [`abbreviate_owl_expression`][deeponto.onto.verbalisation.OntologySyntaxParser.abbreviate_owl_expression]
below).
The leaf nodes are IRIs and they are represented by the last segment (split by `"/"`) of the whole IRI.
Child nodes can be accessed by `.children`, the string representation of the sub-formula in this node can be
accessed by `.text`. For example:
```python
parser.parse(str(owl_axiom)).children[0].children[1].text
```
`#!console Output:`
:  
```python
'[AND](<http://purl.obolibrary.org/obo/FOODON_00002044> [EX.](<http://purl.obolibrary.org/obo/RO_0001000> <http://purl.obolibrary.org/obo/FOODON_03412116>))'
```
"""
def __init__(self):
pass
def abbreviate_owl_expression(self, owl_expression: str):
r"""Abbreviate the string representations of logical operators to a
fixed length (easier for parsing).
The abbreviations are as follows:
```python
{
"ObjectComplementOf": "[NEG]", # negation
"ObjectSomeValuesFrom": "[EX.]", # existential restriction
"ObjectAllValuesFrom": "[ALL]", # universal restriction
"ObjectUnionOf": "[OR.]", # disjunction
"ObjectIntersectionOf": "[AND]", # conjunction
"EquivalentClasses": "[EQV]", # equivalence
"SubClassOf": "[SUB]", # subsumed by
"SuperClassOf": "[SUP]", # subsumes
}
```
Args:
owl_expression (str): The string representation of an `OWLObject`.
Returns:
(str): The modified string representation of this `OWLObject` where the logical operators are abbreviated.
"""
for k, v in ABBREVIATION_DICT.items():
owl_expression = owl_expression.replace(k, v)
return owl_expression
def parse(self, owl_expression: Union[str, OWLObject]) -> RangeNode:
r"""Parse an `OWLAxiom` into a [`RangeNode`][deeponto.onto.verbalisation.RangeNode].
This is the main entry for using the parser, which relies on the [`parse_by_parentheses`][deeponto.onto.verbalisation.OntologySyntaxParser.parse_by_parentheses]
method below.
Args:
owl_expression (Union[str, OWLObject]): The string representation of an `OWLObject` or the `OWLObject` itself.
Returns:
            (RangeNode): A syntactic tree parsed according to the matched parentheses.
"""
if not isinstance(owl_expression, str):
owl_expression = str(owl_expression)
owl_expression = self.abbreviate_owl_expression(owl_expression)
# print("To parse the following (transformed) axiom text:\n", owl_expression)
# parse complex patterns first
cur_parsed = self.parse_by_parentheses(owl_expression)
# parse the IRI patterns latter
return self.parse_by_parentheses(owl_expression, cur_parsed, for_iri=True)
@classmethod
def parse_by_parentheses(
cls, owl_expression: str, already_parsed: RangeNode = None, for_iri: bool = False
) -> RangeNode:
r"""Parse an `OWLAxiom` based on parentheses matching into a [`RangeNode`][deeponto.onto.verbalisation.RangeNode].
This function needs to be applied twice to get a fully parsed [`RangeNode`][deeponto.onto.verbalisation.RangeNode] because IRIs have
a different parenthesis pattern.
Args:
owl_expression (str): The string representation of an `OWLObject`.
already_parsed (RangeNode, optional): A partially parsed [`RangeNode`][deeponto.onto.verbalisation.RangeNode] to continue with. Defaults to `None`.
for_iri (bool, optional): Parentheses are by default `()` but will be changed to `<>` for IRIs. Defaults to `False`.
Raises:
            RuntimeError: Raised when the input axiom text is not properly formatted.
Returns:
            (RangeNode): A syntactic tree parsed according to the matched parentheses.
"""
if not already_parsed:
# a root node that covers the entire sentence
parsed = RangeNode(0, math.inf, name=f"Root", text=owl_expression, is_iri=False)
else:
parsed = already_parsed
stack = []
left_par = "("
right_par = ")"
if for_iri:
left_par = "<"
right_par = ">"
for i, c in enumerate(owl_expression):
if c == left_par:
stack.append(i)
if c == right_par:
try:
start = stack.pop()
end = i
if not for_iri:
# the first character is actually "["
real_start = start - 5
axiom_type = owl_expression[real_start + 1 : start - 1]
node = RangeNode(
real_start,
end + 1,
name=f"{axiom_type}",
text=owl_expression[real_start : end + 1],
is_iri=False,
)
parsed.insert_child(node)
else:
# no preceding characters for just atomic class (IRI)
abbr_iri = owl_expression[start : end + 1].split("/")[-1].rstrip(">")
node = RangeNode(
start, end + 1, name=abbr_iri, text=owl_expression[start : end + 1], is_iri=True
)
parsed.insert_child(node)
except IndexError:
print("Too many closing parentheses")
if stack: # check if stack is empty afterwards
raise RuntimeError("Too many opening parentheses")
return parsed
class RangeNode(NodeMixin):
r"""A tree implementation for ranges (without partial overlap).
- Parent node's range fully covers child node's range, e.g., `[1, 10]` is a parent of `[2, 5]`.
- Partial overlap between ranges are not allowed, e.g., `[2, 4]` and `[3, 5]` cannot appear in the same `RangeNodeTree`.
- Non-overlap ranges are on different branches (irrelevant).
- Child nodes are ordered according to their relative positions.
"""
def __init__(self, start, end, name=None, **kwargs):
if start >= end:
raise RuntimeError("invalid start and end positions ...")
self.start = start
self.end = end
self.name = "Root" if not name else name
self.name = f"{self.name}@[{self.start}:{self.end}]" # add start and ent to the name
for k, v in kwargs.items():
setattr(self, k, v)
super().__init__()
# def __eq__(self, other: RangeNode):
# """Two ranges are equal if they have the same `start` and `end`.
# """
# return self.start == other.start and self.end == other.end
def __gt__(self, other: RangeNode):
r"""Compare two ranges if they have a different `start` and/or a different `end`.
- $R_1 \lt R_2$: if range $R_1$ is completely contained in range $R_2$, and $R_1 \neq R_2$.
- $R_1 \gt R_2$: if range $R_2$ is completely contained in range $R_1$, and $R_1 \neq R_2$.
- `"irrelevant"`: if range $R_1$ and range $R_2$ have no overlap.
!!! warning
Partial overlap is not allowed.
"""
# ranges inside
if self.start <= other.start and other.end <= self.end:
return True
# ranges outside
if other.start <= self.start and self.end <= other.end:
return False
if other.end < self.start or self.end < other.start:
return "irrelevant"
raise RuntimeError("Compared ranges have a partial overlap.")
@staticmethod
def sort_by_start(nodes: List[RangeNode]):
"""A sorting function that sorts the nodes by their starting positions."""
temp = {sib: sib.start for sib in nodes}
return list(dict(sorted(temp.items(), key=lambda item: item[1])).keys())
def insert_child(self, node: RangeNode):
r"""Inserting a child [`RangeNode`][deeponto.onto.verbalisation.RangeNode].
Child nodes have a smaller (inclusive) range, e.g., `[2, 5]` is a child of `[1, 6]`.
"""
if node > self:
raise RuntimeError("invalid child node")
if node.start == self.start and node.end == self.end:
# duplicated node
return
# print(self.children)
if self.children:
inserted = False
for ch in self.children:
if (node < ch) is True:
# print("further down")
ch.insert_child(node)
inserted = True
break
elif (node > ch) is True:
# print("insert in between")
ch.parent = node
# NOTE: should not break here as it could be parent of multiple children !
# break
# NOTE: the equal case is when two nodes are exactly the same, no operation needed
if not inserted:
self.children = list(self.children) + [node]
self.children = self.sort_by_start(self.children)
else:
node.parent = self
self.children = [node]
def __repr__(self):
return f"{self.name}"
def render_tree(self):
"""Render the whole tree."""
return RenderTree(self)
def render_image(self):
"""Calling this function will generate a temporary `range_node.png` file
which will be displayed.
To make this visualisation work, you need to install `graphviz` by, e.g.,
```bash
sudo apt install graphviz
```
"""
RenderTreeGraph(self).to_picture("range_node.png")
return Image("range_node.png")
| 28,640 | 46.262376 | 910 |
py
|
DeepOnto
|
DeepOnto-main/src/deeponto/onto/projection.py
|
# The original code is licensed under the following:
# BSD 3-Clause License
# Copyright (c) 2022, Bio-Ontology Research Group
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# The modified version is licensed under the following:
# Copyright 2021 Yuan He. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from . import Ontology
from org.mowl.Projectors import OWL2VecStarProjector as Projector #type:ignore
from org.semanticweb.owlapi.model import OWLOntology #type:ignore
class OntologyProjector:
r'''Class for ontology projection -- transforming ontology axioms into triples.
!!! note "Credit"
The code of this class originates from the [mOWL library](https://mowl.readthedocs.io/en/latest/index.html).
Attributes:
bidirectional_taxonomy (bool): If `True` then per each `SubClass` edge one `SuperClass` edge will
be generated. Defaults to `False`.
only_taxonomy (bool): If `True`, then projection will only include `subClass` edges. Defaults to `False`.
include_literals (bool): If `True` the projection will also include triples involving data property
assertions and annotations. Defaults to `False`.
'''
def __init__(self, bidirectional_taxonomy: bool=False, only_taxonomy: bool=False, include_literals: bool=False):
self.bidirectional_taxonomy = bidirectional_taxonomy
self.include_literals = include_literals
self.only_taxonomy = only_taxonomy
self.projector = Projector(self.bidirectional_taxonomy, self.only_taxonomy,
self.include_literals)
def project(self, ontology: Ontology):
"""The projection algorithm implemented in OWL2Vec*.
Args:
ontology (Ontology): An ontology to be processed.
Returns:
(Set): Set of triples after projection.
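        Examples:
            A hedged usage sketch, assuming `onto` is an already-loaded `Ontology`:
            ```python
            >>> projector = OntologyProjector(bidirectional_taxonomy=True)
            >>> triples = projector.project(onto)
            ```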
"""
ontology = ontology.owl_onto
if not isinstance(ontology, OWLOntology):
raise TypeError(
"Input ontology must be of type `org.semanticweb.owlapi.model.OWLOntology`.")
edges = self.projector.project(ontology)
triples = [(str(e.src()), str(e.rel()), str(e.dst())) for e in edges if str(e.dst()) != ""]
return set(triples)
| 4,274 | 44 | 116 |
py
|
DeepOnto
|
DeepOnto-main/src/deeponto/onto/pruning.py
|
# Copyright 2021 Yuan He. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import itertools
from typing import List, TYPE_CHECKING
if TYPE_CHECKING:
from . import Ontology
from org.semanticweb.owlapi.util import OWLEntityRemover # type: ignore
from java.util import Collections # type: ignore
class OntologyPruner:
r"""Class for in-place ontology pruning.
Attributes:
onto (Ontology): The input ontology to be pruned. Note that the pruning process is in-place.
"""
def __init__(self, onto: Ontology):
self.onto = onto
self._pruning_applied = None
def save_onto(self, save_path: str):
"""Save the pruned ontology file to the given path."""
print(f"{self._pruning_applied} pruning algorithm has been applied.")
print(f"Save the pruned ontology file to {save_path}.")
return self.onto.save_onto(save_path)
def prune(self, class_iris_to_be_removed: List[str]):
r"""Apply ontology pruning while preserving the relevant hierarchy.
!!! credit "paper"
This refers to the ontology pruning algorithm introduced in the paper:
[*Machine Learning-Friendly Biomedical Datasets for Equivalence and Subsumption Ontology Matching (ISWC 2022)*](https://link.springer.com/chapter/10.1007/978-3-031-19433-7_33).
For each class $c$ to be pruned, subsumption axioms will be created between $c$'s parents and children so as to preserve the
relevant hierarchy.
Args:
class_iris_to_be_removed (List[str]): Classes with IRIs in this list will be pruned and the relevant hierarchy will be repaired.
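        Examples:
            A hedged usage sketch; the DOID IRI and the output file name are placeholders:
            ```python
            >>> pruner = OntologyPruner(onto)
            >>> pruner.prune(["http://purl.obolibrary.org/obo/DOID_0001816"])
            >>> pruner.save_onto("doid.pruned.owl")
            ```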
"""
# create the subsumption axioms first
for cl_iri in class_iris_to_be_removed:
cl = self.onto.get_owl_object_from_iri(cl_iri)
cl_parents = self.onto.get_asserted_parents(cl)
cl_children = self.onto.get_asserted_children(cl)
for parent, child in itertools.product(cl_parents, cl_children):
sub_axiom = self.onto.owl_data_factory.getOWLSubClassOfAxiom(child, parent)
self.onto.add_axiom(sub_axiom)
# apply pruning
class_remover = OWLEntityRemover(Collections.singleton(self.onto.owl_onto))
for cl_iri in class_iris_to_be_removed:
cl = self.onto.get_owl_object_from_iri(cl_iri)
cl.accept(class_remover)
self.onto.owl_manager.applyChanges(class_remover.getChanges())
# remove IRIs in dictionaries?
# TODO Test it
# self._pruning_applied = "min_hierarchy"
| 3,170 | 40.181818 | 188 |
py
|
DeepOnto
|
DeepOnto-main/src/deeponto/onto/__init__.py
|
# Copyright 2021 Yuan He. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .ontology import Ontology, OntologyReasoner
from .pruning import OntologyPruner
from .verbalisation import OntologyVerbaliser, OntologySyntaxParser
from .projection import OntologyProjector
from .normalisation import OntologyNormaliser
| 831 | 42.789474 | 74 |
py
|
DeepOnto
|
DeepOnto-main/src/deeponto/utils/logging.py
|
# Copyright 2021 Yuan He. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from typing import Optional
import logging
import datetime
import time
import torch
import xml.etree.ElementTree as ET
import subprocess
# subclass of logging.Formatter
class RuntimeFormatter(logging.Formatter):
"""Auxiliary class for runtime formatting in the logger."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.start_time = time.time()
def formatTime(self, record, datefmt=None):
"""Record relative runtime in hr:min:sec format。"""
duration = datetime.datetime.utcfromtimestamp(record.created - self.start_time)
elapsed = duration.strftime("%H:%M:%S")
return "{}".format(elapsed)
def create_logger(model_name: str, saved_path: str):
"""Create logger for both console info and saved info.
    Any pre-existing log file will be cleared before new messages are written.
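    Examples:
        A hedged sketch; the model name and directory below are placeholders (the directory is
        assumed to exist already):
        ```python
        >>> logger = create_logger("my_model", "./experiments")
        >>> logger.info("training started")
        ```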
"""
logger = logging.getLogger(model_name)
logger.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
fh = logging.FileHandler(f"{saved_path}/{model_name}.log", mode="w") # "w" means clear the log file before writing
fh.setLevel(logging.DEBUG)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
# create formatter and add it to the handlers
formatter = RuntimeFormatter("[Time: %(asctime)s] - [PID: %(process)d] - [Model: %(name)s] \n%(message)s")
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(fh)
logger.addHandler(ch)
return logger
def banner_message(message: str, sym="^"):
"""Print a banner message surrounded by special symbols."""
print()
message = message.upper()
banner_len = len(message) + 4
message = " " * ((banner_len - len(message)) // 2) + message
message = message + " " * (banner_len - len(message))
print(message)
print(sym * banner_len)
print()
| 2,609 | 34.27027 | 119 |
py
|
DeepOnto
|
DeepOnto-main/src/deeponto/utils/data_utils.py
|
# Copyright 2021 Yuan He. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from typing import Optional
class DataUtils:
@staticmethod
def uniqify(ls):
"""Return a list of unique elements without messing around the order"""
non_empty_ls = list(filter(lambda x: x != "", ls))
return list(dict.fromkeys(non_empty_ls))
@staticmethod
def sort_dict_by_values(dic: dict, desc: bool = True, k: Optional[int] = None):
"""Return a sorted dict by values with first k reserved if provided."""
sorted_items = list(sorted(dic.items(), key=lambda item: item[1], reverse=desc))
return dict(sorted_items[:k])
| 1,215 | 37 | 88 |
py
|
DeepOnto
|
DeepOnto-main/src/deeponto/utils/text_utils.py
|
# Copyright 2021 Yuan He. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from typing import Iterable, List, Dict, Tuple, Union
import re
from collections import defaultdict
from itertools import chain
import math
from transformers import AutoTokenizer
import spacy
from spacy.lang.en import English
import xml.etree.ElementTree as ET
class TextUtils:
"""Provides text processing utilities."""
@staticmethod
def process_annotation_literal(annotation_literal: str, apply_lowercasing: bool = True, normalise_identifiers: bool = False):
"""Pre-process an annotation literal string.
Args:
annotation_literal (str): A literal string of an entity's annotation.
apply_lowercasing (bool): A boolean that determines lowercasing or not. Defaults to `True`.
normalise_identifiers (bool): Whether to normalise annotation text that is in the Java identifier format. Defaults to `False`.
Returns:
(str): the processed annotation literal string.
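        Examples:
            A hedged sketch relying on the splitting behaviour documented in
            `TextUtils.split_java_identifier` below:
            ```python
            >>> TextUtils.process_annotation_literal("SuperNaturalPower", normalise_identifiers=True)
            'super natural power'
            ```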
"""
# replace the underscores with spaces
annotation_literal = annotation_literal.replace("_", " ")
# if the annotation literal is a valid identifier with first letter capitalised
# we suspect that it could be a Java style identifier that needs to be split
if normalise_identifiers and annotation_literal[0].isupper() and annotation_literal.isidentifier():
annotation_literal = TextUtils.split_java_identifier(annotation_literal)
        # lowercase the annotation literal if specified
if apply_lowercasing:
annotation_literal = annotation_literal.lower()
return annotation_literal
@staticmethod
def split_java_identifier(java_style_identifier: str):
r"""Split words in java's identifier style into natural language phrase.
Examples:
- `"SuperNaturalPower"` $\rightarrow$ `"Super Natural Power"`
- `"APIReference"` $\rightarrow$ `"API Reference"`
- `"Covid19"` $\rightarrow$ `"Covid 19"`
"""
# split at every capital letter or number (numbers are treated as capital letters)
raw_words = re.findall("([0-9A-Z][a-z]*)", java_style_identifier)
words = []
capitalized_word = ""
for i, w in enumerate(raw_words):
# the above regex pattern will split at capitals
# so the capitalized words are split into characters
# i.e., (len(w) == 1)
if len(w) == 1:
capitalized_word += w
# edge case for the last word
if i == len(raw_words) - 1:
words.append(capitalized_word)
            # if the current w is a full word, save the previous
# cached capitalized_word and also save current full word
elif capitalized_word:
words.append(capitalized_word)
words.append(w)
capitalized_word = ""
# just save the current full word otherwise
else:
words.append(w)
return " ".join(words)
class Tokenizer:
"""A Tokenizer class for both sub-word (pre-trained) and word (rule-based) level tokenization."""
def __init__(self, tokenizer_type: str):
self.type = tokenizer_type
self._tokenizer = None # hidden tokenizer
self.tokenize = None # the tokenization method
def __call__(self, texts: Union[str, List[str]]):
if isinstance(texts, str):
return self.tokenize(texts)
else:
return list(chain.from_iterable(self.tokenize(t) for t in texts))
@classmethod
def from_pretrained(cls, pretrained_path: str = "bert-base-uncased"):
"""(Based on **transformers**) Load a sub-word level tokenizer from pre-trained model."""
instance = cls("pre-trained")
instance._tokenizer = AutoTokenizer.from_pretrained(pretrained_path)
instance.tokenize = instance._tokenizer.tokenize
return instance
@classmethod
def from_rule_based(cls):
"""(Based on **spacy**) Load a word-level (rule-based) tokenizer."""
spacy.prefer_gpu()
instance = cls("rule-based")
instance._tokenizer = English()
instance.tokenize = lambda texts: [word.text for word in instance._tokenizer(texts).doc]
return instance
class InvertedIndex:
r"""Inverted index built from a text index.
Attributes:
tokenizer (Tokenizer): A tokenizer instance to be used.
original_index (defaultdict): A dictionary where the values are text strings to be tokenized.
constructed_index (defaultdict): A dictionary that acts as the inverted index of `original_index`.
"""
def __init__(self, index: defaultdict, tokenizer: Tokenizer):
self.tokenizer = tokenizer
self.original_index = index
self.constructed_index = defaultdict(list)
for k, v in self.original_index.items():
# value is a list of strings
for token in self.tokenizer(v):
self.constructed_index[token].append(k)
def idf_select(self, texts: Union[str, List[str]], pool_size: int = 200):
"""Given a list of tokens, select a set candidates based on the inverted document frequency (idf) scores.
We use `idf` instead of `tf` because labels have different lengths and thus tf is not a fair measure.
"""
candidate_pool = defaultdict(lambda: 0)
# D := number of "documents", i.e., number of "keys" in the original index
D = len(self.original_index)
for token in self.tokenizer(texts):
# each token is associated with some classes
potential_candidates = self.constructed_index[token]
if not potential_candidates:
continue
# We use idf instead of tf because the text for each class is of different length, tf is not a fair measure
# inverse document frequency: with more classes to have the current token tk, the score decreases
idf = math.log10(D / len(potential_candidates))
for candidate in potential_candidates:
# each candidate class is scored by sum(idf)
candidate_pool[candidate] += idf
candidate_pool = list(sorted(candidate_pool.items(), key=lambda item: item[1], reverse=True))
# print(f"Select {min(len(candidate_pool), pool_size)} candidates.")
# select the first K ranked
return candidate_pool[:pool_size]
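# --- Usage sketch: a small, hypothetical demo of the utilities above. It assumes spacy is installed
# and uses only the rule-based tokenizer, so no model download or pre-trained checkpoint is required;
# the toy annotation index below is made up.
if __name__ == "__main__":
    # Java-style identifiers are split at capitals/digits before optional lowercasing
    print(TextUtils.split_java_identifier("APIReference"))  # "API Reference"
    print(TextUtils.process_annotation_literal("SuperNaturalPower", normalise_identifiers=True))  # "super natural power"
    # build a toy inverted index and select candidates by their summed idf scores
    toy_index = defaultdict(list)
    toy_index["http://onto/A"] = ["heart disease"]
    toy_index["http://onto/B"] = ["lung disease"]
    toy_index["http://onto/C"] = ["heart failure"]
    rule_based_tokenizer = Tokenizer.from_rule_based()
    inverted_index = InvertedIndex(toy_index, rule_based_tokenizer)
    # classes sharing rarer tokens with the query score higher (idf = log10(D / df) per token)
    print(inverted_index.idf_select(["heart disease"], pool_size=2))  # A ranked first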
| 7,121 | 41.142012 | 138 |
py
|
DeepOnto
|
DeepOnto-main/src/deeponto/utils/file_utils.py
|
# Copyright 2021 Yuan He. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import json
import yaml
import dill as pickle
import os
import shutil
from pathlib import Path
import pandas as pd
import xml.etree.ElementTree as ET
import subprocess
class FileUtils:
"""Provides file processing utilities."""
@staticmethod
def create_path(path: str):
"""Create a path recursively."""
Path(path).mkdir(parents=True, exist_ok=True)
@staticmethod
def save_file(obj, save_path: str, sort_keys: bool = False):
"""Save an object to a certain format."""
if save_path.endswith(".json"):
with open(save_path, "w") as output:
json.dump(obj, output, indent=4, separators=(",", ": "), sort_keys=sort_keys)
elif save_path.endswith(".pkl"):
with open(save_path, "wb") as output:
pickle.dump(obj, output, -1)
elif save_path.endswith(".yaml"):
with open(save_path, "w") as output:
yaml.dump(obj, output, default_flow_style=False, allow_unicode=True)
else:
raise RuntimeError(f"Unsupported saving format: {save_path}")
@staticmethod
def load_file(save_path: str):
"""Load an object of a certain format."""
if save_path.endswith(".json"):
with open(save_path, "r") as input:
return json.load(input)
elif save_path.endswith(".pkl"):
with open(save_path, "rb") as input:
return pickle.load(input)
elif save_path.endswith(".yaml"):
with open(save_path, "r") as input:
return yaml.safe_load(input)
else:
raise RuntimeError(f"Unsupported loading format: {save_path}")
@staticmethod
def print_dict(dic: dict):
"""Pretty print a dictionary."""
pretty_print = json.dumps(dic, indent=4, separators=(",", ": "))
# print(pretty_print)
return pretty_print
@staticmethod
def copy2(source: str, destination: str):
"""Copy a file from source to destination."""
try:
shutil.copy2(source, destination)
print(f"copied successfully FROM {source} TO {destination}")
except shutil.SameFileError:
print(f"same file exists at {destination}")
@staticmethod
def read_table(table_file_path: str):
r"""Read `csv` or `tsv` file as pandas dataframe without treating `"NULL"`, `"null"`, and `"n/a"` as an empty string."""
# TODO: this might change with the version of pandas
na_vals = pd.io.parsers.readers.STR_NA_VALUES.difference({"NULL", "null", "n/a"})
sep = "\t" if table_file_path.endswith(".tsv") else ","
return pd.read_csv(table_file_path, sep=sep, na_values=na_vals, keep_default_na=False)
@staticmethod
def read_jsonl(file_path: str):
"""Read `.jsonl` file (list of json) introduced in the BLINK project."""
results = []
key_set = []
with open(file_path, "r", encoding="utf-8-sig") as f:
lines = f.readlines()
for line in lines:
record = json.loads(line)
results.append(record)
key_set += list(record.keys())
print(f"all available keys: {set(key_set)}")
return results
@staticmethod
def read_oaei_mappings(rdf_file: str):
"""To read mapping files in the OAEI rdf format."""
xml_root = ET.parse(rdf_file).getroot()
ref_mappings = [] # where relation is "="
ignored_mappings = [] # where relation is "?"
for elem in xml_root.iter():
# every Cell contains a mapping of en1 -rel(some value)-> en2
if "Cell" in elem.tag:
en1, en2, rel, measure = None, None, None, None
for sub_elem in elem:
if "entity1" in sub_elem.tag:
en1 = list(sub_elem.attrib.values())[0]
elif "entity2" in sub_elem.tag:
en2 = list(sub_elem.attrib.values())[0]
elif "relation" in sub_elem.tag:
rel = sub_elem.text
elif "measure" in sub_elem.tag:
measure = sub_elem.text
row = (en1, en2, measure)
# =: equivalent; > superset of; < subset of.
if rel == "=" or rel == ">" or rel == "<":
# rel.replace(">", ">").replace("<", "<")
ref_mappings.append(row)
elif rel == "?":
ignored_mappings.append(row)
else:
print("Unknown Relation Warning: ", rel)
print('#Maps ("="):', len(ref_mappings))
print('#Maps ("?"):', len(ignored_mappings))
return ref_mappings, ignored_mappings
@staticmethod
def run_jar(jar_command: str):
"""Run jar command using subprocess."""
proc = subprocess.Popen(jar_command.split(" "))
try:
_, _ = proc.communicate(timeout=600)
except subprocess.TimeoutExpired:
proc.kill()
_, _ = proc.communicate()
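# --- Usage sketch: a minimal, hypothetical round-trip through the saving utilities above; the file
# name is arbitrary and .json, .pkl, .yaml are the supported suffixes.
if __name__ == "__main__":
    demo_config = {"model": "bertmap", "mapping_filtered_threshold": 0.9995}
    FileUtils.save_file(demo_config, "./demo_config.json")
    loaded_config = FileUtils.load_file("./demo_config.json")
    print(FileUtils.print_dict(loaded_config))
    os.remove("./demo_config.json")  # clean up the temporary demo file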
| 5,752 | 37.871622 | 128 |
py
|
DeepOnto
|
DeepOnto-main/src/deeponto/utils/__init__.py
|
# Copyright 2021 Yuan He. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .logging import create_logger, banner_message
from .data_utils import *
from .file_utils import *
from .text_utils import *
| 719 | 39 | 74 |
py
|
DeepOnto
|
DeepOnto-main/src/deeponto/utils/decorators.py
|
# Copyright 2021 Yuan He. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from functools import wraps
import time
def timer(function):
"""Print the runtime of the decorated function."""
@wraps(function)
def wrapper_timer(*args, **kwargs):
start_time = time.perf_counter() # 1
value = function(*args, **kwargs)
end_time = time.perf_counter() # 2
run_time = end_time - start_time # 3
print(f"Finished {function.__name__!r} in {run_time:.4f} secs.")
return value
return wrapper_timer
def debug(function):
"""Print the function signature and return value."""
@wraps(function)
def wrapper_debug(*args, **kwargs):
args_repr = [repr(a) for a in args]
kwargs_repr = [f"{k}={v!r}" for k, v in kwargs.items()]
signature = ", ".join(args_repr + kwargs_repr)
print(f"Calling {function.__name__}({signature})")
value = function(*args, **kwargs)
print(f"{function.__name__!r} returned {value!r}.")
return value
return wrapper_debug
def paper(title: str, link: str):
"""Add paper tagger for methods."""
# Define a new decorator, named "decorator", to return
def decorator(func):
# Ensure the decorated function keeps its metadata
@wraps(func)
def wrapper(*args, **kwargs):
# Call the function being decorated and return the result
return func(*args, **kwargs)
        wrapper.paper_title = f'This method is associated with the paper titled: "{title}".'
wrapper.paper_link = f"This method is associated with the paper with link: {link}."
return wrapper
# Return the new decorator
return decorator
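# --- Usage sketch: a hypothetical function decorated with the helpers above; the title and link
# passed to @paper are placeholders.
if __name__ == "__main__":
    @timer
    @paper(title="Some Paper", link="https://example.org/paper")
    def slow_add(x: int, y: int) -> int:
        """A toy function used only to exercise the decorators."""
        time.sleep(0.1)
        return x + y
    print(slow_add(1, 2))        # @timer prints the runtime, then 3 is printed here
    print(slow_add.paper_title)  # metadata attached by the @paper decorator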
| 2,271 | 32.411765 | 94 |
py
|
DeepOnto
|
DeepOnto-main/src/deeponto/align/__init__.py
|
# Copyright 2021 Yuan He. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 590 | 41.214286 | 74 |
py
|
DeepOnto
|
DeepOnto-main/src/deeponto/align/evaluation.py
|
# Copyright 2021 Yuan He. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple, List
import math
from .mapping import *
class AlignmentEvaluator:
"""Class that provides evaluation metrics for alignment."""
def __init__(self):
pass
@staticmethod
def precision(prediction_mappings: List[EntityMapping], reference_mappings: List[ReferenceMapping]) -> float:
r"""The percentage of correct predictions.
$$P = \frac{|\mathcal{M}_{pred} \cap \mathcal{M}_{ref}|}{|\mathcal{M}_{pred}|}$$
"""
preds = [p.to_tuple() for p in prediction_mappings]
refs = [r.to_tuple() for r in reference_mappings]
return len(set(preds).intersection(set(refs))) / len(set(preds))
@staticmethod
def recall(prediction_mappings: List[EntityMapping], reference_mappings: List[ReferenceMapping]) -> float:
r"""The percentage of correct retrievals.
$$R = \frac{|\mathcal{M}_{pred} \cap \mathcal{M}_{ref}|}{|\mathcal{M}_{ref}|}$$
"""
preds = [p.to_tuple() for p in prediction_mappings]
refs = [r.to_tuple() for r in reference_mappings]
return len(set(preds).intersection(set(refs))) / len(set(refs))
@staticmethod
def f1(
prediction_mappings: List[EntityMapping],
reference_mappings: List[ReferenceMapping],
null_reference_mappings: List[ReferenceMapping] = [],
):
r"""Compute the F1 score given the prediction and reference mappings.
$$F_1 = \frac{2 P R}{P + R}$$
`null_reference_mappings` is an additional set whose elements
should be **ignored** in the calculation, i.e., **neither positive nor negative**.
        Specifically, $\mathcal{M}_{null}$ will be **subtracted** from both
        $\mathcal{M}_{pred}$ and $\mathcal{M}_{ref}$.
"""
preds = [p.to_tuple() for p in prediction_mappings]
refs = [r.to_tuple() for r in reference_mappings]
null_refs = [n.to_tuple() for n in null_reference_mappings]
# elements in the {null_set} are removed from both {pred} and {ref} (ignored)
if null_refs:
preds = set(preds) - set(null_refs)
refs = set(refs) - set(null_refs)
P = len(set(preds).intersection(set(refs))) / len(set(preds))
R = len(set(preds).intersection(set(refs))) / len(set(refs))
F1 = 2 * P * R / (P + R)
return {"P": round(P, 3), "R": round(R, 3), "F1": round(F1, 3)}
##################################################################################
### [Eval Case 2]: Hits@K & MRR ###
##################################################################################
# TODO: check below algorithms after full deployment
@staticmethod
def hits_at_K(prediction_and_candidates: List[Tuple[EntityMapping, List[EntityMapping]]], K: int):
r"""Compute $Hits@K$ for a list of `(prediction_mapping, candidate_mappings)` pair.
It is computed as the number of a `prediction_mapping` existed in the first $K$ ranked `candidate_mappings`,
divided by the total number of input pairs.
        $$Hits@K = \sum_i^N \mathbb{I}_{rank_i \leq K} / N$$
"""
n_hits = 0
for pred, cands in prediction_and_candidates:
ordered_candidates = [c.to_tuple() for c in EntityMapping.sort_entity_mappings_by_score(cands, k=K)]
if pred.to_tuple() in ordered_candidates:
n_hits += 1
return n_hits / len(prediction_and_candidates)
@staticmethod
def mean_reciprocal_rank(prediction_and_candidates: List[Tuple[EntityMapping, List[EntityMapping]]]):
r"""Compute $MRR$ for a list of `(prediction_mapping, candidate_mappings)` pair.
$$MRR = \sum_i^N rank_i^{-1} / N$$
"""
sum_inverted_ranks = 0
for pred, cands in prediction_and_candidates:
ordered_candidates = [c.to_tuple() for c in EntityMapping.sort_entity_mappings_by_score(cands)]
if pred.to_tuple() in ordered_candidates:
rank = ordered_candidates.index(pred.to_tuple()) + 1
else:
rank = math.inf
sum_inverted_ranks += 1 / rank
return sum_inverted_ranks / len(prediction_and_candidates)
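# --- Worked example: a self-contained numeric check of the P/R/F1 formulas above, using made-up
# (source, target) IRI tuples instead of EntityMapping objects. Note this module uses relative
# imports, so the sketch assumes the usual package context.
if __name__ == "__main__":
    preds = {("a", "x"), ("b", "y"), ("c", "z")}
    refs = {("a", "x"), ("b", "y"), ("d", "w")}
    P = len(preds & refs) / len(preds)  # 2/3
    R = len(preds & refs) / len(refs)   # 2/3
    F1 = 2 * P * R / (P + R)            # also 2/3 here since P == R
    print({"P": round(P, 3), "R": round(R, 3), "F1": round(F1, 3)})  # {'P': 0.667, 'R': 0.667, 'F1': 0.667}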
| 4,843 | 42.63964 | 116 |
py
|
DeepOnto
|
DeepOnto-main/src/deeponto/align/mapping.py
|
# Copyright 2021 Yuan He. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from typing import Optional, List, TYPE_CHECKING
import pprintpp
from collections import defaultdict
import pandas as pd
import random
from deeponto.onto import Ontology
from deeponto.utils import FileUtils, DataUtils, Tokenizer
if TYPE_CHECKING:
from org.semanticweb.owlapi.model import OWLObject # type: ignore
DEFAULT_REL = "<?rel>"
DUP_STRATEGIES = ["average", "kept_new", "kept_old"]
DEFAULT_DUP_STRATEGY = DUP_STRATEGIES[0]
##################################################################################
### basic mapping structure ###
##################################################################################
class EntityMapping:
r"""A datastructure for entity mapping.
Such entities should be named and have an IRI.
Attributes:
src_entity_iri (str): The IRI of the source entity, usually its IRI if available.
tgt_entity_iri (str): The IRI of the target entity, usually its IRI if available.
relation (str, optional): A symbol that represents what semantic relation this mapping stands for. Defaults to `<?rel>` which means unspecified.
Suggested inputs are `"<EquivalentTo>"` and `"<SubsumedBy>"`.
score (float, optional): The score that indicates the confidence of this mapping. Defaults to `0.0`.
"""
def __init__(self, src_entity_iri: str, tgt_entity_iri: str, relation: str = DEFAULT_REL, score: float = 0.0):
"""Intialise an entity mapping.
Args:
src_entity_iri (str): The IRI of the source entity, usually its IRI if available.
tgt_entity_iri (str): The IRI of the target entity, usually its IRI if available.
relation (str, optional): A symbol that represents what semantic relation this mapping stands for. Defaults to `<?rel>` which means unspecified.
Suggested inputs are `"<EquivalentTo>"` and `"<SubsumedBy>"`.
score (float, optional): The score that indicates the confidence of this mapping. Defaults to `0.0`.
"""
self.head = src_entity_iri
self.tail = tgt_entity_iri
self.relation = relation
self.score = score
@classmethod
def from_owl_objects(
cls, src_entity: OWLObject, tgt_entity: OWLObject, relation: str = DEFAULT_REL, score: float = 0.0
):
"""Create an entity mapping from two `OWLObject` entities which have an IRI.
Args:
src_entity (OWLObject): The source entity in `OWLObject`.
tgt_entity (OWLObject): The target entity in `OWLObject`.
relation (str, optional): A symbol that represents what semantic relation this mapping stands for. Defaults to `<?rel>` which means unspecified.
Suggested inputs are `"<EquivalentTo>"` and `"<SubsumedBy>"`.
score (float, optional): The score that indicates the confidence of this mapping. Defaults to `0.0`.
Returns:
(EntityMapping): The entity mapping created from the source and target entities.
"""
return cls(str(src_entity.getIRI()), str(tgt_entity.getIRI()), relation, score)
def to_tuple(self, with_score: bool = False):
"""Transform an entity mapping (`self`) to a tuple representation
Note that `relation` is discarded and `score` is optionally preserved).
"""
if with_score:
return (self.head, self.tail, self.score)
else:
return (self.head, self.tail)
@staticmethod
def as_tuples(entity_mappings: List[EntityMapping], with_score: bool = False):
"""Transform a list of entity mappings to their tuple representations.
        Note that `relation` is discarded and `score` is optionally preserved.
"""
return [m.to_tuple(with_score=with_score) for m in entity_mappings]
@staticmethod
def sort_entity_mappings_by_score(entity_mappings: List[EntityMapping], k: Optional[int] = None):
r"""Sort the entity mappings in a list by their scores in descending order.
Args:
entity_mappings (List[EntityMapping]): A list entity mappings to sort.
k (int, optional): The number of top $k$ scored entities preserved if specified. Defaults to `None` which
means to return **all** entity mappings.
Returns:
(List[EntityMapping]): A list of sorted entity mappings.
"""
return list(sorted(entity_mappings, key=lambda x: x.score, reverse=True))[:k]
@staticmethod
def read_table_mappings(
table_of_mappings_file: str,
threshold: Optional[float] = 0.0,
relation: str = DEFAULT_REL,
is_reference: bool = False,
) -> List[EntityMapping]:
r"""Read entity mappings from `.csv` or `.tsv` files.
!!! note "Mapping Table Format"
The columns of the mapping table must have the headings: `"SrcEntity"`, `"TgtEntity"`, and `"Score"`.
Args:
table_of_mappings_file (str): The path to the table (`.csv` or `.tsv`) of mappings.
threshold (Optional[float], optional): Mappings with scores less than `threshold` will not be loaded. Defaults to 0.0.
relation (str, optional): A symbol that represents what semantic relation this mapping stands for. Defaults to `<?rel>` which means unspecified.
Suggested inputs are `"<EquivalentTo>"` and `"<SubsumedBy>"`.
            is_reference (bool): Whether the loaded mappings are reference mappings; if so, `threshold` is disabled and mapping scores
are all set to $1.0$. Defaults to `False`.
Returns:
(List[EntityMapping]): A list of entity mappings loaded from the table file.
"""
df = FileUtils.read_table(table_of_mappings_file)
entity_mappings = []
for _, dp in df.iterrows():
if is_reference:
entity_mappings.append(ReferenceMapping(dp["SrcEntity"], dp["TgtEntity"], relation))
else:
if dp["Score"] >= threshold:
entity_mappings.append(EntityMapping(dp["SrcEntity"], dp["TgtEntity"], relation, dp["Score"]))
return entity_mappings
def __repr__(self):
return f"EntityMapping({self.head} {self.relation} {self.tail}, {round(self.score, 6)})"
class ReferenceMapping(EntityMapping):
r"""A datastructure for entity mapping that acts as a reference mapping.
    A reference mapping is a ground truth entity mapping (with $score = 1.0$) and can
have several entity mappings as candidates. These candidate mappings should have the
same `head` (i.e., source entity) as the reference mapping.
Attributes:
src_entity_iri (str): The IRI of the source entity, usually its IRI if available.
tgt_entity_iri (str): The IRI of the target entity, usually its IRI if available.
relation (str, optional): A symbol that represents what semantic relation this mapping stands for. Defaults to `<?rel>` which means unspecified.
Suggested inputs are `"<EquivalentTo>"` and `"<SubsumedBy>"`.
"""
def __init__(
self,
src_entity_iri: str,
tgt_entity_iri: str,
relation: str = DEFAULT_REL,
candidate_mappings: Optional[List[EntityMapping]] = [],
):
r"""Intialise a reference mapping.
Args:
src_entity_iri (str): The IRI of the source entity, usually its IRI if available.
tgt_entity_iri (str): The IRI of the target entity, usually its IRI if available.
relation (str, optional): A symbol that represents what semantic relation this mapping stands for. Defaults to `<?rel>` which means unspecified.
Suggested inputs are `"<EquivalentTo>"` and `"<SubsumedBy>"`.
candidate_mappings (List[EntityMapping], optional): A list of entity mappings that are candidates for this reference mapping. Defaults to `[]`.
"""
super().__init__(src_entity_iri, tgt_entity_iri, relation, 1.0)
self.candidates = []
for candidate in candidate_mappings:
self.add_candidate(candidate)
def __repr__(self):
reference_mapping_str = f"ReferenceMapping({self.head} {self.relation} {self.tail}, 1.0)"
if self.candidates:
candidate_mapping_str = pprintpp.pformat(self.candidates)
reference_mapping_str += f" with candidates:\n{candidate_mapping_str}"
return reference_mapping_str
def add_candidate(self, candidate_mapping: EntityMapping):
"""Add a candidate mapping whose relation and head entity are the
same as the reference mapping's.
"""
if self.relation != candidate_mapping.relation:
raise ValueError(
f"Expect relation of candidate mapping to be {self.relation} but got {candidate_mapping.relation}"
)
if self.head != candidate_mapping.head:
raise ValueError("Candidate mapping does not have the same head entity as the anchor mapping.")
self.candidates.append(candidate_mapping)
@staticmethod
def read_table_mappings(table_of_mappings_file: str, relation: str = DEFAULT_REL):
r"""Read reference mappings from `.csv` or `.tsv` files.
!!! note "Mapping Table Format"
The columns of the mapping table must have the headings: `"SrcEntity"`, `"TgtEntity"`, and `"Score"`.
Args:
table_of_mappings_file (str): The path to the table (`.csv` or `.tsv`) of mappings.
relation (str, optional): A symbol that represents what semantic relation this mapping stands for. Defaults to `<?rel>` which means unspecified.
Suggested inputs are `"<EquivalentTo>"` and `"<SubsumedBy>"`.
Returns:
(List[ReferenceMapping]): A list of reference mappings loaded from the table file.
"""
return EntityMapping.read_table_mappings(table_of_mappings_file, relation=relation, is_reference=True)
class SubsFromEquivMappingGenerator:
r"""Generating subsumption mappings from gold standard equivalence mappings.
!!! credit "paper"
The online subsumption mapping construction algorithm is proposed in the paper:
[Machine Learning-Friendly Biomedical Datasets for Equivalence and Subsumption Ontology Matching (ISWC 2022)](https://link.springer.com/chapter/10.1007/978-3-031-19433-7_33).
This generator has an attribute `delete_used_equiv_tgt_class` for determining whether or not to sabotage the equivalence
mappings used to create $\geq 1$ subsumption mappings. The reason is that, if the equivalence mapping is broken, then the
OM tool is expected to predict subsumption mappings directly without relying on the equivalence mappings as an intermediate.
Attributes:
src_onto (Ontology): The source ontology.
tgt_onto (Ontology): The target ontology.
equiv_class_pairs (List[Tuple[str, str]]): A list of class pairs (in IRIs) that are **equivalent** according to the input
equivalence mappings.
subs_generation_ratio (int, optional): The maximum number of subsumption mappings generated from each equivalence
mapping. Defaults to `None` which means there is no limit on the number of subsumption mappings.
delete_used_equiv_tgt_class (bool): Whether to mark the target side of an equivalence mapping **used** for creating
at least one subsumption mappings as "deleted". Defaults to `True`.
"""
def __init__(
self,
src_onto: Ontology,
tgt_onto: Ontology,
equiv_mappings: List[ReferenceMapping],
subs_generation_ratio: Optional[int] = None,
delete_used_equiv_tgt_class: bool = True,
):
self.src_onto = src_onto
self.tgt_onto = tgt_onto
self.equiv_class_pairs = [m.to_tuple() for m in equiv_mappings]
self.subs_generation_ratio = subs_generation_ratio
self.delete_used_equiv_tgt_class = delete_used_equiv_tgt_class
subs_from_equivs, self.used_equiv_tgt_class_iris = self.online_construction()
# turn into triples with scores 1.0
self.subs_from_equivs = [(c, p, 1.0) for c, p in subs_from_equivs]
def online_construction(self):
r"""An **online** algorithm for constructing subsumption mappings from gold standard equivalence mappings.
Let $t$ denote the boolean value that indicates if the target class involved in an equivalence mapping
will be deleted. If $t$ is true, then for each equivalent class pair $(c, c')$, do the following:
        1. If $c'$ has been involved in a subsumption mapping, skip this pair as otherwise $c'$ will need to be deleted.
2. For each parent class of $c'$, skip it if it has been marked deleted (i.e., involved in an equivalence mapping that has been used to create a subsumption mapping).
3. If any subsumption mapping has been created from $(c, c')$, mark $c'$ as deleted.
Steps 1 and 2 ensure that target classes that have been **involved in a subsumption mapping** have **no conflicts** with
target classes that have been **used to create a subsumption mapping**.
This algorithm is *online* because the construction and deletion depend on the order of the input equivalent class pairs.
"""
subs_class_pairs = []
in_subs = defaultdict(lambda: False) # in a subsumption mapping
used_equivs = defaultdict(lambda: False) # in a used equivalence mapping
for src_class_iri, tgt_class_iri in self.equiv_class_pairs:
cur_subs_pairs = []
            # NOTE (1) an equiv pair is skipped if its target class is already involved in a subsumption mapping
if self.delete_used_equiv_tgt_class and in_subs[tgt_class_iri]:
continue
# construct subsumption pairs by matching the source class and the target class's parents
tgt_class = self.tgt_onto.get_owl_object_from_iri(tgt_class_iri)
tgt_class_parent_iris = self.tgt_onto.reasoner.get_inferred_super_entities(tgt_class, direct=True)
for parent_iri in tgt_class_parent_iris:
# skip this parent if it is marked as "used"
if self.delete_used_equiv_tgt_class and used_equivs[parent_iri]:
continue
cur_subs_pairs.append((src_class_iri, parent_iri))
# if successfully created, mark this parent as "in"
if self.delete_used_equiv_tgt_class:
in_subs[parent_iri] = True
# mark the target class as "used" because it has been used for creating a subsumption mapping
if self.delete_used_equiv_tgt_class and cur_subs_pairs:
used_equivs[tgt_class_iri] = True
if self.subs_generation_ratio and len(cur_subs_pairs) > self.subs_generation_ratio:
cur_subs_pairs = random.sample(cur_subs_pairs, self.subs_generation_ratio)
subs_class_pairs += cur_subs_pairs
used_equiv_tgt_class_iris = None
if self.delete_used_equiv_tgt_class:
used_equiv_tgt_class_iris = [iri for iri, used in used_equivs.items() if used is True]
print(
f"{len(used_equiv_tgt_class_iris)}/{len(self.equiv_class_pairs)} are used for creating at least one subsumption mapping."
)
subs_class_pairs = DataUtils.uniqify(subs_class_pairs)
print(f"{len(subs_class_pairs)} subsumption mappings are created in the end.")
return subs_class_pairs, used_equiv_tgt_class_iris
def save_subs(self, save_path: str):
"""Save the constructed subsumption mappings (in tuples) to a local `.tsv` file."""
subs_df = pd.DataFrame(self.subs_from_equivs, columns=["SrcEntity", "TgtEntity", "Score"])
subs_df.to_csv(save_path, sep="\t", index=False)
# TODO: to be updated constantly
SAMPLING_OPTIONS = ["idf", "neighbour", "random"]
class NegativeCandidateMappingGenerator:
r"""Generating **negative** candidate mappings for each gold standard mapping.
    Note that the source side of the gold standard mapping is fixed, i.e., candidate mappings are generated
according to the target side.
!!! credit "paper"
The candidate mapping generation algorithm is proposed in the paper:
[Machine Learning-Friendly Biomedical Datasets for Equivalence and Subsumption Ontology Matching (ISWC 2022)](https://link.springer.com/chapter/10.1007/978-3-031-19433-7_33).
"""
def __init__(
self,
src_onto: Ontology,
tgt_onto: Ontology,
reference_class_mappings: List[ReferenceMapping], # equivalence or subsumption
annotation_property_iris: List[str], # for text-based candidates
tokenizer: Tokenizer, # for text-based candidates
max_hops: int = 5, # for graph-based candidates
for_subsumption: bool = False, # if for subsumption, avoid adding ancestors as candidates
):
self.src_onto = src_onto
self.tgt_onto = tgt_onto
self.reference_class_mappings = reference_class_mappings
self.reference_class_dict = defaultdict(list) # to prevent wrongly adding negative candidates
for m in self.reference_class_mappings:
src_class_iri, tgt_class_iri = m.to_tuple()
self.reference_class_dict[src_class_iri].append(tgt_class_iri)
# for IDF sample
self.tgt_annotation_index, self.annotation_property_iris = self.tgt_onto.build_annotation_index(
annotation_property_iris
)
self.tokenizer = tokenizer
self.tgt_inverted_annotation_index = self.tgt_onto.build_inverted_annotation_index(
self.tgt_annotation_index, self.tokenizer
)
# for neighbour sample
self.max_hops = max_hops
# if for subsumption, avoid adding ancestors as candidates
self.for_subsumption = for_subsumption
# if for subsumption, add (src_class, tgt_class_ancestor) into the reference mappings
if self.for_subsumption:
for m in self.reference_class_mappings:
src_class_iri, tgt_class_iri = m.to_tuple()
tgt_class = self.tgt_onto.get_owl_object_from_iri(tgt_class_iri)
tgt_class_ancestors = self.tgt_onto.reasoner.get_inferred_super_entities(tgt_class)
for tgt_ancestor_iri in tgt_class_ancestors:
self.reference_class_dict[src_class_iri].append(tgt_ancestor_iri)
def mixed_sample(self, reference_class_mapping: ReferenceMapping, **strategy2nums):
"""A mixed sampling approach that combines several sampling strategies.
As introduced in the Bio-ML paper, this mixed approach guarantees that the number of samples for each
strategy is either the **maximum that can be sampled** or the required number.
Specifically, at each sampling iteration, the number of candidates is **first increased by the number of
previously sampled candidates**, as in the worst case, all the candidates sampled at this iteration
        will be duplicates of the previously sampled candidates.
The random sampling is used as the amending strategy, i.e., if other sampling strategies cannot retrieve
the specified number of samples, then use random sampling to amend the number.
Args:
reference_class_mapping (ReferenceMapping): The reference class mapping for generating the candidate mappings.
**strategy2nums (int): The keyword arguments that specify the expected number of candidates for each
sampling strategy.
"""
valid_tgt_candidate_iris = []
sample_stats = defaultdict(lambda: 0)
i = 0
total_num_candidates = 0
        for strategy, num_candidates in strategy2nums.items():
i += 1
if strategy in SAMPLING_OPTIONS:
sampler = getattr(self, f"{strategy}_sample")
# for ith iteration, the worst case is when all n_cands are duplicated
# or should be excluded from other reference targets so we generate
# NOTE: total_num_candidates + num_candidates + len(excluded_tgt_class_iris)
# candidates first and prune the rest; another edge case is when sampled
# candidates are not sufficient and we use random sample to meet n_cands
cur_valid_tgt_candidate_iris = sampler(
                    reference_class_mapping, total_num_candidates + num_candidates
)
# remove the duplicated candidates (and excluded refs) and prune the tail
cur_valid_tgt_candidate_iris = list(
set(cur_valid_tgt_candidate_iris) - set(valid_tgt_candidate_iris)
                )[:num_candidates]
sample_stats[strategy] += len(cur_valid_tgt_candidate_iris)
# use random samples for complementation if not enough
                while len(cur_valid_tgt_candidate_iris) < num_candidates:
amend_candidate_iris = self.random_sample(
                        reference_class_mapping, num_candidates - len(cur_valid_tgt_candidate_iris)
)
amend_candidate_iris = list(
set(amend_candidate_iris)
- set(valid_tgt_candidate_iris)
- set(cur_valid_tgt_candidate_iris)
)
cur_valid_tgt_candidate_iris += amend_candidate_iris
                assert len(cur_valid_tgt_candidate_iris) == num_candidates
# record how many random samples to amend
if strategy != "random":
sample_stats["random"] += num_canddiates - sample_stats[strategy]
valid_tgt_candidate_iris += cur_valid_tgt_candidate_iris
                total_num_candidates += num_candidates
else:
raise ValueError(f"Invalid sampling trategy: {strategy}.")
assert len(valid_tgt_candidate_iris) == total_num_candidates
# TODO: add the candidate mappings into the reference mapping
return valid_tgt_candidate_iris, sample_stats
def random_sample(self, reference_class_mapping: ReferenceMapping, num_candidates: int):
r"""**Randomly** sample a set of target class candidates $c'_{cand}$ for a given reference mapping $(c, c')$.
The sampled candidate classes will be combined with the source reference class $c$ to get a set of
candidate mappings $\{(c, c'_{cand})\}$.
Args:
reference_class_mapping (ReferenceMapping): The reference class mapping for generating the candidate mappings.
num_candidates (int): The expected number of candidate mappings to generate.
"""
ref_src_class_iri, ref_tgt_class_iri = reference_class_mapping.to_tuple()
all_tgt_class_iris = set(self.tgt_onto.owl_classes.keys())
valid_tgt_class_iris = all_tgt_class_iris - set(
self.reference_class_dict[ref_src_class_iri]
) # exclude gold standards
assert not ref_tgt_class_iri in valid_tgt_class_iris
        return random.sample(list(valid_tgt_class_iris), num_candidates)  # list() since random.sample rejects sets (Python 3.11+)
def idf_sample(self, reference_class_mapping: ReferenceMapping, num_candidates: int):
r"""Sample a set of target class candidates $c'_{cand}$ for a given reference mapping $(c, c')$ based on the $idf$ scores
w.r.t. the inverted annotation index (sub-word level).
Candidate classes with higher $idf$ scores will be considered first, and then combined with the source reference class $c$
to get a set of candidate mappings $\{(c, c'_{cand})\}$.
Args:
reference_class_mapping (ReferenceMapping): The reference class mapping for generating the candidate mappings.
num_candidates (int): The expected number of candidate mappings to generate.
"""
ref_src_class_iri, ref_tgt_class_iri = reference_class_mapping.to_tuple()
tgt_candidates = self.tgt_inverted_annotation_index.idf_select(
self.tgt_annotation_index[ref_tgt_class_iri]
) # select all non-trivial candidates first
valid_tgt_class_iris = []
for tgt_candidate_iri, _ in tgt_candidates:
# valid as long as it is not one of the reference target
if tgt_candidate_iri not in self.reference_class_dict[ref_src_class_iri]:
valid_tgt_class_iris.append(tgt_candidate_iri)
if len(valid_tgt_class_iris) == num_candidates:
break
assert not ref_tgt_class_iri in valid_tgt_class_iris
return valid_tgt_class_iris
def neighbour_sample(self, reference_class_mapping: ReferenceMapping, num_candidates: int):
r"""Sample a set of target class candidates $c'_{cand}$ for a given reference mapping $(c, c')$ based on the **subsumption
hierarchy**.
Define one-hop as one edge derived from an **asserted** subsumption axiom, i.e., to the parent class or the child class.
        Candidate classes with nearer hops will be considered first, and then combined with the source reference class $c$
to get a set of candidate mappings $\{(c, c'_{cand})\}$.
Args:
reference_class_mapping (ReferenceMapping): The reference class mapping for generating the candidate mappings.
num_candidates (int): The expected number of candidate mappings to generate.
"""
ref_src_class_iri, ref_tgt_class_iri = reference_class_mapping.to_tuple()
valid_tgt_class_iris = set()
cur_hop = 1
frontier = [ref_tgt_class_iri]
# extract from the nearest neighbours until enough candidates or max hop
while len(valid_tgt_class_iris) < num_candidates and cur_hop <= self.max_hops:
neighbours_of_cur_hop = []
for tgt_class_iri in frontier:
tgt_class = self.tgt_onto.get_owl_object_from_iri(tgt_class_iri)
parents = self.tgt_onto.reasoner.get_inferred_super_entities(tgt_class, direct=True)
children = self.tgt_onto.reasoner.get_inferred_sub_entities(tgt_class, direct=True)
neighbours_of_cur_hop += parents + children # used for further hop expansion
valid_neighbours_of_cur_hop = set(neighbours_of_cur_hop) - set(self.reference_class_dict[ref_src_class_iri])
# print(valid_neighbours_of_cur_hop)
            # NOTE if adding all the neighbours of the current hop would exceed the required number,
            # we randomly pick just enough of them
if len(valid_neighbours_of_cur_hop) > num_candidates - len(valid_tgt_class_iris):
valid_neighbours_of_cur_hop = random.sample(
                    list(valid_neighbours_of_cur_hop), num_candidates - len(valid_tgt_class_iris)  # list() since random.sample rejects sets (Python 3.11+)
)
valid_tgt_class_iris.update(valid_neighbours_of_cur_hop)
frontier = neighbours_of_cur_hop # update the frontier with all possible neighbors
cur_hop += 1
assert not ref_tgt_class_iri in valid_tgt_class_iris
return list(valid_tgt_class_iris)
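# --- Usage sketch: a minimal, hypothetical demo of the mapping datastructures above; the IRIs and
# scores are made up and no ontology objects are required for these classes.
if __name__ == "__main__":
    best = EntityMapping("http://src/A", "http://tgt/B", "<EquivalentTo>", 0.98)
    other = EntityMapping("http://src/A", "http://tgt/C", "<EquivalentTo>", 0.75)
    # sort candidate mappings by score (descending) and keep the top-1
    print(EntityMapping.sort_entity_mappings_by_score([other, best], k=1))
    # a reference mapping always has score 1.0 and may carry candidates sharing its head entity
    ref = ReferenceMapping("http://src/A", "http://tgt/B", "<EquivalentTo>", candidate_mappings=[best, other])
    print(ref.to_tuple(with_score=True))  # ('http://src/A', 'http://tgt/B', 1.0)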
| 28,009 | 50.583794 | 182 |
py
|
DeepOnto
|
DeepOnto-main/src/deeponto/align/bertmap/mapping_prediction.py
|
# Copyright 2021 Yuan He. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from typing import Optional, List, Set, Tuple
from yacs.config import CfgNode
import os
from textdistance import levenshtein
from logging import Logger
import itertools
import torch
import pandas as pd
import enlighten
from deeponto.align.mapping import EntityMapping
from deeponto.onto import Ontology
from deeponto.utils import FileUtils, Tokenizer
from .bert_classifier import BERTSynonymClassifier
# @paper(
# "BERTMap: A BERT-based Ontology Alignment System (AAAI-2022)",
# "https://ojs.aaai.org/index.php/AAAI/article/view/20510",
# )
class MappingPredictor:
r"""Class for the mapping prediction module of $\textsf{BERTMap}$ and $\textsf{BERTMapLt}$ models.
Attributes:
tokenizer (Tokenizer): The tokenizer used for constructing the inverted annotation index and candidate selection.
src_annotation_index (dict): A dictionary that stores the `(class_iri, class_annotations)` pairs from `src_onto` according to `annotation_property_iris`.
tgt_annotation_index (dict): A dictionary that stores the `(class_iri, class_annotations)` pairs from `tgt_onto` according to `annotation_property_iris`.
tgt_inverted_annotation_index (InvertedIndex): The inverted index built from `tgt_annotation_index` used for target class candidate selection.
bert_synonym_classifier (BERTSynonymClassifier, optional): The BERT synonym classifier fine-tuned on text semantics corpora.
num_raw_candidates (int): The maximum number of selected target class candidates for a source class.
        num_best_predictions (int): The maximum number of best scored mappings preserved for a source class.
batch_size_for_prediction (int): The batch size of class annotation pairs for computing synonym scores.
"""
def __init__(
self,
output_path: str,
tokenizer_path: str,
src_annotation_index: dict,
tgt_annotation_index: dict,
bert_synonym_classifier: Optional[BERTSynonymClassifier],
num_raw_candidates: Optional[int],
num_best_predictions: Optional[int],
batch_size_for_prediction: int,
logger: Logger,
enlighten_manager: enlighten.Manager,
enlighten_status: enlighten.StatusBar,
):
self.logger = logger
self.enlighten_manager = enlighten_manager
self.enlighten_status = enlighten_status
self.tokenizer = Tokenizer.from_pretrained(tokenizer_path)
self.logger.info("Build inverted annotation index for candidate selection.")
self.src_annotation_index = src_annotation_index
self.tgt_annotation_index = tgt_annotation_index
self.tgt_inverted_annotation_index = Ontology.build_inverted_annotation_index(
tgt_annotation_index, self.tokenizer
)
# the fundamental judgement for whether bertmap or bertmaplt is loaded
self.bert_synonym_classifier = bert_synonym_classifier
self.num_raw_candidates = num_raw_candidates
self.num_best_predictions = num_best_predictions
self.batch_size_for_prediction = batch_size_for_prediction
self.output_path = output_path
self.init_class_mapping = lambda head, tail, score: EntityMapping(head, tail, "<EquivalentTo>", score)
def bert_mapping_score(
self,
src_class_annotations: Set[str],
tgt_class_annotations: Set[str],
):
r"""$\textsf{BERTMap}$'s main mapping score module which utilises the fine-tuned BERT synonym
classifier.
Compute the **synonym score** for each pair of src-tgt class annotations, and return
the **average** score as the mapping score. Apply string matching before applying the
BERT module to filter easy mappings (with scores $1.0$).
"""
# apply string matching before applying the bert module
prelim_score = self.edit_similarity_mapping_score(
src_class_annotations,
tgt_class_annotations,
string_match_only=True,
)
if prelim_score == 1.0:
return prelim_score
# apply BERT classifier and define mapping score := Average(SynonymScores)
class_annotation_pairs = list(itertools.product(src_class_annotations, tgt_class_annotations))
synonym_scores = self.bert_synonym_classifier.predict(class_annotation_pairs)
        # only a single-element tensor can be converted to a Python scalar by .item()
return float(torch.mean(synonym_scores).item())
@staticmethod
def edit_similarity_mapping_score(
src_class_annotations: Set[str],
tgt_class_annotations: Set[str],
string_match_only: bool = False,
):
r"""$\textsf{BERTMap}$'s string match module and $\textsf{BERTMapLt}$'s mapping prediction function.
Compute the **normalised edit similarity** `(1 - normalised edit distance)` for each pair
of src-tgt class annotations, and return the **maximum** score as the mapping score.
"""
# edge case when src and tgt classes have an exact match of annotation
if len(src_class_annotations.intersection(tgt_class_annotations)) > 0:
return 1.0
# a shortcut to save time for $\textsf{BERTMap}$
if string_match_only:
return 0.0
annotation_pairs = itertools.product(src_class_annotations, tgt_class_annotations)
sim_scores = [levenshtein.normalized_similarity(src, tgt) for src, tgt in annotation_pairs]
return max(sim_scores)
def mapping_prediction_for_src_class(self, src_class_iri: str) -> List[EntityMapping]:
r"""Predict $N$ best scored mappings for a source ontology class, where
$N$ is specified in `self.num_best_predictions`.
1. Apply the **string matching** module to compute "easy" mappings.
2. Return the mappings if found any, or if there is no BERT synonym classifier
as in $\textsf{BERTMapLt}$.
3. If using the BERT synonym classifier module:
- Generate batches for class annotation pairs. Each batch contains the combinations of the
source class annotations and $M$ target candidate classes' annotations. $M$ is determined
by `batch_size_for_prediction`, i.e., stop adding annotations of a target class candidate into
the current batch if this operation will cause the size of current batch to exceed the limit.
- Compute the synonym scores for each batch and aggregate them into mapping scores; preserve
$N$ best scored candidates and update them in the next batch. By this dynamic process, we eventually
get $N$ best scored mappings for a source ontology class.
"""
src_class_annotations = self.src_annotation_index[src_class_iri]
# previously wrongly put tokenizer again !!!
tgt_class_candidates = self.tgt_inverted_annotation_index.idf_select(
list(src_class_annotations), pool_size=self.num_raw_candidates
) # [(tgt_class_iri, idf_score)]
best_scored_mappings = []
# for string matching: save time if already found string-matched candidates
def string_match():
"""Compute string-matched mappings."""
string_matched_mappings = []
for tgt_candidate_iri, _ in tgt_class_candidates:
tgt_candidate_annotations = self.tgt_annotation_index[tgt_candidate_iri]
prelim_score = self.edit_similarity_mapping_score(
src_class_annotations,
tgt_candidate_annotations,
string_match_only=True,
)
if prelim_score > 0.0:
# if src_class_annotations.intersection(tgt_candidate_annotations):
string_matched_mappings.append(
self.init_class_mapping(src_class_iri, tgt_candidate_iri, prelim_score)
)
return string_matched_mappings
best_scored_mappings += string_match()
# return string-matched mappings if found or if there is no bert module (bertmaplt)
if best_scored_mappings or not self.bert_synonym_classifier:
self.logger.info(f"The best scored class mappings for {src_class_iri} are\n{best_scored_mappings}")
return best_scored_mappings
def generate_batched_annotations(batch_size: int):
"""Generate batches of class annotations for the input source class and its
target candidates.
"""
batches = []
            # the `nums` parameter determines how the annotations are grouped
current_batch = CfgNode({"annotations": [], "nums": []})
for i, (tgt_candidate_iri, _) in enumerate(tgt_class_candidates):
tgt_candidate_annotations = self.tgt_annotation_index[tgt_candidate_iri]
annotation_pairs = list(itertools.product(src_class_annotations, tgt_candidate_annotations))
current_batch.annotations += annotation_pairs
num_annotation_pairs = len(annotation_pairs)
current_batch.nums.append(num_annotation_pairs)
# collect when the batch is full or for the last target class candidate
if sum(current_batch.nums) > batch_size or i == len(tgt_class_candidates) - 1:
batches.append(current_batch)
current_batch = CfgNode({"annotations": [], "nums": []})
return batches
def bert_match():
"""Compute mappings with fine-tuned BERT synonym classifier."""
bert_matched_mappings = []
class_annotation_batches = generate_batched_annotations(self.batch_size_for_prediction)
batch_base_candidate_idx = (
0 # after each batch, the base index will be increased by # of covered target candidates
)
device = self.bert_synonym_classifier.device
            # initialize N prediction scores and N corresponding indices w.r.t `tgt_class_candidates`
final_best_scores = torch.tensor([-1] * self.num_best_predictions).to(device)
final_best_idxs = torch.tensor([-1] * self.num_best_predictions).to(device)
for annotation_batch in class_annotation_batches:
synonym_scores = self.bert_synonym_classifier.predict(annotation_batch.annotations)
                # aggregate synonym scores into mapping scores
grouped_synonym_scores = torch.split(
synonym_scores,
split_size_or_sections=annotation_batch.nums,
)
mapping_scores = torch.stack([torch.mean(chunk) for chunk in grouped_synonym_scores])
assert len(mapping_scores) == len(annotation_batch.nums)
# preserve N best scored mappings
                # scale N in case there are fewer than N tgt candidates in this batch
N = min(len(mapping_scores), self.num_best_predictions)
batch_best_scores, batch_best_idxs = torch.topk(mapping_scores, k=N)
batch_best_idxs += batch_base_candidate_idx
                # we do the substitution for every batch to prevent memory overflow
final_best_scores, _idxs = torch.topk(
torch.cat([batch_best_scores, final_best_scores]),
k=self.num_best_predictions,
)
final_best_idxs = torch.cat([batch_best_idxs, final_best_idxs])[_idxs]
# update the index for target candidate classes
batch_base_candidate_idx += len(annotation_batch.nums)
for candidate_idx, mapping_score in zip(final_best_idxs, final_best_scores):
                # ignore initial values (-1.0) for dummy mappings
# the threshold 0.9 is for mapping extension
if mapping_score.item() >= 0.9:
tgt_candidate_iri = tgt_class_candidates[candidate_idx.item()][0]
bert_matched_mappings.append(
self.init_class_mapping(
src_class_iri,
tgt_candidate_iri,
mapping_score.item(),
)
)
assert len(bert_matched_mappings) <= self.num_best_predictions
self.logger.info(f"The best scored class mappings for {src_class_iri} are\n{bert_matched_mappings}")
return bert_matched_mappings
return bert_match()
def mapping_prediction(self):
r"""Apply global matching for each class in the source ontology.
See [`mapping_prediction_for_src_class`][deeponto.align.bertmap.mapping_prediction.MappingPredictor.mapping_prediction_for_src_class].
If this process is accidentally stopped, it can be resumed from already saved predictions. The progress
bar keeps track of the number of source ontology classes that have been matched.
"""
self.logger.info("Start global matching for each class in the source ontology.")
match_dir = os.path.join(self.output_path, "match")
try:
mapping_index = FileUtils.load_file(os.path.join(match_dir, "raw_mappings.json"))
self.logger.info("Load the existing mapping prediction file.")
        except Exception:  # fall back to an empty mapping index if no saved predictions can be loaded
mapping_index = dict()
FileUtils.create_path(match_dir)
progress_bar = self.enlighten_manager.counter(
total=len(self.src_annotation_index), desc="Mapping Prediction", unit="per src class"
)
self.enlighten_status.update(demo="Mapping Prediction")
for i, src_class_iri in enumerate(self.src_annotation_index.keys()):
if src_class_iri in mapping_index.keys():
self.logger.info(f"[Class {i}] Skip matching {src_class_iri} as already computed.")
progress_bar.update()
continue
mappings = self.mapping_prediction_for_src_class(src_class_iri)
mapping_index[src_class_iri] = [m.to_tuple(with_score=True) for m in mappings]
if i % 100 == 0 or i == len(self.src_annotation_index) - 1:
FileUtils.save_file(mapping_index, os.path.join(match_dir, "raw_mappings.json"))
# also save a .tsv version
mapping_in_tuples = list(itertools.chain.from_iterable(mapping_index.values()))
mapping_df = pd.DataFrame(mapping_in_tuples, columns=["SrcEntity", "TgtEntity", "Score"])
mapping_df.to_csv(os.path.join(match_dir, "raw_mappings.tsv"), sep="\t", index=False)
self.logger.info("Save currently computed mappings to prevent undesirable loss.")
progress_bar.update()
self.logger.info("Finished mapping prediction for each class in the source ontology.")
progress_bar.close()
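# --- Usage sketch: the string-matching scorer above is a static method, so it can be exercised on
# its own with made-up class annotations (no ontology, index or BERT checkpoint needed); since this
# module uses relative imports, the sketch assumes the usual package context.
if __name__ == "__main__":
    src_annotations = {"heart disease", "disorder of heart"}
    # no exact overlap, so the score is the maximum normalised edit similarity over all pairs
    print(MappingPredictor.edit_similarity_mapping_score(src_annotations, {"cardiac disease"}))
    # an exact annotation overlap short-circuits to 1.0, even in string-match-only mode
    print(MappingPredictor.edit_similarity_mapping_score(src_annotations, {"heart disease"}, string_match_only=True))  # 1.0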
| 15,548 | 49.980328 | 161 |
py
|
DeepOnto
|
DeepOnto-main/src/deeponto/align/bertmap/mapping_refinement.py
|
# Copyright 2021 Yuan He. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from typing import List, Tuple
import os
from logging import Logger
import itertools
import random
import pandas as pd
import enlighten
from deeponto.align.mapping import EntityMapping
from deeponto.onto import Ontology
from deeponto.utils import FileUtils, Tokenizer
from deeponto.utils.decorators import paper
from deeponto.align.logmap import run_logmap_repair
from .mapping_prediction import MappingPredictor
# @paper(
# "BERTMap: A BERT-based Ontology Alignment System (AAAI-2022)",
# "https://ojs.aaai.org/index.php/AAAI/article/view/20510",
# )
class MappingRefiner:
r"""Class for the mapping refinement module of $\textsf{BERTMap}$.
    $\textsf{BERTMapLt}$ does not go through mapping refinement because it is designed to be "light".
All the attributes of this class are supposed to be passed from `BERTMapPipeline`.
Attributes:
src_onto (Ontology): The source ontology to be matched.
tgt_onto (Ontology): The target ontology to be matched.
mapping_predictor (MappingPredictor): The mapping prediction module of BERTMap.
mapping_extension_threshold (float): Mappings with scores $\geq$ this value will be considered in the iterative mapping extension process.
raw_mappings (List[EntityMapping]): List of **raw class mappings** predicted in the **global matching** phase.
mapping_score_dict (dict): A dynamic dictionary that keeps track of mappings (with scores) that have already been computed.
mapping_filter_threshold (float): Mappings with scores $\geq$ this value will be preserved for the final mapping repairing.
"""
def __init__(
self,
output_path: str,
src_onto: Ontology,
tgt_onto: Ontology,
mapping_predictor: MappingPredictor,
mapping_extension_threshold: float,
mapping_filtered_threshold: float,
logger: Logger,
enlighten_manager: enlighten.Manager,
enlighten_status: enlighten.StatusBar
):
self.output_path = output_path
self.logger = logger
self.enlighten_manager = enlighten_manager
self.enlighten_status = enlighten_status
self.src_onto = src_onto
self.tgt_onto = tgt_onto
# iterative mapping extension
self.mapping_predictor = mapping_predictor
self.mapping_extension_threshold = mapping_extension_threshold # \kappa
self.raw_mappings = EntityMapping.read_table_mappings(
os.path.join(self.output_path, "match", "raw_mappings.tsv"),
threshold=self.mapping_extension_threshold,
relation="<EquivalentTo>",
)
# keep track of already scored mappings to prevent duplicated predictions
self.mapping_score_dict = dict()
for m in self.raw_mappings:
src_class_iri, tgt_class_iri, score = m.to_tuple(with_score=True)
self.mapping_score_dict[(src_class_iri, tgt_class_iri)] = score
# the threshold for final filtering the extended mappings
self.mapping_filtered_threshold = mapping_filtered_threshold # \lambda
# logmap mapping repair folder
self.logmap_repair_path = os.path.join(self.output_path, "match", "logmap-repair")
# paths for mapping extension and repair
self.extended_mapping_path = os.path.join(self.output_path, "match", "extended_mappings.tsv")
self.filtered_mapping_path = os.path.join(self.output_path, "match", "filtered_mappings.tsv")
self.repaired_mapping_path = os.path.join(self.output_path, "match", "repaired_mappings.tsv")
def mapping_extension(self, max_iter: int = 10):
r"""Iterative mapping extension based on the locality principle.
For each class pair $(c, c')$ (scored in the global matching phase) with score
$\geq \kappa$, search for plausible mappings between the parents of $c$ and $c'$,
        and between the children of $c$ and $c'$. This is an iterative process as the set of
        newly discovered mappings can renew the frontier for searching. Terminate if
no new mappings with score $\geq \kappa$ can be found or the limit `max_iter` has
been reached. Note that $\kappa$ is set to $0.9$ by default (can be altered
in the configuration file). The mapping extension progress bar keeps track of the
total number of extended mappings (including the previously predicted ones).
A further filtering will be performed by only preserving mappings with score $\geq \lambda$,
in the original BERTMap paper, $\lambda$ is determined by the validation mappings, but
in practice $\lambda$ is not a sensitive hyperparameter and validation mappings are often
not available. Therefore, we manually set $\lambda$ to $0.9995$ by default (can be altered
in the configuration file). The mapping filtering progress bar keeps track of the
total number of filtered mappings (this bar is purely for logging purpose).
Args:
max_iter (int, optional): The maximum number of mapping extension iterations. Defaults to `10`.
"""
num_iter = 0
self.enlighten_status.update(demo="Mapping Extension")
extension_progress_bar = self.enlighten_manager.counter(
desc=f"Mapping Extension [Iteration #{num_iter}]", unit="mapping"
)
filtering_progress_bar = self.enlighten_manager.counter(
desc=f"Mapping Filtering", unit="mapping"
)
if os.path.exists(self.extended_mapping_path) and os.path.exists(self.filtered_mapping_path):
self.logger.info(
f"Found extended and filtered mapping files at {self.extended_mapping_path}"
+ f" and {self.filtered_mapping_path}.\nPlease check file integrity; if incomplete, "
+ "delete them and re-run the program."
)
# for animation purposes
extension_progress_bar.desc = f"Mapping Extension"
for _ in EntityMapping.read_table_mappings(self.extended_mapping_path):
extension_progress_bar.update()
self.enlighten_status.update(demo="Mapping Filtering")
for _ in EntityMapping.read_table_mappings(self.filtered_mapping_path):
filtering_progress_bar.update()
extension_progress_bar.close()
filtering_progress_bar.close()
return
        # initialise the frontier and (final) expansion sets with the raw mappings
# NOTE be careful of address pointers
frontier = [m.to_tuple() for m in self.raw_mappings]
expansion = [m.to_tuple(with_score=True) for m in self.raw_mappings]
# for animation purposes
for _ in range(len(expansion)):
extension_progress_bar.update()
self.logger.info(
f"Start mapping extension for each class pair with score >= {self.mapping_extension_threshold}."
)
while frontier and num_iter < max_iter:
new_mappings = []
for src_class_iri, tgt_class_iri in frontier:
# one hop extension makes sure new mappings are really "new"
cur_new_mappings = self.one_hop_extend(src_class_iri, tgt_class_iri)
extension_progress_bar.update(len(cur_new_mappings))
new_mappings += cur_new_mappings
# add new mappings to the expansion set
expansion += new_mappings
# renew frontier with the newly discovered mappings
frontier = [(x, y) for x, y, _ in new_mappings]
self.logger.info(f"Add {len(new_mappings)} mappings at iteration #{num_iter}.")
num_iter += 1
extension_progress_bar.desc = f"Mapping Extension [Iteration #{num_iter}]"
num_extended = len(expansion) - len(self.raw_mappings)
self.logger.info(
f"Finished iterative mapping extension with {num_extended} new mappings and in total {len(expansion)} extended mappings."
)
extended_mapping_df = pd.DataFrame(expansion, columns=["SrcEntity", "TgtEntity", "Score"])
extended_mapping_df.to_csv(self.extended_mapping_path, sep="\t", index=False)
self.enlighten_status.update(demo="Mapping Filtering")
filtered_expansion = [
(src, tgt, score) for src, tgt, score in expansion if score >= self.mapping_filtered_threshold
]
self.logger.info(
f"Filtered the extended mappings by a threshold of {self.mapping_filtered_threshold}."
+ f"There are {len(filtered_expansion)} mappings left for mapping repair."
)
for _ in range(len(filtered_expansion)):
filtering_progress_bar.update()
filtered_mapping_df = pd.DataFrame(filtered_expansion, columns=["SrcEntity", "TgtEntity", "Score"])
filtered_mapping_df.to_csv(self.filtered_mapping_path, sep="\t", index=False)
extension_progress_bar.close()
filtering_progress_bar.close()
return filtered_expansion
def one_hop_extend(self, src_class_iri: str, tgt_class_iri: str, pool_size: int = 200):
r"""Extend mappings from a scored class pair $(c, c')$ by
searching from one-hop neighbors.
Search for plausible mappings between the parents of $c$ and $c'$,
and between the children of $c$ and $c'$. Mappings that are not
already computed (recorded in `self.mapping_score_dict`) and have
a score $\geq$ `self.mapping_extension_threshold` will be returned as
**new** mappings.
Args:
src_class_iri (str): The IRI of the source ontology class $c$.
tgt_class_iri (str): The IRI of the target ontology class $c'$.
pool_size (int, optional): The maximum number of plausible mappings to be extended. Defaults to 200.
Returns:
(List[EntityMapping]): A list of one-hop extended mappings.
"""
def get_iris(owl_objects):
return [str(x.getIRI()) for x in owl_objects]
src_class = self.src_onto.get_owl_object_from_iri(src_class_iri)
src_class_parent_iris = get_iris(self.src_onto.get_asserted_parents(src_class, named_only=True))
src_class_children_iris = get_iris(self.src_onto.get_asserted_children(src_class, named_only=True))
tgt_class = self.tgt_onto.get_owl_object_from_iri(tgt_class_iri)
tgt_class_parent_iris = get_iris(self.tgt_onto.get_asserted_parents(tgt_class, named_only=True))
tgt_class_children_iris = get_iris(self.tgt_onto.get_asserted_children(tgt_class, named_only=True))
# pair up parents and children, respectively; NOTE set() might not be necessary
parent_pairs = list(set(itertools.product(src_class_parent_iris, tgt_class_parent_iris)))
children_pairs = list(set(itertools.product(src_class_children_iris, tgt_class_children_iris)))
candidate_pairs = parent_pairs + children_pairs
# downsample if the number of candidates is too large
if len(candidate_pairs) > pool_size:
candidate_pairs = random.sample(candidate_pairs, pool_size)
extended_mappings = []
        for src_candidate_iri, tgt_candidate_iri in candidate_pairs:  # use the (possibly downsampled) candidate pairs
# if already computed meaning that it is not a new mapping
if (src_candidate_iri, tgt_candidate_iri) in self.mapping_score_dict:
continue
src_candidate_annotations = self.mapping_predictor.src_annotation_index[src_candidate_iri]
tgt_candidate_annotations = self.mapping_predictor.tgt_annotation_index[tgt_candidate_iri]
score = self.mapping_predictor.bert_mapping_score(src_candidate_annotations, tgt_candidate_annotations)
# add to already scored collection
self.mapping_score_dict[(src_candidate_iri, tgt_candidate_iri)] = score
# skip mappings with low scores
if score < self.mapping_extension_threshold:
continue
extended_mappings.append((src_candidate_iri, tgt_candidate_iri, score))
self.logger.info(
f"New mappings (in tuples) extended from {(src_class_iri, tgt_class_iri)} are:\n" + f"{extended_mappings}"
)
return extended_mappings
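    # Illustrative sketch of one-hop extension (hypothetical IRIs, not part of the original code):
    # if (A, A') is a scored mapping with score >= kappa, the named parents/children of A are paired
    # with the named parents/children of A', e.g. (A.parent, A'.parent); each not-yet-scored pair is
    # scored with `self.mapping_predictor.bert_mapping_score` and kept only if it also reaches kappa.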
def mapping_repair(self):
"""Repair the filtered mappings with LogMap's debugger.
!!! note
A sub-folder under `match` named `logmap-repair` contains LogMap-related intermediate files.
"""
# progress bar for animation purposes
self.enlighten_status.update(demo="Mapping Repairing")
repair_progress_bar = self.enlighten_manager.counter(
desc=f"Mapping Repairing", unit="mapping"
)
# skip repairing if already found the file
if os.path.exists(self.repaired_mapping_path):
self.logger.info(
f"Found the repaired mapping file at {self.repaired_mapping_path}."
+ "\nPlease check file integrity; if incomplete, "
+ "delete it and re-run the program."
)
# update progress bar for animation purposes
for _ in EntityMapping.read_table_mappings(self.repaired_mapping_path):
repair_progress_bar.update()
repair_progress_bar.close()
return
# start mapping repair
self.logger.info("Repair the filtered mappings with LogMap debugger.")
# formatting the filtered mappings
self.logmap_repair_formatting()
# run the LogMap repair module on the extended mappings
run_logmap_repair(
self.src_onto.owl_path,
self.tgt_onto.owl_path,
os.path.join(self.logmap_repair_path, f"filtered_mappings_for_LogMap_repair.txt"),
self.logmap_repair_path,
)
# create table mappings from LogMap repair outputs
with open(os.path.join(self.logmap_repair_path, "mappings_repaired_with_LogMap.tsv"), "r") as f:
lines = f.readlines()
with open(os.path.join(self.output_path, "match", "repaired_mappings.tsv"), "w+") as f:
f.write("SrcEntity\tTgtEntity\tScore\n")
for line in lines:
src_ent_iri, tgt_ent_iri, score = line.split("\t")
f.write(f"{src_ent_iri}\t{tgt_ent_iri}\t{score}")
repair_progress_bar.update()
self.logger.info("Mapping repair finished.")
repair_progress_bar.close()
def logmap_repair_formatting(self):
"""Transform the filtered mapping file into the LogMap format.
An auxiliary function of the mapping repair module which requires mappings
to be formatted as LogMap's input format.
"""
# read the filtered mapping file and convert to tuples
filtered_mappings = EntityMapping.read_table_mappings(self.filtered_mapping_path)
filtered_mappings_in_tuples = [m.to_tuple(with_score=True) for m in filtered_mappings]
# write the mappings into logmap format
lines = []
for src_class_iri, tgt_class_iri, score in filtered_mappings_in_tuples:
lines.append(f"{src_class_iri}|{tgt_class_iri}|=|{score}|CLS\n")
# create a path to prevent error
FileUtils.create_path(self.logmap_repair_path)
formatted_file = os.path.join(self.logmap_repair_path, f"filtered_mappings_for_LogMap_repair.txt")
with open(formatted_file, "w") as f:
f.writelines(lines)
return lines
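# A minimal illustration of the LogMap input format produced by `logmap_repair_formatting`
# (hypothetical IRIs and score, not part of the original code): a filtered mapping
# ("http://onto1#A", "http://onto2#B", 0.9997) is written as the line
#     http://onto1#A|http://onto2#B|=|0.9997|CLS
# which `run_logmap_repair` then consumes together with the two ontology files.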
| 16,390 | 46.648256 | 146 |
py
|
DeepOnto
|
DeepOnto-main/src/deeponto/align/bertmap/__init__.py
|
# Copyright 2021 Yuan He. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .pipeline import BERTMapPipeline, DEFAULT_CONFIG_FILE
# @paper(
# "BERTMap: A BERT-based Ontology Alignment System (AAAI-2022)",
# "https://ojs.aaai.org/index.php/AAAI/article/view/20510",
# )
| 797 | 38.9 | 74 |
py
|
DeepOnto
|
DeepOnto-main/src/deeponto/align/bertmap/text_semantics.py
|
# Copyright 2021 Yuan He. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import networkx as nx
import itertools
import random
import os
from typing import List, Set, Tuple, Optional, Union
from deeponto.onto import Ontology
from deeponto.align.mapping import ReferenceMapping
from deeponto.utils import FileUtils, DataUtils
# @paper(
# "BERTMap: A BERT-based Ontology Alignment System (AAAI-2022)",
# "https://ojs.aaai.org/index.php/AAAI/article/view/20510",
# )
class AnnotationThesaurus:
"""A thesaurus class for synonyms and non-synonyms extracted from an ontology.
Some related definitions of arguments here:
- A `synonym_group` is a set of annotation phrases that are synonymous to each other;
- The `transitivity` of synonyms means if A and B are synonymous and B and C are synonymous,
then A and C are synonymous. This is achieved by a connected graph-based algorithm.
- A `synonym_pair` is a pair synonymous annotation phrase which can be extracted from
the cartesian product of a `synonym_group` and itself. NOTE that **reflexivity** and **symmetry**
are preserved meaning that *(i)* every phrase A is a synonym of itself and *(ii)* if (A, B) is
a synonym pair then (B, A) is a synonym pair, too.
Attributes:
onto (Ontology): An ontology to construct the annotation thesaurus from.
annotation_index (Dict[str, Set[str]]): An index of the class annotations with `(class_iri, annotations)` pairs.
annotation_property_iris (List[str]): A list of annotation property IRIs used to extract the annotations.
average_number_of_annotations_per_class (int): The average number of (extracted) annotations per ontology class.
apply_transitivity (bool): Apply synonym transitivity to merge synonym groups or not.
synonym_groups (List[Set[str]]): The list of synonym groups extracted from the ontology according to specified annotation properties.
"""
def __init__(self, onto: Ontology, annotation_property_iris: List[str], apply_transitivity: bool = False):
r"""Initialise a thesaurus for ontology class annotations.
Args:
onto (Ontology): The input ontology to extract annotations from.
annotation_property_iris (List[str]): Specify which annotation properties to be used.
apply_transitivity (bool, optional): Apply synonym transitivity to merge synonym groups or not. Defaults to `False`.
"""
self.onto = onto
# build the annotation index to extract synonyms from `onto`
# the input property iris may not exist in this ontology
# the output property iris will be truncated to the existing ones
index, iris = self.onto.build_annotation_index(
annotation_property_iris=annotation_property_iris,
entity_type="Classes",
apply_lowercasing=True,
)
self.annotation_index = index
self.annotation_property_iris = iris
total_number_of_annotations = sum([len(v) for v in self.annotation_index.values()])
self.average_number_of_annotations_per_class = total_number_of_annotations / len(self.annotation_index)
# synonym groups
self.apply_transitivity = apply_transitivity
self.synonym_groups = list(self.annotation_index.values())
if self.apply_transitivity:
self.synonym_groups = self.merge_synonym_groups_by_transitivity(self.synonym_groups)
# summary
self.info = {
type(self).__name__: {
"ontology": self.onto.info[type(self.onto).__name__],
"average_number_of_annotations_per_class": round(self.average_number_of_annotations_per_class, 3),
"number_of_synonym_groups": len(self.synonym_groups),
}
}
def __str__(self):
str(self.onto) # the info of ontology is updated upon calling its __str__ method
return FileUtils.print_dict(self.info)
@staticmethod
def get_synonym_pairs(synonym_group: Set[str], remove_duplicates: bool = True):
"""Get synonym pairs from a synonym group through a cartesian product.
Args:
            synonym_group (Set[str]): A set of annotation phrases that are synonymous to each other.
            remove_duplicates (bool, optional): Whether to remove duplicated synonym pairs. Defaults to `True`.
Returns:
(List[Tuple[str, str]]): A list of synonym pairs.
"""
synonym_pairs = list(itertools.product(synonym_group, synonym_group))
if remove_duplicates:
return DataUtils.uniqify(synonym_pairs)
else:
return synonym_pairs
@staticmethod
def merge_synonym_groups_by_transitivity(synonym_groups: List[Set[str]]):
r"""Merge synonym groups by transitivity.
Synonym groups that share a common annotation phrase will be merged. NOTE that for
        multiple ontologies, we can merge their synonym groups by first concatenating them
        and then using this function.
!!! note
In $\textsf{BERTMap}$ experiments we have considered this as a data augmentation approach
but it does not bring a significant performance improvement. However, if the
overall number of annotations is not large enough then this could be a good option.
Args:
synonym_groups (List[Set[str]]): A sequence of synonym groups to be merged.
Returns:
(List[Set[str]]): A list of merged synonym groups.
"""
synonym_pairs = []
for synonym_group in synonym_groups:
# gather synonym pairs from the self-product of a synonym group
synonym_pairs += AnnotationThesaurus.get_synonym_pairs(synonym_group, remove_duplicates=False)
synonym_pairs = DataUtils.uniqify(synonym_pairs)
        merged_grouped_synonyms = AnnotationThesaurus.connected_annotations(synonym_pairs)
return merged_grouped_synonyms
@staticmethod
def connected_annotations(synonym_pairs: List[Tuple[str, str]]):
"""Build a graph for adjacency among the class annotations (labels) such that
the **transitivity** of synonyms is ensured.
Auxiliary function for [`merge_synonym_groups_by_transitivity`][deeponto.align.bertmap.text_semantics.AnnotationThesaurus.merge_synonym_groups_by_transitivity].
Args:
synonym_pairs (List[Tuple[str, str]]): List of pairs of phrases that are synonymous.
Returns:
(List[Set[str]]): A list of synonym groups.
"""
graph = nx.Graph()
graph.add_edges_from(synonym_pairs)
# nx.draw(G, with_labels = True)
connected = list(nx.connected_components(graph))
return connected
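    # Illustrative note (hypothetical labels, not part of the original code): given
    # synonym_pairs = [("heart", "cardiac organ"), ("cardiac organ", "cor")], the graph has edges
    # heart--cardiac organ--cor, so connected_annotations returns the single connected component
    # {"heart", "cardiac organ", "cor"}, merging the two original groups by transitivity.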
def synonym_sampling(self, num_samples: Optional[int] = None):
r"""Sample synonym pairs from a list of synonym groups extracted from the input ontology.
According to the $\textsf{BERTMap}$ paper, **synonyms** are defined as label pairs that belong
to the same ontology class.
NOTE this has been validated for getting the same results as in the original $\textsf{BERTMap}$ repository.
Args:
num_samples (int, optional): The (maximum) number of **unique** samples extracted. Defaults to `None`.
Returns:
(List[Tuple[str, str]]): A list of unique synonym pair samples.
"""
synonym_pool = []
for synonym_group in self.synonym_groups:
# do not remove duplicates in the loop to save time
synonym_pairs = self.get_synonym_pairs(synonym_group, remove_duplicates=False)
synonym_pool += synonym_pairs
        # remove duplicates after the loop
synonym_pool = DataUtils.uniqify(synonym_pool)
if (not num_samples) or (num_samples >= len(synonym_pool)):
# print("Return all synonym pairs without downsampling.")
return synonym_pool
else:
return random.sample(synonym_pool, num_samples)
def soft_nonsynonym_sampling(self, num_samples: int, max_iter: int = 5):
r"""Sample **soft** non-synonyms from a list of synonym groups extracted from the input ontology.
According to the $\textsf{BERTMap}$ paper, **soft non-synonyms** are defined as label pairs
from two *different* synonym groups that are **randomly** selected.
Args:
num_samples (int): The (maximum) number of **unique** samples extracted; this is
required **unlike for synonym sampling** because the non-synonym pool is **significantly
larger** (considering random combinations of different synonym groups).
max_iter (int): The maximum number of iterations for conducting sampling. Defaults to `5`.
Returns:
(List[Tuple[str, str]]): A list of unique (soft) non-synonym pair samples.
"""
        nonsynonym_pool = []
        # randomly select disjoint synonym group pairs from all
        for _ in range(num_samples):
            left_synonym_group, right_synonym_group = tuple(random.sample(self.synonym_groups, 2))
            try:
                # randomly choose one label from a synonym group
                left_label = random.choice(list(left_synonym_group))
                right_label = random.choice(list(right_synonym_group))
                nonsynonym_pool.append((left_label, right_label))
            except:
                # skip if there are no class labels
                continue
        # DataUtils.uniqify is too slow so we should avoid operating it too often
        nonsynonym_pool = DataUtils.uniqify(nonsynonym_pool)
        while len(nonsynonym_pool) < num_samples and max_iter > 0:
            max_iter = max_iter - 1  # reduce the iteration to prevent exhausting loop
            nonsynonym_pool += self.soft_nonsynonym_sampling(num_samples - len(nonsynonym_pool), max_iter)
            nonsynonym_pool = DataUtils.uniqify(nonsynonym_pool)
        return nonsynonym_pool
def weighted_random_choices_of_sibling_groups(self, k: int = 1):
"""Randomly (weighted) select a number of sibling class groups.
The weights are computed according to the sizes of the sibling class groups.
"""
weights = [len(s) for s in self.onto.sibling_class_groups]
weights = [w / sum(weights) for w in weights] # normalised
return random.choices(self.onto.sibling_class_groups, weights=weights, k=k)
def hard_nonsynonym_sampling(self, num_samples: int, max_iter: int = 5):
r"""Sample **hard** non-synonyms from sibling classes of the input ontology.
According to the $\textsf{BERTMap}$ paper, **hard non-synonyms** are defined as label pairs
that belong to two **disjoint** ontology classes. For practical reason, the condition
is eased to two **sibling** ontology classes.
Args:
num_samples (int): The (maximum) number of **unique** samples extracted; this is
required **unlike for synonym sampling** because the non-synonym pool is **significantly
larger** (considering random combinations of different synonym groups).
max_iter (int): The maximum number of iterations for conducting sampling. Defaults to `5`.
Returns:
(List[Tuple[str, str]]): A list of unique (hard) non-synonym pair samples.
"""
        # initialise the sibling class groups
self.onto.sibling_class_groups
        # flatten the disjointness groups into all pairs of hard negatives
nonsynonym_pool = []
# randomly (weighted) select a number of sibling class groups with replacement
sibling_class_groups = self.weighted_random_choices_of_sibling_groups(k=num_samples)
for sibling_class_group in sibling_class_groups:
            # randomly select two sibling classes; no weights this time
left_class_iri, right_class_iri = tuple(random.sample(sibling_class_group, 2))
try:
                # randomly select a label for each of them
left_label = random.choice(list(self.annotation_index[left_class_iri]))
right_label = random.choice(list(self.annotation_index[right_class_iri]))
# add the label pair to the pool
nonsynonym_pool.append((left_label, right_label))
except:
# skip them if there are no class labels
continue
# DataUtils.uniqify is too slow so we should avoid operating it too often
nonsynonym_pool = DataUtils.uniqify(nonsynonym_pool)
while len(nonsynonym_pool) < num_samples and max_iter > 0:
max_iter = max_iter - 1 # reduce the iteration to prevent exhausting loop
nonsynonym_pool += self.hard_nonsynonym_sampling(num_samples - len(nonsynonym_pool), max_iter)
nonsynonym_pool = DataUtils.uniqify(nonsynonym_pool)
return nonsynonym_pool
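# A minimal usage sketch of the thesaurus sampling API (hypothetical file path and annotation
# property IRI, not part of the original code):
#
#     thesaurus = AnnotationThesaurus(
#         Ontology("doid.owl"),
#         annotation_property_iris=["http://www.w3.org/2000/01/rdf-schema#label"],
#     )
#     synonyms = thesaurus.synonym_sampling(num_samples=1000)
#     soft_negatives = thesaurus.soft_nonsynonym_sampling(num_samples=2000)
#     hard_negatives = thesaurus.hard_nonsynonym_sampling(num_samples=2000)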
class IntraOntologyTextSemanticsCorpus:
r"""Class for creating the intra-ontology text semantics corpus from an ontology.
As defined in the $\textsf{BERTMap}$ paper, the **intra-ontology** text semantics corpus consists
of synonym and non-synonym pairs extracted from the ontology class annotations.
Attributes:
onto (Ontology): An ontology to construct the intra-ontology text semantics corpus from.
annotation_property_iris (List[str]): Specify which annotation properties to be used.
soft_negative_ratio (int): The expected negative sample ratio of the soft non-synonyms to the extracted synonyms. Defaults to `2`.
hard_negative_ratio (int): The expected negative sample ratio of the hard non-synonyms to the extracted synonyms. Defaults to `2`.
            However, hard non-synonyms are sometimes insufficient given an ontology's hierarchy,
            in which case additional soft non-synonyms are sampled to make up the difference.
"""
def __init__(
self,
onto: Ontology,
annotation_property_iris: List[str],
soft_negative_ratio: int = 2,
hard_negative_ratio: int = 2,
):
self.onto = onto
# $\textsf{BERTMap}$ does not apply synonym transitivity
self.thesaurus = AnnotationThesaurus(onto, annotation_property_iris, apply_transitivity=False)
self.synonyms = self.thesaurus.synonym_sampling()
# sample hard negatives first as they might not be enough
num_hard = hard_negative_ratio * len(self.synonyms)
self.hard_nonsynonyms = self.thesaurus.hard_nonsynonym_sampling(num_hard)
# compensate the number of hard negatives as soft negatives are almost always available
num_soft = (soft_negative_ratio + hard_negative_ratio) * len(self.synonyms) - len(self.hard_nonsynonyms)
self.soft_nonsynonyms = self.thesaurus.soft_nonsynonym_sampling(num_soft)
self.info = {
type(self).__name__: {
"num_synonyms": len(self.synonyms),
"num_nonsynonyms": len(self.soft_nonsynonyms) + len(self.hard_nonsynonyms),
"num_soft_nonsynonyms": len(self.soft_nonsynonyms),
"num_hard_nonsynonyms": len(self.hard_nonsynonyms),
"annotation_thesaurus": self.thesaurus.info["AnnotationThesaurus"],
}
}
def __str__(self):
return FileUtils.print_dict(self.info)
def save(self, save_path: str):
"""Save the intra-ontology corpus (a `.json` file for label pairs
and its summary) in the specified directory.
"""
FileUtils.create_path(save_path)
save_json = {
"summary": self.info,
"synonyms": [(pos[0], pos[1], 1) for pos in self.synonyms],
"nonsynonyms": [(neg[0], neg[1], 0) for neg in self.soft_nonsynonyms + self.hard_nonsynonyms],
}
FileUtils.save_file(save_json, os.path.join(save_path, "intra-onto.corpus.json"))
class CrossOntologyTextSemanticsCorpus:
r"""Class for creating the cross-ontology text semantics corpus from two ontologies and provided mappings between them.
As defined in the $\textsf{BERTMap}$ paper, the **cross-ontology** text semantics corpus consists
of synonym and non-synonym pairs extracted from the annotations/labels of class pairs
    involved in the provided cross-ontology mappings.
Attributes:
class_mappings (List[ReferenceMapping]): A list of cross-ontology class mappings.
src_onto (Ontology): The source ontology whose class IRIs are heads of the `class_mappings`.
tgt_onto (Ontology): The target ontology whose class IRIs are tails of the `class_mappings`.
annotation_property_iris (List[str]): A list of annotation property IRIs used to extract the annotations.
negative_ratio (int): The expected negative sample ratio of the non-synonyms to the extracted synonyms. Defaults to `4`. NOTE
that we do not have *hard* non-synonyms at the cross-ontology level.
"""
def __init__(
self,
class_mappings: List[ReferenceMapping],
src_onto: Ontology,
tgt_onto: Ontology,
annotation_property_iris: List[str],
negative_ratio: int = 4,
):
self.class_mappings = class_mappings
self.src_onto = src_onto
self.tgt_onto = tgt_onto
# build the annotation thesaurus for each ontology
self.src_thesaurus = AnnotationThesaurus(src_onto, annotation_property_iris)
self.tgt_thesaurus = AnnotationThesaurus(tgt_onto, annotation_property_iris)
self.negative_ratio = negative_ratio
self.synonyms = self.synonym_sampling_from_mappings()
num_negative = negative_ratio * len(self.synonyms)
self.nonsynonyms = self.nonsynonym_sampling_from_mappings(num_negative)
self.info = {
type(self).__name__: {
"num_synonyms": len(self.synonyms),
"num_nonsynonyms": len(self.nonsynonyms),
"num_mappings": len(self.class_mappings),
"src_annotation_thesaurus": self.src_thesaurus.info["AnnotationThesaurus"],
"tgt_annotation_thesaurus": self.tgt_thesaurus.info["AnnotationThesaurus"],
}
}
def __str__(self):
return FileUtils.print_dict(self.info)
def save(self, save_path: str):
"""Save the cross-ontology corpus (a `.json` file for label pairs
and its summary) in the specified directory.
"""
FileUtils.create_path(save_path)
save_json = {
"summary": self.info,
"synonyms": [(pos[0], pos[1], 1) for pos in self.synonyms],
"nonsynonyms": [(neg[0], neg[1], 0) for neg in self.nonsynonyms],
}
FileUtils.save_file(save_json, os.path.join(save_path, "cross-onto.corpus.json"))
def synonym_sampling_from_mappings(self):
r"""Sample synonyms from cross-ontology class mappings.
Arguments of this method are all class attributes.
See [`CrossOntologyTextSemanticsCorpus`][deeponto.align.bertmap.text_semantics.CrossOntologyTextSemanticsCorpus].
According to the $\textsf{BERTMap}$ paper, **cross-ontology synonyms** are defined as label pairs
that belong to two **matched** classes. Suppose the class $C$ from the source ontology
and the class $D$ from the target ontology are matched according to one of the `class_mappings`,
then the cartesian product of labels of $C$ and labels of $D$ form cross-ontology synonyms.
Note that **identity synonyms** in the form of $(a, a)$ are removed because they have been covered
in the intra-ontology case.
Returns:
(List[Tuple[str, str]]): A list of unique synonym pair samples from ontology class mappings.
"""
synonym_pool = []
for class_mapping in self.class_mappings:
src_class_iri, tgt_class_iri = class_mapping.to_tuple()
src_class_annotations = self.src_thesaurus.annotation_index[src_class_iri]
tgt_class_annotations = self.tgt_thesaurus.annotation_index[tgt_class_iri]
synonym_pairs = list(itertools.product(src_class_annotations, tgt_class_annotations))
            # remove the identity synonyms as they have been covered in the intra-ontology case
synonym_pairs = [(l, r) for l, r in synonym_pairs if l != r]
backward_synonym_pairs = [(r, l) for l, r in synonym_pairs]
synonym_pool += synonym_pairs + backward_synonym_pairs
synonym_pool = DataUtils.uniqify(synonym_pool)
return synonym_pool
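    # Illustrative note (hypothetical labels, not part of the original code): if a mapping matches a
    # source class labelled {"myocardium"} with a target class labelled {"heart muscle", "myocardium"},
    # the sampled cross-ontology synonyms are ("myocardium", "heart muscle") and its reverse
    # ("heart muscle", "myocardium"); the identity pair ("myocardium", "myocardium") is dropped.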
def nonsynonym_sampling_from_mappings(self, num_samples: int, max_iter: int = 5):
r"""Sample non-synonyms from cross-ontology class mappings.
Arguments of this method are all class attributes.
See [`CrossOntologyTextSemanticsCorpus`][deeponto.align.bertmap.text_semantics.CrossOntologyTextSemanticsCorpus].
According to the $\textsf{BERTMap}$ paper, **cross-ontology non-synonyms** are defined as label pairs
that belong to two **unmatched** classes. Assume that the provided class mappings are self-contained
in the sense that they are complete for the classes involved in them, then we can randomly
sample two cross-ontology classes that are not matched according to the mappings and take
their labels as nonsynonyms. In practice, it is quite unlikely to obtain false negatives since
the number of incorrect mappings is much larger than the number of correct ones.
Returns:
(List[Tuple[str, str]]): A list of unique nonsynonym pair samples from ontology class mappings.
"""
nonsynonym_pool = []
# form cross-ontology synonym groups
cross_onto_synonym_group_pair = []
for class_mapping in self.class_mappings:
src_class_iri, tgt_class_iri = class_mapping.to_tuple()
src_class_annotations = self.src_thesaurus.annotation_index[src_class_iri]
tgt_class_annotations = self.tgt_thesaurus.annotation_index[tgt_class_iri]
# let each matched class pair's annotations form a synonym group_pair
cross_onto_synonym_group_pair.append((src_class_annotations, tgt_class_annotations))
# randomly select disjoint synonym group pairs from all
for _ in range(num_samples):
left_class_pair, right_class_pair = tuple(random.sample(cross_onto_synonym_group_pair, 2))
try:
# randomly choose one label from a synonym group
left_label = random.choice(list(left_class_pair[0])) # choosing the src side by [0]
right_label = random.choice(list(right_class_pair[1])) # choosing the tgt side by [1]
nonsynonym_pool.append((left_label, right_label))
except:
# skip if there are no class labels
continue
# DataUtils.uniqify is too slow so we should avoid operating it too often
nonsynonym_pool = DataUtils.uniqify(nonsynonym_pool)
while len(nonsynonym_pool) < num_samples and max_iter > 0:
max_iter = max_iter - 1 # reduce the iteration to prevent exhausting loop
nonsynonym_pool += self.nonsynonym_sampling_from_mappings(num_samples - len(nonsynonym_pool), max_iter)
nonsynonym_pool = DataUtils.uniqify(nonsynonym_pool)
return nonsynonym_pool
class TextSemanticsCorpora:
r"""Class for creating the collection text semantics corpora.
As defined in the $\textsf{BERTMap}$ paper, the collection of text semantics corpora contains
**at least two intra-ontology sub-corpora** from the source and target ontologies, respectively.
If some class mappings are provided, then a **cross-ontology sub-corpus** will be created.
If some additional auxiliary ontologies are provided, the intra-ontology corpora created from them
will serve as the **auxiliary sub-corpora**.
Attributes:
src_onto (Ontology): The source ontology to be matched or aligned.
tgt_onto (Ontology): The target ontology to be matched or aligned.
annotation_property_iris (List[str]): A list of annotation property IRIs used to extract the annotations.
class_mappings (List[ReferenceMapping], optional): A list of cross-ontology class mappings between the
source and the target ontologies. Defaults to `None`.
auxiliary_ontos (List[Ontology], optional): A list of auxiliary ontologies for augmenting more synonym/non-synonym samples. Defaults to `None`.
"""
def __init__(
self,
src_onto: Ontology,
tgt_onto: Ontology,
annotation_property_iris: List[str],
class_mappings: Optional[List[ReferenceMapping]] = None,
auxiliary_ontos: Optional[List[Ontology]] = None,
):
self.synonyms = []
self.nonsynonyms = []
# build intra-ontology corpora
# negative sample ratios are by default
self.intra_src_onto_corpus = IntraOntologyTextSemanticsCorpus(src_onto, annotation_property_iris)
self.add_samples_from_sub_corpus(self.intra_src_onto_corpus)
self.intra_tgt_onto_corpus = IntraOntologyTextSemanticsCorpus(tgt_onto, annotation_property_iris)
self.add_samples_from_sub_corpus(self.intra_tgt_onto_corpus)
        # build cross-ontology corpora
self.class_mappings = class_mappings
self.cross_onto_corpus = None
if self.class_mappings:
self.cross_onto_corpus = CrossOntologyTextSemanticsCorpus(
class_mappings, src_onto, tgt_onto, annotation_property_iris
)
self.add_samples_from_sub_corpus(self.cross_onto_corpus)
# build auxiliary ontology corpora (same as intra-ontology)
self.auxiliary_ontos = auxiliary_ontos
self.auxiliary_onto_corpora = []
if self.auxiliary_ontos:
for auxiliary_onto in self.auxiliary_ontos:
self.auxiliary_onto_corpora.append(
IntraOntologyTextSemanticsCorpus(auxiliary_onto, annotation_property_iris)
)
for auxiliary_onto_corpus in self.auxiliary_onto_corpora:
self.add_samples_from_sub_corpus(auxiliary_onto_corpus)
# DataUtils.uniqify the samples
self.synonyms = DataUtils.uniqify(self.synonyms)
self.nonsynonyms = DataUtils.uniqify(self.nonsynonyms)
# remove invalid nonsynonyms
self.nonsynonyms = list(set(self.nonsynonyms) - set(self.synonyms))
# summary
self.info = {
type(self).__name__: {
"num_synonyms": len(self.synonyms),
"num_nonsynonyms": len(self.nonsynonyms),
"intra_src_onto_corpus": self.intra_src_onto_corpus.info["IntraOntologyTextSemanticsCorpus"],
"intra_tgt_onto_corpus": self.intra_tgt_onto_corpus.info["IntraOntologyTextSemanticsCorpus"],
"cross_onto_corpus": self.cross_onto_corpus.info["CrossOntologyTextSemanticsCorpus"] if self.cross_onto_corpus else None,
"auxiliary_onto_corpora": [a.info["IntraOntologyTextSemanticsCorpus"] for a in self.auxiliary_onto_corpora],
}
}
def __str__(self):
return FileUtils.print_dict(self.info)
def save(self, save_path: str):
"""Save the overall text semantics corpora (a `.json` file for label pairs
and its summary) in the specified directory.
"""
FileUtils.create_path(save_path)
save_json = {
"summary": self.info,
"synonyms": [(pos[0], pos[1], 1) for pos in self.synonyms],
"nonsynonyms": [(neg[0], neg[1], 0) for neg in self.nonsynonyms],
}
FileUtils.save_file(save_json, os.path.join(save_path, "text-semantics.corpora.json"))
def add_samples_from_sub_corpus(
self, sub_corpus: Union[IntraOntologyTextSemanticsCorpus, CrossOntologyTextSemanticsCorpus]
):
"""Add synonyms and non-synonyms from each sub-corpus to the overall collection."""
self.synonyms += sub_corpus.synonyms
if isinstance(sub_corpus, IntraOntologyTextSemanticsCorpus):
self.nonsynonyms += sub_corpus.soft_nonsynonyms + sub_corpus.hard_nonsynonyms
else:
self.nonsynonyms += sub_corpus.nonsynonyms
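# A minimal usage sketch for building and saving the corpora (hypothetical paths, not part of
# the original code):
#
#     corpora = TextSemanticsCorpora(
#         src_onto=Ontology("src.owl"),
#         tgt_onto=Ontology("tgt.owl"),
#         annotation_property_iris=["http://www.w3.org/2000/01/rdf-schema#label"],
#     )
#     corpora.save("./data")  # writes text-semantics.corpora.json under ./data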
| 28,876 | 48.702238 | 168 |
py
|
DeepOnto
|
DeepOnto-main/src/deeponto/align/bertmap/pipeline.py
|
# Copyright 2021 Yuan He. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from typing import Optional, Callable
from yacs.config import CfgNode
import os
import random
import enlighten
# import transformers
from deeponto.align.mapping import ReferenceMapping
from deeponto.onto import Ontology
from deeponto.utils.decorators import paper
from deeponto.utils import FileUtils, Tokenizer
from deeponto.utils.logging import create_logger
from .text_semantics import TextSemanticsCorpora
from .bert_classifier import BERTSynonymClassifier
from .mapping_prediction import MappingPredictor
from .mapping_refinement import MappingRefiner
MODEL_OPTIONS = {"bertmap": {"trainable": True}, "bertmaplt": {"trainable": False}}
DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__), "default_config.yaml")
# transformers.logging.set_verbosity_info()
# @paper(
# "BERTMap: A BERT-based Ontology Alignment System (AAAI-2022)",
# "https://ojs.aaai.org/index.php/AAAI/article/view/20510",
# )
class BERTMapPipeline:
r"""Class for the whole ontology alignment pipeline of $\textsf{BERTMap}$ and $\textsf{BERTMapLt}$ models.
!!! note
Parameters related to BERT training are `None` by default. They will be constructed for
$\textsf{BERTMap}$ and stay as `None` for $\textsf{BERTMapLt}$.
Attributes:
config (CfgNode): The configuration for BERTMap or BERTMapLt.
name (str): The name of the model, either `bertmap` or `bertmaplt`.
output_path (str): The path to the output directory.
src_onto (Ontology): The source ontology to be matched.
tgt_onto (Ontology): The target ontology to be matched.
annotation_property_iris (List[str]): The annotation property IRIs used for extracting synonyms and nonsynonyms.
src_annotation_index (dict): A dictionary that stores the `(class_iri, class_annotations)` pairs from `src_onto` according to `annotation_property_iris`.
tgt_annotation_index (dict): A dictionary that stores the `(class_iri, class_annotations)` pairs from `tgt_onto` according to `annotation_property_iris`.
known_mappings (List[ReferenceMapping], optional): List of known mappings for constructing the **cross-ontology corpus**.
        auxiliary_ontos (List[Ontology], optional): List of auxiliary ontologies for constructing any **auxiliary corpus**.
corpora (dict, optional): A dictionary that stores the `summary` of built text semantics corpora and the sampled `synonyms` and `nonsynonyms`.
finetune_data (dict, optional): A dictionary that stores the `training` and `validation` splits of samples from `corpora`.
bert (BERTSynonymClassifier, optional): A BERT model for synonym classification and mapping prediction.
best_checkpoint (str, optional): The path to the best BERT checkpoint which will be loaded after training.
mapping_predictor (MappingPredictor): The predictor function based on class annotations, used for **global matching** or **mapping scoring**.
"""
def __init__(self, src_onto: Ontology, tgt_onto: Ontology, config: CfgNode):
"""Initialize the BERTMap or BERTMapLt model.
Args:
src_onto (Ontology): The source ontology for alignment.
tgt_onto (Ontology): The target ontology for alignment.
config (CfgNode): The configuration for BERTMap or BERTMapLt.
"""
# load the configuration and confirm model name is valid
self.config = config
self.name = self.config.model
if not self.name in MODEL_OPTIONS.keys():
raise RuntimeError(f"`model` {self.name} in the config file is not one of the supported.")
# create the output directory, e.g., experiments/bertmap
self.config.output_path = "." if not self.config.output_path else self.config.output_path
self.config.output_path = os.path.abspath(self.config.output_path)
self.output_path = os.path.join(self.config.output_path, self.name)
FileUtils.create_path(self.output_path)
# create logger and progress manager (hidden attribute)
self.logger = create_logger(self.name, self.output_path)
self.enlighten_manager = enlighten.get_manager()
# ontology
self.src_onto = src_onto
self.tgt_onto = tgt_onto
self.annotation_property_iris = self.config.annotation_property_iris
self.logger.info(f"Load the following configurations:\n{FileUtils.print_dict(self.config)}")
config_path = os.path.join(self.output_path, "config.yaml")
self.logger.info(f"Save the configuration file at {config_path}.")
self.save_bertmap_config(self.config, config_path)
# build the annotation thesaurus
self.src_annotation_index, _ = self.src_onto.build_annotation_index(self.annotation_property_iris)
self.tgt_annotation_index, _ = self.tgt_onto.build_annotation_index(self.annotation_property_iris)
# provided mappings if any
self.known_mappings = self.config.known_mappings
if self.known_mappings:
self.known_mappings = ReferenceMapping.read_table_mappings(self.known_mappings)
# auxiliary ontologies if any
self.auxiliary_ontos = self.config.auxiliary_ontos
if self.auxiliary_ontos:
self.auxiliary_ontos = [Ontology(ao) for ao in self.auxiliary_ontos]
self.data_path = os.path.join(self.output_path, "data")
# load or construct the corpora
self.corpora_path = os.path.join(self.data_path, "text-semantics.corpora.json")
self.corpora = self.load_text_semantics_corpora()
# load or construct fine-tune data
self.finetune_data_path = os.path.join(self.data_path, "fine-tune.data.json")
self.finetune_data = self.load_finetune_data()
# load the bert model and train
self.bert_config = self.config.bert
self.bert_pretrained_path = self.bert_config.pretrained_path
self.bert_finetuned_path = os.path.join(self.output_path, "bert")
self.bert_resume_training = self.bert_config.resume_training
self.bert_synonym_classifier = None
self.best_checkpoint = None
if self.name == "bertmap":
self.bert_synonym_classifier = self.load_bert_synonym_classifier()
# train if the loaded classifier is not in eval mode
            if not self.bert_synonym_classifier.eval_mode:
self.logger.info(
f"Data statistics:\n \
{FileUtils.print_dict(self.bert_synonym_classifier.data_stat)}"
)
self.bert_synonym_classifier.train(self.bert_resume_training)
# turn on eval mode after training
self.bert_synonym_classifier.eval()
# NOTE potential redundancy here: after training, load the best checkpoint
self.best_checkpoint = self.load_best_checkpoint()
if not self.best_checkpoint:
raise RuntimeError(f"No best checkpoint found for the BERT synonym classifier model.")
self.logger.info(f"Fine-tuning finished, found best checkpoint at {self.best_checkpoint}.")
else:
self.logger.info(f"No training needed; skip BERT fine-tuning.")
# pretty progress bar tracking
self.enlighten_status = self.enlighten_manager.status_bar(
status_format=u'Global Matching{fill}Stage: {demo}{fill}{elapsed}',
color='bold_underline_bright_white_on_lightslategray',
justify=enlighten.Justify.CENTER, demo='Initializing',
autorefresh=True, min_delta=0.5
)
# mapping predictions
self.global_matching_config = self.config.global_matching
self.mapping_predictor = MappingPredictor(
output_path=self.output_path,
tokenizer_path=self.bert_config.pretrained_path,
src_annotation_index=self.src_annotation_index,
tgt_annotation_index=self.tgt_annotation_index,
bert_synonym_classifier=self.bert_synonym_classifier,
num_raw_candidates=self.global_matching_config.num_raw_candidates,
num_best_predictions=self.global_matching_config.num_best_predictions,
batch_size_for_prediction=self.bert_config.batch_size_for_prediction,
logger=self.logger,
enlighten_manager=self.enlighten_manager,
enlighten_status=self.enlighten_status
)
self.mapping_refiner = None
# if global matching is disabled (potentially used for class pair scoring)
if self.config.global_matching.enabled:
self.mapping_predictor.mapping_prediction() # mapping prediction
if self.name == "bertmap":
self.mapping_refiner = MappingRefiner(
output_path=self.output_path,
src_onto=self.src_onto,
tgt_onto=self.tgt_onto,
mapping_predictor=self.mapping_predictor,
mapping_extension_threshold=self.global_matching_config.mapping_extension_threshold,
mapping_filtered_threshold=self.global_matching_config.mapping_filtered_threshold,
logger=self.logger,
enlighten_manager=self.enlighten_manager,
enlighten_status=self.enlighten_status
)
self.mapping_refiner.mapping_extension() # mapping extension
self.mapping_refiner.mapping_repair() # mapping repair
self.enlighten_status.update(demo="Finished")
else:
self.enlighten_status.update(demo="Skipped")
self.enlighten_status.close()
# class pair scoring is invoked outside
def load_or_construct(self, data_file: str, data_name: str, construct_func: Callable, *args, **kwargs):
"""Load existing data or construct a new one.
        An auxiliary function that checks the existence of a data file and loads it if it exists.
        Otherwise, it constructs new data with the input `construct_func`, which is expected to
        generate and save a local data file.
"""
if os.path.exists(data_file):
self.logger.info(f"Load existing {data_name} from {data_file}.")
else:
self.logger.info(f"Construct new {data_name} and save at {data_file}.")
construct_func(*args, **kwargs)
# load the data file that is supposed to be saved locally
return FileUtils.load_file(data_file)
def load_text_semantics_corpora(self):
"""Load or construct text semantics corpora.
See [`TextSemanticsCorpora`][deeponto.align.bertmap.text_semantics.TextSemanticsCorpora].
"""
data_name = "text semantics corpora"
if self.name == "bertmap":
def construct():
corpora = TextSemanticsCorpora(
src_onto=self.src_onto,
tgt_onto=self.tgt_onto,
annotation_property_iris=self.annotation_property_iris,
class_mappings=self.known_mappings,
auxiliary_ontos=self.auxiliary_ontos,
)
self.logger.info(str(corpora))
corpora.save(self.data_path)
return self.load_or_construct(self.corpora_path, data_name, construct)
self.logger.info(f"No training needed; skip the construction of {data_name}.")
return None
def load_finetune_data(self):
r"""Load or construct fine-tuning data from text semantics corpora.
Steps of constructing fine-tuning data from text semantics:
1. Mix synonym and nonsynonym data.
2. Randomly sample 90% as training samples and 10% as validation.
"""
data_name = "fine-tuning data"
if self.name == "bertmap":
def construct():
finetune_data = dict()
samples = self.corpora["synonyms"] + self.corpora["nonsynonyms"]
random.shuffle(samples)
split_index = int(0.9 * len(samples)) # split at 90%
finetune_data["training"] = samples[:split_index]
finetune_data["validation"] = samples[split_index:]
FileUtils.save_file(finetune_data, self.finetune_data_path)
return self.load_or_construct(self.finetune_data_path, data_name, construct)
self.logger.info(f"No training needed; skip the construction of {data_name}.")
return None
def load_bert_synonym_classifier(self):
"""Load the BERT model from a pre-trained or a local checkpoint.
- If loaded from pre-trained, it means to start training from a pre-trained model such as `bert-uncased`.
- If loaded from local, turn on the `eval` mode for mapping predictions.
- If `self.bert_resume_training` is `True`, it will be loaded from the latest saved checkpoint.
"""
checkpoint = self.load_best_checkpoint() # load the best checkpoint or nothing
eval_mode = True
# if no checkpoint has been found, start training from scratch OR resume training
        # no point in loading the best checkpoint if resuming training (the trainer will automatically search for the latest checkpoint)
if not checkpoint or self.bert_resume_training:
checkpoint = self.bert_pretrained_path
eval_mode = False # since it is for training now
return BERTSynonymClassifier(
loaded_path=checkpoint,
output_path=self.bert_finetuned_path,
eval_mode=eval_mode,
max_length_for_input=self.bert_config.max_length_for_input,
num_epochs_for_training=self.bert_config.num_epochs_for_training,
batch_size_for_training=self.bert_config.batch_size_for_training,
batch_size_for_prediction=self.bert_config.batch_size_for_prediction,
training_data=self.finetune_data["training"],
validation_data=self.finetune_data["validation"],
)
def load_best_checkpoint(self) -> Optional[str]:
"""Find the best checkpoint by searching for trainer states in each checkpoint file."""
best_checkpoint = -1
if os.path.exists(self.bert_finetuned_path):
for file in os.listdir(self.bert_finetuned_path):
# load trainer states from each checkpoint file
if file.startswith("checkpoint"):
trainer_state = FileUtils.load_file(
os.path.join(self.bert_finetuned_path, file, "trainer_state.json")
)
checkpoint = int(trainer_state["best_model_checkpoint"].split("/")[-1].split("-")[-1])
# find the latest best checkpoint
if checkpoint > best_checkpoint:
best_checkpoint = checkpoint
if best_checkpoint == -1:
best_checkpoint = None
else:
best_checkpoint = os.path.join(self.bert_finetuned_path, f"checkpoint-{best_checkpoint}")
return best_checkpoint
@staticmethod
def load_bertmap_config(config_file: Optional[str] = None):
"""Load the BERTMap configuration in `.yaml`. If the file
is not provided, use the default configuration.
"""
if not config_file:
config_file = DEFAULT_CONFIG_FILE
print(f"Use the default configuration at {DEFAULT_CONFIG_FILE}.")
if not config_file.endswith(".yaml"):
raise RuntimeError("Configuration file should be in `yaml` format.")
return CfgNode(FileUtils.load_file(config_file))
@staticmethod
def save_bertmap_config(config: CfgNode, config_file: str):
"""Save the BERTMap configuration in `.yaml`."""
with open(config_file, "w") as c:
config.dump(stream=c, sort_keys=False, default_flow_style=False)
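# A minimal usage sketch of the pipeline (hypothetical paths, not part of the original code):
#
#     config = BERTMapPipeline.load_bertmap_config()  # or load_bertmap_config("my_config.yaml")
#     config.output_path = "./experiments"
#     BERTMapPipeline(Ontology("src.owl"), Ontology("tgt.owl"), config)
#
# Running the constructor triggers corpus construction, BERT fine-tuning (for `bertmap`),
# global matching, and mapping refinement according to the configuration.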
| 16,546 | 48.247024 | 161 |
py
|
DeepOnto
|
DeepOnto-main/src/deeponto/align/bertmap/bert_classifier.py
|
# Copyright 2021 Yuan He. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Tuple, List, Optional, Union
import torch
from transformers import TrainingArguments, AutoModelForSequenceClassification, Trainer
from datasets import Dataset
from sklearn.metrics import accuracy_score
import numpy as np
import random
from deeponto.utils import Tokenizer, FileUtils
from deeponto.utils.decorators import paper
# @paper(
# "BERTMap: A BERT-based Ontology Alignment System (AAAI-2022)",
# "https://ojs.aaai.org/index.php/AAAI/article/view/20510",
# )
class BERTSynonymClassifier:
r"""Class for BERT synonym classifier.
The main scoring module of $\textsf{BERTMap}$ consisting of a BERT model and a binary synonym classifier.
Attributes:
loaded_path (str): The path to the checkpoint of a pre-trained BERT model.
output_path (str): The path to the output BERT model (usually fine-tuned).
eval_mode (bool): Set to `False` if the model is loaded for training.
max_length_for_input (int): The maximum length of an input sequence.
num_epochs_for_training (int): The number of epochs for training a BERT model.
batch_size_for_training (int): The batch size for training a BERT model.
batch_size_for_prediction (int): The batch size for making predictions.
training_data (Dataset, optional): Data for training the model if `for_training` is set to `True`. Defaults to `None`.
validation_data (Dataset, optional): Data for validating the model if `for_training` is set to `True`. Defaults to `None`.
training_args (TrainingArguments, optional): Training arguments for training the model if `for_training` is set to `True`. Defaults to `None`.
trainer (Trainer, optional): The model trainer fed with `training_args` and data samples. Defaults to `None`.
softmax (torch.nn.SoftMax, optional): The softmax layer used for normalising synonym scores. Defaults to `None`.
"""
def __init__(
self,
loaded_path: str,
output_path: str,
eval_mode: bool,
max_length_for_input: int,
num_epochs_for_training: Optional[float] = None,
batch_size_for_training: Optional[int] = None,
batch_size_for_prediction: Optional[int] = None,
training_data: Optional[List[Tuple[str, str, int]]] = None, # (sentence1, sentence2, label)
validation_data: Optional[List[Tuple[str, str, int]]] = None,
):
# Load the pretrained BERT model from the given path
self.loaded_path = loaded_path
print(f"Loading a BERT model from: {self.loaded_path}.")
self.model = AutoModelForSequenceClassification.from_pretrained(
self.loaded_path, output_hidden_states=eval_mode
)
self.tokenizer = Tokenizer.from_pretrained(loaded_path)
self.output_path = output_path
self.eval_mode = eval_mode
self.max_length_for_input = max_length_for_input
self.num_epochs_for_training = num_epochs_for_training
self.batch_size_for_training = batch_size_for_training
self.batch_size_for_prediction = batch_size_for_prediction
self.training_data = None
self.validation_data = None
self.data_stat = {}
self.training_args = None
self.trainer = None
self.softmax = None
# load the pre-trained BERT model and set it to eval mode (static)
if self.eval_mode:
self.eval()
# load the pre-trained BERT model for fine-tuning
else:
if not training_data:
raise RuntimeError("Training data should be provided when `for_training` is `True`.")
if not validation_data:
raise RuntimeError("Validation data should be provided when `for_training` is `True`.")
# load data (max_length is used for truncation)
self.training_data = self.load_dataset(training_data, "training")
self.validation_data = self.load_dataset(validation_data, "validation")
self.data_stat = {
"num_training": len(self.training_data),
"num_validation": len(self.validation_data),
}
# generate training arguments
epoch_steps = len(self.training_data) // self.batch_size_for_training # total steps of an epoch
if torch.cuda.device_count() > 0:
epoch_steps = epoch_steps // torch.cuda.device_count() # to deal with multi-gpus case
            # keep logging steps consistent even for small batch size
# report logging on every 0.02 epoch
logging_steps = int(epoch_steps * 0.02)
# eval on every 0.2 epoch
eval_steps = 10 * logging_steps
# generate the training arguments
self.training_args = TrainingArguments(
output_dir=self.output_path,
num_train_epochs=self.num_epochs_for_training,
per_device_train_batch_size=self.batch_size_for_training,
per_device_eval_batch_size=self.batch_size_for_training,
warmup_ratio=0.0,
weight_decay=0.01,
logging_steps=logging_steps,
logging_dir=f"{self.output_path}/tensorboard",
eval_steps=eval_steps,
evaluation_strategy="steps",
do_train=True,
do_eval=True,
save_steps=eval_steps,
save_total_limit=2,
load_best_model_at_end=True,
)
# build the trainer
self.trainer = Trainer(
model=self.model,
args=self.training_args,
train_dataset=self.training_data,
eval_dataset=self.validation_data,
compute_metrics=self.compute_metrics,
tokenizer=self.tokenizer._tokenizer,
)
def train(self, resume_from_checkpoint: Optional[Union[bool, str]] = None):
"""Start training the BERT model."""
if self.eval_mode:
raise RuntimeError("Training cannot be started in `eval` mode.")
self.trainer.train(resume_from_checkpoint=resume_from_checkpoint)
def eval(self):
"""To eval mode."""
print("The BERT model is set to eval mode for making predictions.")
self.model.eval()
# TODO: to implement multi-gpus for inference
self.device = self.get_device(device_num=0)
self.model.to(self.device)
self.softmax = torch.nn.Softmax(dim=1).to(self.device)
def predict(self, sent_pairs: List[Tuple[str, str]]):
r"""Run prediction pipeline for synonym classification.
        Return the `softmax` probabilities of predicting pairs as synonyms (`index=1`).
"""
inputs = self.process_inputs(sent_pairs)
with torch.no_grad():
return self.softmax(self.model(**inputs).logits)[:, 1]
def load_dataset(self, data: List[Tuple[str, str, int]], split: str) -> Dataset:
r"""Load the list of `(annotation1, annotation2, label)` samples into a `datasets.Dataset`."""
def iterate():
for sample in data:
yield {"annotation1": sample[0], "annotation2": sample[1], "labels": sample[2]}
dataset = Dataset.from_generator(iterate)
# NOTE: no padding here because the Trainer class supports dynamic padding
dataset = dataset.map(
lambda examples: self.tokenizer._tokenizer(
examples["annotation1"], examples["annotation2"], max_length=self.max_length_for_input, truncation=True
),
batched=True,
desc=f"Load {split} data:",
)
return dataset
def process_inputs(self, sent_pairs: List[Tuple[str, str]]):
r"""Process input sentence pairs for the BERT model.
Transform the sentences into BERT input embeddings and load them into the device.
This function is called only when the BERT model is about to make predictions (`eval` mode).
"""
return self.tokenizer._tokenizer(
sent_pairs,
return_tensors="pt",
max_length=self.max_length_for_input,
padding=True,
truncation=True,
).to(self.device)
@staticmethod
def compute_metrics(pred):
"""Add more evaluation metrics into the training log."""
# TODO: currently only accuracy is added, will expect more in the future if needed
labels = pred.label_ids
preds = pred.predictions.argmax(-1)
acc = accuracy_score(labels, preds)
return {"accuracy": acc}
@staticmethod
def get_device(device_num: int = 0):
"""Get a device (GPU or CPU) for the torch model"""
# If there's a GPU available...
if torch.cuda.is_available():
# Tell PyTorch to use the GPU.
device = torch.device(f"cuda:{device_num}")
print("There are %d GPU(s) available." % torch.cuda.device_count())
print("We will use the GPU:", torch.cuda.get_device_name(device_num))
# If not...
else:
print("No GPU available, using the CPU instead.")
device = torch.device("cpu")
return device
@staticmethod
def set_seed(seed_val: int = 888):
"""Set random seed for reproducible results."""
random.seed(seed_val)
np.random.seed(seed_val)
torch.manual_seed(seed_val)
torch.cuda.manual_seed_all(seed_val)
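# Editor-added usage sketch (not part of the original file). It assumes an
# instance of the classifier class above is available as `clf`; the sentence
# pairs below are placeholders:
#
#     clf.eval()                     # switch to inference mode (loads the model onto a device)
#     scores = clf.predict([("heart attack", "myocardial infarction"),
#                           ("heart attack", "renal failure")])
#     print(scores)                  # softmax probabilities of each pair being synonyms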
| 10,098 | 44.084821 | 150 |
py
|
DeepOnto
|
DeepOnto-main/src/deeponto/align/bertsubs/__init__.py
|
# Copyright 2021 Yuan He. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from deeponto.subs.bertsubs import BERTSubsInterPipeline
| 646 | 45.214286 | 74 |
py
|
DeepOnto
|
DeepOnto-main/src/deeponto/align/logmap/__init__.py
|
# Copyright 2021 Yuan He. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from deeponto.utils import FileUtils
import os
def run_logmap_repair(
src_onto_path: str, tgt_onto_path: str, mapping_file_path: str, output_path: str
):
"""Run the repair module of LogMap with `java -jar`."""
# find logmap directory
logmap_path = os.path.dirname(__file__)
# obtain absolute paths
src_onto_path = os.path.abspath(src_onto_path)
tgt_onto_path = os.path.abspath(tgt_onto_path)
mapping_file_path = os.path.abspath(mapping_file_path)
output_path = os.path.abspath(output_path)
# run jar command
print(f"Run the repair module of LogMap from {logmap_path}.")
repair_command = (
f"java -jar {logmap_path}/logmap-matcher-4.0.jar DEBUGGER "
+ f"file:{src_onto_path} file:{tgt_onto_path} TXT {mapping_file_path}"
+ f" {output_path} false true"
)
print(f"The jar command is:\n{repair_command}.")
FileUtils.run_jar(repair_command)
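# Editor-added usage sketch (all paths are hypothetical, not part of the original file):
#
#     run_logmap_repair(
#         src_onto_path="src.owl",
#         tgt_onto_path="tgt.owl",
#         mapping_file_path="mappings.txt",
#         output_path="./logmap_repair_output",
#     )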
| 1,527 | 36.268293 | 84 |
py
|
DeepOnto
|
DeepOnto-main/src/deeponto/align/logmap/java-dependencies/__init__.py
|
# Copyright 2021 Yuan He (KRR-Oxford). All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 602 | 45.384615 | 74 |
py
|
DeepOnto
|
DeepOnto-main/scripts/bertmap.py
|
# Copyright 2021 Yuan He (KRR-Oxford). All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
main_dir = os.getcwd().split("DeepOnto")[0] + "DeepOnto/src"
sys.path.append(main_dir)
from deeponto.onto import Ontology
from deeponto.align.bertmap import BERTMapPipeline, DEFAULT_CONFIG_FILE
import click
@click.command()
@click.option("-s", "--src_onto_file", type=click.Path(exists=True))
@click.option("-t", "--tgt_onto_file", type=click.Path(exists=True))
@click.option("-c", "--config_file", type=click.Path(exists=True))
@click.option("-r", "--resume_training", type=bool, default=False)
def run_bertmap(src_onto_file, tgt_onto_file, config_file, resume_training):
config = BERTMapPipeline.load_bertmap_config(config_file)
# enable automatic global matching and subsequent mapping refinement
config.global_matching.enabled = True
    # map both False and None to None
config.bert.resume_training = None if not resume_training else resume_training
src_onto = Ontology(src_onto_file)
tgt_onto = Ontology(tgt_onto_file)
BERTMapPipeline(src_onto, tgt_onto, config)
if __name__ == "__main__":
run_bertmap()
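# Editor-added example invocation (file names are placeholders, not part of the original script):
#
#     python bertmap.py -s src_onto.owl -t tgt_onto.owl -c config.yaml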
| 1,675 | 36.244444 | 82 |
py
|
DeepOnto
|
DeepOnto-main/scripts/bertsubs_intra_evaluate.py
|
# Copyright 2023 Jiaoyan Chen. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import random
from yacs.config import CfgNode
import sys
import os
main_dir = os.getcwd().split("DeepOnto")[0] + "DeepOnto/src"
sys.path.append(main_dir)
from deeponto.onto import Ontology
from deeponto.subs.bertsubs import BERTSubsIntraPipeline, DEFAULT_CONFIG_FILE_INTRA
from deeponto.utils import FileUtils
'''
partition the declared subsumptions into train, valid (--valid_ratio) and test (--test_ratio)
when subsumption_type == named_class:
a test sample is composed of two named classes: a subclass, a superclass (GT),
and at most --test_max_neg_size false superclasses are extracted from the GT's neighbourhood
when subsumption_type == restriction:
a sample is composed of a named class (subclass), an existential restriction (superclass GT),
and at most --test_max_neg_size false restrictions are randomly extracted from all existential restrictions
(this is different from the evaluation setting in our WWW J paper).
'''
parser = argparse.ArgumentParser()
parser.add_argument('--onto_file', type=str, default='/home/jiaoyan/bertsubs_data/foodon-merged.0.4.8.owl')
parser.add_argument('--valid_ratio', type=float, default=0.05)
parser.add_argument('--test_ratio', type=float, default=0.1)
parser.add_argument('--test_max_neg_size', type=int, default=40)
parser.add_argument('--max_depth', type=int, default=3)
parser.add_argument('--max_width', type=int, default=8)
parser.add_argument('--subsumption_type', type=str, default='named_class', help='restriction or named_class')
parser.add_argument('--train_file', type=str, default='./train_subsumptions.csv')
parser.add_argument('--valid_file', type=str, default='./valid_subsumptions.csv')
parser.add_argument('--test_file', type=str, default='./test_subsumptions.csv')
parser.add_argument('--evaluate_onto_file', type=str, default='./foodon.owl')
FLAGS, unparsed = parser.parse_known_args()
print('\n---- Evaluation data processing starts ----\n')
onto = Ontology(owl_path=FLAGS.onto_file)
all_subsumptions = onto.get_subsumption_axioms(entity_type='Classes')
subsumptions = BERTSubsIntraPipeline.extract_subsumptions_from_ontology(onto=onto, subsumption_type=FLAGS.subsumption_type)
if FLAGS.subsumption_type == 'restriction':
restrictions = BERTSubsIntraPipeline.extract_restrictions_from_ontology(onto=onto)
print('restrictions: %d' % len(restrictions))
else:
restrictions = []
random.shuffle(subsumptions)
valid_size = int(len(subsumptions) * FLAGS.valid_ratio)
test_size = int(len(subsumptions) * FLAGS.test_ratio)
valid_subsumptions = subsumptions[0:valid_size]
test_subsumptions = subsumptions[valid_size:(valid_size + test_size)]
train_subsumptions = subsumptions[(valid_size + test_size):]
print('train subsumptions: %d' % len(train_subsumptions))
print('valid subsumptions: %d' % len(valid_subsumptions))
print('test subsumptions: %d' % len(test_subsumptions))
def context_candidate(output_file, target_subs):
with open(output_file, 'w') as ff:
size_sum = 0
size_num = dict()
m = 0
for subs0 in target_subs:
subcls, supcls = subs0.getSubClass(), subs0.getSuperClass()
neg_candidates = BERTSubsIntraPipeline.get_test_neg_candidates_named_class(subclass=subcls, gt=supcls,
max_neg_size=FLAGS.test_max_neg_size,
onto=onto,
max_depth=FLAGS.max_depth,
max_width=FLAGS.max_width)
size = len(neg_candidates)
size_sum += size
size_num[size] = size_num[size] + 1 if size in size_num else 1
if size > 0:
s = ','.join([str(c.getIRI()) for c in neg_candidates])
ff.write('%s,%s,%s\n' % (str(subcls.getIRI()), str(supcls.getIRI()), s))
m += 1
print('\t The distribution of negative candidate size:')
for size in range(1, FLAGS.test_max_neg_size + 1):
if size in size_num:
print('\t size: %d, num: %d' % (size, size_num[size]))
else:
print('\t size: %d, num: 0' % size)
print('\t %d subsumptions saved; average neg candidate size: %.2f' % (m, size_sum / m))
if FLAGS.subsumption_type == 'restriction':
with open(FLAGS.train_file, 'w') as f:
for subs in train_subsumptions:
c1, c2 = subs.getSubClass(), subs.getSuperClass()
f.write('%s,%s\n' % (str(c1.getIRI()), str(c2)))
with open(FLAGS.valid_file, 'w') as f:
sizes = 0
for subs in valid_subsumptions:
c1, c2 = subs.getSubClass(), subs.getSuperClass()
c2_neg = BERTSubsIntraPipeline.get_test_neg_candidates_restriction(subcls=c1,
max_neg_size=FLAGS.test_max_neg_size,
restrictions=restrictions, onto=onto)
sizes += len(c2_neg)
strs = [str(r) for r in c2_neg]
f.write('%s,%s,%s\n' % (str(c1.getIRI()), str(c2), ','.join(strs)))
print('valid candidate negative avg. size: %.1f' % (sizes / len(valid_subsumptions)))
with open(FLAGS.test_file, 'w') as f:
sizes = 0
for subs in test_subsumptions:
c1, c2 = subs.getSubClass(), subs.getSuperClass()
c2_neg = BERTSubsIntraPipeline.get_test_neg_candidates_restriction(subcls=c1,
max_neg_size=FLAGS.test_max_neg_size,
restrictions=restrictions, onto=onto)
sizes += len(c2_neg)
strs = [str(r) for r in c2_neg]
f.write('%s,%s,%s\n' % (str(c1.getIRI()), str(c2), ','.join(strs)))
print('test candidate negative avg. size: %.1f' % (sizes / len(test_subsumptions)))
else:
with open(FLAGS.train_file, 'w') as f:
for subs in train_subsumptions:
c1, c2 = subs.getSubClass(), subs.getSuperClass()
f.write('%s,%s\n' % (str(c1.getIRI()), str(c2.getIRI())))
print('\n---- context candidates for validation subsumptions ----')
context_candidate(output_file=FLAGS.valid_file, target_subs=valid_subsumptions)
print('\n---- context candidates for test subsumptions ----')
context_candidate(output_file=FLAGS.test_file, target_subs=test_subsumptions)
for subs in valid_subsumptions + test_subsumptions:
onto.remove_axiom(owl_axiom=subs)
onto.save_onto(save_path=FLAGS.evaluate_onto_file)
print('\n---- Evaluation data processing done ----\n')
print('\n---- Evaluation starts ----\n')
config = CfgNode(FileUtils.load_file(DEFAULT_CONFIG_FILE_INTRA))
config.subsumption_type = FLAGS.subsumption_type
config.prompt.prompt_type = 'traversal'
config.train_subsumption_file = FLAGS.train_file
config.valid_subsumption_file = FLAGS.valid_file
config.test_subsumption_file = FLAGS.test_file
config.onto_file = FLAGS.evaluate_onto_file
onto2 = Ontology(owl_path=FLAGS.evaluate_onto_file)
pipeline = BERTSubsIntraPipeline(onto=onto2, config=config)
print('\n---- Evaluation done ----\n')
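# Editor-added example invocation (paths are placeholders, not part of the original script):
#
#     python bertsubs_intra_evaluate.py --onto_file ./foodon.owl --subsumption_type named_class \
#         --valid_ratio 0.05 --test_ratio 0.1 --test_max_neg_size 40 \
#         --train_file ./train_subsumptions.csv --valid_file ./valid_subsumptions.csv \
#         --test_file ./test_subsumptions.csv --evaluate_onto_file ./foodon_eval.owl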
| 8,070 | 49.761006 | 124 |
py
|
DeepOnto
|
DeepOnto-main/scripts/bertsubs_simple_test.py
|
# Copyright 2023 Jiaoyan Chen. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from yacs.config import CfgNode
import sys
import os
main_dir = os.getcwd().split("DeepOnto")[0] + "DeepOnto/src"
sys.path.append(main_dir)
from deeponto.subs.bertsubs import BERTSubsIntraPipeline, DEFAULT_CONFIG_FILE_INTRA, BERTSubsInterPipeline, DEFAULT_CONFIG_FILE_INTER
from deeponto.utils import FileUtils
from deeponto.onto import Ontology
'''
The following segment of codes is for testing BERTSubs Intra-ontology subsumption,
with a given ontology (and training/valid subsumptions optionally), and a testing file.
'''
config = CfgNode(FileUtils.load_file(DEFAULT_CONFIG_FILE_INTRA))
config.onto_file = './foodon.owl'
config.train_subsumption_file = './train_subsumptions.csv'
config.valid_subsumption_file = './valid_subsumptions.csv'
config.test_subsumption_file = './test_subsumptions.csv'
config.test_type = 'evaluation'
config.subsumption_type = 'named_class' # named_class, restriction
config.prompt.prompt_type = 'isolated' # isolated, traversal, path
onto = Ontology(owl_path=config.onto_file)
intra_pipeline = BERTSubsIntraPipeline(onto=onto, config=config)
'''
The following segment of codes is for testing BERTSubs Inter-ontology subsumption (mappings),
with a given ontology (and training/valid subsumptions optionally), and a testing file
'''
config = CfgNode(FileUtils.load_file(DEFAULT_CONFIG_FILE_INTER))
config.src_onto_file = './helis2foodon/helis_v1.00.owl'
config.tgt_onto_file = './helis2foodon/foodon-merged.0.4.8.subs.owl'
config.train_subsumption_file = './helis2foodon/train_subsumptions.csv'
config.valid_subsumption_file = './helis2foodon/valid_subsumptions.csv'
config.test_subsumption_file = './helis2foodon/test_subsumptions.csv'
config.test_type = 'evaluation'
config.subsumption_type = 'named_class' # named_class, restriction
config.prompt.prompt_type = 'path' # isolated, traversal, path
src_onto = Ontology(owl_path=config.src_onto_file)
tgt_onto = Ontology(owl_path=config.tgt_onto_file)
inter_pipeline = BERTSubsInterPipeline(src_onto=src_onto, tgt_onto=tgt_onto, config=config)
| 2,647 | 43.133333 | 133 |
py
|
ACE
|
ACE-main/example.py
|
import torch
import torch.nn.functional as F
import timm
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from ace import attack_confidence_estimation
def attack_example(file_name, true_label, transform, normalization):
image = Image.open(f'./images/{file_name}.jpg').convert('RGB')
input = transform(image).unsqueeze(0).cuda() # transform and add batch dimension
with torch.no_grad():
output = model(normalization(input))
orig_prediction = torch.nn.functional.softmax(output, dim=1).max(1)
print(f'Ground truth label is {true_label}. The predicted label is {orig_prediction[1].item()} with a confidence of {orig_prediction[0].item()}')
adversarial_example = attack_confidence_estimation(model=model, input=input, label=torch.tensor(true_label), normalization=normalization)
with torch.no_grad():
attacked_prediction = torch.nn.functional.softmax(model(normalization(adversarial_example)), dim=1).max(1)
print(f'After using ACE, the predicted label is still {attacked_prediction[1].item()} with a confidence of {attacked_prediction[0].item()}')
if __name__ == '__main__':
model = timm.create_model('efficientnet_b0', pretrained=True).cuda()
model.eval()
config = resolve_data_config({}, model=model)
transform = create_transform(**config)
normalization = transform.transforms.pop(3)
# A correct prediction example
print('=============== A correct prediction example: ===============')
attack_example(file_name='tank', true_label=847, transform=transform, normalization=normalization)
# An incorrect prediction example
print('=============== An incorrect prediction example: ===============')
attack_example(file_name='binoculars', true_label=447, transform=transform, normalization=normalization)
| 1,864 | 57.28125 | 149 |
py
|
ACE
|
ACE-main/ace.py
|
import torch
def softmax_response(logits):
return torch.nn.functional.softmax(logits, dim=1)
def attack_confidence_estimation(model, input, label, normalization, proxy=None, epsilon=0.005, epsilon_decay=0.5, max_iterations=15, confidence_score_function=softmax_response, device='cuda'):
input = input.to(device)
label = label.to(device)
model = model.to(device)
data = normalization(input)
data.requires_grad = True
if proxy:
# Black-box setting, use proxy to calculate the gradients
proxy = proxy.to(device)
output = proxy(data)
proxy.zero_grad()
with torch.no_grad():
model_output = model(normalization(input))
else:
# White-box setting, use model itself to calculate the gradients
output = model(data)
model.zero_grad()
model_output = output
init_prediction = model_output.argmax()
output = confidence_score_function(output)
# Calculate gradients of model in backward pass
output[0][init_prediction.item()].backward(retain_graph=True)
# Collect gradients
jacobian = data.grad.data
if init_prediction == label:
# If the model is correct, we wish to make it less confident of its prediction
attack_direction = -1
else:
# Otherwise, we wish to make it more confident of its misprediction
attack_direction = 1
with torch.no_grad():
for i in range(max_iterations):
jacobian_sign = jacobian.sign()
perturbed_image = input + epsilon * jacobian_sign * attack_direction
perturbed_image = torch.clamp(perturbed_image, 0, 1)
new_output = model(normalization(perturbed_image))
if new_output.argmax() == init_prediction:
# This adversarial example does not change the prediction as required, return it
return perturbed_image
else:
epsilon = epsilon * epsilon_decay
# The attack has failed; either the epsilon was too large, epsilon_decay too small,
# or max_iterations was insufficient. Return original input.
return input
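# Editor-added sketch of the black-box setting (not part of the original file):
# gradients are taken from a separately trained `proxy_model`, while the attacked
# `target_model` is only queried for predictions. All variable names are placeholders.
#
#     adv = attack_confidence_estimation(model=target_model, input=x, label=y,
#                                        normalization=normalization, proxy=proxy_model)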
| 2,151 | 42.04 | 193 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/setup.py
|
from setuptools import setup, find_packages
setup(
name="fengshen",
version="0.0.1",
description="fengshen",
long_description="fengshen",
license="MIT Licence",
url="https://idea.edu.cn",
author="gaoxinyu",
author_email="[email protected]",
packages=find_packages(),
include_package_data=True,
platforms="any",
install_requires=[
'transformers >= 4.17.0',
'datasets >= 2.0.0',
'pytorch_lightning >= 1.5.10',
'deepspeed >= 0.5.10',
'jieba-fast >= 0.53',
'jieba >= 0.40.0',
],
scripts=[],
entry_points={
'console_scripts': [
'fengshen-pipeline = fengshen.cli.fengshen_pipeline:main'
]
}
)
| 733 | 21.9375 | 69 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/__init__.py
|
# coding=utf-8
# Copyright 2021 The IDEA Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .models.longformer import LongformerConfig, LongformerModel
from .models.roformer import RoFormerConfig, RoFormerModel
from .models.megatron_t5 import T5Config, T5EncoderModel
from .models.ubert import UbertPipelines, UbertModel
| 849 | 41.5 | 74 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/cli/fengshen_pipeline.py
|
import sys
from importlib import import_module
from datasets import load_dataset
import argparse
def main():
if len(sys.argv) < 3:
raise Exception(
'args len < 3, example: fengshen_pipeline text_classification predict xxxxx')
pipeline_name = sys.argv[1]
method = sys.argv[2]
pipeline_class = getattr(import_module('fengshen.pipelines.' + pipeline_name), 'Pipeline')
total_parser = argparse.ArgumentParser("FengShen Pipeline")
total_parser.add_argument('--model', default='', type=str)
total_parser.add_argument('--datasets', default='', type=str)
total_parser.add_argument('--text', default='', type=str)
total_parser = pipeline_class.add_pipeline_specific_args(total_parser)
args = total_parser.parse_args(args=sys.argv[3:])
pipeline = pipeline_class(args=args, model=args.model)
if method == 'predict':
print(pipeline(args.text))
elif method == 'train':
datasets = load_dataset(args.datasets)
pipeline.train(datasets)
else:
raise Exception(
'cmd not support, now only support {predict, train}')
if __name__ == '__main__':
main()
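# Editor-added example invocations (model and dataset names are placeholders):
#
#     fengshen-pipeline text_classification train --model <model_name_or_path> --datasets <hf_dataset_name>
#     fengshen-pipeline text_classification predict --model <model_name_or_path> --text "some text to classify"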
| 1,161 | 32.2 | 94 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/strategies/megatron_deepspeed.py
|
# Copyright The Lightning AI team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
from collections import OrderedDict
from typing import Any, Dict, List, Optional, Tuple, TYPE_CHECKING, Union
import torch
from torch.nn import Module
from torch.optim import Optimizer
import pytorch_lightning as pl
from pytorch_lightning.plugins import ClusterEnvironment
from pytorch_lightning.strategies.deepspeed import _DEEPSPEED_AVAILABLE
from pytorch_lightning.utilities.types import _PATH, LRSchedulerTypeUnion
from pytorch_lightning.utilities.optimizer import optimizers_to_device
from pytorch_lightning.plugins.precision import PrecisionPlugin
from pytorch_lightning.strategies.deepspeed import DeepSpeedStrategy as OriginDeepSpeedStrategy
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from fengshen.models.megatron import mpu, fused_kernels
log = logging.getLogger(__name__)
if _DEEPSPEED_AVAILABLE:
import deepspeed
def remove_module_hooks(model: torch.nn.Module) -> None:
# todo (tchaton) awaiting this feature to move upstream to DeepSpeed
for module in model.modules():
module._backward_hooks = OrderedDict()
module._is_full_backward_hook = None
module._forward_hooks = OrderedDict()
module._forward_pre_hooks = OrderedDict()
module._state_dict_hooks = OrderedDict()
module._load_state_dict_pre_hooks = OrderedDict()
class DeepSpeedStrategy(OriginDeepSpeedStrategy):
strategy_name = "megatron_deepspeed"
DEEPSPEED_ENV_VAR = "PL_DEEPSPEED_CONFIG_PATH"
def __init__(
self,
pipe_model_parallel_size,
tensor_model_parallel_size,
mpu_seed,
accelerator: Optional["pl.accelerators.Accelerator"] = None,
zero_optimization: bool = True,
stage: int = 2,
remote_device: str = "cpu",
offload_optimizer: bool = False,
offload_parameters: bool = False,
offload_params_device: str = "cpu",
nvme_path: str = "/local_nvme",
params_buffer_count: int = 5,
params_buffer_size: int = 100_000_000,
max_in_cpu: int = 1_000_000_000,
offload_optimizer_device: str = "cpu",
optimizer_buffer_count: int = 4,
block_size: int = 1048576,
queue_depth: int = 8,
single_submit: bool = False,
overlap_events: bool = True,
thread_count: int = 1,
pin_memory: bool = False,
sub_group_size: int = 1_000_000_000_000,
contiguous_gradients: bool = True,
overlap_comm: bool = True,
allgather_partitions: bool = True,
reduce_scatter: bool = True,
allgather_bucket_size: int = 200_000_000,
reduce_bucket_size: int = 200_000_000,
zero_allow_untested_optimizer: bool = True,
logging_batch_size_per_gpu: Union[str, int] = "auto",
config: Optional[Union[_PATH, Dict[str, Any]]] = None,
logging_level: int = logging.WARN,
parallel_devices: Optional[List[torch.device]] = None,
cluster_environment: Optional[ClusterEnvironment] = None,
loss_scale: float = 0,
initial_scale_power: int = 16,
loss_scale_window: int = 1000,
hysteresis: int = 2,
min_loss_scale: int = 1,
partition_activations: bool = False,
cpu_checkpointing: bool = False,
contiguous_memory_optimization: bool = False,
synchronize_checkpoint_boundary: bool = False,
load_full_weights: bool = False,
precision_plugin: Optional[PrecisionPlugin] = None,
process_group_backend: Optional[str] = None,
) -> None:
"""Provides capabilities to run training using the DeepSpeed library, with training optimizations for large
billion parameter models. `For more information: https://pytorch-
lightning.readthedocs.io/en/stable/advanced/model_parallel.html#deepspeed`.
.. warning:: This is an :ref:`experimental <versioning:Experimental API>` feature.
Defaults have been set to enable ZeRO-Offload and some have been taken from the link below.
These defaults have been set generally, but may require tuning for optimum performance based on your model size.
`For more information: https://www.deepspeed.ai/docs/config-json/#zero-optimizations-for-fp16-training`.
Arguments:
zero_optimization: Enable ZeRO optimization. This is compatible with either `precision="16-mixed"` or
`precision="bf16-mixed"`.
stage: Different stages of the ZeRO Optimizer. 0 is disabled,
1 is optimizer state partitioning, 2 is optimizer+gradient state partitioning,
3 is optimizer+gradient_parameter partitioning using the infinity engine.
remote_device: Device to instantiate the model on initially (``cpu`` or ``nvme``).
offload_optimizer: Enable offloading optimizer memory and computation to CPU or NVMe
based on ``offload_optimizer_device``.
offload_parameters: When using ZeRO Stage 3, Enable offloading parameter memory and computation
to CPU or NVMe based on ``offload_params_device``.
offload_params_device: When offloading parameters choose the device to offload to, ``cpu`` or ``nvme``.
offload_optimizer_device: When offloading optimizer state choose the device to offload to,
``cpu`` or ``nvme``.
params_buffer_count: Number of buffers in buffer pool for
parameter offloading when ``offload_params_device`` is ``nvme``.
params_buffer_size: Size of buffers in buffer pool for parameter offloading
when ``offload_params_device`` is ``nvme``.
max_in_cpu: Number of parameter elements to maintain in CPU memory when offloading to NVMe is enabled.
nvme_path: Filesystem path for NVMe device for optimizer/parameter state offloading.
optimizer_buffer_count: Number of buffers in buffer pool for optimizer state offloading
                when ``offload_optimizer_device`` is set to ``nvme``.
This should be at least the number of states maintained per parameter by the optimizer.
For example, Adam optimizer has 4 states (parameter, gradient, momentum, and variance).
block_size: When using NVMe Offloading, the I/O block size in bytes.
queue_depth: When using NVMe Offloading, the I/O queue depth.
single_submit: When using NVMe Offloading,
submit requests to storage device as multiple individual requests,
as opposed to one block of requests.
overlap_events: When using NVMe Offloading,
submit requests to storage device in an overlapped fashion
without waiting for completion of earlier requests.
thread_count: When using NVMe Offloading,
Intra-request parallelism for each read/write submitted by a user thread.
pin_memory: When using ZeRO stage 3, pin optimizer state memory on CPU.
This could boost throughput at the cost of extra memory overhead.
sub_group_size: When using ZeRO stage 3, defines the number of parameters
within a sub group to offload at a time.
Smaller numbers require more communication, but improve memory efficiency.
contiguous_gradients: Copies gradients to a continuous buffer as they are produced.
Avoids memory fragmentation during backwards. Useful when training large models.
overlap_comm: Overlap the reduction (synchronization) of gradients with the backwards computation.
This is a speed optimization when training across multiple GPUs/machines.
allgather_partitions: All gather updated parameters at the end of training step,
instead of using a series of broadcast collectives.
reduce_scatter: Use reduce/scatter instead of allreduce to average gradients.
allgather_bucket_size: Number of elements to allgather at once.
Used to limit the memory required for larger model sizes, with a tradeoff with speed.
reduce_bucket_size: Number of elements to reduce at once.
Used to limit the memory required for larger model sizes, with a tradeoff with speed.
zero_allow_untested_optimizer: Allow untested optimizers to be used with ZeRO. Currently only Adam is a
DeepSpeed supported optimizer when using ZeRO.
logging_batch_size_per_gpu: Config used in DeepSpeed to calculate verbose timing for logging
on a per sample per second basis (only displayed if logging=logging.INFO).
If set to "auto", the plugin tries to infer this from
the train DataLoader's BatchSampler, else defaults to 1.
To obtain accurate logs when using datasets that do not support batch samplers,
set this to the actual per gpu batch size (trainer.batch_size).
config: Pass in a deepspeed formatted config dict,
or path to a deepspeed config: https://www.deepspeed.ai/docs/config-json.
All defaults will be ignored if a config is passed in.
logging_level: Set logging level for deepspeed.
loss_scale: Loss scaling value for FP16 training.
0.0 results in dynamic loss scaling, otherwise static.
initial_scale_power: Power of the initial dynamic loss scale value. Loss scale is computed
by ``2^initial_scale_power``.
loss_scale_window: Window in which to raise/lower the dynamic FP16 loss scaling value.
hysteresis: FP16 Delay shift in Dynamic Loss scaling.
min_loss_scale: The minimum FP16 dynamic loss scaling value.
partition_activations: Enables partition activation when used with ZeRO stage 3 and model parallelism.
Still requires you to wrap your forward functions in deepspeed.checkpointing.checkpoint.
See `deepspeed tutorial
<https://www.deepspeed.ai/tutorials/megatron/#deepspeed-activation-checkpoints-optional>`_.
cpu_checkpointing: Offloads partitioned activations to CPU if ``partition_activations`` is enabled.
contiguous_memory_optimization: Copies partitioned activations so that they are contiguous in memory.
Not supported by all models.
synchronize_checkpoint_boundary: Insert :func:`torch.cuda.synchronize` at each checkpoint boundary.
load_full_weights: True when loading a single checkpoint file containing the model state dict
when using ZeRO Stage 3. This differs from the DeepSpeed checkpoint which contains shards
per worker.
"""
if not _DEEPSPEED_AVAILABLE:
raise MisconfigurationException(
"To use the `DeepSpeedStrategy`, you must have DeepSpeed installed."
" Install it by running `pip install -U deepspeed`."
)
super().__init__(
accelerator=accelerator,
parallel_devices=parallel_devices,
cluster_environment=cluster_environment,
precision_plugin=precision_plugin,
process_group_backend=process_group_backend,
)
self.config = self._load_config(config)
if self.config is None:
# User has not overridden config, set defaults
self.config = self._create_default_config(
zero_optimization,
zero_allow_untested_optimizer,
logging_batch_size_per_gpu,
offload_optimizer=offload_optimizer,
offload_parameters=offload_parameters,
nvme_path=nvme_path,
offload_params_device=offload_params_device,
params_buffer_count=params_buffer_count,
params_buffer_size=params_buffer_size,
max_in_cpu=max_in_cpu,
pin_memory=pin_memory,
offload_optimizer_device=offload_optimizer_device,
optimizer_buffer_count=optimizer_buffer_count,
block_size=block_size,
queue_depth=queue_depth,
single_submit=single_submit,
overlap_events=overlap_events,
thread_count=thread_count,
partition_activations=partition_activations,
cpu_checkpointing=cpu_checkpointing,
contiguous_memory_optimization=contiguous_memory_optimization,
synchronize_checkpoint_boundary=synchronize_checkpoint_boundary,
stage=stage,
contiguous_gradients=contiguous_gradients,
overlap_comm=overlap_comm,
allgather_partitions=allgather_partitions,
reduce_scatter=reduce_scatter,
allgather_bucket_size=allgather_bucket_size,
reduce_bucket_size=reduce_bucket_size,
sub_group_size=sub_group_size,
)
import deepspeed
self._config_initialized = False
deepspeed.utils.logging.logger.setLevel(logging_level)
self.remote_device = remote_device
self.load_full_weights = load_full_weights
# default FP16 parameters.
self.loss_scale = loss_scale
self.initial_scale_power = initial_scale_power
self.loss_scale_window = loss_scale_window
self.hysteresis = hysteresis
self.min_loss_scale = min_loss_scale
self.pipe_model_parallel_size = pipe_model_parallel_size
self.tensor_model_parallel_size = tensor_model_parallel_size
self.mpu_seed = mpu_seed
def _setup_model_and_optimizer(
self, model: Module, optimizer: Optimizer, lr_scheduler: Optional[LRSchedulerTypeUnion] = None
):
"""Initialize one model and one optimizer with an optional learning rate scheduler.
This calls :func:`deepspeed.initialize` internally.
"""
model_parameters = filter(lambda p: p.requires_grad, model.parameters())
deepspeed_engine, deepspeed_optimizer, _, _ = deepspeed.initialize(
args=argparse.Namespace(device_rank=self.root_device.index),
config=self.config,
model=model,
model_parameters=model_parameters, # type: ignore
optimizer=optimizer,
lr_scheduler=lr_scheduler,
dist_init_required=False,
mpu=mpu
)
return deepspeed_engine, deepspeed_optimizer
def _set_deepspeed_activation_checkpointing(self) -> None:
import deepspeed
assert isinstance(self.config, dict)
        assert self.config.get(
            "activation_checkpointing"), 'megatron_deepspeed strategy needs activation_checkpointing config'
if self.config.get("activation_checkpointing"):
checkpoint_config = self.config["activation_checkpointing"]
deepspeed.checkpointing.configure(
mpu_=mpu,
num_checkpoints=checkpoint_config.get("num_checkpoints"),
partition_activations=checkpoint_config.get("partition_activations"),
contiguous_checkpointing=checkpoint_config.get("contiguous_memory_optimization"),
checkpoint_in_cpu=checkpoint_config.get("cpu_checkpointing"),
profile=checkpoint_config.get("profile"),
)
def setup_environment(self) -> None:
super().setup_environment()
self.setup_mpu()
def setup_mpu(self) -> None:
fused_kernels.load_fused_kernels()
rank = self.cluster_environment.global_rank()
world_size = self.cluster_environment.world_size()
from deepspeed.runtime.pipe.topology import PipeModelDataParallelTopology
        # This places pipeline parallelism on the outermost dimension, then data parallelism, then model parallelism.
# PipeModelDataParallelTopology is just a wrapper over ProcessTopology that predefines this order.
dp = world_size // self.pipe_model_parallel_size // self.tensor_model_parallel_size
topo = PipeModelDataParallelTopology(num_pp=self.pipe_model_parallel_size,
num_mp=self.tensor_model_parallel_size,
num_dp=dp)
        # Offset base seeds for the interior pipeline stages.
        # TODO: adjust last stage too once IO is improved.
        seed = self.mpu_seed
        stage_id = topo.get_coord(rank=rank).pipe
        if 0 < stage_id < topo.get_dim("pipe") - 1:
            offset = seed + 1138
            seed = offset + (stage_id * self.tensor_model_parallel_size)
        mpu.initialize_model_parallel(
            self.tensor_model_parallel_size,
            topology=topo,
            fp32_allreduce=False)
        self._set_deepspeed_activation_checkpointing()
        mpu.model_parallel_cuda_manual_seed(seed)
def _initialize_deepspeed_inference(self, model: Module) -> None:
import deepspeed
assert isinstance(self.config, dict)
# todo: this is required for DeepSpeed throughput timers
inference_config = {"train_micro_batch_size_per_gpu": 1}
if "fp16" in self.config:
inference_config.update({"fp16": self.config["fp16"]})
if self.zero_stage_3:
inference_config.update(
{
"zero_allow_untested_optimizer": self.config["zero_allow_untested_optimizer"],
"zero_optimization": self.config["zero_optimization"],
}
)
# Remove all module hooks before initializing new model
remove_module_hooks(model)
model, _, _, _ = deepspeed.initialize(
args=argparse.Namespace(device_rank=self.root_device.index),
config=inference_config,
model=model,
optimizer=None,
lr_scheduler=None,
model_parameters=[],
dist_init_required=False,
mpu=mpu
)
self.model = model
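# ---------------------------------------------------------------------------
# Editor-added usage sketch (not part of the original file). Parallel sizes and
# the seed are placeholder values, and the deepspeed config passed via `config`
# must contain an `activation_checkpointing` section (see the assert above).
# ---------------------------------------------------------------------------
#
#     import pytorch_lightning as pl
#     strategy = DeepSpeedStrategy(pipe_model_parallel_size=1,
#                                  tensor_model_parallel_size=1,
#                                  mpu_seed=1234,
#                                  config="ds_config.json")
#     trainer = pl.Trainer(accelerator="gpu", devices=8, strategy=strategy, precision=16)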
| 18,750 | 45.8775 | 120 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/API/main.py
|
import uvicorn
import click
import argparse
import json
from importlib import import_module
from fastapi import FastAPI, WebSocket
from starlette.middleware.cors import CORSMiddleware
from utils import user_config, api_logger, setup_logger, RequestDataStructure
# When launched from the command line, only one argument is given: the name of the
# config file, e.g. text_classification.json.
# All other settings are specified in that config file, not on the command line.
total_parser = argparse.ArgumentParser("API")
total_parser.add_argument("config_path", type=str)
args = total_parser.parse_args()
# set up user config
user_config.setup_config(args)
# set up logger
setup_logger(api_logger, user_config)
# load pipeline
pipeline_class = getattr(import_module('fengshen.pipelines.' + user_config.pipeline_type), 'Pipeline')
model_settings = user_config.model_settings
model_args = argparse.Namespace(**model_settings)
pipeline = pipeline_class(
args = model_args,
model = user_config.model_name
)
# initialize app
app = FastAPI(
title = user_config.PROJECT_NAME,
openapi_url = f"{user_config.API_PREFIX_STR}/openapi.json"
)
# api
# TODO
# Different request methods still need to be handled case by case; so far only the
# fairly generic POST method has been verified.
# POST covers most "input text -> return result" style request tasks.
if(user_config.API_method == "POST"):
@app.post(user_config.API_path, tags = user_config.API_tags)
async def fengshen_post(data:RequestDataStructure):
# logging
api_logger.info(data.input_text)
input_text = data.input_text
result = pipeline(input_text)
return result
else:
print("only support POST method")
# Set all CORS enabled origins
if user_config.BACKEND_CORS_ORIGINS:
app.add_middleware(
CORSMiddleware,
allow_origins = [str(origin) for origin in user_config.BACKEND_CORS_ORIGINS],
allow_credentials = user_config.allow_credentials,
allow_methods = user_config.allow_methods,
allow_headers = user_config.allow_headers,
)
if __name__ == '__main__':
    # After startup, open host:port/docs in a browser to inspect the API details
    # and run simple tests, e.g. 127.0.0.1:8990/docs
uvicorn.run(app, host = user_config.SERVER_HOST, port = user_config.SERVER_PORT)
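# Editor-added example request against the defaults in utils.py (host, port and
# path may differ depending on the loaded config file):
#
#     curl -X POST http://127.0.0.1:8990/TextClassification \
#          -H "Content-Type: application/json" \
#          -d '{"input_text": ["an example sentence"]}'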
| 2,051 | 25.649351 | 102 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/API/utils.py
|
from dataclasses import dataclass, field
import os
import json
import logging
from argparse import Namespace
from typing import List, Literal, Optional, Union
from pydantic import AnyHttpUrl, BaseSettings, HttpUrl, validator, BaseModel
CURRENT_DIR_PATH = os.path.dirname(os.path.abspath(__file__))
# request body
# Use pydantic to validate the request body data
class RequestDataStructure(BaseModel):
input_text: List[str] = [""]
uuid: Optional[int]
# parameters for text2image model
input_image: Optional[str]
skip_steps: Optional[int]
clip_guidance_scale: Optional[int]
init_scale: Optional[int]
# API config
@dataclass
class APIConfig:
# server config
SERVER_HOST: AnyHttpUrl = "127.0.0.1"
SERVER_PORT: int = 8990
SERVER_NAME: str = ""
PROJECT_NAME: str = ""
API_PREFIX_STR: str = "/api"
# api config
API_method: Literal["POST","GET","PUT","OPTIONS","WEBSOCKET","PATCH","DELETE","TRACE","CONNECT"] = "POST"
API_path: str = "/TextClassification"
API_tags: List[str] = field(default_factory = lambda: [""])
# CORS config
BACKEND_CORS_ORIGINS: List[AnyHttpUrl] = field(default_factory = lambda: ["*"])
allow_credentials: bool = True
allow_methods: List[str] = field(default_factory = lambda: ["*"])
allow_headers: List[str] = field(default_factory = lambda: ["*"])
# log config
log_file_path: str = ""
log_level: str = "INFO"
# pipeline config
pipeline_type: str = ""
model_name: str = ""
# model config
# device: int = -1
# texta_name: Optional[str] = "sentence"
# textb_name: Optional[str] = "sentence2"
# label_name: Optional[str] = "label"
# max_length: int = 512
# return_tensors: str = "pt"
# padding: str = "longest"
# truncation: bool = True
# skip_special_tokens: bool = True
# clean_up_tkenization_spaces: bool = True
# # parameters for text2image model
# skip_steps: Optional[int] = 0
# clip_guidance_scale: Optional[int] = 0
# init_scale: Optional[int] = 0
def setup_config(self, args:Namespace) -> None:
# load config file
with open(CURRENT_DIR_PATH + "/" + args.config_path, "r") as jsonfile:
config = json.load(jsonfile)
server_config = config["SERVER"]
logging_config = config["LOGGING"]
pipeline_config = config["PIPELINE"]
# server config
self.SERVER_HOST: AnyHttpUrl = server_config["SERVER_HOST"]
self.SERVER_PORT: int = server_config["SERVER_PORT"]
self.SERVER_NAME: str = server_config["SERVER_NAME"]
self.PROJECT_NAME: str = server_config["PROJECT_NAME"]
self.API_PREFIX_STR: str = server_config["API_PREFIX_STR"]
# api config
self.API_method: Literal["POST","GET","PUT","OPTIONS","WEBSOCKET","PATCH","DELETE","TRACE","CONNECT"] = server_config["API_method"]
self.API_path: str = server_config["API_path"]
self.API_tags: List[str] = server_config["API_tags"]
# CORS config
self.BACKEND_CORS_ORIGINS: List[AnyHttpUrl] = server_config["BACKEND_CORS_ORIGINS"]
self.allow_credentials: bool = server_config["allow_credentials"]
self.allow_methods: List[str] = server_config["allow_methods"]
self.allow_headers: List[str] = server_config["allow_headers"]
# log config
self.log_file_path: str = logging_config["log_file_path"]
self.log_level: str = logging_config["log_level"]
# pipeline config
self.pipeline_type: str = pipeline_config["pipeline_type"]
self.model_name: str = pipeline_config["model_name"]
# general model config
self.model_settings: dict = pipeline_config["model_settings"]
        # Since the pipeline parses its own arguments, the parameters below are unnecessary;
        # simply convert the model_settings dict to a Namespace and pass it as the pipeline's args.
# self.device: int = self.model_settings["device"]
# self.texta_name: Optional[str] = self.model_settings["texta_name"]
# self.textb_name: Optional[str] = self.model_settings["textb_name"]
# self.label_name: Optional[str] = self.model_settings["label_name"]
# self.max_length: int = self.model_settings["max_length"]
# self.return_tensors: str = self.model_settings["return_tensors"]
# self.padding: str = self.model_settings["padding"]
# self.truncation: bool = self.model_settings["truncation"]
# self.skip_special_tokens: bool = self.model_settings["skip_special_tokens"]
# self.clean_up_tkenization_spaces: bool = self.model_settings["clean_up_tkenization_spaces"]
# # specific parameters for text2image model
# self.skip_steps: Optional[int] = self.model_settings["skip_steps"]
# self.clip_guidance_scale: Optional[int] = self.model_settings["clip_guidance_scale"]
# self.init_scale: Optional[int] = self.model_settings["init_scale"]
def setup_logger(logger, user_config: APIConfig):
# default level: INFO
logger.setLevel(getattr(logging, user_config.log_level, "INFO"))
ch = logging.StreamHandler()
if(user_config.log_file_path == ""):
fh = logging.FileHandler(filename = CURRENT_DIR_PATH + "/" + user_config.SERVER_NAME + ".log")
    elif(not user_config.log_file_path.endswith(".log")):
fh = logging.FileHandler(filename = user_config.log_file_path + "/" + user_config.SERVER_NAME + ".log")
else:
fh = logging.FileHandler(filename = user_config.log_file_path)
formatter = logging.Formatter(
"%(asctime)s - %(module)s - %(funcName)s - line:%(lineno)d - %(levelname)s - %(message)s"
)
ch.setFormatter(formatter)
fh.setFormatter(formatter)
logger.addHandler(ch) # Exporting logs to the screen
logger.addHandler(fh) # Exporting logs to a file
return logger
user_config = APIConfig()
api_logger = logging.getLogger()
| 6,002 | 34.732143 | 139 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/pretrain_t5/process_data.py
|
# coding=utf8
import argparse
import sys
import os
from concurrent.futures import ProcessPoolExecutor
def _generate_cache_arrow(index, ds, path):
print('saving dataset shard {}'.format(index))
ds.save_to_disk(os.path.join(path, 'part_{}'.format(index)))
return 'saving dataset shard {} done'.format(index)
def generate_arrow_cache(ds, args) -> None:
'''
    Load the raw data (e.g. wudao_180g) or already-tokenized data, do a train/test split,
    shuffle with seed 42, and cache the result to disk.
'''
ds = ds.train_test_split(train_size=args.train_split_size, seed=42)
print(ds)
p = ProcessPoolExecutor(max_workers=args.preprocessing_num_workers)
res = []
train_shard_part = args.saved_data_shards
for i in range(0, train_shard_part):
res.append(p.submit(_generate_cache_arrow, i,
ds['train'].shard(train_shard_part, i), args.saved_train_data_path))
p.shutdown(wait=True)
for future in res:
print(future.result(), flush=True)
ds['test'].save_to_disk(args.saved_test_data_path)
print('done')
if __name__ == '__main__':
total_parser = argparse.ArgumentParser("Save data Task")
total_parser.add_argument(
'--new_vocab_path', default='/cognitive_comp/ganruyi/hf_models/t5_cn_small/sentencepiece_cn.model', type=str)
total_parser.add_argument('--preprocessing_num_workers', default=30, type=int)
total_parser.add_argument(
'--train_data_path', default='/cognitive_comp/common_data/test_wudao_180g_mt5_tokenized/', type=str)
total_parser.add_argument('--saved_data_shards', default=800, type=int)
total_parser.add_argument('--saved_train_data_path', default=None, type=str)
total_parser.add_argument('--saved_test_data_path', default=None, type=str)
total_parser.add_argument('--max_seq_length', default=512, type=int)
total_parser.add_argument('--train_split_size', default=0.999, type=float)
total_parser.add_argument('--pretrained_model_path', default=None, type=str)
total_parser.add_argument('--tokenizer_type', default='t5_tokenizer', choices=['t5_tokenizer', 'bert_tokenizer'])
total_parser.add_argument('--text_column_name', default='text')
total_parser.add_argument('--remove_columns', nargs='+', default=[])
# * Args for data preprocessing
args = total_parser.parse_args()
sys.path.append('../../../')
from fengshen.data.t5_dataloader.t5_datasets import UnsuperviseT5Dataset
ds = UnsuperviseT5Dataset(args.train_data_path, args)
print(ds)
generate_arrow_cache(ds.data, args=args)
# ds = UnsuperviseT5Dataset(args.train_data_path, args, load_data_type=0)
for i in range(0, 2):
print(ds.data[i])
print(ds.tokenizer.decode(ds.data[i]['input_ids']))
print(ds.data)
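# Editor-added example invocation (all paths are placeholders):
#
#     python process_data.py --train_data_path /path/to/tokenized_corpus \
#         --saved_train_data_path /path/to/out/train --saved_test_data_path /path/to/out/test \
#         --saved_data_shards 800 --preprocessing_num_workers 30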
| 2,746 | 40.621212 | 117 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/pretrain_t5/pretrain_t5.py
|
import time
from builtins import print
import sys
import os
import torch
import argparse
import json
import pytorch_lightning as pl
from transformers import MT5Config, MT5Tokenizer
from pytorch_lightning import Trainer, loggers
from transformers import MT5ForConditionalGeneration
from pytorch_lightning.callbacks import LearningRateMonitor
# os.environ["CUDA_VISIBLE_DEVICES"] = '3'
class MT5PretrainModel(pl.LightningModule):
@staticmethod
def add_model_specific_args(parent_args):
parser = parent_args.add_argument_group('BaseModel')
parser.add_argument('--keep_tokens_path', default=None, type=str)
return parent_args
def __init__(self, args):
super().__init__()
self.save_hyperparameters(args)
if args.tokenizer_type == 't5_tokenizer':
if args.new_vocab_path is not None:
                # For continued training from mt5: keep only the Chinese and English vocab and use a new sentencepiece model
assert args.keep_tokens_path is not None
keep_tokens = json.load(open(args.keep_tokens_path))
self.model = MT5ForConditionalGeneration.from_pretrained(
args.pretrained_model_path)
new_config = self.model.config
new_config.vocab_size = len(keep_tokens)
print('vocab_size:', new_config.vocab_size)
new_state_dict = self.model.state_dict()
select_index = torch.tensor(keep_tokens)
new_state_dict['encoder.embed_tokens.weight'] = torch.index_select(
new_state_dict['encoder.embed_tokens.weight'], dim=0, index=select_index)
new_state_dict['shared.weight'] = torch.index_select(
new_state_dict['shared.weight'], dim=0, index=select_index)
new_state_dict['decoder.embed_tokens.weight'] = torch.index_select(
new_state_dict['decoder.embed_tokens.weight'], dim=0, index=select_index)
new_state_dict['lm_head.weight'] = torch.index_select(
new_state_dict['lm_head.weight'], dim=0, index=select_index)
self.model = MT5ForConditionalGeneration.from_pretrained(
args.pretrained_model_path, config=new_config, state_dict=new_state_dict)
# self.model = MT5ForConditionalGeneration(config=new_config)
else:
                # For continuing training from an existing checkpoint
self.model = MT5ForConditionalGeneration.from_pretrained(
args.pretrained_model_path
)
else:
self.model = MT5ForConditionalGeneration(
MT5Config.from_pretrained(args.pretrained_model_path)
)
def setup(self, stage) -> None:
if stage == 'fit':
train_loader = self.trainer._data_connector._train_dataloader_source.dataloader()
# Calculate total steps
if self.trainer.max_epochs > 0:
world_size = self.trainer.world_size
tb_size = self.hparams.train_batchsize * max(1, world_size)
ab_size = self.trainer.accumulate_grad_batches * float(self.trainer.max_epochs)
self.total_steps = (len(train_loader.dataset) *
self.trainer.max_epochs // tb_size) // ab_size
else:
self.total_steps = self.trainer.max_steps // self.trainer.accumulate_grad_batches
print('Total steps: {}' .format(self.total_steps))
def configure_optimizers(self):
from fengshen.models.model_utils import configure_optimizers
return configure_optimizers(self)
def training_step(self, batch, batch_idx):
output = self.model(
input_ids=batch['input_ids'], labels=batch['labels'])
acc = self.comput_metrix(output.logits, batch['labels'])
self.log('train_loss', output.loss, sync_dist=True)
self.log('train_acc', acc, sync_dist=True)
return output.loss
def validation_step(self, batch, batch_idx):
# print('is out of index: ', batch['input_ids'][batch['input_ids'] >= 32598])
output = self.model(
input_ids=batch['input_ids'], labels=batch['labels'])
acc = self.comput_metrix(output.logits, batch['labels'])
self.log('val_loss', output.loss, sync_dist=True)
self.log('val_acc', acc, sync_dist=True)
def comput_metrix(self, logits, labels):
y_pred = torch.argmax(logits, dim=-1)
y_pred = y_pred.view(size=(-1,))
y_true = labels.view(size=(-1,)).float()
corr = torch.eq(y_pred, y_true)
acc = torch.sum(corr.float())/y_true.shape[0]
return acc
def on_save_checkpoint(self, checkpoint) -> None:
# Save the current loop info in the mid of epoch
# if you lightning <= 1.6.0 uncomment the line below
# checkpoint['loops'] = self.trainer.checkpoint_connector._get_loops_state_dict()
if self.trainer.global_rank == 0 and self.trainer.global_step % self.hparams.every_n_train_steps == 0:
self.model.save_pretrained(os.path.join(
self.trainer.checkpoint_callback.dirpath,
'hf_pretrained_epoch{}_step{}'.format(self.trainer.current_epoch, self.trainer.global_step)))
def on_load_checkpoint(self, checkpoint) -> None:
global_step_offset = checkpoint["global_step"]
if 'global_samples' in checkpoint:
self.consumed_samples = checkpoint['global_samples']
self.trainer.fit_loop.epoch_loop._batches_that_stepped = global_step_offset
def get_time_str():
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
def main():
total_parser = argparse.ArgumentParser("Pretrain Unsupervise.")
total_parser.add_argument(
'--do_eval_only', action='store_true', default=False)
total_parser.add_argument(
'--pretrained_model_path', default=None, type=str)
total_parser.add_argument(
'--new_vocab_path', default=None, type=str)
total_parser.add_argument('--max_seq_length', default=1024, type=int)
total_parser.add_argument('--ckpt_path', default=None, type=str)
sys.path.append('../../../')
from fengshen.data.t5_dataloader.t5_datasets import UnsuperviseT5DataModel
from fengshen.utils.universal_checkpoint import UniversalCheckpoint
# * Args for data preprocessing
total_parser = UnsuperviseT5DataModel.add_data_specific_args(total_parser)
# * Args for training
total_parser = Trainer.add_argparse_args(total_parser)
total_parser = UniversalCheckpoint.add_argparse_args(total_parser)
total_parser = MT5PretrainModel.add_model_specific_args(total_parser)
# * Args for base model
args = total_parser.parse_args()
print('Argument parse success.')
print('UnsuperviseT5DataModel load start {}'.format(get_time_str()))
data_model = UnsuperviseT5DataModel(args)
print('UnsuperviseT5DataModel load end {}'.format(get_time_str()))
if not args.do_eval_only:
model = MT5PretrainModel(args)
checkpoint_callback = UniversalCheckpoint(args)
lr_monitor = LearningRateMonitor(logging_interval='step')
logger = loggers.TensorBoardLogger(save_dir=os.path.join(
args.default_root_dir, 'logs/'))
trainer = Trainer.from_argparse_args(args,
logger=logger,
callbacks=[checkpoint_callback, lr_monitor]
)
trainer.fit(model, data_model, ckpt_path=args.ckpt_path)
else:
tokenizer = MT5Tokenizer.from_pretrained(args.new_vocab_path, extra_ids=0)
        model = MT5PretrainModel(args=args)
trainer = Trainer.from_argparse_args(args)
result = trainer.predict(model, data_model)
result = result[0]
for i in range(4):
print(tokenizer.batch_decode(result['input_ids'][i]))
print(tokenizer.batch_decode(result['predict_ids'][i]))
print(tokenizer.batch_decode(result['labels'][i]))
if __name__ == '__main__':
main()
| 8,139 | 45.25 | 110 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/pretrain_t5/convert_ckpt_to_bin.py
|
import time
from builtins import print
import argparse
import torch
# os.environ["CUDA_VISIBLE_DEVICES"] = '3'
def get_time_str():
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
def main():
total_parser = argparse.ArgumentParser("Pretrain Unsupervise.")
total_parser.add_argument('--ckpt_path', default=None, type=str)
total_parser.add_argument('--bin_path', default=None, type=str)
total_parser.add_argument('--rm_prefix', default=None, type=str)
# * Args for base model
args = total_parser.parse_args()
print('Argument parse success.')
state_dict = torch.load(args.ckpt_path)['module']
new_state_dict = {}
if args.rm_prefix is not None:
prefix_len = len(args.rm_prefix)
for k, v in state_dict.items():
if k[:prefix_len] == args.rm_prefix:
new_state_dict[k[prefix_len:]] = v
else:
new_state_dict[k] = v
else:
new_state_dict = state_dict
torch.save(new_state_dict, args.bin_path)
if __name__ == '__main__':
main()
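# Editor-added example invocation (paths are placeholders; '--rm_prefix model.' is a
# guess at stripping the LightningModule's 'model.' key prefix so the weights can be
# loaded directly into the underlying Hugging Face model):
#
#     python convert_ckpt_to_bin.py --ckpt_path /path/to/ckpt/mp_rank_00_model_states.pt \
#         --bin_path pytorch_model.bin --rm_prefix model.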
| 1,071 | 27.210526 | 68 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/pretrain_t5/finetune_t5.py
|
import time
from builtins import print
import sys
import os
import torch
import argparse
import pytorch_lightning as pl
from pytorch_lightning import Trainer, loggers
from transformers import MT5ForConditionalGeneration
from pytorch_lightning.callbacks import LearningRateMonitor
# os.environ["CUDA_VISIBLE_DEVICES"] = '3'
class MT5FinetuneModel(pl.LightningModule):
@staticmethod
def add_model_specific_args(parent_args):
parser = parent_args.add_argument_group('BaseModel')
parser.add_argument('--keep_tokens_path', default=None, type=str)
return parent_args
def __init__(self, args):
super().__init__()
self.save_hyperparameters(args)
self.model = MT5ForConditionalGeneration.from_pretrained(
args.pretrained_model_path
)
def setup(self, stage) -> None:
if stage == 'fit':
train_loader = self.trainer._data_connector._train_dataloader_source.dataloader()
# Calculate total steps
if self.trainer.max_epochs > 0:
world_size = self.trainer.world_size
tb_size = self.hparams.train_batchsize * max(1, world_size)
ab_size = self.trainer.accumulate_grad_batches * float(self.trainer.max_epochs)
self.total_steps = (len(train_loader.dataset) *
self.trainer.max_epochs // tb_size) // ab_size
else:
self.total_steps = self.trainer.max_steps // self.trainer.accumulate_grad_batches
print('Total steps: {}' .format(self.total_steps))
def configure_optimizers(self):
from fengshen.models.model_utils import configure_optimizers
return configure_optimizers(self)
def training_step(self, batch, batch_idx):
output = self.model(
input_ids=batch['input_ids'],
attention_mask=batch['attention_mask'],
labels=batch['labels'])
acc = self.comput_metrix(output.logits, batch['labels'])
self.log('train_loss', output.loss, sync_dist=True)
self.log('train_acc', acc, sync_dist=True)
return output.loss
def validation_step(self, batch, batch_idx):
# print('is out of index: ', batch['input_ids'][batch['input_ids'] >= 32598])
output = self.model(
input_ids=batch['input_ids'],
attention_mask=batch['attention_mask'],
labels=batch['labels'])
acc = self.comput_metrix(output.logits, batch['labels'])
cond_output = self.model.generate(
input_ids=batch['input_ids'],
attention_mask=batch['attention_mask'],
force_words_ids=batch['force_words_ids'],
num_beams=2,
)
cond_acc = self.comput_metrix(cond_output, batch['labels'])
self.log('val_loss', output.loss, sync_dist=True)
self.log('val_acc', acc, sync_dist=True)
self.log('cond_acc', cond_acc, sync_dist=True)
def comput_metrix(self, logits, labels):
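        # Token-level accuracy: argmax over the vocabulary, flatten, and compare element-wise with the labels.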
y_pred = torch.argmax(logits, dim=-1)
y_pred = y_pred.view(size=(-1,))
y_true = labels.view(size=(-1,)).float()
corr = torch.eq(y_pred, y_true)
acc = torch.sum(corr.float())/y_true.shape[0]
return acc
def on_save_checkpoint(self, checkpoint) -> None:
        # Save the current loop state in the middle of an epoch.
        # If your Lightning version is <= 1.6.0, uncomment the line below:
        # checkpoint['loops'] = self.trainer.checkpoint_connector._get_loops_state_dict()
if self.trainer.global_rank == 0 and self.trainer.global_step % self.hparams.every_n_train_steps == 0:
self.model.save_pretrained(os.path.join(
self.trainer.checkpoint_callback.dirpath,
'hf_pretrained_epoch{}_step{}'.format(self.trainer.current_epoch, self.trainer.global_step)))
def on_load_checkpoint(self, checkpoint) -> None:
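        # Restore the global step so that a resumed run continues counting from the checkpointed step.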
global_step_offset = checkpoint["global_step"]
if 'global_samples' in checkpoint:
self.consumed_samples = checkpoint['global_samples']
self.trainer.fit_loop.epoch_loop._batches_that_stepped = global_step_offset
def get_time_str():
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
def main():
total_parser = argparse.ArgumentParser("Pretrain Unsupervise.")
total_parser.add_argument(
'--do_eval_only', action='store_true', default=False)
total_parser.add_argument(
'--pretrained_model_path', default=None, type=str)
total_parser.add_argument(
'--new_vocab_path', default=None, type=str)
total_parser.add_argument('--max_seq_length', default=1024, type=int)
total_parser.add_argument('--ckpt_path', default=None, type=str)
sys.path.append('../../../')
from fengshen.data.t5_dataloader.t5_datasets import TaskT5DataModel
from fengshen.utils.universal_checkpoint import UniversalCheckpoint
# * Args for data preprocessing
total_parser = TaskT5DataModel.add_data_specific_args(total_parser)
# * Args for training
total_parser = Trainer.add_argparse_args(total_parser)
total_parser = UniversalCheckpoint.add_argparse_args(total_parser)
total_parser = MT5FinetuneModel.add_model_specific_args(total_parser)
# * Args for base model
args = total_parser.parse_args()
print('Argument parse success.')
print('TaskT5DataModel load start {}'.format(get_time_str()))
data_model = TaskT5DataModel(args)
print('TaskT5DataModel load end {}'.format(get_time_str()))
if not args.do_eval_only:
model = MT5FinetuneModel(args)
checkpoint_callback = UniversalCheckpoint(args)
lr_monitor = LearningRateMonitor(logging_interval='step')
logger = loggers.TensorBoardLogger(save_dir=os.path.join(
args.default_root_dir, 'logs/'))
trainer = Trainer.from_argparse_args(args,
logger=logger,
callbacks=[checkpoint_callback, lr_monitor]
)
trainer.fit(model, data_model, ckpt_path=args.ckpt_path)
if __name__ == '__main__':
main()
| 6,184 | 41.655172 | 110 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/stable_diffusion_dreambooth/train.py
|
# -*- encoding: utf-8 -*-
'''
Copyright 2022 The International Digital Economy Academy (IDEA). CCNL team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@File : train.py
@Time : 2022/11/09 22:27
@Author : Gan Ruyi
@Version : 1.0
@Contact : [email protected]
@License : (C)Copyright 2022-2023, CCNL-IDEA
'''
import hashlib
import itertools
import os
from pathlib import Path
from tqdm.auto import tqdm
import torch
import argparse
from pytorch_lightning import (
LightningModule,
Trainer,
)
from pytorch_lightning.callbacks import (
LearningRateMonitor,
)
from transformers import BertTokenizer, BertModel, CLIPTokenizer, CLIPTextModel
from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel
from torch.nn import functional as F
from fengshen.data.dreambooth_datasets.dreambooth_datasets import PromptDataset, DreamBoothDataset
from fengshen.data.universal_datamodule import UniversalDataModule
from fengshen.models.model_utils import (
add_module_args,
configure_optimizers,
get_total_steps,
)
from fengshen.utils.universal_checkpoint import UniversalCheckpoint
from fengshen.data.dreambooth_datasets.dreambooth_datasets import add_data_args
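# LightningModule bundling the Stable Diffusion components (tokenizer, text encoder, VAE, UNet, noise scheduler)
# for DreamBooth fine-tuning, with optional prior preservation on generated class images.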
class StableDiffusionDreamBooth(LightningModule):
@staticmethod
def add_module_specific_args(parent_parser):
parser = parent_parser.add_argument_group('Taiyi Stable Diffusion Module')
parser.add_argument('--train_text_encoder', action='store_true', default=False)
# dreambooth train unet only default
parser.add_argument('--train_unet', action='store_true', default=True)
return parent_parser
def __init__(self, args):
super().__init__()
if 'Taiyi-Stable-Diffusion-1B-Chinese-v0.1' in args.model_path:
self.tokenizer = BertTokenizer.from_pretrained(
args.model_path, subfolder="tokenizer")
self.text_encoder = BertModel.from_pretrained(
args.model_path, subfolder="text_encoder") # load from taiyi_finetune-v0
else:
self.tokenizer = CLIPTokenizer.from_pretrained(
args.model_path, subfolder="tokenizer")
self.text_encoder = CLIPTextModel.from_pretrained(
args.model_path, subfolder="text_encoder")
self.vae = AutoencoderKL.from_pretrained(
args.model_path, subfolder="vae")
self.unet = UNet2DConditionModel.from_pretrained(
args.model_path, subfolder="unet")
self.noise_scheduler = DDPMScheduler.from_config(
args.model_path, subfolder="scheduler")
# set model
self.vae.requires_grad_(False)
        # Freeze the sub-modules that are not being trained.
        if not args.train_text_encoder:
            self.text_encoder.requires_grad_(False)
        if not args.train_unet:
            self.unet.requires_grad_(False)
self.save_hyperparameters(args)
def generate_extra_data(self):
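        # Pre-generate class images with the base pipeline until num_class_images exist;
        # they serve as the prior-preservation targets.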
global_rank = self.global_rank
device = self.trainer.device_ids[global_rank]
print('generate on device {} of global_rank {}'.format(device, global_rank))
class_images_dir = Path(self.hparams.class_data_dir)
if not class_images_dir.exists():
class_images_dir.mkdir(parents=True)
cur_class_images = len(list(class_images_dir.iterdir()))
if cur_class_images < self.hparams.num_class_images:
pipeline = StableDiffusionPipeline.from_pretrained(
self.hparams.model_path,
safety_checker=None,
)
pipeline.set_progress_bar_config(disable=True)
num_new_images = self.hparams.num_class_images - cur_class_images
print(f"Number of class images to sample: {num_new_images}.")
sample_dataset = PromptDataset(self.hparams.class_prompt, num_new_images)
sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=self.hparams.sample_batch_size)
pipeline.to(device)
for example in tqdm(
sample_dataloader, desc="Generating class images", disable=global_rank != 0
):
images = pipeline(example["prompt"]).images
for i, image in enumerate(images):
hash_image = hashlib.sha1(image.tobytes()).hexdigest()
image_filename = class_images_dir / f"{example['index'][i] + cur_class_images}-{hash_image}.jpg"
image.save(image_filename)
del pipeline
# if torch.cuda.is_available():
# torch.cuda.empty_cache()
def setup(self, stage) -> None:
if self.hparams.with_prior_preservation:
self.generate_extra_data()
if stage == 'fit':
self.total_steps = get_total_steps(self.trainer, self.hparams)
print('Total steps: {}' .format(self.total_steps))
def configure_optimizers(self):
model_params = []
if self.hparams.train_unet and self.hparams.train_text_encoder:
model_params = itertools.chain(self.unet.parameters(), self.text_encoder.parameters())
elif self.hparams.train_unet:
model_params = self.unet.parameters()
elif self.hparams.train_text_encoder:
model_params = self.text_encoder.parameters()
return configure_optimizers(self, model_params=model_params)
def training_step(self, batch, batch_idx):
if self.hparams.train_text_encoder:
self.text_encoder.train()
if self.hparams.train_unet:
self.unet.train()
latents = self.vae.encode(batch["pixel_values"]).latent_dist.sample()
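        # Scale latents by the Stable Diffusion VAE scaling factor (0.18215).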
latents = latents * 0.18215
# Sample noise that we'll add to the latents
noise = torch.randn(latents.shape).to(latents.device)
noise = noise.to(dtype=self.unet.dtype)
bsz = latents.shape[0]
# Sample a random timestep for each image
timesteps = torch.randint(
0, self.noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device)
timesteps = timesteps.long()
# Add noise to the latents according to the noise magnitude at each timestep
# (this is the forward diffusion process)
noisy_latents = self.noise_scheduler.add_noise(latents, noise, timesteps)
noisy_latents = noisy_latents.to(dtype=self.unet.dtype)
# Get the text embedding for conditioning
# with torch.no_grad():
encoder_hidden_states = self.text_encoder(batch["input_ids"])[0]
# Predict the noise residual
noise_pred = self.unet(noisy_latents, timesteps, encoder_hidden_states).sample
if self.hparams.with_prior_preservation:
# Chunk the noise and noise_pred into two parts and compute the loss on each part separately.
noise_pred, noise_pred_prior = torch.chunk(noise_pred, 2, dim=0)
noise, noise_prior = torch.chunk(noise, 2, dim=0)
# Compute instance loss
loss = F.mse_loss(noise_pred, noise, reduction="none").mean([1, 2, 3]).mean()
# Compute prior loss
prior_loss = F.mse_loss(noise_pred_prior, noise_prior, reduction="mean")
# Add the prior loss to the instance loss.
loss = loss + args.prior_loss_weight * prior_loss
else:
loss = F.mse_loss(noise_pred, noise, reduction="mean")
self.log("train_loss", loss.item(), on_epoch=False, prog_bar=True, logger=True)
if self.trainer.global_rank == 0:
if (self.global_step+1) % 5000 == 0:
print('saving model...')
pipeline = StableDiffusionPipeline.from_pretrained(
args.model_path, unet=self.unet, text_encoder=self.text_encoder, tokenizer=self.tokenizer,
)
pipeline.save_pretrained(os.path.join(
args.default_root_dir, f'hf_out_{self.trainer.current_epoch}'))
return {"loss": loss}
def on_train_end(self) -> None:
if self.trainer.global_rank == 0:
print('saving model...')
pipeline = StableDiffusionPipeline.from_pretrained(
args.model_path, unet=self.unet, text_encoder=self.text_encoder, tokenizer=self.tokenizer,
)
pipeline.save_pretrained(os.path.join(
args.default_root_dir, f'hf_out_{self.trainer.current_epoch}'))
def on_load_checkpoint(self, checkpoint) -> None:
        # Compatibility with older Lightning versions, where resuming from a ckpt resets the step count to 0.
global_step_offset = checkpoint["global_step"]
if 'global_samples' in checkpoint:
self.consumed_samples = checkpoint['global_samples']
self.trainer.fit_loop.epoch_loop._batches_that_stepped = global_step_offset
if __name__ == '__main__':
args_parser = argparse.ArgumentParser()
args_parser = add_module_args(args_parser)
args_parser = add_data_args(args_parser)
args_parser = UniversalDataModule.add_data_specific_args(args_parser)
args_parser = Trainer.add_argparse_args(args_parser)
args_parser = StableDiffusionDreamBooth.add_module_specific_args(args_parser)
args_parser = UniversalCheckpoint.add_argparse_args(args_parser)
args = args_parser.parse_args()
model = StableDiffusionDreamBooth(args)
tokenizer = model.tokenizer
datasets = DreamBoothDataset(
instance_data_dir=args.instance_data_dir,
instance_prompt=args.instance_prompt,
tokenizer=tokenizer,
class_data_dir=args.class_data_dir,
class_prompt=args.class_prompt,
size=512,
center_crop=args.center_crop,
)
# construct the datasets to a dict for universal_datamodule
datasets = {'train': datasets}
def collate_fn(examples):
# print(examples)
input_ids = [example["instance_prompt_ids"] for example in examples]
pixel_values = [example["instance_images"] for example in examples]
# Concat class and instance examples for prior preservation.
# We do this to avoid doing two forward passes.
if args.with_prior_preservation:
input_ids += [example["class_prompt_ids"] for example in examples]
pixel_values += [example["class_images"] for example in examples]
pixel_values = torch.stack(pixel_values)
pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()
input_ids = tokenizer.pad(
{"input_ids": input_ids},
padding="max_length",
max_length=tokenizer.model_max_length,
return_tensors="pt",
).input_ids
batch = {
"input_ids": input_ids,
"pixel_values": pixel_values,
}
return batch
datamodule = UniversalDataModule(
tokenizer=tokenizer, collate_fn=collate_fn, args=args, datasets=datasets)
lr_monitor = LearningRateMonitor(logging_interval='step')
checkpoint_callback = UniversalCheckpoint(args)
trainer = Trainer.from_argparse_args(args,
callbacks=[
lr_monitor,
checkpoint_callback])
trainer.fit(model, datamodule, ckpt_path=args.load_ckpt_path)
| 11,678 | 41.162455 | 118 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/zen2_finetune/fengshen_token_level_ft_task.py
|
# coding=utf-8
# Copyright 2021 The IDEA Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from fengshen.models.zen2.modeling import ZenForTokenClassification
from fengshen.metric.metric import SeqEntityScore
from fengshen.models.zen2.tokenization import BertTokenizer
from fengshen.models.zen2.ngram_utils import ZenNgramDict
from pytorch_lightning.callbacks import LearningRateMonitor
from dataclasses import dataclass
import logging
import math
import numpy as np
import os
import json
import torch
import pytorch_lightning as pl
import argparse
from pytorch_lightning.callbacks import ModelCheckpoint
from torch.utils.data import Dataset, DataLoader
import torch.nn.functional as F
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.ERROR)
logger = logging.getLogger(__name__)
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, label_id, ngram_ids, ngram_positions, ngram_lengths,
ngram_tuples, ngram_seg_ids, ngram_masks, valid_ids=None, label_mask=None, b_use_valid_filter=False):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
self.valid_ids = valid_ids
self.label_mask = label_mask
self.ngram_ids = ngram_ids
self.ngram_positions = ngram_positions
self.ngram_lengths = ngram_lengths
self.ngram_tuples = ngram_tuples
self.ngram_seg_ids = ngram_seg_ids
self.ngram_masks = ngram_masks
self.b_use_valid_filter = b_use_valid_filter
def convert_examples_to_features(examples, label_map, max_seq_length, tokenizer, ngram_dict):
"""Loads a data file into a list of `InputBatch`s."""
# label_map = {label: i for i, label in enumerate(label_list, 1)}
# label_map["[PAD]"] = 0
features = []
b_use_valid_filter = False
for (ex_index, example) in enumerate(examples):
textlist = example.text_a
labellist = example.label
tokens = []
labels = []
valid = []
label_mask = []
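        # 'valid' marks the first sub-token of each word; only these positions receive a label.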
for i, word in enumerate(textlist):
token = tokenizer.tokenize(word)
if len(tokens) + len(token) > max_seq_length - 2:
break
tokens.extend(token)
label_1 = labellist[i]
for m in range(len(token)):
if m == 0:
labels.append(label_1)
valid.append(1)
label_mask.append(1)
else:
valid.append(0)
b_use_valid_filter = True
ntokens = []
segment_ids = []
label_ids = []
ntokens.append("[CLS]")
segment_ids.append(0)
valid.insert(0, 1)
label_mask.insert(0, 1)
label_ids.append(label_map["[CLS]"])
for i, token in enumerate(tokens):
ntokens.append(token)
segment_ids.append(0)
if len(labels) > i:
label_ids.append(label_map[labels[i]])
ntokens.append("[SEP]")
segment_ids.append(0)
valid.append(1)
label_mask.append(1)
label_ids.append(label_map["[SEP]"])
input_ids = tokenizer.convert_tokens_to_ids(ntokens)
input_mask = [1] * len(input_ids)
label_mask = [1] * len(label_ids)
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
label_ids.append(0)
valid.append(1)
label_mask.append(0)
while len(label_ids) < max_seq_length:
label_ids.append(0)
label_mask.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
assert len(label_ids) == max_seq_length
assert len(valid) == max_seq_length
assert len(label_mask) == max_seq_length
# ----------- code for ngram BEGIN-----------
ngram_matches = []
# Filter the ngram segment from 2 to 7 to check whether there is a ngram
max_gram_n = ngram_dict.max_ngram_len
for p in range(2, max_gram_n):
for q in range(0, len(tokens) - p + 1):
character_segment = tokens[q:q + p]
# j is the starting position of the ngram
# i is the length of the current ngram
character_segment = tuple(character_segment)
if character_segment in ngram_dict.ngram_to_id_dict:
ngram_index = ngram_dict.ngram_to_id_dict[character_segment]
ngram_freq = ngram_dict.ngram_to_freq_dict[character_segment]
ngram_matches.append([ngram_index, q, p, character_segment, ngram_freq])
ngram_matches = sorted(ngram_matches, key=lambda s: s[0])
max_ngram_in_seq_proportion = math.ceil((len(tokens) / max_seq_length) * ngram_dict.max_ngram_in_seq)
if len(ngram_matches) > max_ngram_in_seq_proportion:
ngram_matches = ngram_matches[:max_ngram_in_seq_proportion]
ngram_ids = [ngram[0] for ngram in ngram_matches]
ngram_positions = [ngram[1] for ngram in ngram_matches]
ngram_lengths = [ngram[2] for ngram in ngram_matches]
ngram_tuples = [ngram[3] for ngram in ngram_matches]
ngram_freqs = [ngram[4] for ngram in ngram_matches]
ngram_seg_ids = [0 if position < (len(tokens) + 2) else 1 for position in ngram_positions]
        ngram_mask_array = np.zeros(ngram_dict.max_ngram_in_seq, dtype=bool)  # np.bool was removed in newer NumPy
ngram_mask_array[:len(ngram_ids)] = 1
# record the masked positions
ngram_positions_matrix = np.zeros(shape=(max_seq_length, ngram_dict.max_ngram_in_seq), dtype=np.int32)
for i in range(len(ngram_ids)):
ngram_positions_matrix[ngram_positions[i]:ngram_positions[i] + ngram_lengths[i], i] = ngram_freqs[i]
        ngram_positions_matrix = torch.from_numpy(ngram_positions_matrix.astype(float))  # np.float is deprecated; builtin float (float64) is equivalent
ngram_positions_matrix = torch.div(ngram_positions_matrix, torch.stack(
[torch.sum(ngram_positions_matrix, 1)] * ngram_positions_matrix.size(1)).t() + 1e-10)
ngram_positions_matrix = ngram_positions_matrix.numpy()
# Zero-pad up to the max ngram in seq length.
padding = [0] * (ngram_dict.max_ngram_in_seq - len(ngram_ids))
ngram_ids += padding
ngram_lengths += padding
ngram_seg_ids += padding
# ----------- code for ngram END-----------
if ex_index < 5:
logger.info("*** Example ***")
logger.info("guid: %s" % (example.guid))
logger.info("tokens: %s" % " ".join([str(x) for x in tokens]))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
logger.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
logger.info("label: %s (id = %s)" % (",".join([str(x) for x in example.label]), ",".join([str(x) for x in label_ids])))
logger.info("valid: %s" % " ".join([str(x) for x in valid]))
logger.info("b_use_valid_filter: %s" % str(b_use_valid_filter))
logger.info("ngram_ids: %s" % " ".join([str(x) for x in ngram_ids]))
logger.info("ngram_positions: %s" % " ".join([str(x) for x in ngram_positions]))
logger.info("ngram_lengths: %s" % " ".join([str(x) for x in ngram_lengths]))
logger.info("ngram_tuples: %s" % " ".join([str(x) for x in ngram_tuples]))
logger.info("ngram_seg_ids: %s" % " ".join([str(x) for x in ngram_seg_ids]))
features.append(
InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_ids,
ngram_ids=ngram_ids,
ngram_positions=ngram_positions_matrix,
ngram_lengths=ngram_lengths,
ngram_tuples=ngram_tuples,
ngram_seg_ids=ngram_seg_ids,
ngram_masks=ngram_mask_array,
valid_ids=valid,
label_mask=label_mask,
b_use_valid_filter=b_use_valid_filter))
return features
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_examples(self, data_path, set_type, quotechar=' '):
"""See base class."""
return self._create_examples(
self._read_tsv(data_path, self.get_quotechar()), set_type)
def _create_examples(self, lines, set_type):
examples = []
for i, (sentence, label) in enumerate(lines):
guid = "%s-%s" % (set_type, i)
text_a = sentence
label = label
examples.append(InputExample(guid=guid, text_a=text_a, label=label))
return examples
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
def get_quotechar(self):
return ' '
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
'''
read file
return format :
[ ['EU', 'B-ORG'], ['rejects', 'O'], ['German', 'B-MISC'], ['call', 'O'], ['to', 'O'], ['boycott', 'O'], ['British', 'B-MISC'], ['lamb', 'O'], ['.', 'O'] ]
'''
        data = []
        sentence = []
        label = []
        # Use a context manager so the file handle is closed after reading.
        with open(input_file) as f:
            for line in f:
                if len(line) == 0 or line.startswith('-DOCSTART') or line[0] == "\n":
                    if len(sentence) > 0:
                        data.append((sentence, label))
                        sentence = []
                        label = []
                    continue
                splits = line.split(quotechar)
                sentence.append(splits[0])
                label.append(splits[-1][:-1])
        if len(sentence) > 0:
            data.append((sentence, label))
        return data
class MSRAProcessor(DataProcessor):
"""Processor for the msra data set."""
def get_labels(self):
return ['B-NR', 'B-NS', 'B-NT', 'E-NR', 'E-NS', 'E-NT', 'M-NR',
'M-NS', 'M-NT', 'O', 'S-NR', 'S-NS', 'S-NT', '[CLS]', '[SEP]']
class OntoNotes4Processor(DataProcessor):
"""Processor for the OntoNotes4 data set."""
def get_labels(self):
return ['B-GPE', 'B-LOC', 'B-ORG', 'B-PER', 'E-GPE', 'E-LOC',
'E-ORG', 'E-PER', 'M-GPE', 'M-LOC', 'M-ORG', 'M-PER', 'O',
'S-GPE', 'S-LOC', 'S-ORG', 'S-PER', '[CLS]', '[SEP]']
class WeiboProcessor(DataProcessor):
"""Processor for the Weibo data set."""
def get_labels(self):
return ['B-GPE.NAM', 'B-GPE.NOM', 'B-LOC.NAM', 'B-LOC.NOM',
'B-ORG.NAM', 'B-ORG.NOM', 'B-PER.NAM', 'B-PER.NOM', 'E-GPE.NAM',
'E-GPE.NOM', 'E-LOC.NAM', 'E-LOC.NOM', 'E-ORG.NAM', 'E-ORG.NOM',
'E-PER.NAM', 'E-PER.NOM', 'M-GPE.NAM', 'M-LOC.NAM', 'M-LOC.NOM',
'M-ORG.NAM', 'M-ORG.NOM', 'M-PER.NAM', 'M-PER.NOM', 'O',
'S-GPE.NAM', 'S-LOC.NOM', 'S-PER.NAM', 'S-PER.NOM', '[CLS]', '[SEP]']
class ResumeProcessor(DataProcessor):
"""Processor for the resume data set."""
def get_labels(self):
return ['B-CONT', 'B-EDU', 'B-LOC', 'B-NAME', 'B-ORG', 'B-PRO',
'B-RACE', 'B-TITLE', 'E-CONT', 'E-EDU', 'E-LOC', 'E-NAME',
'E-ORG', 'E-PRO', 'E-RACE', 'E-TITLE', 'M-CONT', 'M-EDU',
'M-LOC', 'M-NAME', 'M-ORG', 'M-PRO', 'M-RACE', 'M-TITLE',
'O', 'S-NAME', 'S-ORG', 'S-RACE', '[CLS]', '[SEP]']
class CMeEEProcessor(DataProcessor):
"""Processor for the CMeEE data set."""
def get_quotechar(self):
return '\t'
def get_labels(self):
return ['B-临床表现', 'B-医学检验项目', 'B-医疗程序', 'B-医疗设备',
'B-微生物类', 'B-疾病', 'B-科室', 'B-药物', 'B-身体', 'I-临床表现',
'I-医学检验项目', 'I-医疗程序', 'I-医疗设备', 'I-微生物类',
'I-疾病', 'I-科室', 'I-药物', 'I-身体', 'O', '[CLS]', '[SEP]']
class CLUENERProcessor(DataProcessor):
"""Processor for the CLUENER data set."""
def get_quotechar(self):
return '\t'
def get_labels(self):
return ['B-书名', 'B-公司', 'B-地址', 'B-姓名', 'B-政府', 'B-景点',
'B-游戏', 'B-电影', 'B-组织机构', 'B-职位', 'I-书名', 'I-公司',
'I-地址', 'I-姓名', 'I-政府', 'I-景点', 'I-游戏', 'I-电影',
'I-组织机构', 'I-职位', 'O', '[CLS]', '[SEP]']
class TaskDataset(Dataset):
def __init__(self, data_path, processor, mode='train'):
super().__init__()
self.data = self.load_data(data_path, processor, mode)
def __len__(self):
return len(self.data)
def __getitem__(self, index):
return self.data[index]
    def load_data(self, data_path, processor, mode):
        # train / dev / test all use the same processor entry point
        return processor.get_examples(data_path, mode)
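# Collator that turns InputExamples into padded tensors (including ZEN n-gram features) for one batch.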
@dataclass
class TaskCollator:
args = None
tokenizer = None
ngram_dict = None
label2id = None
def __call__(self, samples):
features = convert_examples_to_features(samples, self.label2id, self.args.max_seq_length, self.tokenizer, self.ngram_dict)
# logger.info(" Num examples = %d", len(samples))
input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)
valid_ids = torch.tensor([f.valid_ids for f in features], dtype=torch.long)
ngram_ids = torch.tensor([f.ngram_ids for f in features], dtype=torch.long)
ngram_positions = torch.tensor([f.ngram_positions for f in features], dtype=torch.long)
# ngram_lengths = torch.tensor([f.ngram_lengths for f in features], dtype=torch.long)
# ngram_seg_ids = torch.tensor([f.ngram_seg_ids for f in features], dtype=torch.long)
# ngram_masks = torch.tensor([f.ngram_masks for f in features], dtype=torch.long)
# label_mask = torch.tensor([f.label_mask for f in features], dtype=torch.long)
b_use_valid_filter = torch.tensor([f.b_use_valid_filter for f in features], dtype=torch.bool)
        # All features share the same flag, so just take the first one.
# b_use_valid_filter = b_use_valid_filter.detach().cpu().numpy()[0]
b_use_valid_filter = b_use_valid_filter[0]
return {
'input_ids': input_ids,
'input_ngram_ids': ngram_ids,
'ngram_position_matrix': ngram_positions,
'attention_mask': input_mask,
'token_type_ids': segment_ids,
'labels': label_ids,
'valid_ids': valid_ids,
'b_use_valid_filter': b_use_valid_filter,
}
class TaskDataModel(pl.LightningDataModule):
@staticmethod
def add_data_specific_args(parent_args):
parser = parent_args.add_argument_group('TASK NAME DataModel')
parser.add_argument('--data_dir', default='./data', type=str)
parser.add_argument('--num_workers', default=8, type=int)
parser.add_argument('--train_data', default='train.json', type=str)
parser.add_argument('--valid_data', default='dev.json', type=str)
parser.add_argument('--test_data', default='test.json', type=str)
parser.add_argument('--train_batchsize', default=16, type=int)
parser.add_argument('--valid_batchsize', default=32, type=int)
parser.add_argument('--max_seq_length', default=128, type=int)
parser.add_argument('--texta_name', default='text', type=str)
parser.add_argument('--textb_name', default='sentence2', type=str)
parser.add_argument('--label_name', default='label', type=str)
parser.add_argument('--id_name', default='id', type=str)
parser.add_argument('--dataset_name', default=None, type=str)
parser.add_argument('--vocab_file',
type=str, default=None,
help="Vocabulary mapping/file BERT was pretrainined on")
parser.add_argument("--do_lower_case",
action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument('--task_name', default='weibo', type=str)
return parent_args
def __init__(self, args):
super().__init__()
self.train_batchsize = args.train_batchsize
self.valid_batchsize = args.valid_batchsize
self.collator = TaskCollator()
self.collator.args = args
self.collator.tokenizer = BertTokenizer.from_pretrained(args.pretrained_model_path, do_lower_case=args.do_lower_case)
self.collator.ngram_dict = ZenNgramDict.from_pretrained(args.pretrained_model_path, tokenizer=self.collator.tokenizer)
processors = {
'weibo': WeiboProcessor,
'resume': ResumeProcessor,
'msra': MSRAProcessor,
'ontonotes4': OntoNotes4Processor,
'cmeee': CMeEEProcessor,
'cluener': CLUENERProcessor,
}
if args.task_name not in processors:
raise ValueError("Task not found: %s" % (args.task_name))
processor = processors[args.task_name]()
        # Build the label-to-id mapping
label_list = processor.get_labels()
label2id = {label: i for i, label in enumerate(label_list, 1)}
label2id["[PAD]"] = 0
self.id2label = {v: k for k, v in label2id.items()}
self.collator.label2id = label2id
if args.dataset_name is None:
self.train_data = TaskDataset(os.path.join(
args.data_dir, args.train_data), processor, mode='train')
self.valid_data = TaskDataset(os.path.join(
args.data_dir, args.valid_data), processor, mode='dev')
self.test_data = TaskDataset(os.path.join(
args.data_dir, args.test_data), processor, mode='test')
else:
import datasets
ds = datasets.load_dataset(args.dataset_name)
self.train_data = ds['train']
self.valid_data = ds['validation']
self.test_data = ds['test']
self.save_hyperparameters(args)
def train_dataloader(self):
return DataLoader(self.train_data, shuffle=True, batch_size=self.train_batchsize, pin_memory=False,
collate_fn=self.collator)
def val_dataloader(self):
return DataLoader(self.valid_data, shuffle=False, batch_size=self.valid_batchsize, pin_memory=False,
collate_fn=self.collator)
def predict_dataloader(self):
return DataLoader(self.test_data, shuffle=False, batch_size=self.valid_batchsize, pin_memory=False,
collate_fn=self.collator)
class LitModel(pl.LightningModule):
@staticmethod
def add_model_specific_args(parent_args):
parser = parent_args.add_argument_group('BaseModel')
parser.add_argument('--markup', default='bios', type=str)
parser.add_argument('--middle_prefix', default='I-', type=str)
return parent_args
def __init__(self, args, id2label):
super().__init__()
# config = ZenConfig(os.path.join(args.pretrained_model_path, 'config.json'))
self.model = ZenForTokenClassification.from_pretrained(args.pretrained_model_path, num_labels=len(id2label))
self.seq_entity_score = SeqEntityScore(id2label, markup=args.markup, middle_prefix=args.middle_prefix)
self.train_seq_entity_score = SeqEntityScore(id2label, markup=args.markup, middle_prefix=args.middle_prefix)
self.id2label = id2label
self.label2id = {v: k for k, v in id2label.items()}
self.save_hyperparameters(args)
def setup(self, stage) -> None:
if stage == 'fit':
train_loader = self.trainer._data_connector._train_dataloader_source.dataloader()
# Calculate total steps
if self.trainer.max_epochs > 0:
world_size = self.trainer.world_size
tb_size = self.hparams.train_batchsize * max(1, world_size)
ab_size = self.trainer.accumulate_grad_batches
self.total_steps = (len(train_loader.dataset) *
self.trainer.max_epochs // tb_size) // ab_size
else:
self.total_steps = self.trainer.max_steps // self.trainer.accumulate_grad_batches
print('Total steps: {}' .format(self.total_steps))
def training_step(self, batch, batch_idx):
outputs = self.model(**batch)
loss = outputs.loss
# logits = outputs.logits
# preds = torch.argmax(F.log_softmax(logits, dim=2), dim=2)
# preds = preds.detach().cpu().numpy()
# labels = batch['labels'].detach().cpu().numpy()
# num_labels = len(self.label2id)
# y_true = []
# y_pred = []
# for i, label in enumerate(labels):
# temp_1 = []
# temp_2 = []
# for j, m in enumerate(label):
# if j == 0:
# continue
# elif labels[i][j] == num_labels - 1:
# y_true.append(temp_1)
# y_pred.append(temp_2)
# break
# else:
# temp_1.append(self.id2label[labels[i][j]])
# temp_2.append(self.id2label[preds[i][j]])
# self.train_seq_entity_score.update(y_true, y_pred)
# result = self.train_seq_entity_score.result()
# self.train_seq_entity_score.reset()
self.log('train_loss', loss)
return loss
def validation_step(self, batch, batch_idx):
outputs = self.model(**batch)
loss = outputs.loss
logits = outputs.logits
preds = torch.argmax(F.log_softmax(logits, dim=2), dim=2)
preds = preds.detach().cpu().numpy()
labels = batch['labels'].detach().cpu().numpy()
num_labels = len(self.label2id)
y_true = []
y_pred = []
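        # Map id sequences back to label strings, stopping at the [SEP] label (the last label id).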
for i, label in enumerate(labels):
temp_1 = []
temp_2 = []
for j, m in enumerate(label):
if j == 0:
continue
elif labels[i][j] == num_labels - 1:
y_true.append(temp_1)
y_pred.append(temp_2)
break
else:
temp_1.append(self.id2label[labels[i][j]])
temp_2.append(self.id2label[preds[i][j]])
self.seq_entity_score.update(y_true, y_pred)
self.log('val_loss', loss)
def validation_epoch_end(self, outputs):
# compute metric for all process
score_dict, _ = self.seq_entity_score.result()
if self.trainer._accelerator_connector.cluster_environment.global_rank() == 0:
print('score_dict:\n', score_dict)
# reset the metric after once validation
self.seq_entity_score.reset()
for k, v in score_dict.items():
self.log('val_{}'.format(k), v)
def configure_optimizers(self):
from fengshen.models.model_utils import configure_optimizers
return configure_optimizers(self)
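# Thin wrapper that builds a ModelCheckpoint callback from command-line arguments.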
class TaskModelCheckpoint:
@staticmethod
def add_argparse_args(parent_args):
parser = parent_args.add_argument_group('BaseModel')
parser.add_argument('--monitor', default='train_loss', type=str)
parser.add_argument('--mode', default='min', type=str)
parser.add_argument('--dirpath', default='./log/', type=str)
parser.add_argument(
'--filename', default='model-{epoch:02d}-{train_loss:.4f}', type=str)
parser.add_argument('--save_top_k', default=3, type=float)
parser.add_argument('--every_n_train_steps', default=100, type=float)
parser.add_argument('--save_weights_only', default=True, type=bool)
return parent_args
def __init__(self, args):
self.callbacks = ModelCheckpoint(monitor=args.monitor,
save_top_k=args.save_top_k,
mode=args.mode,
every_n_train_steps=args.every_n_train_steps,
save_weights_only=args.save_weights_only,
dirpath=args.dirpath,
filename=args.filename)
def save_test(data, args, data_model):
with open(args.output_save_path, 'w', encoding='utf-8') as f:
idx = 0
for i in range(len(data)):
batch = data[i]
for sample in batch:
tmp_result = dict()
label_id = np.argmax(sample.numpy())
tmp_result['id'] = data_model.test_data.data[idx]['id']
tmp_result['label'] = data_model.id2label[label_id]
json_data = json.dumps(tmp_result, ensure_ascii=False)
f.write(json_data+'\n')
idx += 1
print('save the result to '+args.output_save_path)
def main():
total_parser = argparse.ArgumentParser("TASK NAME")
total_parser.add_argument('--pretrained_model_path', default='', type=str)
total_parser.add_argument('--output_save_path',
default='./predict.json', type=str)
# * Args for data preprocessing
total_parser = TaskDataModel.add_data_specific_args(total_parser)
# * Args for training
total_parser = pl.Trainer.add_argparse_args(total_parser)
total_parser = TaskModelCheckpoint.add_argparse_args(total_parser)
# * Args for base model
from fengshen.models.model_utils import add_module_args
total_parser = add_module_args(total_parser)
total_parser = LitModel.add_model_specific_args(total_parser)
args = total_parser.parse_args()
checkpoint_callback = TaskModelCheckpoint(args).callbacks
lr_monitor = LearningRateMonitor(logging_interval='step')
trainer = pl.Trainer.from_argparse_args(args,
callbacks=[checkpoint_callback, lr_monitor]
)
data_model = TaskDataModel(args)
id2label = data_model.id2label
print('id2label:', id2label)
model = LitModel(args, id2label)
trainer.fit(model, data_model)
if __name__ == "__main__":
main()
| 28,463 | 40.920471 | 163 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/zen2_finetune/fengshen_sequence_level_ft_task.py
|
# coding=utf-8
# Copyright 2021 The IDEA Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from fengshen.models.zen2.modeling import ZenForSequenceClassification
from fengshen.models.zen2.ngram_utils import ZenNgramDict
from fengshen.models.zen2.tokenization import BertTokenizer
from pytorch_lightning.callbacks import LearningRateMonitor
import csv
from dataclasses import dataclass
import logging
import math
import numpy as np
import os
from tqdm import tqdm
import json
import torch
import pytorch_lightning as pl
import argparse
from pytorch_lightning.callbacks import ModelCheckpoint
from torch.utils.data import Dataset, DataLoader
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None, qid=0):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
self.qid = qid
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, label_id,
ngram_ids, ngram_starts, ngram_lengths, ngram_tuples, ngram_seg_ids, ngram_masks, ngram_freqs,
qid=-1):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
self.qid = qid
self.ngram_ids = ngram_ids
self.ngram_starts = ngram_starts
self.ngram_lengths = ngram_lengths
self.ngram_tuples = ngram_tuples
self.ngram_seg_ids = ngram_seg_ids
self.ngram_masks = ngram_masks
self.ngram_freqs = ngram_freqs
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_examples(self, data_path, mode):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with open(input_file, "r") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
# if sys.version_info[0] == 2:
# line = list(unicode(cell, 'utf-8') for cell in line)
lines.append(line)
return lines
@classmethod
def _read_json(cls, input_file):
"""Reads a jsonl file."""
with open(input_file, "r", encoding="utf-8") as f:
lines = f.readlines()
samples = []
for line in tqdm(lines):
data = json.loads(line)
samples.append(data)
return samples
class TnewsProcessor(DataProcessor):
"""Processor for the tnews data set (HIT version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_json(os.path.join(data_dir, "train.json")), "train")
def get_examples(self, data_path, mode):
return self._create_examples(
self._read_json(data_path),
set_type=mode
)
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
# if i == 0:
# continue
guid = "%s-%s" % (set_type, i)
# text_a = line[0]
text_a = line['sentence']
label = line['label'] if 'label' in line.keys() else None
examples.append(
InputExample(guid=guid, text_a=text_a, label=label))
return examples
class OcnliProcessor(DataProcessor):
"""Processor for the ocnli or cmnli data set (HIT version)."""
def get_examples(self, data_path, mode):
return self._create_examples(
self._read_json(data_path),
set_type=mode
)
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
# if i == 0:
# continue
guid = "%s-%s" % (set_type, i)
# text_a = line[0]
text_a = line['sentence1']
text_b = line['sentence2']
label = line['label'] if 'label' in line.keys() else None
            # Special case: cmnli contains samples labeled '-', which are treated as unlabeled.
if label == '-':
label = None
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class IflytekProcessor(DataProcessor):
"""Processor for the iflytek data set (HIT version)."""
def get_examples(self, data_path, mode):
return self._create_examples(
self._read_json(data_path),
set_type=mode
)
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
# if i == 0:
# continue
guid = "%s-%s" % (set_type, i)
# text_a = line[0]
text_a = line['sentence']
label = line['label'] if 'label' in line.keys() else None
examples.append(
InputExample(guid=guid, text_a=text_a, label=label))
return examples
def convert_examples_to_features(examples, label_map, max_seq_length, tokenizer, ngram_dict):
"""Loads a data file into a list of `InputBatch`s."""
# label_map = {label : i for i, label in enumerate(label_list)}
features = []
for (ex_index, example) in enumerate(examples):
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
        # since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
segment_ids = [0] * len(tokens)
if tokens_b:
tokens += tokens_b + ["[SEP]"]
segment_ids += [1] * (len(tokens_b) + 1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
# ----------- code for ngram BEGIN-----------
ngram_matches = []
# Filter the word segment from 2 to max_ngram_len to check whether there is a word
max_gram_n = ngram_dict.max_ngram_len
for p in range(2, max_gram_n):
for q in range(0, len(tokens) - p + 1):
character_segment = tokens[q:q + p]
# j is the starting position of the word
# i is the length of the current word
character_segment = tuple(character_segment)
if character_segment in ngram_dict.ngram_to_id_dict:
ngram_index = ngram_dict.ngram_to_id_dict[character_segment]
ngram_freq = ngram_dict.ngram_to_freq_dict[character_segment]
ngram_matches.append([ngram_index, q, p, character_segment, ngram_freq])
# shuffle(ngram_matches)
ngram_matches = sorted(ngram_matches, key=lambda s: s[0])
# max_word_in_seq_proportion = max_word_in_seq
max_word_in_seq_proportion = math.ceil((len(tokens) / max_seq_length) * ngram_dict.max_ngram_in_seq)
if len(ngram_matches) > max_word_in_seq_proportion:
ngram_matches = ngram_matches[:max_word_in_seq_proportion]
ngram_ids = [ngram[0] for ngram in ngram_matches]
ngram_positions = [ngram[1] for ngram in ngram_matches]
ngram_lengths = [ngram[2] for ngram in ngram_matches]
ngram_tuples = [ngram[3] for ngram in ngram_matches]
ngram_freqs = [ngram[4] for ngram in ngram_matches]
ngram_seg_ids = [0 if position < len([id for id in segment_ids if id == 0]) else 1 for position in
ngram_positions]
        ngram_mask_array = np.zeros(ngram_dict.max_ngram_in_seq, dtype=bool)  # np.bool was removed in newer NumPy
ngram_mask_array[:len(ngram_ids)] = 1
# Zero-pad up to the max word in seq length.
padding = [0] * (ngram_dict.max_ngram_in_seq - len(ngram_ids))
ngram_ids += padding
ngram_positions += padding
ngram_lengths += padding
ngram_seg_ids += padding
ngram_freqs += padding
# ----------- code for ngram END-----------
label_id = label_map[example.label] if example.label is not None else 0
# if ex_index < 5:
# logger.info("*** Example ***")
# logger.info("guid: %s" % (example.guid))
# logger.info("tokens: %s" % " ".join(
# [str(x) for x in tokens]))
# logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
# logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
# logger.info(
# "segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
# logger.info("label: %s (id = %d)" % (example.label, label_id))
# logger.info("ngram_ids: %s" % " ".join([str(x) for x in ngram_ids]))
# logger.info("ngram_positions: %s" % " ".join([str(x) for x in ngram_positions]))
# logger.info("ngram_lengths: %s" % " ".join([str(x) for x in ngram_lengths]))
# logger.info("ngram_tuples: %s" % " ".join([str(x) for x in ngram_tuples]))
# logger.info("ngram_seg_ids: %s" % " ".join([str(x) for x in ngram_seg_ids]))
# logger.info("ngram_freqs: %s" % " ".join([str(x) for x in ngram_freqs]))
features.append(
InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id,
ngram_ids=ngram_ids,
ngram_starts=ngram_positions,
ngram_lengths=ngram_lengths,
ngram_tuples=ngram_tuples,
ngram_seg_ids=ngram_seg_ids,
ngram_masks=ngram_mask_array,
ngram_freqs=ngram_freqs,
qid=example.qid))
return features
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
class TaskDataset(Dataset):
def __init__(self, data_path, processor, mode='train'):
super().__init__()
self.data = self.load_data(data_path, processor, mode)
def __len__(self):
return len(self.data)
def __getitem__(self, index):
return self.data[index]
    def load_data(self, data_path, processor, mode):
        # train / dev / test all use the same processor entry point
        return processor.get_examples(data_path, mode)
@dataclass
class TaskCollator:
args = None
tokenizer = None
ngram_dict = None
label2id = None
def __call__(self, samples):
features = convert_examples_to_features(samples, self.label2id, self.args.max_seq_length, self.tokenizer, self.ngram_dict)
# logger.info(" Num examples = %d", len(samples))
input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)
# qids = torch.tensor([f.qid for f in features], dtype=torch.long)
ngram_ids = torch.tensor([f.ngram_ids for f in features], dtype=torch.long)
ngram_starts = torch.tensor([f.ngram_starts for f in features], dtype=torch.long)
ngram_lengths = torch.tensor([f.ngram_lengths for f in features], dtype=torch.long)
# ngram_seg_ids = torch.tensor([f.ngram_seg_ids for f in features], dtype=torch.long)
# ngram_masks = torch.tensor([f.ngram_masks for f in features], dtype=torch.long)
ngram_freqs = torch.tensor([f.ngram_freqs for f in features], dtype=torch.long)
batch_size = len(samples)
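        # Build a [batch, seq_len, max_ngram_in_seq] matrix of n-gram frequency weights, then row-normalize it.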
ngram_positions_matrix = torch.zeros(
size=(batch_size, self.args.max_seq_length, self.ngram_dict.max_ngram_in_seq),
dtype=torch.int)
for batch_id in range(batch_size):
ngram_id = ngram_ids[batch_id]
ngram_start = ngram_starts[batch_id]
ngram_length = ngram_lengths[batch_id]
for i in range(len(ngram_id)):
ngram_positions_matrix[batch_id][ngram_start[i]:ngram_start[i] + ngram_length[i], i] = ngram_freqs[batch_id][i]
ngram_positions_matrix[batch_id] \
= torch.div(ngram_positions_matrix[batch_id],
torch.stack([torch.sum(ngram_positions_matrix[batch_id], 1)] *
ngram_positions_matrix[batch_id].size(1)).t() + 1e-10)
return {
'input_ids': input_ids,
'input_ngram_ids': ngram_ids,
'ngram_position_matrix': ngram_positions_matrix,
'attention_mask': input_mask,
'token_type_ids': segment_ids,
'labels': label_ids
}
# return default_collate(sample_list)
class TaskDataModel(pl.LightningDataModule):
@staticmethod
def add_data_specific_args(parent_args):
parser = parent_args.add_argument_group('TASK NAME DataModel')
parser.add_argument('--data_dir', default='./data', type=str)
parser.add_argument('--num_workers', default=8, type=int)
parser.add_argument('--train_data', default='train.json', type=str)
parser.add_argument('--valid_data', default='dev.json', type=str)
parser.add_argument('--test_data', default='test.json', type=str)
parser.add_argument('--train_batchsize', default=16, type=int)
parser.add_argument('--valid_batchsize', default=32, type=int)
parser.add_argument('--max_seq_length', default=128, type=int)
parser.add_argument('--texta_name', default='text', type=str)
parser.add_argument('--textb_name', default='sentence2', type=str)
parser.add_argument('--label_name', default='label', type=str)
parser.add_argument('--id_name', default='id', type=str)
parser.add_argument('--dataset_name', default=None, type=str)
parser.add_argument('--vocab_file',
type=str, default=None,
help="Vocabulary mapping/file BERT was pretrainined on")
parser.add_argument("--do_lower_case",
action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument('--task_name', default='tnews', type=str)
return parent_args
def __init__(self, args):
super().__init__()
self.train_batchsize = args.train_batchsize
self.valid_batchsize = args.valid_batchsize
self.collator = TaskCollator()
self.collator.args = args
self.collator.tokenizer = BertTokenizer.from_pretrained(args.pretrained_model_path, do_lower_case=args.do_lower_case)
self.collator.ngram_dict = ZenNgramDict.from_pretrained(args.pretrained_model_path, tokenizer=self.collator.tokenizer)
processors = {
'afqmc': OcnliProcessor,
'tnews': TnewsProcessor,
'ocnli': OcnliProcessor,
'cmnli': OcnliProcessor,
'iflytek': IflytekProcessor,
}
if args.task_name not in processors:
raise ValueError("Task not found: %s" % (args.task_name))
processor = processors[args.task_name]()
if args.dataset_name is None:
self.label2id, self.id2label = self.load_schema(os.path.join(
args.data_dir, args.train_data), args)
self.train_data = TaskDataset(os.path.join(
args.data_dir, args.train_data), processor, mode='train')
self.valid_data = TaskDataset(os.path.join(
args.data_dir, args.valid_data), processor, mode='dev')
self.test_data = TaskDataset(os.path.join(
args.data_dir, args.test_data), processor, mode='test')
self.collator.label2id = self.label2id
else:
import datasets
ds = datasets.load_dataset(args.dataset_name)
self.train_data = ds['train']
self.valid_data = ds['validation']
self.test_data = ds['test']
self.save_hyperparameters(args)
def train_dataloader(self):
return DataLoader(self.train_data, shuffle=True, batch_size=self.train_batchsize, pin_memory=False,
collate_fn=self.collator)
def val_dataloader(self):
return DataLoader(self.valid_data, shuffle=False, batch_size=self.valid_batchsize, pin_memory=False,
collate_fn=self.collator)
def predict_dataloader(self):
return DataLoader(self.test_data, shuffle=False, batch_size=self.valid_batchsize, pin_memory=False,
collate_fn=self.collator)
def load_schema(self, data_path, args):
with open(data_path, 'r', encoding='utf8') as f:
lines = f.readlines()
label_list = []
for line in tqdm(lines):
data = json.loads(line)
labels = data[args.label_name] if args.label_name in data.keys(
) else 0
if labels not in label_list:
label_list.append(labels)
label2id, id2label = {}, {}
for i, k in enumerate(label_list):
label2id[k] = i
id2label[i] = k
return label2id, id2label
class LitModel(pl.LightningModule):
@staticmethod
def add_model_specific_args(parent_args):
parser = parent_args.add_argument_group('BaseModel')
parser.add_argument('--num_labels', default=2, type=int)
return parent_args
def __init__(self, args):
super().__init__()
self.model = ZenForSequenceClassification.from_pretrained(args.pretrained_model_path, num_labels=args.num_labels)
self.save_hyperparameters(args)
def setup(self, stage) -> None:
if stage == 'fit':
train_loader = self.trainer._data_connector._train_dataloader_source.dataloader()
# Calculate total steps
if self.trainer.max_epochs > 0:
world_size = self.trainer.world_size
tb_size = self.hparams.train_batchsize * max(1, world_size)
ab_size = self.trainer.accumulate_grad_batches
self.total_steps = (len(train_loader.dataset) *
self.trainer.max_epochs // tb_size) // ab_size
else:
self.total_steps = self.trainer.max_steps // self.trainer.accumulate_grad_batches
print('Total steps: {}' .format(self.total_steps))
def training_step(self, batch, batch_idx):
loss, logits = self.model(**batch)
acc = self.comput_metrix(logits, batch['labels'])
self.log('train_loss', loss)
self.log('train_acc', acc)
return loss
def comput_metrix(self, logits, labels):
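        # Sequence-level accuracy: argmax over the logits, flatten, and compare with the labels.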
y_pred = torch.argmax(logits, dim=-1)
y_pred = y_pred.view(size=(-1,))
y_true = labels.view(size=(-1,)).float()
corr = torch.eq(y_pred, y_true)
acc = torch.sum(corr.float())/labels.size()[0]
return acc
def validation_step(self, batch, batch_idx):
loss, logits = self.model(**batch)
acc = self.comput_metrix(logits, batch['labels'])
self.log('val_loss', loss)
self.log('val_acc', acc)
def predict_step(self, batch, batch_idx):
output = self.model(**batch)
return output.logits
def configure_optimizers(self):
from fengshen.models.model_utils import configure_optimizers
return configure_optimizers(self)
class TaskModelCheckpoint:
@staticmethod
def add_argparse_args(parent_args):
parser = parent_args.add_argument_group('BaseModel')
parser.add_argument('--monitor', default='train_loss', type=str)
parser.add_argument('--mode', default='min', type=str)
parser.add_argument('--dirpath', default='./log/', type=str)
parser.add_argument(
'--filename', default='model-{epoch:02d}-{train_loss:.4f}', type=str)
parser.add_argument('--save_top_k', default=3, type=float)
parser.add_argument('--every_n_train_steps', default=100, type=float)
parser.add_argument('--save_weights_only', default=True, type=bool)
return parent_args
def __init__(self, args):
self.callbacks = ModelCheckpoint(monitor=args.monitor,
save_top_k=args.save_top_k,
mode=args.mode,
every_n_train_steps=args.every_n_train_steps,
save_weights_only=args.save_weights_only,
dirpath=args.dirpath,
filename=args.filename)
def save_test(data, args, data_model):
with open(args.output_save_path, 'w', encoding='utf-8') as f:
idx = 0
for i in range(len(data)):
batch = data[i]
for sample in batch:
tmp_result = dict()
label_id = np.argmax(sample.numpy())
tmp_result['id'] = data_model.test_data.data[idx]['id']
tmp_result['label'] = data_model.id2label[label_id]
json_data = json.dumps(tmp_result, ensure_ascii=False)
f.write(json_data+'\n')
idx += 1
print('save the result to '+args.output_save_path)
def main():
total_parser = argparse.ArgumentParser("TASK NAME")
total_parser.add_argument('--pretrained_model_path', default='', type=str)
total_parser.add_argument('--output_save_path',
default='./predict.json', type=str)
# * Args for data preprocessing
total_parser = TaskDataModel.add_data_specific_args(total_parser)
# * Args for training
total_parser = pl.Trainer.add_argparse_args(total_parser)
total_parser = TaskModelCheckpoint.add_argparse_args(total_parser)
# * Args for base model
from fengshen.models.model_utils import add_module_args
total_parser = add_module_args(total_parser)
total_parser = LitModel.add_model_specific_args(total_parser)
args = total_parser.parse_args()
checkpoint_callback = TaskModelCheckpoint(args).callbacks
lr_monitor = LearningRateMonitor(logging_interval='step')
trainer = pl.Trainer.from_argparse_args(args,
callbacks=[checkpoint_callback, lr_monitor]
)
data_model = TaskDataModel(args)
model = LitModel(args)
trainer.fit(model, data_model)
if __name__ == "__main__":
main()
| 27,189 | 40.830769 | 130 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/classification/finetune_classification.py
|
# coding=utf-8
# Copyright 2021 The IDEA Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# from fengshen.models.zen1 import ZenModel
from dataclasses import dataclass
from fengshen.models.megatron_t5 import T5EncoderModel
from fengshen.models.roformer import RoFormerModel
from fengshen.models.longformer import LongformerModel
# from fengshen.models.cocolm.modeling_cocolm import COCOLMForSequenceClassification
import numpy as np
import os
from tqdm import tqdm
import json
import torch
import pytorch_lightning as pl
import argparse
from pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping, LearningRateMonitor
from torch.utils.data import Dataset, DataLoader
from torch.utils.data._utils.collate import default_collate
from transformers import (
BertModel,
BertConfig,
MegatronBertModel,
MegatronBertConfig,
AutoModel,
AutoConfig,
AutoTokenizer,
AutoModelForSequenceClassification,
)
# os.environ["CUDA_VISIBLE_DEVICES"] = '6'
model_dict = {'huggingface-bert': BertModel,
'fengshen-roformer': RoFormerModel,
'huggingface-megatron_bert': MegatronBertModel,
'fengshen-megatron_t5': T5EncoderModel,
'fengshen-longformer': LongformerModel,
# 'fengshen-zen1': ZenModel,
'huggingface-auto': AutoModelForSequenceClassification,
}
class TaskDataset(Dataset):
def __init__(self, data_path, args, label2id):
super().__init__()
self.args = args
self.label2id = label2id
self.max_length = args.max_length
self.data = self.load_data(data_path, args)
def __len__(self):
return len(self.data)
def __getitem__(self, index):
return self.data[index]
def load_data(self, data_path, args):
with open(data_path, 'r', encoding='utf8') as f:
lines = f.readlines()
samples = []
for line in tqdm(lines):
data = json.loads(line)
text_id = int(data[args.id_name]
) if args.id_name in data.keys() else 0
texta = data[args.texta_name] if args.texta_name in data.keys(
) else ''
textb = data[args.textb_name] if args.textb_name in data.keys(
) else ''
labels = self.label2id[data[args.label_name]
] if args.label_name in data.keys() else 0
samples.append({args.texta_name: texta, args.textb_name: textb,
args.label_name: labels, 'id': text_id})
return samples
@dataclass
class TaskCollator:
args = None
tokenizer = None
def __call__(self, samples):
sample_list = []
for item in samples:
if item[self.args.texta_name] != '' and item[self.args.textb_name] != '':
if self.args.model_type != 'fengshen-roformer':
encode_dict = self.tokenizer.encode_plus(
[item[self.args.texta_name], item[self.args.textb_name]],
max_length=self.args.max_length,
padding='max_length',
truncation='longest_first')
else:
encode_dict = self.tokenizer.encode_plus(
[item[self.args.texta_name] +
self.tokenizer.eos_token+item[self.args.textb_name]],
max_length=self.args.max_length,
padding='max_length',
truncation='longest_first')
else:
encode_dict = self.tokenizer.encode_plus(
item[self.args.texta_name],
max_length=self.args.max_length,
padding='max_length',
truncation='longest_first')
sample = {}
for k, v in encode_dict.items():
sample[k] = torch.tensor(v)
sample['labels'] = torch.tensor(item[self.args.label_name]).long()
sample['id'] = item['id']
sample_list.append(sample)
return default_collate(sample_list)
class TaskDataModel(pl.LightningDataModule):
@staticmethod
def add_data_specific_args(parent_args):
parser = parent_args.add_argument_group('TASK NAME DataModel')
parser.add_argument('--data_dir', default='./data', type=str)
parser.add_argument('--num_workers', default=8, type=int)
parser.add_argument('--train_data', default='train.json', type=str)
parser.add_argument('--valid_data', default='dev.json', type=str)
parser.add_argument('--test_data', default='test.json', type=str)
parser.add_argument('--train_batchsize', default=16, type=int)
parser.add_argument('--valid_batchsize', default=32, type=int)
parser.add_argument('--max_length', default=128, type=int)
parser.add_argument('--texta_name', default='text', type=str)
parser.add_argument('--textb_name', default='sentence2', type=str)
parser.add_argument('--label_name', default='label', type=str)
parser.add_argument('--id_name', default='id', type=str)
parser.add_argument('--dataset_name', default=None, type=str)
return parent_args
def __init__(self, args):
super().__init__()
self.train_batchsize = args.train_batchsize
self.valid_batchsize = args.valid_batchsize
self.tokenizer = AutoTokenizer.from_pretrained(
args.pretrained_model_path)
self.collator = TaskCollator()
self.collator.args = args
self.collator.tokenizer = self.tokenizer
if args.dataset_name is None:
self.label2id, self.id2label = self.load_schema(os.path.join(
args.data_dir, args.train_data), args)
self.train_data = TaskDataset(os.path.join(
args.data_dir, args.train_data), args, self.label2id)
self.valid_data = TaskDataset(os.path.join(
args.data_dir, args.valid_data), args, self.label2id)
self.test_data = TaskDataset(os.path.join(
args.data_dir, args.test_data), args, self.label2id)
else:
import datasets
ds = datasets.load_dataset(args.dataset_name)
self.train_data = ds['train']
self.valid_data = ds['validation']
self.test_data = ds['test']
self.save_hyperparameters(args)
def train_dataloader(self):
return DataLoader(self.train_data, shuffle=True, batch_size=self.train_batchsize, pin_memory=False,
collate_fn=self.collator)
def val_dataloader(self):
return DataLoader(self.valid_data, shuffle=False, batch_size=self.valid_batchsize, pin_memory=False,
collate_fn=self.collator)
def predict_dataloader(self):
return DataLoader(self.test_data, shuffle=False, batch_size=self.valid_batchsize, pin_memory=False,
collate_fn=self.collator)
def load_schema(self, data_path, args):
with open(data_path, 'r', encoding='utf8') as f:
lines = f.readlines()
label_list = []
for line in tqdm(lines):
data = json.loads(line)
labels = data[args.label_name] if args.label_name in data.keys(
) else 0
if labels not in label_list:
label_list.append(labels)
label2id, id2label = {}, {}
for i, k in enumerate(label_list):
label2id[k] = i
id2label[i] = k
return label2id, id2label
class taskModel(torch.nn.Module):
def __init__(self, args):
super().__init__()
self.args = args
        print('args model_type:', args.model_type)
self.bert_encoder = model_dict[args.model_type].from_pretrained(
args.pretrained_model_path)
self.config = self.bert_encoder.config
self.cls_layer = torch.nn.Linear(
in_features=self.config.hidden_size, out_features=self.args.num_labels)
self.loss_func = torch.nn.CrossEntropyLoss()
def forward(self, input_ids, attention_mask, token_type_ids, labels=None):
if self.args.model_type == 'fengshen-megatron_t5':
bert_output = self.bert_encoder(
input_ids=input_ids, attention_mask=attention_mask) # (bsz, seq, dim)
encode = bert_output.last_hidden_state[:, 0, :]
else:
bert_output = self.bert_encoder(
input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids) # (bsz, seq, dim)
encode = bert_output[1]
logits = self.cls_layer(encode)
if labels is not None:
loss = self.loss_func(logits, labels.view(-1,))
return loss, logits
else:
return 0, logits
class LitModel(pl.LightningModule):
@staticmethod
def add_model_specific_args(parent_args):
parser = parent_args.add_argument_group('BaseModel')
parser.add_argument('--num_labels', default=2, type=int)
return parent_args
def __init__(self, args, num_data):
super().__init__()
self.args = args
self.num_data = num_data
self.model = model_dict[args.model_type].from_pretrained(
args.pretrained_model_path)
self.save_hyperparameters(args)
def setup(self, stage) -> None:
train_loader = self.trainer._data_connector._train_dataloader_source.dataloader()
# Calculate total steps
if self.trainer.max_epochs > 0:
world_size = self.trainer.world_size
tb_size = self.hparams.train_batchsize * max(1, world_size)
ab_size = self.trainer.accumulate_grad_batches
self.total_steps = (len(train_loader.dataset) *
self.trainer.max_epochs // tb_size) // ab_size
else:
self.total_steps = self.trainer.max_steps // self.trainer.accumulate_grad_batches
print('Total steps: {}' .format(self.total_steps))
def training_step(self, batch, batch_idx):
del batch['id']
output = self.model(**batch)
loss, logits = output[0], output[1]
acc = self.comput_metrix(logits, batch['labels'])
self.log('train_loss', loss)
self.log('train_acc', acc)
return loss
def comput_metrix(self, logits, labels):
y_pred = torch.argmax(logits, dim=-1)
y_pred = y_pred.view(size=(-1,))
y_true = labels.view(size=(-1,)).float()
corr = torch.eq(y_pred, y_true)
acc = torch.sum(corr.float())/labels.size()[0]
return acc
def validation_step(self, batch, batch_idx):
del batch['id']
output = self.model(**batch)
loss, logits = output[0], output[1]
acc = self.comput_metrix(logits, batch['labels'])
self.log('val_loss', loss)
self.log('val_acc', acc, sync_dist=True)
def predict_step(self, batch, batch_idx):
ids = batch['id']
del batch['id']
output = self.model(**batch)
        return ids, output.logits  # return an ordered tuple; a set would not keep (ids, logits) order for save_test
def configure_optimizers(self):
from fengshen.models.model_utils import configure_optimizers
return configure_optimizers(self)
class TaskModelCheckpoint:
@staticmethod
def add_argparse_args(parent_args):
parser = parent_args.add_argument_group('BaseModel')
parser.add_argument('--monitor', default='train_loss', type=str)
parser.add_argument('--mode', default='min', type=str)
parser.add_argument('--dirpath', default='./log/', type=str)
parser.add_argument(
'--filename', default='model-{epoch:02d}-{train_loss:.4f}', type=str)
parser.add_argument('--save_top_k', default=3, type=float)
parser.add_argument('--every_n_train_steps', default=100, type=float)
parser.add_argument('--save_weights_only', default=True, type=bool)
return parent_args
def __init__(self, args):
self.callbacks = ModelCheckpoint(monitor=args.monitor,
save_top_k=args.save_top_k,
mode=args.mode,
every_n_train_steps=args.every_n_train_steps,
save_weights_only=args.save_weights_only,
dirpath=args.dirpath,
every_n_epochs=1,
filename=args.filename)
def save_test(data, args, data_model, rank):
file_name = args.output_save_path + f'.{rank}'
with open(file_name, 'w', encoding='utf-8') as f:
idx = 0
for i in range(len(data)):
ids, batch = data[i]
for id, sample in zip(ids, batch):
tmp_result = dict()
label_id = np.argmax(sample.cpu().numpy())
tmp_result['id'] = id.item()
tmp_result['label'] = data_model.id2label[label_id]
json_data = json.dumps(tmp_result, ensure_ascii=False)
f.write(json_data+'\n')
idx += 1
print('save the result to '+file_name)
def main():
pl.seed_everything(42)
total_parser = argparse.ArgumentParser("TASK NAME")
total_parser.add_argument('--pretrained_model_path', default='', type=str)
total_parser.add_argument('--output_save_path',
default='./predict.json', type=str)
total_parser.add_argument('--model_type',
default='huggingface-bert', type=str)
# * Args for data preprocessing
total_parser = TaskDataModel.add_data_specific_args(total_parser)
# * Args for training
total_parser = pl.Trainer.add_argparse_args(total_parser)
total_parser = TaskModelCheckpoint.add_argparse_args(total_parser)
# * Args for base model
from fengshen.models.model_utils import add_module_args
total_parser = add_module_args(total_parser)
total_parser = LitModel.add_model_specific_args(total_parser)
args = total_parser.parse_args()
print(args.pretrained_model_path)
checkpoint_callback = TaskModelCheckpoint(args).callbacks
early_stop_callback = EarlyStopping(
monitor="val_acc", min_delta=0.00, patience=5, verbose=False, mode="max")
lr_monitor = LearningRateMonitor(logging_interval='step')
trainer = pl.Trainer.from_argparse_args(args,
callbacks=[
checkpoint_callback,
lr_monitor,
early_stop_callback]
)
data_model = TaskDataModel(args)
model = LitModel(args, len(data_model.train_dataloader()))
trainer.fit(model, data_model)
result = trainer.predict(
model, data_model, ckpt_path=trainer.checkpoint_callback.best_model_path)
save_test(result, args, data_model, trainer.global_rank)
if __name__ == "__main__":
main()
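# A minimal launch sketch for this script; the checkpoint name, data files and trainer
# flags below are illustrative assumptions, and the backbone is picked via --model_type
# from the keys of model_dict above:
#   python finetune_classification.py \
#       --model_type huggingface-auto \
#       --pretrained_model_path bert-base-chinese \
#       --data_dir ./data --train_data train.json --valid_data dev.json --test_data test.json \
#       --train_batchsize 16 --max_length 128 --max_epochs 3
# Predictions from the best checkpoint are written to --output_save_path, one file per rank.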
| 15,787 | 39.482051 | 117 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/DAVAE/generate.py
|
# -*- encoding: utf-8 -*-
'''
Copyright 2022 The International Digital Economy Academy (IDEA). CCNL team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@File : generate.py
@Time : 2022/11/04 19:17
@Author : Liang Yuxin
@Version : 1.0
@Contact : [email protected]
@License : (C)Copyright 2022-2023, CCNL-IDEA
'''
# here put the import lib
import torch
from fengshen.models.DAVAE.DAVAEModel import DAVAEModel
from transformers import BertTokenizer,T5Tokenizer
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
encoder_tokenizer = BertTokenizer.from_pretrained("IDEA-CCNL/Randeng-DAVAE-1.2B-General-Chinese")
decoder_tokenizer = T5Tokenizer.from_pretrained("IDEA-CCNL/Randeng-DAVAE-1.2B-General-Chinese", eos_token = '<|endoftext|>', pad_token = '<pad>',extra_ids=0)
decoder_tokenizer.add_special_tokens({'bos_token':'<bos>'})
vae_model = DAVAEModel.from_pretrained("IDEA-CCNL/Randeng-DAVAE-1.2B-General-Chinese").to(device)
input_texts = [
"针对电力系统中的混沌振荡对整个互联电网的危害问题,提出了一种基于非线性光滑函数的滑模控制方法.",
"超市面积不算大.挺方便附近的居民购买的. 生活用品也比较齐全.价格适用中.",
]
output_texts = vae_model.simulate_batch(encoder_tokenizer,decoder_tokenizer,input_texts)
print(output_texts)
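# Expected result, assuming the batch interface above: output_texts is a list of generated
# sentences, one per entry in input_texts.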
| 1,595 | 42.135135 | 157 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/disco_project/st_disco.py
|
# from disco_huge import Diffuser
# from utils import *
from disco import Diffuser
import streamlit as st
from io import BytesIO
from PIL import Image
from disco import steps
@st.cache(show_spinner=False, allow_output_mutation=True)  # cached so the diffuser is only loaded once
class ST_Diffuser(Diffuser):
def __init__(self, custom_path):
super().__init__(custom_path)
if __name__ == '__main__':
    dd = ST_Diffuser(custom_path="IDEA-CCNL/Taiyi-Diffusion-532M-Nature")  # initialization
form = st.form("参数设置")
input_text = form.text_input('输入文本生成图像:', value='', placeholder='你想象的一个画面')
form.form_submit_button("提交")
uploaded_file = st.file_uploader("上传初始化图片(可选)", type=["jpg", "png", "jpeg"])
text_scale_norm = st.sidebar.slider('文本强度', 0.1, 1.0, 0.5, step=0.1)
text_scale = int(text_scale_norm * 10000)
res_skip_steps = st.sidebar.slider('加噪强度', 0.1, 1.0, 0.9, step=0.1)
skip_steps = int(steps - round(res_skip_steps * steps))
width = st.sidebar.slider('宽度', 384, 1024, 512, step=64)
    height = st.sidebar.slider('高度', 384, 1024, 512, step=64)
with st.spinner('正在生成中...'):
capture_img = None
if uploaded_file is not None:
# To read file as bytes:
bytes_data = uploaded_file.getvalue()
            # wrap the raw bytes in a BytesIO stream
            bytes_data = BytesIO(bytes_data)
            # Image.open() can read from a byte stream
            capture_img = Image.open(bytes_data).convert('RGB').resize((width, height))
image_status = st.empty()
image_status.image(capture_img, use_column_width=True)
else:
image_status = st.empty()
if input_text:
# global text_prompts
input_text_prompts = [input_text]
image = dd.generate(input_text_prompts,
capture_img,
clip_guidance_scale=text_scale,
skip_steps=skip_steps,
st_dynamic_image=image_status,
init_scale=None,
side_x=width,
                                side_y=height)  # final result; the live preview is updated inside generate()
image_status.image(image, use_column_width=True)
| 2,215 | 37.877193 | 87 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/disco_project/disco.py
|
import os
import sys
# sys.path.insert(0, f'{PROJECT_DIR}/guided-diffusion') # prepend so the local copy is used instead of the installed library
import subprocess
import io
import torch.nn as nn
from torch.nn import functional as F
import torch
import torchvision.transforms.functional as TF
import torchvision.transforms as T
import math
import requests
import cv2
from resize_right import resize
from guided_diffusion.guided_diffusion.script_util import model_and_diffusion_defaults
from types import SimpleNamespace
from PIL import Image
import argparse
from guided_diffusion.guided_diffusion.unet import HFUNetModel
from tqdm.notebook import tqdm
from datetime import datetime
from guided_diffusion.guided_diffusion.script_util import create_model_and_diffusion
import clip
from transformers import BertForSequenceClassification, BertTokenizer
import gc
import random
# ======================== GLOBAL SETTING ========================
PROJECT_DIR = os.path.dirname(os.path.abspath(__file__))
useCPU = False # @param {type:"boolean"}
skip_augs = False # @param{type: 'boolean'}
perlin_init = False # @param{type: 'boolean'}
use_secondary_model = False
diffusion_model = "custom"
# Dimensions must by multiples of 64.
side_x = 512
side_y = 512
diffusion_sampling_mode = 'ddim' # @param ['plms','ddim']
use_checkpoint = True # @param {type: 'boolean'}
ViTB32 = False # @param{type:"boolean"}
ViTB16 = False # @param{type:"boolean"}
ViTL14 = True # @param{type:"boolean"}
ViTL14_336px = False # @param{type:"boolean"}
RN101 = False # @param{type:"boolean"}
RN50 = False # @param{type:"boolean"}
RN50x4 = False # @param{type:"boolean"}
RN50x16 = False # @param{type:"boolean"}
RN50x64 = False # @param{type:"boolean"}
# @markdown #####**OpenCLIP settings:**
ViTB32_laion2b_e16 = False # @param{type:"boolean"}
ViTB32_laion400m_e31 = False # @param{type:"boolean"}
ViTB32_laion400m_32 = False # @param{type:"boolean"}
ViTB32quickgelu_laion400m_e31 = False # @param{type:"boolean"}
ViTB32quickgelu_laion400m_e32 = False # @param{type:"boolean"}
ViTB16_laion400m_e31 = False # @param{type:"boolean"}
ViTB16_laion400m_e32 = False # @param{type:"boolean"}
RN50_yffcc15m = False # @param{type:"boolean"}
RN50_cc12m = False # @param{type:"boolean"}
RN50_quickgelu_yfcc15m = False # @param{type:"boolean"}
RN50_quickgelu_cc12m = False # @param{type:"boolean"}
RN101_yfcc15m = False # @param{type:"boolean"}
RN101_quickgelu_yfcc15m = False # @param{type:"boolean"}
# @markdown ####**Basic Settings:**
# NOTE: steps can be changed here, but the model then needs to be re-initialized; it is not exposed through the interface.
steps = 100 # @param [25,50,100,150,250,500,1000]{type: 'raw', allow-input: true}
tv_scale = 0 # @param{type: 'number'}
range_scale = 150 # @param{type: 'number'}
sat_scale = 0 # @param{type: 'number'}
cutn_batches = 1 # @param{type: 'number'} # NOTE: augments the image and accumulates the CLIP gradient over n cutout batches, which is then used as guidance.
skip_augs = False # @param{type: 'boolean'}
# @markdown ####**Saving:**
intermediate_saves = 0 # @param{type: 'raw'}
intermediates_in_subfolder = True # @param{type: 'boolean'}
# perlin_init = False # @param{type: 'boolean'}
perlin_mode = 'mixed' # @param ['mixed', 'color', 'gray']
set_seed = 'random_seed' # @param{type: 'string'}
eta = 0.8 # @param{type: 'number'}
clamp_grad = True # @param{type: 'boolean'}
clamp_max = 0.05 # @param{type: 'number'}
# EXTRA ADVANCED SETTINGS:
randomize_class = True
clip_denoised = False
fuzzy_prompt = False
rand_mag = 0.05
# @markdown ---
cut_overview = "[12]*400+[4]*600" # @param {type: 'string'}
cut_innercut = "[4]*400+[12]*600" # @param {type: 'string'}
cut_ic_pow = "[1]*1000" # @param {type: 'string'}
cut_icgray_p = "[0.2]*400+[0]*600" # @param {type: 'string'}
# @markdown ####**Transformation Settings:**
use_vertical_symmetry = False # @param {type:"boolean"}
use_horizontal_symmetry = False # @param {type:"boolean"}
transformation_percent = [0.09] # @param
display_rate = 3 # @param{type: 'number'}
n_batches = 1 # @param{type: 'number'}
# @markdown If you're having issues with model downloads, check this to compare SHA's:
check_model_SHA = False # @param{type:"boolean"}
interp_spline = 'Linear' # Do not change, currently will not look good. param ['Linear','Quadratic','Cubic']{type:"string"}
resume_run = False
batch_size = 1
def createPath(filepath):
os.makedirs(filepath, exist_ok=True)
def wget(url, outputdir):
res = subprocess.run(['wget', url, '-P', f'{outputdir}'], stdout=subprocess.PIPE).stdout.decode('utf-8')
print(res)
def alpha_sigma_to_t(alpha, sigma):
return torch.atan2(sigma, alpha) * 2 / math.pi
def interp(t):
return 3 * t**2 - 2 * t ** 3
def perlin(width, height, scale=10, device=None):
gx, gy = torch.randn(2, width + 1, height + 1, 1, 1, device=device)
xs = torch.linspace(0, 1, scale + 1)[:-1, None].to(device)
ys = torch.linspace(0, 1, scale + 1)[None, :-1].to(device)
wx = 1 - interp(xs)
wy = 1 - interp(ys)
dots = 0
dots += wx * wy * (gx[:-1, :-1] * xs + gy[:-1, :-1] * ys)
dots += (1 - wx) * wy * (-gx[1:, :-1] * (1 - xs) + gy[1:, :-1] * ys)
dots += wx * (1 - wy) * (gx[:-1, 1:] * xs - gy[:-1, 1:] * (1 - ys))
dots += (1 - wx) * (1 - wy) * (-gx[1:, 1:] * (1 - xs) - gy[1:, 1:] * (1 - ys))
return dots.permute(0, 2, 1, 3).contiguous().view(width * scale, height * scale)
def perlin_ms(octaves, width, height, grayscale, device=None):
out_array = [0.5] if grayscale else [0.5, 0.5, 0.5]
# out_array = [0.0] if grayscale else [0.0, 0.0, 0.0]
for i in range(1 if grayscale else 3):
scale = 2 ** len(octaves)
oct_width = width
oct_height = height
for oct in octaves:
p = perlin(oct_width, oct_height, scale, device)
out_array[i] += p * oct
scale //= 2
oct_width *= 2
oct_height *= 2
return torch.cat(out_array)
def fetch(url_or_path):
if str(url_or_path).startswith('http://') or str(url_or_path).startswith('https://'):
r = requests.get(url_or_path)
r.raise_for_status()
fd = io.BytesIO()
fd.write(r.content)
fd.seek(0)
return fd
return open(url_or_path, 'rb')
def read_image_workaround(path):
"""OpenCV reads images as BGR, Pillow saves them as RGB. Work around
this incompatibility to avoid colour inversions."""
im_tmp = cv2.imread(path)
return cv2.cvtColor(im_tmp, cv2.COLOR_BGR2RGB)
def parse_prompt(prompt):
if prompt.startswith('http://') or prompt.startswith('https://'):
vals = prompt.rsplit(':', 2)
vals = [vals[0] + ':' + vals[1], *vals[2:]]
else:
vals = prompt.rsplit(':', 1)
vals = vals + ['', '1'][len(vals):]
return vals[0], float(vals[1])
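# Example of the prompt syntax handled above: parse_prompt('a mountain lake:2') returns
# ('a mountain lake', 2.0); a prompt without an explicit ':weight' suffix defaults to 1.0.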
def sinc(x):
return torch.where(x != 0, torch.sin(math.pi * x) / (math.pi * x), x.new_ones([]))
def lanczos(x, a):
cond = torch.logical_and(-a < x, x < a)
out = torch.where(cond, sinc(x) * sinc(x / a), x.new_zeros([]))
return out / out.sum()
def ramp(ratio, width):
n = math.ceil(width / ratio + 1)
out = torch.empty([n])
cur = 0
for i in range(out.shape[0]):
out[i] = cur
cur += ratio
return torch.cat([-out[1:].flip([0]), out])[1:-1]
def resample(input, size, align_corners=True):
n, c, h, w = input.shape
dh, dw = size
input = input.reshape([n * c, 1, h, w])
if dh < h:
kernel_h = lanczos(ramp(dh / h, 2), 2).to(input.device, input.dtype)
pad_h = (kernel_h.shape[0] - 1) // 2
input = F.pad(input, (0, 0, pad_h, pad_h), 'reflect')
input = F.conv2d(input, kernel_h[None, None, :, None])
if dw < w:
kernel_w = lanczos(ramp(dw / w, 2), 2).to(input.device, input.dtype)
pad_w = (kernel_w.shape[0] - 1) // 2
input = F.pad(input, (pad_w, pad_w, 0, 0), 'reflect')
input = F.conv2d(input, kernel_w[None, None, None, :])
input = input.reshape([n, c, h, w])
return F.interpolate(input, size, mode='bicubic', align_corners=align_corners)
class MakeCutouts(nn.Module):
def __init__(self, cut_size, cutn, skip_augs=False):
super().__init__()
self.cut_size = cut_size
self.cutn = cutn
self.skip_augs = skip_augs
self.augs = T.Compose([
T.RandomHorizontalFlip(p=0.5),
T.Lambda(lambda x: x + torch.randn_like(x) * 0.01),
T.RandomAffine(degrees=15, translate=(0.1, 0.1)),
T.Lambda(lambda x: x + torch.randn_like(x) * 0.01),
T.RandomPerspective(distortion_scale=0.4, p=0.7),
T.Lambda(lambda x: x + torch.randn_like(x) * 0.01),
T.RandomGrayscale(p=0.15),
T.Lambda(lambda x: x + torch.randn_like(x) * 0.01),
# T.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.1),
])
def forward(self, input):
input = T.Pad(input.shape[2] // 4, fill=0)(input)
sideY, sideX = input.shape[2:4]
max_size = min(sideX, sideY)
cutouts = []
for ch in range(self.cutn):
if ch > self.cutn - self.cutn // 4:
cutout = input.clone()
else:
size = int(max_size * torch.zeros(1,).normal_(mean=.8, std=.3).clip(float(self.cut_size / max_size), 1.))
offsetx = torch.randint(0, abs(sideX - size + 1), ())
offsety = torch.randint(0, abs(sideY - size + 1), ())
cutout = input[:, :, offsety:offsety + size, offsetx:offsetx + size]
if not self.skip_augs:
cutout = self.augs(cutout)
cutouts.append(resample(cutout, (self.cut_size, self.cut_size)))
del cutout
cutouts = torch.cat(cutouts, dim=0)
return cutouts
class MakeCutoutsDango(nn.Module):
def __init__(self, cut_size, args,
Overview=4,
InnerCrop=0, IC_Size_Pow=0.5, IC_Grey_P=0.2,
):
super().__init__()
self.padargs = {}
self.cutout_debug = False
self.cut_size = cut_size
self.Overview = Overview
self.InnerCrop = InnerCrop
self.IC_Size_Pow = IC_Size_Pow
self.IC_Grey_P = IC_Grey_P
self.augs = T.Compose([
T.RandomHorizontalFlip(p=0.5),
T.Lambda(lambda x: x + torch.randn_like(x) * 0.01),
T.RandomAffine(degrees=10, translate=(0.05, 0.05), interpolation=T.InterpolationMode.BILINEAR),
T.Lambda(lambda x: x + torch.randn_like(x) * 0.01),
T.RandomGrayscale(p=0.1),
T.Lambda(lambda x: x + torch.randn_like(x) * 0.01),
T.ColorJitter(brightness=0.1, contrast=0.1, saturation=0.1, hue=0.1),
])
def forward(self, input):
cutouts = []
gray = T.Grayscale(3)
sideY, sideX = input.shape[2:4]
max_size = min(sideX, sideY)
min_size = min(sideX, sideY, self.cut_size)
output_shape = [1, 3, self.cut_size, self.cut_size]
pad_input = F.pad(input, ((sideY - max_size) // 2, (sideY - max_size) // 2, (sideX - max_size) // 2, (sideX - max_size) // 2), **self.padargs)
cutout = resize(pad_input, out_shape=output_shape)
if self.Overview > 0:
if self.Overview <= 4:
if self.Overview >= 1:
cutouts.append(cutout)
if self.Overview >= 2:
cutouts.append(gray(cutout))
if self.Overview >= 3:
cutouts.append(TF.hflip(cutout))
if self.Overview == 4:
cutouts.append(gray(TF.hflip(cutout)))
else:
cutout = resize(pad_input, out_shape=output_shape)
for _ in range(self.Overview):
cutouts.append(cutout)
if self.cutout_debug:
# if is_colab:
# TF.to_pil_image(cutouts[0].clamp(0, 1).squeeze(0)).save("/content/cutout_overview0.jpg",quality=99)
# else:
TF.to_pil_image(cutouts[0].clamp(0, 1).squeeze(0)).save("cutout_overview0.jpg", quality=99)
if self.InnerCrop > 0:
for i in range(self.InnerCrop):
size = int(torch.rand([])**self.IC_Size_Pow * (max_size - min_size) + min_size)
offsetx = torch.randint(0, sideX - size + 1, ())
offsety = torch.randint(0, sideY - size + 1, ())
cutout = input[:, :, offsety:offsety + size, offsetx:offsetx + size]
if i <= int(self.IC_Grey_P * self.InnerCrop):
cutout = gray(cutout)
cutout = resize(cutout, out_shape=output_shape)
cutouts.append(cutout)
if self.cutout_debug:
# if is_colab:
# TF.to_pil_image(cutouts[-1].clamp(0, 1).squeeze(0)).save("/content/cutout_InnerCrop.jpg",quality=99)
# else:
TF.to_pil_image(cutouts[-1].clamp(0, 1).squeeze(0)).save("cutout_InnerCrop.jpg", quality=99)
cutouts = torch.cat(cutouts)
if skip_augs is not True:
cutouts = self.augs(cutouts)
return cutouts
def spherical_dist_loss(x, y):
x = F.normalize(x, dim=-1)
y = F.normalize(y, dim=-1)
return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
def tv_loss(input):
"""L2 total variation loss, as in Mahendran et al."""
input = F.pad(input, (0, 1, 0, 1), 'replicate')
x_diff = input[..., :-1, 1:] - input[..., :-1, :-1]
y_diff = input[..., 1:, :-1] - input[..., :-1, :-1]
return (x_diff**2 + y_diff**2).mean([1, 2, 3])
def range_loss(input):
return (input - input.clamp(-1, 1)).pow(2).mean([1, 2, 3])
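# spherical_dist_loss measures the CLIP image/text distance (scaled by clip_guidance_scale),
# while tv_loss and range_loss act as regularizers weighted by tv_scale and range_scale in
# cond_fn inside Diffuser.generate below.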
def symmetry_transformation_fn(x):
    # NOTE: force image symmetry; the local assignment below hard-disables horizontal
    # symmetry, overriding the global flag
use_horizontal_symmetry = False
if use_horizontal_symmetry:
[n, c, h, w] = x.size()
x = torch.concat((x[:, :, :, :w // 2], torch.flip(x[:, :, :, :w // 2], [-1])), -1)
print("horizontal symmetry applied")
if use_vertical_symmetry:
[n, c, h, w] = x.size()
x = torch.concat((x[:, :, :h // 2, :], torch.flip(x[:, :, :h // 2, :], [-2])), -2)
print("vertical symmetry applied")
return x
# def split_prompts(prompts):
# prompt_series = pd.Series([np.nan for a in range(max_frames)])
# for i, prompt in prompts.items():
# prompt_series[i] = prompt
# # prompt_series = prompt_series.astype(str)
# prompt_series = prompt_series.ffill().bfill()
# return prompt_series
"""
other chaos settings
"""
# dir settings
outDirPath = f'{PROJECT_DIR}/images_out'
createPath(outDirPath)
model_path = f'{PROJECT_DIR}/models'
createPath(model_path)
# GPU setup
DEVICE = torch.device('cuda:0' if (torch.cuda.is_available() and not useCPU) else 'cpu')
print('Using device:', DEVICE)
device = DEVICE # At least one of the modules expects this name..
if not useCPU:
if torch.cuda.get_device_capability(DEVICE) == (8, 0): # A100 fix thanks to Emad
print('Disabling CUDNN for A100 gpu', file=sys.stderr)
torch.backends.cudnn.enabled = False
model_config = model_and_diffusion_defaults()
model_config.update({
'attention_resolutions': '32, 16, 8',
'class_cond': False,
'diffusion_steps': 1000, # No need to edit this, it is taken care of later.
'rescale_timesteps': True,
'timestep_respacing': 250, # No need to edit this, it is taken care of later.
'image_size': 512,
'learn_sigma': True,
'noise_schedule': 'linear',
'num_channels': 256,
'num_head_channels': 64,
'num_res_blocks': 2,
'resblock_updown': True,
'use_checkpoint': use_checkpoint,
'use_fp16': not useCPU,
'use_scale_shift_norm': True,
})
model_default = model_config['image_size']
normalize = T.Normalize(mean=[0.48145466, 0.4578275, 0.40821073], std=[0.26862954, 0.26130258, 0.27577711])
# Make folder for batch
steps_per_checkpoint = steps + 10
# Update Model Settings
timestep_respacing = f'ddim{steps}'
diffusion_steps = (1000 // steps) * steps if steps < 1000 else steps
model_config.update({
'timestep_respacing': timestep_respacing,
'diffusion_steps': diffusion_steps,
})
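# Worked example of the respacing above: with the default steps = 100, timestep_respacing
# becomes 'ddim100' and diffusion_steps = (1000 // 100) * 100 = 1000, i.e. the 1000-step
# schedule is subsampled to 100 DDIM steps.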
start_frame = 0
print('Starting Run:')
if set_seed == 'random_seed':
random.seed()
seed = random.randint(0, 2**32)
# print(f'Using seed: {seed}')
else:
seed = int(set_seed)
args = {
# 'seed': seed,
'display_rate': display_rate,
'n_batches': n_batches,
'batch_size': batch_size,
'steps': steps,
'diffusion_sampling_mode': diffusion_sampling_mode,
# 'width_height': width_height,
'tv_scale': tv_scale,
'range_scale': range_scale,
'sat_scale': sat_scale,
'cutn_batches': cutn_batches,
# 'side_x': side_x,
# 'side_y': side_y,
'timestep_respacing': timestep_respacing,
'diffusion_steps': diffusion_steps,
'cut_overview': eval(cut_overview),
'cut_innercut': eval(cut_innercut),
'cut_ic_pow': eval(cut_ic_pow),
'cut_icgray_p': eval(cut_icgray_p),
'intermediate_saves': intermediate_saves,
'intermediates_in_subfolder': intermediates_in_subfolder,
'steps_per_checkpoint': steps_per_checkpoint,
'set_seed': set_seed,
'eta': eta,
'clamp_grad': clamp_grad,
'clamp_max': clamp_max,
'skip_augs': skip_augs,
'randomize_class': randomize_class,
'clip_denoised': clip_denoised,
'fuzzy_prompt': fuzzy_prompt,
'rand_mag': rand_mag,
'use_vertical_symmetry': use_vertical_symmetry,
'use_horizontal_symmetry': use_horizontal_symmetry,
'transformation_percent': transformation_percent,
}
args = SimpleNamespace(**args)
# ======================== GLOBAL SETTING END ========================
class Diffuser:
    def __init__(self, custom_path='IDEA-CCNL/Taiyi-Diffusion-532M-Nature'):
        self.model_setup(custom_path)
def model_setup(self, custom_path):
# LOADING MODEL
os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'
print(f'Prepping model...model name: {custom_path}')
__, self.diffusion = create_model_and_diffusion(**model_config)
self.model = HFUNetModel.from_pretrained(custom_path)
# total = get_parameter_num(self.model)
# print("Number of parameter: %.2fM" % (total/1e6))
# print("Number of parameter: %.2fM" % (total/1024/1024))
self.model.requires_grad_(False).eval().to(device)
for name, param in self.model.named_parameters():
if 'qkv' in name or 'norm' in name or 'proj' in name:
param.requires_grad_()
if model_config['use_fp16']:
self.model.convert_to_fp16()
print(f'Diffusion_model Loaded {diffusion_model}')
# NOTE Directly Load The Text Encoder From Hugging Face
print('Prepping model...model name: CLIP')
self.taiyi_tokenizer = BertTokenizer.from_pretrained("IDEA-CCNL/Taiyi-CLIP-Roberta-large-326M-Chinese")
self.taiyi_transformer = BertForSequenceClassification.from_pretrained("IDEA-CCNL/Taiyi-CLIP-Roberta-large-326M-Chinese").eval().to(device)
self.clip_models = []
if ViTB32:
self.clip_models.append(clip.load('ViT-B/32', jit=False)[0].eval().requires_grad_(False).to(device))
if ViTB16:
self.clip_models.append(clip.load('ViT-B/16', jit=False)[0].eval().requires_grad_(False).to(device))
if ViTL14:
self.clip_models.append(clip.load('ViT-L/14', jit=False)[0].eval().requires_grad_(False).to(device))
if ViTL14_336px:
self.clip_models.append(clip.load('ViT-L/14@336px', jit=False)[0].eval().requires_grad_(False).to(device))
print('CLIP Loaded')
# self.lpips_model = lpips.LPIPS(net='vgg').to(device)
def generate(self,
input_text_prompts=['夕阳西下'],
init_image=None,
skip_steps=10,
clip_guidance_scale=7500,
init_scale=2000,
st_dynamic_image=None,
seed=None,
side_x=512,
side_y=512,
):
seed = seed
frame_num = 0
init_image = init_image
init_scale = init_scale
skip_steps = skip_steps
loss_values = []
# if seed is not None:
# np.random.seed(seed)
# random.seed(seed)
# torch.manual_seed(seed)
# torch.cuda.manual_seed_all(seed)
# torch.backends.cudnn.deterministic = True
# target_embeds, weights = [], []
frame_prompt = input_text_prompts
print(f'Frame {frame_num} Prompt: {frame_prompt}')
model_stats = []
for clip_model in self.clip_models:
# cutn = 16
model_stat = {"clip_model": None, "target_embeds": [], "make_cutouts": None, "weights": []}
model_stat["clip_model"] = clip_model
for prompt in frame_prompt:
txt, weight = parse_prompt(prompt)
# txt = clip_model.encode_text(clip.tokenize(prompt).to(device)).float()
# NOTE use chinese CLIP
txt = self.taiyi_transformer(self.taiyi_tokenizer(txt, return_tensors='pt')['input_ids'].to(device)).logits
if args.fuzzy_prompt:
for i in range(25):
model_stat["target_embeds"].append((txt + torch.randn(txt.shape).cuda() * args.rand_mag).clamp(0, 1))
model_stat["weights"].append(weight)
else:
model_stat["target_embeds"].append(txt)
model_stat["weights"].append(weight)
model_stat["target_embeds"] = torch.cat(model_stat["target_embeds"])
model_stat["weights"] = torch.tensor(model_stat["weights"], device=device)
if model_stat["weights"].sum().abs() < 1e-3:
raise RuntimeError('The weights must not sum to 0.')
model_stat["weights"] /= model_stat["weights"].sum().abs()
model_stats.append(model_stat)
init = None
if init_image is not None:
            # init = Image.open(fetch(init_image)).convert('RGB')  # init_image is passed in as an already-loaded PIL image, not a path/URL
init = init_image
init = init.resize((side_x, side_y), Image.LANCZOS)
init = TF.to_tensor(init).to(device).unsqueeze(0).mul(2).sub(1)
cur_t = None
def cond_fn(x, t, y=None):
with torch.enable_grad():
x_is_NaN = False
x = x.detach().requires_grad_()
n = x.shape[0]
my_t = torch.ones([n], device=device, dtype=torch.long) * cur_t
out = self.diffusion.p_mean_variance(self.model, x, my_t, clip_denoised=False, model_kwargs={'y': y})
fac = self.diffusion.sqrt_one_minus_alphas_cumprod[cur_t]
x_in = out['pred_xstart'] * fac + x * (1 - fac)
x_in_grad = torch.zeros_like(x_in)
for model_stat in model_stats:
for i in range(args.cutn_batches):
t_int = int(t.item()) + 1 # errors on last step without +1, need to find source
# try:
input_resolution = model_stat["clip_model"].visual.input_resolution
# except:
# input_resolution = 224
cuts = MakeCutoutsDango(input_resolution,
Overview=args.cut_overview[1000 - t_int],
InnerCrop=args.cut_innercut[1000 - t_int],
IC_Size_Pow=args.cut_ic_pow[1000 - t_int],
IC_Grey_P=args.cut_icgray_p[1000 - t_int],
args=args,
)
clip_in = normalize(cuts(x_in.add(1).div(2)))
image_embeds = model_stat["clip_model"].encode_image(clip_in).float()
dists = spherical_dist_loss(image_embeds.unsqueeze(1), model_stat["target_embeds"].unsqueeze(0))
dists = dists.view([args.cut_overview[1000 - t_int] + args.cut_innercut[1000 - t_int], n, -1])
losses = dists.mul(model_stat["weights"]).sum(2).mean(0)
loss_values.append(losses.sum().item()) # log loss, probably shouldn't do per cutn_batch
x_in_grad += torch.autograd.grad(losses.sum() * clip_guidance_scale, x_in)[0] / cutn_batches
tv_losses = tv_loss(x_in)
range_losses = range_loss(out['pred_xstart'])
sat_losses = torch.abs(x_in - x_in.clamp(min=-1, max=1)).mean()
loss = tv_losses.sum() * tv_scale + range_losses.sum() * range_scale + sat_losses.sum() * sat_scale
                if init is not None and init_scale:
                    # NOTE: self.lpips_model is commented out in model_setup above, so this
                    # branch requires init_scale to be falsy (e.g. None, as st_disco.py passes)
                    # unless the LPIPS model is re-enabled.
                    init_losses = self.lpips_model(x_in, init)
                    loss = loss + init_losses.sum() * init_scale
x_in_grad += torch.autograd.grad(loss, x_in)[0]
if not torch.isnan(x_in_grad).any():
grad = -torch.autograd.grad(x_in, x, x_in_grad)[0]
else:
x_is_NaN = True
grad = torch.zeros_like(x)
if args.clamp_grad and not x_is_NaN:
magnitude = grad.square().mean().sqrt()
return grad * magnitude.clamp(max=args.clamp_max) / magnitude # min=-0.02, min=-clamp_max,
return grad
if args.diffusion_sampling_mode == 'ddim':
sample_fn = self.diffusion.ddim_sample_loop_progressive
else:
sample_fn = self.diffusion.plms_sample_loop_progressive
for i in range(args.n_batches):
current_time = datetime.now().strftime('%y%m%d-%H%M%S_%f')
batchBar = tqdm(range(args.n_batches), desc="Batches")
batchBar.n = i
batchBar.refresh()
gc.collect()
torch.cuda.empty_cache()
cur_t = self.diffusion.num_timesteps - skip_steps - 1
# total_steps = cur_t
if args.diffusion_sampling_mode == 'ddim':
samples = sample_fn(
self.model,
(batch_size, 3, side_y, side_x),
clip_denoised=clip_denoised,
model_kwargs={},
cond_fn=cond_fn,
progress=True,
skip_timesteps=skip_steps,
init_image=init,
randomize_class=randomize_class,
eta=eta,
transformation_fn=symmetry_transformation_fn,
transformation_percent=args.transformation_percent
)
else:
samples = sample_fn(
self.model,
(batch_size, 3, side_y, side_x),
clip_denoised=clip_denoised,
model_kwargs={},
cond_fn=cond_fn,
progress=True,
skip_timesteps=skip_steps,
init_image=init,
randomize_class=randomize_class,
order=2,
)
for j, sample in enumerate(samples):
cur_t -= 1
intermediateStep = False
if args.steps_per_checkpoint is not None:
if j % steps_per_checkpoint == 0 and j > 0:
intermediateStep = True
elif j in args.intermediate_saves:
intermediateStep = True
if j % args.display_rate == 0 or cur_t == -1 or intermediateStep:
for k, image in enumerate(sample['pred_xstart']):
# tqdm.write(f'Batch {i}, step {j}, output {k}:')
# percent = math.ceil(j / total_steps * 100)
if args.n_batches > 0:
filename = f'{current_time}-{parse_prompt(prompt)[0]}.png'
image = TF.to_pil_image(image.add(1).div(2).clamp(0, 1))
if j % args.display_rate == 0 or cur_t == -1:
image.save(f'{outDirPath}/{filename}')
if st_dynamic_image:
st_dynamic_image.image(image, use_column_width=True)
# self.current_image = image
return image
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="setting")
parser.add_argument('--prompt', type=str, required=True)
parser.add_argument('--text_scale', type=int, default=5000)
parser.add_argument('--model_path', type=str, default="IDEA-CCNL/Taiyi-Diffusion-532M-Nature")
parser.add_argument('--width', type=int, default=512)
parser.add_argument('--height', type=int, default=512)
user_args = parser.parse_args()
dd = Diffuser(user_args.model_path)
dd.generate([user_args.prompt],
clip_guidance_scale=user_args.text_scale,
side_x=user_args.width,
side_y=user_args.height,
)
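# Example CLI run using the arguments defined above (the prompt text is illustrative):
#   python disco.py --prompt "夕阳西下" --text_scale 5000 --width 512 --height 512
# The generated image is also saved under images_out/ with a timestamped filename.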
| 29,225 | 38.709239 | 150 |
py
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/disco_project/guided_diffusion/__init__.py
| 0 | 0 | 0 |
py
|
|
Fengshenbang-LM
|
Fengshenbang-LM-main/fengshen/examples/disco_project/guided_diffusion/guided_diffusion/resample.py
|
from abc import ABC, abstractmethod
import numpy as np
import torch as th
import torch.distributed as dist
def create_named_schedule_sampler(name, diffusion):
"""
Create a ScheduleSampler from a library of pre-defined samplers.
:param name: the name of the sampler.
:param diffusion: the diffusion object to sample for.
"""
if name == "uniform":
return UniformSampler(diffusion)
elif name == "loss-second-moment":
return LossSecondMomentResampler(diffusion)
else:
raise NotImplementedError(f"unknown schedule sampler: {name}")
class ScheduleSampler(ABC):
"""
A distribution over timesteps in the diffusion process, intended to reduce
variance of the objective.
By default, samplers perform unbiased importance sampling, in which the
objective's mean is unchanged.
However, subclasses may override sample() to change how the resampled
terms are reweighted, allowing for actual changes in the objective.
"""
@abstractmethod
def weights(self):
"""
Get a numpy array of weights, one per diffusion step.
The weights needn't be normalized, but must be positive.
"""
def sample(self, batch_size, device):
"""
Importance-sample timesteps for a batch.
:param batch_size: the number of timesteps.
:param device: the torch device to save to.
:return: a tuple (timesteps, weights):
- timesteps: a tensor of timestep indices.
- weights: a tensor of weights to scale the resulting losses.
"""
w = self.weights()
p = w / np.sum(w)
indices_np = np.random.choice(len(p), size=(batch_size,), p=p)
indices = th.from_numpy(indices_np).long().to(device)
weights_np = 1 / (len(p) * p[indices_np])
weights = th.from_numpy(weights_np).float().to(device)
return indices, weights
class UniformSampler(ScheduleSampler):
def __init__(self, diffusion):
self.diffusion = diffusion
self._weights = np.ones([diffusion.num_timesteps])
def weights(self):
return self._weights
class LossAwareSampler(ScheduleSampler):
def update_with_local_losses(self, local_ts, local_losses):
"""
Update the reweighting using losses from a model.
Call this method from each rank with a batch of timesteps and the
corresponding losses for each of those timesteps.
This method will perform synchronization to make sure all of the ranks
maintain the exact same reweighting.
:param local_ts: an integer Tensor of timesteps.
:param local_losses: a 1D Tensor of losses.
"""
batch_sizes = [
th.tensor([0], dtype=th.int32, device=local_ts.device)
for _ in range(dist.get_world_size())
]
dist.all_gather(
batch_sizes,
th.tensor([len(local_ts)], dtype=th.int32, device=local_ts.device),
)
# Pad all_gather batches to be the maximum batch size.
batch_sizes = [x.item() for x in batch_sizes]
max_bs = max(batch_sizes)
timestep_batches = [th.zeros(max_bs).to(local_ts) for bs in batch_sizes]
loss_batches = [th.zeros(max_bs).to(local_losses) for bs in batch_sizes]
dist.all_gather(timestep_batches, local_ts)
dist.all_gather(loss_batches, local_losses)
timesteps = [
x.item() for y, bs in zip(timestep_batches, batch_sizes) for x in y[:bs]
]
losses = [x.item() for y, bs in zip(loss_batches, batch_sizes) for x in y[:bs]]
self.update_with_all_losses(timesteps, losses)
@abstractmethod
def update_with_all_losses(self, ts, losses):
"""
Update the reweighting using losses from a model.
Sub-classes should override this method to update the reweighting
using losses from the model.
This method directly updates the reweighting without synchronizing
between workers. It is called by update_with_local_losses from all
ranks with identical arguments. Thus, it should have deterministic
behavior to maintain state across workers.
:param ts: a list of int timesteps.
:param losses: a list of float losses, one per timestep.
"""
class LossSecondMomentResampler(LossAwareSampler):
def __init__(self, diffusion, history_per_term=10, uniform_prob=0.001):
self.diffusion = diffusion
self.history_per_term = history_per_term
self.uniform_prob = uniform_prob
self._loss_history = np.zeros(
[diffusion.num_timesteps, history_per_term], dtype=np.float64
)
        self._loss_counts = np.zeros([diffusion.num_timesteps], dtype=int)  # np.int was removed in NumPy 1.24+
def weights(self):
if not self._warmed_up():
return np.ones([self.diffusion.num_timesteps], dtype=np.float64)
weights = np.sqrt(np.mean(self._loss_history ** 2, axis=-1))
weights /= np.sum(weights)
weights *= 1 - self.uniform_prob
weights += self.uniform_prob / len(weights)
return weights
def update_with_all_losses(self, ts, losses):
for t, loss in zip(ts, losses):
if self._loss_counts[t] == self.history_per_term:
# Shift out the oldest loss term.
self._loss_history[t, :-1] = self._loss_history[t, 1:]
self._loss_history[t, -1] = loss
else:
self._loss_history[t, self._loss_counts[t]] = loss
self._loss_counts[t] += 1
def _warmed_up(self):
return (self._loss_counts == self.history_per_term).all()
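# A minimal sketch of how these samplers are typically used in a training loop; the
# `diffusion.training_losses` call and the loss dict layout are assumptions based on the
# guided_diffusion conventions, not definitions from this file:
#
#     sampler = create_named_schedule_sampler("loss-second-moment", diffusion)
#     t, weights = sampler.sample(batch.shape[0], device)
#     losses = diffusion.training_losses(model, batch, t)
#     if isinstance(sampler, LossAwareSampler):
#         # loss-aware samplers update their per-timestep history from the new losses
#         sampler.update_with_local_losses(t, losses["loss"].detach())
#     loss = (losses["loss"] * weights).mean()  # reweight so the objective stays unbiased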
| 5,689 | 35.709677 | 87 |
py
|