| repo | file | code | file_length | avg_line_length | max_line_length | extension_type |
|---|---|---|---|---|---|---|
| stringlengths 2–99 | stringlengths 13–225 | stringlengths 0–18.3M | int64 0–18.3M | float64 0–1.36M | int64 0–4.26M | stringclasses 1 value |
MFTreeSearchCV
|
MFTreeSearchCV-master/utils/unittest_optimisers.py
|
"""
  Unit tests for optimisers.
-- [email protected]
"""
# pylint: disable=import-error
# pylint: disable=no-member
# pylint: disable=invalid-name
# pylint: disable=relative-import
# pylint: disable=abstract-class-not-used
import os
from argparse import Namespace
import numpy as np
import time
# Local
from base_test_class import BaseTestClass, execute_tests
import optimisers as optimisers
class DirectTestCase(BaseTestClass):
"""Unit test class for general utilities. """
def __init__(self, *args, **kwargs):
super(DirectTestCase, self).__init__(*args, **kwargs)
def setUp(self):
""" Sets up attributes. """
self.problems = []
self.max_evals = 2e4
# First problem
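    # For f(x) = (x-1).x on [-1, 1]^4, each coordinate term x^2 - x is minimised at
    # x = 0.5 and maximised at x = -1, which gives min_pt and max_pt below.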
obj = lambda x: np.dot(x-1, x)
dim = 4
min_pt = np.array([0.5] * dim)
max_pt = np.array([-1] * dim)
self.problems.append(self._get_test_case_problem_instance(
obj, [-1] * dim, [1] * dim, min_pt, max_pt, '4D-quadratic'))
# Second problem
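    # For f(x) = exp(-(x-0.5).x) on [-1, 1]^2, the exponent x^2 - 0.5x per coordinate is
    # smallest at x = 0.25 (maximising f) and largest at x = -1 (minimising f).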
obj = lambda x: np.exp(-np.dot(x-0.5, x))
dim = 2
min_pt = np.array([-1] * dim)
max_pt = np.array([0.25] * dim)
self.problems.append(self._get_test_case_problem_instance(
obj, [-1] * dim, [1] * dim, min_pt, max_pt, '2D-gaussian'))
@classmethod
def _get_test_case_problem_instance(cls, obj, lb, ub, min_pt, max_pt, descr=''):
""" A wrapper which returns a problem instance as a list. """
min_val = float(obj(min_pt))
max_val = float(obj(max_pt))
problem_inst = Namespace(obj=obj, dim=len(lb), lb=lb, ub=ub, min_pt=min_pt,
max_pt=max_pt, min_val=min_val, max_val=max_val,
descr=descr)
return problem_inst
def test_random_maximise(self):
""" Test direct optmisation."""
self.report('Rand maximise:')
num_max_successes = 0
for prob in self.problems:
prob_bounds = np.concatenate((np.array(prob.lb).reshape(1, -1),
np.array(prob.ub).reshape(1, -1)),
axis=0).T
max_val_soln, _ = optimisers.random_maximise(
prob.obj, prob_bounds, 2 * self.max_evals, False)
diff = abs(prob.max_val - max_val_soln)
self.report(prob.descr + '(max):: True: %0.4f, Soln: %0.4f, diff: %0.4f'%(
prob.max_val, max_val_soln, diff), 'test_result')
max_is_successful = diff < 1e-3
num_max_successes += max_is_successful
# Check if successful
assert num_max_successes >= 1
def test_direct(self):
""" Test direct optmisation."""
self.report('DiRect minimise and maximise:')
num_min_successes = 0
num_max_successes = 0
for prob in self.problems:
      # First the minimum
min_val_soln, _, _ = optimisers.direct_ft_minimise(
prob.obj, prob.lb, prob.ub, self.max_evals)
diff = abs(prob.min_val - min_val_soln)
self.report(prob.descr + '(min):: True: %0.4f, Soln: %0.4f, diff: %0.4f.'%(
prob.min_val, min_val_soln, diff))
min_is_successful = diff < 1e-3
num_min_successes += min_is_successful
# Now the maximum
max_val_soln, _, _ = optimisers.direct_ft_maximise(
prob.obj, prob.lb, prob.ub, self.max_evals)
diff = abs(prob.max_val - max_val_soln)
self.report(prob.descr + '(max):: True: %0.4f, Soln: %0.4f, diff: %0.4f'%(
prob.max_val, max_val_soln, diff), 'test_result')
max_is_successful = diff < 1e-3
num_max_successes += max_is_successful
# Check if successful
assert num_min_successes == len(self.problems)
assert num_max_successes == len(self.problems)
def test_direct_times(self):
""" Tests the running time of the package with and without file writing. """
self.report('DiRect running times')
log_file_names = ['', 'test_log']
for prob in self.problems:
clock_times = []
real_times = []
for log_file_name in log_file_names:
start_clock = time.clock()
start_real_time = time.time()
_, _, _ = optimisers.direct_ft_minimise(
prob.obj, prob.lb, prob.ub, self.max_evals, log_file_name=log_file_name)
clock_time = time.clock() - start_clock
real_time = time.time() - start_real_time
clock_times.append(clock_time)
real_times.append(real_time)
if log_file_name:
os.remove(log_file_name)
# Print results out
result_str = ', '.join(['file: \'%s\': clk=%0.4f, real=%0.4f'%(log_file_names[i],
clock_times[i], real_times[i]) for i in
range(len(log_file_names))])
self.report('%s:: %s'%(prob.descr, result_str), 'test_result')
def test_direct_with_history(self):
""" Tests direct with history. """
self.report('DiRect with history.')
for prob in self.problems:
      max_val_soln, _, history = optimisers.direct_ft_maximise_with_history(
        prob.obj, prob.lb, prob.ub, self.max_evals)
      assert np.abs(max_val_soln - history.curr_opt_vals[-1]) < 1e-4
if __name__ == '__main__':
execute_tests()
| 5,155 | 36.911765 | 87 |
py
|
MFTreeSearchCV
|
MFTreeSearchCV-master/utils/ancillary_utils.py
|
"""
A collection of utilities for ancillary purposes.
-- [email protected]
"""
# pylint: disable=import-error
# pylint: disable=no-member
# pylint: disable=invalid-name
# pylint: disable=relative-import
import numpy as np
# Print lists as strings
def get_rounded_list(float_list, round_to_decimals=3):
""" Rounds the list and returns. """
ret = np.array(float_list).round(round_to_decimals)
if isinstance(float_list, list):
ret = list(ret)
return ret
def get_list_as_str(list_of_objs):
""" Returns the list as a string. """
return '[' + ' '.join([str(x) for x in list_of_objs]) + ']'
def get_list_of_floats_as_str(float_list, round_to_decimals=3):
""" Rounds the list and returns a string representation. """
float_list = get_rounded_list(float_list, round_to_decimals)
return get_list_as_str(float_list)
# Some other utilities
def is_non_decreasing_sequence(vals):
""" Returns true if vals is nondecreasing. """
for i in range(len(vals)-1):
if vals[i] > vals[i+1]:
return False
return True
# Some plotting utilities.
def plot_2d_function(func, bounds, x_label='x', y_label='y', title=None):
""" Plots a 2D function in bounds. """
# pylint: disable=unused-variable
dim_grid_size = 20
x_grid = np.linspace(bounds[0][0], bounds[0][1], dim_grid_size)
y_grid = np.linspace(bounds[1][0], bounds[1][1], dim_grid_size)
XX, YY = np.meshgrid(x_grid, y_grid)
f_vals = func(XX.ravel(), YY.ravel())
FF = f_vals.reshape(dim_grid_size, dim_grid_size)
# Create plot
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot_surface(XX, YY, FF)
plt.xlabel(x_label)
plt.ylabel(y_label)
if title is not None:
plt.title(title)
return fig, ax, plt
| 1,795 | 27.507937 | 73 |
py
|
MFTreeSearchCV
|
MFTreeSearchCV-master/utils/experimenters.py
|
"""
Harness to run experiments and save results.
-- [email protected]
"""
# pylint: disable=import-error
# pylint: disable=no-member
# pylint: disable=relative-import
# pylint: disable=abstract-class-not-used
from argparse import Namespace
import random
from time import time
import numpy as np
from scipy.io import savemat as sio_savemat
# Local imports
from reporters import get_reporter
class BasicExperimenter(object):
""" Base class for running experiments. """
def __init__(self, experiment_name, num_experiments, save_file_name,
save_file_extension='', reporter='default', random_seed_val='time'):
""" Constructor.
random_seed_val: If None we will not change the random seed. If it is
'time' we will set it to a time based value. If it is an int, we will set
the seed to that value.
"""
self.experiment_name = experiment_name
self.num_experiments = num_experiments
self.save_file_name = save_file_name
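    # If no explicit extension is given, infer it from the file name, and make sure the
    # file name ends with that extension.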
self.save_file_extension = (save_file_extension if save_file_extension else
self.save_file_name.split('.')[-1])
if not self.save_file_name.endswith('.' + self.save_file_extension):
self.save_file_name += '.' + self.save_file_extension
self.reporter = get_reporter(reporter)
self.to_be_saved = Namespace(experiment_name=self.experiment_name)
# We will need these going forward.
self.experiment_iter = 0
# Set the random seed
if random_seed_val is not None:
if random_seed_val == 'time':
random_seed_val = int(time() * 100) % 100000
self.reporter.writeln('Setting random seed to %d.'%(random_seed_val))
np.random.seed(random_seed_val)
random.seed(random_seed_val)
def save_results(self):
""" Saves results in save_file_name. """
self.reporter.write('Saving results (exp-iter:%d) to %s ... '%(self.experiment_iter,
self.save_file_name))
try:
if self.save_file_extension == 'mat':
sio_savemat(self.save_file_name, mdict=vars(self.to_be_saved))
else:
raise NotImplementedError('Only implemented saving mat files so far.')
save_successful = True
except IOError:
save_successful = False
# Report saving status
if save_successful:
self.reporter.writeln('successful.')
else:
self.reporter.writeln('unsuccessful!!')
def terminate_now(self):
""" Returns true if we should terminate now. Can be overridden in a child class. """
return self.experiment_iter >= self.num_experiments
def run_experiments(self):
""" This runs the experiments. """
self.reporter.writeln(self.get_experiment_header())
while not self.terminate_now():
# Prelims
self.experiment_iter += 1
iter_header = ('\nExperiment %d/%d:: '%(self.experiment_iter, self.num_experiments)
+ self.get_iteration_header())
iter_header += '\n' + '=' * len(iter_header)
self.reporter.write(iter_header)
      # Run the experiment iteration.
self.run_experiment_iteration()
# Save results
self.save_results()
# Wrap up the experiments
self.wrapup_experiments()
def get_experiment_header(self):
""" Something to pring before running all the experiments. Can be overridden in a
child class."""
# pylint: disable=no-self-use
return ''
def get_iteration_header(self):
""" A header for the particular iteration. """
# pylint: disable=no-self-use
return ''
def run_experiment_iteration(self):
""" Implements the current iteration of the exeperiment. """
raise NotImplementedError('Implement this in a child class.')
def wrapup_experiments(self):
""" Any code to wrap up the experiments goes here. """
# pylint: disable=no-self-use
pass
| 3,830 | 34.472222 | 89 |
py
|
MFTreeSearchCV
|
MFTreeSearchCV-master/utils/reporters.py
|
"""
Monitors are used to monitor the progress of any algorithm. Here we implement
the most basic monitor which will be inherited by monitors for specific processes.
-- [email protected]
"""
import sys
def get_reporter(reporter):
""" Returns a reporter based on what was passed as argument. If reporter is already
a reporter then it is returned. Otherwise, an appropriate reporter is constructed
and returned. """
if isinstance(reporter, str):
if reporter.lower() == 'default':
reporter = BasicReporter()
elif reporter.lower() == 'silent':
reporter = SilentReporter()
else:
raise ValueError('If reporter is string, it should be "default" or "silent".')
elif reporter is None:
reporter = SilentReporter()
elif not isinstance(reporter, BasicReporter):
raise ValueError('Pass either a string, BasicReporter or None for reporter. ')
return reporter
class BasicReporter(object):
""" The most basic monitor that implements printing etc. """
def __init__(self, out=sys.stdout):
""" Constructor. """
self.out = out
def write(self, msg, *_):
""" Writes a message to stdout. """
if self.out is not None:
self.out.write(msg)
self.out.flush()
def writeln(self, msg, *args):
""" Writes a message to stdout with a new line. """
self.write(msg + '\n', *args)
class SilentReporter(BasicReporter):
""" This reporter prints nothing. """
def __init__(self):
""" Constructor. """
super(SilentReporter, self).__init__(None)
| 1,537 | 29.156863 | 87 |
py
|
MFTreeSearchCV
|
MFTreeSearchCV-master/utils/base_test_class.py
|
"""
Implements a base class for unit tests with some common utilities.
"""
import numpy as np
import random
import sys
from time import time
import unittest
class BaseTestClass(unittest.TestCase):
""" An abstract base class for unit tests. """
def __init__(self, *args, **kwargs):
""" Constructor. """
super(BaseTestClass, self).__init__(*args, **kwargs)
@classmethod
def report(cls, msg, msg_type='test_header', out=sys.stdout):
" Reports a message. """
prefix = '' # default prefix and suffix
suffix = '\n'
if msg_type == 'header':
suffix = '\n' + '=' * len(msg) + '\n'
elif msg_type == 'test_header':
prefix = ' * '
elif msg_type == 'test_result':
prefix = ' - '
out.write(prefix + msg + suffix)
out.flush()
def execute_tests(seed_val='time'):
""" Executes the tests. """
# Set seed value
# pylint: disable=superfluous-parens
# pylint: disable=no-member
if seed_val is not None:
if seed_val == 'time':
seed_val = int(time()*10) % 100000
elif not isinstance(seed_val, int):
raise ValueError('seed_val should be \'time\', an integer or None.')
print('Setting random seed to %d.'%(seed_val))
np.random.seed(seed_val)
random.seed(seed_val)
# Run unit tests
unittest.main()
| 1,298 | 24.98 | 74 |
py
|
MFTreeSearchCV
|
MFTreeSearchCV-master/utils/general_utils.py
|
"""
A collection of very generic python utilities.
-- [email protected]
"""
# pylint: disable=import-error
# pylint: disable=no-member
# pylint: disable=invalid-name
# pylint: disable=relative-import
import numpy as np
def compare_dict(dict_1, dict_2):
""" Compares two dictionaries. """
# N.B: Taken from stackoverflow:
# http://stackoverflow.com/questions/4527942/comparing-two-dictionaries-in-python
dict_1_keys = set(dict_1.keys())
dict_2_keys = set(dict_2.keys())
intersect_keys = dict_1_keys.intersection(dict_2_keys)
added = dict_1_keys - dict_2_keys
removed = dict_2_keys - dict_1_keys
modified = {o: (dict_1[o], dict_2[o]) for o in intersect_keys if dict_1[o] != dict_2[o]}
same = set(o for o in intersect_keys if dict_1[o] == dict_2[o])
return added, removed, modified, same
def dicts_are_equal(dict_1, dict_2):
""" Returns true if dict_1 and dict_2 are equal. """
added, removed, modified, _ = compare_dict(dict_1, dict_2)
return len(added) == 0 and len(removed) == 0 and len(modified) == 0
def map_to_cube(pts, bounds):
""" Maps bounds to [0,1]^d and returns the representation in the cube. """
return (pts - bounds[:, 0])/(bounds[:, 1] - bounds[:, 0])
def map_to_bounds(pts, bounds):
""" Given a point in [0,1]^d, returns the representation in the original space. """
return pts * (bounds[:, 1] - bounds[:, 0]) + bounds[:, 0]
def compute_average_sq_prediction_error(Y1, Y2):
""" Returns the average prediction error. """
return np.linalg.norm(np.array(Y1) - np.array(Y2))**2 / len(Y1)
def dist_squared(X1, X2):
""" If X1 is n1xd and X2 is n2xd, this returns an n1xn2 matrix where the (i,j)th
entry is the squared distance between X1(i,:) and X2(j,:).
"""
n1, dim1 = X1.shape
n2, dim2 = X2.shape
if dim1 != dim2:
raise ValueError('Second dimension of X1 and X2 should be equal.')
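  # Uses the identity ||x - y||^2 = ||x||^2 + ||y||^2 - 2 x.y to compute all pairwise
  # squared distances without an explicit double loop.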
dist_sq = (np.outer(np.ones(n1), (X2**2).sum(axis=1))
+ np.outer((X1**2).sum(axis=1), np.ones(n2))
- 2*X1.dot(X2.T))
return dist_sq
def stable_cholesky(M):
""" Returns L, a 'stable' cholesky decomposition of M. L is lower triangular and
satisfies L*L' = M.
Sometimes nominally psd matrices are not psd due to numerical issues. By adding a
small value to the diagonal we can make it psd. This is what this function does.
      Use this only if you know that M should be psd. We do not check for errors.
"""
# pylint: disable=superfluous-parens
if M.size == 0:
return M # if you pass an empty array then just return it.
try:
# First try taking the Cholesky decomposition.
L = np.linalg.cholesky(M)
except np.linalg.linalg.LinAlgError:
# If it doesn't work, then try adding diagonal noise.
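    # The jitter starts at 1e-11 * max(diag(M)) and is increased by a factor of 10 on
    # every failed attempt until the decomposition succeeds.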
diag_noise_power = -11
max_M = np.diag(M).max()
chol_decomp_succ = False
while not chol_decomp_succ:
try:
L = np.linalg.cholesky(M + (10**diag_noise_power * max_M) * np.eye(M.shape[0]))
chol_decomp_succ = True
except np.linalg.linalg.LinAlgError:
diag_noise_power += 1
if diag_noise_power >= 5:
        print('**************** Cholesky failed: Added diag noise = %e'%(
          10**diag_noise_power * max_M))
return L
def draw_gaussian_samples(num_samples, mu, K):
""" Draws num_samples samples from a Gaussian distribution with mean mu and
covariance K.
"""
num_pts = len(mu)
L = stable_cholesky(K)
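  # Reparameterisation: if U ~ N(0, I) then mu + L U ~ N(mu, K) since L L' = K.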
U = np.random.normal(size=(num_pts, num_samples))
V = L.dot(U).T + mu
return V
| 3,506 | 33.048544 | 90 |
py
|
MFTreeSearchCV
|
MFTreeSearchCV-master/utils/__init__.py
|
"""
Some general utility functions we will need.
-- [email protected]
"""
| 81 | 15.4 | 46 |
py
|
MFTreeSearchCV
|
MFTreeSearchCV-master/utils/optimisers.py
|
"""
A collection of wrappers for optimisng a function.
-- [email protected]
"""
# pylint: disable=import-error
# pylint: disable=no-member
# pylint: disable=invalid-name
# pylint: disable=relative-import
# pylint: disable=superfluous-parens
from argparse import Namespace
from datetime import datetime
import os
import numpy as np
# Local imports
try:
import direct_fortran.direct as direct_ft_wrap
except ImportError:
print('Could not import fortran direct library')
direct_ft_wrap = None
from general_utils import map_to_bounds
def random_maximise(obj, bounds, max_evals, vectorised=True):
""" Optimises a function by randomly sampling and choosing its maximum. """
dim = len(bounds)
rand_pts = map_to_bounds(np.random.random((int(max_evals), dim)), bounds)
if vectorised:
obj_vals = obj(rand_pts)
else:
obj_vals = np.array([obj(x) for x in rand_pts])
max_idx = obj_vals.argmax()
max_val = obj_vals[max_idx]
max_pt = rand_pts[max_idx]
return max_val, max_pt
# DIRECT #########################################################################
# Some constants
_MAX_DIRECT_FN_EVALS = 2.6e6 # otherwise the fortran software complains
def direct_ft_minimise(obj, lower_bounds, upper_bounds, max_evals,
eps=1e-5,
return_history=False,
max_iterations=None,
alg_method=0,
fglobal=-1e100,
fglper=0.01,
volper=-1.0,
sigmaper=-1.0,
log_file_name='',
):
"""
A wrapper for the fortran implementation. The four mandatory arguments are self
explanatory. If return_history is True it also returns the history of evaluations.
max_iterations is the maximum number of iterations of the direct algorithm.
I am not sure what the remaining arguments are for.
"""
# pylint: disable=too-many-locals
# pylint: disable=too-many-arguments
# Preliminaries.
max_evals = min(_MAX_DIRECT_FN_EVALS, max_evals) # otherwise the fortran sw complains.
max_iterations = max_evals if max_iterations is None else max_iterations
lower_bounds = np.array(lower_bounds, dtype=np.float64)
upper_bounds = np.array(upper_bounds, dtype=np.float64)
if len(lower_bounds) != len(upper_bounds):
raise ValueError('The dimensionality of the lower and upper bounds should match.')
# Create a wrapper to comply with the fortran requirements.
def _objective_wrap(x, *_):
""" A wrapper to comply with the fortran requirements. """
return (obj(x), 0)
# Some dummy data to comply with the fortran requirements.
iidata = np.ones(0, dtype=np.int32)
ddata = np.ones(0, dtype=np.float64)
cdata = np.ones([0, 40], dtype=np.uint8)
# Call the function.
min_pt, min_val, _ = direct_ft_wrap.direct(_objective_wrap,
eps,
max_evals,
max_iterations,
lower_bounds,
upper_bounds,
alg_method,
log_file_name,
fglobal,
fglper,
volper,
sigmaper,
iidata,
ddata,
cdata
)
if return_history:
# TODO: implement this. Read it off the log file.
pass
else:
history = None
# return
return min_val, min_pt, history
def direct_ft_maximise(obj, lower_bounds, upper_bounds, max_evals, **kwargs):
"""
A wrapper for maximising a function which calls direct_ft_minimise. See arguments
under direct_ft_minimise for more details.
"""
min_obj = lambda x: -obj(x)
min_val, max_pt, history = direct_ft_minimise(min_obj, lower_bounds, upper_bounds,
max_evals, **kwargs)
max_val = - min_val
# TODO: Fix history here.
return max_val, max_pt, history
def direct_ft_maximise_with_history(obj, lower_bounds, upper_bounds, max_evals, **kwargs):
"""
A wrapper for maximising a function which calls direct_ft_minimise. But also
returns the history.
"""
log_file_name = 'direct_log_%s'%(datetime.now().strftime('%m%d-%H%M%S'))
max_val, max_pt, _ = direct_ft_maximise(obj, lower_bounds, upper_bounds, max_evals,
log_file_name=log_file_name, **kwargs)
history = get_history_from_direct_log(log_file_name)
# delete file
os.remove(log_file_name)
return max_val, max_pt, history
def get_history_from_direct_log(log_file_name):
""" Returns the history from the direct log file. """
saved_iterations = [0]
saved_max_vals = [-np.inf]
phase = 'boiler'
log_file_handle = open(log_file_name, 'r')
for line in log_file_handle.readlines():
words = line.strip().split()
if phase == 'boiler':
if words[0] == 'Iteration':
phase = 'results'
elif phase == 'results':
if len(words) == 3 and words[0].isdigit():
saved_iterations.append(int(words[1]))
saved_max_vals.append(-float(words[2]))
else:
phase = 'final'
elif phase == 'final':
if words[0] == 'Final':
saved_max_vals.append(max(-float(words[-1]), saved_max_vals[-1]))
# doing max as the fortran library rounds off the last result for some reason.
if words[0] == 'Number':
saved_iterations.append(int(words[-1]))
# Now fill in the rest of the history.
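  # curr_opt_vals[t] holds the best value seen after t evaluations: each saved value
  # is held constant until the evaluation count at which the next one was recorded.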
curr_opt_vals = np.zeros((saved_iterations[-1]), dtype=np.float64)
for i in range(len(saved_iterations)-1):
curr_opt_vals[saved_iterations[i]:saved_iterations[i+1]] = saved_max_vals[i]
curr_opt_vals[-1] = saved_max_vals[-1]
return Namespace(curr_opt_vals=curr_opt_vals)
def direct_maximise_from_mfof(mfof, max_evals, **kwargs):
""" Direct maximise from an mfof object. """
obj = lambda x: mfof.eval_single(mfof.opt_fidel, x)
lower_bounds = mfof.domain_bounds[:, 0]
upper_bounds = mfof.domain_bounds[:, 1]
return direct_ft_maximise_with_history(obj, lower_bounds, upper_bounds, max_evals,
**kwargs)
# DIRECT end ######################################################################
| 6,624 | 37.517442 | 90 |
py
|
MFTreeSearchCV
|
MFTreeSearchCV-master/utils/unittest_general_utils.py
|
"""
Test cases for functions in general_utils.py
-- [email protected]
"""
# pylint: disable=import-error
# pylint: disable=no-member
# pylint: disable=invalid-name
# pylint: disable=relative-import
import numpy as np
import general_utils
from base_test_class import BaseTestClass, execute_tests
class GeneralUtilsTestCase(BaseTestClass):
"""Unit test class for general utilities. """
def __init__(self, *args, **kwargs):
super(GeneralUtilsTestCase, self).__init__(*args, **kwargs)
def setUp(self):
""" Sets up attributes. """
# For dictionary
self.dict_1 = {'a':1, 'b':2, 'c':3}
self.dict_2 = {'b':2, 'a':1, 'c':3}
self.dict_3 = {'a':4, 'b':5, 'c':6}
self.dict_4 = {'d':1, 'e':2, 'f':3}
# For dist squared
self.X1 = np.array([[1, 2, 3], [1, 2, 4], [2, 3, 4.5]])
self.X2 = np.array([[1, 2, 4], [1, 2, 5], [2, 3, 5]])
self.true_dist_sq = np.array([[1, 4, 6], [0, 1, 3], [2.25, 2.25, 0.25]])
def test_compare_dict(self):
""" Test compare_dict function."""
self.report('dicts_are_equal')
assert general_utils.dicts_are_equal(self.dict_1, self.dict_2)
assert not general_utils.dicts_are_equal(self.dict_1, self.dict_3)
assert not general_utils.dicts_are_equal(self.dict_1, self.dict_4)
def test_dist_squared(self):
""" Tests the squared distance function. """
self.report('dist_squared')
comp_dist_sq = general_utils.dist_squared(self.X1, self.X2)
assert (self.true_dist_sq == comp_dist_sq).all()
def test_mapping_to_cube_and_bound(self):
""" Test map_to_cube and map_to_bounds. """
self.report('map_to_cube and map_to_bounds')
bounds = np.array([[1, 3], [2, 4], [5, 6]])
x = np.array([1.7, 3.1, 5.5])
X = np.array([[1.7, 3.1, 5.5], [2.1, 2.9, 5.0]])
y = np.array([0.35, 0.55, 0.5])
Y = np.array([[0.35, 0.55, 0.5], [0.55, 0.45, 0]])
# Map to cube
y_ = general_utils.map_to_cube(x, bounds)
Y_ = general_utils.map_to_cube(X, bounds)
# Map to Bounds
x_ = general_utils.map_to_bounds(y, bounds)
X_ = general_utils.map_to_bounds(Y, bounds)
# Check if correct.
assert np.linalg.norm(y - y_) < 1e-5
assert np.linalg.norm(Y - Y_) < 1e-5
assert np.linalg.norm(x - x_) < 1e-5
assert np.linalg.norm(X - X_) < 1e-5
def test_compute_average_sq_prediction_error(self):
""" Tests compute_average_sq_prediction_error. """
self.report('compute_average_sq_prediction_error')
Y1 = [0, 1, 2]
Y2 = [2, 0, 1]
res = general_utils.compute_average_sq_prediction_error(Y1, Y2)
assert np.abs(res - 2.0) < 1e-5
def test_stable_cholesky(self):
""" Tests for stable cholesky. """
self.report('stable_cholesky')
M = np.random.normal(size=(5, 5))
M = M.dot(M.T)
L = general_utils.stable_cholesky(M)
assert np.linalg.norm(L.dot(L.T) - M) < 1e-5
def test_draw_gaussian_samples(self):
""" Tests for draw gaussian samples. """
self.report('draw_gaussian_samples. Probabilistic test, could fail at times')
num_samples = 10000
num_pts = 3
mu = list(range(num_pts))
K = np.random.normal(size=(num_pts, num_pts))
K = K.dot(K.T)
samples = general_utils.draw_gaussian_samples(num_samples, mu, K)
sample_mean = samples.mean(axis=0)
sample_centralised = samples - sample_mean
sample_covar = sample_centralised.T.dot(sample_centralised) / num_samples
mean_tol = 4 * np.linalg.norm(mu) / np.sqrt(num_samples)
covar_tol = 4 * np.linalg.norm(K) / np.sqrt(num_samples)
mean_err = np.linalg.norm(mu - sample_mean)
covar_err = np.linalg.norm(K - sample_covar)
self.report('Mean error (tol): ' + str(mean_err) + ' (' + str(mean_tol) + ')',
'test_result')
self.report('Cov error (tol): ' + str(covar_err) + ' (' + str(covar_tol) + ')',
'test_result')
assert mean_err < mean_tol
assert covar_err < covar_tol
if __name__ == '__main__':
execute_tests()
| 3,943 | 34.854545 | 83 |
py
|
MFTreeSearchCV
|
MFTreeSearchCV-master/utils/direct_fortran/__init__.py
| 0 | 0 | 0 |
py
|
|
MFTreeSearchCV
|
MFTreeSearchCV-master/utils/direct_fortran/simple_direct_test.py
|
import numpy as np
import direct
def main():
""" Main function. """
obj = lambda x: (np.dot(x-0.1,x), 0)
lower_bounds = [-1] * 4
  upper_bounds = [1] * 4
dim = len(lower_bounds)
eps = 1e-5
max_func_evals = 1000
max_iterations = max_func_evals
algmethod = 0
# _log_file = 'dir_file_name'
_log_file = ''
fglobal = -1e100
fglper = 0.01
volper = -1.0
sigmaper = -1.0
# user_data = None
def _objective_wrap(x, iidata, ddata, cdata, n, iisize, idsize, icsize):
"""
A wrapper to comply with the fortran requirements.
"""
return obj(x)
iidata = np.ones(0, dtype=np.int32)
ddata = np.ones(0, dtype=np.float64)
cdata = np.ones([0, 40], dtype=np.uint8)
soln = direct.direct(_objective_wrap,
eps,
max_func_evals,
max_iterations,
np.array(lower_bounds, dtype=np.float64),
np.array(upper_bounds, dtype=np.float64),
algmethod,
_log_file,
fglobal,
fglper,
volper,
sigmaper,
iidata,
ddata,
cdata
)
print(soln)
if __name__ == '__main__':
main()
| 1,367 | 22.586207 | 74 |
py
|
Tamanduatei_Vulnerability
|
Tamanduatei_Vulnerability-main/vulnerability.py
|
import igraph as ig
import numpy as np
import time
import multiprocessing as mp
f2 = open("screen.out", 'w')
f = open("result.dat", "w")
# Calculating the efficiency of a given network
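# Global efficiency: E = (1/(N*(N-1))) * sum over ordered pairs i != j of 1/d_ij,
# where d_ij is the shortest-path distance; zero-distance (self) pairs are skipped
# and unreachable pairs (infinite distance) contribute nothing.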
def eff_global(g, weights):
N = g.vcount()
eff = 0.0
sp = g.shortest_paths(weights=weights)
for l in sp:
for ll in l:
if ll != 0.0:
# the eff of each edge is simply 1.0/(shortest path)
eff += 1.0/float(ll)
E = eff / (float(N)*(float(N)-1.0))
return E
def vuln_for_each_node(g, weights, k, E):
g_copy = g.copy()
# delete edges connected to node k
del_list = []
    for target_vertex_id in range(g_copy.vcount()):
        eid = g_copy.get_eid(k, target_vertex_id, error=False)  # -1 if no such edge
        if eid != -1:
            del_list.append(eid)
g_copy.delete_edges(del_list)
# compute the efficiency of the network after removing the edges that incide on node k
Ek = eff_global(g_copy, weights)
# Vulnerability index
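    # Vk = (E - Ek) / E: the relative drop in global efficiency when node k is isolated.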
Vk = (E - Ek)/E
del del_list
return Vk
def vulnerability(g, weights, procs = 1, f2=None):
x=time.time()
f2.write("Calculating global efficiency\n")
# Global efficiency of the original network
E = eff_global(g, weights)
f2.write(f"Time to calculate: {time.time()-x} seconds\n")
# For each node, remove its adjacency edges, compute the global efficiency of the remaining network,
# and its associated Vulnerability index
# Parallel version
# Number of subprocesses
f2.write("Starting the parallelization\n")
pool = mp.Pool(procs)
argss = [(g, weights, k, E) for k in range(g.vcount())]
    vuln = pool.starmap(vuln_for_each_node, argss)
    pool.close()
    pool.join()
    return vuln
f2.write("Reading the network\n")
g = ig.Graph.Read_Edgelist("edgelist.dat")
f2.write("Network read\n")
x = time.time()
f2.write(f"Starting the vulnerability calculation\n")
vul = vulnerability(g, None, 4, f2)
f2.write(f"End of calculation: {(time.time()-x)/60/60} hours\n")
f.write(f"no\tvul\n")
for i in range(len(vul)):
f.write(f"{i}\t{vul[i]}\n")
| 2,290 | 27.283951 | 108 |
py
|
uMatrix
|
uMatrix-master/tools/make-firefox-meta.py
|
#!/usr/bin/env python3
import os
import json
import re
import sys
if len(sys.argv) == 1 or not sys.argv[1]:
raise SystemExit('Build dir missing.')
proj_dir = os.path.join(os.path.split(os.path.abspath(__file__))[0], '..')
build_dir = os.path.abspath(sys.argv[1])
version = ''
with open(os.path.join(proj_dir, 'dist', 'version')) as f:
version = f.read().strip()
webext_manifest = {}
webext_manifest_file = os.path.join(build_dir, 'manifest.json')
with open(webext_manifest_file) as f:
webext_manifest = json.load(f)
webext_manifest['version'] = version
match = re.search(r'^\d+\.\d+\.\d+(b|rc)', version)
if not match:
# https://bugzilla.mozilla.org/show_bug.cgi?id=1459007
# By design Firefox opens the sidebar with new installation of
# uMatrix when sidebar_action is present in the manifest.
    # Remove sidebarAction support for stable releases of uMatrix.
del webext_manifest['sidebar_action']
with open(webext_manifest_file, 'w') as f2:
json.dump(webext_manifest, f2, indent=2, separators=(',', ': '), sort_keys=True)
f2.write('\n')
| 1,077 | 29.8 | 84 |
py
|
uMatrix
|
uMatrix-master/tools/make-opera-meta.py
|
#!/usr/bin/env python3
import os
import json
import re
import sys
if len(sys.argv) == 1 or not sys.argv[1]:
raise SystemExit('Build dir missing.')
proj_dir = os.path.join(os.path.split(os.path.abspath(__file__))[0], '..')
build_dir = os.path.abspath(sys.argv[1])
version = ''
with open(os.path.join(proj_dir, 'dist', 'version')) as f:
version = f.read().strip()
# Import data from chromium platform
chromium_manifest = {}
opera_manifest = {}
chromium_manifest_file = os.path.join(proj_dir, 'platform', 'chromium', 'manifest.json')
with open(chromium_manifest_file) as f1:
chromium_manifest = json.load(f1)
# WebExtension
opera_manifest_add_file = os.path.join(proj_dir, 'platform', 'opera', 'manifest-add.json')
with open(opera_manifest_add_file) as f2:
opera_manifest = json.load(f2)
for key in chromium_manifest:
if key not in opera_manifest:
opera_manifest[key] = chromium_manifest[key]
# Development build? If so, modify name accordingly.
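# A tag such as 1.2.3b4 becomes extension version 1.2.3.4 and 1.2.3rc4 becomes
# 1.2.3.104 (rc revisions are offset by 100); ' development build' is appended to
# the name of such builds.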
match = re.search(r'^(\d+\.\d+\.\d+)(\.|b|rc)(\d+)$', version)
if match:
version = match.group(1)
revision = int(match.group(3))
if match.group(2) == 'rc':
revision += 100
version += '.' + str(revision)
opera_manifest['name'] += ' development build'
opera_manifest['short_name'] += ' dev build'
opera_manifest['browser_action']['default_title'] += ' dev build'
opera_manifest['version'] = version
opera_manifest_file = os.path.join(build_dir, 'manifest.json')
with open(opera_manifest_file, 'w') as f2:
json.dump(opera_manifest, f2, indent=2, separators=(',', ': '), sort_keys=True)
f2.write('\n')
| 1,623 | 29.641509 | 90 |
py
|
uMatrix
|
uMatrix-master/tools/make-chromium-meta.py
|
#!/usr/bin/env python3
import os
import json
import re
import sys
if len(sys.argv) == 1 or not sys.argv[1]:
raise SystemExit('Build dir missing.')
proj_dir = os.path.join(os.path.split(os.path.abspath(__file__))[0], '..')
build_dir = os.path.abspath(sys.argv[1])
version = ''
with open(os.path.join(proj_dir, 'dist', 'version')) as f:
version = f.read().strip()
manifest_out = {}
manifest_out_file = os.path.join(build_dir, 'manifest.json')
with open(manifest_out_file) as f:
manifest_out = json.load(f)
# Development build? If so, modify name accordingly.
match = re.search(r'^(\d+\.\d+\.\d+)(\.|b|rc)(\d+)$', version)
if match:
version = match.group(1)
revision = int(match.group(3))
if match.group(2) == 'rc':
revision += 100
version += '.' + str(revision)
manifest_out['name'] += ' development build'
manifest_out['short_name'] += ' dev build'
manifest_out['browser_action']['default_title'] += ' dev build'
manifest_out['version'] = version
with open(manifest_out_file, 'w') as f:
json.dump(manifest_out, f, indent=2, separators=(',', ': '), sort_keys=True)
f.write('\n')
| 1,140 | 27.525 | 80 |
py
|
uMatrix
|
uMatrix-master/dist/chromium/publish-beta.py
|
#!/usr/bin/env python3
import datetime
import json
import jwt
import os
import re
import requests
import shutil
import subprocess
import sys
import tempfile
import time
import zipfile
from distutils.version import StrictVersion
from string import Template
# - Download target (raw) uMatrix.chromium.zip from GitHub
# - This is referred to as "raw" package
# - This will fail if not a dev build
# - Upload uMatrix.chromium.zip to Chrome store
# - Publish uMatrix.chromium.zip to Chrome store
# Find path to project root
projdir = os.path.split(os.path.abspath(__file__))[0]
while not os.path.isdir(os.path.join(projdir, '.git')):
projdir = os.path.normpath(os.path.join(projdir, '..'))
# We need a version string to work with
if len(sys.argv) >= 2 and sys.argv[1]:
version = sys.argv[1]
else:
version = input('Github release version: ')
version = version.strip()
if not re.search(r'^\d+\.\d+\.\d+(b|rc)\d+$', version):
print('Error: Invalid version string.')
exit(1)
cs_extension_id = 'eckgcipdkhcfghnmincccnhpdmnbefki'
tmpdir = tempfile.TemporaryDirectory()
raw_zip_filename = 'uMatrix_'+ version + '.chromium.zip'
raw_zip_filepath = os.path.join(tmpdir.name, raw_zip_filename)
github_owner = 'gorhill'
github_repo = 'uMatrix'
# Load/save auth secrets
# The build directory is excluded from git
ubo_secrets = dict()
ubo_secrets_filename = os.path.join(projdir, 'dist', 'build', 'ubo_secrets')
if os.path.isfile(ubo_secrets_filename):
with open(ubo_secrets_filename) as f:
ubo_secrets = json.load(f)
def input_secret(prompt, token):
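    # Prompt for a secret; a checkmark in the prompt indicates a cached value exists in
    # the (git-ignored) ubo_secrets file and an empty answer reuses it. New or changed
    # values are written back to that file, which is created with 0600 permissions.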
if token in ubo_secrets:
prompt += ' ✔'
prompt += ': '
value = input(prompt).strip()
if len(value) == 0:
if token not in ubo_secrets:
print('Token error:', token)
exit(1)
value = ubo_secrets[token]
elif token not in ubo_secrets or value != ubo_secrets[token]:
ubo_secrets[token] = value
exists = os.path.isfile(ubo_secrets_filename)
with open(ubo_secrets_filename, 'w') as f:
json.dump(ubo_secrets, f, indent=2)
if not exists:
os.chmod(ubo_secrets_filename, 0o600)
return value
# GitHub API token
github_token = input_secret('Github token', 'github_token')
github_auth = 'token ' + github_token
#
# Get metadata from GitHub about the release
#
# https://developer.github.com/v3/repos/releases/#get-a-single-release
print('Downloading release info from GitHub...')
release_info_url = 'https://api.github.com/repos/{0}/{1}/releases/tags/{2}'.format(github_owner, github_repo, version)
headers = { 'Authorization': github_auth, }
response = requests.get(release_info_url, headers=headers)
if response.status_code != 200:
print('Error: Release not found: {0}'.format(response.status_code))
exit(1)
release_info = response.json()
#
# Extract URL to raw package from metadata
#
# Find url for uMatrix.chromium.zip
raw_zip_url = ''
for asset in release_info['assets']:
if asset['name'] == raw_zip_filename:
raw_zip_url = asset['url']
if len(raw_zip_url) == 0:
print('Error: Release asset URL not found')
exit(1)
#
# Download raw package from GitHub
#
# https://developer.github.com/v3/repos/releases/#get-a-single-release-asset
print('Downloading raw zip package from GitHub...')
headers = {
'Authorization': github_auth,
'Accept': 'application/octet-stream',
}
response = requests.get(raw_zip_url, headers=headers)
# Redirections are transparently handled:
# http://docs.python-requests.org/en/master/user/quickstart/#redirection-and-history
if response.status_code != 200:
print('Error: Downloading raw package failed -- server error {0}'.format(response.status_code))
exit(1)
with open(raw_zip_filepath, 'wb') as f:
f.write(response.content)
print('Downloaded raw package saved as {0}'.format(raw_zip_filepath))
#
# Upload to Chrome store
#
# Auth tokens
cs_id = input_secret('Chrome store id', 'cs_id')
cs_secret = input_secret('Chrome store secret', 'cs_secret')
cs_refresh = input_secret('Chrome store refresh token', 'cs_refresh')
print('Uploading to Chrome store...')
with open(raw_zip_filepath, 'rb') as f:
print('Generating access token...')
auth_url = 'https://accounts.google.com/o/oauth2/token'
auth_payload = {
'client_id': cs_id,
'client_secret': cs_secret,
'grant_type': 'refresh_token',
'refresh_token': cs_refresh,
}
auth_response = requests.post(auth_url, data=auth_payload)
if auth_response.status_code != 200:
print('Error: Auth failed -- server error {0}'.format(auth_response.status_code))
print(auth_response.text)
exit(1)
response_dict = auth_response.json()
if 'access_token' not in response_dict:
print('Error: Auth failed -- no access token')
exit(1)
# Prepare access token
cs_auth = 'Bearer ' + response_dict['access_token']
headers = {
'Authorization': cs_auth,
'x-goog-api-version': '2',
}
# Upload
print('Uploading package...')
upload_url = 'https://www.googleapis.com/upload/chromewebstore/v1.1/items/{0}'.format(cs_extension_id)
upload_response = requests.put(upload_url, headers=headers, data=f)
f.close()
if upload_response.status_code != 200:
print('Upload failed -- server error {0}'.format(upload_response.status_code))
print(upload_response.text)
exit(1)
    response_dict = upload_response.json()
if 'uploadState' not in response_dict or response_dict['uploadState'] != 'SUCCESS':
print('Upload failed -- server error {0}'.format(response_dict['uploadState']))
exit(1)
print('Upload succeeded.')
# Publish
print('Publishing package...')
publish_url = 'https://www.googleapis.com/chromewebstore/v1.1/items/{0}/publish'.format(cs_extension_id)
headers = {
'Authorization': cs_auth,
'x-goog-api-version': '2',
'Content-Length': '0',
}
publish_response = requests.post(publish_url, headers=headers)
if publish_response.status_code != 200:
print('Error: Chrome store publishing failed -- server error {0}'.format(publish_response.status_code))
exit(1)
    response_dict = publish_response.json()
if 'status' not in response_dict or response_dict['status'][0] != 'OK':
print('Publishing failed -- server error {0}'.format(response_dict['status']))
exit(1)
print('Publishing succeeded.')
print('All done.')
| 6,489 | 32.626943 | 118 |
py
|
uMatrix
|
uMatrix-master/dist/firefox/publish-signed-beta.py
|
#!/usr/bin/env python3
import datetime
import json
import jwt
import os
import re
import requests
import shutil
import subprocess
import sys
import tempfile
import time
import zipfile
from distutils.version import LooseVersion
from string import Template
# - Download target (raw) uMatrix.firefox.xpi from GitHub
# - This is referred to as "raw" package
# - This will fail if not a dev build
# - Modify raw package to make it self-hosted
# - This is referred to as "unsigned" package
# - Ask AMO to sign uMatrix.firefox.xpi
# - Generate JWT to be used for communication with server
# - Upload unsigned package to AMO
# - Wait for a valid download URL for signed package
# - Download signed package as uMatrix.firefox.signed.xpi
# - This is referred to as "signed" package
# - Upload uMatrix.firefox.signed.xpi to GitHub
# - Remove uMatrix.firefox.xpi from GitHub
# - Modify updates.json to point to new version
# - Commit changes to repo
# Find path to project root
projdir = os.path.split(os.path.abspath(__file__))[0]
while not os.path.isdir(os.path.join(projdir, '.git')):
projdir = os.path.normpath(os.path.join(projdir, '..'))
# Check that found project root is valid
version_filepath = os.path.join(projdir, 'dist', 'version')
if not os.path.isfile(version_filepath):
print('Version file not found.')
exit(1)
# We need a version string to work with
if len(sys.argv) >= 2 and sys.argv[1]:
tag_version = sys.argv[1]
else:
tag_version = input('Github release version: ')
tag_version = tag_version.strip()
match = re.search(r'^(\d+\.\d+\.\d+)(?:(b|rc)(\d+))?$', tag_version)
if not match:
print('Error: Invalid version string.')
exit(1)
ext_version = match.group(1)
if match.group(2):
revision = int(match.group(3))
if match.group(2) == 'rc':
        revision += 100
ext_version += '.' + str(revision)
extension_id = '[email protected]'
tmpdir = tempfile.TemporaryDirectory()
raw_xpi_filename = 'uMatrix_' + tag_version + '.firefox.xpi'
raw_xpi_filepath = os.path.join(tmpdir.name, raw_xpi_filename)
unsigned_xpi_filepath = os.path.join(tmpdir.name, 'uMatrix.firefox.unsigned.xpi')
signed_xpi_filename = 'uMatrix_' + tag_version + '.firefox.signed.xpi'
signed_xpi_filepath = os.path.join(tmpdir.name, signed_xpi_filename)
github_owner = 'gorhill'
github_repo = 'uMatrix'
# Load/save auth secrets
# The build directory is excluded from git
ubo_secrets = dict()
ubo_secrets_filename = os.path.join(projdir, 'dist', 'build', 'ubo_secrets')
if os.path.isfile(ubo_secrets_filename):
with open(ubo_secrets_filename) as f:
ubo_secrets = json.load(f)
def input_secret(prompt, token):
if token in ubo_secrets:
prompt += ' ✔'
prompt += ': '
value = input(prompt).strip()
if len(value) == 0:
if token not in ubo_secrets:
print('Token error:', token)
exit(1)
value = ubo_secrets[token]
elif token not in ubo_secrets or value != ubo_secrets[token]:
ubo_secrets[token] = value
exists = os.path.isfile(ubo_secrets_filename)
with open(ubo_secrets_filename, 'w') as f:
json.dump(ubo_secrets, f, indent=2)
if not exists:
os.chmod(ubo_secrets_filename, 0o600)
return value
# GitHub API token
github_token = input_secret('Github token', 'github_token')
github_auth = 'token ' + github_token
#
# Get metadata from GitHub about the release
#
# https://developer.github.com/v3/repos/releases/#get-a-single-release
print('Downloading release info from GitHub...')
release_info_url = 'https://api.github.com/repos/{0}/{1}/releases/tags/{2}'.format(github_owner, github_repo, tag_version)
headers = { 'Authorization': github_auth, }
response = requests.get(release_info_url, headers=headers)
if response.status_code != 200:
print('Error: Release not found: {0}'.format(response.status_code))
exit(1)
release_info = response.json()
#
# Extract URL to raw package from metadata
#
# Find url for uMatrix.firefox.xpi
raw_xpi_url = ''
for asset in release_info['assets']:
if asset['name'] == signed_xpi_filename:
print('Error: Found existing signed self-hosted package.')
exit(1)
if asset['name'] == raw_xpi_filename:
raw_xpi_url = asset['url']
if len(raw_xpi_url) == 0:
print('Error: Release asset URL not found')
exit(1)
#
# Download raw package from GitHub
#
# https://developer.github.com/v3/repos/releases/#get-a-single-release-asset
print('Downloading raw xpi package from GitHub...')
headers = {
'Authorization': github_auth,
'Accept': 'application/octet-stream',
}
response = requests.get(raw_xpi_url, headers=headers)
# Redirections are transparently handled:
# http://docs.python-requests.org/en/master/user/quickstart/#redirection-and-history
if response.status_code != 200:
print('Error: Downloading raw package failed -- server error {0}'.format(response.status_code))
exit(1)
with open(raw_xpi_filepath, 'wb') as f:
f.write(response.content)
print('Downloaded raw package saved as {0}'.format(raw_xpi_filepath))
#
# Convert the package to a self-hosted one: add `update_url` to the manifest
#
print('Converting raw xpi package into self-hosted xpi package...')
with zipfile.ZipFile(raw_xpi_filepath, 'r') as zipin:
with zipfile.ZipFile(unsigned_xpi_filepath, 'w') as zipout:
for item in zipin.infolist():
data = zipin.read(item.filename)
if item.filename == 'manifest.json':
manifest = json.loads(bytes.decode(data))
manifest['browser_specific_settings']['gecko']['update_url'] = 'https://raw.githubusercontent.com/{0}/{1}/master/dist/firefox/updates.json'.format(github_owner, github_repo)
data = json.dumps(manifest, indent=2, separators=(',', ': '), sort_keys=True).encode()
zipout.writestr(item, data)
#
# Ask AMO to sign the self-hosted package
# - https://developer.mozilla.org/en-US/Add-ons/Distribution#Distributing_your_add-on
# - https://pyjwt.readthedocs.io/en/latest/usage.html
# - https://addons-server.readthedocs.io/en/latest/topics/api/auth.html
# - https://addons-server.readthedocs.io/en/latest/topics/api/signing.html
#
amo_api_key = ''
amo_secret = ''
def get_jwt_auth():
global amo_api_key
if amo_api_key == '':
amo_api_key = input_secret('AMO API key', 'amo_api_key')
global amo_secret
if amo_secret == '':
amo_secret = input_secret('AMO API secret', 'amo_secret')
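    # Build a short-lived JWT (15 s expiry here) with a random nonce for authenticating
    # against the AMO API; a fresh token is generated for every request.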
amo_nonce = os.urandom(8).hex()
jwt_payload = {
'iss': amo_api_key,
'jti': amo_nonce,
'iat': datetime.datetime.utcnow(),
'exp': datetime.datetime.utcnow() + datetime.timedelta(seconds=15),
}
return 'JWT ' + jwt.encode(jwt_payload, amo_secret).decode()
print('Ask AMO to sign self-hosted xpi package...')
with open(unsigned_xpi_filepath, 'rb') as f:
# https://blog.mozilla.org/addons/2019/11/11/security-improvements-in-amo-upload-tools/
# "We recommend allowing up to 15 minutes."
interval = 60 # check every 60 seconds
countdown = 15 * 60 / interval # for at most 15 minutes
headers = { 'Authorization': get_jwt_auth(), }
data = { 'channel': 'unlisted' }
files = { 'upload': f, }
signing_url = 'https://addons.mozilla.org/api/v3/addons/{0}/versions/{1}/'.format(extension_id, ext_version)
print('Submitting package to be signed...')
response = requests.put(signing_url, headers=headers, data=data, files=files)
if response.status_code != 202:
print('Error: Creating new version failed -- server error {0}'.format(response.status_code))
print(response.text)
exit(1)
print('Request for signing self-hosted xpi package succeeded.')
    signing_request_response = response.json()
f.close()
print('Waiting for AMO to process the request to sign the self-hosted xpi package...')
# Wait for signed package to be ready
signing_check_url = signing_request_response['url']
while True:
sys.stdout.write('.')
sys.stdout.flush()
time.sleep(interval)
countdown -= 1
if countdown <= 0:
print('Error: AMO signing timed out')
exit(1)
headers = { 'Authorization': get_jwt_auth(), }
response = requests.get(signing_check_url, headers=headers)
if response.status_code != 200:
print('Error: AMO signing failed -- server error {0}'.format(response.status_code))
print(response.text)
exit(1)
signing_check_response = response.json()
if not signing_check_response['processed']:
continue
if not signing_check_response['valid']:
print('Error: AMO validation failed')
print(response.text)
exit(1)
if not signing_check_response['files'] or len(signing_check_response['files']) == 0:
continue
if not signing_check_response['files'][0]['signed']:
continue
if not signing_check_response['files'][0]['download_url']:
print('Error: AMO signing failed')
print(response.text)
exit(1)
print('\r')
print('Self-hosted xpi package successfully signed.')
download_url = signing_check_response['files'][0]['download_url']
print('Downloading signed self-hosted xpi package from {0}...'.format(download_url))
response = requests.get(download_url, headers=headers)
if response.status_code != 200:
print('Error: Download signed package failed -- server error {0}'.format(response.status_code))
print(response.text)
exit(1)
with open(signed_xpi_filepath, 'wb') as f:
f.write(response.content)
f.close()
print('Signed self-hosted xpi package downloaded.')
break
#
# Upload signed package to GitHub
#
# https://developer.github.com/v3/repos/releases/#upload-a-release-asset
print('Uploading signed self-hosted xpi package to GitHub...')
with open(signed_xpi_filepath, 'rb') as f:
url = release_info['upload_url'].replace('{?name,label}', '?name=' + signed_xpi_filename)
headers = {
'Authorization': github_auth,
'Content-Type': 'application/zip',
}
response = requests.post(url, headers=headers, data=f.read())
if response.status_code != 201:
print('Error: Upload signed package failed -- server error: {0}'.format(response.status_code))
exit(1)
#
# Remove raw package from GitHub
#
# https://developer.github.com/v3/repos/releases/#delete-a-release-asset
print('Remove raw xpi package from GitHub...')
headers = { 'Authorization': github_auth, }
response = requests.delete(raw_xpi_url, headers=headers)
if response.status_code != 204:
print('Error: Deletion of raw package failed -- server error: {0}'.format(response.status_code))
#
# Update updates.json to point to new package -- but only if just-signed
# package is higher version than current one.
#
print('Update GitHub to point to newly signed self-hosted xpi package...')
updates_json_filepath = os.path.join(projdir, 'dist', 'firefox', 'updates.json')
with open(updates_json_filepath) as f:
updates_json = json.load(f)
f.close()
previous_version = updates_json['addons'][extension_id]['updates'][0]['version']
if LooseVersion(ext_version) > LooseVersion(previous_version):
with open(os.path.join(projdir, 'dist', 'firefox', 'updates.template.json')) as f:
template_json = Template(f.read())
f.close()
updates_json = template_json.substitute(ext_version=ext_version, tag_version=tag_version)
with open(updates_json_filepath, 'w') as f:
f.write(updates_json)
f.close()
# Automatically git add/commit if needed.
# - Stage the changed file
r = subprocess.run(['git', 'status', '-s', updates_json_filepath], stdout=subprocess.PIPE)
rout = bytes.decode(r.stdout).strip()
if len(rout) >= 2 and rout[1] == 'M':
subprocess.run(['git', 'add', updates_json_filepath])
# - Commit the staged file
r = subprocess.run(['git', 'status', '-s', updates_json_filepath], stdout=subprocess.PIPE)
rout = bytes.decode(r.stdout).strip()
if len(rout) >= 2 and rout[0] == 'M':
subprocess.run(['git', 'commit', '-m', 'Make Firefox dev build auto-update', updates_json_filepath])
subprocess.run(['git', 'push', 'origin', 'HEAD'])
print('All done.')
| 12,565 | 38.024845 | 189 |
py
|
oscimpDigital
|
oscimpDigital-master/doc/tutorials/plutosdr/2-PRN_on_PL/project_gps/app/top_block.py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
##################################################
# GNU Radio Python Flow Graph
# Title: Top Block
# GNU Radio version: 3.7.13.5
##################################################
from distutils.version import StrictVersion
if __name__ == '__main__':
import ctypes
import sys
if sys.platform.startswith('linux'):
try:
x11 = ctypes.cdll.LoadLibrary('libX11.so')
x11.XInitThreads()
except:
print "Warning: failed to XInitThreads()"
from PyQt5 import Qt
from PyQt5 import Qt, QtCore
from PyQt5.QtCore import QObject, pyqtSlot
from gnuradio import analog
from gnuradio import blocks
from gnuradio import digital
from gnuradio import eng_notation
from gnuradio import fft
from gnuradio import filter
from gnuradio import gr
from gnuradio import iio
from gnuradio import qtgui
from gnuradio.eng_option import eng_option
from gnuradio.fft import window
from gnuradio.filter import firdes
from gnuradio.qtgui import Range, RangeWidget
from optparse import OptionParser
import sip
import sys
from gnuradio import qtgui
class top_block(gr.top_block, Qt.QWidget):
def __init__(self):
gr.top_block.__init__(self, "Top Block")
Qt.QWidget.__init__(self)
self.setWindowTitle("Top Block")
qtgui.util.check_set_qss()
try:
self.setWindowIcon(Qt.QIcon.fromTheme('gnuradio-grc'))
except:
pass
self.top_scroll_layout = Qt.QVBoxLayout()
self.setLayout(self.top_scroll_layout)
self.top_scroll = Qt.QScrollArea()
self.top_scroll.setFrameStyle(Qt.QFrame.NoFrame)
self.top_scroll_layout.addWidget(self.top_scroll)
self.top_scroll.setWidgetResizable(True)
self.top_widget = Qt.QWidget()
self.top_scroll.setWidget(self.top_widget)
self.top_layout = Qt.QVBoxLayout(self.top_widget)
self.top_grid_layout = Qt.QGridLayout()
self.top_layout.addLayout(self.top_grid_layout)
self.settings = Qt.QSettings("GNU Radio", "top_block")
self.restoreGeometry(self.settings.value("geometry", type=QtCore.QByteArray))
##################################################
# Variables
##################################################
self.samp_rate = samp_rate = int(1e6)
self.prn_num = prn_num = 0x48
self.df_lo = df_lo = 0
self.df = df = 0
##################################################
# Blocks
##################################################
self._prn_num_options = (0x41, 0x48, )
self._prn_num_labels = ('0x41', '0x48', )
self._prn_num_tool_bar = Qt.QToolBar(self)
self._prn_num_tool_bar.addWidget(Qt.QLabel("prn_num"+": "))
self._prn_num_combo_box = Qt.QComboBox()
self._prn_num_tool_bar.addWidget(self._prn_num_combo_box)
for label in self._prn_num_labels: self._prn_num_combo_box.addItem(label)
self._prn_num_callback = lambda i: Qt.QMetaObject.invokeMethod(self._prn_num_combo_box, "setCurrentIndex", Qt.Q_ARG("int", self._prn_num_options.index(i)))
self._prn_num_callback(self.prn_num)
self._prn_num_combo_box.currentIndexChanged.connect(
lambda i: self.set_prn_num(self._prn_num_options[i]))
self.top_grid_layout.addWidget(self._prn_num_tool_bar)
self._df_lo_range = Range(-1e6, 1e6, 1, 0, 200)
self._df_lo_win = RangeWidget(self._df_lo_range, self.set_df_lo, "df_lo", "counter_slider", float)
self.top_grid_layout.addWidget(self._df_lo_win)
self._df_range = Range(-200000, 200000, 1000, 0, 200)
self._df_win = RangeWidget(self._df_range, self.set_df, "df", "counter_slider", float)
self.top_grid_layout.addWidget(self._df_win)
self.qtgui_time_sink_x_0_0 = qtgui.time_sink_f(
1024, #size
samp_rate, #samp_rate
"", #name
1 #number of inputs
)
self.qtgui_time_sink_x_0_0.set_update_time(0.10)
self.qtgui_time_sink_x_0_0.set_y_axis(-1, 1)
self.qtgui_time_sink_x_0_0.set_y_label('Amplitude', "")
self.qtgui_time_sink_x_0_0.enable_tags(-1, True)
self.qtgui_time_sink_x_0_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, qtgui.TRIG_SLOPE_POS, 0.0, 0, 0, "")
self.qtgui_time_sink_x_0_0.enable_autoscale(True)
self.qtgui_time_sink_x_0_0.enable_grid(False)
self.qtgui_time_sink_x_0_0.enable_axis_labels(True)
self.qtgui_time_sink_x_0_0.enable_control_panel(True)
self.qtgui_time_sink_x_0_0.enable_stem_plot(False)
if not True:
self.qtgui_time_sink_x_0_0.disable_legend()
labels = ['', '', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "blue"]
styles = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
markers = [-1, -1, -1, -1, -1,
-1, -1, -1, -1, -1]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(1):
if len(labels[i]) == 0:
self.qtgui_time_sink_x_0_0.set_line_label(i, "Data {0}".format(i))
else:
self.qtgui_time_sink_x_0_0.set_line_label(i, labels[i])
self.qtgui_time_sink_x_0_0.set_line_width(i, widths[i])
self.qtgui_time_sink_x_0_0.set_line_color(i, colors[i])
self.qtgui_time_sink_x_0_0.set_line_style(i, styles[i])
self.qtgui_time_sink_x_0_0.set_line_marker(i, markers[i])
self.qtgui_time_sink_x_0_0.set_line_alpha(i, alphas[i])
self._qtgui_time_sink_x_0_0_win = sip.wrapinstance(self.qtgui_time_sink_x_0_0.pyqwidget(), Qt.QWidget)
self.top_grid_layout.addWidget(self._qtgui_time_sink_x_0_0_win)
self.qtgui_time_sink_x_0 = qtgui.time_sink_c(
1024, #size
samp_rate, #samp_rate
"", #name
1 #number of inputs
)
self.qtgui_time_sink_x_0.set_update_time(0.10)
self.qtgui_time_sink_x_0.set_y_axis(-1, 1)
self.qtgui_time_sink_x_0.set_y_label('Amplitude', "")
self.qtgui_time_sink_x_0.enable_tags(-1, True)
self.qtgui_time_sink_x_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, qtgui.TRIG_SLOPE_POS, 0.0, 0, 0, "")
self.qtgui_time_sink_x_0.enable_autoscale(True)
self.qtgui_time_sink_x_0.enable_grid(False)
self.qtgui_time_sink_x_0.enable_axis_labels(True)
self.qtgui_time_sink_x_0.enable_control_panel(True)
self.qtgui_time_sink_x_0.enable_stem_plot(False)
if not True:
self.qtgui_time_sink_x_0.disable_legend()
labels = ['', '', '', '', '',
'', '', '', '', '']
widths = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
colors = ["blue", "red", "green", "black", "cyan",
"magenta", "yellow", "dark red", "dark green", "blue"]
styles = [1, 1, 1, 1, 1,
1, 1, 1, 1, 1]
markers = [-1, -1, -1, -1, -1,
-1, -1, -1, -1, -1]
alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0, 1.0, 1.0]
for i in xrange(2):
if len(labels[i]) == 0:
if(i % 2 == 0):
self.qtgui_time_sink_x_0.set_line_label(i, "Re{{Data {0}}}".format(i/2))
else:
self.qtgui_time_sink_x_0.set_line_label(i, "Im{{Data {0}}}".format(i/2))
else:
self.qtgui_time_sink_x_0.set_line_label(i, labels[i])
self.qtgui_time_sink_x_0.set_line_width(i, widths[i])
self.qtgui_time_sink_x_0.set_line_color(i, colors[i])
self.qtgui_time_sink_x_0.set_line_style(i, styles[i])
self.qtgui_time_sink_x_0.set_line_marker(i, markers[i])
self.qtgui_time_sink_x_0.set_line_alpha(i, alphas[i])
self._qtgui_time_sink_x_0_win = sip.wrapinstance(self.qtgui_time_sink_x_0.pyqwidget(), Qt.QWidget)
self.top_grid_layout.addWidget(self._qtgui_time_sink_x_0_win)
self.pluto_source_0 = iio.pluto_source('', int(2400000000), int(samp_rate), int(20000000), 0x8000, True, True, True, "manual", 64.0, '', True)
self.pluto_sink_0 = iio.pluto_sink('', int(int(2400000000+df_lo)), int(samp_rate), int(20000000), 0x8000, False, 30.0, '', True)
self.freq_xlating_fir_filter_xxx_0 = filter.freq_xlating_fir_filter_ccc(1, (1, ), df, samp_rate)
self.fft_vxx_0_1 = fft.fft_vcc(1024, True, (window.blackmanharris(1024)), True, 1)
self.fft_vxx_0_0 = fft.fft_vcc(1024, False, (window.blackmanharris(1024)), True, 1)
self.fft_vxx_0 = fft.fft_vcc(1024, False, (window.blackmanharris(1024)), True, 1)
self.digital_glfsr_source_x_0 = digital.glfsr_source_f(7, True, prn_num, 1)
self.blocks_vector_to_stream_0 = blocks.vector_to_stream(gr.sizeof_gr_complex*1, 1024)
self.blocks_stream_to_vector_0_0 = blocks.stream_to_vector(gr.sizeof_gr_complex*1, 1024)
self.blocks_stream_to_vector_0 = blocks.stream_to_vector(gr.sizeof_gr_complex*1, 1024)
self.blocks_multiply_conjugate_cc_0 = blocks.multiply_conjugate_cc(1024)
self.blocks_float_to_complex_0 = blocks.float_to_complex(1)
self.blocks_complex_to_mag_0 = blocks.complex_to_mag(1)
self.analog_const_source_x_0 = analog.sig_source_f(0, analog.GR_CONST_WAVE, 0, 0, 0)
##################################################
# Connections
##################################################
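# Signal path (as wired below): the GLFSR PRN sequence is turned into a
# complex baseband signal (imaginary part forced to zero by the constant
# source), transmitted through the Pluto sink and also kept as a local
# reference. The Pluto RX stream is shifted by 'df' with the
# frequency-translating FIR filter; both reference and received streams are
# transformed in 1024-sample blocks, one spectrum is multiplied by the
# conjugate of the other, and the product is transformed back, so the
# magnitude shown on the second time sink is the block-wise cross-correlation.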
self.connect((self.analog_const_source_x_0, 0), (self.blocks_float_to_complex_0, 1))
self.connect((self.blocks_complex_to_mag_0, 0), (self.qtgui_time_sink_x_0_0, 0))
self.connect((self.blocks_float_to_complex_0, 0), (self.blocks_stream_to_vector_0_0, 0))
self.connect((self.blocks_float_to_complex_0, 0), (self.pluto_sink_0, 0))
self.connect((self.blocks_float_to_complex_0, 0), (self.qtgui_time_sink_x_0, 0))
self.connect((self.blocks_multiply_conjugate_cc_0, 0), (self.fft_vxx_0_1, 0))
self.connect((self.blocks_stream_to_vector_0, 0), (self.fft_vxx_0_0, 0))
self.connect((self.blocks_stream_to_vector_0_0, 0), (self.fft_vxx_0, 0))
self.connect((self.blocks_vector_to_stream_0, 0), (self.blocks_complex_to_mag_0, 0))
self.connect((self.digital_glfsr_source_x_0, 0), (self.blocks_float_to_complex_0, 0))
self.connect((self.fft_vxx_0, 0), (self.blocks_multiply_conjugate_cc_0, 0))
self.connect((self.fft_vxx_0_0, 0), (self.blocks_multiply_conjugate_cc_0, 1))
self.connect((self.fft_vxx_0_1, 0), (self.blocks_vector_to_stream_0, 0))
self.connect((self.freq_xlating_fir_filter_xxx_0, 0), (self.blocks_stream_to_vector_0, 0))
self.connect((self.pluto_source_0, 0), (self.freq_xlating_fir_filter_xxx_0, 0))
def closeEvent(self, event):
self.settings = Qt.QSettings("GNU Radio", "top_block")
self.settings.setValue("geometry", self.saveGeometry())
event.accept()
def get_samp_rate(self):
return self.samp_rate
def set_samp_rate(self, samp_rate):
self.samp_rate = samp_rate
self.qtgui_time_sink_x_0_0.set_samp_rate(self.samp_rate)
self.qtgui_time_sink_x_0.set_samp_rate(self.samp_rate)
self.pluto_source_0.set_params(int(2400000000), int(self.samp_rate), int(20000000), True, True, True, "manual", 64.0, '', True)
self.pluto_sink_0.set_params(int(int(2400000000+self.df_lo)), int(self.samp_rate), int(20000000), 30.0, '', True)
def get_prn_num(self):
return self.prn_num
def set_prn_num(self, prn_num):
self.prn_num = prn_num
self._prn_num_callback(self.prn_num)
def get_df_lo(self):
return self.df_lo
def set_df_lo(self, df_lo):
self.df_lo = df_lo
self.pluto_sink_0.set_params(int(int(2400000000+self.df_lo)), int(self.samp_rate), int(20000000), 30.0, '', True)
def get_df(self):
return self.df
def set_df(self, df):
self.df = df
self.freq_xlating_fir_filter_xxx_0.set_center_freq(self.df)
def main(top_block_cls=top_block, options=None):
qapp = Qt.QApplication(sys.argv)
tb = top_block_cls()
tb.start()
tb.show()
def quitting():
tb.stop()
tb.wait()
qapp.aboutToQuit.connect(quitting)
qapp.exec_()
if __name__ == '__main__':
main()
| 12,615 | 42.805556 | 163 |
py
|
oscimpDigital
|
oscimpDigital-master/doc/tutorials/plutosdr/99-gnuradio-audio/top_block.py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
##################################################
# GNU Radio Python Flow Graph
# Title: Top Block
# Generated: Tue Feb 26 16:54:22 2019
##################################################
from gnuradio import analog
from gnuradio import audio
from gnuradio import eng_notation
from gnuradio import filter
from gnuradio import gr
from gnuradio import iio
from gnuradio.eng_option import eng_option
from gnuradio.filter import firdes
from optparse import OptionParser
class top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self, "Top Block")
##################################################
# Variables
##################################################
self.samp_rate = samp_rate = 48e3*4*8
self.freq = freq = int(102.4e6)
##################################################
# Blocks
##################################################
self.pluto_source_0 = iio.pluto_source('local:', int(freq), int(samp_rate), int(20000000), 0x8000, True, True, True, "manual", 55, '', True)
self.low_pass_filter_0 = filter.fir_filter_ccf(8, firdes.low_pass(
1, samp_rate, 150000, 100000, firdes.WIN_HAMMING, 6.76))
self.audio_sink_0 = audio.sink(48000, '', True)
self.analog_wfm_rcv_0 = analog.wfm_rcv(
quad_rate=samp_rate/8,
audio_decimation=4,
)
##################################################
# Connections
##################################################
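# Receive chain (as wired below): PlutoSDR source tuned to 102.4 MHz ->
# low-pass FIR filter (150 kHz cutoff, decimation by 8, 1.536 MS/s in) ->
# wideband FM demodulator (192 kS/s quadrature rate, audio decimation 4) ->
# 48 kHz audio sink.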
self.connect((self.analog_wfm_rcv_0, 0), (self.audio_sink_0, 0))
self.connect((self.low_pass_filter_0, 0), (self.analog_wfm_rcv_0, 0))
self.connect((self.pluto_source_0, 0), (self.low_pass_filter_0, 0))
def get_samp_rate(self):
return self.samp_rate
def set_samp_rate(self, samp_rate):
self.samp_rate = samp_rate
self.pluto_source_0.set_params(int(self.freq), int(self.samp_rate), int(20000000), True, True, True, "manual", 55, '', True)
self.low_pass_filter_0.set_taps(firdes.low_pass(1, self.samp_rate, 150000, 100000, firdes.WIN_HAMMING, 6.76))
def get_freq(self):
return self.freq
def set_freq(self, freq):
self.freq = freq
self.pluto_source_0.set_params(int(self.freq), int(self.samp_rate), int(20000000), True, True, True, "manual", 55, '', True)
def main(top_block_cls=top_block, options=None):
tb = top_block_cls()
tb.start()
try:
raw_input('Press Enter to quit: ')
except EOFError:
pass
tb.stop()
tb.wait()
if __name__ == '__main__':
main()
| 2,691 | 31.433735 | 148 |
py
|
lydia
|
lydia-main/scripts/run-clang-format.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file is part of Lydia.
#
# Lydia is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Lydia is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Lydia. If not, see <https://www.gnu.org/licenses/>.
#
"""A wrapper script around clang-format, suitable for linting multiple files
and to use for continuous integration.
This is an alternative API for the clang-format command line.
It runs over multiple files and directories in parallel.
A diff output is produced and a sensible exit code is returned.
"""
from __future__ import print_function, unicode_literals
import argparse
import codecs
import difflib
import fnmatch
import io
import errno
import multiprocessing
import os
import signal
import subprocess
import sys
import traceback
from functools import partial
try:
from subprocess import DEVNULL # py3k
except ImportError:
DEVNULL = open(os.devnull, "wb")
DEFAULT_EXTENSIONS = 'c,h,C,H,cpp,hpp,cc,hh,c++,h++,cxx,hxx'
DEFAULT_CLANG_FORMAT_IGNORE = '.clang-format-ignore'
class ExitStatus:
SUCCESS = 0
DIFF = 1
TROUBLE = 2
def excludes_from_file(ignore_file):
excludes = []
try:
with io.open(ignore_file, 'r', encoding='utf-8') as f:
for line in f:
if line.startswith('#'):
# ignore comments
continue
pattern = line.rstrip()
if not pattern:
# allow empty lines
continue
excludes.append(pattern)
except EnvironmentError as e:
if e.errno != errno.ENOENT:
raise
return excludes
def list_files(files, recursive=False, extensions=None, exclude=None):
if extensions is None:
extensions = []
if exclude is None:
exclude = []
out = []
for file in files:
if recursive and os.path.isdir(file):
for dirpath, dnames, fnames in os.walk(file):
fpaths = [os.path.join(dirpath, fname) for fname in fnames]
for pattern in exclude:
# os.walk() supports trimming down the dnames list
# by modifying it in-place,
# to avoid unnecessary directory listings.
dnames[:] = [
x for x in dnames
if
not fnmatch.fnmatch(os.path.join(dirpath, x), pattern)
]
fpaths = [
x for x in fpaths if not fnmatch.fnmatch(x, pattern)
]
for f in fpaths:
ext = os.path.splitext(f)[1][1:]
if ext in extensions:
out.append(f)
else:
out.append(file)
return out
def make_diff(file, original, reformatted):
return list(
difflib.unified_diff(
original,
reformatted,
fromfile='{}\t(original)'.format(file),
tofile='{}\t(reformatted)'.format(file),
n=3))
class DiffError(Exception):
def __init__(self, message, errs=None):
super(DiffError, self).__init__(message)
self.errs = errs or []
class UnexpectedError(Exception):
def __init__(self, message, exc=None):
super(UnexpectedError, self).__init__(message)
self.formatted_traceback = traceback.format_exc()
self.exc = exc
def run_clang_format_diff_wrapper(args, file):
try:
ret = run_clang_format_diff(args, file)
return ret
except DiffError:
raise
except Exception as e:
raise UnexpectedError('{}: {}: {}'.format(file, e.__class__.__name__,
e), e)
def run_clang_format_diff(args, file):
try:
with io.open(file, 'r', encoding='utf-8') as f:
original = f.readlines()
except IOError as exc:
raise DiffError(str(exc))
invocation = [args.clang_format_executable, file]
# Use of utf-8 to decode the process output.
#
# Hopefully, this is the correct thing to do.
#
# It's done due to the following assumptions (which may be incorrect):
# - clang-format will returns the bytes read from the files as-is,
# without conversion, and it is already assumed that the files use utf-8.
# - if the diagnostics were internationalized, they would use utf-8:
# > Adding Translations to Clang
# >
# > Not possible yet!
# > Diagnostic strings should be written in UTF-8,
# > the client can translate to the relevant code page if needed.
# > Each translation completely replaces the format string
# > for the diagnostic.
# > -- http://clang.llvm.org/docs/InternalsManual.html#internals-diag-translation
#
# It's not pretty, due to Python 2 & 3 compatibility.
encoding_py3 = {}
if sys.version_info[0] >= 3:
encoding_py3['encoding'] = 'utf-8'
try:
proc = subprocess.Popen(
invocation,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
**encoding_py3)
except OSError as exc:
raise DiffError(
"Command '{}' failed to start: {}".format(
subprocess.list2cmdline(invocation), exc
)
)
proc_stdout = proc.stdout
proc_stderr = proc.stderr
if sys.version_info[0] < 3:
# make the pipes compatible with Python 3,
# reading lines should output unicode
encoding = 'utf-8'
proc_stdout = codecs.getreader(encoding)(proc_stdout)
proc_stderr = codecs.getreader(encoding)(proc_stderr)
# hopefully the stderr pipe won't get full and block the process
outs = list(proc_stdout.readlines())
errs = list(proc_stderr.readlines())
proc.wait()
if proc.returncode:
raise DiffError(
"Command '{}' returned non-zero exit status {}".format(
subprocess.list2cmdline(invocation), proc.returncode
),
errs,
)
return make_diff(file, original, outs), errs
def bold_red(s):
return '\x1b[1m\x1b[31m' + s + '\x1b[0m'
def colorize(diff_lines):
def bold(s):
return '\x1b[1m' + s + '\x1b[0m'
def cyan(s):
return '\x1b[36m' + s + '\x1b[0m'
def green(s):
return '\x1b[32m' + s + '\x1b[0m'
def red(s):
return '\x1b[31m' + s + '\x1b[0m'
for line in diff_lines:
if line[:4] in ['--- ', '+++ ']:
yield bold(line)
elif line.startswith('@@ '):
yield cyan(line)
elif line.startswith('+'):
yield green(line)
elif line.startswith('-'):
yield red(line)
else:
yield line
def print_diff(diff_lines, use_color):
if use_color:
diff_lines = colorize(diff_lines)
if sys.version_info[0] < 3:
sys.stdout.writelines((l.encode('utf-8') for l in diff_lines))
else:
sys.stdout.writelines(diff_lines)
def print_trouble(prog, message, use_colors):
error_text = 'error:'
if use_colors:
error_text = bold_red(error_text)
print("{}: {} {}".format(prog, error_text, message), file=sys.stderr)
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--clang-format-executable',
metavar='EXECUTABLE',
help='path to the clang-format executable',
default='clang-format')
parser.add_argument(
'--extensions',
help='comma separated list of file extensions (default: {})'.format(
DEFAULT_EXTENSIONS),
default=DEFAULT_EXTENSIONS)
parser.add_argument(
'-r',
'--recursive',
action='store_true',
help='run recursively over directories')
parser.add_argument('files', metavar='file', nargs='+')
parser.add_argument(
'-q',
'--quiet',
action='store_true',
help="disable output, useful for the exit code")
parser.add_argument(
'-j',
metavar='N',
type=int,
default=0,
help='run N clang-format jobs in parallel'
' (default number of cpus + 1)')
parser.add_argument(
'--color',
default='auto',
choices=['auto', 'always', 'never'],
help='show colored diff (default: auto)')
parser.add_argument(
'-e',
'--exclude',
metavar='PATTERN',
action='append',
default=[],
help='exclude paths matching the given glob-like pattern(s)'
' from recursive search')
args = parser.parse_args()
# use default signal handling, like diff return SIGINT value on ^C
# https://bugs.python.org/issue14229#msg156446
signal.signal(signal.SIGINT, signal.SIG_DFL)
try:
signal.SIGPIPE
except AttributeError:
# compatibility, SIGPIPE does not exist on Windows
pass
else:
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
colored_stdout = False
colored_stderr = False
if args.color == 'always':
colored_stdout = True
colored_stderr = True
elif args.color == 'auto':
colored_stdout = sys.stdout.isatty()
colored_stderr = sys.stderr.isatty()
version_invocation = [args.clang_format_executable, str("--version")]
try:
subprocess.check_call(version_invocation, stdout=DEVNULL)
except subprocess.CalledProcessError as e:
print_trouble(parser.prog, str(e), use_colors=colored_stderr)
return ExitStatus.TROUBLE
except OSError as e:
print_trouble(
parser.prog,
"Command '{}' failed to start: {}".format(
subprocess.list2cmdline(version_invocation), e
),
use_colors=colored_stderr,
)
return ExitStatus.TROUBLE
retcode = ExitStatus.SUCCESS
excludes = excludes_from_file(DEFAULT_CLANG_FORMAT_IGNORE)
excludes.extend(args.exclude)
files = list_files(
args.files,
recursive=args.recursive,
exclude=excludes,
extensions=args.extensions.split(','))
if not files:
return
njobs = args.j
if njobs == 0:
njobs = multiprocessing.cpu_count() + 1
njobs = min(len(files), njobs)
if njobs == 1:
# execute directly instead of in a pool,
# less overhead, simpler stacktraces
it = (run_clang_format_diff_wrapper(args, file) for file in files)
pool = None
else:
pool = multiprocessing.Pool(njobs)
it = pool.imap_unordered(
partial(run_clang_format_diff_wrapper, args), files)
while True:
try:
outs, errs = next(it)
except StopIteration:
break
except DiffError as e:
print_trouble(parser.prog, str(e), use_colors=colored_stderr)
retcode = ExitStatus.TROUBLE
sys.stderr.writelines(e.errs)
except UnexpectedError as e:
print_trouble(parser.prog, str(e), use_colors=colored_stderr)
sys.stderr.write(e.formatted_traceback)
retcode = ExitStatus.TROUBLE
# stop at the first unexpected error,
# something could be very wrong,
# don't process all files unnecessarily
if pool:
pool.terminate()
break
else:
sys.stderr.writelines(errs)
if outs == []:
continue
if not args.quiet:
print_diff(outs, use_color=colored_stdout)
if retcode == ExitStatus.SUCCESS:
retcode = ExitStatus.DIFF
return retcode
if __name__ == '__main__':
sys.exit(main())
| 12,277 | 29.926952 | 87 |
py
|
lydia
|
lydia-main/scripts/check_copyright_notice.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file is part of Lydia.
#
# Lydia is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Lydia is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Lydia. If not, see <https://www.gnu.org/licenses/>.
#
import inspect
import os
from operator import methodcaller
from pathlib import Path
import re
import sys
from typing import Set
PY_SHEBANG = r"#!/usr/bin/env python3"
PY_ENCODING_HEADER = r"# -\*- coding: utf-8 -\*-"
HPP_PRAGMA_ONCE = r"#pragma once"
STAR_COPYRIGHT_NOTICE = r"""/\*
\* This file is part of Lydia.
\*
\* Lydia is free software: you can redistribute it and/or modify
\* it under the terms of the GNU Lesser General Public License as published by
\* the Free Software Foundation, either version 3 of the License, or
\* \(at your option\) any later version\.
\*
\* Lydia is distributed in the hope that it will be useful,
\* but WITHOUT ANY WARRANTY; without even the implied warranty of
\* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE\. See the
\* GNU Lesser General Public License for more details\.
\*
\* You should have received a copy of the GNU Lesser General Public License
\* along with Lydia\. If not, see <https://www\.gnu\.org/licenses/>\.
\*/"""
HASH_COPYRIGHT_NOTICE = r"""#
# This file is part of Lydia\.
#
# Lydia is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# \(at your option\) any later version\.
#
# Lydia is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE\. See the
# GNU Lesser General Public License for more details\.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Lydia. If not, see <https://www\.gnu\.org/licenses/>\.
#"""
CPP_HEADER_REGEX = re.compile(r"^{}".format(STAR_COPYRIGHT_NOTICE), re.MULTILINE)
HPP_HEADER_REGEX = re.compile(
r"^{}\n{}".format(HPP_PRAGMA_ONCE, STAR_COPYRIGHT_NOTICE), re.MULTILINE
)
PY_HEADER_REGEX = re.compile(
r"^({}\n)?{}\n{}".format(PY_SHEBANG, PY_ENCODING_HEADER, HASH_COPYRIGHT_NOTICE),
re.MULTILINE,
)
CMAKE_HEADER_REGEX = re.compile(r"^{}".format(HASH_COPYRIGHT_NOTICE), re.MULTILINE)
ROOT = Path(os.path.dirname(inspect.getfile(inspect.currentframe())), "..").absolute()
INCLUDE = {
*filter(methodcaller("is_file"), Path("app").glob("**/*")),
*filter(methodcaller("is_file"), Path("lib").glob("**/*")),
*filter(methodcaller("is_file"), Path("scripts").glob("**/*")),
Path("third_party/CMakeLists.txt"),
Path("CMakeLists.txt"),
}
IGNORE = {Path("scripts", "run-clang-tidy.py")}
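# Only files under app/, lib/ and scripts/, plus the two CMakeLists.txt entries
# above, are checked; anything listed in IGNORE is skipped. file_matches()
# below picks the header regex to apply from the file name or extension.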
def file_matches(path: Path) -> bool:
"""Check that a file passes the checks."""
content = path.read_text()
if path.name.endswith(".cpp"):
return re.match(CPP_HEADER_REGEX, content) is not None
if path.name.endswith(".hpp"):
return re.match(HPP_HEADER_REGEX, content) is not None
if path.name.endswith(".py"):
return re.match(PY_HEADER_REGEX, content) is not None
if path.name == "CMakeLists.txt":
return re.match(CMAKE_HEADER_REGEX, content) is not None
else:
return True
if __name__ == "__main__":
bad_files = set() # type: Set[Path]
for path in INCLUDE.difference(IGNORE):
print("Processing {}".format(path))
if not file_matches(path):
bad_files.add(path)
if len(bad_files) != 0:
print("The following files are misformatted, please fix the headers.")
for p in sorted(bad_files):
print(str(p))
sys.exit(1)
else:
print("OK!")
sys.exit(0)
| 4,260 | 36.052174 | 86 |
py
|
sciann-applications
|
sciann-applications-master/SciANN-LaplaceEq-Forward/sciann_datagenerator.py
|
# ==============================================================================
# Copyright 2021 SciANN -- Ehsan Haghighat.
# All Rights Reserved.
#
# Licensed under the MIT License.
#
# A guide for generating collocation points for PINN solvers.
#
# Includes:
# - DataGeneratorX:
# Generate 1D collocation grid.
# - DataGeneratorXY:
# Generate 2D collocation grid for a rectangular domain.
# - DataGeneratorXT:
# Generate 1D time-dependent collocation grid.
# - DataGeneratorXYT:
# Generate 2D time-dependent collocation grid for a rectangular domain.
# ==============================================================================
import numpy as np
import matplotlib.pyplot as plt
from itertools import cycle
cycol = cycle('bgrcmk')
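# Common output convention for all generators below: get_data() returns
# (input_data, target_data), where input_data is a list of (N, 1) coordinate
# arrays and target_data holds one (ids, 'zeros') pair per requested target,
# the ids marking the rows of input_data where that constraint is imposed.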
class DataGeneratorX:
""" Generates 1D collocation grid for training PINNs
# Arguments:
X: [X0, X1]
targets: list and type of targets you wish to impose on PINNs.
('domain', 'bc-left', 'bc-right', 'all')
num_sample: total number of collocation points.
# Examples:
>> dg = DataGeneratorX([0., 1.], ["domain", "bc-left", "bc-right"], 10000)
>> input_data, target_data = dg.get_data()
"""
def __init__(self,
X=[0., 1.],
targets=['domain', 'bc-left', 'bc-right'],
num_sample=10000):
'Initialization'
self.Xdomain = X
self.targets = targets
self.num_sample = num_sample
self.input_data = None
self.target_data = None
self.set_data()
def __len__(self):
return self.input_data[0].shape[0]
def set_data(self):
self.input_data, self.target_data = self.generate_data()
def get_data(self):
return self.input_data, self.target_data
def generate_data(self):
# distribute half inside domain half on the boundary
num_sample = int(self.num_sample/2)
counter = 0
# domain points
x_dom = np.random.uniform(self.Xdomain[0], self.Xdomain[1], num_sample)
ids_dom = np.arange(x_dom.shape[0])
counter += ids_dom.size
# left bc points
x_bc_left = np.full(int(num_sample/2), self.Xdomain[0])
ids_bc_left = np.arange(x_bc_left.shape[0]) + counter
counter += ids_bc_left.size
# right bc points
x_bc_right = np.full(num_sample-int(num_sample/2), self.Xdomain[1])
ids_bc_right = np.arange(x_bc_right.shape[0]) + counter
counter += ids_bc_right.size
ids_bc = np.concatenate([ids_bc_left, ids_bc_right])
ids_all = np.concatenate([ids_dom, ids_bc])
ids = {
'domain': ids_dom,
'bc-left': ids_bc_left,
'bc-right': ids_bc_right,
'bc': ids_bc,
'all': ids_all
}
assert all([t in ids.keys() for t in self.targets]), \
'accepted target types: {}'.format(ids.keys())
input_data = [
np.concatenate([x_dom, x_bc_left, x_bc_right]).reshape(-1,1),
]
total_sample = input_data[0].shape[0]
target_data = []
for i, tp in enumerate(self.targets):
target_data.append(
(ids[tp], 'zeros')
)
return input_data, target_data
def get_test_grid(self, Nx=1000):
xs = np.linspace(self.Xdomain[0], self.Xdomain[1], Nx)
return xs
def plot_sample_batch(self, batch_size=500):
ids = np.random.choice(len(self), batch_size, replace=False)
x_data = self.input_data[0][ids,:]
y_data = np.random.uniform(-.1, .1, x_data.shape)
plt.scatter(x_data, y_data)
plt.xlabel('x')
plt.ylabel('Random vals')
plt.ylim(-1,1)
plt.title('Sample batch = {}'.format(batch_size))
plt.show()
def plot_data(self):
fig = plt.figure()
for t, (t_idx, t_val) in zip(self.targets, self.target_data):
x_data = self.input_data[0][t_idx,:]
y_data = np.random.uniform(-.1, .1, x_data.shape)
plt.scatter(x_data, y_data, label=t, c=next(cycol))
plt.ylim(-1,1)
plt.xlabel('x')
plt.ylabel('Random vals')
plt.title('Training Data')
plt.legend(title="Training Data", bbox_to_anchor=(1.05, 1), loc='upper left')
fig.tight_layout()
plt.show()
class DataGeneratorXY:
""" Generates 2D collocation grid for a rectangular domain
# Arguments:
X: [X0, X1]
Y: [Y0, Y1]
targets: list and type of targets you wish to impose on PINNs.
('domain', 'bc-left', 'bc-right', 'bc-bot', 'bc-top', 'all')
num_sample: total number of collocation points.
# Examples:
>> dg = DataGeneratorXY([0., 1.], [0., 1.], ["domain", "bc-left", "bc-right"], 10000)
>> input_data, target_data = dg.get_data()
"""
def __init__(self,
X=[0., 1.],
Y=[0., 1.],
targets=['domain', 'bc-left', 'bc-right', 'bc-bot', 'bc-top'],
num_sample=10000):
'Initialization'
self.Xdomain = X
self.Ydomain = Y
self.targets = targets
self.num_sample = num_sample
self.input_data = None
self.target_data = None
self.set_data()
def __len__(self):
return self.input_data[0].shape[0]
def set_data(self):
self.input_data, self.target_data = self.generate_data()
def get_data(self):
return self.input_data, self.target_data
def generate_data(self):
# distribute half inside domain half on the boundary
num_sample = int(self.num_sample/2)
counter = 0
# domain points
x_dom = np.random.uniform(self.Xdomain[0], self.Xdomain[1], num_sample)
y_dom = np.random.uniform(self.Ydomain[0], self.Ydomain[1], num_sample)
ids_dom = np.arange(x_dom.shape[0])
counter += ids_dom.size
# bc points
num_sample_per_edge = int(num_sample/4)
# left bc points
x_bc_left = np.full(num_sample_per_edge, self.Xdomain[0])
y_bc_left = np.random.uniform(self.Ydomain[0], self.Ydomain[1], num_sample_per_edge)
ids_bc_left = np.arange(x_bc_left.shape[0]) + counter
counter += ids_bc_left.size
# right bc points
x_bc_right = np.full(num_sample_per_edge, self.Xdomain[1])
y_bc_right = np.random.uniform(self.Ydomain[0], self.Ydomain[1], num_sample_per_edge)
ids_bc_right = np.arange(x_bc_right.shape[0]) + counter
counter += ids_bc_right.size
# bot bc points
x_bc_bot = np.random.uniform(self.Xdomain[0], self.Xdomain[1], num_sample_per_edge)
y_bc_bot = np.full(num_sample_per_edge, self.Ydomain[0])
ids_bc_bot = np.arange(x_bc_bot.shape[0]) + counter
counter += ids_bc_bot.size
# top bc points
x_bc_top = np.random.uniform(self.Xdomain[0], self.Xdomain[1], num_sample-num_sample_per_edge)
y_bc_top = np.full(num_sample-num_sample_per_edge, self.Ydomain[1])
ids_bc_top = np.arange(x_bc_top.shape[0]) + counter
counter += ids_bc_top.size
ids_bc = np.concatenate([ids_bc_left, ids_bc_right, ids_bc_bot, ids_bc_top])
ids_all = np.concatenate([ids_dom, ids_bc])
ids = {
'domain': ids_dom,
'bc-left': ids_bc_left,
'bc-right': ids_bc_right,
'bc-bot': ids_bc_bot,
'bc-top': ids_bc_top,
'bc': ids_bc,
'all': ids_all
}
assert all([t in ids.keys() for t in self.targets]), \
'accepted target types: {}'.format(ids.keys())
input_data = [
np.concatenate([x_dom, x_bc_left, x_bc_right, x_bc_bot, x_bc_top]).reshape(-1,1),
np.concatenate([y_dom, y_bc_left, y_bc_right, y_bc_bot, y_bc_top]).reshape(-1,1),
]
total_sample = input_data[0].shape[0]
target_data = []
for i, tp in enumerate(self.targets):
target_data.append(
(ids[tp], 'zeros')
)
return input_data, target_data
def get_test_grid(self, Nx=200, Ny=200):
xs = np.linspace(self.Xdomain[0], self.Xdomain[1], Nx)
ys = np.linspace(self.Ydomain[0], self.Ydomain[1], Ny)
X, Y = np.meshgrid(xs, ys)
return [X, Y]
def plot_sample_batch(self, batch_size=500):
ids = np.random.choice(len(self), batch_size, replace=False)
x_data = self.input_data[0][ids,:]
y_data = self.input_data[1][ids,:]
plt.scatter(x_data, y_data)
plt.xlabel('x')
plt.ylabel('y')
plt.title('Sample batch = {}'.format(batch_size))
plt.show()
def plot_data(self):
fig = plt.figure()
for t, (t_idx, t_val) in zip(self.targets, self.target_data):
x_data = self.input_data[0][t_idx,:]
y_data = self.input_data[1][t_idx,:]
plt.scatter(x_data, y_data, label=t, c=next(cycol))
plt.xlabel('x')
plt.ylabel('y')
plt.legend(title="Training Data", bbox_to_anchor=(1.05, 1), loc='upper left')
fig.tight_layout()
plt.show()
class DataGeneratorXT:
""" Generates 1D time-dependent collocation grid for training PINNs
# Arguments:
X: [X0, X1]
T: [T0, T1]
targets: list and type of targets you wish to impose on PINNs.
('domain', 'ic', 'bc-left', 'bc-right', 'all')
num_sample: total number of collocation points.
logT: generate random samples logarithmic in time.
# Examples:
>> dg = DataGeneratorXT([0., 1.], [0., 1.], ["domain", "ic", "bc-left", "bc-right"], 10000)
>> input_data, target_data = dg.get_data()
"""
def __init__(self,
X=[0., 1.],
T=[0., 1.],
targets=['domain', 'ic', 'bc-left', 'bc-right'],
num_sample=10000,
logT=False):
'Initialization'
self.Xdomain = X
self.Tdomain = T
self.logT = logT
self.targets = targets
self.num_sample = num_sample
self.input_data = None
self.target_data = None
self.set_data()
def __len__(self):
return self.input_data[0].shape[0]
def set_data(self):
self.input_data, self.target_data = self.generate_data()
def get_data(self):
return self.input_data, self.target_data
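# Times are sampled uniformly in t or, when logT is set, uniformly in
# log(1 + t) and mapped back with exp(.) - 1 so early times are covered
# more densely.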
def generate_uniform_T_samples(self, num_sample):
if self.logT is True:
t_dom = np.random.uniform(np.log1p(self.Tdomain[0]), np.log1p(self.Tdomain[1]), num_sample)
t_dom = np.exp(t_dom) - 1.
else:
t_dom = np.random.uniform(self.Tdomain[0], self.Tdomain[1], num_sample)
return t_dom
def generate_data(self):
# Half of the samples inside the domain.
num_sample = int(self.num_sample/2)
counter = 0
# domain points
x_dom = np.random.uniform(self.Xdomain[0], self.Xdomain[1], num_sample)
t_dom = self.generate_uniform_T_samples(num_sample)
ids_dom = np.arange(x_dom.shape[0])
counter += ids_dom.size
# The other half distributed equally between BC and IC.
num_sample = int(self.num_sample/4)
# initial conditions
x_ic = np.random.uniform(self.Xdomain[0], self.Xdomain[1], num_sample)
t_ic = np.full(num_sample, self.Tdomain[0])
ids_ic = np.arange(x_ic.shape[0]) + counter
counter += ids_ic.size
# bc points
num_sample_per_edge = int(num_sample/2)
# left bc points
x_bc_left = np.full(num_sample_per_edge, self.Xdomain[0])
t_bc_left = self.generate_uniform_T_samples(num_sample_per_edge)
ids_bc_left = np.arange(x_bc_left.shape[0]) + counter
counter += ids_bc_left.size
# right bc points
x_bc_right = np.full(num_sample-num_sample_per_edge, self.Xdomain[1])
t_bc_right = self.generate_uniform_T_samples(num_sample-num_sample_per_edge)
ids_bc_right = np.arange(x_bc_right.shape[0]) + counter
counter += ids_bc_right.size
ids_bc = np.concatenate([ids_bc_left, ids_bc_right])
ids_all = np.concatenate([ids_dom, ids_ic, ids_bc])
ids = {
'domain': ids_dom,
'bc-left': ids_bc_left,
'bc-right': ids_bc_right,
'ic': ids_ic,
'bc': ids_bc,
'all': ids_all
}
assert all([t in ids.keys() for t in self.targets]), \
'accepted target types: {}'.format(ids.keys())
input_data = [
np.concatenate([x_dom, x_ic, x_bc_left, x_bc_right]).reshape(-1,1),
np.concatenate([t_dom, t_ic, t_bc_left, t_bc_right]).reshape(-1,1),
]
total_sample = input_data[0].shape[0]
target_data = []
for i, tp in enumerate(self.targets):
target_data.append(
(ids[tp], 'zeros')
)
return input_data, target_data
def get_test_grid(self, Nx=200, Nt=200):
xs = np.linspace(self.Xdomain[0], self.Xdomain[1], Nx)
if self.logT:
ts = np.linspace(np.log1p(self.Tdomain[0]), np.log1p(self.Tdomain[1]), Nt)
ts = np.exp(ts) - 1.0
else:
ts = np.linspace(self.Tdomain[0], self.Tdomain[1], Nt)
return np.meshgrid(xs, ts)
def plot_sample_batch(self, batch_size=500):
ids = np.random.choice(len(self), batch_size, replace=False)
x_data = self.input_data[0][ids,:]
t_data = self.input_data[1][ids,:]
plt.scatter(x_data, t_data)
plt.xlabel('x')
plt.ylabel('t')
plt.title('Sample batch = {}'.format(batch_size))
plt.show()
def plot_data(self):
fig = plt.figure()
for t, (t_idx, t_val) in zip(self.targets, self.target_data):
x_data = self.input_data[0][t_idx,:]
t_data = self.input_data[1][t_idx,:]
plt.scatter(x_data, t_data, label=t, c=next(cycol))
plt.xlabel('x')
plt.ylabel('t')
plt.legend(title="Training Data", bbox_to_anchor=(1.05, 1), loc='upper left')
fig.tight_layout()
plt.show()
class DataGeneratorXYT:
""" Generates 2D time-dependent collocation grid for training PINNs
# Arguments:
X: [X0, X1]
Y: [Y0, Y1]
T: [T0, T1]
targets: list and type of targets you wish to impose on PINNs.
('domain', 'ic', 'bc-left', 'bc-right', 'bc-bot', 'bc-top', 'all')
num_sample: total number of collocation points.
logT: generate random samples logarithmic in time.
# Examples:
>> dg = DataGeneratorXYT([0., 1.], [0., 1.], [0., 1.],
["domain", "ic", "bc-left", "bc-right", "bc-bot", "bc-top"],
10000)
>> input_data, target_data = dg.get_data()
"""
def __init__(self,
X=[0., 1.],
Y=[0., 1.],
T=[0., 1.],
targets=['domain', 'ic', 'bc-left', 'bc-right', 'bc-bot', 'bc-top'],
num_sample=10000,
logT=False):
'Initialization'
self.Xdomain = X
self.Ydomain = Y
self.Tdomain = T
self.logT = logT
self.targets = targets
self.num_sample = num_sample
self.input_data = None
self.target_data = None
self.set_data()
def __len__(self):
return self.input_data[0].shape[0]
def set_data(self):
self.input_data, self.target_data = self.generate_data()
def get_data(self):
return self.input_data, self.target_data
def generate_uniform_T_samples(self, num_sample):
if self.logT is True:
t_dom = np.random.uniform(np.log1p(self.Tdomain[0]), np.log1p(self.Tdomain[1]), num_sample)
t_dom = np.exp(t_dom) - 1.
else:
t_dom = np.random.uniform(self.Tdomain[0], self.Tdomain[1], num_sample)
return t_dom
def generate_data(self):
# Half of the samples inside the domain.
num_sample = int(self.num_sample/2)
counter = 0
# domain points
x_dom = np.random.uniform(self.Xdomain[0], self.Xdomain[1], num_sample)
y_dom = np.random.uniform(self.Ydomain[0], self.Ydomain[1], num_sample)
t_dom = self.generate_uniform_T_samples(num_sample)
ids_dom = np.arange(x_dom.shape[0])
counter += ids_dom.size
# The other half distributed equally between BC and IC.
num_sample = int(self.num_sample/4)
# initial conditions
x_ic = np.random.uniform(self.Xdomain[0], self.Xdomain[1], num_sample)
y_ic = np.random.uniform(self.Ydomain[0], self.Ydomain[1], num_sample)
t_ic = np.full(num_sample, self.Tdomain[0])
ids_ic = np.arange(x_ic.shape[0]) + counter
counter += ids_ic.size
# bc points
num_sample_per_edge = int(num_sample/4)
# left bc points
x_bc_left = np.full(num_sample_per_edge, self.Xdomain[0])
y_bc_left = np.random.uniform(self.Ydomain[0], self.Ydomain[1], num_sample_per_edge)
t_bc_left = self.generate_uniform_T_samples(num_sample_per_edge)
ids_bc_left = np.arange(x_bc_left.shape[0]) + counter
counter += ids_bc_left.size
# right bc points
x_bc_right = np.full(num_sample_per_edge, self.Xdomain[1])
y_bc_right = np.random.uniform(self.Ydomain[0], self.Ydomain[1], num_sample_per_edge)
t_bc_right = self.generate_uniform_T_samples(num_sample_per_edge)
ids_bc_right = np.arange(x_bc_right.shape[0]) + counter
counter += ids_bc_right.size
# bot bc points
x_bc_bot = np.random.uniform(self.Xdomain[0], self.Xdomain[1], num_sample_per_edge)
y_bc_bot = np.full(num_sample_per_edge, self.Ydomain[0])
t_bc_bot = self.generate_uniform_T_samples(num_sample_per_edge)
ids_bc_bot = np.arange(x_bc_bot.shape[0]) + counter
counter += ids_bc_bot.size
# top bc points
x_bc_top = np.random.uniform(self.Xdomain[0], self.Xdomain[1], num_sample-num_sample_per_edge)
y_bc_top = np.full(num_sample-num_sample_per_edge, self.Ydomain[1])
t_bc_top = self.generate_uniform_T_samples(num_sample-num_sample_per_edge)
ids_bc_top = np.arange(x_bc_top.shape[0]) + counter
counter += ids_bc_top.size
ids_bc = np.concatenate([ids_bc_left, ids_bc_right, ids_bc_bot, ids_bc_top])
ids_all = np.concatenate([ids_dom, ids_ic, ids_bc])
ids = {
'domain': ids_dom,
'bc-left': ids_bc_left,
'bc-right': ids_bc_right,
'bc-bot': ids_bc_bot,
'bc-top': ids_bc_top,
'ic': ids_ic,
'bc': ids_bc,
'all': ids_all
}
assert all([t in ids.keys() for t in self.targets]), \
'accepted target types: {}'.format(ids.keys())
input_grid = [
np.concatenate([x_dom, x_ic, x_bc_left, x_bc_right, x_bc_bot, x_bc_top]).reshape(-1,1),
np.concatenate([y_dom, y_ic, y_bc_left, y_bc_right, y_bc_bot, y_bc_top]).reshape(-1,1),
np.concatenate([t_dom, t_ic, t_bc_left, t_bc_right, t_bc_bot, t_bc_top]).reshape(-1,1),
]
total_sample = input_grid[0].shape[0]
target_grid = []
for i, tp in enumerate(self.targets):
target_grid.append(
(ids[tp], 'zeros')
)
return input_grid, target_grid
def get_test_grid(self, Nx=50, Ny=50, Nt=100):
xs = np.linspace(self.Xdomain[0], self.Xdomain[1], Nx)
ys = np.linspace(self.Ydomain[0], self.Ydomain[1], Ny)
if self.logT:
ts = np.linspace(np.log1p(self.Tdomain[0]), np.log1p(self.Tdomain[1]), Nt)
ts = np.exp(ts) - 1.0
else:
ts = np.linspace(self.Tdomain[0], self.Tdomain[1], Nt)
return np.meshgrid(xs, ys, ts)
def plot_sample_batch(self, batch_size=500):
ids = np.random.choice(len(self), batch_size, replace=False)
x_data = self.input_data[0][ids,:]
y_data = self.input_data[1][ids,:]
t_data = self.input_data[2][ids,:]
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
ax.scatter(x_data, y_data, t_data)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('t')
plt.title('Sample batch = {}'.format(batch_size))
plt.show()
def plot_data(self):
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
for t, (t_idx, t_val) in zip(self.targets, self.target_data):
x_data = self.input_data[0][t_idx,:]
y_data = self.input_data[1][t_idx,:]
t_data = self.input_data[2][t_idx,:]
ax.scatter(x_data, y_data, t_data, label=t, c=next(cycol))
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('t')
plt.legend(title="Training Data", bbox_to_anchor=(1.05, 1), loc='upper left')
fig.tight_layout()
plt.show()
def ex1():
dg = DataGeneratorX(
X=[-1., 1.],
targets=['domain', 'bc-left', 'bc-right'],
num_sample=1000
)
dg.plot_data()
dg.plot_sample_batch(100)
def ex2():
dg = DataGeneratorXY(
X=[-1., 1.],
Y=[0., 10.],
targets=['domain', 'bc-left', 'bc-right', 'bc-bot', 'bc-top'],
num_sample=1000
)
dg.plot_data()
dg.plot_sample_batch(100)
def ex3():
dg = DataGeneratorXT(
X=[-1., 1.],
T=[0., 100.],
targets=['domain', 'ic', 'bc-left', 'bc-right'],
num_sample=1000,
logT=False
)
dg.plot_data()
dg.plot_sample_batch(100)
def ex4():
dg = DataGeneratorXYT(
X=[-1., 1.],
Y=[-1., 1.],
T=[0., 100.],
targets=['domain', 'ic', 'bc-left', 'bc-right', 'bc-bot', 'bc-top'],
num_sample=2000,
logT=False
)
dg.plot_data()
dg.plot_sample_batch(500)
if __name__=='__main__':
# ex1()
# ex2()
ex3()
# ex4()
| 20,752 | 31.477308 | 98 |
py
|
sciann-applications
|
sciann-applications-master/SciANN-SolidMechanics/SciANN-SolidMechanics.py
|
""" SciANN-SolidMechanics.py
Description:
SciANN code for solution and discovery of solid mechanics from data.
For additional details, please check our paper at: https://arxiv.org/abs/2003.02751
Created by Ehsan Haghighat on 2/14/20.
"""
import os, sys, time
import numpy as np
from sciann.utils.math import diff
from sciann import SciModel, Functional, Parameter
from sciann import Data, Tie
from sciann import Variable, Field
import matplotlib.pyplot as plt
import argparse
pi = np.pi
# current file name.
current_file_name = os.path.basename(__file__).split(".")[0]
# Lame parameters used in the paper.
lmbd = 1.0
mu = 0.5
qload = 4.0
# Input interface for python.
parser = argparse.ArgumentParser(description='''
SciANN code for solution and discovery of solid mechanics from data. \n
For additional details, please check our paper at: https://arxiv.org/submit/3042511'''
)
# Command-line options (network size, training setup, data resolution, output control).
parser.add_argument('-l', '--layers', help='Num layers and neurons (default 4 layers each 40 neurons [40, 40, 40, 40])', type=int, nargs='+', default=[40]*4)
parser.add_argument('-af', '--actf', help='Activation function (default tanh)', type=str, nargs=1, default=['tanh'])
parser.add_argument('-nx', '--numx', help='Num Node in X (default 20)', type=int, nargs=1, default=[20])
parser.add_argument('-ny', '--numy', help='Num Node in Y (default 20)', type=int, nargs=1, default=[20])
parser.add_argument('-bs', '--batchsize', help='Batch size for Adam optimizer (default 32)', type=int, nargs=1, default=[32])
parser.add_argument('-e', '--epochs', help='Maximum number of epochs (default 5000)', type=int, nargs=1, default=[5000])
parser.add_argument('-lr', '--learningrate', help='Initial learning rate (default 0.001)', type=float, nargs=1, default=[0.001])
parser.add_argument('-in', '--independent_networks', help='Use independent networks for each var (default True)', type=bool, nargs=1, default=[True])
parser.add_argument('-v', '--verbose', help='Show training progress (default 2) (check Keras.fit)', type=int, nargs=1, default=[2])
parser.add_argument('--shuffle', help='Shuffle data for training (default True)', type=bool, nargs=1, default=[True])
parser.add_argument('--stopafter', help='Patience argument from Keras (default 500)', type=int, nargs=1, default=[500])
parser.add_argument('--savefreq', help='Frequency to save weights (each n-epoch)', type=int, nargs=1, default=[100000])
parser.add_argument('--dtype', help='Data type for weights and biases (default float64)', type=str, nargs=1, default=['float64'])
parser.add_argument('--gpu', help='Use GPU if available (default False)', type=bool, nargs=1, default=[False])
parser.add_argument('-op', '--outputpath', help='Output path (default ./output)', type=str, nargs=1, default=['output'])
parser.add_argument('-of', '--outputprefix', help='Output file prefix (default res)', type=str, nargs=1, default=['res'])
parser.add_argument('-nxp', '--numxplot', help='Num Node in X for plotting final results (default 200)', type=int, nargs=1, default=[200])
parser.add_argument('-nyp', '--numyplot', help='Num Node in Y for plotting final results (default 200)', type=int, nargs=1, default=[200])
parser.add_argument('--plot', help='Plot the model', nargs='?', default=False)
args = parser.parse_args()
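# Example invocation (illustrative values):
#   python SciANN-SolidMechanics.py -l 40 40 40 40 -af tanh -nx 20 -ny 20 -e 5000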
if not args.gpu[0]:
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
def load(xx):
x, y = xx[0], xx[1]
Q = qload
return Q * np.sin(pi*x)
def bodyfx(xx):
x, y = xx[0], xx[1]
Q = qload
frc = - lmbd*(4*pi**2*np.cos(2*pi*x)*np.sin(pi*y) - Q*y**3*pi*np.cos(pi*x)) \
- mu*(pi**2*np.cos(2*pi*x)*np.sin(pi*y) - Q*y**3*pi*np.cos(pi*x)) \
- 8*mu*pi**2*np.cos(2*pi*x)*np.sin(pi*y)
return frc
def bodyfy(xx):
x, y = xx[0], xx[1]
Q = qload
frc = lmbd*(3*Q*y**2*np.sin(pi*x) - 2*pi**2*np.cos(pi*y)*np.sin(2*pi*x)) \
- mu*(2*pi**2*np.cos(pi*y)*np.sin(2*pi*x) + (Q*y**4*pi**2*np.sin(pi*x))/4) \
+ 6*Q*mu*y**2*np.sin(pi*x)
return frc
def dispx(xx):
x, y = xx[0], xx[1]
return np.cos(2*pi*x) * np.sin(pi*y)
def dispy(xx):
x, y = xx[0], xx[1]
Q = qload
return np.sin(pi*x) * Q * y**4/4
def strainxx(xx):
x, y = xx[0], xx[1]
Q = qload
return -2*pi*np.sin(2*pi*x)*np.sin(pi*y)
def strainyy(xx):
x, y = xx[0], xx[1]
Q = qload
return np.sin(pi*x)*Q*y**3
def strainxy(xx):
x, y = xx[0], xx[1]
Q = qload
return 0.5*(pi*np.cos(2*pi*x)*np.cos(pi*y) + pi*np.cos(pi*x)*Q*y**4/4)
def stressxx(xx):
return (lmbd+2*mu)*strainxx(xx) + lmbd*strainyy(xx)
def stressyy(xx):
return (lmbd+2*mu)*strainyy(xx) + lmbd*strainxx(xx)
def stressxy(xx):
return 2.0*mu*strainxy(xx)
def cust_pcolor(AX, X, Y, C, title):
im = AX.pcolor(X, Y, C, cmap="jet")
AX.axis("equal")
AX.axis("off")
AX.set_title(title, fontsize=14)
plt.colorbar(im, ax=AX)
def cust_semilogx(AX, X, Y, xlabel, ylabel):
if X is None:
im = AX.semilogy(Y)
else:
im = AX.semilogy(X, Y)
if xlabel is not None: AX.set_xlabel(xlabel)
if ylabel is not None: AX.set_ylabel(ylabel)
def train():
# define output folder.
if not os.path.isdir(args.outputpath[0]):
os.mkdir(args.outputpath[0])
output_file_name = os.path.join(args.outputpath[0], args.outputprefix[0])
fname = output_file_name + "_{}_".format(args.actf[0]) + "x".join([str(x) for x in args.layers])
# Neural Network Setup.
x = Variable("x", dtype=args.dtype[0])
y = Variable("y", dtype=args.dtype[0])
if args.independent_networks[0]:
Uxy = Functional("Uxy", [x, y], args.layers, args.actf[0])
Vxy = Functional("Vxy", [x, y], args.layers, args.actf[0])
Sxx = Functional("Sxx", [x, y], args.layers, args.actf[0])
Syy = Functional("Syy", [x, y], args.layers, args.actf[0])
Sxy = Functional("Sxy", [x, y], args.layers, args.actf[0])
else:
Uxy, Vxy, Sxx, Syy, Sxy = Functional(
["Uxy", "Vxy", "Sxx", "Syy", "Sxy"],
[x, y],
args.layers, args.actf[0]).split()
lame1 = Parameter(2.0, inputs=[x,y], name="lame1")
lame2 = Parameter(2.0, inputs=[x,y], name="lame2")
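# lame1 (lambda) and lame2 (mu) are trainable scalar parameters, both
# initialized to 2.0; the values identified during training are compared
# against the true lmbd and mu when plotting.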
C11 = (2*lame2 + lame1)
C12 = lame1
C33 = 2*lame2
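# Plane-strain stiffness terms: C11 = lambda + 2*mu, C12 = lambda, C33 = 2*mu,
# so that Sxx = C11*Exx + C12*Eyy, Syy = C11*Eyy + C12*Exx and Sxy = C33*Exy,
# which is what the Tie constraints below enforce.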
Exx = diff(Uxy, x)
Eyy = diff(Vxy, y)
Exy = (diff(Uxy, y) + diff(Vxy, x))*0.5
# Define constraints
d1 = Data(Uxy)
d2 = Data(Vxy)
d3 = Data(Sxx)
d4 = Data(Syy)
d5 = Data(Sxy)
c1 = Tie(Sxx, Exx*C11 + Eyy*C12)
c2 = Tie(Syy, Eyy*C11 + Exx*C12)
c3 = Tie(Sxy, Exy*C33)
Lx = diff(Sxx, x) + diff(Sxy, y)
Ly = diff(Sxy, x) + diff(Syy, y)
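# Lx and Ly are the stress-divergence terms of the momentum balance; below
# they are matched to the manufactured body-force fields bodyfx/bodyfy.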
# Define the optimization model (set of inputs and constraints)
model = SciModel(
inputs=[x, y],
targets=[d1, d2, d3, d4, d5, c1, c2, c3, Lx, Ly],
loss_func="mse"
)
with open("{}_summary".format(fname), "w") as fobj:
model.summary(print_fn=lambda x: fobj.write(x + '\n'))
# Prepare training data
## Training grid
XMIN, XMAX = 0.0, 1.0
YMIN, YMAX = 0.0, 1.0
Xmesh = np.linspace(XMIN, XMAX, args.numx[0]).reshape((-1, 1))
Ymesh = np.linspace(YMIN, YMAX, args.numy[0]).reshape((-1, 1))
X, Y = np.meshgrid(Xmesh, Ymesh)
input_data = [X.reshape(-1, 1), Y.reshape(-1, 1)]
## data associated to constrains defined earlier
# Define constraints
data_d1 = dispx(input_data)
data_d2 = dispy(input_data)
data_d3 = stressxx(input_data)
data_d4 = stressyy(input_data)
data_d5 = stressxy(input_data)
data_c1 = 'zeros'
data_c2 = 'zeros'
data_c3 = 'zeros'
data_Lx = bodyfx(input_data)
data_Ly = bodyfy(input_data)
target_data = [data_d1, data_d2, data_d3, data_d4, data_d5,
data_c1, data_c2, data_c3,
data_Lx, data_Ly]
# Train the model
training_time = time.time()
history = model.train(
x_true=input_data,
y_true=target_data,
epochs=args.epochs[0],
batch_size=args.batchsize[0],
shuffle=args.shuffle[0],
learning_rate=args.learningrate[0],
stop_after=args.stopafter[0],
verbose=args.verbose[0],
save_weights_to="{}_WEIGHTS".format(fname),
save_weights_freq=args.savefreq[0]
)
training_time = time.time() - training_time
for loss in history.history:
np.savetxt(fname+"_{}".format("_".join(loss.split("/"))),
np.array(history.history[loss]).reshape(-1, 1))
time_steps = np.linspace(0, training_time, len(history.history["loss"]))
np.savetxt(fname+"_Time", time_steps.reshape(-1,1))
# Post process the trained model.
Xmesh_plot = np.linspace(XMIN, XMAX, args.numxplot[0]).reshape((-1, 1))
Ymesh_plot = np.linspace(YMIN, YMAX, args.numyplot[0]).reshape((-1, 1))
X_plot, Y_plot = np.meshgrid(Xmesh_plot, Ymesh_plot)
input_plot = [X_plot.reshape(-1, 1), Y_plot.reshape(-1, 1)]
lame1_pred = lame1.eval(model, input_plot)
lame2_pred = lame2.eval(model, input_plot)
Uxy_pred = Uxy.eval(model, input_plot)
Vxy_pred = Vxy.eval(model, input_plot)
Exx_pred = Exx.eval(model, input_plot)
Eyy_pred = Eyy.eval(model, input_plot)
Exy_pred = Exy.eval(model, input_plot)
Sxx_pred = Sxx.eval(model, input_plot)
Syy_pred = Syy.eval(model, input_plot)
Sxy_pred = Sxy.eval(model, input_plot)
np.savetxt(fname+"_Xmesh", X_plot, delimiter=', ')
np.savetxt(fname+"_Ymesh", Y_plot, delimiter=', ')
np.savetxt(fname+"_lame1", lame1_pred, delimiter=', ')
np.savetxt(fname+"_lame2", lame2_pred, delimiter=', ')
np.savetxt(fname+"_Uxy", Uxy_pred.reshape(X_plot.shape), delimiter=', ')
np.savetxt(fname+"_Vxy", Vxy_pred.reshape(X_plot.shape), delimiter=', ')
np.savetxt(fname+"_Exx", Exx_pred.reshape(X_plot.shape), delimiter=', ')
np.savetxt(fname+"_Eyy", Eyy_pred.reshape(X_plot.shape), delimiter=', ')
np.savetxt(fname+"_Exy", Exy_pred.reshape(X_plot.shape), delimiter=', ')
np.savetxt(fname+"_Sxx", Sxx_pred.reshape(X_plot.shape), delimiter=', ')
np.savetxt(fname+"_Syy", Syy_pred.reshape(X_plot.shape), delimiter=', ')
np.savetxt(fname+"_Sxy", Sxy_pred.reshape(X_plot.shape), delimiter=', ')
def plot():
output_file_name = os.path.join(args.outputpath[0], args.outputprefix[0])
fname = output_file_name + "_{}_".format(args.actf[0]) + "x".join([str(x) for x in args.layers])
loss = np.loadtxt(fname+"_loss")
time = np.loadtxt(fname+"_Time")
fig, ax = plt.subplots(1, 2, figsize=(7, 3), dpi=300)
cust_semilogx(ax[0], None, loss/loss[0], "epochs", "L/L0")
cust_semilogx(ax[1], time, loss/loss[0], "time(s)", None)
fig.subplots_adjust(left=0.1, right=0.9, bottom=0.15, top=0.9, wspace=0.3, hspace=0.2)
plt.savefig("{}_loss.png".format(output_file_name))
Xmesh = np.loadtxt(fname+"_Xmesh", delimiter=',')
Ymesh = np.loadtxt(fname+"_Ymesh", delimiter=',')
fig, ax = plt.subplots(2, 2, figsize=(8, 6), dpi=300)
cust_pcolor(ax[0, 0], Xmesh, Ymesh, np.ones_like(Xmesh)*lmbd, "L*={:.3f}".format(lmbd))
cust_pcolor(ax[0, 1], Xmesh, Ymesh, np.ones_like(Xmesh)*mu, "G*={:.3f}".format(mu))
lmbd_pred = np.loadtxt(fname+"_lame1", delimiter=',')
mu_pred = np.loadtxt(fname+"_lame2", delimiter=',')
cust_pcolor(ax[1, 0], Xmesh, Ymesh, np.ones_like(Xmesh)*lmbd_pred, "L={:.3f}".format(lmbd_pred.mean()))
cust_pcolor(ax[1, 1], Xmesh, Ymesh, np.ones_like(Xmesh)*mu_pred, "G={:.3f}".format(mu_pred.mean()))
plt.savefig("{}_Parameters.png".format(output_file_name))
fig, ax = plt.subplots(2, 2, figsize=(8, 6), dpi=300)
cust_pcolor(ax[0, 0], Xmesh, Ymesh, dispx([Xmesh, Ymesh]), "Ux*")
cust_pcolor(ax[0, 1], Xmesh, Ymesh, dispy([Xmesh, Ymesh]), "Uy*")
cust_pcolor(ax[1, 0], Xmesh, Ymesh, np.loadtxt(fname+"_Uxy", delimiter=','), "Ux")
cust_pcolor(ax[1, 1], Xmesh, Ymesh, np.loadtxt(fname+"_Vxy", delimiter=','), "Uy")
plt.savefig("{}_Displacement.png".format(output_file_name))
fig, ax = plt.subplots(2, 3, figsize=(11, 6), dpi=300)
cust_pcolor(ax[0, 0], Xmesh, Ymesh, stressxx([Xmesh, Ymesh]), "Sxx*")
cust_pcolor(ax[0, 1], Xmesh, Ymesh, stressyy([Xmesh, Ymesh]), "Syy*")
cust_pcolor(ax[0, 2], Xmesh, Ymesh, stressxy([Xmesh, Ymesh]), "Sxy*")
cust_pcolor(ax[1, 0], Xmesh, Ymesh, np.loadtxt(fname+"_Sxx", delimiter=','), "Sxx")
cust_pcolor(ax[1, 1], Xmesh, Ymesh, np.loadtxt(fname+"_Syy", delimiter=','), "Syy")
cust_pcolor(ax[1, 2], Xmesh, Ymesh, np.loadtxt(fname+"_Sxy", delimiter=','), "Sxy")
fig.subplots_adjust(left=0.1, right=0.9, bottom=0.05, top=0.9, wspace=0.3, hspace=0.2)
plt.savefig("{}_Stress.png".format(output_file_name))
fig, ax = plt.subplots(2, 3, figsize=(11, 6), dpi=300)
cust_pcolor(ax[0, 0], Xmesh, Ymesh, strainxx([Xmesh, Ymesh]), "Exx*")
cust_pcolor(ax[0, 1], Xmesh, Ymesh, strainyy([Xmesh, Ymesh]), "Eyy*")
cust_pcolor(ax[0, 2], Xmesh, Ymesh, strainxy([Xmesh, Ymesh]), "Exy*")
cust_pcolor(ax[1, 0], Xmesh, Ymesh, np.loadtxt(fname+"_Exx", delimiter=','), "Exx")
cust_pcolor(ax[1, 1], Xmesh, Ymesh, np.loadtxt(fname+"_Eyy", delimiter=','), "Eyy")
cust_pcolor(ax[1, 2], Xmesh, Ymesh, np.loadtxt(fname+"_Exy", delimiter=','), "Exy")
fig.subplots_adjust(left=0.1, right=0.9, bottom=0.05, top=0.9, wspace=0.3, hspace=0.2)
plt.savefig("{}_Strain.png".format(output_file_name))
if __name__ == "__main__":
if args.plot==False:
train()
plot()
else:
plot()
| 13,624 | 39.19174 | 157 |
py
|
sciann-applications
|
sciann-applications-master/SciANN-ElastoPlasticity/plotting.py
|
"""
Description:
Plotting for von-Mises elasto-plasticity problem:
https://arxiv.org/abs/2003.02751
Created by Ehsan Haghighat on 6/10/20.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
import matplotlib.ticker as ticker
formatter = ticker.ScalarFormatter(useMathText=True)
formatter.set_scientific(True)
formatter.set_powerlimits((-2, 1))
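# custom_pcolor draws Z with the diverging 'seismic' colormap on a symmetric
# range [-ZLIM, ZLIM] (ZLIM defaults to max|Z|) unless explicit vmin/vmax are
# passed through kwargs; axis ticks are hidden and, when bar=True, a colorbar
# with a scientific-notation formatter is attached.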
def custom_pcolor(AX, X, Y, Z, title="", bar=True, ZLIM=None, **kwargs):
ZLIM = np.abs(Z).max() if ZLIM is None else ZLIM
if 'vmin' in kwargs:
im = AX.pcolor(X, Y, Z, cmap="seismic", **kwargs)
else:
im = AX.pcolor(X, Y, Z, cmap="seismic",
norm=Normalize(vmin=-ZLIM, vmax=ZLIM),
**kwargs)
AX.axis("equal")
AX.axis([X.min(), X.max(), Y.min(), Y.max()])
AX.get_xaxis().set_ticks([])
AX.get_yaxis().set_ticks([])
AX.set_title(title, fontsize=14)
if bar:
clb = plt.colorbar(im, ax=AX)
clb.formatter.set_powerlimits((0, 0))
return im
| 1,102 | 29.638889 | 72 |
py
|
sciann-applications
|
sciann-applications-master/SciANN-ElastoPlasticity/utility_functions.py
|
"""
Description:
Utility functions for data preparation.
Created by Ehsan Haghighat on 6/10/20.
"""
import os, time
import sys
import numpy as np
import pandas as pd
from scipy.interpolate import griddata
RADI = 50.0
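# The specimen is a plate with a circular hole of radius RADI centred at the
# origin; samples with x^2 + y^2 < RADI^2 fall inside the hole and are dropped
# from the training set and masked (nan_ids) on the test grid.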
def eval_mu_sig(X):
return X.mean(), X.std()
def std(X, mu, sig):
return (X-mu)/sig
def invstd(X, mu, sig):
return X*sig + mu
def get_data():
file_path = "plate_with_hole_plastic_output_100x100_p4_mm.txt"
data = pd.read_csv(file_path, sep=r'\s+', skiprows=9, dtype='float64')
return data
def get_data_max():
data = get_data()
xcoord = data.x.values
ycoord = data.y.values
training_data_ids = np.where((xcoord**2 + ycoord**2 - RADI**2).reshape(-1) > 0)[0]
data_max = {}
for v in data.keys():
data_max[v] = abs(data[v].values[training_data_ids]).max()
return data_max
def get_training_data(ndata=None, adaptive_sampling=False):
""" get_training_data
Inputs:
ndata: number of training points.
defaulted to all available samples.
adaptive_sampling: pick more points at locations with high-strains.
Defaulted to False.
Returns:
mu_sig: normalization values (mu, sig) for each component.
data_s: normalized data for training.
"""
data = get_data()
xcoord = data.x.values
ycoord = data.y.values
training_data_ids = np.where((xcoord**2 + ycoord**2 - RADI**2).reshape(-1) > 0)[0]
if ndata is not None:
if adaptive_sampling == False:
training_data_ids = np.random.choice(
training_data_ids,
ndata,
replace=False
)
else:
# 'exy' appears twice in the list, so the shear term is double-counted, matching
# the two off-diagonal entries of the strain tensor (Frobenius-norm weighting).
prob = np.sqrt(sum([data[v].values[training_data_ids]**2 for v in ['exx', 'eyy', 'ezz', 'exy', 'exy']]))
prob = prob / prob.sum()
training_data_ids = np.random.choice(
training_data_ids,
ndata,
p = prob,
replace=False
)
xcoord = xcoord[training_data_ids]
ycoord = ycoord[training_data_ids]
mu_sig = {'x': [0., 1.], 'y':[0., 1.]}
data_s = {'x': xcoord, 'y':ycoord}
for v in ['u', 'v', 'sxx', 'syy', 'szz', 'sxy', 'exx', 'eyy', 'exy']:
dt_val = data[v].values[training_data_ids]
mu, sig = eval_mu_sig(dt_val)
mu_sig[v] = [mu, sig]
data_s[v] = std(dt_val, mu, sig)
return mu_sig, data_s
def get_test_data(nx=200, ny=400):
data = get_data()
XMIN, XMAX = data.x.values.min(), data.x.values.max()
YMIN, YMAX = data.y.values.min(), data.y.values.max()
Xmesh_plot = np.linspace(XMIN, XMAX, nx)
Ymesh_plot = np.linspace(YMIN, YMAX, ny)
X_plot, Y_plot = np.meshgrid(Xmesh_plot, Ymesh_plot)
input_plot = [X_plot.reshape(-1, 1), Y_plot.reshape(-1, 1)]
nan_ids = np.where(input_plot[0]**2 + input_plot[1]**2 - RADI**2 < 0.0)[0].reshape(-1,1)
return X_plot, Y_plot, nan_ids
| 3,014 | 27.714286 | 114 |
py
|
sciann-applications
|
sciann-applications-master/SciANN-Elasticity/sciann_datagenerator.py
|
# ==============================================================================
# Copyright 2021 SciANN -- Ehsan Haghighat.
# All Rights Reserved.
#
# Licensed under the MIT License.
#
# A guide for generating collocation points for PINN solvers.
#
# Includes:
# - DataGeneratorX:
# Generate 1D collocation grid.
# - DataGeneratorXY:
# Generate 2D collocation grid for a rectangular domain.
# - DataGeneratorXT:
# Generate 1D time-dependent collocation grid.
# - DataGeneratorXYT:
# Generate 2D time-dependent collocation grid for a rectangular domain.
# ==============================================================================
import numpy as np
import matplotlib.pyplot as plt
from itertools import cycle
cycol = cycle('bgrcmk')
class DataGeneratorX:
""" Generates 1D collocation grid for training PINNs
# Arguments:
X: [X0, X1]
targets: list and type of targets you wish to impose on PINNs.
('domain', 'bc-left', 'bc-right', 'all')
num_sample: total number of collocation points.
# Examples:
>> dg = DataGeneratorX([0., 1.], ["domain", "bc-left", "bc-right"], 10000)
>> input_data, target_data = dg.get_data()
"""
def __init__(self,
X=[0., 1.],
targets=['domain', 'bc-left', 'bc-right'],
num_sample=10000):
'Initialization'
self.Xdomain = X
self.targets = targets
self.num_sample = num_sample
self.input_data = None
self.target_data = None
self.set_data()
def __len__(self):
return self.input_data[0].shape[0]
def set_data(self):
self.input_data, self.target_data = self.generate_data()
def get_data(self):
return self.input_data, self.target_data
def generate_data(self):
# distribute half inside domain half on the boundary
num_sample = int(self.num_sample/2)
counter = 0
# domain points
x_dom = np.random.uniform(self.Xdomain[0], self.Xdomain[1], num_sample)
ids_dom = np.arange(x_dom.shape[0])
counter += ids_dom.size
# left bc points
x_bc_left = np.full(int(num_sample/2), self.Xdomain[0])
ids_bc_left = np.arange(x_bc_left.shape[0]) + counter
counter += ids_bc_left.size
# right bc points
x_bc_right = np.full(num_sample-int(num_sample/2), self.Xdomain[1])
ids_bc_right = np.arange(x_bc_right.shape[0]) + counter
counter += ids_bc_right.size
ids_bc = np.concatenate([ids_bc_left, ids_bc_right])
ids_all = np.concatenate([ids_dom, ids_bc])
ids = {
'domain': ids_dom,
'bc-left': ids_bc_left,
'bc-right': ids_bc_right,
'bc': ids_bc,
'all': ids_all
}
assert all([t in ids.keys() for t in self.targets]), \
'accepted target types: {}'.format(ids.keys())
input_data = [
np.concatenate([x_dom, x_bc_left, x_bc_right]).reshape(-1,1),
]
total_sample = input_data[0].shape[0]
target_data = []
for i, tp in enumerate(self.targets):
target_data.append(
(ids[tp], 'zeros')
)
return input_data, target_data
def get_test_grid(self, Nx=1000):
xs = np.linspace(self.Xdomain[0], self.Xdomain[1], Nx)
return xs
def plot_sample_batch(self, batch_size=500):
ids = np.random.choice(len(self), batch_size, replace=False)
x_data = self.input_data[0][ids,:]
y_data = np.random.uniform(-.1, .1, x_data.shape)
plt.scatter(x_data, y_data)
plt.xlabel('x')
plt.ylabel('Random vals')
plt.ylim(-1,1)
plt.title('Sample batch = {}'.format(batch_size))
plt.show()
def plot_data(self):
fig = plt.figure()
for t, (t_idx, t_val) in zip(self.targets, self.target_data):
x_data = self.input_data[0][t_idx,:]
y_data = np.random.uniform(-.1, .1, x_data.shape)
plt.scatter(x_data, y_data, label=t, c=next(cycol))
plt.ylim(-1,1)
plt.xlabel('x')
plt.ylabel('Random vals')
plt.title('Training Data')
plt.legend(title="Training Data", bbox_to_anchor=(1.05, 1), loc='upper left')
fig.tight_layout()
plt.show()
class DataGeneratorXY:
""" Generates 2D collocation grid for a rectangular domain
# Arguments:
X: [X0, X1]
Y: [Y0, Y1]
targets: list of target types you wish to impose on the PINN.
('domain', 'bc-left', 'bc-right', 'bc-bot', 'bc-top', 'all')
num_sample: total number of collocation points.
# Examples:
>> dg = DataGeneratorXY([0., 1.], [0., 1.], ["domain", "bc-left", "bc-right"], 10000)
>> input_data, target_data = dg.get_data()
"""
def __init__(self,
X=[0., 1.],
Y=[0., 1.],
targets=['domain', 'bc-left', 'bc-right', 'bc-bot', 'bc-top'],
num_sample=10000):
'Initialization'
self.Xdomain = X
self.Ydomain = Y
self.targets = targets
self.num_sample = num_sample
self.input_data = None
self.target_data = None
self.set_data()
def __len__(self):
return self.input_data[0].shape[0]
def set_data(self):
self.input_data, self.target_data = self.generate_data()
def get_data(self):
return self.input_data, self.target_data
def generate_data(self):
# distribute half inside domain half on the boundary
num_sample = int(self.num_sample/2)
counter = 0
# domain points
x_dom = np.random.uniform(self.Xdomain[0], self.Xdomain[1], num_sample)
y_dom = np.random.uniform(self.Ydomain[0], self.Ydomain[1], num_sample)
ids_dom = np.arange(x_dom.shape[0])
counter += ids_dom.size
# bc points
num_sample_per_edge = int(num_sample/4)
# left bc points
x_bc_left = np.full(num_sample_per_edge, self.Xdomain[0])
y_bc_left = np.random.uniform(self.Ydomain[0], self.Ydomain[1], num_sample_per_edge)
ids_bc_left = np.arange(x_bc_left.shape[0]) + counter
counter += ids_bc_left.size
# right bc points
x_bc_right = np.full(num_sample_per_edge, self.Xdomain[1])
y_bc_right = np.random.uniform(self.Ydomain[0], self.Ydomain[1], num_sample_per_edge)
ids_bc_right = np.arange(x_bc_right.shape[0]) + counter
counter += ids_bc_right.size
# bot bc points
x_bc_bot = np.random.uniform(self.Xdomain[0], self.Xdomain[1], num_sample_per_edge)
y_bc_bot = np.full(num_sample_per_edge, self.Ydomain[0])
ids_bc_bot = np.arange(x_bc_bot.shape[0]) + counter
counter += ids_bc_bot.size
# top bc points
x_bc_top = np.random.uniform(self.Xdomain[0], self.Xdomain[1], num_sample-num_sample_per_edge)
y_bc_top = np.full(num_sample-num_sample_per_edge, self.Ydomain[1])
ids_bc_top = np.arange(x_bc_top.shape[0]) + counter
counter += ids_bc_top.size
ids_bc = np.concatenate([ids_bc_left, ids_bc_right, ids_bc_bot, ids_bc_top])
ids_all = np.concatenate([ids_dom, ids_bc])
ids = {
'domain': ids_dom,
'bc-left': ids_bc_left,
'bc-right': ids_bc_right,
'bc-bot': ids_bc_bot,
'bc-top': ids_bc_top,
'bc': ids_bc,
'all': ids_all
}
assert all([t in ids.keys() for t in self.targets]), \
'accepted target types: {}'.format(ids.keys())
input_data = [
np.concatenate([x_dom, x_bc_left, x_bc_right, x_bc_bot, x_bc_top]).reshape(-1,1),
np.concatenate([y_dom, y_bc_left, y_bc_right, y_bc_bot, y_bc_top]).reshape(-1,1),
]
total_sample = input_data[0].shape[0]
target_data = []
for i, tp in enumerate(self.targets):
target_data.append(
(ids[tp], 'zeros')
)
return input_data, target_data
def get_test_grid(self, Nx=200, Ny=200):
xs = np.linspace(self.Xdomain[0], self.Xdomain[1], Nx)
ys = np.linspace(self.Ydomain[0], self.Ydomain[1], Ny)
x_mesh, y_mesh = np.meshgrid(xs, ys)
return [x_mesh, y_mesh]
def plot_sample_batch(self, batch_size=500):
ids = np.random.choice(len(self), batch_size, replace=False)
x_data = self.input_data[0][ids,:]
y_data = self.input_data[1][ids,:]
plt.scatter(x_data, y_data)
plt.xlabel('x')
plt.ylabel('y')
plt.title('Sample batch = {}'.format(batch_size))
plt.show()
def plot_data(self):
fig = plt.figure()
for t, (t_idx, t_val) in zip(self.targets, self.target_data):
x_data = self.input_data[0][t_idx,:]
y_data = self.input_data[1][t_idx,:]
plt.scatter(x_data, y_data, label=t, c=next(cycol))
plt.xlabel('x')
plt.ylabel('y')
plt.legend(title="Training Data", bbox_to_anchor=(1.05, 1), loc='upper left')
fig.tight_layout()
plt.show()
class DataGeneratorXT:
""" Generates 1D time-dependent collocation grid for training PINNs
# Arguments:
X: [X0, X1]
T: [T0, T1]
targets: list of target types you wish to impose on the PINN.
('domain', 'ic', 'bc-left', 'bc-right', 'all')
num_sample: total number of collocation points.
logT: generate random samples logarithmic in time.
# Examples:
>> dg = DataGeneratorXT([0., 1.], [0., 1.], ["domain", "ic", "bc-left", "bc-right"], 10000)
>> input_data, target_data = dg.get_data()
"""
def __init__(self,
X=[0., 1.],
T=[0., 1.],
targets=['domain', 'ic', 'bc-left', 'bc-right'],
num_sample=10000,
logT=False):
'Initialization'
self.Xdomain = X
self.Tdomain = T
self.logT = logT
self.targets = targets
self.num_sample = num_sample
self.input_data = None
self.target_data = None
self.set_data()
def __len__(self):
return self.input_data[0].shape[0]
def set_data(self):
self.input_data, self.target_data = self.generate_data()
def get_data(self):
return self.input_data, self.target_data
def generate_uniform_T_samples(self, num_sample):
if self.logT is True:
t_dom = np.random.uniform(np.log1p(self.Tdomain[0]), np.log1p(self.Tdomain[1]), num_sample)
t_dom = np.exp(t_dom) - 1.
else:
t_dom = np.random.uniform(self.Tdomain[0], self.Tdomain[1], num_sample)
return t_dom
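# Worked example of the logT transform (illustrative comment only): with Tdomain = [0., 100.],
# samples are drawn uniformly in u ~ U(log1p(0), log1p(100)) = U(0, ~4.615) and mapped back
# through t = exp(u) - 1, which clusters collocation points near t = 0 while still reaching t = 100.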
def generate_data(self):
# Half of the samples inside the domain.
num_sample = int(self.num_sample/2)
counter = 0
# domain points
x_dom = np.random.uniform(self.Xdomain[0], self.Xdomain[1], num_sample)
t_dom = self.generate_uniform_T_samples(num_sample)
ids_dom = np.arange(x_dom.shape[0])
counter += ids_dom.size
# The other half distributed equally between BC and IC.
num_sample = int(self.num_sample/4)
# initial conditions
x_ic = np.random.uniform(self.Xdomain[0], self.Xdomain[1], num_sample)
t_ic = np.full(num_sample, self.Tdomain[0])
ids_ic = np.arange(x_ic.shape[0]) + counter
counter += ids_ic.size
# bc points
num_sample_per_edge = int(num_sample/2)
# left bc points
x_bc_left = np.full(num_sample_per_edge, self.Xdomain[0])
t_bc_left = self.generate_uniform_T_samples(num_sample_per_edge)
ids_bc_left = np.arange(x_bc_left.shape[0]) + counter
counter += ids_bc_left.size
# right bc points
x_bc_right = np.full(num_sample-num_sample_per_edge, self.Xdomain[1])
t_bc_right = self.generate_uniform_T_samples(num_sample-num_sample_per_edge)
ids_bc_right = np.arange(x_bc_right.shape[0]) + counter
counter += ids_bc_right.size
ids_bc = np.concatenate([ids_bc_left, ids_bc_right])
ids_all = np.concatenate([ids_dom, ids_ic, ids_bc])
ids = {
'domain': ids_dom,
'bc-left': ids_bc_left,
'bc-right': ids_bc_right,
'ic': ids_ic,
'bc': ids_bc,
'all': ids_all
}
assert all([t in ids.keys() for t in self.targets]), \
'accepted target types: {}'.format(ids.keys())
input_data = [
np.concatenate([x_dom, x_ic, x_bc_left, x_bc_right]).reshape(-1,1),
np.concatenate([t_dom, t_ic, t_bc_left, t_bc_right]).reshape(-1,1),
]
total_sample = input_data[0].shape[0]
target_data = []
for i, tp in enumerate(self.targets):
target_data.append(
(ids[tp], 'zeros')
)
return input_data, target_data
def get_test_grid(self, Nx=200, Nt=200):
xs = np.linspace(self.Xdomain[0], self.Xdomain[1], Nx)
if self.logT:
ts = np.linspace(np.log1p(self.Tdomain[0]), np.log1p(self.Tdomain[1]), Nt)
ts = np.exp(ts) - 1.0
else:
ts = np.linspace(self.Tdomain[0], self.Tdomain[1], Nt)
return np.meshgrid(xs, ts)
def plot_sample_batch(self, batch_size=500):
ids = np.random.choice(len(self), batch_size, replace=False)
x_data = self.input_data[0][ids,:]
t_data = self.input_data[1][ids,:]
plt.scatter(x_data, t_data)
plt.xlabel('x')
plt.ylabel('t')
plt.title('Sample batch = {}'.format(batch_size))
plt.show()
def plot_data(self):
fig = plt.figure()
for t, (t_idx, t_val) in zip(self.targets, self.target_data):
x_data = self.input_data[0][t_idx,:]
t_data = self.input_data[1][t_idx,:]
plt.scatter(x_data, t_data, label=t, c=next(cycol))
plt.xlabel('x')
plt.ylabel('t')
plt.legend(title="Training Data", bbox_to_anchor=(1.05, 1), loc='upper left')
fig.tight_layout()
plt.show()
class DataGeneratorXYT:
""" Generates 2D time-dependent collocation grid for training PINNs
# Arguments:
X: [X0, X1]
Y: [Y0, Y1]
T: [T0, T1]
targets: list of target types you wish to impose on the PINN.
('domain', 'ic', 'bc-left', 'bc-right', 'bc-bot', 'bc-top', 'all')
num_sample: total number of collocation points.
logT: generate random samples logarithmic in time.
# Examples:
>> dg = DataGeneratorXYT([0., 1.], [0., 1.], [0., 1.],
["domain", "ic", "bc-left", "bc-right", "bc-bot", "bc-top"],
10000)
>> input_data, target_data = dg.get_data()
"""
def __init__(self,
X=[0., 1.],
Y=[0., 1.],
T=[0., 1.],
targets=['domain', 'ic', 'bc-left', 'bc-right', 'bc-bot', 'bc-top'],
num_sample=10000,
logT=False):
'Initialization'
self.Xdomain = X
self.Ydomain = Y
self.Tdomain = T
self.logT = logT
self.targets = targets
self.num_sample = num_sample
self.input_data = None
self.target_data = None
self.set_data()
def __len__(self):
return self.input_data[0].shape[0]
def set_data(self):
self.input_data, self.target_data = self.generate_data()
def get_data(self):
return self.input_data, self.target_data
def generate_uniform_T_samples(self, num_sample):
if self.logT is True:
t_dom = np.random.uniform(np.log1p(self.Tdomain[0]), np.log1p(self.Tdomain[1]), num_sample)
t_dom = np.exp(t_dom) - 1.
else:
t_dom = np.random.uniform(self.Tdomain[0], self.Tdomain[1], num_sample)
return t_dom
def generate_data(self):
# Half of the samples inside the domain.
num_sample = int(self.num_sample/2)
counter = 0
# domain points
x_dom = np.random.uniform(self.Xdomain[0], self.Xdomain[1], num_sample)
y_dom = np.random.uniform(self.Ydomain[0], self.Ydomain[1], num_sample)
t_dom = self.generate_uniform_T_samples(num_sample)
ids_dom = np.arange(x_dom.shape[0])
counter += ids_dom.size
# The other half distributed equally between BC and IC.
num_sample = int(self.num_sample/4)
# initial conditions
x_ic = np.random.uniform(self.Xdomain[0], self.Xdomain[1], num_sample)
y_ic = np.random.uniform(self.Ydomain[0], self.Ydomain[1], num_sample)
t_ic = np.full(num_sample, self.Tdomain[0])
ids_ic = np.arange(x_ic.shape[0]) + counter
counter += ids_ic.size
# bc points
num_sample_per_edge = int(num_sample/4)
# left bc points
x_bc_left = np.full(num_sample_per_edge, self.Xdomain[0])
y_bc_left = np.random.uniform(self.Ydomain[0], self.Ydomain[1], num_sample_per_edge)
t_bc_left = self.generate_uniform_T_samples(num_sample_per_edge)
ids_bc_left = np.arange(x_bc_left.shape[0]) + counter
counter += ids_bc_left.size
# right bc points
x_bc_right = np.full(num_sample_per_edge, self.Xdomain[1])
y_bc_right = np.random.uniform(self.Ydomain[0], self.Ydomain[1], num_sample_per_edge)
t_bc_right = self.generate_uniform_T_samples(num_sample_per_edge)
ids_bc_right = np.arange(x_bc_right.shape[0]) + counter
counter += ids_bc_right.size
# bot bc points
x_bc_bot = np.random.uniform(self.Xdomain[0], self.Xdomain[1], num_sample_per_edge)
y_bc_bot = np.full(num_sample_per_edge, self.Ydomain[0])
t_bc_bot = self.generate_uniform_T_samples(num_sample_per_edge)
ids_bc_bot = np.arange(x_bc_bot.shape[0]) + counter
counter += ids_bc_bot.size
# top bc points
x_bc_top = np.random.uniform(self.Xdomain[0], self.Xdomain[1], num_sample-num_sample_per_edge)
y_bc_top = np.full(num_sample-num_sample_per_edge, self.Ydomain[1])
t_bc_top = self.generate_uniform_T_samples(num_sample-num_sample_per_edge)
ids_bc_top = np.arange(x_bc_top.shape[0]) + counter
counter += ids_bc_top.size
ids_bc = np.concatenate([ids_bc_left, ids_bc_right, ids_bc_bot, ids_bc_top])
ids_all = np.concatenate([ids_dom, ids_ic, ids_bc])
ids = {
'domain': ids_dom,
'bc-left': ids_bc_left,
'bc-right': ids_bc_right,
'bc-bot': ids_bc_bot,
'bc-top': ids_bc_top,
'ic': ids_ic,
'bc': ids_bc,
'all': ids_all
}
assert all([t in ids.keys() for t in self.targets]), \
'accepted target types: {}'.format(ids.keys())
input_grid = [
np.concatenate([x_dom, x_ic, x_bc_left, x_bc_right, x_bc_bot, x_bc_top]).reshape(-1,1),
np.concatenate([y_dom, y_ic, y_bc_left, y_bc_right, y_bc_bot, y_bc_top]).reshape(-1,1),
np.concatenate([t_dom, t_ic, t_bc_left, t_bc_right, t_bc_bot, t_bc_top]).reshape(-1,1),
]
total_sample = input_grid[0].shape[0]
target_grid = []
for i, tp in enumerate(self.targets):
target_grid.append(
(ids[tp], 'zeros')
)
return input_grid, target_grid
def get_test_grid(self, Nx=50, Ny=50, Nt=100):
xs = np.linspace(self.Xdomain[0], self.Xdomain[1], Nx)
ys = np.linspace(self.Ydomain[0], self.Ydomain[1], Ny)
if self.logT:
ts = np.linspace(np.log1p(self.Tdomain[0]), np.log1p(self.Tdomain[1]), Nt)
ts = np.exp(ts) - 1.0
else:
ts = np.linspace(self.Tdomain[0], self.Tdomain[1], Nt)
return np.meshgrid(xs, ys, ts)
def plot_sample_batch(self, batch_size=500):
ids = np.random.choice(len(self), batch_size, replace=False)
x_data = self.input_data[0][ids,:]
y_data = self.input_data[1][ids,:]
t_data = self.input_data[2][ids,:]
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
ax.scatter(x_data, y_data, t_data)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('t')
plt.title('Sample batch = {}'.format(batch_size))
plt.show()
def plot_data(self):
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
for t, (t_idx, t_val) in zip(self.targets, self.target_data):
x_data = self.input_data[0][t_idx,:]
y_data = self.input_data[1][t_idx,:]
t_data = self.input_data[2][t_idx,:]
ax.scatter(x_data, y_data, t_data, label=t, c=next(cycol))
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('t')
plt.legend(title="Training Data", bbox_to_anchor=(1.05, 1), loc='upper left')
fig.tight_layout()
plt.show()
def ex1():
dg = DataGeneratorX(
X=[-1., 1.],
targets=['domain', 'bc-left', 'bc-right'],
num_sample=1000
)
dg.plot_data()
dg.plot_sample_batch(100)
def ex2():
dg = DataGeneratorXY(
X=[-1., 1.],
Y=[0., 10.],
targets=['domain', 'bc-left', 'bc-right', 'bc-bot', 'bc-top'],
num_sample=1000
)
dg.plot_data()
dg.plot_sample_batch(100)
def ex3():
dg = DataGeneratorXT(
X=[-1., 1.],
T=[0., 100.],
targets=['domain', 'ic', 'bc-left', 'bc-right'],
num_sample=1000,
logT=False
)
dg.plot_data()
dg.plot_sample_batch(100)
def ex4():
dg = DataGeneratorXYT(
X=[-1., 1.],
Y=[-1., 1.],
T=[0., 100.],
targets=['domain', 'ic', 'bc-left', 'bc-right', 'bc-bot', 'bc-top'],
num_sample=2000,
logT=False
)
dg.plot_data()
dg.plot_sample_batch(500)
if __name__=='__main__':
# ex1()
# ex2()
ex3()
# ex4()
| 20,752 | 31.477308 | 98 |
py
|
sciann-applications
|
sciann-applications-master/SciANN-DataGenerator/sciann-datagenerator.py
|
# ==============================================================================
# Copyright 2021 SciANN -- Ehsan Haghighat.
# All Rights Reserved.
#
# Licensed under the MIT License.
#
# A guide for generating collocation points for PINN solvers.
#
# Includes:
# - DataGeneratorX:
# Generate 1D collocation grid.
# - DataGeneratorXY:
# Generate 2D collocation grid for a rectangular domain.
# - DataGeneratorXT:
# Generate 1D time-dependent collocation grid.
# - DataGeneratorXYT:
# Generate 2D time-dependent collocation grid for a rectangular domain.
# ==============================================================================
import numpy as np
import matplotlib.pyplot as plt
from itertools import cycle
cycol = cycle('bgrcmk')
class DataGeneratorX:
""" Generates 1D collocation grid for training PINNs
# Arguments:
X: [X0, X1]
targets: list of target types you wish to impose on the PINN.
('domain', 'bc-left', 'bc-right', 'all')
num_sample: total number of collocation points.
# Examples:
>> dg = DataGeneratorX([0., 1.], ["domain", "bc-left", "bc-right"], 10000)
>> input_data, target_data = dg.get_data()
"""
def __init__(self,
X=[0., 1.],
targets=['domain', 'bc-left', 'bc-right'],
num_sample=10000):
'Initialization'
self.Xdomain = X
self.targets = targets
self.num_sample = num_sample
self.input_data = None
self.target_data = None
self.set_data()
def __len__(self):
return self.input_data[0].shape[0]
def set_data(self):
self.input_data, self.target_data = self.generate_data()
def get_data(self):
return self.input_data, self.target_data
def generate_data(self):
# distribute half inside domain half on the boundary
num_sample = int(self.num_sample/2)
counter = 0
# domain points
x_dom = np.random.uniform(self.Xdomain[0], self.Xdomain[1], num_sample)
ids_dom = np.arange(x_dom.shape[0])
counter += ids_dom.size
# left bc points
x_bc_left = np.full(int(num_sample/2), self.Xdomain[0])
ids_bc_left = np.arange(x_bc_left.shape[0]) + counter
counter += ids_bc_left.size
# right bc points
x_bc_right = np.full(num_sample-int(num_sample/2), self.Xdomain[1])
ids_bc_right = np.arange(x_bc_right.shape[0]) + counter
counter += ids_bc_right.size
ids_bc = np.concatenate([ids_bc_left, ids_bc_right])
ids_all = np.concatenate([ids_dom, ids_bc])
ids = {
'domain': ids_dom,
'bc-left': ids_bc_left,
'bc-right': ids_bc_right,
'bc': ids_bc,
'all': ids_all
}
assert all([t in ids.keys() for t in self.targets]), \
'accepted target types: {}'.format(ids.keys())
input_data = [
np.concatenate([x_dom, x_bc_left, x_bc_right]).reshape(-1,1),
]
total_sample = input_data[0].shape[0]
target_data = []
for i, tp in enumerate(self.targets):
target_data.append(
(ids[tp], 'zeros')
)
return input_data, target_data
def get_test_grid(self, Nx=1000):
xs = np.linspace(self.Xdomain[0], self.Xdomain[1], Nx)
return xs
def plot_sample_batch(self, batch_size=500):
ids = np.random.choice(len(self), batch_size, replace=False)
x_data = self.input_data[0][ids,:]
y_data = np.random.uniform(-.1, .1, x_data.shape)
plt.scatter(x_data, y_data)
plt.xlabel('x')
plt.ylabel('Random vals')
plt.ylim(-1,1)
plt.title('Sample batch = {}'.format(batch_size))
plt.show()
def plot_data(self):
fig = plt.figure()
for t, (t_idx, t_val) in zip(self.targets, self.target_data):
x_data = self.input_data[0][t_idx,:]
y_data = np.random.uniform(-.1, .1, x_data.shape)
plt.scatter(x_data, y_data, label=t, c=next(cycol))
plt.ylim(-1,1)
plt.xlabel('x')
plt.ylabel('Random vals')
plt.title('Training Data')
plt.legend(title="Training Data", bbox_to_anchor=(1.05, 1), loc='upper left')
fig.tight_layout()
plt.show()
class DataGeneratorXY:
""" Generates 2D collocation grid for a rectangular domain
# Arguments:
X: [X0, X1]
Y: [Y0, Y1]
targets: list of target types you wish to impose on the PINN.
('domain', 'bc-left', 'bc-right', 'bc-bot', 'bc-top', 'all')
num_sample: total number of collocation points.
# Examples:
>> dg = DataGeneratorXY([0., 1.], [0., 1.], ["domain", "bc-left", "bc-right"], 10000)
>> input_data, target_data = dg.get_data()
"""
def __init__(self,
X=[0., 1.],
Y=[0., 1.],
targets=['domain', 'bc-left', 'bc-right', 'bc-bot', 'bc-top'],
num_sample=10000):
'Initialization'
self.Xdomain = X
self.Ydomain = Y
self.targets = targets
self.num_sample = num_sample
self.input_data = None
self.target_data = None
self.set_data()
def __len__(self):
return self.input_data[0].shape[0]
def set_data(self):
self.input_data, self.target_data = self.generate_data()
def get_data(self):
return self.input_data, self.target_data
def generate_data(self):
# distribute half inside domain half on the boundary
num_sample = int(self.num_sample/2)
counter = 0
# domain points
x_dom = np.random.uniform(self.Xdomain[0], self.Xdomain[1], num_sample)
y_dom = np.random.uniform(self.Ydomain[0], self.Ydomain[1], num_sample)
ids_dom = np.arange(x_dom.shape[0])
counter += ids_dom.size
# bc points
num_sample_per_edge = int(num_sample/4)
# left bc points
x_bc_left = np.full(num_sample_per_edge, self.Xdomain[0])
y_bc_left = np.random.uniform(self.Ydomain[0], self.Ydomain[1], num_sample_per_edge)
ids_bc_left = np.arange(x_bc_left.shape[0]) + counter
counter += ids_bc_left.size
# right bc points
x_bc_right = np.full(num_sample_per_edge, self.Xdomain[1])
y_bc_right = np.random.uniform(self.Ydomain[0], self.Ydomain[1], num_sample_per_edge)
ids_bc_right = np.arange(x_bc_right.shape[0]) + counter
counter += ids_bc_right.size
# bot bc points
x_bc_bot = np.random.uniform(self.Xdomain[0], self.Xdomain[1], num_sample_per_edge)
y_bc_bot = np.full(num_sample_per_edge, self.Ydomain[0])
ids_bc_bot = np.arange(x_bc_bot.shape[0]) + counter
counter += ids_bc_bot.size
# top bc points
x_bc_top = np.random.uniform(self.Xdomain[0], self.Xdomain[1], num_sample-num_sample_per_edge)
y_bc_top = np.full(num_sample-num_sample_per_edge, self.Ydomain[1])
ids_bc_top = np.arange(x_bc_top.shape[0]) + counter
counter += ids_bc_top.size
ids_bc = np.concatenate([ids_bc_left, ids_bc_right, ids_bc_bot, ids_bc_top])
ids_all = np.concatenate([ids_dom, ids_bc])
ids = {
'domain': ids_dom,
'bc-left': ids_bc_left,
'bc-right': ids_bc_right,
'bc-bot': ids_bc_bot,
'bc-top': ids_bc_top,
'bc': ids_bc,
'all': ids_all
}
assert all([t in ids.keys() for t in self.targets]), \
'accepted target types: {}'.format(ids.keys())
input_data = [
np.concatenate([x_dom, x_bc_left, x_bc_right, x_bc_bot, x_bc_top]).reshape(-1,1),
np.concatenate([y_dom, y_bc_left, y_bc_right, y_bc_bot, y_bc_top]).reshape(-1,1),
]
total_sample = input_data[0].shape[0]
target_data = []
for i, tp in enumerate(self.targets):
target_data.append(
(ids[tp], 'zeros')
)
return input_data, target_data
def get_test_grid(self, Nx=200, Ny=200):
xs = np.linspace(self.Xdomain[0], self.Xdomain[1], Nx)
ys = np.linspace(self.Ydomain[0], self.Ydomain[1], Ny)
x_mesh, y_mesh = np.meshgrid(xs, ys)
return [x_mesh, y_mesh]
def plot_sample_batch(self, batch_size=500):
ids = np.random.choice(len(self), batch_size, replace=False)
x_data = self.input_data[0][ids,:]
y_data = self.input_data[1][ids,:]
plt.scatter(x_data, y_data)
plt.xlabel('x')
plt.ylabel('y')
plt.title('Sample batch = {}'.format(batch_size))
plt.show()
def plot_data(self):
fig = plt.figure()
for t, (t_idx, t_val) in zip(self.targets, self.target_data):
x_data = self.input_data[0][t_idx,:]
y_data = self.input_data[1][t_idx,:]
plt.scatter(x_data, y_data, label=t, c=next(cycol))
plt.xlabel('x')
plt.ylabel('y')
plt.legend(title="Training Data", bbox_to_anchor=(1.05, 1), loc='upper left')
fig.tight_layout()
plt.show()
class DataGeneratorXT:
""" Generates 1D time-dependent collocation grid for training PINNs
# Arguments:
X: [X0, X1]
T: [T0, T1]
targets: list of target types you wish to impose on the PINN.
('domain', 'ic', 'bc-left', 'bc-right', 'all')
num_sample: total number of collocation points.
logT: generate random samples logarithmic in time.
# Examples:
>> dg = DataGeneratorXT([0., 1.], [0., 1.], ["domain", "ic", "bc-left", "bc-right"], 10000)
>> input_data, target_data = dg.get_data()
"""
def __init__(self,
X=[0., 1.],
T=[0., 1.],
targets=['domain', 'ic', 'bc-left', 'bc-right'],
num_sample=10000,
logT=False):
'Initialization'
self.Xdomain = X
self.Tdomain = T
self.logT = logT
self.targets = targets
self.num_sample = num_sample
self.input_data = None
self.target_data = None
self.set_data()
def __len__(self):
return self.input_data[0].shape[0]
def set_data(self):
self.input_data, self.target_data = self.generate_data()
def get_data(self):
return self.input_data, self.target_data
def generate_uniform_T_samples(self, num_sample):
if self.logT is True:
t_dom = np.random.uniform(np.log1p(self.Tdomain[0]), np.log1p(self.Tdomain[1]), num_sample)
t_dom = np.exp(t_dom) - 1.
else:
t_dom = np.random.uniform(self.Tdomain[0], self.Tdomain[1], num_sample)
return t_dom
def generate_data(self):
# Half of the samples inside the domain.
num_sample = int(self.num_sample/2)
counter = 0
# domain points
x_dom = np.random.uniform(self.Xdomain[0], self.Xdomain[1], num_sample)
t_dom = self.generate_uniform_T_samples(num_sample)
ids_dom = np.arange(x_dom.shape[0])
counter += ids_dom.size
# The other half distributed equally between BC and IC.
num_sample = int(self.num_sample/4)
# initial conditions
x_ic = np.random.uniform(self.Xdomain[0], self.Xdomain[1], num_sample)
t_ic = np.full(num_sample, self.Tdomain[0])
ids_ic = np.arange(x_ic.shape[0]) + counter
counter += ids_ic.size
# bc points
num_sample_per_edge = int(num_sample/2)
# left bc points
x_bc_left = np.full(num_sample_per_edge, self.Xdomain[0])
t_bc_left = self.generate_uniform_T_samples(num_sample_per_edge)
ids_bc_left = np.arange(x_bc_left.shape[0]) + counter
counter += ids_bc_left.size
# right bc points
x_bc_right = np.full(num_sample-num_sample_per_edge, self.Xdomain[1])
t_bc_right = self.generate_uniform_T_samples(num_sample-num_sample_per_edge)
ids_bc_right = np.arange(x_bc_right.shape[0]) + counter
counter += ids_bc_right.size
ids_bc = np.concatenate([ids_bc_left, ids_bc_right])
ids_all = np.concatenate([ids_dom, ids_ic, ids_bc])
ids = {
'domain': ids_dom,
'bc-left': ids_bc_left,
'bc-right': ids_bc_right,
'ic': ids_ic,
'bc': ids_bc,
'all': ids_all
}
assert all([t in ids.keys() for t in self.targets]), \
'accepted target types: {}'.format(ids.keys())
input_data = [
np.concatenate([x_dom, x_ic, x_bc_left, x_bc_right]).reshape(-1,1),
np.concatenate([t_dom, t_ic, t_bc_left, t_bc_right]).reshape(-1,1),
]
total_sample = input_data[0].shape[0]
target_data = []
for i, tp in enumerate(self.targets):
target_data.append(
(ids[tp], 'zeros')
)
return input_data, target_data
def get_test_grid(self, Nx=200, Nt=200):
xs = np.linspace(self.Xdomain[0], self.Xdomain[1], Nx)
if self.logT:
ts = np.linspace(np.log1p(self.Tdomain[0]), np.log1p(self.Tdomain[1]), Nt)
ts = np.exp(ts) - 1.0
else:
ts = np.linspace(self.Tdomain[0], self.Tdomain[1], Nt)
return np.meshgrid(xs, ts)
def plot_sample_batch(self, batch_size=500):
ids = np.random.choice(len(self), batch_size, replace=False)
x_data = self.input_data[0][ids,:]
t_data = self.input_data[1][ids,:]
plt.scatter(x_data, t_data)
plt.xlabel('x')
plt.ylabel('t')
plt.title('Sample batch = {}'.format(batch_size))
plt.show()
def plot_data(self):
fig = plt.figure()
for t, (t_idx, t_val) in zip(self.targets, self.target_data):
x_data = self.input_data[0][t_idx,:]
t_data = self.input_data[1][t_idx,:]
plt.scatter(x_data, t_data, label=t, c=next(cycol))
plt.xlabel('x')
plt.ylabel('t')
plt.legend(title="Training Data", bbox_to_anchor=(1.05, 1), loc='upper left')
fig.tight_layout()
plt.show()
class DataGeneratorXYT:
""" Generates 2D time-dependent collocation grid for training PINNs
# Arguments:
X: [X0, X1]
Y: [Y0, Y1]
T: [T0, T1]
targets: list of target types you wish to impose on the PINN.
('domain', 'ic', 'bc-left', 'bc-right', 'bc-bot', 'bc-top', 'all')
num_sample: total number of collocation points.
logT: generate random samples logarithmic in time.
# Examples:
>> dg = DataGeneratorXYT([0., 1.], [0., 1.], [0., 1.],
["domain", "ic", "bc-left", "bc-right", "bc-bot", "bc-top"],
10000)
>> input_data, target_data = dg.get_data()
"""
def __init__(self,
X=[0., 1.],
Y=[0., 1.],
T=[0., 1.],
targets=['domain', 'ic', 'bc-left', 'bc-right', 'bc-bot', 'bc-top'],
num_sample=10000,
logT=False):
'Initialization'
self.Xdomain = X
self.Ydomain = Y
self.Tdomain = T
self.logT = logT
self.targets = targets
self.num_sample = num_sample
self.input_data = None
self.target_data = None
self.set_data()
def __len__(self):
return self.input_data[0].shape[0]
def set_data(self):
self.input_data, self.target_data = self.generate_data()
def get_data(self):
return self.input_data, self.target_data
def generate_uniform_T_samples(self, num_sample):
if self.logT is True:
t_dom = np.random.uniform(np.log1p(self.Tdomain[0]), np.log1p(self.Tdomain[1]), num_sample)
t_dom = np.exp(t_dom) - 1.
else:
t_dom = np.random.uniform(self.Tdomain[0], self.Tdomain[1], num_sample)
return t_dom
def generate_data(self):
# Half of the samples inside the domain.
num_sample = int(self.num_sample/2)
counter = 0
# domain points
x_dom = np.random.uniform(self.Xdomain[0], self.Xdomain[1], num_sample)
y_dom = np.random.uniform(self.Ydomain[0], self.Ydomain[1], num_sample)
t_dom = self.generate_uniform_T_samples(num_sample)
ids_dom = np.arange(x_dom.shape[0])
counter += ids_dom.size
# The other half distributed equally between BC and IC.
num_sample = int(self.num_sample/4)
# initial conditions
x_ic = np.random.uniform(self.Xdomain[0], self.Xdomain[1], num_sample)
y_ic = np.random.uniform(self.Ydomain[0], self.Ydomain[1], num_sample)
t_ic = np.full(num_sample, self.Tdomain[0])
ids_ic = np.arange(x_ic.shape[0]) + counter
counter += ids_ic.size
# bc points
num_sample_per_edge = int(num_sample/4)
# left bc points
x_bc_left = np.full(num_sample_per_edge, self.Xdomain[0])
y_bc_left = np.random.uniform(self.Ydomain[0], self.Ydomain[1], num_sample_per_edge)
t_bc_left = self.generate_uniform_T_samples(num_sample_per_edge)
ids_bc_left = np.arange(x_bc_left.shape[0]) + counter
counter += ids_bc_left.size
# right bc points
x_bc_right = np.full(num_sample_per_edge, self.Xdomain[1])
y_bc_right = np.random.uniform(self.Ydomain[0], self.Ydomain[1], num_sample_per_edge)
t_bc_right = self.generate_uniform_T_samples(num_sample_per_edge)
ids_bc_right = np.arange(x_bc_right.shape[0]) + counter
counter += ids_bc_right.size
# bot bc points
x_bc_bot = np.random.uniform(self.Xdomain[0], self.Xdomain[1], num_sample_per_edge)
y_bc_bot = np.full(num_sample_per_edge, self.Ydomain[0])
t_bc_bot = self.generate_uniform_T_samples(num_sample_per_edge)
ids_bc_bot = np.arange(x_bc_bot.shape[0]) + counter
counter += ids_bc_bot.size
# top bc points
x_bc_top = np.random.uniform(self.Xdomain[0], self.Xdomain[1], num_sample-num_sample_per_edge)
y_bc_top = np.full(num_sample-num_sample_per_edge, self.Ydomain[1])
t_bc_top = self.generate_uniform_T_samples(num_sample-num_sample_per_edge)
ids_bc_top = np.arange(x_bc_top.shape[0]) + counter
counter += ids_bc_top.size
ids_bc = np.concatenate([ids_bc_left, ids_bc_right, ids_bc_bot, ids_bc_top])
ids_all = np.concatenate([ids_dom, ids_ic, ids_bc])
ids = {
'domain': ids_dom,
'bc-left': ids_bc_left,
'bc-right': ids_bc_right,
'bc-bot': ids_bc_bot,
'bc-top': ids_bc_top,
'ic': ids_ic,
'bc': ids_bc,
'all': ids_all
}
assert all([t in ids.keys() for t in self.targets]), \
'accepted target types: {}'.format(ids.keys())
input_grid = [
np.concatenate([x_dom, x_ic, x_bc_left, x_bc_right, x_bc_bot, x_bc_top]).reshape(-1,1),
np.concatenate([y_dom, y_ic, y_bc_left, y_bc_right, y_bc_bot, y_bc_top]).reshape(-1,1),
np.concatenate([t_dom, t_ic, t_bc_left, t_bc_right, t_bc_bot, t_bc_top]).reshape(-1,1),
]
total_sample = input_grid[0].shape[0]
target_grid = []
for i, tp in enumerate(self.targets):
target_grid.append(
(ids[tp], 'zeros')
)
return input_grid, target_grid
def get_test_grid(self, Nx=50, Ny=50, Nt=100):
xs = np.linspace(self.Xdomain[0], self.Xdomain[1], Nx)
ys = np.linspace(self.Ydomain[0], self.Ydomain[1], Ny)
if self.logT:
ts = np.linspace(np.log1p(self.Tdomain[0]), np.log1p(self.Tdomain[1]), Nt)
ts = np.exp(ts) - 1.0
else:
ts = np.linspace(self.Tdomain[0], self.Tdomain[1], Nt)
return np.meshgrid(xs, ys, ts)
def plot_sample_batch(self, batch_size=500):
ids = np.random.choice(len(self), batch_size, replace=False)
x_data = self.input_data[0][ids,:]
y_data = self.input_data[1][ids,:]
t_data = self.input_data[2][ids,:]
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
ax.scatter(x_data, y_data, t_data)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('t')
plt.title('Sample batch = {}'.format(batch_size))
plt.show()
def plot_data(self):
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
for t, (t_idx, t_val) in zip(self.targets, self.target_data):
x_data = self.input_data[0][t_idx,:]
y_data = self.input_data[1][t_idx,:]
t_data = self.input_data[2][t_idx,:]
ax.scatter(x_data, y_data, t_data, label=t, c=next(cycol))
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('t')
plt.legend(title="Training Data", bbox_to_anchor=(1.05, 1), loc='upper left')
fig.tight_layout()
plt.show()
def ex1():
dg = DataGeneratorX(
X=[-1., 1.],
targets=['domain', 'bc-left', 'bc-right'],
num_sample=1000
)
dg.plot_data()
dg.plot_sample_batch(100)
def ex2():
dg = DataGeneratorXY(
X=[-1., 1.],
Y=[0., 10.],
targets=['domain', 'bc-left', 'bc-right', 'bc-bot', 'bc-top'],
num_sample=1000
)
dg.plot_data()
dg.plot_sample_batch(100)
def ex3():
dg = DataGeneratorXT(
X=[-1., 1.],
T=[0., 100.],
targets=['domain', 'ic', 'bc-left', 'bc-right'],
num_sample=1000,
logT=False
)
dg.plot_data()
dg.plot_sample_batch(100)
def ex4():
dg = DataGeneratorXYT(
X=[-1., 1.],
Y=[-1., 1.],
T=[0., 100.],
targets=['domain', 'ic', 'bc-left', 'bc-right', 'bc-bot', 'bc-top'],
num_sample=2000,
logT=False
)
dg.plot_data()
dg.plot_sample_batch(500)
if __name__=='__main__':
# ex1()
# ex2()
ex3()
# ex4()
| 20,752 | 31.477308 | 98 |
py
|
sciann-applications
|
sciann-applications-master/SciANN-ConstitutiveModeling/vonMises_isotropic_stochastic.py
|
# Copyright 2022 SciANN -- Ehsan Haghighat.
# All Rights Reserved.
#
# Licensed under the MIT License.
#
# vonMises_isotropic_stochastic.py
#
# Using transfer learning to perform material characterization
#
# Requirements:
# - data/vonMisesGeneral-random.csv
# - transfer_learning_weights/vonMises_isotropic_stochastic-weights.hdf5
#
import numpy as np
import matplotlib.pyplot as plt
from Tensor import SnTensor as Tensor
import sciann as sn
import pandas as pd
from sciann.utils.math import sign, relu, abs, diff, step, tanh, sigmoid, exp
import sys
import os
file_name = os.path.basename(sys.argv[0]).split('.')[0]
if not os.path.exists(file_name):
os.mkdir(file_name)
path = os.path.join(file_name, 'res_')
inverted_parameters = {
"Realization": [],
"G": [],
"K": [],
"Sy": [],
"Hp":[]
}
fig, ax = plt.subplots(1, 3, figsize=(15, 4))
MAX_EPOCHS = 1000
# data
data_all = pd.read_csv('data/vonMisesGeneral-random.csv')
data_groups = data_all['Group'].unique()
for iRealization in range(len(data_groups)):
GroupVal = data_groups[iRealization]
data = data_all[data_all.Group == GroupVal]
if data.shape[0]<5:
continue
# true values.
E_true = data.E.mean()
nu_true = data.nu.mean()
mu_true = E_true/2/(1+nu_true)
lmbd_true = E_true*nu_true/(1+nu_true)/(1-2*nu_true)
bulk_true = lmbd_true + 2/3*mu_true
Sy_true = data.Sy.mean()
Hp_true = data.K.mean()
class Normalizer:
def __init__(self, x):
self.mu = x.mean()
self.sig = x.std()
def transform(self, x):
return (x - self.mu) / self.sig
def inv_transform(self, x):
return x * self.sig + self.mu
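# Round-trip sanity check for Normalizer (illustrative comment only):
# norm = Normalizer(x); x_s = norm.transform(x)
# norm.inv_transform(x_s) recovers x up to floating-point error.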
input_data = {}
scalar = {}
t = data['t'].values
dt = np.gradient(t)
sig = data['S1'].values
dsig = np.gradient(sig)/dt
eps = data['E1'].values
deps = np.gradient(eps)/dt
SIG_STAR = np.abs(sig).max()
EPS_STAR = np.abs(eps).max()
ELS_STAR = SIG_STAR/EPS_STAR
mu_true /= ELS_STAR
bulk_true /= ELS_STAR
lmbd_true /= ELS_STAR
Hp_true /= ELS_STAR
Sy_true /= SIG_STAR
input_data['t'] = data['t'].values.reshape(-1,1)
input_data['dt'] = np.gradient(data['t'].values).reshape(-1,1)
scalar["t"] = Normalizer(input_data['t'])
for v in ['E1', 'E2', 'E3', 'Eq', 'Ep_q']:
x = data[v].values / EPS_STAR
input_data[v] = x.reshape(-1,1)
dx = (np.gradient(x).reshape(-1,1) / input_data['dt'])
input_data["d"+v] = dx
for v in ['S1', 'S2', 'S3', 'Sq']:
x = data[v].values / SIG_STAR
input_data[v] = x.reshape(-1,1)
dx = (np.gradient(x).reshape(-1,1) / input_data['dt'])
input_data["d"+v] = dx
delta = 200
def sigmoidf(x):
return sigmoid(delta*x)
sn.reset_session()
sn.set_random_seed(12345)
DTYPE = 'float32'
# Variables
t = sn.Variable('t', dtype=DTYPE)
# Strain components
E1 = sn.Variable('E1', dtype=DTYPE)
E2 = sn.Variable('E2', dtype=DTYPE)
E3 = sn.Variable('E3', dtype=DTYPE)
E = Tensor([E1, E2, E3])
# Deviatoric strain components
e1, e2, e3 = E.d()
Ev, Eeq = E.v(), E.eq()
# Incremental strain components
dE1 = sn.Variable('dE1', dtype=DTYPE)
dE2 = sn.Variable('dE2', dtype=DTYPE)
dE3 = sn.Variable('dE3', dtype=DTYPE)
dE = Tensor([dE1, dE2, dE3])
# Deviatoric incremental strain components
de1, de2, de3 = dE.d()
dEv, dEeq = dE.v(), dE.eq()
# Stress components
S1 = sn.Variable('S1', dtype=DTYPE)
S2 = sn.Variable('S2', dtype=DTYPE)
S3 = sn.Variable('S3', dtype=DTYPE)
S = Tensor([S1, S2, S3])
# Deviatoric stress components
s1, s2, s3 = S.d()
p, q = S.p(), S.q()
# Incremental stress components
dS1 = sn.Variable('dS1', dtype=DTYPE)
dS2 = sn.Variable('dS2', dtype=DTYPE)
dS3 = sn.Variable('dS3', dtype=DTYPE)
dS = Tensor([dS1, dS2, dS3])
# Deviatoric stress components
ds1, ds2, ds3 = dS.d()
dp, dq = dS.p(), dS.q()
t_s = scalar['t'].transform(t)
g = sn.Functional('g', [t_s], 8*[20], 'tanh')
dg = diff(g, t)
lmbd_par = sn.Parameter(np.random.rand(), inputs=[t_s], name='lmbd')
lmbd = lmbd_par*lmbd_true
mu_par = sn.Parameter(np.random.rand(), inputs=[t_s], name='mu')
mu = mu_par*mu_true
bulk = lmbd + 2*mu/3
SY0_par = sn.Parameter(np.random.rand(), inputs=[t_s], name='SY0')
SY0 = SY0_par*Sy_true
Hp_par = sn.Parameter(np.random.rand(), inputs=[t_s], name='Hp')
Hp = Hp_par*Hp_true
Sy = SY0 + Hp*g
params = [lmbd_par, mu_par, SY0_par, Hp_par]
params_mult = [lmbd_true, mu_true, Sy_true, Hp_true]
F = q - Sy
dF = dq - Hp*dg
Ce = np.array([[lmbd + 2*mu, lmbd, lmbd],
[lmbd, lmbd + 2*mu, lmbd],
[lmbd, lmbd, lmbd + 2*mu]])
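# Ce is the isotropic elastic stiffness in principal components:
# Ce[i][j] = lmbd + 2*mu if i == j else lmbd, i.e. dS_i = lmbd*dEv + 2*mu*dE_i.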
n = np.array(S.dq())
dEt = np.array(dE._t)
He = np.dot(np.dot(n, Ce), n)
dL = np.dot(np.dot(n, Ce), dEt) #/ (He + Hp)
dL = np.dot(n, dS()) #/ (He + Hp)
dGamma = np.dot(np.dot(n, Ce), dEt) / (He + Hp)
e1_e = s1/(2.*mu)
e2_e = s2/(2.*mu)
e3_e = s3/(2.*mu)
e1_p = g * n[0]
e2_p = g * n[1]
e3_p = g * n[2]
Eeq_p = g
de1_e = ds1/(2*mu)
de2_e = ds2/(2*mu)
de3_e = ds3/(2*mu)
de1_p = dg * n[0]
de2_p = dg * n[1]
de3_p = dg * n[2]
de1_tr = de1_e + de1_p
de2_tr = de2_e + de2_p
de3_tr = de3_e + de3_p
dEeq_p = dg
inputs = [t,
E1, E2, E3, dE1, dE2, dE3,
S1, S2, S3, dS1, dS2, dS3]
dL_scaler = np.abs(input_data['dS1']).max()
targets = [
sigmoidf(-g) * abs(g),
sigmoidf(-dg) * abs(dg),
sigmoidf(F) * abs(F),
(dg) * (F),
sigmoidf(-F) * (de1 - de1_e),
sigmoidf(-F) * (de2 - de2_e),
sigmoidf(-F) * (de3 - de3_e),
sigmoidf(-dL/dL_scaler) * (de1 - de1_e),
sigmoidf(-dL/dL_scaler) * (de2 - de2_e),
sigmoidf(-dL/dL_scaler) * (de3 - de3_e),
sigmoidf(F)*(dg - dGamma),
sigmoidf(dL/dL_scaler) * sigmoidf(F) * (de1 - de1_tr),
sigmoidf(dL/dL_scaler) * sigmoidf(F) * (de2 - de2_tr),
sigmoidf(dL/dL_scaler) * sigmoidf(F) * (de3 - de3_tr),
(Ev + p/bulk),
]
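# The residuals above softly enforce the Karush-Kuhn-Tucker and consistency conditions of
# rate-independent plasticity: g >= 0 and dg >= 0 (non-negative plastic multiplier),
# F <= 0 (admissible stress), dg*F = 0 (complementarity), purely elastic deviatoric flow
# when F < 0 or during unloading (dL < 0), the consistency-based multiplier dGamma during
# plastic loading, and the volumetric elastic relation Ev + p/bulk = 0. The sharp
# indicators are smoothed with sigmoid(delta*x) so the loss remains differentiable.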
training_inputs = [
input_data['t'],
input_data['E1'],
input_data['E2'],
input_data['E3'],
input_data['dE1'],
input_data['dE2'],
input_data['dE3'],
input_data['S1'],
input_data['S2'],
input_data['S3'],
input_data['dS1'],
input_data['dS2'],
input_data['dS3'],
]
training_data = len(targets)*['zeros']
ls_scheduler = {'scheduler': 'exponentialdecay',
'initial_learning_rate':0.001,
'final_learning_rate': 0.00005,
'decay_epochs': 40_000,
'delay_epochs': 10_000}
# Data driven fit.
m1 = sn.SciModel(
inputs,
[g] + targets,
loss_func='mse',
optimizer='adam',
load_weights_from='transfer_learning_weights/vonMises_isotropic_stochastic-weights.hdf5'
)
h= m1.train(training_inputs,
[(np.array([0]), 0.0)] + training_data,
learning_rate=0.0005,
batch_size=1000,
epochs=MAX_EPOCHS,
log_parameters=dict(parameters=params, freq=1),
# reduce_lr_after=1000,
stop_after=1000,
shuffle=False,
verbose=1)
m1.save_weights(path + f"i-{iRealization}.hdf5")
print(f'Training for iRealization={iRealization} is completed! ')
lmbd_pred = lmbd.eval(m1, training_inputs).mean()
mu_pred = mu.eval(m1, training_inputs).mean()
bulk_pred = bulk.eval(m1, training_inputs).mean()
Sy_pred = SY0.eval(m1, training_inputs).mean()
Hp_pred = Hp.eval(m1, training_inputs).mean()
to_write = f'Training for iRealization={iRealization} is completed! \n'
to_write += f'{bulk_pred = } v.s. {bulk_true} \n'
to_write += f'{mu_pred = } v.s. {mu_true} \n'
to_write += f'{Sy_pred = } v.s. {Sy_true} \n'
to_write += f'{Hp_pred = } v.s. {Hp_true} \n \n'
print(to_write)
inverted_parameters["Realization"] += [iRealization]
inverted_parameters["G"] += [mu_pred/mu_true]
inverted_parameters["K"] += [bulk_pred/bulk_true]
inverted_parameters["Sy"] += [Sy_pred/Sy_true]
inverted_parameters["Hp"] += [Hp_pred/Hp_true]
mode = 'a' if iRealization>0 else 'w'
with open(path + "params.txt", mode) as f:
f.write(to_write)
g_pred = (g.eval(m1, training_inputs)*EPS_STAR)
pd.DataFrame({'epochs': np.arange(len(h.history['loss'])),
'loss': h.history['loss']}).to_csv(path + f"loss-i{iRealization}.csv")
pd.DataFrame({'t': input_data['t'].flatten(),
'gamma': g_pred.flatten()}).to_csv(path + f"gamma-i{iRealization}.csv")
for v, v_mult in zip(params, params_mult):
v_name = v.name
v_vals = np.array(h.history[v_name]).flatten()*v_mult
pd.DataFrame({'epochs': np.arange(len(h.history['loss'])),
v_name: v_vals}).to_csv(path + f"{v_name}-i{iRealization}.csv")
lplot = ax[0].semilogy(h.history['loss'])
icolor = lplot[0].get_color()
ax[1].plot(training_inputs[0], g_pred, color=icolor)
ax[1].plot(training_inputs[0], data['Ep_q'].values, color=icolor)
trues = [bulk_true, mu_true, Sy_true, Hp_true]
predictions = [bulk_pred, mu_pred, Sy_pred, Hp_pred]
labels = ['$\\kappa$', '$\\mu$', '$\\sigma_Y$', '$K$']
colors = ['black', 'red', 'blue', 'orange']
for ivar in range(len(trues)):
ax[2].scatter(ivar, predictions[ivar]/trues[ivar], c=icolor, marker='o', s=4)
df = pd.DataFrame.from_dict(inverted_parameters, orient='index')
df.to_csv(path + "params.csv")
ax[0].set_xlabel('epochs')
ax[0].set_ylabel('$\mathcal{L}$')
# ax[0].legend()
ax[1].set_xlabel('t')
ax[1].set_ylabel('$\gamma$')
# ax[1].legend()
ax[2].set_xticks([0,1,2,3], labels, rotation=0)
ax[2].set_ylabel('Pred/True')
ax[2].set_ylim(0, 2)
fig.subplots_adjust(left=0.1, right=0.9, bottom=0.15, top=0.9, wspace=0.3, hspace=0.3)
plt.savefig(path + "results.pdf", dpi=300)
plt.savefig(path + "results.pdf", dpi=300)
| 10,430 | 26.377953 | 96 |
py
|
sciann-applications
|
sciann-applications-master/SciANN-ConstitutiveModeling/Tensor.py
|
# Copyright 2022 SciANN -- Ehsan Haghighat.
# All Rights Reserved.
#
# Licensed under the MIT License.
#
# Tensor class:
# Abstract 3x3 tensorial operations, on vectorized data (batch),
# with elasto-plasticity applications in mind.
#
import numpy as np
import sciann as sn
class BaseTensor:
""" BaseTensor class.
"""
eps = np.finfo(float).eps
def __init__(self, lib):
if lib=='numpy':
self._lib = np
elif lib=='sciann':
self._lib = sn.math
else:
raise ValueError('lib must be "numpy" or "sciann"')
self._t = None
self._d = None
self._v = None
self._p = None
def update(self, val):
self._t = val.copy()
pp = -sum(self._t) / 3.0
self._v = -pp*3.0
self._p = pp
self._d = np.array([ti + pp for ti in self._t])
return self
def t(self):
return self._t
def d(self):
return self._d
def v(self):
return self._v
def dv(self):
return np.ones(3)
def p(self):
return self._p
def dp(self):
return -np.ones(3)/3.0
def q(self):
J2 = sum([di**2 for di in self._d]) / 2.
return self._lib.sqrt(3.0*J2 + self.eps)
def q2(self):
J2 = sum([di**2 for di in self._d]) / 2.
return 3.0*J2
def dq(self):
q = self.q()
return np.array([1.5*di / (q + self.eps) for di in self._d])
def dq2(self):
return np.array([3.*di for di in self._d])
def eq(self):
J2 = sum([di**2 for di in self._d]) / 2.
return self._lib.sqrt(4./3.*J2 + self.eps)
def deq(self):
q = self.q()
return np.array([2./3.*di / (q + self.eps) for di in self._d])
def r(self):
J3 = sum([di**3 for di in self._d])/3
return self._lib.pow(J3*27./2., 1./3.)
def dr(self):
r = self.r()
s2 = [di**2 for di in self._d]
return np.array([4.5*(s2i - sum(s2)/3.)/(r**2 + self.eps) for s2i in s2])
def cos3th(self):
c = (self.r() / (self.q() + self.eps))**3
return self.relu(c+1) - self.relu(c-1) -1
def dcos3th(self):
c = self.cos3th()
q = self.q()
r = self.r()
dq = self.dq()
dr = self.dr()
dc_dr = np.array([dri / (q + self.eps) for dri in dr])
dc_dq = np.array([-dqi*r/(q+self.eps)**2 for dqi in dq])
return np.array([3*c**2*(X+Y) for X,Y in zip(dc_dr, dc_dq)])
def th(self):
c3th = self.cos3th()
return self._lib.acos(c3th)/3.
def dth(self):
c3th = self.cos3th()
return np.array([- dci/(self._lib.sqrt(1 - c3th**2 + self.eps) + self.eps) / 3. for dci in self.dcos3th()])
def __call__(self):
return self._t
def relu(self, x):
if self._lib == np:
return np.maximum(0, x)
else:
return sn.math.relu(x)
class NpTensor(BaseTensor):
def __init__(self, val):
super().__init__("numpy")
self.update(val)
class SnTensor(BaseTensor):
def __init__(self, val):
super().__init__("sciann")
self.update(val)
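# Worked example (illustrative comments only, not part of the original module):
# s = NpTensor([1., 0., 0.]) # unit uniaxial principal stress state
# s.p() # -> -1/3 (compression-positive mean stress)
# s.d() # -> array([ 2/3, -1/3, -1/3]) (deviatoric part)
# s.q() # -> 1.0 (von Mises equivalent stress, sqrt(3*J2) with J2 = 1/3)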
| 3,280 | 22.775362 | 113 |
py
|
sciann-applications
|
sciann-applications-master/SciANN-ConstitutiveModeling/vonMises_isotropic_dimensionless.py
|
# Copyright 2022 SciANN -- Ehsan Haghighat.
# All Rights Reserved.
#
# Licensed under the MIT License.
#
# vonMises_isotropic_dimensionless.py
#
# Main code for characterizing the von Mises model with isotropic hardening
#
# Requirements:
# - data/vonMises.csv
#
import numpy as np
import matplotlib.pyplot as plt
from Tensor import SnTensor as Tensor
import sciann as sn
import pandas as pd
from sciann.utils.math import sign, relu, abs, diff, step, tanh, sigmoid, exp
import sys
import os
file_name = os.path.basename(sys.argv[0]).split('.')[0]
if not os.path.exists(file_name):
os.mkdir(file_name)
path = os.path.join(file_name, 'res_')
# true values.
E_true = 200e9
nu_true = 0.2
mu_true = E_true/2/(1+nu_true)
lmbd_true = E_true*nu_true/(1+nu_true)/(1-2*nu_true)
bulk_true = lmbd_true + 2/3*mu_true
Sy_true = 200e6
Hp_true = 10e9
inverted_parameters = {
"delta": [0.],
"G": [mu_true],
"K": [bulk_true],
"Sy": [Sy_true],
"Hp":[Hp_true]
}
# data
data = pd.read_csv('data/vonMises.csv')
class Normalizer:
def __init__(self, x):
self.mu = x.mean()
self.sig = x.std()
def transform(self, x):
return (x - self.mu) / self.sig
def inv_transform(self, x):
return x * self.sig + self.mu
input_data = {}
scalar = {}
t = data['t'].values
dt = np.gradient(t)
sig = data['S1'].values
dsig = np.gradient(sig)/dt
eps = data['E1'].values
deps = np.gradient(eps)/dt
SIG_STAR = np.abs(sig).max()
EPS_STAR = np.abs(eps).max()
ELS_STAR = SIG_STAR/EPS_STAR
mu_true /= ELS_STAR
bulk_true /= ELS_STAR
lmbd_true /= ELS_STAR
Hp_true /= ELS_STAR
Sy_true /= SIG_STAR
input_data['t'] = data['t'].values.reshape(-1,1)
input_data['dt'] = np.gradient(data['t'].values).reshape(-1,1)
scalar["t"] = Normalizer(input_data['t'])
for v in ['E1', 'E2', 'E3', 'Eq', 'Ep_q']:
x = data[v].values / EPS_STAR
input_data[v] = x.reshape(-1,1)
dx = (np.gradient(x).reshape(-1,1) / input_data['dt'])
input_data["d"+v] = dx
for v in ['S1', 'S2', 'S3', 'Sq']:
x = data[v].values / SIG_STAR
input_data[v] = x.reshape(-1,1)
dx = (np.gradient(x).reshape(-1,1) / input_data['dt'])
input_data["d"+v] = dx
fig, ax = plt.subplots(1, 3, figsize=(15, 4))
delta_vals = [10, 50, 100, 200, 500]
for idelta, delta in enumerate(delta_vals):
def sigmoidf(x):
return sigmoid(delta*x)
sn.reset_session()
sn.set_random_seed(12345)
DTYPE = 'float32'
# Variables
t = sn.Variable('t', dtype=DTYPE)
# Strain components
E1 = sn.Variable('E1', dtype=DTYPE)
E2 = sn.Variable('E2', dtype=DTYPE)
E3 = sn.Variable('E3', dtype=DTYPE)
E = Tensor([E1, E2, E3])
# Deviatoric strain components
e1, e2, e3 = E.d()
Ev, Eeq = E.v(), E.eq()
# Incremental strain components
dE1 = sn.Variable('dE1', dtype=DTYPE)
dE2 = sn.Variable('dE2', dtype=DTYPE)
dE3 = sn.Variable('dE3', dtype=DTYPE)
dE = Tensor([dE1, dE2, dE3])
# Deviatoric incremental strain components
de1, de2, de3 = dE.d()
dEv, dEeq = dE.v(), dE.eq()
# Stress components
S1 = sn.Variable('S1', dtype=DTYPE)
S2 = sn.Variable('S2', dtype=DTYPE)
S3 = sn.Variable('S3', dtype=DTYPE)
S = Tensor([S1, S2, S3])
# Deviatoric stress components
s1, s2, s3 = S.d()
p, q = S.p(), S.q()
# Incremental stress components
dS1 = sn.Variable('dS1', dtype=DTYPE)
dS2 = sn.Variable('dS2', dtype=DTYPE)
dS3 = sn.Variable('dS3', dtype=DTYPE)
dS = Tensor([dS1, dS2, dS3])
# Deviatoric stress components
ds1, ds2, ds3 = dS.d()
dp, dq = dS.p(), dS.q()
t_s = scalar['t'].transform(t)
g = sn.Functional('g', [t_s], 8*[20], 'tanh')
dg = diff(g, t)
lmbd_par = sn.Parameter(np.random.rand(), inputs=[t_s], name='lmbd')
lmbd = lmbd_par*lmbd_true
mu_par = sn.Parameter(np.random.rand(), inputs=[t_s], name='mu')
mu = mu_par*mu_true
bulk = lmbd + 2*mu/3
SY0_par = sn.Parameter(np.random.rand(), inputs=[t_s], name='SY0')
SY0 = SY0_par*Sy_true
Hp_par = sn.Parameter(np.random.rand(), inputs=[t_s], name='Hp')
Hp = Hp_par*Hp_true
Sy = SY0 + Hp*g
params = [lmbd_par, mu_par, SY0_par, Hp_par]
params_mult = [lmbd_true, mu_true, Sy_true, Hp_true]
F = q - Sy
dF = dq - Hp*dg
Ce = np.array([[lmbd + 2*mu, lmbd, lmbd],
[lmbd, lmbd + 2*mu, lmbd],
[lmbd, lmbd, lmbd + 2*mu]])
n = np.array(S.dq())
dEt = np.array(dE._t)
He = np.dot(np.dot(n, Ce), n)
dL = np.dot(np.dot(n, Ce), dEt) #/ (He + Hp)
dL = np.dot(n, dS()) #/ (He + Hp)
dGamma = np.dot(np.dot(n, Ce), dEt) / (He + Hp)
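# dGamma is the plastic multiplier rate obtained from the consistency condition dF = 0:
# dGamma = (n @ Ce @ dE) / (n @ Ce @ n + Hp), with n = dq/dS the von Mises flow direction.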
e1_e = s1/(2.*mu)
e2_e = s2/(2.*mu)
e3_e = s3/(2.*mu)
e1_p = g * n[0]
e2_p = g * n[1]
e3_p = g * n[2]
Eeq_p = g
de1_e = ds1/(2*mu)
de2_e = ds2/(2*mu)
de3_e = ds3/(2*mu)
de1_p = dg * n[0]
de2_p = dg * n[1]
de3_p = dg * n[2]
de1_tr = de1_e + de1_p
de2_tr = de2_e + de2_p
de3_tr = de3_e + de3_p
dEeq_p = dg
inputs = [t,
E1, E2, E3, dE1, dE2, dE3,
S1, S2, S3, dS1, dS2, dS3]
dL_scaler = np.abs(input_data['dS1']).max()
targets = [
sigmoidf(-g) * abs(g),
sigmoidf(-dg) * abs(dg),
sigmoidf(F) * abs(F),
(dg) * (F),
sigmoidf(-F) * (de1 - de1_e),
sigmoidf(-F) * (de2 - de2_e),
sigmoidf(-F) * (de3 - de3_e),
sigmoidf(-dL/dL_scaler) * (de1 - de1_e),
sigmoidf(-dL/dL_scaler) * (de2 - de2_e),
sigmoidf(-dL/dL_scaler) * (de3 - de3_e),
sigmoidf(F)*(dg - dGamma),
sigmoidf(dL/dL_scaler) * sigmoidf(F) * (de1 - de1_tr),
sigmoidf(dL/dL_scaler) * sigmoidf(F) * (de2 - de2_tr),
sigmoidf(dL/dL_scaler) * sigmoidf(F) * (de3 - de3_tr),
(Ev + p/bulk),
]
training_inputs = [
input_data['t'],
input_data['E1'],
input_data['E2'],
input_data['E3'],
input_data['dE1'],
input_data['dE2'],
input_data['dE3'],
input_data['S1'],
input_data['S2'],
input_data['S3'],
input_data['dS1'],
input_data['dS2'],
input_data['dS3'],
]
training_data = len(targets)*['zeros']
ls_scheduler = {'scheduler': 'exponentialdecay',
'initial_learning_rate':0.001,
'final_learning_rate': 0.00005,
'decay_epochs': 40_000,
'delay_epochs': 10_000}
# Data driven fit.
m1 = sn.SciModel(inputs, [g] + targets, loss_func='mse', optimizer='adam')
h= m1.train(training_inputs,
[(np.array([0]), 0.0)] + training_data,
learning_rate=ls_scheduler,
batch_size=1000,
epochs=50_000,
log_parameters=dict(parameters=params, freq=1),
# reduce_lr_after=1000,
stop_after=5000,
shuffle=False,
verbose=-1)
m1.save_weights(path + f"d-{delta}.hdf5")
print('Training for $\\delta$={} is completed! '.format(delta))
# print('Lmbd = {:12.4e} vs {:12.4e}'.format(lmbd.eval(m1, training_inputs).mean(), lmbd_true))
# print('Mu = {:12.4e} vs {:12.4e}'.format(mu.eval(m1, training_inputs).mean(), mu_true))
# print('Sy = {:12.4e} vs {:12.4e}'.format(SY0.eval(m1, training_inputs).mean(), Sy_true))
# print('Hp = {:12.4e} vs {:12.4e}'.format(Hp.eval(m1, training_inputs).mean(), Hp_true))
lmbd_pred = lmbd.eval(m1, training_inputs).mean()
mu_pred = mu.eval(m1, training_inputs).mean()
bulk_pred = bulk.eval(m1, training_inputs).mean()
Sy_pred = SY0.eval(m1, training_inputs).mean()
Hp_pred = Hp.eval(m1, training_inputs).mean()
to_write = 'Training for $\\delta$={} is completed! \n'.format(delta)
to_write += f'{bulk_pred = } v.s. {bulk_true} \n'
to_write += f'{mu_pred = } v.s. {mu_true} \n'
to_write += f'{Sy_pred = } v.s. {Sy_true} \n'
to_write += f'{Hp_pred = } v.s. {Hp_true} \n \n'
print(to_write)
inverted_parameters["delta"] += [delta]
inverted_parameters["G"] += [mu_pred*ELS_STAR]
inverted_parameters["K"] += [bulk_pred*ELS_STAR]
inverted_parameters["Sy"] += [Sy_pred*SIG_STAR]
inverted_parameters["Hp"] += [Hp_pred*ELS_STAR]
mode = 'a' if idelta>0 else 'w'
with open(path + "params.txt", mode) as f:
f.write(to_write)
g_pred = (g.eval(m1, training_inputs)*EPS_STAR)
ax[0].semilogy(h.history['loss'], label='$\delta$={}'.format(delta))
ax[1].plot(training_inputs[0], g_pred, label='$\delta$={}'.format(delta))
ax[2].loglog(4*[delta], [lmbd_pred, mu_pred, Sy_pred, Hp_pred], '*')
pd.DataFrame({'epochs': np.arange(len(h.history['loss'])),
'loss': h.history['loss']}).to_csv(path + f"loss-d{delta}.csv")
pd.DataFrame({'t': input_data['t'].flatten(), 'gamma': g_pred.flatten()}).to_csv(path + f"gamma-d{delta}.csv")
for v, v_mult in zip(params, params_mult):
v_name = v.name
v_vals = np.array(h.history[v_name]).flatten()*v_mult
pd.DataFrame({'epochs': np.arange(len(h.history['loss'])),
v_name: v_vals}).to_csv(path + f"{v_name}-d{delta}.csv")
df = pd.DataFrame.from_dict(inverted_parameters, orient='index')
df.to_csv(path + "params.csv")
ax[0].set_xlabel('epochs')
ax[0].set_ylabel('$\mathcal{L}$')
ax[0].legend()
ax[1].set_xlabel('t')
ax[1].set_ylabel('$\gamma$')
ax[1].legend()
ax[1].plot(training_inputs[0], data['Ep_q'].values, 'k')
lw=3.0
for var, comp in zip([Sy_true, Hp_true, lmbd_true, mu_true],
['$\\sigma_Y$', '$H_p$', '$\\lambda$', '$\\mu$']):
ax[2].loglog(delta_vals, np.ones_like(delta_vals)*var, '--k', lw=lw, label=comp)
lw -= 0.5
ax[2].set_xlabel('$\\delta$')
ax[2].set_ylabel('$\\sigma_Y, ~H_p, ~\\lambda, ~\\mu$')
ax[2].legend()
plt.legend()
fig.subplots_adjust(left=0.1, right=0.9, bottom=0.15, top=0.9, wspace=0.3, hspace=0.3)
plt.savefig(path + "results.pdf", dpi=300)
plt.savefig(path + "results.png", dpi=300)
| 10,131 | 26.911846 | 114 |
py
|
sciann-applications
|
sciann-applications-master/SciANN-ConstitutiveModeling/druckerPrager_dimensionless-biaxial.py
|
# Copyright 2022 SciANN -- Ehsan Haghighat.
# All Rights Reserved.
#
# Licensed under the MIT License.
#
# druckerPrager_dimensionless-biaxial.py
#
# Main code for characterizing the Drucker-Prager model under biaxial loading
#
# Requirements:
# - data/druckerPrager-biaxial.csv
#
import numpy as np
import matplotlib.pyplot as plt
from Tensor import SnTensor as Tensor
import sciann as sn
import pandas as pd
from sciann.utils.math import sign, relu, abs, diff, step, tanh, sigmoid, exp
import sys
import os
file_name = os.path.basename(sys.argv[0]).split('.')[0]
if not os.path.exists(file_name):
os.mkdir(file_name)
path = os.path.join(file_name, 'res_')
EPOCH_MAX = 50_000
# true values.
NU = 0.25
G_TRUE = 100e6
K_TRUE = 2/3*(1+NU)/(1-2*NU)*G_TRUE
SY_TRUE = 100.e3
BETA_TRUE = np.radians(30.0)
ETA_TRUE = np.tan(BETA_TRUE)
SHAPE = 1.
VOID0 = 0.5
inverted_parameters = {"delta": [0.], "G": [G_TRUE], "K": [K_TRUE], "Sy": [SY_TRUE], "eta":[ETA_TRUE]}
DTYPE = 'float32'
# data
data = pd.read_csv('data/druckerPrager-biaxial.csv')
class Normalizer:
def __init__(self, x):
self.mu = x.mean()
self.sig = x.std()
def transform(self, x):
return (x - self.mu) / self.sig
def inv_transform(self, x):
return x * self.sig + self.mu
def softstep(x):
return (tanh(100*x) + 1.0)/2.0
def sigmoidf(x, a=100):
return sigmoid(a*x)
def gaussian(x, a=100):
return exp(-(x*a)**2)
input_data = {}
scalar = {}
t = data['t'].values
dt = np.gradient(t)
sig = data['S1'].values
dsig = np.gradient(sig)/dt
eps = data['E1'].values
deps = np.gradient(eps)/dt
SIG_STAR = np.abs(sig).max()
EPS_STAR = np.abs(eps).max()
ELS_STAR = SIG_STAR/EPS_STAR
G_TRUE /= ELS_STAR
K_TRUE /= ELS_STAR
SY_TRUE /= SIG_STAR
input_data['t'] = data['t'].values.reshape(-1,1)
input_data['dt'] = np.gradient(data['t'].values).reshape(-1,1)
scalar["t"] = Normalizer(input_data['t'])
for v in ['E1', 'E2', 'E3', 'gamma', 'Ep_q', 'Eq']:
x = data[v].values / EPS_STAR
input_data[v] = x.reshape(-1,1)
dx = (np.gradient(x).reshape(-1,1) / input_data['dt'])
input_data["d"+v] = dx
for v in ['S1', 'S2', 'S3', 'Sq']:
x = data[v].values / SIG_STAR
input_data[v] = x.reshape(-1,1)
dx = (np.gradient(x).reshape(-1,1) / input_data['dt'])
input_data["d"+v] = dx
fig, ax = plt.subplots(1, 3, figsize=(15, 4))
delta_vals = [10, 50, 100, 200, 500]
for idelta, delta in enumerate(delta_vals):
def sigmoidf(x):
return sigmoid(delta*x)
sn.reset_session()
sn.set_random_seed(12345)
# Variables
t = sn.Variable('t', dtype=DTYPE)
# Strain components
E1 = sn.Variable('E1', dtype=DTYPE)
E2 = sn.Variable('E2', dtype=DTYPE)
E3 = sn.Variable('E3', dtype=DTYPE)
E = Tensor([E1, E2, E3])
# Deviatoric strain components
e1, e2, e3 = E.d()
Ev, Eeq = E.v(), E.eq()
# Incremental strain components
dE1 = sn.Variable('dE1', dtype=DTYPE)
dE2 = sn.Variable('dE2', dtype=DTYPE)
dE3 = sn.Variable('dE3', dtype=DTYPE)
dE = Tensor([dE1, dE2, dE3])
# Deviatoric incremental strain components
de1, de2, de3 = dE.d()
dEv, dEeq = dE.v(), dE.eq()
# Stress components
S1 = sn.Variable('S1', dtype=DTYPE)
S2 = sn.Variable('S2', dtype=DTYPE)
S3 = sn.Variable('S3', dtype=DTYPE)
S = Tensor([S1, S2, S3])
# Deviatoric stress components
s1, s2, s3 = S.d()
p, q, r = S.p(), S.q(), S.r()
# Incremental stress components
dS1 = sn.Variable('dS1', dtype=DTYPE)
dS2 = sn.Variable('dS2', dtype=DTYPE)
dS3 = sn.Variable('dS3', dtype=DTYPE)
dS = Tensor([dS1, dS2, dS3])
    # Deviatoric incremental stress components
ds1, ds2, ds3 = dS.d()
dp, dq, dr = dS.p(), dS.q(), dS.r()
t_s = scalar['t'].transform(t)
g = sn.Functional('g', [t_s], 8*[20], 'tanh')
dg = diff(g, t)
bulk_par = sn.Parameter(np.random.rand(), inputs=[t], min_max=[0.1, 10], name='bulk')
bulk = bulk_par*K_TRUE
shear_par = sn.Parameter(np.random.rand(), inputs=[t], min_max=[0.1, 10], name='shear')
shear = shear_par*G_TRUE
lame = bulk - shear*(2./3.)
eta_par = sn.Parameter(np.random.rand(), inputs=[t], min_max=[0.1, 10], name='eta')
eta = eta_par*ETA_TRUE
sy_par = sn.Parameter(1., inputs=[t], min_max=[0.5, 2], name='SY0')
sy = sy_par*SY_TRUE
params = [bulk_par, shear_par, eta_par, sy_par]
params_mult = [K_TRUE, G_TRUE, ETA_TRUE, SY_TRUE]
R = 0.5*(1 + 1/SHAPE) # - (1-SHAPE)*(r/q)**3)
G = R*q - p*eta
dG_dp = -eta
dG_dq = R
Eeq_q = g * dG_dq
dEeq_q = dg * dG_dq
py = sy
dpy = 0.
F = G - py
dF_dp = dG_dp
dF_dq = dG_dq
dF_dg = -dpy*dF_dp
dF = S.dp()*dF_dp + S.dq()*dF_dq
Ce = np.array([[lame + 2*shear, lame, lame],
[lame, lame + 2*shear, lame],
[lame, lame, lame + 2*shear]])
ce_jac = (3.*lame + 2.*shear)*shear
Ce_inv = np.array([[lame+shear, -lame/2., -lame/2.],
[-lame/2., lame+shear, -lame/2.],
[-lame/2., -lame/2., lame+shear]]) / ce_jac
dFN = np.matmul(Ce, dF)
He = np.dot(dF, dFN)
Hp = dF_dg
dE_e = np.matmul(Ce_inv, dS())
dE_e_v = sum(dE_e)
dE_p = dF*dg
dE_tr = dE_e + dE_p
dE_tr_v = sum(dE_tr)
dGamma = np.dot(dFN, dE())/(He - Hp)
dLoad = np.dot(dF, dS())
# dLoad = np.dot(dFN, dE())
inputs = [t,
E1, E2, E3, dE1, dE2, dE3,
S1, S2, S3, dS1, dS2, dS3]
dL_scaler = np.abs(input_data['dS1']).max()
targets = [
sigmoidf(-g) * abs(g),
sigmoidf(-dg) * abs(dg),
sigmoidf(F) * abs(F),
(dg) * (F),
sigmoidf(F) * (dg - dGamma),
sigmoidf(-F)*(dE1 - dE_e[0]),
sigmoidf(-F)*(dE2 - dE_e[1]),
sigmoidf(-F)*(dE3 - dE_e[2]),
sigmoidf(-F)*(dEv - dE_e_v),
(dLoad < 0.) * (dE1 - dE_e[0]),
(dLoad < 0.) * (dE2 - dE_e[1]),
(dLoad < 0.) * (dE3 - dE_e[2]),
(dLoad < 0.) * (dEv - dE_e_v),
(dLoad > 0.) * sigmoidf(F) * (dE1 - dE_tr[0]),
(dLoad > 0.) * sigmoidf(F) * (dE2 - dE_tr[1]),
(dLoad > 0.) * sigmoidf(F) * (dE3 - dE_tr[2]),
(dLoad > 0.) * sigmoidf(F) * (dEv - dE_tr_v),
]
training_inputs = [
input_data['t'],
input_data['E1'],
input_data['E2'],
input_data['E3'],
input_data['dE1'],
input_data['dE2'],
input_data['dE3'],
input_data['S1'],
input_data['S2'],
input_data['S3'],
input_data['dS1'],
input_data['dS2'],
input_data['dS3'],
]
training_data = len(targets)*['zeros']
# Data driven fit.
m1 = sn.SciModel(inputs, [g] + targets, optimizer='adam')
ls_scheduler = {'scheduler': 'exponentialdecay',
'initial_learning_rate':0.002,
'final_learning_rate': 0.0002,
'decay_epochs': 40_000,
'delay_epochs': 10_000}
h= m1.train(training_inputs,
[(np.array([0]), 0.0)] + training_data,
learning_rate=ls_scheduler,
batch_size=10000,
epochs=EPOCH_MAX,
log_parameters=dict(parameters=params, freq=1),
reduce_lr_after=1000,
stop_after=2000,
shuffle=False,
verbose=1)
m1.save_weights(path + f"d-{delta}.hdf5")
bulk_pred = bulk.eval(m1, training_inputs).mean()
shear_pred = shear.eval(m1, training_inputs).mean()
eta_pred = eta.eval(m1, training_inputs).mean()
sy_pred = sy.eval(m1, training_inputs).mean()
to_write = 'Training for $\\delta$={} is completed! \n'.format(delta)
to_write += f'{bulk_pred = } v.s. {K_TRUE} \n'
to_write += f'{shear_pred = } v.s. {G_TRUE} \n'
to_write += f'{eta_pred = } v.s. {ETA_TRUE} \n'
to_write += f'{sy_pred = } v.s. {SY_TRUE} \n \n'
print(to_write)
inverted_parameters["delta"] += [delta]
inverted_parameters["G"] += [np.abs(shear_pred/G_TRUE - 1.)]
inverted_parameters["K"] += [np.abs(bulk_pred/K_TRUE - 1.)]
inverted_parameters["Sy"] += [np.abs(sy_pred/SY_TRUE - 1.)]
inverted_parameters["eta"] += [np.abs(eta_pred/ETA_TRUE - 1.)]
mode = 'a' if idelta>0 else 'w'
with open(path + "params.txt", mode) as f:
f.write(to_write)
g_pred = (g.eval(m1, training_inputs)*EPS_STAR)
    ax[0].semilogy(h.history['loss'], label='$\\delta$={}'.format(delta))
    ax[1].plot(training_inputs[0], g_pred, label='$\\delta$={}'.format(delta))
ax[2].loglog(4*[delta], [eta_pred, sy_pred, shear_pred, bulk_pred], '*')
pd.DataFrame({'epochs': np.arange(len(h.history['loss'])),
'loss': h.history['loss']}).to_csv(path + f"loss-d{delta}.csv")
pd.DataFrame({'t': input_data['t'].flatten(), 'gamma': g_pred.flatten()}).to_csv(path + f"gamma-d{delta}.csv")
for v, v_mult in zip(params, params_mult):
v_name = v.name
v_vals = np.array(h.history[v_name]).flatten()*v_mult
pd.DataFrame({'epochs': np.arange(len(h.history['loss'])),
v_name: v_vals}).to_csv(path + f"{v_name}-d{delta}.csv")
df = pd.DataFrame.from_dict(inverted_parameters, orient='index')
df.to_csv(path + "params.csv")
ax[0].set_xlabel('epochs')
ax[0].set_ylabel('MSE')
ax[0].legend()
ax[1].set_xlabel('t')
ax[1].set_ylabel('$\\gamma$')
ax[1].legend()
ax[1].plot(training_inputs[0], data['gamma'].values, 'k')
lw=3.0
for var, comp in zip([ETA_TRUE, SY_TRUE, G_TRUE, K_TRUE],
['$tan(\\beta)$', '$C$', '$\\mu$', '$\\kappa$']):
ax[2].loglog(delta_vals, np.ones_like(delta_vals)*var, '--k', lw=lw, label=comp)
lw -= 0.5
ax[2].set_xlabel('$\\delta$')
ax[2].set_ylabel('$\\eta, ~C, ~\\mu, ~\\kappa$')
ax[2].legend()
fig.subplots_adjust(left=0.1, right=0.9, bottom=0.15, top=0.9, wspace=0.2, hspace=0.3)
plt.savefig(path + "results.pdf", dpi=300)
plt.savefig(path + "results.png", dpi=300)
| 10,077 | 25.803191 | 114 |
py
|
sciann-applications
|
sciann-applications-master/SciANN-ConstitutiveModeling/druckerPrager_dimensionless-biaxial-undrained.py
|
# Copyright 2022 SciANN -- Ehsan Haghighat.
# All Rights Reserved.
#
# Licensed under the MIT License.
#
# druckerPrager_dimensionless-biaxial-undrained.py
#
# Main code for characterizing Drucker-Prager model for undrained biaxial loading
#
# Requirements:
# - data/druckerPrager-biaxial-undrained.csv
#
import numpy as np
import matplotlib.pyplot as plt
from Tensor import SnTensor as Tensor
import sciann as sn
import pandas as pd
from sciann.utils.math import sign, relu, abs, diff, step, tanh, sigmoid, exp
import sys
import os
file_name = os.path.basename(sys.argv[0]).split('.')[0]
if not os.path.exists(file_name):
os.mkdir(file_name)
path = os.path.join(file_name, 'res_')
EPOCH_MAX = 50_000
# true values.
NU = 0.25
G_TRUE = 100e6
K_TRUE = 2/3*(1+NU)/(1-2*NU)*G_TRUE
SY_TRUE = 100.e3
BETA_TRUE = np.radians(30.0)
ETA_TRUE = np.tan(BETA_TRUE)
SHAPE = 1.
VOID0 = 0.5
inverted_parameters = {"delta": [0.], "G": [G_TRUE], "K": [K_TRUE], "Sy": [SY_TRUE], "eta":[ETA_TRUE]}
DTYPE = 'float32'
# data
data = pd.read_csv('data/druckerPrager-biaxial-undrained.csv')
class Normalizer:
def __init__(self, x):
self.mu = x.mean()
self.sig = x.std()
def transform(self, x):
return (x - self.mu) / self.sig
def inv_transform(self, x):
return x * self.sig + self.mu
def softstep(x):
return (tanh(100*x) + 1.0)/2.0
def sigmoidf(x, a=100):
return sigmoid(a*x)
def gaussian(x, a=100):
return exp(-(x*a)**2)
input_data = {}
scalar = {}
t = data['t'].values
dt = np.gradient(t)
sig = data['S1'].values
dsig = np.gradient(sig)/dt
eps = data['E1'].values
deps = np.gradient(eps)/dt
SIG_STAR = np.abs(sig).max()
EPS_STAR = np.abs(eps).max()
ELS_STAR = SIG_STAR/EPS_STAR
G_TRUE /= ELS_STAR
K_TRUE /= ELS_STAR
SY_TRUE /= SIG_STAR
input_data['t'] = data['t'].values.reshape(-1,1)
input_data['dt'] = np.gradient(data['t'].values).reshape(-1,1)
scalar["t"] = Normalizer(input_data['t'])
for v in ['E1', 'E2', 'E3', 'gamma', 'Ep_q', 'Eq']:
x = data[v].values / EPS_STAR
input_data[v] = x.reshape(-1,1)
dx = (np.gradient(x).reshape(-1,1) / input_data['dt'])
input_data["d"+v] = dx
for v in ['S1', 'S2', 'S3', 'Sq']:
x = data[v].values / SIG_STAR
input_data[v] = x.reshape(-1,1)
dx = (np.gradient(x).reshape(-1,1) / input_data['dt'])
input_data["d"+v] = dx
fig, ax = plt.subplots(1, 3, figsize=(15, 4))
delta_vals = [10, 50, 100, 200, 500]
for idelta, delta in enumerate(delta_vals):
def sigmoidf(x):
return sigmoid(delta*x)
sn.reset_session()
sn.set_random_seed(12345)
# Variables
t = sn.Variable('t', dtype=DTYPE)
# Strain components
E1 = sn.Variable('E1', dtype=DTYPE)
E2 = sn.Variable('E2', dtype=DTYPE)
E3 = sn.Variable('E3', dtype=DTYPE)
E = Tensor([E1, E2, E3])
# Deviatoric strain components
e1, e2, e3 = E.d()
Ev, Eeq = E.v(), E.eq()
# Incremental strain components
dE1 = sn.Variable('dE1', dtype=DTYPE)
dE2 = sn.Variable('dE2', dtype=DTYPE)
dE3 = sn.Variable('dE3', dtype=DTYPE)
dE = Tensor([dE1, dE2, dE3])
# Deviatoric incremental strain components
de1, de2, de3 = dE.d()
dEv, dEeq = dE.v(), dE.eq()
# Stress components
S1 = sn.Variable('S1', dtype=DTYPE)
S2 = sn.Variable('S2', dtype=DTYPE)
S3 = sn.Variable('S3', dtype=DTYPE)
S = Tensor([S1, S2, S3])
# Deviatoric stress components
s1, s2, s3 = S.d()
p, q, r = S.p(), S.q(), S.r()
# Incremental stress components
dS1 = sn.Variable('dS1', dtype=DTYPE)
dS2 = sn.Variable('dS2', dtype=DTYPE)
dS3 = sn.Variable('dS3', dtype=DTYPE)
dS = Tensor([dS1, dS2, dS3])
    # Deviatoric incremental stress components
ds1, ds2, ds3 = dS.d()
dp, dq, dr = dS.p(), dS.q(), dS.r()
t_s = scalar['t'].transform(t)
g = sn.Functional('g', [t_s], 8*[20], 'tanh')
dg = diff(g, t)
bulk_par = sn.Parameter(np.random.rand(), inputs=[t], min_max=[0.1, 10], name='bulk')
bulk = bulk_par*K_TRUE
shear_par = sn.Parameter(np.random.rand(), inputs=[t], min_max=[0.1, 10], name='shear')
shear = shear_par*G_TRUE
lame = bulk - shear*(2./3.)
eta_par = sn.Parameter(np.random.rand(), inputs=[t], min_max=[0.1, 10], name='eta')
eta = eta_par*ETA_TRUE
sy_par = sn.Parameter(1., inputs=[t], min_max=[0.5, 2], name='SY0')
sy = sy_par*SY_TRUE
params = [bulk_par, shear_par, eta_par, sy_par]
params_mult = [K_TRUE, G_TRUE, ETA_TRUE, SY_TRUE]
R = 0.5*(1 + 1/SHAPE) # - (1-SHAPE)*(r/q)**3)
G = R*q - p*eta
dG_dp = -eta
dG_dq = R
Eeq_q = g * dG_dq
dEeq_q = dg * dG_dq
py = sy
dpy = 0.
F = G - py
dF_dp = dG_dp
dF_dq = dG_dq
dF_dg = -dpy*dF_dp
dF = S.dp()*dF_dp + S.dq()*dF_dq
Ce = np.array([[lame + 2*shear, lame, lame],
[lame, lame + 2*shear, lame],
[lame, lame, lame + 2*shear]])
ce_jac = (3.*lame + 2.*shear)*shear
Ce_inv = np.array([[lame+shear, -lame/2., -lame/2.],
[-lame/2., lame+shear, -lame/2.],
[-lame/2., -lame/2., lame+shear]]) / ce_jac
dFN = np.matmul(Ce, dF)
He = np.dot(dF, dFN)
Hp = dF_dg
dE_e = np.matmul(Ce_inv, dS())
dE_e_v = sum(dE_e)
dE_p = dF*dg
dE_tr = dE_e + dE_p
dE_tr_v = sum(dE_tr)
dGamma = np.dot(dFN, dE())/(He - Hp)
dLoad = np.dot(dF, dS())
# dLoad = np.dot(dFN, dE())
inputs = [t,
E1, E2, E3, dE1, dE2, dE3,
S1, S2, S3, dS1, dS2, dS3]
dL_scaler = np.abs(input_data['dS1']).max()
targets = [
sigmoidf(-g) * abs(g),
sigmoidf(-dg) * abs(dg),
sigmoidf(F) * abs(F),
(dg) * (F),
sigmoidf(F) * (dg - dGamma),
sigmoidf(-F)*(dE1 - dE_e[0]),
sigmoidf(-F)*(dE2 - dE_e[1]),
sigmoidf(-F)*(dE3 - dE_e[2]),
sigmoidf(-F)*(dEv - dE_e_v),
(dLoad < 0.) * (dE1 - dE_e[0]),
(dLoad < 0.) * (dE2 - dE_e[1]),
(dLoad < 0.) * (dE3 - dE_e[2]),
(dLoad < 0.) * (dEv - dE_e_v),
(dLoad > 0.) * sigmoidf(F) * (dE1 - dE_tr[0]),
(dLoad > 0.) * sigmoidf(F) * (dE2 - dE_tr[1]),
(dLoad > 0.) * sigmoidf(F) * (dE3 - dE_tr[2]),
(dLoad > 0.) * sigmoidf(F) * (dEv - dE_tr_v),
]
training_inputs = [
input_data['t'],
input_data['E1'],
input_data['E2'],
input_data['E3'],
input_data['dE1'],
input_data['dE2'],
input_data['dE3'],
input_data['S1'],
input_data['S2'],
input_data['S3'],
input_data['dS1'],
input_data['dS2'],
input_data['dS3'],
]
training_data = len(targets)*['zeros']
# Data driven fit.
m1 = sn.SciModel(inputs, [g] + targets, optimizer='adam')
ls_scheduler = {'scheduler': 'exponentialdecay',
'initial_learning_rate':0.002,
'final_learning_rate': 0.0002,
'decay_epochs': 40_000,
'delay_epochs': 10_000}
h= m1.train(training_inputs,
[(np.array([0]), 0.0)] + training_data,
learning_rate=ls_scheduler,
batch_size=10000,
epochs=EPOCH_MAX,
log_parameters=dict(parameters=params, freq=1),
reduce_lr_after=1000,
stop_after=2000,
shuffle=False,
verbose=1)
m1.save_weights(path + f"d-{delta}.hdf5")
bulk_pred = bulk.eval(m1, training_inputs).mean()
shear_pred = shear.eval(m1, training_inputs).mean()
eta_pred = eta.eval(m1, training_inputs).mean()
sy_pred = sy.eval(m1, training_inputs).mean()
to_write = 'Training for $\\delta$={} is completed! \n'.format(delta)
to_write += f'{bulk_pred = } v.s. {K_TRUE} \n'
to_write += f'{shear_pred = } v.s. {G_TRUE} \n'
to_write += f'{eta_pred = } v.s. {ETA_TRUE} \n'
to_write += f'{sy_pred = } v.s. {SY_TRUE} \n \n'
print(to_write)
inverted_parameters["delta"] += [delta]
inverted_parameters["G"] += [np.abs(shear_pred/G_TRUE - 1.)]
inverted_parameters["K"] += [np.abs(bulk_pred/K_TRUE - 1.)]
inverted_parameters["Sy"] += [np.abs(sy_pred/SY_TRUE - 1.)]
inverted_parameters["eta"] += [np.abs(eta_pred/ETA_TRUE - 1.)]
mode = 'a' if idelta>0 else 'w'
with open(path + "params.txt", mode) as f:
f.write(to_write)
g_pred = (g.eval(m1, training_inputs)*EPS_STAR)
    ax[0].semilogy(h.history['loss'], label='$\\delta$={}'.format(delta))
    ax[1].plot(training_inputs[0], g_pred, label='$\\delta$={}'.format(delta))
ax[2].loglog(4*[delta], [eta_pred, sy_pred, shear_pred, bulk_pred], '*')
pd.DataFrame({'epochs': np.arange(len(h.history['loss'])),
'loss': h.history['loss']}).to_csv(path + f"loss-d{delta}.csv")
pd.DataFrame({'t': input_data['t'].flatten(), 'gamma': g_pred.flatten()}).to_csv(path + f"gamma-d{delta}.csv")
for v, v_mult in zip(params, params_mult):
v_name = v.name
v_vals = np.array(h.history[v_name]).flatten()*v_mult
pd.DataFrame({'epochs': np.arange(len(h.history['loss'])),
v_name: v_vals}).to_csv(path + f"{v_name}-d{delta}.csv")
df = pd.DataFrame.from_dict(inverted_parameters, orient='index')
df.to_csv(path + "params.csv")
ax[0].set_xlabel('epochs')
ax[0].set_ylabel('MSE')
ax[0].legend()
ax[1].set_xlabel('t')
ax[1].set_ylabel('$\\gamma$')
ax[1].legend()
ax[1].plot(training_inputs[0], data['gamma'].values, 'k')
lw=3.0
for var, comp in zip([ETA_TRUE, SY_TRUE, G_TRUE, K_TRUE],
['$tan(\\beta)$', '$C$', '$\\mu$', '$\\kappa$']):
ax[2].loglog(delta_vals, np.ones_like(delta_vals)*var, '--k', lw=lw, label=comp)
lw -= 0.5
ax[2].set_xlabel('$\\delta$')
ax[2].set_ylabel('$\\eta, ~C, ~\\mu, ~\\kappa$')
ax[2].legend()
fig.subplots_adjust(left=0.1, right=0.9, bottom=0.15, top=0.9, wspace=0.2, hspace=0.3)
plt.savefig(path + "results.pdf", dpi=300)
plt.savefig(path + "results.png", dpi=300)
| 10,126 | 25.862069 | 114 |
py
|
sciann-applications
|
sciann-applications-master/SciANN-SolidMechanics-BCs/SciANN-SolidMechanics-BCs.py
|
""" SciANN-SolidMechanics.py
Description:
SciANN code for solution and discovery of solid mechanics from data.
For additional details, please check our paper at: https://arxiv.org/abs/2003.02751
Created by Ehsan Haghighat on 2/14/20.
"""
import os, sys, time
import numpy as np
from sciann.utils.math import diff
from sciann import SciModel, Functional, Parameter
from sciann import Data, Tie
from sciann import Variable, Field
import matplotlib.pyplot as plt
import argparse
pi = np.pi
# current file name.
current_file_name = os.path.basename(__file__).split(".")[0]
# Lame parameters used in the paper.
lmbd = 1.0
mu = 0.5
qload = 4.0
# Input interface for python.
parser = argparse.ArgumentParser(description='''
SciANN code for solution and discovery of solid mechanics from data. \n
For additional details, please check our paper at: https://arxiv.org/submit/3042511'''
)
# Define number of data points.
parser.add_argument('-l', '--layers', help='Num layers and neurons (default 4 layers each 40 neurons [40, 40, 40, 40])', type=int, nargs='+', default=[40]*4)
parser.add_argument('-af', '--actf', help='Activation function (default tanh)', type=str, nargs=1, default=['tanh'])
parser.add_argument('-nx', '--numx', help='Num Node in X (default 20)', type=int, nargs=1, default=[20])
parser.add_argument('-ny', '--numy', help='Num Node in Y (default 20)', type=int, nargs=1, default=[20])
parser.add_argument('-bs', '--batchsize', help='Batch size for Adam optimizer (default 32)', type=int, nargs=1, default=[32])
parser.add_argument('-e', '--epochs', help='Maximum number of epochs (default 5000)', type=int, nargs=1, default=[5000])
parser.add_argument('-lr', '--learningrate', help='Initial learning rate (default 0.001)', type=float, nargs=1, default=[0.001])
parser.add_argument('-in', '--independent_networks', help='Use independent networks for each var (default True)', type=bool, nargs=1, default=[True])
parser.add_argument('-v', '--verbose', help='Show training progress (default 2) (check Keras.fit)', type=int, nargs=1, default=[2])
parser.add_argument('--shuffle', help='Shuffle data for training (default True)', type=bool, nargs=1, default=[True])
parser.add_argument('--stopafter', help='Patience argument from Keras (default 500)', type=int, nargs=1, default=[500])
parser.add_argument('--savefreq', help='Frequency to save weights (each n-epoch)', type=int, nargs=1, default=[100000])
parser.add_argument('--dtype', help='Data type for weights and biases (default float64)', type=str, nargs=1, default=['float64'])
parser.add_argument('--gpu', help='Use GPU if available (default False)', type=bool, nargs=1, default=[False])
parser.add_argument('-op', '--outputpath', help='Output path (default ./file_name)', type=str, nargs=1, default=['output'])
parser.add_argument('-of', '--outputprefix', help='Output file prefix (default res)', type=str, nargs=1, default=['res'])
parser.add_argument('-nxp', '--numxplot', help='Num Node in X for plotting final results (default 200)', type=int, nargs=1, default=[200])
parser.add_argument('-nyp', '--numyplot', help='Num Node in Y for plotting final results (default 200)', type=int, nargs=1, default=[200])
parser.add_argument('--plot', help='Plot the model', nargs='?', default=False)
args = parser.parse_args()
if not args.gpu[0]:
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
def load(xx):
x, y = xx[0], xx[1]
Q = qload
return Q * np.sin(pi*x)
def bodyfx(xx):
x, y = xx[0], xx[1]
Q = qload
frc = - lmbd*(4*pi**2*np.cos(2*pi*x)*np.sin(pi*y) - Q*y**3*pi*np.cos(pi*x)) \
- mu*(pi**2*np.cos(2*pi*x)*np.sin(pi*y) - Q*y**3*pi*np.cos(pi*x)) \
- 8*mu*pi**2*np.cos(2*pi*x)*np.sin(pi*y)
return frc
def bodyfy(xx):
x, y = xx[0], xx[1]
Q = qload
frc = lmbd*(3*Q*y**2*np.sin(pi*x) - 2*pi**2*np.cos(pi*y)*np.sin(2*pi*x)) \
- mu*(2*pi**2*np.cos(pi*y)*np.sin(2*pi*x) + (Q*y**4*pi**2*np.sin(pi*x))/4) \
+ 6*Q*mu*y**2*np.sin(pi*x)
return frc
def dispx(xx):
x, y = xx[0], xx[1]
return np.cos(2*pi*x) * np.sin(pi*y)
def dispy(xx):
x, y = xx[0], xx[1]
Q = qload
return np.sin(pi*x) * Q * y**4/4
def strainxx(xx):
x, y = xx[0], xx[1]
Q = qload
return -2*pi*np.sin(2*pi*x)*np.sin(pi*y)
def strainyy(xx):
x, y = xx[0], xx[1]
Q = qload
return np.sin(pi*x)*Q*y**3
def strainxy(xx):
x, y = xx[0], xx[1]
Q = qload
return 0.5*(pi*np.cos(2*pi*x)*np.cos(pi*y) + pi*np.cos(pi*x)*Q*y**4/4)
def stressxx(xx):
return (lmbd+2*mu)*strainxx(xx) + lmbd*strainyy(xx)
def stressyy(xx):
return (lmbd+2*mu)*strainyy(xx) + lmbd*strainxx(xx)
def stressxy(xx):
return 2.0*mu*strainxy(xx)
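# dispx/dispy above define the manufactured (analytical) solution; the strain* and
# stress* helpers are its exact derivatives combined through the plane linear-elastic
# law sigma_xx = (lambda + 2*mu)*eps_xx + lambda*eps_yy (and analogously for the other
# components). They are used below to impose boundary data (together with bodyfx/bodyfy
# for the body forces) and to compare against the network predictions when plotting.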
def cust_pcolor(AX, X, Y, C, title):
im = AX.pcolor(X, Y, C, cmap="jet")
AX.axis("equal")
AX.axis("off")
AX.set_title(title, fontsize=14)
plt.colorbar(im, ax=AX)
def cust_semilogx(AX, X, Y, xlabel, ylabel):
if X is None:
im = AX.semilogy(Y)
else:
im = AX.semilogy(X, Y)
if xlabel is not None: AX.set_xlabel(xlabel)
if ylabel is not None: AX.set_ylabel(ylabel)
def train():
# define output folder.
if not os.path.isdir(args.outputpath[0]):
os.mkdir(args.outputpath[0])
output_file_name = os.path.join(args.outputpath[0], args.outputprefix[0])
fname = output_file_name + "_{}_".format(args.actf[0]) + "x".join([str(x) for x in args.layers])
# Neural Network Setup.
x = Variable("x", dtype=args.dtype[0])
y = Variable("y", dtype=args.dtype[0])
if args.independent_networks[0]:
Uxy = Functional("Uxy", [x, y], args.layers, args.actf[0])
Vxy = Functional("Vxy", [x, y], args.layers, args.actf[0])
Sxx = Functional("Sxx", [x, y], args.layers, args.actf[0])
Syy = Functional("Syy", [x, y], args.layers, args.actf[0])
Sxy = Functional("Sxy", [x, y], args.layers, args.actf[0])
else:
Uxy, Vxy, Sxx, Syy, Sxy = Functional(
["Uxy", "Vxy", "Sxx", "Syy", "Sxy"],
[x, y],
args.layers, args.actf[0]).split()
lame1 = Parameter(2.0, inputs=[x,y], name="lame1")
lame2 = Parameter(2.0, inputs=[x,y], name="lame2")
C11 = (2*lame2 + lame1)
C12 = lame1
C33 = 2*lame2
Exx = diff(Uxy, x)
Eyy = diff(Vxy, y)
Exy = (diff(Uxy, y) + diff(Vxy, x))*0.5
# Define constraints
d1 = Data(Uxy)
d2 = Data(Vxy)
d3 = Data(Sxx)
d4 = Data(Syy)
d5 = Data(Sxy)
c1 = Tie(Sxx, Exx*C11 + Eyy*C12)
c2 = Tie(Syy, Eyy*C11 + Exx*C12)
c3 = Tie(Sxy, Exy*C33)
Lx = diff(Sxx, x) + diff(Sxy, y)
Ly = diff(Sxy, x) + diff(Syy, y)
# Define the optimization model (set of inputs and constraints)
model = SciModel(
inputs=[x, y],
targets=[d1, d2, d3, d4, d5, c1, c2, c3, Lx, Ly],
loss_func="mse"
)
with open("{}_summary".format(fname), "w") as fobj:
model.summary(print_fn=lambda x: fobj.write(x + '\n'))
# Prepare training data
## Training grid
XMIN, XMAX = 0.0, 1.0
YMIN, YMAX = 0.0, 1.0
Xmesh = np.linspace(XMIN, XMAX, args.numx[0]).reshape((-1, 1))
Ymesh = np.linspace(YMIN, YMAX, args.numy[0]).reshape((-1, 1))
X, Y = np.meshgrid(Xmesh, Ymesh)
input_data = [X.reshape(-1, 1), Y.reshape(-1, 1)]
    # Assuming data is known only at boundary conditions
XTOL, YTOL = np.array([XMAX-XMIN, YMAX-YMIN])*1e-6
left_ids = np.where(abs(input_data[0] - XMIN) < XTOL)[0]
right_ids = np.where(abs(input_data[0] - XMAX) < XTOL)[0]
bot_ids = np.where(abs(input_data[1] - YMIN) < YTOL)[0]
top_ids = np.where(abs(input_data[1] - YMAX) < YTOL)[0]
BC_ids = np.unique(np.concatenate([left_ids, right_ids, bot_ids, top_ids]))
    ## data associated with the constraints defined earlier
# Define constraints
data_d1 = dispx(input_data)
data_d2 = dispy(input_data)
data_d3 = stressxx(input_data)
data_d4 = stressyy(input_data)
data_d5 = stressxy(input_data)
data_c1 = 'zeros'
data_c2 = 'zeros'
data_c3 = 'zeros'
data_Lx = bodyfx(input_data)
data_Ly = bodyfy(input_data)
target_data = [(BC_ids, data_d1), #BC: Ux - only applied at BC_ids
(BC_ids, data_d2), #BC: Uy - only applied at BC_ids
(BC_ids, data_d3), #BC: Sxx - only applied at BC_ids
(BC_ids, data_d4), #BC: Syy - only applied at BC_ids
(BC_ids, data_d5), #BC: Sxy - only applied at BC_ids
data_c1, data_c2, data_c3, #Impose the constitutive model at all test points
data_Lx, data_Ly] #Impose the body force at all test points
# Train the model
training_time = time.time()
history = model.train(
x_true=input_data,
y_true=target_data,
epochs=args.epochs[0],
batch_size=args.batchsize[0],
shuffle=args.shuffle[0],
learning_rate=args.learningrate[0],
stop_after=args.stopafter[0],
verbose=args.verbose[0],
save_weights_to="{}_WEIGHTS".format(fname),
save_weights_freq=args.savefreq[0]
)
training_time = time.time() - training_time
for loss in history.history:
np.savetxt(fname+"_{}".format("_".join(loss.split("/"))),
np.array(history.history[loss]).reshape(-1, 1))
time_steps = np.linspace(0, training_time, len(history.history["loss"]))
np.savetxt(fname+"_Time", time_steps.reshape(-1,1))
# Post process the trained model.
Xmesh_plot = np.linspace(XMIN, XMAX, args.numxplot[0]).reshape((-1, 1))
Ymesh_plot = np.linspace(YMIN, YMAX, args.numyplot[0]).reshape((-1, 1))
X_plot, Y_plot = np.meshgrid(Xmesh_plot, Ymesh_plot)
input_plot = [X_plot.reshape(-1, 1), Y_plot.reshape(-1, 1)]
lame1_pred = lame1.eval(model, input_plot)
lame2_pred = lame2.eval(model, input_plot)
Uxy_pred = Uxy.eval(model, input_plot)
Vxy_pred = Vxy.eval(model, input_plot)
Exx_pred = Exx.eval(model, input_plot)
Eyy_pred = Eyy.eval(model, input_plot)
Exy_pred = Exy.eval(model, input_plot)
Sxx_pred = Sxx.eval(model, input_plot)
Syy_pred = Syy.eval(model, input_plot)
Sxy_pred = Sxy.eval(model, input_plot)
np.savetxt(fname+"_Xmesh", X_plot, delimiter=', ')
np.savetxt(fname+"_Ymesh", Y_plot, delimiter=', ')
np.savetxt(fname+"_lame1", lame1_pred, delimiter=', ')
np.savetxt(fname+"_lame2", lame2_pred, delimiter=', ')
np.savetxt(fname+"_Uxy", Uxy_pred.reshape(X_plot.shape), delimiter=', ')
np.savetxt(fname+"_Vxy", Vxy_pred.reshape(X_plot.shape), delimiter=', ')
np.savetxt(fname+"_Exx", Exx_pred.reshape(X_plot.shape), delimiter=', ')
np.savetxt(fname+"_Eyy", Eyy_pred.reshape(X_plot.shape), delimiter=', ')
np.savetxt(fname+"_Exy", Exy_pred.reshape(X_plot.shape), delimiter=', ')
np.savetxt(fname+"_Sxx", Sxx_pred.reshape(X_plot.shape), delimiter=', ')
np.savetxt(fname+"_Syy", Syy_pred.reshape(X_plot.shape), delimiter=', ')
np.savetxt(fname+"_Sxy", Sxy_pred.reshape(X_plot.shape), delimiter=', ')
def plot():
output_file_name = os.path.join(args.outputpath[0], args.outputprefix[0])
fname = output_file_name + "_{}_".format(args.actf[0]) + "x".join([str(x) for x in args.layers])
loss = np.loadtxt(fname+"_loss")
time = np.loadtxt(fname+"_Time")
fig, ax = plt.subplots(1, 2, figsize=(7, 3), dpi=300)
cust_semilogx(ax[0], None, loss/loss[0], "epochs", "L/L0")
cust_semilogx(ax[1], time, loss/loss[0], "time(s)", None)
fig.subplots_adjust(left=0.1, right=0.9, bottom=0.15, top=0.9, wspace=0.3, hspace=0.2)
plt.savefig("{}_loss.png".format(output_file_name))
Xmesh = np.loadtxt(fname+"_Xmesh", delimiter=',')
Ymesh = np.loadtxt(fname+"_Ymesh", delimiter=',')
fig, ax = plt.subplots(2, 2, figsize=(8, 6), dpi=300)
cust_pcolor(ax[0, 0], Xmesh, Ymesh, np.ones_like(Xmesh)*lmbd, "L*={:.3f}".format(lmbd))
cust_pcolor(ax[0, 1], Xmesh, Ymesh, np.ones_like(Xmesh)*mu, "G*={:.3f}".format(mu))
lmbd_pred = np.loadtxt(fname+"_lame1", delimiter=',')
mu_pred = np.loadtxt(fname+"_lame2", delimiter=',')
cust_pcolor(ax[1, 0], Xmesh, Ymesh, np.ones_like(Xmesh)*lmbd_pred, "L={:.3f}".format(lmbd_pred.mean()))
cust_pcolor(ax[1, 1], Xmesh, Ymesh, np.ones_like(Xmesh)*mu_pred, "G={:.3f}".format(mu_pred.mean()))
plt.savefig("{}_Parameters.png".format(output_file_name))
fig, ax = plt.subplots(2, 2, figsize=(8, 6), dpi=300)
cust_pcolor(ax[0, 0], Xmesh, Ymesh, dispx([Xmesh, Ymesh]), "Ux*")
cust_pcolor(ax[0, 1], Xmesh, Ymesh, dispy([Xmesh, Ymesh]), "Uy*")
cust_pcolor(ax[1, 0], Xmesh, Ymesh, np.loadtxt(fname+"_Uxy", delimiter=','), "Ux")
cust_pcolor(ax[1, 1], Xmesh, Ymesh, np.loadtxt(fname+"_Vxy", delimiter=','), "Uy")
plt.savefig("{}_Displacement.png".format(output_file_name))
fig, ax = plt.subplots(2, 3, figsize=(11, 6), dpi=300)
cust_pcolor(ax[0, 0], Xmesh, Ymesh, stressxx([Xmesh, Ymesh]), "Sxx*")
cust_pcolor(ax[0, 1], Xmesh, Ymesh, stressyy([Xmesh, Ymesh]), "Syy*")
cust_pcolor(ax[0, 2], Xmesh, Ymesh, stressxy([Xmesh, Ymesh]), "Sxy*")
cust_pcolor(ax[1, 0], Xmesh, Ymesh, np.loadtxt(fname+"_Sxx", delimiter=','), "Sxx")
cust_pcolor(ax[1, 1], Xmesh, Ymesh, np.loadtxt(fname+"_Syy", delimiter=','), "Syy")
cust_pcolor(ax[1, 2], Xmesh, Ymesh, np.loadtxt(fname+"_Sxy", delimiter=','), "Sxy")
fig.subplots_adjust(left=0.1, right=0.9, bottom=0.05, top=0.9, wspace=0.3, hspace=0.2)
plt.savefig("{}_Stress.png".format(output_file_name))
fig, ax = plt.subplots(2, 3, figsize=(11, 6), dpi=300)
cust_pcolor(ax[0, 0], Xmesh, Ymesh, strainxx([Xmesh, Ymesh]), "Exx*")
cust_pcolor(ax[0, 1], Xmesh, Ymesh, strainyy([Xmesh, Ymesh]), "Eyy*")
cust_pcolor(ax[0, 2], Xmesh, Ymesh, strainxy([Xmesh, Ymesh]), "Exy*")
cust_pcolor(ax[1, 0], Xmesh, Ymesh, np.loadtxt(fname+"_Exx", delimiter=','), "Exx")
cust_pcolor(ax[1, 1], Xmesh, Ymesh, np.loadtxt(fname+"_Eyy", delimiter=','), "Eyy")
cust_pcolor(ax[1, 2], Xmesh, Ymesh, np.loadtxt(fname+"_Exy", delimiter=','), "Exy")
fig.subplots_adjust(left=0.1, right=0.9, bottom=0.05, top=0.9, wspace=0.3, hspace=0.2)
plt.savefig("{}_Strain.png".format(output_file_name))
if __name__ == "__main__":
if args.plot==False:
train()
plot()
else:
plot()
| 14,462 | 40.088068 | 157 |
py
|
sciann-applications
|
sciann-applications-master/SciANN-Vibrations/PlateVibration/membrane.py
|
import numpy as np
import matplotlib.pyplot as plt
import sciann as sn
from sciann.utils.math import diff, sign, sin
from gen_dataset import gen_grid
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
Lx = 1.0
Ly = 1.0
T_Final = 0.5
NX = 40
NY = 40
NT = 20
EPOCHS = 2000
BATCH = 1000
data = gen_grid(NX, NY, NT, Lx, Ly, T_Final)
# x_data, y_data, t_data = np.meshgrid(np.linspace(0, Lx, NX), np.linspace(0, Ly, NY), np.linspace(0, T_Final, NT))
x = sn.Variable('x', dtype='float64')
y = sn.Variable('y', dtype='float64')
t = sn.Variable('t', dtype='float64')
u = sn.Functional('u', [x, y, t], 4*[20], 'sin')
c = 1.0
L1 = c * (diff(u, x, order=2) + diff(u, y, order=2)) - diff(u, t, order=2)
TOL = 0.001
C1 = (1-sign(t - TOL)) * (u - sin(np.pi * x) * sin(np.pi * y))
C2 = (1-sign(t - TOL)) * (diff(u, t))
C3 = (1-sign(x - TOL)) * u
C4 = (1-sign(y - TOL)) * u
C5 = (1+sign(x - ( 1-TOL))) * u
C6 = (1+sign(y - ( 1-TOL))) * u
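# The (1 - sign(. - TOL)) and (1 + sign(. - (1 - TOL))) factors act as indicator
# functions on the collocation points: C1/C2 are only active near t = 0 (initial
# displacement and velocity), C3/C4 near the x = 0 / y = 0 edges, and C5/C6 near the
# x = Lx / y = Ly edges, so each constraint is enforced only on its own subset of points.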
m = sn.SciModel(
[x, y, t],
[sn.PDE(L1), C1, C2, C3, C4, C5, C6],
# load_weights_from='membrane-weights.hdf5'
)
inputs = [data['x'], data['y'], data['t']]
targets = [(data['dom_ids'], 'zeros')] \
+ 2*[(data['t0_ids'], 'zeros')] \
+ 4*[(data['bc_ids'], 'zeros')]
h = m.train(
inputs, targets,
batch_size=BATCH,
learning_rate=0.001,
reduce_lr_after=50,
adaptive_weights={'freq':True},
epochs=EPOCHS)
m.save_weights('membrane-weights.hdf5')
x_test, y_test, t_test = data['x_test'], data['y_test'], data['t_test']
u_pred = u.eval(m, [x_test, y_test, t_test])
Lambd11 = np.pi * np.sqrt(2)
u_analytic = np.sin(np.pi * x_test[:,:,0]) * np.sin(np.pi * y_test[:,:,0]) * np.cos(Lambd11 * T_Final)
fig = plt.figure(figsize=plt.figaspect(0.6))
ax = fig.add_subplot(1, 2, 1, projection='3d')
# ax.plot_wireframe(x_test[:,:,0], y_test[:,:,0], u_pred[:,:,0])
# ax.plot_wireframe(x_test[:,:,0], y_test[:,:,0], u_pred[:,:,10])
surf = ax.plot_surface(x_test[:,:,0], y_test[:,:,0], u_pred[:,:,-1], cmap='coolwarm')
ax.set_xlabel('$x$')
ax.set_ylabel('$y$')
ax.set_zlabel('$u$')
fig.colorbar(surf, shrink=0.75, orientation='horizontal', label='$u$')
ax = fig.add_subplot(1, 2, 2, projection='3d')
surf = ax.plot_surface(x_test[:,:,0], y_test[:,:,0], np.abs(u_analytic-u_pred[:,:,-1]), vmin=0., cmap='hot_r')
ax.set_xlabel('$x$')
ax.set_ylabel('$y$')
ax.set_zlabel('$|u-u^*|$', labelpad=10)
cbar = fig.colorbar(surf, shrink=0.75, orientation='horizontal',label='$|u-u^*|$')
cbar.formatter.set_powerlimits((0, 0))
# cbar.ax.set_xticklabels(np.linspace(0, 0.0012, 5), rotation=90, )
# plt.show()
plt.savefig('membrane-results.pdf', dpi=300)
| 2,735 | 27.206186 | 115 |
py
|
sciann-applications
|
sciann-applications-master/SciANN-Vibrations/PlateVibration/membrane_inv.py
|
import numpy as np
import matplotlib.pyplot as plt
import sciann as sn
from sciann.utils.math import diff, sign, sin
from gen_dataset import gen_grid
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from matplotlib import gridspec
Lx = 1.0
Ly = 1.0
T_Final = 0.5
NX = 40
NY = 40
NT = 20
EPOCHS = 2000
BATCH = 1000
x_data, y_data, t_data = np.meshgrid(
np.linspace(0, Lx, NX),
np.linspace(0, Ly, NY),
np.linspace(0, T_Final, NT)
)
x_data = x_data.reshape(-1, 1)
y_data = y_data.reshape(-1, 1)
t_data = t_data.reshape(-1, 1)
Lambd11 = np.pi * np.sqrt(2)
u_data = np.sin(np.pi * x_data) * np.sin(np.pi * y_data) * np.cos(Lambd11 * t_data)
x = sn.Variable('x', dtype='float64')
y = sn.Variable('y', dtype='float64')
t = sn.Variable('t', dtype='float64')
u = sn.Functional('u', [x, y, t], 4*[20], 'sin')
c = sn.Parameter(np.random.rand(), inputs=[x,y,t], name='c')
L1 = c * (diff(u, x, order=2) + diff(u, y, order=2)) - diff(u, t, order=2)
m = sn.SciModel(
[x, y, t],
[sn.PDE(L1), sn.Data(u)],
# load_weights_from='membrane_inv-weights.hdf5'
)
inputs = [x_data, y_data, t_data]
targets = ['zeros', u_data]
h = m.train(
inputs, targets,
batch_size=BATCH,
learning_rate=0.001,
reduce_lr_after=50,
adaptive_weights={'freq':True},
epochs=EPOCHS,
log_parameters={'parameters': c, 'freq':1}
)
m.save_weights('membrane_inv-weights.hdf5')
x_test, y_test, t_test = np.meshgrid(
np.linspace(0, Lx, NX*3),
np.linspace(0, Ly, NY*3),
np.linspace(0, T_Final, NT*3)
)
u_pred = u.eval(m, [x_test, y_test, t_test])
Lambd11 = np.pi * np.sqrt(2)
u_analytic = np.sin(np.pi * x_test) * np.sin(np.pi * y_test) * np.cos(Lambd11 * t_test)
fig = plt.figure(figsize=plt.figaspect(0.6))
gs = gridspec.GridSpec(1, 2)
ax = fig.add_subplot(gs[0], projection='3d')
# ax.plot_wireframe(x_test[:,:,0], y_test[:,:,0], u_pred[:,:,0])
# ax.plot_wireframe(x_test[:,:,0], y_test[:,:,0], u_pred[:,:,10])
surf = ax.plot_surface(x_test[:,:,0], y_test[:,:,0], u_pred[:,:,-1], cmap='coolwarm')
ax.set_xlabel('$x$')
ax.set_ylabel('$y$')
ax.set_zlabel('$u$')
fig.colorbar(surf, shrink=0.75, orientation='horizontal', label='$u$')
ax = fig.add_subplot(gs[1], projection='3d')
surf = ax.plot_surface(x_test[:,:,0], y_test[:,:,0], np.abs(u_analytic[:,:,-1]-u_pred[:,:,-1]), vmin=0., cmap='hot_r')
ax.set_xlabel('$x$')
ax.set_ylabel('$y$')
ax.set_zlabel('$|u-u^*|$', labelpad=10)
cbar = fig.colorbar(surf, shrink=0.75, orientation='horizontal',label='$|u-u^*|$')
cbar.formatter.set_powerlimits((0, 0))
# cbar.ax.set_xticklabels(np.linspace(0, 0.0012, 5), rotation=90, )
plt.savefig('membrane_inv-results.pdf', dpi=300)
fig = plt.figure(figsize=(4,3))
plt.semilogx(h.history['parameter_epochs'], np.concatenate(h.history['parameter_c']).flatten())
plt.xlabel('epochs')
plt.ylabel('$c$')
plt.title('$c^* = 1.0$')
plt.subplots_adjust(0.2,0.15,0.8,0.85)
plt.savefig('membrane_inv-results2.pdf', dpi=300)
| 3,060 | 25.617391 | 118 |
py
|
sciann-applications
|
sciann-applications-master/SciANN-Vibrations/PlateVibration/plate.py
|
import numpy as np
import sciann as sn
from sciann.utils.math import diff, sign, sin
from gen_dataset import gen_grid
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
Lx = 1.0
Ly = 1.0
E = 7E+10
rho = 2700
h = 0.004
nu = 0.25
D = E * h ** 3 / (12 * (1 - nu ** 2))
D = D / rho
rho = 1.0
T_Final = 0.1
NX = 20
NY = 20
NT = 40
NTOT = NX*NY*NT
EPOCHS = 20000
BATCH = 1000
data = gen_grid(NX, NY, NT, Lx, Ly, T_Final)
x = sn.Variable('x', dtype='float64')
y = sn.Variable('y', dtype='float64')
t = sn.Variable('t', dtype='float64')
u = sn.Functional('u', [x, y, t], 4*[40], 'l-tanh')
L1 = D * (diff(u, x, order=4) + diff(u, y, order=4) + 2 * diff(diff(u, x, order=2), y, order=2)) + rho * diff(u, t, order=2)
TOL = 0.001
C1 = (1-sign(t - TOL)) * (u - sin(np.pi * x) * sin(np.pi * y))
C2 = (1-sign(t - TOL)) * (diff(u, t))
C3 = (1-sign(x - TOL)) * u
C4 = (1-sign(y - TOL)) * u
C5 = (1+sign(x - ( 1-TOL))) * u
C6 = (1+sign(y - ( 1-TOL))) * u
C7 = (1-sign(x - TOL)) * (diff(u, x, order=2))
C8 = (1-sign(y - TOL)) * (diff(u, y, order=2))
C9 = (1+sign(x - ( 1-TOL))) * (diff(u, x, order=2))
C10 = (1+sign(y - ( 1-TOL))) * (diff(u, y, order=2))
m = sn.SciModel(
[x, y, t],
[sn.PDE(L1), C1, C2, C3, C4, C5, C6, C7, C8, C9, C10],
# load_weights_from = 'plate-weights.hdf5'
)
inputs = [data['x'], data['y'], data['t']]
targets = [(data['dom_ids'], 'zeros')] \
+ 2*[(data['t0_ids'], 'zeros')] \
+ 8*[(data['bc_ids'], 'zeros')]
h = m.train(
inputs,
targets,
batch_size=BATCH,
learning_rate=0.001,
reduce_lr_after=20,
adaptive_weights={'freq': True},
epochs=EPOCHS,
)
m.save_weights('plate-weights.hdf5')
x_test, y_test, t_test = data['x_test'], data['y_test'], data['t_test']
u_pred = u.eval(m, [x_test, y_test, t_test])
Lambd11 = np.sqrt(4 * np.pi ** 4 * D / rho)
u_analytic = np.sin(np.pi * x_test) * np.sin(np.pi * y_test) * np.cos(Lambd11 * t_test)
fig = plt.figure(figsize=plt.figaspect(0.6))
ax = fig.add_subplot(1, 2, 1, projection='3d')
surf = ax.plot_surface(x_test[:,:,0], y_test[:,:,0], u_pred[:,:,-1], cmap='coolwarm')
ax.set_xlabel('$x$')
ax.set_ylabel('$y$')
ax.set_zlabel('$u$')
fig.colorbar(surf, shrink=0.75, orientation='horizontal', label='$u$')
ax = fig.add_subplot(1, 2, 2, projection='3d')
surf = ax.plot_surface(x_test[:,:,0], y_test[:,:,0], np.abs(u_analytic[:,:,-1]-u_pred[:,:,-1]), vmin=0., cmap='hot_r')
ax.set_xlabel('$x$')
ax.set_ylabel('$y$')
ax.set_zlabel('$|u-u^*|$')
fig.colorbar(surf, shrink=0.75, orientation='horizontal',label='$|u-u^*|$')
# plt.show()
plt.savefig('plate-results.pdf', dpi=300)
| 2,749 | 24.462963 | 124 |
py
|
sciann-applications
|
sciann-applications-master/SciANN-Vibrations/PlateVibration/gen_dataset.py
|
import numpy as np
def gen_grid(nx=5, ny=5, nt=10, Lx=1.0, Ly=1.0, T=1.0):
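    """Build collocation grids for the rectangular membrane/plate problems.

    Returns a dict with flattened training coordinates ('x', 'y', 't'), index arrays
    into them for the initial condition ('t0_ids', where t == 0), the boundary
    ('bc_ids', where x or y lies on an edge) and the interior ('dom_ids', t > 0 and
    strictly inside the rectangle), plus a denser meshgrid ('x_test', 'y_test',
    't_test') for evaluating the trained solution.
    """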
# domain grids
x_grid, y_grid, t_grid = np.meshgrid(
np.linspace(0, Lx, nx)[1:-1],
np.linspace(0, Ly, ny)[1:-1],
np.linspace(0, T, nt)[1:],
indexing='ij'
)
x_grid, y_grid, t_grid = [x.reshape(-1,1) for x in [x_grid, y_grid, t_grid]]
# init grid
x_init, y_init, t_init = np.meshgrid(
np.linspace(0, Lx, (nx-2)*int(np.sqrt(nt))),
np.linspace(0, Ly, (ny-2)*int(np.sqrt(nt))),
[0],
indexing='ij'
)
x_init, y_init, t_init = [x.reshape(-1,1) for x in [x_init, y_init, t_init]]
# bc X grid
x_Xbc, y_Xbc, t_Xbc = np.meshgrid(
np.linspace(0, Lx, nx*int((ny-2)/4)),
np.linspace(0, Ly, 2),
np.linspace(0, T, nt)[1:],
indexing='ij'
)
x_Xbc, y_Xbc, t_Xbc = [x.reshape(-1,1) for x in [x_Xbc, y_Xbc, t_Xbc]]
# bc Y grid
x_Ybc, y_Ybc, t_Ybc = np.meshgrid(
np.linspace(0, Lx, 2),
np.linspace(0, Ly, ny*int((nx-2)/4)),
np.linspace(0, T, nt)[1:],
indexing='ij'
)
x_Ybc, y_Ybc, t_Ybc = [x.reshape(-1,1) for x in [x_Ybc, y_Ybc, t_Ybc]]
x_bc, y_bc, t_bc = [np.concatenate([x,y],axis=0) for x,y in zip([x_Xbc, y_Xbc, t_Xbc], [x_Ybc, y_Ybc, t_Ybc])]
# x_grid
x_grid = np.concatenate([x_grid, x_init, x_Xbc, x_Ybc], axis=0)
y_grid = np.concatenate([y_grid, y_init, y_Xbc, y_Ybc], axis=0)
t_grid = np.concatenate([t_grid, t_init, t_Xbc, t_Ybc], axis=0)
# test grid
x_test, y_test, t_test = np.meshgrid(
np.linspace(0, Lx, 3*nx),
np.linspace(0, Ly, 3*ny),
np.linspace(0, T, 3*nt),
indexing='ij'
)
# init ids
t0_ids = np.where(t_grid.flatten() == 0.)[0]
bc_ids = np.where(
np.logical_or(
np.logical_or(x_grid.flatten() == 0., x_grid.flatten() == Lx),
np.logical_or(y_grid.flatten() == 0., y_grid.flatten() == Ly)
)
)[0]
dom_ids = np.where(
np.logical_and(
t_grid.flatten() > 0.,
np.logical_and(
np.logical_and(x_grid.flatten() > 0., x_grid.flatten() < Lx),
np.logical_and(y_grid.flatten() > 0., y_grid.flatten() < Ly),
)
)
)[0]
return {
'x': x_grid,
'y': y_grid,
't': t_grid,
't0_ids': t0_ids,
'bc_ids': bc_ids,
'dom_ids': dom_ids,
'x_test': x_test,
'y_test': y_test,
't_test': t_test
}
if __name__ == "__main__":
g = gen_grid(nx=5, ny=25, nt=100, Lx=1, Ly=5, T=10)
| 2,634 | 27.641304 | 114 |
py
|
vampire
|
vampire-master/scripts/stat_plotter.py
|
#!/usr/bin/python
#
# see https://docs.google.com/document/pub?id=1vsmC80shh7qCpwgaNTrLzRyz61I0BZWJFvIqiE9yIG8
# for documentation
#
import sys
import platform
import subprocess
import re
import time
import tempfile
import os
import math
timeDataRE = re.compile("^(.* t[0-9]+) at ([0-9]+): (.*)$")
labelRE = re.compile("^(.+) t([0-9]+)$")
lblDeclRE = re.compile("^stat: ([^ ]+) - (.+ t[0-9]+)$")
histogramSpecRE = re.compile("^[^ ]+@hist:[^ ]+$")
histSegmentRE = re.compile("^ *([0-9]+): ([0-9]+)")
tmpDataFile = tempfile.NamedTemporaryFile()
tmpHistFiles = []
useLogScale = False
vampCmdLine = None
plotGroups = None
def readPlotGroups(spec):
"""plot groups specification contain statistic indexes separated by commas in groups separated by semicolons"""
grps=spec.split(";")
res=[]
for g in grps:
idxStrings = g.split(",")
gContent = map(int, idxStrings)
res.append(gContent)
return res
def readArgs(args):
global useLogScale
global plotGroups
global vampCmdLine
locArgsEnd = False
while not locArgsEnd:
if args[0]=="-log":
useLogScale = True
args = args[1:]
elif args[0]=="-g":
plotGroups = readPlotGroups(args[1])
args = args[2:]
else:
locArgsEnd = True
vampCmdLine = args
for i in range(0,len(vampCmdLine)):
if vampCmdLine[i]=="-tr":
vampCmdLine[i+1] = "stat_labels,"+vampCmdLine[i+1]
readArgs(sys.argv[1:])
nextLblIdx = 0
lblIndexes = {}
idxLbls = {}
idx2HumanLabel = {}
# types:
# num - usual numbers
# hist - histograms
idxTypes = {}
histIndexes = []
histTmpFiles = {}
histMaxCounts = {}
histMaxKeys = {}
def addLabel(specStr,lblStr):
global nextLblIdx
global lblIndexes
global idxLbls
global idx2HumanLabel
global idxTypes
global histIndexes
global histTmpFiles
global histMaxCounts
if lblStr in lblIndexes:
raise Exception("duplicate label: "+lblStr)
newIdx = nextLblIdx
nextLblIdx = nextLblIdx + 1
lblIndexes[lblStr] = newIdx
idxLbls[newIdx] = lblStr
lblMO = labelRE.match(lblStr)
if not lblMO:
raise Exception("wrong label format: "+lblStr)
idx2HumanLabel[newIdx] = lblMO.group(1)
type = "num"
if histogramSpecRE.match(specStr):
type = "hist"
histIndexes.append(newIdx)
histTmpFiles[newIdx] = tempfile.NamedTemporaryFile()
#histTmpFiles[newIdx] = open("/work/Dracula/pdata.txt","w")
histMaxCounts[newIdx] = 0
histMaxKeys[newIdx] = 0
idxTypes[newIdx] = type
def getLblIdx(lbl):
global lblIndexes
if lbl not in lblIndexes:
raise Exception("undeclared label: "+lbl)
return lblIndexes[lbl]
def readHistData(histIdx,val):
global histMaxCounts
global histMaxKeys
res = {}
if val=="":
return res
segments = val.split(",");
for seg in segments:
mo = histSegmentRE.match(seg)
if not mo:
raise Exception("invalid segment: \""+seg+"\" in "+val)
key = int(mo.group(1))
ctr = int(mo.group(2))
if key in res:
raise Exception("duplicate key "+key+" in "+val)
res[key]=ctr
if ctr>histMaxCounts[histIdx]:
histMaxCounts[histIdx] = ctr
if key>histMaxKeys[histIdx]:
histMaxKeys[histIdx] = key
return res
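# Histogram statistics arrive as comma-separated "key: count" pairs; for example the
# (illustrative) value string "0: 12,1: 7" is parsed by readHistData into {0: 12, 1: 7}.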
#map from time points to map from indexes to data
data = {}
timePoints = []
def addDataPoint(lbl, t, v):
global data
global timePointsSet
global timePoints
global idxTypes
idx = getLblIdx(lbl)
if t not in data:
data[t]={}
timePoints.append(t)
type = idxTypes[idx]
if type=="num":
if v!="?":
data[t][idx]=float(v)
elif type=="hist":
data[t][idx]=readHistData(idx,v)
else:
raise "not implemented"
def outputHistFile(idx,f):
global data
global timePoints
global histMaxKeys
domEls = None
if False:
dom = set()
for t in timePoints:
if idx not in data[t]:
continue
distr = data[t][idx]
dom.update(distr.keys())
domEls = []
domEls.extend(dom)
domEls.sort()
else:
domEls = range(0,histMaxKeys[idx])
f.seek(0)
f.truncate()
for el in domEls:
for t in timePoints:
if idx not in data[t]:
continue
distr = data[t][idx]
if el in distr:
f.write(str(distr[el])+"\t")
else:
f.write("0\t")
f.write("\n")
f.flush()
def updateDataFiles():
"""populate data files for graphs and histograms"""
global tmpDataFile
global data
global timePoints
global nextLblIdx
global histIndexes
global histTmpFiles
global idxTypes
    tmpDataFile.seek(0)
    tmpDataFile.truncate()
for t in timePoints:
tmpDataFile.write(str(t))
dataLine = data[t]
for idx in range(0,nextLblIdx):
val = None
if idxTypes[idx]!="num":
val = "?"
elif idx not in dataLine:
val = "?"
else:
val = dataLine[idx]
tmpDataFile.write("\t"+str(val))
tmpDataFile.write("\n")
tmpDataFile.flush()
for hidx in histIndexes:
tf = histTmpFiles[hidx]
outputHistFile(hidx, tf)
tf.flush()
gnuplotProc = subprocess.Popen(["gnuplot"], bufsize=1, stdin=subprocess.PIPE, shell=True)
if useLogScale:
gnuplotProc.stdin.write("set logscale y\n")
def getIndexPlotStatement(idx):
global idx2HumanLabel
global tmpDataFile
global idxTypes
assert idxTypes[idx]=="num"
dataIdx = str(idx+2)
title = idx2HumanLabel[idx]
return "\""+tmpDataFile.name+"\" using 1:($"+dataIdx+") title \""+title+"\" with linespoints"
def buildHistPaletteCmd(idx):
global histMaxCounts
maxVal = histMaxCounts[idx]
if maxVal<2:
return ['set palette defined (0 "white", 1 "black")']
if maxVal<10:
return ['set palette defined (0 "white", 1 "black", %d "red")' % maxVal]
low = math.sqrt(maxVal)
high = maxVal/2
return ['set palette defined (0 "white", 1 "black", %d "purple", %d "red", %d "yellow")' % (low, high, maxVal)]
def buildHistRangeCmd(idx):
global timePoints
global histMaxKeys
res = ["set xrange [-0.5:%d]" % (len(timePoints)+0.5),
"set yrange [-0.5:%d]" % (histMaxKeys[idx]+0.5)]
return res
def buildHistPlotCommand(idx):
global histTmpFiles
global idx2HumanLabel
global idxTypes
assert idxTypes[idx]=="hist"
fname = histTmpFiles[idx].name
title = idx2HumanLabel[idx]
res = []
res.extend(buildHistPaletteCmd(idx))
res.extend(buildHistRangeCmd(idx))
res.append("plot \""+fname+"\" matrix with image title \""+title+"\"")
return res
def buildPlotCommand(idxList):
global idxTypes
if len(idxList)==1 and idxTypes[idxList[0]]=="hist":
return buildHistPlotCommand(idxList[0])
res = []
res.append("set xrange [*:*]")
res.append("set yrange [*:*]")
mainCmd = "plot "
first = True
for idx in idxList:
if idxTypes[idx]=="hist":
raise Exception("histogram statistics must be in their own group")
if first:
first = False
else:
mainCmd += ", "
mainCmd += getIndexPlotStatement(idx)
res.append(mainCmd)
return res
def buildGroupPlotScript(grps):
res = []
if len(grps)==1:
res.extend(buildPlotCommand(grps[0]))
else:
res.append("set multiplot layout "+str(len(grps))+",1")
res.append("unset title")
for grp in grps:
res.extend(buildPlotCommand(grp))
res.append("unset multiplot")
return res
def buildPlotScript():
global plotGroups
global nextLblIdx
global idxTypes
global histIndexes
grps = plotGroups
if not grps:
grps = [[x for x in range(0,nextLblIdx) if idxTypes[x]=="num" ]]
if len(grps[0])==0:
grps = []
if histIndexes:
grps.extend([[x] for x in histIndexes])
return buildGroupPlotScript(grps)
def redrawGnuplot():
global gnuplotProc
gpCmds = buildPlotScript()
gpCmd = "\n".join(gpCmds)+"\n"
gnuplotProc.stdin.write(gpCmd)
gnuplotProc.stdin.flush()
vampProc = subprocess.Popen(vampCmdLine, bufsize=1, stderr=subprocess.PIPE)
lastUpdateTime = None
while True:
line = vampProc.stderr.readline()
if not line:
break
mo = lblDeclRE.match(line)
if mo:
addLabel(mo.group(1),mo.group(2))
continue
mo = timeDataRE.match(line)
if not mo:
sys.stderr.write(line)
continue
lbl = mo.group(1)
timePnt = mo.group(2)
valPnt = mo.group(3)
addDataPoint(lbl, timePnt, valPnt)
curTime = time.time()
if len(timePoints)>3:
if lastUpdateTime==None or curTime-lastUpdateTime>0.3:
updateDataFiles()
redrawGnuplot()
lastUpdateTime = curTime
updateDataFiles()
redrawGnuplot()
time.sleep(0.25)
if platform.system()=="Linux":
sys.stdin.readline()
gnuplotProc.kill()
| 9,326 | 24.414169 | 115 |
py
|
vampire
|
vampire-master/scripts/determinism_checker.py
|
#!/usr/bin/python
"""
Will run two vampires in parallel and compare their output.
Attempts to change the memory alignment of the second vampire by
creating a lot of environment variables (this should make stack
start from a different place in memory).
Command line:
[-p] [-a alternative_executable] executable arg1 ...
default alternative_executable is the same as executable
Runs in parallel
executable arg1 ...
alternative_executable arg1 ...
and prints out a message when their outputs start to differ.
"-p" will cause the outputs to be printed out even if they do not
differ.
This script is particularly useful in combination with the "-tr"
options of Vampire which enable tracing ouputs. For example,
"-tr sa" prints out details of all newly generated, processed,
activated and simplified clauses. List of all possible arguments
to the "-tr" option can be obtained by running "-tr help".
"""
import sys
import platform
import subprocess
import re
import time
import tempfile
import os
import math
vampCmdLine = None
printTraces = False
altExecutable = None
errFollowUpLines = 5
def readArgs(args):
global vampCmdLine
global printTraces
global altExecutable
while True:
if args[0]=="-p":
printTraces = True
args = args[1:]
if args[0]=="-a":
altExecutable = args[1]
args = args[2:]
else:
break
vampCmdLine = args
class Finished(Exception):
def __init__(self, msg):
self.msg = msg
readArgs(sys.argv[1:])
def createVampProc(isSecond):
global vampCmdLine
global altExecutable
childEnv = os.environ
cmdLine = list(vampCmdLine)
if isSecond:
#we try to make the second Vampire behave differently, so we put some large stuff
#into the system environment, which could change the memory alignment a bit
childEnv = dict(childEnv)
s = "a"
for i in range(0,12):
s = s+s
childEnv["abcd"] = s
#we also use the alternative executable if it was supplied
if altExecutable:
cmdLine[0] = altExecutable
try:
return subprocess.Popen(cmdLine, bufsize=1, stderr=subprocess.PIPE, env=childEnv)
except OSError:
print "Command line giving error:"
print cmdLine
raise
def trimEOL(str):
if str[-1]=="\n":
return str[:-1]
else:
return str
def printFollowUp(hdr, firstLine, proc):
global errFollowUpLines
print "%s: %s" % (hdr, trimEOL(firstLine))
for i in range(0,errFollowUpLines):
ln = proc.stderr.readline()
if not ln:
print "%s terminated" % hdr
break
print "%s: %s" % (hdr, trimEOL(ln))
vp1 = createVampProc(False)
vp2 = createVampProc(True)
try:
while True:
ln1 = vp1.stderr.readline()
ln2 = vp2.stderr.readline()
if ln1==ln2:
if not ln1:
raise Finished("Both vampires terminated")
if printTraces:
print trimEOL(ln1)
continue
if not ln1:
raise Finished("First vampire terminated")
if not ln2:
raise Finished("Second vampire terminated")
if ln2[0:len(ln1)]==ln1 and vp1.poll():
print "v1: %s" % trimEOL(ln1)
print "v2: %s" % trimEOL(ln2)
raise Finished("First vampire terminated in the middle of a line")
if ln1[0:len(ln2)]==ln2 and vp2.poll():
print "v1: %s" % trimEOL(ln1)
print "v2: %s" % trimEOL(ln2)
raise Finished("Second vampire terminated in the middle of a line")
print "Vampire outputs differ:"
print
print "v1: %s" % trimEOL(ln1)
print "v2: %s" % trimEOL(ln2)
if errFollowUpLines:
print
printFollowUp("v1", ln1, vp1)
print
printFollowUp("v2", ln2, vp2)
print
raise Finished("Non-determinism detected")
except Finished as e:
print e.msg
finally:
if vp1.poll()==None:
vp1.kill()
if vp2.poll()==None:
vp2.kill()
| 4,220 | 25.71519 | 90 |
py
|
vampire
|
vampire-master/scripts/infXpostFx.py
|
#!/usr/bin/python
postfix = []
temp = []
operator = -10
operand = -20
leftparentheses = -30
rightparentheses = -40
space = -50
#convert a string into usable tokens
def strToTokens(str):
strArr = []
strArr = str
tempStr = ''
tokens = []
tokens_index = 0
count = 0
for x in strArr:
count = count+1
if typeof(x) == operand:
tempStr += x
if typeof(x) == space:
tokens.append(tempStr)
tokens_index = tokens_index + 1
tempStr = ''
if( typeof(x)== operator or x == ")" or x == "("):
if(tempStr != ''):
tokens.append(tempStr)
tokens_index = tokens_index+1
#tempStr = ''
tempStr=''
tokens.append(x)
tokens_index = tokens_index+1
if(count == len(strArr)):
if(tempStr != ''):
tokens.append(tempStr)
return (tokens)
#return the top of the stack
def topStack(stack):
return stack[len(stack)-1]
#return the top of the stack, but also pop that element
def pop_stack(stack):
return stack.pop()
#check if the open paranthesis match the closed ones
def balanceParanthesis(string):
count = 0
for x in string:
if x == '(':
count = count + 1
elif x == ')':
count = count - 1
else:
continue
return count
#find the position of value in strin - returns -1 if it doesn't find it
def FIND(value, strin):
try:
i = strin.index(value)
except ValueError:
i = -1
return i
#checks if a token is of array type
#this is a rather naive implementation - it checks if an operand is followed by a parenthesis
#this function is needed in order to distinguish the array type from normal
#open and closed parentheses
def isArray(x, string):
if typeof(x)== operator or x =='(' or x==')':
return False
fin = FIND(x, string)
if fin == -1 :
return False
elif fin+1 < len(string) and string[fin+1] == '(':
return True
else: return False
#the this actually create the expression string to be outputed
def evaluate(str):
operands = []
for x in reversed(str):
if x == '':
continue
if isArray(x, str) == True:
            #in case the operand is an array, treat it more carefully
op = pop_stack(operands)
operands.append(x+"["+op+"]")
else:
if typeof(x) == operand :
operands.append(x)
elif typeof(x) == operator and x =='#':
# '#' is a special case of operator - it stands for unary minus
op = pop_stack(operands)
operands.append("("+x + op+")")
elif typeof(x) == operator and x == "~":
# '~' special case of operator: stands for logical negation
op = pop_stack(operands)
operands.append(x+op)
elif typeof(x) == operator:
#this happens for the binary operators
op1 = pop_stack(operands)
op2 = pop_stack(operands)
operands.append('('+op1 + x + op2+')')
else :
continue
#after the evaluation is done, all that we have on the stack is the expression to be written
return pop_stack(operands)
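# Illustrative behaviour: tokens are scanned right to left, so for the token list
# ['+', 'a', 'b'] the operands 'b' and 'a' are pushed first and '+' then yields
# '(a+b)'; an array operand such as 'arr' in "arr(i)" is rendered as 'arr[i]'.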
#defines the types of tokens
def typeof(s):
    if s == '(':
        return leftparentheses
    elif s == ')':
        return rightparentheses
    elif s in ('+', '-', '*', '~', '/', '<', '#', '>'):
        return operator
    elif s == ' ' or s == ',':
        return space
    else:
        return operand
#this method is just for testing purpose
def convert(strin):
#infix = raw_input("Enter the infix notation : ")
print strin
print "deasupra e ce am primit "
tem = strin[0].strip("\n")
strin = []
if balanceParanthesis(tem) != 0 :
print "There are unbalanced paranthesis "
exit(-1)
else:
infix = strToTokens(tem)
final = []
final.append(evaluate(infix))
print "len of final", len(final)
return final
#print "final expression", final
def convertS(String):
returnV =[]
for x in String:
try:
temp = x.strip("\n")
if balanceParanthesis(temp) !=0 :
print "Therea are unbalanced paranthesis!"
exit(-1)
else:
if x.find("!=")!=-1:
spl = x.split("!=")
lhs = evaluate(strToTokens(spl[0]))
rhs = evaluate(strToTokens(spl[1]))
lhs = lhs + "!=" + rhs
returnV.append(lhs)
elif x.find("=")!=-1:
spl = x.split("=")
lhs = evaluate(strToTokens(spl[0]))
rhs = evaluate(strToTokens(spl[1]))
lhs = lhs + "==" + rhs
returnV.append(lhs)
else:
infix = strToTokens(temp)
returnV.append(evaluate(infix))
        except Exception as e:
print e
#print returnV
return returnV
if __name__ == "__main__":
infix = raw_input("enter infix notation:")
print "final ", convert(infix)
| 5,395 | 30.190751 | 101 |
py
|
vampire
|
vampire-master/scripts/proof_checker.py
|
#!/usr/bin/env python
# @Author Giles
import os
import sys
import subprocess
if(len(sys.argv)<2):
print "You should provide a command to proof_check i.e. ../vampire_rel_master -sa inst_gen TPTP/Problems/SYN/SYN001+1.p"
sys.exit(0)
TPTP='~/TPTP-v6.4.0/'
VAMPIRE_ROOT = sys.argv[1]+' --include '+TPTP
VAMPIRE_CHECK = './vampire_z3_rel_master --mode casc'#VAMPIRE_ROOT
# Set the time out for all proof attempts
time_out=str(30)
# Set the strings for each prover
EPROVER='~/Vampire/prover-bin/eprover --auto --tptp3-in --proof-object --cpu-limit='+time_out
VAMPIRE= VAMPIRE_CHECK+' -p off --time_limit '+time_out
IPROVER='~/Vampire/prover-bin/iproveropt --clausifier ../vampire_rel_master --clausifier_options "--mode clausify" --time_out_real '+time_out
CVC4='cvc4 --lang tptp --tlimit='+time_out+'000' # to convert seconds to ms
SPASS='~/Vampire/prover-bin/SPASS -Auto=1 -TPTP=1 -TimeLimit='+time_out
CHECK_WITH=set()
#CHECK_WITH.add(EPROVER)
CHECK_WITH.add(VAMPIRE)
#CHECK_WITH.add(IPROVER)
#CHECK_WITH.add(CVC4)
#CHECK_WITH.add(SPASS)
verbose=True
ignores=set()#set(['%negated conjecture','%sat splitting component','%theory axiom','%cnf transformation','%flattening','%ennf transformation','%general splitting','%general splitting component introduction','%global subsumption','%sat splitting refutation','%rectify'])
ARGS= " -p proofcheck "+(' '.join(sys.argv[2:]))
print "Running vampire on "+ ARGS
OUT=""
try:
OUT=subprocess.check_output(VAMPIRE_ROOT+ARGS, shell = True)
except subprocess.CalledProcessError as err:
print "The problem was not solved"
print err
sys.exit(0)
refutation=False
obligation=[]
checked=0
for line in OUT.split('\n'):
line = line.strip()
if '%#' in line and len(obligation)>0:
if all(ignore not in o for o in obligation for ignore in ignores):
#Finished obligation, run it
checked+=1
if verbose:
print "Dealing with obligation:"
print '\n'.join(obligation)
#Create a temp file
with open('proof_obligation_'+str(checked), 'w') as tmp:
tmp.write('\n'.join(obligation))
any_failed=False
#Run provers
for prover in CHECK_WITH:
prover_result=""
try:
prover_result = subprocess.check_output(prover+' proof_obligation_'+str(checked),shell=True)
except subprocess.CalledProcessError as err:
prover_result = err.output
proved=False
for prover_line in prover_result.split('\n'):
if 'SZS status' in prover_line:
if verbose:
print "Prover Output:\t"+prover_line
if 'Theorem' in prover_line or 'Unsatisfiable' in prover_line:
proved=True
break
if 'SPASS beiseite: Proof found.' in prover_line:
proved=True
if not proved:
print '************************'
print 'Failed proof obligation: ',checked,' using ',prover
any_failed=True
if not any_failed:
os.remove('proof_obligation_'+str(checked))
#Reset obligation
obligation=[]
else:
print "Skipped ",obligation
elif refutation:
obligation.append(line)
if 'Refutation found' in line:
refutation=True
print "There was a refutation, checking proof..."
if not refutation:
print "There was no refutation"
else:
print "Finished checking :)"
print "We checked " + str(checked) +" obligations"
| 3,382 | 29.754545 | 270 |
py
|
vampire
|
vampire-master/scripts/annotateCode.py
|
#!/usr/bin/env python
import sys, os, time, getopt
from subprocess import Popen, list2cmdline
import subprocess
import argparse
import insertInv
import tempfile
def cpu_count():
    num = 1 # fall back to a single CPU if detection below fails
    if sys.platform =="win32":
        try:
            num = int(os.environ['NUMBER_OF_PROCESSORS'])
except(ValueError, KeyError):
pass
elif sys.platform == 'darwin':
try:
num = int(os.popen('sysctl -n hw.ncpu').read())
except ValueError:
pass
else:
try:
num = os.sysconf('SC_NPROCESSORS_ONLN')
except (ValueError, OSError, AttributeError):
pass
return num
#insert here work for parallel
#def worker(q):
# item = q.get()
# os.system(item)
# q.task_done()
#def execute_parallel(cmds):
# no_cpu = cpu_count()
def execute_commands(cmds):
# execute the commands in parallel (multiple processes),
# on as many CPU's we have
if not cmds: return #the list of commands is empty
def done(p):
return p.poll() is not None
def success(p):
return p.returncode == 0
def fail():
sys.exit(1)
max_tasks = cpu_count()
processes = []
while True:
while cmds and len(processes)< max_tasks:
task = cmds.pop()
#print list2cmdline(task)
#processes.append(os.system(task))
processes.append(subprocess.Popen(task, shell=True))
for p in processes:
if done(p):
if success(p):
processes.remove(p)
else:
fail()
if not processes and not cmds:
break
else:
time.sleep(0.05)
#parse the command line arguments and create an object containing all the information passed on the command line
def createCommands(args):
commnadLine = ""
parser = argparse.ArgumentParser(description = "Annotate C code with invariants")
parser.add_argument('analyzer', metavar = 'ANALYZER',
help= "analyzer executable ")
parser.add_argument('input',metavar = 'INPUT-FILENAME',
help="input file to be processed. ")
parser.add_argument("-o","--output",action="store",
dest="outputFile",
help="output filename -- default value: output.txt")
parser.add_argument("-v","--verbose", action="store_true",dest="verbose")
parser.add_argument("-t","--time_limit",action="store", type=int,
dest="timeLimit",
help="time limit for invariant generation , default value 10 seconds")
parser.add_argument("--fno","--function_number", action="store",type=int,
dest="funcNO",
help="number of the function you want to process, default value is 0 - all functions")
parser.add_argument("--wno","--while_number",action="store",type=int,
dest="whileNO",
help="while to be processed, default value 1 (first while loop from each function, 0 = treat all the loops in a function) ")
parser.add_argument("-p","--parallel",action="store",
dest="parallel",
help="parallel execution, default value false -- under test now")
parser.add_argument("--vamp",action="store",default="false",
dest="vampire",
help="vampire executable, by default this option is deactivated ")
arg = parser.parse_args(args)
if arg.whileNO == None:
arg.whileNO = 1
else:
whileNo = arg.whileNO
if arg.timeLimit == None:
arg.timeLimit = 10
if arg.funcNO == None:
arg.funcNO = 0
if arg.input == None:
print "You must provide input file! "
parser.print_help()
sys.exit(1)
if arg.analyzer == None:
print "You must provide the analyzer!"
parser.print_help()
sys.exit(1)
if arg.outputFile == None:
arg.outputFile = "output.txt"
return arg
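#illustrative invocation of this script; the analyzer path, input file and option
#values below are made up and only meant to show the argparse layout defined in
#createCommands above:
#  python annotateCode.py ../analyzer input.c -o annotated.c -t 20 --fno 1 --wno 1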
def createCom(arg,tempFileName):
commandLine=""
try:
if arg.vampire == "false":
commandLine = arg.analyzer +" -t " +str(arg.timeLimit)+ " -wno " + str(arg.whileNO) + " -fno "+ str(arg.funcNO)
commandLine = commandLine + " " + arg.input + " | grep \"tff(inv\" | " + \
"sed -e \"s/tff(inv[^,]*,//g\" | sed -e \"s/claim/loop invariant/g\" | sed -e \"s/\$sum/+/g\" "
commandLine = commandLine +" |sed -e \"s/\$uminus/#/g\" | sed -e \"s/-/#/g\" | sed -e \"s/\$lesseq/</g\" | sed -e \"s/\$greatereq/>/g\""
#final step put it in the temporary OutputFile
#modify it accordingly
commandLine = commandLine + ">"+tempFileName
os.system(commandLine)
else:
#create temporary file
intermT = tempfile.NamedTemporaryFile()
commandLine = arg.analyzer +" -t " +str(arg.timeLimit)+ " -wno " + str(arg.whileNO) + " -fno "+ str(arg.funcNO)
commandLine = commandLine + " " + arg.input +" | grep tff >"+ intermT.name
os.system(commandLine)
intermS = tempfile.NamedTemporaryFile()
#launch vampire with different strategies
os.system("./symel.sh "+arg.vampire+" "+intermT.name+" "+intermS.name)
commandLine = "cat "+intermS.name+ " | grep \"tff(inv\" | " + \
"sed -e \"s/tff(inv[^,]*,//g\" | sed -e \"s/claim/loop invariant/g\" | sed -e \"s/\$sum/+/g\" "
commandLine = commandLine +" |sed -e \"s/\$uminus/#/g\" | sed -e \"s/-/#/g\" | sed -e \"s/\$lesseq/</g\" | sed -e \"s/\$greatereq/>/g\""
#final step put it in the temporary OutputFile
#modify it accordingly
commandLine = commandLine + ">"+tempFileName
os.system(commandLine)
#close all temporary files created - this action also takes care of deleting them
intermT.close()
intermS.close()
except Exception,e:
sys.exit(1)
return commandLine
#count the number of occurrences of a string in an array of strings
def getNoOccurance(inst, arg):
counter = 0
for i in inst:
if arg in i:
counter = counter + 1
return counter
#retrieve the while location in the original code (does not take into account
#the function number)
def whileLocation(inst, no):
function = 0
wno = 0
line = 0
for i in range(0, len(inst)):
if "WHILE" in inst[i]:
wno = wno +1
if no == wno :
w = inst[i].split(":")
line = int(w[1])
break
elif "Function" in inst[i]:
w = inst[i].split(":")
function = int(w[1])
return (function, line)
#retrieve the location of a specific while - takes into account the function number
def whileLocationInFun(inst, fn , wn ):
function = 0
wno = 0
line = 0
for i in range(0, len(inst)):
if "Function" in inst[i]:
function = function + 1
if function == fn and "WHILE" in inst[i]:
wno = wno + 1
if wno == wn :
t = inst[i].split(":")
line = int(t[1])
break
return line
#counts the number of while structures in a function
def countWhilesInFunction(inst, funcNo):
wno = 0
fno = 0
done = False
for x in range(0,len(inst)):
if "Function" in inst[x]:
fno = fno + 1
if fno == funcNo:
fno = x + 1
break
while not done:
if "WHILE" in inst[fno]:
wno = wno + 1
if len(inst)-1 == fno:
break
else:
fno = fno + 1
else:
done = True
return wno
#process all the whiles in a specific function
def workAllWhiles(parsedCmd, funcNO, sourceOrganization, fin,fout, start ):
done = False
print "function number: ", funcNO
WN=1
while not done:
tempF = tempfile.NamedTemporaryFile()
parsedCmd.whileNO = WN
parsedCmd.funcNO = funcNO
command = createCom(parsedCmd, tempF.name)
#os.system(command)
tempF.seek(0)
invariant = tempF.readlines()
tempF.close()
invariantI = insertInv.work(invariant)
for x in invariantI :
fout.write(x)
WN = WN + 1
stop = whileLocationInFun(sourceOrganization, funcNO, WN)
if stop == 0 :
done = True
else:
for i in range(start-1, stop-1):
fout.write(fin[i])
start = stop
# in case there is more write the rest of the file
print start
return start
from os import path
#main-like function that runs everything according to the parsed command-line options
def runAccordingToOptions(args):
parsedCmd = createCommands(args)
noFunc = 0
if not path.exists(parsedCmd.analyzer) :
print "There is no such file ", parsedCmd.analyzer
sys.exit(1)
if not path.exists(parsedCmd.input) or not path.isfile(parsedCmd.input):
print "The input does not exist, or is not a file", parsedCmd.input
sys.exit(1)
with tempfile.NamedTemporaryFile() as tf:
#in case of analyzing all the functions from the file, get the number of functions
p = subprocess.Popen((parsedCmd.analyzer+" -wno -1 "+parsedCmd.input).split(), stdout = subprocess.PIPE, stderr=subprocess.PIPE)
outp,err = p.communicate()
if err != "":
print err
sys.exit(-1)
else:
ff = outp.split("\n")
if parsedCmd.verbose == True:
print outp
sourceOrganization = []
for x in ff :
if "WHILE LOCATION:" in x:
sourceOrganization.append(x)
elif "Function number:" in x:
sourceOrganization.append(x)
#occurences of while
noWhiles = getNoOccurance(sourceOrganization, "WHILE")
#number of functions
noFunctions = getNoOccurance(sourceOrganization, "Function")
#read the input C file
f = open(parsedCmd.input,"r")
fin = f.readlines()
f.close()
#store all the information in fin
#the case when you request a specific function and a specific while loop
if parsedCmd.whileNO != 0 and parsedCmd.funcNO != 0:
tempF = tempfile.NamedTemporaryFile()
command = createCom(parsedCmd,tempF.name)
#read output and transform it and annotate the code
whileLoc = whileLocationInFun(sourceOrganization,parsedCmd.funcNO, parsedCmd.whileNO)
tempF.seek(0)
invs = tempF.readlines()
if len(invs) == 0:
print "Something went wrong... try change the timelimit, or the while number!"
sys.exit(-1)
invariant = insertInv.work(invs)
tempF.close()
fout = open(parsedCmd.outputFile,"w")
for i in range(0, whileLoc-1):
fout.write(fin[i])
for x in invariant:
fout.write(x)
for i in range(whileLoc-1,len(fin)):
fout.write(fin[i])
fout.close()
#case when you treat a specific function but all the whiles contained
if parsedCmd.funcNO != 0 and parsedCmd.whileNO == 0:
fout = open(parsedCmd.outputFile, "w")
WN=1
start = whileLocationInFun(sourceOrganization, parsedCmd.funcNO, WN)
for i in range(0,start-1):
fout.write(fin[i])
stop = workAllWhiles(parsedCmd, parsedCmd.funcNO, sourceOrganization, fin, fout, start)
for i in range(stop-1, len(fin)):
fout.write(fin[i])
fout.close()
#all functions and all whiles should be treated
if parsedCmd.funcNO == 0 and parsedCmd.whileNO == 0:
fout = open(parsedCmd.outputFile, "w")
WN=1
FNO = 1
noFN = getNoOccurance(sourceOrganization, "Function")
start = whileLocationInFun(sourceOrganization, FNO, WN)
for i in range(0,start-1):
fout.write(fin[i])
s=0
for x in range(1, noFN+1):
s = workAllWhiles(parsedCmd, x, sourceOrganization, fin, fout, start)
start = whileLocationInFun(sourceOrganization, x+1, 1)
if start != 0:
for i in range(s-1, start-1):
fout.write(fin[i])
for i in range(s-1, len(fin)):
fout.write(fin[i])
fout.close()
#all functions special while
if parsedCmd.funcNO == 0 and parsedCmd.whileNO != 0:
noFN = getNoOccurance(sourceOrganization, "Function")
start = whileLocationInFun(sourceOrganization, 1, parsedCmd.whileNO)
if start == 0:
print "ERROR: there is no such while in function 1! try another one!"
sys.exit(-1)
fout = open(parsedCmd.outputFile, "w")
for i in range(0, start-1):
fout.write(fin[i])
for i in range(1, noFN+1):
tempF = tempfile.NamedTemporaryFile()
parsedCmd.funcNO = i
command = createCom(parsedCmd, tempF.name)
#os.system(command)
tempF.seek(0)
inv = tempF.readlines()
tempF.close()
if len(inv)==0:
print "Error: the while you try to analyze does not exist, functio: ", i
sys.exit(-1)
invariant = insertInv.work(inv)
for x in invariant:
fout.write(x)
stop = whileLocationInFun(sourceOrganization, i+1, parsedCmd.whileNO)
if stop == 0:
for t in range(start-1, len(fin)):
fout.write(fin[t])
else:
for t in range(start-1, stop-1):
fout.write(fin[t])
start = stop
if __name__ == '__main__':
runAccordingToOptions(sys.argv[1:])
| 13,937 | 36.069149 | 149 |
py
|
vampire
|
vampire-master/scripts/insertInv.py
|
#!/usr/bin/python
import os
import sys
import re
import string
import infXpostFx
def split_line(line):
# return a tuple (loop invariant) , (![...] and the rest)
s= line.split(",",1)
s1 = s[1].strip("\n")
s1 = s1.replace(").","")
#print s
return (s[0],s1)
def swap(str): # swaps the positions (e.g. X2 : integer => integer X2)
part1 = str[0]
spl = part1.split(" ",2); # splitted[2] is the part which interests us!
splitted = spl[2].split(","); # split in individual groups eg X21:$int
outputF = ''
for j in range(len(splitted)):
lhs = splitted[j].split(":") #split each group
splitted[j] = lhs[1]+" "+lhs[0]
if j < len(splitted)-1:
outputF = outputF + splitted[j] +","
else:
outputF = outputF + splitted[j]
outputF =spl[1] + outputF + ";" #add the semicolons after the quantification
return outputF
#introduce the correct negation sign ( aka replace ~ with !)
def introduceNegation(str):
str= str.replace("~","!")
return str
#check if the number of open parentheses equals the number of closed ones
def balance_paranthesis(string):
res = 0
for x in string:
if x=="(":
res = res+1
if x==")":
res = res-1
return res
#replace | with its C counterpart || and & with &&
#after this step, get rid of the sG functions (just remove them from the invariant)
def introduceORAND(str):
afterOR = str.replace("|","||")
afterAND = afterOR.replace("&","&&")
afterS = afterOR.split("||")
final=""
#remove the sG*
for x in afterS:
#final = final +x+ "||"
if x.find("sG")==-1:
final=final+x+"||"
final = final.strip("||")
if balance_paranthesis(final)!=0:
final = final+")"
final = final.replace("#","-")
return final
def replaceConnectives(string):
afterNeg = introduceNegation(string)
final = introduceORAND(afterNeg)
return final
#quantify the variables that need to be quantified,
#and also translate the invariant into Frama-C syntax by calling into the other module
## infXpostFx.convertS(string)
def quantify(line):
# replace the ![X..] with \forall and for each variable define the type eg:
# \forall integer X1
firstStep = line.split("]:",1);
#in case the invariant has no quantified variables, return the invariant
if len(firstStep) == 1:
tempSplit = firstStep[0].split("|")
final = infXpostFx.convertS(tempSplit)
FIN = ''
for x in final:
FIN = FIN + x + '|'
FIN = FIN.strip('|')
final = []
final.append(FIN)
return final
else: #the other case: ![..]:invariant
forall = firstStep[0]
forall = forall.replace("![","\\forall ")
integers = forall
integers = integers.replace("$int","integer")
temp = firstStep[1].strip("\n")
temp = temp[:-1]
temp = temp.replace("(","",1)
spl = temp.split('|')
temp = []
temp = infXpostFx.convertS(spl)
finInv = ''
for x in temp:
finInv = finInv + "|" + x
finInv = finInv.strip("|")
return (integers,finInv)
def ensure_dir(name):
d = os.path.dirname(name)
if not os.path.exists(d):
os.makedirs(d)
#create the actual invariant list which has to be printed in the C file
def work(lines):
done= False
i=0
linv = ["/*@ \n"]
while not done:
finalInv=""
try:
l1 = split_line(lines[i])
l2 = quantify(l1[1]) # position two is the actual invariant
if len(l2)==1:
conn = replaceConnectives(l2[0].strip("\n"))
finalInv = l1[0] + " " + conn + ";\n"
else:
l3 = swap(l2)
conn = replaceConnectives(l2[1].strip("\n"))
finalInv = l1[0]+ "\f" + l3 + conn + ";\n" #l2[1].strip("\n") +";\n"
finalInv = finalInv.replace("<","<=")
finalInv = finalInv.replace(">",">=")
linv.append(finalInv)
i = i + 1
except IndexError,e:
print "%s main while loop" %e
print "tried %s records" % i
done = True
linv.append("*/\n")
return linv
#check if the number of command line arguments is correct
#arguments must be: file.c vanalyzeOutputFile outputFile
| 6,403 | 45.405797 | 174 |
py
|
vampire
|
vampire-master/scripts/history_search.py
|
#!/opt/local/bin/python3.2
#!/usr/bin/python
"""
Will find a svn revision that is the last to give a specified output
Command line:
[-f first_revision] [-l last_revision] [-d regex] executable arg1 ...
default first_revision is 1500
default last_revision is the current one
default regex is "Refutation found."
Looks for the latest revision for which the output of
executable arg1 ...
contains line that matches regex.
"""
import sys
import platform
import subprocess
import re
import time
import tempfile
import os
import math
DEVNULL = open('/dev/null', 'w')
revisionRE = re.compile("^Revision: ([0-9]+)$")
def getCmdResult(cmdLine):
"""return output of a command in a string"""
res = None
try:
resBytes = subprocess.check_output(cmdLine, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
resBytes = e.output
return str(resBytes,encoding="ascii")
def execCmd(cmdLine, report):
if report:
cmdStr = " ".join(cmdLine)
print(cmdStr, end="... ")
sys.stdout.flush()
res = subprocess.call(cmdLine, stderr=DEVNULL, stdout=DEVNULL)
#res = subprocess.call(cmdLine)
if report:
print("done")
return res==0
def getFirstMatch(linesStr, regex, fullMatch):
lines = linesStr.split("\n")
for line in lines:
mo = None
if fullMatch:
mo = regex.match(line)
else:
mo = regex.search(line)
if mo:
return mo
return None
class Failure(Exception):
def __init__(self, msg):
self.msg = msg
def getCurrentRevision():
infoOut = getCmdResult(["svn", "info"])
revMO = getFirstMatch(infoOut, revisionRE, True)
if revMO==None:
raise Failure("SVN repository not found")
revStr = revMO.group(1)
return int(revStr)
vampCmdLine = None
buildTgt = "vampire"
desiredRE = re.compile("Refutation found.")
#firstRevision = 1400
firstRevision = 1500
lastRevision = None
def readVampCmdLine(args):
global vampCmdLine
global buildTgt
vampCmdLine = args
execFile = vampCmdLine[0]
absExec = os.path.abspath(execFile)
repositoryPath,fname = os.path.split(absExec)
buildTgt = fname
relExec = "./"+fname
vampCmdLine[0] = relExec
print("repository path: ", repositoryPath)
os.chdir(repositoryPath)
def readArgs(args):
global vampCmdLine
global desiredRE
global firstRevision
global lastRevision
while True:
if args[0]=="-f":
firstRevision = int(args[1])
args = args[2:]
elif args[0]=="-l":
lastRevision = int(args[1])
args = args[2:]
elif args[0]=="-d":
desiredRE = re.compile(args[1])
args = args[2:]
else:
break
readVampCmdLine(args)
if lastRevision==None:
lastRevision = getCurrentRevision()
def switchToRevision(revNum):
global buildTgt
if not execCmd(["svn","update","-r",str(revNum)], True):
raise Failure("failed: svn update")
if not execCmd(["make","depend"], True):
raise Failure("failed: svn update")
if not execCmd(["make","-j","2",buildTgt], True):
raise Failure("failed: make %s" % buildTgt)
def checkSuccess():
global vampCmdLine
global desiredRE
vampOut = getCmdResult(vampCmdLine)
print(vampOut)
mo = getFirstMatch(vampOut, desiredRE, False)
return mo!=None
readArgs(sys.argv[1:])
print('Looking for regex "%s" in outputs of %s between revisions %d and %d' %
(desiredRE.pattern, buildTgt, firstRevision, lastRevision))
switchToRevision(lastRevision)
if checkSuccess():
print ("The final revision %s succeeded" % lastRevision)
sys.exit(0)
switchToRevision(firstRevision)
if not checkSuccess():
print ("The fist revision %s did not succeed" % firstRevision)
sys.exit(1)
minRev = firstRevision
maxRev = lastRevision-1
while minRev!=maxRev:
assert minRev<maxRev
mid = (minRev+maxRev+1)//2
assert mid<=maxRev
assert mid>minRev
switchToRevision(mid)
if checkSuccess():
minRev = mid
else:
maxRev = mid-1
assert minRev==maxRev
resultRev = minRev
if getCurrentRevision()!=resultRev:
switchToRevision(resultRev)
print('The last revision where regex "%s" is in outputs of %s is %d' %
(desiredRE.pattern, buildTgt, resultRev))
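# Illustrative invocation (executable path, revision numbers and problem file
# are made up; see the module docstring above for the option semantics):
#   ./history_search.py -f 2000 -l 2500 -d "Refutation found." ./vampire_rel -t 300 problem.p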
| 4,426 | 23.731844 | 78 |
py
|
vampire
|
vampire-master/scripts/generate_proof_checking_problems.py
|
import random
import os
import subprocess
N=1000
VAMPIRE='../vampire_rel_master'
TPTP='~/TPTP/TPTP-v6.1.0/'
read_from='all_relevant_problems'
directory='generated_proof_obligations'
if not os.path.exists(directory):
os.makedirs(directory)
#randomly generate N problems for proof checking
all_problems=set()
with open(read_from,'r') as probs:
for line in probs:
all_problems.add(line.strip())
problem_set = random.sample(all_problems,N)
#problem_set = all_problems
# For each problem generate obligations
for problem in problem_set:
if os.path.exists(directory+problem):
continue
print "Dealing with ",problem
#Solve problem using casc mode
OUT=""
try:
OUT=subprocess.check_output(VAMPIRE+' --ignore_missing on --mode casc --time_limit 300 --include '+TPTP+' '+TPTP+problem, shell = True)
except subprocess.CalledProcessError as err:
print "Problem not solved"
continue
#Extract option used to solve
found=False
option=""
for line in reversed(OUT.split('\n')):
line = line.strip()
if found:
option=line.split(' ')[0]
break;
if 'Refutation found' in line:
found=True
if not found:
print "There was a problem, option not found!"
continue
#print 'Option is ',option, 'found is ',found
#Generate proof checking problems
os.system('python proof_checker.py '+VAMPIRE+' --ignore_missing on --decode '+option+'0 '+TPTP+problem)
#Move them all to a directory for that problem
obligations = [ f for f in os.listdir('.') if f.startswith('proof_obligation')]
if obligations:
pdir = directory+'/'+problem
if os.path.exists(pdir):
print "Oh dear, we did ", problem, " before!"
else:
os.makedirs(pdir)
for o in obligations:
os.rename(o,pdir+'/'+o)
| 1,714 | 24.220588 | 137 |
py
|
vampire
|
vampire-master/scripts/papers/compare_preproc_analyzer.py
|
#!/usr/bin/python
import fileinput
import os
strategyCnt = None
benchs = []
intInfty = 10000000000000000
def readInpVal(v):
if v=="TO":
return intInfty
else:
return int(v)
class Rec:
idx = 0
def __init__(self, idx, vals):
self.idx = idx
self.parseTime = readInpVal(vals[0])
self.procTime = readInpVal(vals[1])
self.clauseCnt = readInpVal(vals[2])
self.atomCnt = readInpVal(vals[3])
self.distAtomCnt = readInpVal(vals[4])
def display(self):
print(self.clauseCnt, self.atomCnt, self.distAtomCnt)
def findIdxsWithLowest(arr,fn):
res = [arr[0].idx]
bestVal = fn(arr[0])
for r in arr[1:]:
val = fn(r)
if val==bestVal:
res.append(r.idx)
elif val<bestVal:
bestVal = val
res = [r.idx]
return res
class Bench:
def __init__(self, name):
self.name = name
self.recs = []
def display(self):
print self.name
for r in self.recs:
r.display()
class Observable:
def __init__(self, g, n):
self.getter = g
self.name = n
self.winners = []
self.singleWinners = []
self.TOs = []
self.allEqualCnt = 0
self.allTO = 0
self.arraysInitialized = False
def initArrays(self):
#we cannot do this in the constructor as then we don't know the strategy count
for i in range(0,strategyCnt):
self.winners.append(0)
self.singleWinners.append(0)
self.TOs.append(0)
self.arraysInitialized = True
def record(self,bench):
if not self.arraysInitialized:
self.initArrays()
winIdxs = findIdxsWithLowest(bench.recs, self.getter)
for idx in winIdxs:
self.winners[idx] += 1
if len(winIdxs)==strategyCnt:
self.allEqualCnt += 1
if self.getter(bench.recs[0])==intInfty:
self.allTO += 1
else:
for i in range(0,strategyCnt):
if self.getter(bench.recs[i])==intInfty:
self.TOs[i] += 1
self.winners.append(0)
self.TOs.append(0)
if len(winIdxs)==1:
self.singleWinners[winIdxs[0]] += 1
def display(self):
print self.name + ":"
for i in range(0,strategyCnt):
print i, "\t", self.winners[i],"\tTOs: ",self.TOs[i]
print "all eq: ", self.allEqualCnt
print "all TO: ", self.allTO
def displayForTable(self):
print self.name + "\t",
for i in range(0,strategyCnt):
print str(self.winners[i])+"\t",
print
def displaySinglesForTable(self):
print self.name + " O\t",
for i in range(0,strategyCnt):
print str(self.singleWinners[i])+"\t",
print
def displayTOsForTable(self):
print "TOs\t",
for i in range(0,strategyCnt):
print str(self.TOs[i])+"\t",
print
def getClauseCnt(r):
return r.clauseCnt
def getAtomCnt(r):
return r.atomCnt
def getDistAtomCnt(r):
return r.distAtomCnt
observers = []
observers.append(Observable(getClauseCnt,"clause count"))
observers.append(Observable(getAtomCnt,"atom count"))
observers.append(Observable(getDistAtomCnt,"distinct atom count"))
for line in fileinput.input():
args=line.split()
if not strategyCnt:
strategyCnt = (len(args)-1)/5
if len(args)!=strategyCnt*5+1:
#print " faulty benchmark: ", line
continue
bench = Bench(args[0])
for i in range(0,strategyCnt):
ofs = 1+(i*5)
rec = Rec(i, args[ofs:ofs+5])
bench.recs.append(rec)
benchs.append(bench)
for obs in observers:
obs.record(bench)
for obs in observers:
obs.displayForTable()
for obs in observers:
obs.displaySinglesForTable()
observers[0].displayTOsForTable()
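# Expected input format, reconstructed from Rec/Bench above (the values shown
# are made up): one benchmark per line, a name followed by five columns per
# strategy -- parse time, preprocessing time, clause count, atom count and
# distinct atom count -- with "TO" marking a timeout, e.g. for two strategies:
#   PUZ001+1.p 12 340 210 980 450 13 TO 190 870 430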
| 4,832 | 31.655405 | 94 |
py
|
vampire
|
vampire-master/scripts/papers/get_cpa_interpolant_results.py
|
#!/usr/bin/python
import fileinput
import os
import re
printLatex = os.getenv("PRINT_LATEX", "ON")=="ON"
#if variable is set, benchmark names will be restricted only to those appearing in the specified file
restrFName = os.getenv("RESTRICTING_FILE", "")
reIgnoredLine = re.compile("(%)|(sh: line 1: .* Alarm clock)|(Unknown reason of termination!)|(Alarm clock)")
benchSep = re.compile("========$")
reName = re.compile("results for (.*)$")
reIgnoredName = re.compile(".*\?")
reKindExtractor = re.compile("../cpa/interpolation/([^/]*)/.*")
reOldItp = re.compile("Old interpolant:")
reSzMinItp = re.compile("Interpolant:")
reCntMinItp = re.compile("Count minimized interpolant:")
reQuantMinItp = re.compile("Quantifiers minimized interpolant:")
reSEGV = re.compile("Aborted by signal SIGSEGV")
reTimeOut = re.compile("(External time out \(SIGXCPU\))|(Time limit reached!)|(Aborted by signal SIGXCPU)")
reOutOfMem = re.compile("Memory limit exceeded!")
reSubvampireFailReport = re.compile("(Time limit reached!)|(Memory limit exceeded!)")
reMinimErrorLines = re.compile('(SMT solver gave "unknown" for cost value)'+
'|(Error: Undefined name "-2147483648.0".)'+
'|(cost overflow durint SMT minimization)')
reApproxVal = re.compile("Minimization gave approximate result")
reMinimFail = re.compile("Minimization timed failed to find a satisfiable assignment, generating basic interpolant")
reOrigWeight = re.compile("Original interpolant weight cost: ([0-9]*)$")
reMinWeight = re.compile("Minimized interpolant weight cost: ([0-9]*)$")
reOrigCount = re.compile("Original interpolant count cost: ([0-9]*)$")
reMinCount = re.compile("Minimized interpolant count cost: ([0-9]*)$")
reOrigQuant = re.compile("Original interpolant quantifiers cost: ([0-9]*)$")
reMinQuant = re.compile("Minimized interpolant quantifiers cost: ([0-9]*)$")
intInfty = 10000000000000000
trSkipped = "Skipped"
trTimeOut = "TO"
trOutOfMem = "MO"
trCannotMakeLocal = "CML"
trCannotColor = "CCL"
trMinimFail = "Fail"
trSolvSegf = "SlvSEG"
trNotSolved = "NS"
trAprox = "Aprox"
class IncompleteBenchmark(Exception):
pass
class ProcError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class EarlyRecEnd(Exception):
def __init__(self, reason):
self.reason = reason
def __str__(self):
return repr(self.reason)
class LookAheadIterator(object):
def __init__(self,obj):
self.it = obj.__iter__()
self.reserve = []
def __iter__(self):
return self
def hasNext(self):
if self.reserve:
return True
try:
next = self.it.next()
self.reserve.append(next)
return True
except StopIteration:
return False
def next(self):
if self.reserve:
res = self.reserve[-1]
self.reserve = self.reserve[:-1]
return res
return self.it.next()
def peek(self):
if self.reserve:
return self.reserve[-1]
res = self.next()
self.reserve.append(res)
return res
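# Illustrative behaviour of LookAheadIterator above (values are made up):
#   it = LookAheadIterator([1, 2, 3])
#   it.peek()    # -> 1, does not consume the element
#   it.next()    # -> 1
#   it.hasNext() # -> True, 2 and 3 are still pending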
def getMatch(strList,regex):
for s in strList:
mo = regex.match(s)
if mo:
return mo
return None
def updateCounter(ctr,val):
if val in ctr:
ctr[val] += 1
else:
ctr[val] = 1
def outputTable(labels,maps,lblCnt,keyLst):
for i in range(0,lblCnt):
print "\t",
for k in keyLst:
print k, "\t",
print
cnt = len(labels)
for i in range(0,cnt):
print labels[i],"\t",
m = maps[i]
for k in keyLst:
if k in m:
print m[k],
print "\t",
print
print
def outputTableLatex(labels,maps,lblCnt,keyLst):
print "\\begin{tabular}{",
for i in range(0,lblCnt):
print "l ",
for k in keyLst:
print "r ",
print "}"
for i in range(0,lblCnt-1):
print "\t&",
for k in keyLst:
print "\t&",k,
print "\\\\"
cnt = len(labels)
for i in range(0,cnt):
print labels[i][0],
for l in labels[i][1:]:
print "\t&",l,
m = maps[i]
for k in keyLst:
print "\t&",
if k in m:
print m[k],
print "\\\\"
print "\\end{tabular}"
print
def printTable(labels,maps,lblCnt=1):
keys = set()
for m in maps:
keys.update(m.keys())
keyLst = list(keys)
def getSortKey(k):
if not isinstance(k,str):
return k
if k=="TO":
return "zzzz"
ks = k.split()
v = ks[0]
try:
nv = int(v)
return nv
except ValueError:
return v
def keyCmp(k1,k2):
return cmp(getSortKey(k1),getSortKey(k2))
keyLst.sort(keyCmp)
if printLatex:
outputTableLatex(labels,maps,lblCnt,keyLst)
else:
outputTable(labels,maps,lblCnt,keyLst)
restrNameSet = None
if restrFName:
restrNameSet = set()
f = open(restrFName, 'r')
for line in f:
line = line.strip('\n')
restrNameSet.add(line)
def isHeadLineAllowed(nm):
if not restrNameSet:
return True
return nm in restrNameSet
class Bench(object):
def __init__(self):
self.lines = []
self.name = None
self.kind = None
        #the fields below hold (value, approx) pairs; approx is True when the value is approximate or an error marker
self.origSz = None
self.origCnt = None
self.origQuant = None
self.minSz = None
self.minCnt = None
self.minQuant = None
def addLine(self,line):
self.lines.append(line)
def markRemaining(self,reason):
for fld in self.__dict__:
if self.__dict__[fld]==None:
self.__dict__[fld] = (reason, True)
def checkForResLimits(self, line):
if reTimeOut.match(line):
raise EarlyRecEnd(trTimeOut)
if reOutOfMem.match(line):
raise EarlyRecEnd(trOutOfMem)
def readValue(self, iter, valRegex, valName):
approx = False
for line in iter:
self.checkForResLimits(line)
if reMinimErrorLines.match(line) or reApproxVal.match(line):
approx = True
continue
resVal = None
if reMinimFail.match(line):
resVal = trMinimFail
approx = True
else:
matchObj = valRegex.match(line)
if not matchObj:
raise ProcError("unrecognized line: "+line)
resVal = int(matchObj.group(1))
self.__dict__[valName] = (resVal, approx)
return
raise IncompleteBenchmark()
def tryAcceptInterpolant(self, iter, valRegex):
approx = False
if not iter.hasNext():
return False
line = iter.peek()
self.checkForResLimits(line)
if not valRegex.match(line):
return False
#raise ProcError("expected '"+valRegex.pattern+"' line: "+line)
iter.next()
return True
def readName(self, line):
nameMatch = reName.match(line)
if not nameMatch:
raise ProcError("no name match")
self.name = nameMatch.group(1)
kindMatch = reKindExtractor.match(self.name)
if kindMatch:
self.kind = kindMatch.group(1)
if not isHeadLineAllowed(line):
raise EarlyRecEnd(trSkipped)
def process(self):
try:
lineIt = LookAheadIterator(self.lines)
self.readName(lineIt.next())
if reIgnoredName.match(self.name):
raise IncompleteBenchmark()
while lineIt.hasNext() and not reOldItp.match(lineIt.peek()) and not reOrigWeight.match(lineIt.peek()):
lineIt.next()
if not lineIt.hasNext():
raise EarlyRecEnd(trNotSolved)
self.tryAcceptInterpolant(lineIt, reOldItp)
self.readValue(lineIt, reOrigWeight, "origSz")
self.readValue(lineIt, reMinWeight, "minSz")
self.readValue(lineIt, reOrigCount, "origCnt")
self.readValue(lineIt, reMinCount, "minCnt")
self.readValue(lineIt, reOrigQuant, "origQuant")
self.readValue(lineIt, reMinQuant, "minQuant")
self.tryAcceptInterpolant(lineIt, reSzMinItp)
self.tryAcceptInterpolant(lineIt, reCntMinItp)
self.tryAcceptInterpolant(lineIt, reQuantMinItp)
except StopIteration:
raise IncompleteBenchmark()
except EarlyRecEnd as ere:
self.markRemaining(ere.reason)
class NullPostprocessor(object):
def __call__(self,map):
pass
class CompoundPostprocessor(object):
def __init__(self,pps):
self.pps=pps
def __call__(self,map):
for pp in self.pps:
pp(map)
class DeletingPostprocessor(object):
def __init__(self,flds):
self.flds=flds
def __call__(self,map):
for k in self.flds:
if k in map:
del map[k]
class MergingPostprocessor(object):
def __init__(self,masterFld,mergedFlds):
self.masterFld=masterFld
self.mergedFlds=mergedFlds
def __call__(self,map):
for k in self.mergedFlds:
if k in map:
if self.masterFld!=False:
if self.masterFld in map:
map[self.masterFld] += map[k]
else:
map[self.masterFld] = map[k]
del map[k]
class GroupingPostprocessor(object):
def getTgt(self,num):
if not isinstance(num,int):
return num
if num<3:
return num
if num<6:
return "3 -- 5"
if num<11:
return "6 -- 10"
if num<21:
return "11 -- 20"
if num<51:
return "21 -- 50"
if num<101:
return "51 -- 100"
if num<501:
return "101 -- 500"
if num<1001:
return "501 -- 1,000"
if num<10001:
return "1,000 -- 10,000"
else:
return ">10,000"
def __call__(self,map):
keys = list(map.keys())
for k in keys:
master = self.getTgt(k)
if master==k:
continue
if master in map:
map[master] += map[k]
else:
map[master] = map[k]
del map[k]
class DeletingPostprocessor(object):
def __init__(self,flds):
self.flds=flds
def __call__(self,map):
for k in self.flds:
if k in map:
del map[k]
class Observer(object):
def __init__(self,name):
self.name = name
def observe(self,bench):
pass
def display(self):
        print self.name, " <display not implemented>"
class CountingObserver(Observer):
def __init__(self,name, getter, postproc=NullPostprocessor()):
super(CountingObserver,self).__init__(name)
self.getter = getter
self.ctr={}
self.postproc=postproc
def observe(self,bench):
val = self.getter(bench)
updateCounter(self.ctr,val)
def display(self):
self.postproc(self.ctr)
print self.name
printTable([["all"]],[self.ctr])
return
def getCounter(self):
self.postproc(self.ctr)
return self.ctr
class FldGetter(object):
def __init__(self,fldName):
self.fldName = fldName
def __call__(self,rec):
val = rec.__dict__[self.fldName]
if isinstance(val,tuple) and isinstance(val[0],str):
val = val[0]
return val
class FldValGetter(object):
def __init__(self,fldName):
self.fldName = fldName
def __call__(self,rec):
val = rec.__dict__[self.fldName]
if isinstance(val,tuple):
val = val[0]
return val
class MinGate(object):
def __init__(self,measure,ignoreApprox):
self.oGetter = FldGetter("orig"+measure)
self.mGetter = FldGetter("min"+measure)
self.ignoreApprox = ignoreApprox
def __call__(self,rec):
oVal = self.oGetter(rec)
if isinstance(oVal,str):
return oVal
mVal = self.mGetter(rec)
if isinstance(mVal,str):
return "m"+mVal
if oVal==None:
raise ProcError("oval none")
if oVal[1]:
return trAprox
if self.ignoreApprox and mVal[1]:
return trAprox
oNum = oVal[0]
mNum = mVal[0]
if oNum<mNum:
if self.ignoreApprox:
raise ProcError("minimal worse than original in unapproximate")
return -1
if oNum==mNum:
return 0
if mNum==0:
return "to Zero"
return oNum/mNum
locProofPreproc = MergingPostprocessor(False,[None])
clrFailRemover = DeletingPostprocessor([trCannotMakeLocal,trCannotColor])
complYicesFailPostpr = MergingPostprocessor("Fail",[None,"mFail"])
pproc = CompoundPostprocessor([clrFailRemover,complYicesFailPostpr])
pproc = NullPostprocessor()
pproc = CompoundPostprocessor([GroupingPostprocessor(),MergingPostprocessor("TO",["NS","mTO", trAprox]),MergingPostprocessor(False,[trSkipped])])
class ObserverMaster(object):
def __init__(self):
self.general = self.buildObservers()
self.kinds = {}
def buildObservers(self):
return [
CountingObserver("size min", MinGate('Sz',True), pproc),
CountingObserver("count min", MinGate('Cnt',True), pproc),
CountingObserver("quant min", MinGate('Quant',True), pproc),
]
def observeByList(self,obsLst,bench):
for o in obsLst:
o.observe(bench)
def observe(self,bench):
self.observeByList(self.general,bench)
k = bench.kind
if k:
if k not in self.kinds:
self.kinds[k] = self.buildObservers()
self.observeByList(self.kinds[k],bench)
def collectObserversRes(self,prefix,obsLst,lblLstRef,mapLstRef):
for o in obsLst:
lbl = [prefix,o.name]
lblLstRef.append(lbl)
mapLstRef.append(o.getCounter())
def display(self):
lbls = []
maps = []
self.collectObserversRes("all",self.general,lbls,maps)
for k in self.kinds:
self.collectObserversRes(k,self.kinds[k],lbls,maps)
printTable(lbls,maps,2)
class ObserverMaster2(ObserverMaster):
def buildObservers(self):
return [
CountingObserver("origSz", FldValGetter('origSz'), pproc),
CountingObserver("minSz", FldValGetter('minSz'), pproc),
]
def onInvalidBench(bench):
print "########### invalid benchmark ###########"
if bench.error:
print bench.error
for line in bench.lines:
print line
benchs = []
observers = [ObserverMaster(),ObserverMaster2()]
currBench = None
for line in fileinput.input():
line = line.rstrip()
if reIgnoredLine.match(line):
continue
if benchSep.match(line):
if currBench:
try:
currBench.process()
for o in observers:
o.observe(currBench)
benchs.append(currBench)
#print currBench.name+"\t"+str(currBench.origSz)+" "+str(currBench.minSz)
except ProcError as err:
currBench.error = err
onInvalidBench(currBench)
except IncompleteBenchmark:
pass
currBench = Bench()
elif currBench:
currBench.addLine(line)
for o in observers:
o.display()
| 19,577 | 35.390335 | 145 |
py
|
vampire
|
vampire-master/scripts/papers/z3_interpolant_stat_analyzer.py
|
#!/usr/bin/python
import fileinput
import os
import re
reYicesGarbaggeLine = re.compile("sh: line 1: .* Alarm clock");
benchSep = re.compile("============#$")
colorSep = re.compile("------#$")
reName = re.compile("F: (.*)$")
reRedStart = re.compile("Rq:$")
reBlueStart = re.compile("Bq:$")
reDerLocal = re.compile("Derivation was already local")
reDerNonLocal = re.compile("Derivation not local")
reTimeOut = re.compile("(External time out \(SIGXCPU\))|(Time limit reached!)")
reOutOfMem = re.compile("Memory limit exceeded!")
reErrorResult = re.compile("error result code")
reNonLocalUnit = re.compile("Non-local unit:")
reCannotMakeLocal = re.compile("Cannot make the colored proof local")
reCannotColorRefutation = re.compile("Cannot color the refutation")
reMinimErrorLines = re.compile('(SMT solver gave "unknown" for cost value)'+
'|(Error: Undefined name "-2147483648.0".)'+
'|(cost overflow durint SMT minimization)')
reApproxVal = re.compile("Minimization gave approximate result")
reMinimFail = re.compile("Minimization timed failed to find a satisfiable assignment, generating basic interpolant")
reOrigWeight = re.compile("Original interpolant weight cost: ([0-9]*)$")
reMinWeight = re.compile("Minimized interpolant weight cost: ([0-9]*)$")
reOrigCount = re.compile("Original interpolant count cost: ([0-9]*)$")
reMinCount = re.compile("Minimized interpolant count cost: ([0-9]*)$")
reOrigQuant = re.compile("Original interpolant quantifiers cost: ([0-9]*)$")
reMinQuant = re.compile("Minimized interpolant quantifiers cost: ([0-9]*)$")
intInfty = 10000000000000000
trTimeOut = "TO"
trOutOfMem = "MO"
trCannotMakeLocal = "CML"
trCannotColor = "CCL"
trMinimFail = "Fail"
class IncompleteBenchmark(Exception):
pass
class ProcError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class EarlyRecEnd(Exception):
def __init__(self, reason):
self.reason = reason
def __str__(self):
return repr(self.reason)
def getMatch(strList,regex):
for s in strList:
mo = regex.match(s)
if mo:
return mo
return None
def updateCounter(ctr,val):
if val in ctr:
ctr[val] += 1
else:
ctr[val] = 1
def printTable(labels,maps):
keys = set()
for m in maps:
keys.update(m.keys())
keyLst = list(keys)
keyLst.sort()
print "\t",
for k in keyLst:
print k, "\t",
print
cnt = len(labels)
for i in range(0,cnt):
print labels[i],"\t",
m = maps[i]
for k in keyLst:
if k in m:
print m[k],
print "\t",
print
print
class Rec(object):
def __init__(self):
self.lines = []
self.derLocal = None
        #the fields below hold (value, approx) pairs; approx is True when the value is approximate or an error marker
self.origSz = None
self.origCnt = None
self.origQuant = None
self.minSz = None
self.minCnt = None
self.minQuant = None
def addLine(self,line):
self.lines.append(line)
def markRemaining(self,reason):
for fld in self.__dict__:
if self.__dict__[fld]==None:
self.__dict__[fld] = (reason, True)
def checkForResLimits(self, line):
if reTimeOut.match(line):
raise EarlyRecEnd(trTimeOut)
if reOutOfMem.match(line):
raise EarlyRecEnd(trOutOfMem)
def readValue(self, iter, valRegex, valName):
approx = False
for line in iter:
self.checkForResLimits(line)
if reCannotMakeLocal.match(line):
raise EarlyRecEnd(trCannotMakeLocal)
if reMinimErrorLines.match(line) or reApproxVal.match(line):
approx = True
continue
resVal = None
if reMinimFail.match(line):
resVal = trMinimFail
approx = True
else:
matchObj = valRegex.match(line)
if not matchObj:
raise ProcError("unrecognized line: "+line)
resVal = int(matchObj.group(1))
self.__dict__[valName] = (resVal, approx)
return
raise IncompleteBenchmark()
def process(self):
lineIt = self.lines.__iter__()
line = lineIt.next()
try:
self.checkForResLimits(line)
if reNonLocalUnit.match(line):
line = lineIt.next()
if not reCannotColorRefutation.match(line):
raise ProcError("non-colorability report expected: "+line)
raise EarlyRecEnd(trCannotColor)
if reDerNonLocal.match(line):
self.derLocal = False
elif reDerLocal.match(line):
self.derLocal = True
else:
raise ProcError("derivation locality not output on the first line")
self.readValue(lineIt, reOrigWeight, "origSz")
self.readValue(lineIt, reMinWeight, "minSz")
self.readValue(lineIt, reOrigCount, "origCnt")
self.readValue(lineIt, reMinCount, "minCnt")
self.readValue(lineIt, reOrigQuant, "origQuant")
self.readValue(lineIt, reMinQuant, "minQuant")
try:
line = lineIt.next()
raise ProcError("extra line: "+line)
except StopIteration:
pass
except EarlyRecEnd as ere:
self.markRemaining(ere.reason)
if not reErrorResult.match(self.lines[-1]):
raise ProcError(str(ere)+" without error result")
except StopIteration:
raise ProcError("more lines expected")
class Bench(object):
def __init__(self):
self.lines = []
self.name = None
self.redRec = None
self.blueRec = None
self.error = None
def addLine(self,line):
self.lines.append(line)
def process(self):
try:
lineIt = self.lines.__iter__()
nameMatch = reName.match(lineIt.next())
if not nameMatch:
raise ProcError("no name match")
self.name = nameMatch.group(1)
if not reRedStart.match(lineIt.next()):
raise ProcError("no red start match")
curr = self.redRec = Rec()
for line in lineIt:
if colorSep.match(line):
break
else:
curr.addLine(line)
if not reBlueStart.match(lineIt.next()):
raise ProcError("no blue start match")
curr = self.blueRec = Rec()
for line in lineIt:
curr.addLine(line)
self.redRec.process()
self.blueRec.process()
except StopIteration:
raise IncompleteBenchmark()
class NullPostprocessor(object):
def __call__(self,map):
pass
class CompoundPostprocessor(object):
def __init__(self,pps):
self.pps=pps
def __call__(self,map):
for pp in self.pps:
pp(map)
class DeletingPostprocessor(object):
def __init__(self,flds):
self.flds=flds
def __call__(self,map):
for k in self.flds:
if k in map:
del map[k]
class MergingPostprocessor(object):
def __init__(self,masterFld,mergedFlds):
self.masterFld=masterFld
self.mergedFlds=mergedFlds
def __call__(self,map):
for k in self.mergedFlds:
if k in map:
if self.masterFld in map:
map[self.masterFld] += map[k]
else:
map[self.masterFld] = map[k]
del map[k]
class Observer(object):
def __init__(self,name):
self.name = name
def observe(self,bench):
pass
def display(self):
        print self.name, " <display not implemented>"
class CountingObserver(Observer):
def __init__(self,name, getter, postproc=NullPostprocessor()):
super(CountingObserver,self).__init__(name)
self.getter = getter
self.redCtr={}
self.blueCtr={}
self.betterCtr={}
self.allCtr={}
self.postproc=postproc
def observe(self,bench):
redVal = self.getter(bench.redRec)
blueVal = self.getter(bench.blueRec)
updateCounter(self.redCtr,redVal)
updateCounter(self.blueCtr,blueVal)
updateCounter(self.allCtr,redVal)
updateCounter(self.allCtr,blueVal)
betterVal = None
if isinstance(redVal,int):
if isinstance(blueVal,int):
betterVal = max(redVal, blueVal)
else:
betterVal = redVal
else:
betterVal = blueVal
updateCounter(self.betterCtr,betterVal)
def display(self):
self.postproc(self.allCtr)
self.postproc(self.redCtr)
self.postproc(self.blueCtr)
self.postproc(self.betterCtr)
print self.name
printTable(["all","red","blue","better"],[self.allCtr,self.redCtr,self.blueCtr,self.betterCtr])
return
print " all: ", self.allCtr
print " red: ", self.redCtr
print " blue: ", self.blueCtr
print " better: ", self.betterCtr
class ComparingObserver(Observer):
def __init__(self,name, getter):
super(ComparingObserver,self).__init__(name)
self.getter = getter
self.redBetter=0
self.blueBetter=0
self.bothSame=0
self.bothFail={}
def observe(self,bench):
redVal = self.getter(bench.redRec)
blueVal = self.getter(bench.blueRec)
if isinstance(redVal,tuple):
redVal = redVal[0]
if isinstance(blueVal,tuple):
blueVal = blueVal[0]
if isinstance(redVal,int):
if isinstance(blueVal,int):
if redVal==blueVal:
self.bothSame += 1
elif redVal<blueVal:
self.redBetter += 1
else:
self.blueBetter += 1
else:
self.redBetter += 1
else:
if isinstance(blueVal,int):
self.blueBetter += 1
else:
failVal = (redVal,blueVal)
if redVal==blueVal:
failVal = redVal
updateCounter(self.bothFail,failVal)
def display(self):
print self.name
print "red\t", self.redBetter
print "blue\t", self.blueBetter
print "same\t", self.bothSame
print "both fail\t", self.bothFail
class LocGetter(object):
def __call__(self,rec):
if isinstance(rec.derLocal,tuple) and isinstance(rec.derLocal[0],str):
return rec.derLocal[0]
if rec.derLocal:
return True
if rec.origSz[0]==trCannotMakeLocal:
return trCannotMakeLocal
if rec.origSz[0]==trTimeOut or rec.origSz[0]==trOutOfMem:
#here we don't know if it was timeout during localization or initial interpolant getting
return None
return False
class FldGetter(object):
def __init__(self,fldName):
self.fldName = fldName
def __call__(self,rec):
val = rec.__dict__[self.fldName]
if isinstance(val,tuple) and isinstance(val[0],str):
val = val[0]
return val
class MinGate(object):
def __init__(self,measure,ignoreApprox):
self.oGetter = FldGetter("orig"+measure)
self.mGetter = FldGetter("min"+measure)
self.ignoreApprox = ignoreApprox
def __call__(self,rec):
oVal = self.oGetter(rec)
if isinstance(oVal,str):
return oVal
mVal = self.mGetter(rec)
if isinstance(mVal,str):
return "m"+mVal
if oVal==None:
raise ProcError("oval none")
if oVal[1]:
return None
if self.ignoreApprox and mVal[1]:
return None
oNum = oVal[0]
mNum = mVal[0]
if oNum<mNum:
if self.ignoreApprox:
raise ProcError("minimal worse than original in unapproximate")
return -1
if oNum==mNum:
return 0
return oNum/mNum
def onInvalidBench(bench):
print "########### invalid benchmark ###########"
if bench.error:
print bench.error
for line in bench.lines:
print line
benchs = []
locProofPreproc = MergingPostprocessor(False,[None])
clrFailRemover = DeletingPostprocessor([trCannotMakeLocal,trCannotColor])
complYicesFailPostpr = MergingPostprocessor("Fail",[None,"mFail"])
pproc = CompoundPostprocessor([clrFailRemover,complYicesFailPostpr])
pproc({"a":1})
observers = [
CountingObserver("local proof", LocGetter(), locProofPreproc),
CountingObserver("size minimization", MinGate('Sz',True), pproc),
CountingObserver("count minimization", MinGate('Cnt',True), pproc),
CountingObserver("quantifier minimization", MinGate('Quant',True), pproc),
CountingObserver("size minimization w. approx", MinGate('Sz',False), pproc),
CountingObserver("count minimization w. approx", MinGate('Cnt',False), pproc),
CountingObserver("quantifier minimization w. approx", MinGate('Quant',False), pproc),
ComparingObserver("orig size", FldGetter('origSz')),
ComparingObserver("min size", FldGetter('minSz')),
ComparingObserver("orig cnt", FldGetter('origCnt')),
ComparingObserver("min cnt", FldGetter('minCnt')),
ComparingObserver("orig quant", FldGetter('origQuant')),
ComparingObserver("min quant", FldGetter('minQuant'))
]
currBench = None
for line in fileinput.input():
line = line.rstrip()
if reYicesGarbaggeLine.match(line):
continue
if benchSep.match(line):
if currBench:
try:
currBench.process()
for o in observers:
o.observe(currBench)
benchs.append(currBench)
except ProcError as err:
currBench.error = err
onInvalidBench(currBench)
except IncompleteBenchmark:
pass
currBench = Bench()
elif currBench:
currBench.addLine(line)
for o in observers:
o.display()
| 17,818 | 38.774554 | 117 |
py
|
openslide
|
openslide-main/scripts/dist.py
|
#!/usr/bin/python3
import os
from pathlib import Path
import shutil
import subprocess
base = Path(os.getenv('MESON_DIST_ROOT'))
subprocess.run(['meson', 'compile', 'doc/html'], check=True)
shutil.copytree('doc/html', base / 'doc/html', symlinks=True)
| 254 | 20.25 | 61 |
py
|
neuralqa
|
neuralqa-master/setup.py
|
import os
from importlib.machinery import SourceFileLoader
from setuptools import setup, find_packages
version = SourceFileLoader('neuralqa.version', os.path.join(
'neuralqa', 'version.py')).load_module().VERSION
def package_files(directory):
paths = []
for (path, _, filenames) in os.walk(directory):
for filename in filenames:
paths.append(os.path.join('..', path, filename))
return paths
ui_files = package_files("neuralqa/server/ui/build")
yaml_file = ["config_default.yaml"]
setup(
name='neuralqa',
packages=find_packages(exclude=['tests', 'tests.*']),
package_data={"neuralqa": ui_files + yaml_file},
version=version,
license='MIT',
description='NeuralQA: Question Answering on Large Datasets',
long_description=open('README.md').read(),
long_description_content_type="text/markdown",
author='Victor Dibia',
url='https://github.com/victordibia/neuralqa',
python_requires='>=3.5',
# download_url='https://github.com/victordibia/neuralqa/archive/v0.0.2.tar.gz',
keywords=['NLP', 'Question Answering', 'Machine Learning'],
install_requires=[
'fastapi',
'aiofiles',
'uvicorn',
'numpy',
'tensorflow>=2.1.0',
'torch',
'torchvision',
'transformers',
'elasticsearch>=7.7.1',
'pyyaml>=3.13',
'spacy'
],
extras_require={
'test': ['pytest']
},
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
entry_points={
"console_scripts": [
"neuralqa=neuralqa.cli:cli",
]
}
)
| 1,762 | 27.435484 | 83 |
py
|
neuralqa
|
neuralqa-master/neuralqa/cli.py
|
import click
from neuralqa.server import launch_server
from neuralqa.utils import cli_args
from neuralqa.utils import import_sample_data, ConfigParser
import os
from neuralqa.retriever import RetrieverPool
import logging
@click.group()
@click.version_option()
def cli():
pass
# @cli.command()
# @cli_args.HOST
# @cli_args.PORT
# @cli_args.WORKERS
# @cli_args.CONFIG_PATH
# def test(host, port, workers, config_path):
# import_sample_data()
@cli.command()
@cli_args.MAX_DOCS
def load(max_docs):
"""This command loads sample data into a local elastic search index."""
logging.basicConfig()
logging.getLogger().setLevel(logging.INFO)
logging.getLogger(__name__).setLevel(logging.INFO)
import_sample_data(max_docs=max_docs)
@cli.command()
@cli_args.HOST
@cli_args.PORT
@cli_args.WORKERS
@cli_args.CONFIG_PATH
def ui(host, port, workers, config_path):
"""This command launches the web interface for NeuralQA."""
logging.basicConfig()
logging.getLogger().setLevel(logging.INFO)
logging.getLogger(__name__).setLevel(logging.INFO)
if (config_path):
os.environ["NEURALQA_CONFIG_PATH"] = config_path
launch_server(host, port, workers)
if __name__ == '__main__':
cli()
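# Illustrative shell usage; the exact flag spellings are defined in
# neuralqa.utils.cli_args, so treat the option names below as assumptions:
#   neuralqa load --max-docs 1000   # index sample data into local Elasticsearch
#   neuralqa ui --port 5000         # launch the web interface and REST API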
| 1,233 | 22.283019 | 75 |
py
|
neuralqa
|
neuralqa-master/neuralqa/version.py
|
VERSION = "0.0.31-alpha"
| 26 | 8 | 24 |
py
|
neuralqa
|
neuralqa-master/neuralqa/__init__.py
|
import logging
from neuralqa.version import VERSION as __version__
from neuralqa.reader import BERTReader
from neuralqa.utils import import_sample_data
logging.getLogger("transformers").setLevel(logging.ERROR)
logging.getLogger("tensorflow").setLevel(logging.ERROR)
logging.getLogger("elasticsearch").setLevel(logging.CRITICAL)
__all__ = ["BERTReader", "import_sample_data"]
| 378 | 30.583333 | 61 |
py
|
neuralqa
|
neuralqa-master/neuralqa/expander/mlmexpander.py
|
from neuralqa.expander import Expander
import logging
from transformers import AutoTokenizer, TFBertForMaskedLM
import tensorflow as tf
import time
import spacy
logger = logging.getLogger(__name__)
class MLMExpander(Expander):
def __init__(self, index_type="mlm", model_path="bert-base-uncased", **kwargs):
Expander.__init__(self, index_type)
self.candidate_pos = ["NOUN", "ADJ", "ADV"]
self.model_path = model_path
allowed_keys = list(self.__dict__.keys())
self.__dict__.update((k, v)
for k, v in kwargs.items() if k in allowed_keys)
rejected_keys = set(kwargs.keys()) - set(allowed_keys)
if rejected_keys:
raise ValueError(
"Invalid arguments in ElasticSearchRetriever constructor:{}".format(rejected_keys))
logger.info(
">> loading HF model for Query Expansion from " + model_path)
self.tokenizer = AutoTokenizer.from_pretrained(
self.model_path, use_fast=True)
self.model = TFBertForMaskedLM.from_pretrained(
self.model_path, from_pt=True)
logger.info(">> Loading Spacy NLP model ")
try:
self.nlp = spacy.load('en_core_web_md')
except OSError:
logger.info(
"Downloading language model for the spaCy POS tagger (don't worry, this will only happen once)")
from spacy.cli import download
download('en_core_web_md')
self.nlp = spacy.load('en_core_web_md')
# self.nlp = en_core_web_md.load()
# logger.info(">> Spacy nlp model loaded ")
def predict_mask(self, sequence, model, tokenizer, top_n=2):
input = tokenizer.encode(sequence, return_tensors="tf")
mask_token_index = tf.where(input == tokenizer.mask_token_id)[0, 1]
token_logits = model(input)[0]
mask_token_logits = token_logits[0, mask_token_index, :]
probabilities = tf.nn.softmax(mask_token_logits)
topk = tf.math.top_k(probabilities, top_n)
top_n_probs, top_n_tokens = topk.values.numpy(), topk.indices.numpy()
results = [{"token": tokenizer.decode([top_n_tokens[i]]), "probability": float(top_n_probs[i])}
for i in range(len(top_n_probs))]
# print(results)
return results
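    # Illustrative call to predict_mask above (the scores shown are made up):
    # predicting the mask in "the [MASK] barked loudly" with top_n=2 might
    # return [{"token": "dog", "probability": 0.72}, {"token": "wolf", "probability": 0.06}]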
def expand_query(self, query, top_n=3, threshold=0):
start_time = time.time()
doc = self.nlp(query)
query_tokens = [str(token) for token in doc]
new_terms = []
candidate_expansions = []
# print([chunk.text for chunk in doc.noun_chunks], "\n =========")
# print([ent.text for ent in doc.ents], "\n =========")
# for token in doc:
# print(token, "=>", token.ent_type_)
for i, token in enumerate(doc):
# only expand if pos is not in our candidate list and it is not a named entity type
pred_tokens = None
if (token.pos_ in self.candidate_pos and not token.ent_type_):
temp_doc = query_tokens.copy()
temp_doc[i] = self.tokenizer.mask_token
temp_doc = " ".join(temp_doc)
pred_tokens = self.predict_mask(
temp_doc, self.model, self.tokenizer, top_n=top_n)
new_terms = new_terms + pred_tokens
candidate_expansions.append(
{"token": str(token), "expansion": pred_tokens, "token_index": i, "pos": token.pos_, "pos_desc": spacy.explain(token.pos_), "named_entity": token.ent_type_, "ent_desc": spacy.explain(token.ent_type_)})
elapsed_time = time.time() - start_time
terms_list = []
seen_terms = []
# remove punctuation, low probability, words subwords, duplicates
for token in new_terms:
if token["token"].isalnum() and token["probability"] > threshold and "#" not in token["token"] and token["token"] not in query and token["token"] not in seen_terms:
terms_list.append(token)
seen_terms.append(token["token"])
result = {
"terms": terms_list,
"query": query_tokens,
"expansions": candidate_expansions,
"took": elapsed_time
}
return result
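# Minimal usage sketch, guarded so it only runs when this module is executed
# directly; it downloads the Hugging Face and spaCy models on first use, and
# the query string is just an example.
if __name__ == "__main__":
    expander = MLMExpander(model_path="bert-base-uncased")
    expansion = expander.expand_query("what is the fastest animal", top_n=3)
    print(expansion["terms"])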
| 4,286 | 41.87 | 217 |
py
|
neuralqa
|
neuralqa-master/neuralqa/expander/expander.py
|
class Expander:
def __init__(self, expander_type, **kwargs):
self.expander_type = expander_type
| 109 | 21 | 48 |
py
|
neuralqa
|
neuralqa-master/neuralqa/expander/__init__.py
|
from .expander import *
from .mlmexpander import *
from .expanderpool import *
| 79 | 19 | 27 |
py
|
neuralqa
|
neuralqa-master/neuralqa/expander/expanderpool.py
|
from neuralqa.expander import MLMExpander
import logging
logger = logging.getLogger(__name__)
class ExpanderPool():
def __init__(self, expanders):
self._selected_expander = expanders["selected"]
self.expander_pool = {}
for expander in expanders["options"]:
if (expander["type"] == "maskedlm"):
self.expander_pool[expander["value"]] = MLMExpander(
model_path=expander["value"])
@property
def expander(self):
return self.expander_pool[self.selected_expander]
@property
def selected_expander(self):
return self._selected_expander
@selected_expander.setter
def selected_expander(self, selected_expander):
if (selected_expander in self.expander_pool):
self._selected_expander = selected_expander
else:
if (len(self.expander_pool) > 0):
default_expander = next(iter(self.expander_pool))
logger.info(
">> Expander you are attempting to use %s does not exist in expander pool. Using the following default expander instead %s ", selected_expander, default_expander)
self._selected_expander = default_expander
else:
logger.info(
">> No expander has been specified in config.yaml.")
self._selected_expander = None
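# Illustrative shape of the `expanders` argument, normally read from
# config.yaml; the model name below is only an example:
#   pool = ExpanderPool({
#       "selected": "bert-base-uncased",
#       "options": [{"type": "maskedlm", "value": "bert-base-uncased"}]
#   })
#   pool.expander.expand_query("what is the fastest animal")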
| 1,398 | 34.871795 | 182 |
py
|
neuralqa
|
neuralqa-master/neuralqa/reader/reader.py
|
import tensorflow as tf
import numpy as np
from transformers import AutoTokenizer, TFAutoModelForQuestionAnswering
import time
import logging
logger = logging.getLogger(__name__)
class Reader:
def __init__(self, model_name, model_path, model_type, **kwargs):
self.load_model(model_name, model_path, model_type)
def load_model(self, model_name, model_path, model_type):
logger.info(">> Loading HF model " +
model_name + " from " + model_path)
self.type = model_type
self.name = model_name
self.tokenizer = AutoTokenizer.from_pretrained(
model_path, use_fast=True)
self.model = TFAutoModelForQuestionAnswering.from_pretrained(
model_path, from_pt=True)
| 758 | 28.192308 | 71 |
py
|
neuralqa
|
neuralqa-master/neuralqa/reader/bertreader.py
|
from neuralqa.reader import Reader
import tensorflow as tf
import numpy as np
import time
import logging
logger = logging.getLogger(__name__)
class BERTReader(Reader):
def __init__(self, model_name, model_path, model_type="bert", **kwargs):
Reader.__init__(self, model_name, model_path, model_type)
# self.load_model(model_name, model_path, model_type)
def get_best_start_end_position(self, start_scores, end_scores):
answer_start = tf.argmax(start_scores, axis=1).numpy()[0]
answer_end = (tf.argmax(end_scores, axis=1) + 1).numpy()[0]
return answer_start, answer_end
def get_chunk_answer_span(self, inputs):
start_time = time.time()
answer_start_scores, answer_end_scores = self.model(inputs)
answer_start, answer_end = self.get_best_start_end_position(
answer_start_scores, answer_end_scores)
answer_end = answer_end - \
1 if answer_end == answer_end_scores.shape[1] else answer_end
answer_start_softmax_probability = tf.nn.softmax(
answer_start_scores, axis=1).numpy()[0][answer_start]
answer_end_softmax_probability = tf.nn.softmax(
answer_end_scores, axis=1).numpy()[0][answer_end]
answer = self.tokenizer.decode(
inputs["input_ids"][0][answer_start:answer_end], skip_special_tokens=True)
        # if the model predicts token 0 (the [CLS] position inside the question segment) as the answer start, return an empty answer
if answer_start == 0:
answer = ""
elapsed_time = time.time() - start_time
return {"answer": answer, "took": elapsed_time,
"start_probability": str(answer_start_softmax_probability),
"end_probability": str(answer_end_softmax_probability),
"probability": str(answer_end_softmax_probability + answer_start_softmax_probability / 2)
}
def token_chunker(self, question, context, max_chunk_size=512, stride=2, max_num_chunks=5):
# we tokenize question and context once.
# if question + context > max chunksize, we break it down into multiple chunks of question +
# subsets of context with some stride overlap
question_tokens = self.tokenizer.encode(question)
context_tokens = self.tokenizer.encode(
context, add_special_tokens=False)
chunk_holder = []
chunk_size = max_chunk_size - len(question_tokens) - 1
# -1 for the 102 end token we append later
current_pos = 0
chunk_count = 0
while current_pos < len(context_tokens) and current_pos >= 0:
# we want to cap the number of chunks we create
if max_num_chunks and chunk_count >= max_num_chunks:
break
end_point = current_pos + \
chunk_size if (current_pos + chunk_size) < len(context_tokens) - \
1 else len(context_tokens) - 1
token_chunk = question_tokens + \
context_tokens[current_pos: end_point] + [102]
# question type is 0, context type is 1, convert to tf
token_type_ids = [0]*len(question_tokens) + \
[1] * (len(token_chunk) - len(question_tokens))
token_type_ids = tf.constant(
token_type_ids, dtype='int32', shape=(1, len(token_type_ids)))
# attend to every token
attention_mask = tf.ones(
(1, len(token_chunk)), dtype=tf.dtypes.int32)
# convert token chunk to tf
token_chunk = tf.constant(
token_chunk, dtype='int32', shape=(1, len(token_chunk)))
chunk_holder.append(
{"token_ids": token_chunk,
"context": self.tokenizer.decode(context_tokens[current_pos: end_point], skip_special_tokens=True),
"attention_mask": attention_mask,
"token_type_ids": token_type_ids
})
current_pos = current_pos + chunk_size - stride + 1
chunk_count += 1
return chunk_holder
def answer_question(self, question, context, max_chunk_size=512, stride=70):
# chunk tokens
chunked_tokens = self.token_chunker(
question, context, max_chunk_size, stride)
answer_holder = []
for chunk in chunked_tokens:
model_input = {"input_ids": chunk["token_ids"], "attention_mask":
chunk["attention_mask"], "token_type_ids": chunk["token_type_ids"]}
answer = self.get_chunk_answer_span(model_input)
if len(answer["answer"]) > 2:
answer["question"] = question
answer["context"] = chunk["context"].replace("##", "").replace(
answer["answer"], " <em>" + answer["answer"] + "</em> ")
answer_holder.append(answer)
return answer_holder
def get_correct_span_mask(self, correct_index, token_size):
span_mask = np.zeros((1, token_size))
span_mask[0, correct_index] = 1
span_mask = tf.constant(span_mask, dtype='float32')
return span_mask
def get_embedding_matrix(self):
if "DistilBert" in type(self.model).__name__:
return self.model.distilbert.embeddings.word_embeddings
else:
return self.model.bert.embeddings.word_embeddings
# move this to some utils file
def clean_tokens(self, gradients, tokens, token_types):
"""
Clean the tokens and gradients
Remove "[CLS]","[CLR]", "[SEP]" tokens
Reduce (mean) gradients values for tokens that are split ##
"""
token_holder = []
token_type_holder = []
gradient_holder = []
i = 0
while i < len(tokens):
if (tokens[i] not in ["[CLS]", "[CLR]", "[SEP]"]):
token = tokens[i]
conn = gradients[i]
token_type = token_types[i]
if i < len(tokens)-1:
if tokens[i+1][0:2] == "##":
token = tokens[i]
conn = gradients[i]
j = 1
while i < len(tokens)-1 and tokens[i+1][0:2] == "##":
i += 1
token += tokens[i][2:]
conn += gradients[i]
j += 1
conn = conn / j
token_holder.append(token)
token_type_holder.append(token_type)
# gradient_holder.append(conn)
gradient_holder.append(
{"gradient": conn, "token": token, "token_type": token_type})
i += 1
return gradient_holder
def get_gradient(self, question, context):
"""Return gradient of input (question) wrt to model output span prediction
Args:
question (str): text of input question
context (str): text of question context/passage
model (QA model): Hugging Face BERT model for QA transformers.modeling_tf_distilbert.TFDistilBertForQuestionAnswering, transformers.modeling_tf_bert.TFBertForQuestionAnswering
tokenizer (tokenizer): transformers.tokenization_bert.BertTokenizerFast
Returns:
(tuple): (gradients, token_words, token_types, answer_text)
"""
embedding_matrix = self.get_embedding_matrix()
encoded_tokens = self.tokenizer.encode_plus(
question, context, add_special_tokens=True, return_token_type_ids=True, return_tensors="tf")
token_ids = list(encoded_tokens["input_ids"].numpy()[0])
vocab_size = embedding_matrix.get_shape()[0]
# convert token ids to one hot. We can't differentiate wrt to int token ids hence the need for one hot representation
token_ids_tensor = tf.constant([token_ids], dtype='int32')
token_ids_tensor_one_hot = tf.one_hot(token_ids_tensor, vocab_size)
with tf.GradientTape(watch_accessed_variables=False) as tape:
# (i) watch input variable
tape.watch(token_ids_tensor_one_hot)
# multiply input model embedding matrix; allows us do backprop wrt one hot input
inputs_embeds = tf.matmul(
token_ids_tensor_one_hot, embedding_matrix)
# (ii) get prediction
start_scores, end_scores = self.model(
{"inputs_embeds": inputs_embeds, "token_type_ids": encoded_tokens["token_type_ids"], "attention_mask": encoded_tokens["attention_mask"]})
answer_start, answer_end = self.get_best_start_end_position(
start_scores, end_scores)
start_output_mask = self.get_correct_span_mask(
answer_start, len(token_ids))
end_output_mask = self.get_correct_span_mask(
answer_end, len(token_ids))
# zero out all predictions outside of the correct span positions; we want to get gradients wrt to just these positions
predict_correct_start_token = tf.reduce_sum(
start_scores * start_output_mask)
predict_correct_end_token = tf.reduce_sum(
end_scores * end_output_mask)
# (iii) get gradient of input with respect to both start and end output
gradient_non_normalized = tf.norm(
tape.gradient([predict_correct_start_token, predict_correct_end_token], token_ids_tensor_one_hot), axis=2)
# (iv) normalize gradient scores and return them as "explanations"
gradient_tensor = (
gradient_non_normalized /
tf.reduce_max(gradient_non_normalized)
)
gradients = gradient_tensor[0].numpy().tolist()
token_words = self.tokenizer.convert_ids_to_tokens(token_ids)
token_types = list(
encoded_tokens["token_type_ids"].numpy()[0].tolist())
answer_text = self.tokenizer.decode(
token_ids[answer_start:answer_end], skip_special_tokens=True)
# clean up gradients and words
gradients = self.clean_tokens(
gradients, token_words, token_types)
return gradients, answer_text, question
def explain_model(self, question, context, explain_method="gradient"):
if explain_method == "gradient":
return self.get_gradient(question, context)
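# --- Illustrative usage sketch (not part of the original module) ---
# Shows how the chunked question answering above would typically be driven. The checkpoint
# below is an assumption (a SQuAD-style QA model with PyTorch weights, since load_model
# passes from_pt=True); loading it downloads weights, so the demo is guarded behind __main__.
if __name__ == "__main__":
    _checkpoint = "distilbert-base-cased-distilled-squad"  # hypothetical QA checkpoint
    _reader = BERTReader("distilbert-squad", _checkpoint, model_type="distilbert")
    _context = ("The Fourth Amendment protects people against unreasonable searches "
                "and seizures of their persons, houses, papers, and effects.")
    # long contexts are split by token_chunker into overlapping chunks and answered per chunk
    for _answer in _reader.answer_question("what does the fourth amendment protect against?", _context, stride=50):
        print(_answer["answer"], _answer["probability"])
    # _reader.explain_model(question, context) would return (gradients, answer_text, question)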
| 10,473 | 42.641667 | 187 |
py
|
neuralqa
|
neuralqa-master/neuralqa/reader/readerpool.py
|
from neuralqa.reader import BERTReader
import logging
logger = logging.getLogger(__name__)
class ReaderPool():
def __init__(self, models):
self._selected_model = models["selected"]
self.reader_pool = {}
for model in models["options"]:
if (model["type"] == "bert" or model["type"] == "distilbert"):
self.reader_pool[model["value"]] = BERTReader(
model["name"], model["value"])
@property
def model(self):
return self.reader_pool[self.selected_model]
@property
def selected_model(self):
return self._selected_model
@selected_model.setter
def selected_model(self, selected_model):
if (selected_model in self.reader_pool):
self._selected_model = selected_model
else:
if (len(self.reader_pool) > 0):
default_model = next(iter(self.reader_pool))
logger.info(
">> Model you are attempting to use %s does not exist in model pool. Using the following default model instead %s ", selected_model, default_model)
self._selected_model = default_model
else:
logger.info(
">> No reader has been specified in config.yaml.")
self._selected_model = None
| 1,331 | 32.3 | 167 |
py
|
neuralqa
|
neuralqa-master/neuralqa/reader/__init__.py
|
from .reader import *
from .bertreader import *
from .readerpool import *
| 74 | 17.75 | 25 |
py
|
neuralqa
|
neuralqa-master/neuralqa/utils/config_utils.py
|
import yaml
import os
import logging
import shutil
logger = logging.getLogger(__name__)
class ConfigParser:
def __init__(self, config_path):
module_file_path = os.path.dirname(os.path.abspath(__file__))
self.default_config_path = os.path.join(
module_file_path, "../config_default.yaml")
self.current_config_path = os.path.join(os.getcwd(), "config.yaml")
if config_path and os.path.exists(config_path):
self.config = self.load_config(config_path)
# else:
# logger.info("Supplied config file does not exist. " +
# os.path.join(os.getcwd(), config_path))
# logger.info("Creating new config file at " +
# self.current_config_path)
# self.config = self.load_default_config()
else:
if (config_path and not os.path.exists(config_path)):
logger.info(">> Supplied config file does not exist. " +
os.path.join(os.getcwd(), config_path))
if os.path.exists(self.current_config_path):
logger.info(">> Found config.yaml file found in current directory " +
self.current_config_path)
self.config = self.load_config(self.current_config_path)
else:
logger.info(">> Creating new config file at " +
self.current_config_path)
shutil.copyfile(self.default_config_path,
self.current_config_path)
self.config = self.load_default_config()
def load_default_config(self):
with open(self.default_config_path) as f:
default_config = yaml.safe_load(f)
return default_config
def load_config(self, config_path):
"""Specially load a config file path.
Will first load the default config file, and update its values with
the content of the file in config_path.
Args:
config_path ([type]): [description]
Returns:
[type]: [description]
"""
default_config = self.load_default_config()
with open(config_path) as f:
config = yaml.safe_load(f)
default_config.update(config)
return default_config
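# --- Illustrative sketch (not part of the original module) ---
# load_config performs a shallow merge: any top-level key present in the user file replaces
# the corresponding default section wholesale (plain dict.update semantics). A stand-alone
# illustration of that behaviour with made-up values:
if __name__ == "__main__":
    _defaults = {"reader": {"selected": "model-a", "options": []}, "ui": {"header": "NeuralQA"}}
    _user_config = {"reader": {"selected": "model-b"}}
    _defaults.update(_user_config)
    # the whole "reader" section is replaced, so the default "options" entry is gone
    print(_defaults)  # {'reader': {'selected': 'model-b'}, 'ui': {'header': 'NeuralQA'}}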
| 2,359 | 34.757576 | 85 |
py
|
neuralqa
|
neuralqa-master/neuralqa/utils/data_utils.py
|
from elasticsearch import Elasticsearch
import os
import zipfile
import shutil
import urllib.request
import logging
import lzma
import json
import tarfile
import hashlib
logger = logging.getLogger(__name__)
# index settings with analyzer to automatically remove stop words
index_settings = {
"settings": {
"analysis": {
"analyzer": {
"stop_analyzer": {
"type": "standard",
"stopwords": "_english_"
}
}
}
},
"mappings": {
"properties": {
"casebody.data.opinions.text": {
"type": "text",
"analyzer": "stop_analyzer"
},
"name": {
"type": "text",
"analyzer": "stop_analyzer"
}
}
}
}
def create_index_from_json(index_name, file_path, max_docs=None):
"""Create an index from json file formats.
Read each file line by line, parse each line as json
jsonl.xz
json : must be a json file containing a list
Arguments:
file_path {str} -- path to case.law bulk file
Keyword Arguments:
max_docs {int} -- maximum size of records to use in creating index.
small default can be used to enable quick testing (e.g: {2000}).
set this to None to use the entire data file.
"""
# print("*** maxdocs", max_docs)
es = Elasticsearch([{'host': 'localhost', 'port': 9200}])
es.indices.create(
index=index_name, body=index_settings, ignore=400)
extension = os.path.splitext(file_path)[1]
logger.info(">> Creating index using file " + file_path)
i = 0
if extension == ".xz":
with lzma.open(file_path) as f:
for line in f:
i += 1
line = json.loads(str(line, 'utf8'))
try:
index_status = es.index(
index=index_name, id=i, body=line)
# print(index_status)
except Exception as e:
logger.info(
"An error has occurred while creating index " + str(e))
break
# logger.info(index_status)
                if (max_docs and i > max_docs):
break
logger.info(">> Creating index complete, delete data file .. ")
os.remove(file_path)
def import_scotus_files(max_docs=2000):
scotus_url = "https://www.courtlistener.com/api/bulk-data/opinions/scotus.tar.gz"
scotus_dir = "scotusdata"
index_name = "supremecourt"
if (not os.path.exists(scotus_dir)):
os.makedirs(scotus_dir, exist_ok=True)
logger.info(">>> Downloading supreme court case data")
ftpstream = urllib.request.urlopen(scotus_url)
thetarfile = tarfile.open(fileobj=ftpstream, mode="r|gz")
thetarfile.extractall(path=scotus_dir)
logger.info(">>> Download completed ")
logger.info(">> Creating %s index using %s documents",
index_name, str(max_docs))
scotus_files = os.listdir(scotus_dir)
es = Elasticsearch([{'host': 'localhost', 'port': 9200}])
es.indices.create(
index=index_name, body=index_settings, ignore=400)
i = 0
for file_path in (scotus_files):
with open("scotusdata/" + file_path) as json_file:
scotus_case = json.load(json_file)
case = {"author": scotus_case["author"],
"casebody": scotus_case["plain_text"]}
if (scotus_case["plain_text"] != ""):
try:
index_status = es.index(
index=index_name, id=scotus_case["id"], body=case)
except Exception as e:
logger.info(
"An error has occurred while creating index " + str(e))
break
i += 1
if (i > max_docs):
break
logger.info(">> Index creation complete.")
def download_data(data_url, source_name):
"""Download Zip datafile from case.law
Arguments:
data_url {str} -- url path dataset
source_name {str} -- name for dataset
"""
# create data directory
os.makedirs("data", exist_ok=True)
# download data from caselaw
zip_file_path = source_name + ".zip"
logger.info(">> Downloading data file for " + source_name)
urllib.request.urlretrieve(data_url, zip_file_path)
logger.info(">> Downloaded data file " + zip_file_path)
extract_dir = "temp" + source_name
with zipfile.ZipFile(zip_file_path, 'r') as zip_ref:
zip_ref.extractall(extract_dir)
data_file = os.path.join(extract_dir, os.listdir(
extract_dir)[0], "data", "data.jsonl.xz")
final_file_path = os.path.join("data", source_name + "jsonl.xz")
shutil.copyfile(data_file, final_file_path)
logger.info(">> Extracted and moved jsonl file to data folder")
shutil.rmtree(extract_dir)
os.remove(zip_file_path)
return final_file_path
def import_sample_data(max_docs=2000):
"""This method downloads several datasets and builds an
elasticsearch index using the downloaded data.
Caselaw
Args:
max_docs (int, optional): [description]. Defaults to 2000.
"""
caselaw_data_paths = [
["https://api.case.law/v1/bulk/22411/download/", "newmexico"]
]
for data_path in caselaw_data_paths:
file_path = download_data(data_path[0], data_path[1])
create_index_from_json("cases", file_path, max_docs=max_docs)
# import_scotus_files(max_docs=max_docs)
# import_medical_data(max_docs=max_docs)
def parse_field_content(field_name, content):
"""Parse content fields if nested using dot notation, else return content as is.
e.g. for acrray content and field_name casebody.data.opinions.text, we return
content[casebody][data][opinions][text]. If any nest level is an array we return only the
first instance of this array. e.g. if opinions is an array, we return
content[casebody][data][opinions][0][text].
Args:
field_name ([str]): [description]
content ([dict]): [description]
Returns:
[str]: content of field
"""
if ("." not in field_name):
return content[field_name]
else:
fields = field_name.split(".")
for field in fields:
content = content[field]
if (isinstance(content, list)):
content = content[0]
return content
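# --- Illustrative example (not part of the original module) ---
# Demonstrates the dot-notation traversal described in the docstring above, including the
# "first element of any intermediate array" behaviour. Uses only made-up sample content.
if __name__ == "__main__":
    _sample_doc = {"casebody": {"data": {"opinions": [{"text": "opinion one"}, {"text": "opinion two"}]}}}
    print(parse_field_content("casebody.data.opinions.text", _sample_doc))  # -> opinion one
    print(parse_field_content("name", {"name": "some case"}))  # -> some case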
| 6,518 | 32.260204 | 94 |
py
|
neuralqa
|
neuralqa-master/neuralqa/utils/file_utils.py
| 0 | 0 | 0 |
py
|
|
neuralqa
|
neuralqa-master/neuralqa/utils/__init__.py
|
from .config_utils import ConfigParser
from .file_utils import *
from .data_utils import import_sample_data, parse_field_content
| 129 | 31.5 | 63 |
py
|
neuralqa
|
neuralqa-master/neuralqa/utils/cli_args.py
|
"""
Definitions of click options shared by several CLI commands.
"""
import click
HOST = click.option("--host", "-h", default="127.0.0.1",
help="The network address to listen on (default: 127.0.0.1). "
"Use 0.0.0.0 to bind to all addresses if you want to access the tracking "
"server from other machines.")
PORT = click.option("--port", "-p", default=5000,
help="The port to listen on (default: 5000).")
WORKERS = click.option("--workers", "-w", default=1,
help="Number of uviicorn worker processes to handle requests (default: 1).")
MAX_DOCS = click.option("--max-docs", "-md", default=2000,
help="Maximum number of sample documents to import when loading sample data into local index")
CONFIG_PATH = click.option("--config-path", "-cp", default=None,
help="Path to a yaml file containing config for neuralqa. "
"If none is provided, the default config.yaml is copied to the current directory.")
| 1,118 | 43.76 | 140 |
py
|
neuralqa
|
neuralqa-master/neuralqa/retriever/solrretriever.py
|
from neuralqa.retriever import Retriever
from neuralqa.utils import parse_field_content
import requests
import logging
logger = logging.getLogger(__name__)
class SolrRetriever(Retriever):
def __init__(self, index_type="solr", host="localhost", port=8983, protocol="http", ** kwargs):
Retriever.__init__(self, index_type)
self.username = ""
self.password = ""
self.body_field = ""
self.host = host
self.port = port
self.protocol = protocol
allowed_keys = list(self.__dict__.keys())
self.__dict__.update((k, v)
for k, v in kwargs.items() if k in allowed_keys)
self.base_solr_url = protocol + "://" + \
host + ":" + str(port) + "/solr"
# self.es = Elasticsearch([{'host': self.host, 'port': self.port}])
# self.isAvailable = self.es.ping()
rejected_keys = set(kwargs.keys()) - set(allowed_keys)
if rejected_keys:
raise ValueError(
"Invalid arguments in ElasticSearchRetriever constructor:{}".format(rejected_keys))
def run_query(self, index_name, search_query, max_documents=5, fragment_size=100, relsnip=True, num_fragments=5, highlight_tags=True):
query_url = self.base_solr_url + "/" + index_name + "/select"
params = {"df": self.body_field, "fl": self.body_field,
"wt": "json", "q": search_query, "rows": max_documents}
hl_params = {"hl": "true", "hl.method": "unified", "hl.snippets": num_fragments,
"hl.fragsize": num_fragments, "hl.usePhraseHighlighter": "true"}
if not highlight_tags:
hl_params["hl.tags.pre"] = ""
hl_params["hl.tags.post"] = ""
if relsnip:
params = {**params, **hl_params}
else:
params["fl"] = "null"
response = requests.get(query_url, params=params)
highlights = []
docs = []
results = {}
status = False
if (response.status_code == 200):
status = True
print(response.url, response.status_code)
response = response.json()
print((response.keys()))
highlights = [" ".join(response["highlighting"][key][self.body_field])
for key in response["highlighting"].keys()] if "highlighting" in response else highlights
docs = [" ".join(doc[self.body_field])
for doc in response["response"]["docs"]]
results = {"took": response["responseHeader"]
["QTime"], "highlights": highlights, "docs": docs}
else:
print("An error has occured",
response.status_code, response.__dict__)
status = False
results["errormsg"] = str(response.status_code)
results["status"] = status
return results
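# --- Illustrative usage sketch (not part of the original module) ---
# The collection name and body field below are assumptions about a local Solr setup.
# run_query returns a dict with "docs", "highlights", "took" and a boolean "status".
if __name__ == "__main__":
    _retriever = SolrRetriever(host="localhost", port=8983, body_field="casebody")
    _results = _retriever.run_query("cases", "fourth amendment violation", max_documents=3)
    if _results["status"]:
        print(_results["took"], len(_results["docs"]), "documents")
    else:
        print("query failed:", _results.get("errormsg"))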
| 2,910 | 36.320513 | 138 |
py
|
neuralqa
|
neuralqa-master/neuralqa/retriever/elasticsearchretriever.py
|
from neuralqa.retriever import Retriever
from neuralqa.utils import parse_field_content
from elasticsearch import Elasticsearch, ConnectionError, NotFoundError
import logging
logger = logging.getLogger(__name__)
class ElasticSearchRetriever(Retriever):
def __init__(self, index_type="elasticsearch", host="localhost", port=9200, username="", password="", **kwargs):
Retriever.__init__(self, index_type)
self.username = username
self.password = password
self.body_field = ""
self.host = host
self.port = port
allowed_keys = list(self.__dict__.keys())
self.__dict__.update((k, v)
for k, v in kwargs.items() if k in allowed_keys)
print(self.__dict__)
# self.es = Elasticsearch(
# [{'host': self.host, 'port': self.port,
# "username": self.username, "password": self.password}])
self.es = Elasticsearch(hosts=[{"host": self.host, "port": self.port}],
http_auth=(self.username, self.password))
self.isAvailable = self.es.ping()
rejected_keys = set(kwargs.keys()) - set(allowed_keys)
if rejected_keys:
raise ValueError(
"Invalid arguments in ElasticSearchRetriever constructor:{}".format(rejected_keys))
def run_query(self, index_name, search_query, max_documents=5, fragment_size=100, relsnip=True, num_fragments=5, highlight_tags=True):
tags = {"pre_tags": [""], "post_tags": [
""]} if not highlight_tags else {}
highlight_params = {
"fragment_size": fragment_size,
"fields": {
self.body_field: tags
},
"number_of_fragments": num_fragments
}
search_query = {
"_source": {"includes": [self.body_field]},
"query": {
"multi_match": {
"query": search_query,
"fields": [self.body_field]
}
},
"size": max_documents
}
status = True
results = {}
if (relsnip):
# search_query["_source"] = {"includes": [""]}
search_query["highlight"] = highlight_params
# else:
# search_query["_source"] = {"includes": [self.body_field]}
try:
query_result = self.es.search(
index=index_name, body=search_query)
# RelSnip: for each document, we concatenate all
# fragments in each document and return as the document.
highlights = [" ".join(hit["highlight"][self.body_field])
for hit in query_result["hits"]["hits"] if "highlight" in hit]
docs = [parse_field_content(self.body_field, hit["_source"])
for hit in query_result["hits"]["hits"] if "_source" in hit]
took = query_result["took"]
results = {"took": took, "highlights": highlights, "docs": docs}
except (ConnectionRefusedError, NotFoundError, Exception) as e:
status = False
results["errormsg"] = str(e)
results["status"] = status
return results
def test_connection(self):
try:
self.es.cluster.health()
return True
except ConnectionError:
return False
except Exception as e:
logger.info(
                'An unknown error occurred connecting to ElasticSearch: %s' % e)
return False
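# --- Illustrative usage sketch (not part of the original module) ---
# Mirrors the call in tests/retriever/test_retriever.py; the index name and body field are
# assumptions about a local Elasticsearch instance populated via neuralqa.utils.data_utils.
if __name__ == "__main__":
    _retriever = ElasticSearchRetriever(host="localhost", port=9200,
                                        body_field="casebody.data.opinions.text")
    _results = _retriever.run_query("cases", "what is the punishment for arson crime", max_documents=3)
    print(_results["status"], _results.get("took"), len(_results.get("docs", [])))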
| 3,559 | 34.959596 | 138 |
py
|
neuralqa
|
neuralqa-master/neuralqa/retriever/retriever.py
|
class Retriever:
def __init__(self, index_type):
self.index_type = index_type
| 92 | 14.5 | 36 |
py
|
neuralqa
|
neuralqa-master/neuralqa/retriever/__init__.py
|
from .retriever import *
from .elasticsearchretriever import *
from .solrretriever import *
from .retrieverpool import *
| 121 | 23.4 | 37 |
py
|
neuralqa
|
neuralqa-master/neuralqa/retriever/retrieverpool.py
|
from neuralqa.retriever import ElasticSearchRetriever
import logging
logger = logging.getLogger(__name__)
class RetrieverPool():
def __init__(self, retrievers):
self.retriever_pool = {}
for retriever in retrievers["options"]:
if (retriever["value"] in self.retriever_pool):
raise ValueError(
"Duplicate retriever value : {} ".format(retriever["value"]))
if (retriever["type"] == "elasticsearch"):
self.retriever_pool[retriever["value"]] = ElasticSearchRetriever(
**retriever["connection"])
if (retriever["type"] == "solr"):
logger.info("We do not yet support Solr retrievers")
self.selected_retriever = retrievers["selected"]
@property
def retriever(self):
return self.retriever_pool[self.selected_retriever]
@property
def selected_retriever(self):
return self._selected_retriever
@selected_retriever.setter
def selected_retriever(self, selected_retriever):
if (selected_retriever in self.retriever_pool):
self._selected_retriever = selected_retriever
else:
if (len(self.retriever_pool) > 0):
default_retriever = next(iter(self.retriever_pool))
logger.info(
">> Retriever you are attempting to use (%s) does not exist in retriever pool. Using the following default retriever instead %s ", selected_retriever, default_retriever)
self._selected_retriever = default_retriever
else:
logger.info(
">> No retriever has been specified in config.yaml.")
self._selected_retriever = None
| 1,751 | 37.086957 | 189 |
py
|
neuralqa
|
neuralqa-master/neuralqa/server/serve.py
|
from neuralqa.reader import BERTReader, ReaderPool
from neuralqa.server.routehandlers import Handler
from neuralqa.retriever import ElasticSearchRetriever, RetrieverPool
from neuralqa.utils import ConfigParser
from neuralqa.expander import ExpanderPool
import os
import logging
import time
import uvicorn
from fastapi import FastAPI
from fastapi.staticfiles import StaticFiles
# from fastapi.middleware.cors import CORSMiddleware
logger = logging.getLogger(__name__)
config_path = os.environ.get("NEURALQA_CONFIG_PATH")
app_config = ConfigParser(config_path)
app = FastAPI()
api = FastAPI(root_path="/api")
# origins = [
# "http://localhost",
# "http://localhost:3000",
# ]
# app.add_middleware(
# CORSMiddleware,
# allow_origins=origins,
# allow_credentials=True,
# allow_methods=["*"],
# allow_headers=["*"],
# )
root_file_path = os.path.dirname(os.path.abspath(__file__))
static_folder_root = os.path.join(root_file_path, "ui/build")
app.mount("/api", api)
app.mount("/", StaticFiles(directory=static_folder_root, html=True),
name="ui")
@api.get('/config')
async def get_config():
config = app_config.config["ui"]
# show only listed models to ui
config["queryview"]["options"]["relsnip"] = app_config.config["relsnip"]
config["queryview"]["options"]["samples"] = app_config.config["samples"]
config["queryview"]["options"]["expander"] = app_config.config["expander"]
config["queryview"]["options"]["reader"] = app_config.config["reader"]
config["queryview"]["options"]["retriever"] = app_config.config["retriever"]
return config
# # Define a Reader Pool, load into memory
reader_pool = ReaderPool(app_config.config["reader"])
# # define the search index, load into memory
retriever_pool = RetrieverPool(app_config.config["retriever"])
# define the expander, load into memory
expander_pool = ExpanderPool(app_config.config["expander"])
handlers = Handler(reader_pool, retriever_pool, expander_pool)
# handlers = Handler(None, None)
api.include_router(handlers.router)
| 2,057 | 28.4 | 80 |
py
|
neuralqa
|
neuralqa-master/neuralqa/server/routemodels.py
|
from pydantic import BaseModel
from typing import Optional
class Document(BaseModel):
max_documents: Optional[int] = 5
query: str = "what is a fourth amendment right violation?"
fragment_size: int = 250
retriever: Optional[str] = None
relsnip: Optional[bool] = True
class Answer(BaseModel):
max_documents: Optional[int] = 5
query: str = "what is a fourth amendment right violation?"
fragment_size: int = 250
tokenstride: int = 50
context: Optional[str] = "The fourth amendment kind of protects the rights of citizens .. such that they dont get searched"
reader: str = None
relsnip: bool = True
expander: Optional[str] = None
expansionterms: Optional[list] = None
retriever: Optional[str] = "manual"
class Explanation(BaseModel):
query: str = "what is a fourth amendment right violation?"
context: str = "The fourth amendment kind of protects the rights of citizens .. such that they dont get searched"
class Expansion(BaseModel):
query: str = "what is a fourth amendment right violation?"
expander: Optional[str] = None
| 1,108 | 28.184211 | 127 |
py
|
neuralqa
|
neuralqa-master/neuralqa/server/server_app.py
|
import uvicorn
import os
def launch_server(host="127.0.0.1", port=5000, workers=1, reload=False):
uvicorn.run("neuralqa.server.serve:app", host=host, port=port, workers=workers,
log_level="info", reload=reload)
if __name__ == "__main__":
launch_server()
| 283 | 20.846154 | 83 |
py
|
neuralqa
|
neuralqa-master/neuralqa/server/__init__.py
|
from .server_app import launch_server
| 38 | 18.5 | 37 |
py
|
neuralqa
|
neuralqa-master/neuralqa/server/routehandlers.py
|
from neuralqa.utils import ConfigParser
import time
from fastapi import APIRouter
from typing import Optional
from neuralqa.server.routemodels import Document, Answer, Explanation, Expansion
import logging
logger = logging.getLogger(__name__)
class Handler:
def __init__(self, reader_pool, retriever_pool, expander_pool):
router = APIRouter()
self.router = router
self.reader_pool = reader_pool
self.retriever_pool = retriever_pool
self.expander_pool = expander_pool
@router.post("/answers")
async def get_answers(params: Answer):
"""Generate an answer for the given search query.
Performed as two stage process
1.) Get sample passages from neighbourhood provided by matches by elastic search
2.) Used BERT Model to identify exact answer spans
Returns:
[type] -- [description]
"""
answer_holder = []
response = {}
start_time = time.time()
# switch to the selected model and retriever
self.reader_pool.selected_model = params.reader
self.retriever_pool.selected_retriever = params.retriever
# print(params.query + " ".join(params.expansionterms))
# answer question based on provided context
if (params.retriever == "none" or self.retriever_pool.selected_retriever == None):
answers = self.reader_pool.model.answer_question(
params.query, params.context, stride=params.tokenstride)
for answer in answers:
answer["index"] = 0
answer_holder.append(answer)
# answer question based on retrieved passages from elastic search
else:
# add query expansion terms to query if any
                retriever_query = params.query + " " + \
                    " ".join(params.expansionterms or [])
num_fragments = 5
query_results = self.retriever_pool.retriever.run_query(params.retriever, retriever_query,
max_documents=params.max_documents, fragment_size=params.fragment_size,
relsnip=params.relsnip, num_fragments=num_fragments, highlight_tags=False)
# print(query_results)
if (query_results["status"]):
# if relsnip is not enabled, read the entire document ... this is super slow
docs = query_results["highlights"] if params.relsnip else query_results["docs"]
for i, doc in enumerate(docs):
doc = doc.replace("\n", " ")
answers = self.reader_pool.model.answer_question(
params.query, doc, stride=params.tokenstride)
for answer in answers:
answer["index"] = i
answer_holder.append(answer)
# sort answers by probability
            answer_holder = sorted(
                answer_holder, key=lambda k: float(k['probability']), reverse=True)
elapsed_time = time.time() - start_time
response = {"answers": answer_holder,
"took": elapsed_time}
return response
@router.post("/documents")
async def get_documents(params: Document):
"""Get a list of documents and highlights that match the given search query
Returns:
dictionary -- contains details on elastic search results.
"""
num_fragments = 5
query_results = {"docs": [], "highlights": []}
self.retriever_pool.selected_retriever = params.retriever
if self.retriever_pool.selected_retriever:
query_results = self.retriever_pool.retriever.run_query(
params.retriever, params.query, max_documents=params.max_documents, fragment_size=params.fragment_size, relsnip=params.relsnip, num_fragments=num_fragments)
# print(query_results)
max_doc_size = 1200
if not params.relsnip:
query_results["highlights"] = [
doc[:max_doc_size] + " .." for doc in query_results["docs"]]
return query_results
@router.post("/explain")
async def get_explanation(params: Explanation):
"""Return an explanation for a given model
Returns:
[dictionary]: [explanation , query, question, ]
"""
# TODO: Do we need to switch readers here?
context = params.context.replace(
"<em>", "").replace("</em>", "")
gradients, answer_text, question = self.reader_pool.model.explain_model(
params.query, context)
explanation_result = {"gradients": gradients,
"answer": answer_text,
"question": question
}
return explanation_result
@router.post("/expand")
async def get_expansion(params: Expansion):
"""Return an expansion for a given query
Returns:
[dictionary]: [expansion]
"""
expanded_query = {"query": None}
# switch to selected expander, perform expansion
if params.expander != "none":
self.expander_pool.selected_expander = params.expander
if self.expander_pool.selected_expander:
expanded_query = self.expander_pool.expander.expand_query(
params.query)
return expanded_query
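# --- Illustrative request sketch (not part of the original module) ---
# The /answers route above is a two-stage pipeline (retrieve passages, then read answer spans).
# Assuming the server is running locally on the default host/port from server_app.py, a request
# body matching the Answer model could look like the following. The reader value is hypothetical;
# `requests` is only used for this sketch and is not a dependency of this module.
if __name__ == "__main__":
    import requests
    _payload = {
        "query": "what is a fourth amendment right violation?",
        "max_documents": 5,
        "fragment_size": 250,
        "tokenstride": 50,
        "relsnip": True,
        "reader": "distilbert-base-cased-distilled-squad",
        "retriever": "none",  # "none" answers against the provided context instead of an index
        "context": "The fourth amendment protects citizens against unreasonable searches.",
        "expansionterms": [],
    }
    _response = requests.post("http://127.0.0.1:5000/api/answers", json=_payload)
    print(_response.json())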
| 5,890 | 40.485915 | 176 |
py
|
neuralqa
|
neuralqa-master/tests/expander/test_expander.py
|
from neuralqa.expander import MLMExpander
def test_mlm_expander():
expander_kwargs = {
# "model_path": "distilbert-base-uncased"
}
test_string = "Steve jobs created the apple computer in which year"
expander = MLMExpander(**expander_kwargs)
expansion = expander.expand_query(test_string)
assert len(expansion["terms"]) > 0
print(expansion)
test_mlm_expander()
| 400 | 24.0625 | 71 |
py
|
neuralqa
|
neuralqa-master/tests/reader/test_reader.py
| 0 | 0 | 0 |
py
|
|
neuralqa
|
neuralqa-master/tests/retriever/test_retriever.py
|
from neuralqa.retriever import ElasticSearchRetriever
from neuralqa.utils import ConfigParser
def test_elasticserch_retriever():
app_config = ConfigParser("config.yaml")
rkwargs = app_config.config["retriever"]["options"][1]["connection"]
retriever = ElasticSearchRetriever(**rkwargs)
results = retriever.run_query(
"cases", "what is the punishment for arson crime")
    assert results is not None
test_elasticserch_retriever()
| 452 | 29.2 | 72 |
py
|
neuralqa
|
neuralqa-master/docs/conf.py
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../../neuralqa/'))
# -- Project information -----------------------------------------------------
project = 'NeuralQA'
copyright = '2020, Victor Dibia'
author = 'Victor Dibia'
# The full version, including alpha/beta/rc tags
release = '0.0.16a'
# set master doc
master_doc = 'index'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
| 1,992 | 33.362069 | 79 |
py
|
LRMI
|
LRMI-main/feature/svm.py
|
import numpy as np
from sklearn.model_selection import KFold
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
# data = np.loadtxt('dataset/waveform/waveform-+noise.data', dtype=np.float64, delimiter=',')
# features = [
# [6, 10, 15, 9, 12, 11, 30, 37, 23, 29, ], # MRMI
# [6, 10, 15, 9, 5, 8, 4, 16, 12, 11, ], # LRMI
# [6, 10, 16, 33, 28, 39, 36, 21, 23, 31, ], # MIFS
# [6, 10, 16, 11, 9, 12, 23, 32, 30, 22, ], # FOU
# [6, 10, 15, 11, 16, 4, 9, 5, 8, 12, ], # MRMR
# [6, 10, 14, 15, 4, 9, 5, 16, 11, 7, ], # JMI
# [6, 10, 16, 14, 9, 11, 4, 8, 12, 15, ], # CMIM
# [6, 10, 14, 15, 4, 9, 5, 16, 11, 7, ], # DISR
# ]
# data = np.loadtxt('dataset/spambase/spambase.data', dtype=np.float64, delimiter=',')
# features = [
# [24, 25, 28, 26, 41, 40, 29, 43, 27, 45, ], # MRMI
# [20, 52, 6, 51, 4, 22, 15, 56, 23, 7, ], # LRMI
# [56, 26, 6, 3, 40, 46, 37, 41, 21, 43, ], # MIFS
# [56, 18, 54, 49, 24, 11, 48, 44, 36, 2, ], # FOU
# [56, 3, 6, 26, 51, 23, 52, 24, 15, 22, ], # MRMR
# [56, 18, 51, 52, 54, 6, 24, 55, 20, 15, ], # JMI
# [56, 18, 51, 52, 6, 20, 15, 55, 24, 26, ], # CMIM
# [6, 23, 26, 52, 15, 22, 24, 51, 25, 19, ], # DISR
# ]
# data = np.loadtxt('dataset/krvskp/krvskp.txt', dtype=np.float64)
# features = [
# [20, 9, 32, 31, 5, 34, 14, 0, 33, 6, ], # MRMI
# [20, 9, 32, 31, 5, 34, 14, 0, 33, 13, ], # LRMI
# [20, 9, 32, 31, 15, 8, 2, 27, 24, 11, ], # MIFS
# [20, 9, 32, 31, 14, 0, 33, 8, 1, 15, ], # FOU
# [20, 9, 32, 31, 14, 7, 15, 17, 5, 26, ], # MRMR
# [20, 9, 32, 31, 14, 7, 6, 15, 17, 5, ], # JMI
# [20, 9, 32, 31, 14, 7, 15, 5, 17, 21, ], # CMIM
# [20, 9, 32, 31, 28, 15, 13, 7, 26, 14, ], # DISR
# ]
data = np.loadtxt('dataset/breast/wdbc.txt', dtype=np.float64) # breast
features = [
[27, 20, 21, 7, 10, 22, 29, 26, 1, 23, ], # MRMI
[27, 20, 21, 7, 10, 22, 29, 26, 1, 23, ], # LRMI
[22, 27, 1, 28, 19, 11, 18, 4, 14, 13, ], # MIFS
[22, 24, 9, 14, 29, 3, 19, 18, 4, 11, ], # FOU
[22, 24, 7, 1, 13, 27, 28, 23, 26, 10, ], # MRMR
[22, 24, 23, 27, 7, 20, 13, 26, 3, 6, ], # JMI
[22, 24, 27, 21, 7, 9, 13, 26, 1, 3, ], # CMIM
[22, 20, 23, 3, 2, 0, 27, 7, 6, 26, ], # DISR
]
X = data[:, :-1]
y = data[:, -1]
scaler = StandardScaler()
scaler.fit(X)
X = scaler.transform(X)
feat_ids = []
feat_map = {}
for m in features:
feat_id = []
for i in range(len(m)):
feat = tuple(set(m[:i+1]))
if feat in feat_map:
fid = feat_map[feat]
else:
fid = len(feat_map)
feat_map[feat] = fid
feat_id.append(fid)
feat_ids.append(feat_id)
feat_res = np.zeros(len(feat_map))
classifier = SVC(class_weight='balanced')
for feat, fid in feat_map.items():
error = []
for index_train, index_test in KFold(n_splits=10, shuffle=True).split(X):
X_train, X_test = X[index_train, :][:, feat], X[index_test, :][:, feat]
y_train, y_test = y[index_train], y[index_test]
        # keep the design matrix two-dimensional when only a single feature is selected
        if len(feat) == 1:
X_train = X_train.reshape(-1, 1)
X_test = X_test.reshape(-1, 1)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
# print(confusion_matrix(y_test, y_pred))
# print(classification_report(y_test, y_pred))
error.append(np.mean(y_pred != y_test))
error = np.array(error) * 100
m = np.mean(error)
sd = np.sqrt(np.square(error - m).mean())
# print(m)
print(feat, '%.2f (%.2f)' % (m, sd))
feat_res[fid] = m
for m in feat_ids:
for fid in m:
print('%.2f' % feat_res[fid], end='\t')
print()
ranking = np.zeros((len(features), len(features[0])))
for i in range(len(features[0])):
feat_rank = []
for j in range(len(features)):
feat_rank.append((feat_res[feat_ids[j][i]], j))
feat_rank.sort()
for j in range(len(features)):
for k in range(len(features)):
if feat_rank[k][0] == feat_res[feat_ids[j][i]]:
ranking[j][i] = k + 1
break
with np.printoptions(formatter={'float': '{:0.1f}'.format}):
print(np.mean(ranking, 1))
| 3,828 | 31.176471 | 93 |
py
|
LRMI
|
LRMI-main/feature/knn.py
|
import numpy as np
from sklearn.model_selection import train_test_split, KFold
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import classification_report, confusion_matrix, mutual_info_score
from sklearn.feature_selection import mutual_info_classif
from pyitlib import discrete_random_variable as drv
import math
data = np.loadtxt('dataset/madelon.txt', dtype=np.int32)
features = [
[241, 338, 378, 318, 153, 455, 48, 493, 28, 298, ], # MRMI
[241, 338, 378, 318, 153, 455, 48, 493, 28, 124], # LRMI
[105, 90, 423, 276, 404, 228, 332, 168, 173, 402, ], # MIFS
[105, 493, 462, 175, 281, 382, 136, 107, 467, 478, ], # FOU
[105, 90, 423, 276, 404, 228, 332, 168, 173, 283, ], # MRMR
[105, 453, 64, 442, 153, 475, 338, 455, 378, 493, ], # JMI
[105, 453, 433, 411, 336, 442, 475, 455, 75, 13, ], # CMIM
[105, 433, 493, 336, 338, 442, 475, 455, 378, 453, ], # DISR
]
# data = np.loadtxt('dataset/semeion/semeion.txt', dtype=np.int32)
# features = [
# [161, 78, 81, 228, 76, 5, 190, 118, 234, 14, ], # MRMI
# [161, 78, 81, 228, 76, 5, 190, 15, 118, 234, ], # LRMI
# [161, 78, 81, 228, 76, 7, 190, 129, 193, 134, ], # MIFS
# [161, 78, 81, 228, 100, 5, 113, 115, 85, 70, ], # FOU
# [161, 78, 81, 177, 145, 111, 228, 62, 144, 193, ], # MRMR
# [161, 78, 81, 177, 145, 62, 144, 193, 110, 129, ], # JMI
# [161, 78, 81, 228, 7, 76, 126, 129, 190, 193, ], # CMIM
# [161, 78, 145, 177, 111, 81, 62, 144, 193, 94, ], # DISR
# ]
# data = np.loadtxt('dataset/statlog/statlog.txt', dtype=np.int32)
# features = [
# [17, 16, 19, 27, 11, 32, 2, 0, 34, 9, ], # MRMI
# [17, 16, 19, 11, 27, 35, 2, 32, 0, 9, ], # LRMI
# [17, 20, 24, 35, 0, 9, 26, 32, 2, 8, ], # MIFS
# [17, 20, 34, 10, 0, 26, 2, 24, 30, 9, ], # FOU
# [17, 24, 8, 35, 1, 20, 15, 32, 12, 9, ], # MRMR
# [17, 20, 19, 12, 5, 35, 16, 33, 15, 13, ], # JMI
# [17, 20, 19, 26, 2, 18, 10, 34, 3, 0, ], # CMIM
# [16, 17, 19, 20, 21, 12, 23, 13, 28, 15, ], # DISR
# ]
# data = np.loadtxt('dataset/optdigits/optdigits.txt', dtype=np.int32)
# features = [
# [42, 21, 43, 26, 10, 61, 27, 19, 37, 5, ], # MRMI
# [42, 21, 43, 26, 10, 61, 27, 52, 36, 5, ], # LRMI
# [42, 21, 61, 38, 26, 10, 43, 27, 0, 39, ], # MIFS
# [42, 21, 27, 44, 37, 45, 29, 52, 53, 51, ], # FOU
# [42, 21, 61, 30, 26, 43, 28, 10, 34, 38, ], # MRMR
# [42, 21, 43, 28, 61, 26, 34, 36, 20, 29, ], # JMI
# [42, 21, 43, 61, 20, 26, 36, 2, 27, 13, ], # CMIM
# [42, 30, 43, 21, 28, 34, 36, 20, 38, 54, ], # DISR
# ]
X = data[:, :-1]
y = data[:, -1]
# scaler = StandardScaler()
# scaler.fit(X)
# X_train = scaler.transform(X_train)
# X_test = scaler.transform(X_test)
feat_ids = []
feat_map = {}
for m in features:
feat_id = []
for i in range(len(m)):
feat = tuple(set(m[:i+1]))
if feat in feat_map:
fid = feat_map[feat]
else:
fid = len(feat_map)
feat_map[feat] = fid
feat_id.append(fid)
feat_ids.append(feat_id)
feat_res = np.zeros(len(feat_map))
classifier = KNeighborsClassifier(n_neighbors=3)
for feat, fid in feat_map.items():
error = []
for index_train, index_test in KFold(n_splits=10, shuffle=True).split(X):
X_train, X_test = X[index_train, :][:, feat], X[index_test, :][:, feat]
y_train, y_test = y[index_train], y[index_test]
        # keep the design matrix two-dimensional when only a single feature is selected
        if len(feat) == 1:
X_train = X_train.reshape(-1, 1)
X_test = X_test.reshape(-1, 1)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
# print(confusion_matrix(y_test, y_pred))
# print(classification_report(y_test, y_pred))
error.append(np.mean(y_pred != y_test))
error = np.array(error) * 100
m = np.mean(error)
sd = np.sqrt(np.square(error - m).mean())
# print(m)
print(feat, '%.2f (%.2f)' % (m, sd))
feat_res[fid] = m
for m in feat_ids:
for fid in m:
print('%.2f' % feat_res[fid], end='\t')
print()
ranking = np.zeros((len(features), len(features[0])))
for i in range(len(features[0])):
feat_rank = []
for j in range(len(features)):
feat_rank.append((feat_res[feat_ids[j][i]], j))
feat_rank.sort()
for j in range(len(features)):
for k in range(len(features)):
if feat_rank[k][0] == feat_res[feat_ids[j][i]]:
ranking[j][i] = k + 1
break
with np.printoptions(formatter={'float': '{:0.1f}'.format}):
print(np.mean(ranking, 1))
| 4,257 | 33.33871 | 86 |
py
|
BeatTheBookie
|
BeatTheBookie-master/src/Figure2A.py
|
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot  # needed so that mpl.pyplot is available for the plotting calls below
from scipy.stats import norm
import random
import bisect
bet = 50 # money on each bet
marg = 0.05 # margin odds above the mean.
n_samples = 10 # number of returns to calculate (with replacement) for the random strategy
#rand('seed',1) # use always the same seed to get same results
runStrategies = 1 # 1: run both strategies, 0: load results from disk
dir_path = '../data/'
# if your file has no headers please uncomment this line and comment the following
#data = pd.read_csv(dir_path + "closing_odds.csv",
# names = ['match_id','league','match_date','home_team',
# 'home_score','away_team','away_score','avg_odds_home_win',
# 'avg_odds_draw','avg_odds_away_win','max_odds_home_win',
# 'max_odds_draw','max_odds_away_win','top_bookie_home_win',
# 'top_bookie_draw','top_bookie_away_win','n_odds_home_win',
# 'n_odds_draw','n_odds_away_win'], header=None)
data = pd.read_csv(dir_path + "closing_odds.csv")
# helper function from: https://eli.thegreenplace.net/2010/01/22/weighted-random-generation-in-python
class WeightedRandomGenerator(object):
def __init__(self, weights):
self.totals = []
running_total = 0
for w in weights:
running_total += w
self.totals.append(running_total)
def next(self):
rnd = random.random() * self.totals[-1]
return bisect.bisect_right(self.totals, rnd)
def __call__(self):
return self.next()
def beatthebookie_strategy(data, bet, marg):
nValidOdds = 3
result = 0 * (data['home_score'] > data['away_score']) \
+ 1 * (data['home_score'] == data['away_score']) \
+ 2 * (data['home_score'] < data['away_score'])
earn_margin_home = ((1 / data['avg_odds_home_win'] - marg) * data['max_odds_home_win'] - 1) * \
(data['n_odds_home_win'] > nValidOdds)
earn_margin_draw = ((1 / data['avg_odds_draw'] - marg) * data['max_odds_draw'] - 1) * \
(data['n_odds_draw'] > nValidOdds)
earn_margin_away = ((1 / data['avg_odds_away_win'] - marg) * data['max_odds_away_win'] - 1) * \
(data['n_odds_away_win'] > nValidOdds)
max_margin = np.max(pd.concat([earn_margin_home,earn_margin_draw,earn_margin_away],axis=1),axis=1)
max_arg = pd.concat([earn_margin_home,earn_margin_draw,earn_margin_away],axis=1).apply(np.argmax,axis=1)
max_margin_max_odd = (max_arg == 0) * data['max_odds_home_win'] + \
(max_arg == 1) * data['max_odds_draw'] + \
(max_arg == 2) * data['max_odds_away_win']
max_margin_mean_odd = (max_arg == 0) * data['avg_odds_home_win'] + \
(max_arg == 1) * data['avg_odds_draw'] + \
                          (max_arg == 2) * data['avg_odds_away_win']
top_bookie = (max_arg == 0) * data['top_bookie_home_win'] + \
(max_arg == 1) * data['top_bookie_draw'] + \
(max_arg == 2) * data['top_bookie_away_win']
should_bet = max_margin > 0
bets_outcome = bet * (max_margin_max_odd - 1) * (max_arg == result) - bet * (max_arg != result)
accuracy = (max_arg == result)[should_bet].apply(int)
return [np.cumsum(bets_outcome[should_bet]), accuracy, max_margin_max_odd[should_bet], max_margin_mean_odd[should_bet], \
            max_arg[should_bet], top_bookie[should_bet]]
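# Worked example of the margin computed above (not part of the original script): for a single
# outcome the strategy bets when (1 / avg_odds - marg) * max_odds - 1 > 0, i.e. when the best
# available odds exceed the inverse of the margin-adjusted consensus probability (and enough
# bookmaker odds are available). For example, with avg_odds = 2.0, marg = 0.05, max_odds = 2.40:
#     (1 / 2.0 - 0.05) * 2.40 - 1 = 0.45 * 2.40 - 1 = 0.08  -> positive, so place the bet.
assert abs((1 / 2.0 - 0.05) * 2.40 - 1 - 0.08) < 1e-6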
[s1_money, s1_accuracy, s1_max_odds, s1_mean_odds, s1_ids, s1_top_bookie] = beatthebookie_strategy(data,bet,marg)
def random_strategy(data, n_samples, n_games, bet, p_home, p_draw, p_away):
money = np.zeros([n_samples, n_games])
accuracy = np.zeros([n_samples, n_games])
max_odds = np.zeros([n_samples, n_games])
mean_odds = np.zeros([n_samples, n_games])
ids = np.zeros([n_samples, n_games])
wrg = WeightedRandomGenerator([p_home,p_draw,p_away])
dat = data[(data['avg_odds_home_win'] != 0.0) & (data['avg_odds_draw'] != 0.0) & (data['avg_odds_away_win'] != 0.0)]
result = 0 * (dat['home_score'] > dat['away_score']) \
+ 1 * (dat['home_score'] == dat['away_score']) \
+ 2 * (dat['home_score'] < dat['away_score'])
for samp in range(0,n_samples):
print("sample: %1.0f \n" % (samp))
inds = np.random.choice(range(0,dat.shape[0]-1),(n_games),replace=False)
sample = dat.iloc[inds]
sample_result = result.iloc[inds]
        bet_side = np.array([wrg.next() for i in range(n_games)])
sample_max_odds = (bet_side == 0) * sample['max_odds_home_win'] + \
(bet_side == 1) * sample['max_odds_draw'] + \
(bet_side == 2) * sample['max_odds_away_win']
sample_mean_odds = (bet_side == 0) * sample['max_odds_home_win'] + \
(bet_side == 1) * sample['max_odds_draw'] + \
(bet_side == 2) * sample['max_odds_away_win']
bets_outcome = bet * (sample_max_odds - 1) * (sample_result == bet_side) - bet * (sample_result != bet_side)
money[samp,] = np.cumsum(bets_outcome)
accuracy[samp,] = (sample_result == bet_side).apply(int)
max_odds[samp,] = sample_max_odds
mean_odds[samp,] = sample_mean_odds
ids[samp,] = bet_side
return [money, accuracy, max_odds, mean_odds, ids]
[p_home, p_draw, p_away] = [np.mean(s1_ids[0:]==0), np.mean(s1_ids[0:]==1), np.mean(s1_ids[0:]==2)]
[s2_money, s2_accuracy, s2_max_odds, s2_mean_odds, s2_ids] = random_strategy(data, n_samples, s1_money.shape[0],
bet, p_home, p_draw, p_away)
# Mean closing odds and Expected accuracy
# Compute descriptive stats
#mS1 = mean(s1.mean_odds);
#mS2 = mean(s2.mean_odds(:));
mean_s1 = np.mean(s1_mean_odds)
mean_s2 = np.mean(s2_mean_odds[:])
#stdS1 = std(s1.mean_odds);
#stdS2 = std(s2.mean_odds(:));
std_s1 = np.std(s1_mean_odds)
std_s2 = np.std(s2_mean_odds[:])
# These are are the intercepts obtained in the regression analysis of
# Figure 1 (see Figure1.py)
offsets = [-0.034, -0.057, -0.037]
# Calculate Expected Accuracy of our strategy
s1_mean_accuracy = np.mean(s1_accuracy[:])
s1_expected_accuracy = np.mean(pd.concat([(1 / s1_mean_odds[s1_ids[:]==0]) + offsets[0],
(1 / s1_mean_odds[s1_ids[:]==1]) + offsets[1],
(1 / s1_mean_odds[s1_ids[:]==2]) + offsets[2]]))
# Calculate Expected Accuracy of Random bet Strategy
s2_expected_accuracies = np.zeros((n_samples))
for m in range(0,n_samples):
odds_home = s2_mean_odds[m, s2_ids[m,:]==0]
odds_draw = s2_mean_odds[m, s2_ids[m,:]==1]
odds_away = s2_mean_odds[m, s2_ids[m,:]==2]
s2_prob = np.concatenate([(1 / odds_home) + offsets[0],
(1 / odds_draw) + offsets[1],
(1 / odds_away) + offsets[2]])
s2_expected_accuracies[m] = np.mean(s2_prob)
s2_mean_accuracy = np.mean(np.mean(s2_accuracy))
s2_expected_accuracy = np.mean(s2_expected_accuracies)
random_strategy_mean = np.nanmean(s2_money[:,-1])
random_strategy_std = np.nanstd(s2_money[:,-1])
delta_sigma = (np.array(s1_money)[-1] - random_strategy_mean) / random_strategy_std # distance to the mean in standard deviations
#p = norm.cdf(np.array(s1_money)[-1],random_strategy_mean,random_strategy_std)
# percentage of z values expected to lie above zσ. CI = (−zσ, zσ)
#prop = (1 - p);
#fraction = 1 / prop; % expressed as fraction
print('Mean odds of our strategy: %2.3f (STD=%2.3f) \nMean Odds Random Bet Strategy: %2.3f (STD= %2.3f) \n' % (mean_s1, std_s1, mean_s2, std_s2))
print('Beat The Bookie statistics:\n');
print(' # of bets: %2.0f \n Return: %2.4f\n Profit: %2.0f\n Expected Accuracy: %2.1f\n Accuracy: %2.2f \n' % (s1_money.shape[0],
np.array(s1_money)[-1]/(s1_money.shape[0] * bet) * 100,np.array(s1_money)[-1], s1_expected_accuracy * 100, s1_mean_accuracy * 100) )
print('Random bet strategy statistics:\n');
print(' # of bets: %2.0f \n Return: %2.4f\n Profit: %2.0f\n STD: %2.4f\n Expected Accuracy: %2.1f\n Accuracy: %2.2f \n' % (s2_money.shape[1],
random_strategy_mean/(s2_money.shape[1]*bet) * 100, random_strategy_mean, random_strategy_std, s2_expected_accuracy * 100, s2_mean_accuracy * 100) )
mpl.pyplot.plot(range(s1_money.shape[0]),s1_money)
mpl.pyplot.show()
| 8,610 | 44.803191 | 150 |
py
|
BeatTheBookie
|
BeatTheBookie-master/src/collect.py
|
import os, fnmatch
from itertools import cycle
dest = open('odds_series.csv','w') # replace by 'odds_series_b.csv' for b dataset
flatten = lambda l: [item for sublist in l for item in sublist]
header_cols = ['match_id','match_date','match_time','score_home','score_away'] \
+ flatten([[''.join(map(str,t)) for t in zip(cycle(['home_b'+str(x)+'_']),range(0,72))] \
+ [''.join(map(str,t)) for t in zip(cycle(['draw_b'+str(x)+'_']),range(0,72))] \
+ [''.join(map(str,t)) for t in zip(cycle(['away_b'+str(x)+'_']),range(0,72))] for x in range(1,33)])
header = ','.join(header_cols) + "\n"
dest.write(header)
path = "../data/odds_series/" # replace by "../data/odds_series_b/" for b dataset
for fn in fnmatch.filter(os.listdir(path), '*.txt'):
f = open(os.path.join(path,fn),"r")
collected = []
details = fn.rstrip(".txt").split("_")
match_id = details[1]
match_year = details[2]
match_month = details[3]
match_day = details[4]
match_hour = details[5]
match_minutes = details[6]
match_seconds = details[7]
match_score_home = details[8]
match_score_away = details[9]
for line in f.readlines():
for col in line.rstrip().split(","):
collected.append(col)
match_cols = [match_id, match_year + "-" + match_month + "-" + match_day, match_hour + ":" + match_minutes + ":" + match_seconds, match_score_home, match_score_away] + collected
match_line = ','.join(match_cols) + "\n"
dest.write(match_line)
f.close()
dest.close()
| 1,542 | 31.829787 | 181 |
py
|
BeatTheBookie
|
BeatTheBookie-master/src/unpack.py
|
import os
source = open('../data/odds_series.csv','r') # replace path for '../data/odds_series_b.csv' for the other dataset
path = "../data/odds_series/" # replace path for "../data/odds_series_b/" for the other dataset
header = True
for line in source.readlines():
if header:
header = False
continue
cols = line.rstrip().split(',')
details = cols[0:5]
fn = 'match_' + details[0] + "_" + "_".join(details[1].split("-")) + "_" + "_".join(details[2].split(":")) + "_" + "_".join(details[3:]) + ".txt"
cols_mat = zip(*[iter(cols[5:])]*(72*3))
file_str = "\n".join([",".join(line) for line in cols_mat]) + "\n"
f = open(os.path.join(path,fn),"w")
f.write(file_str)
f.close()
| 727 | 41.823529 | 149 |
py
|
BeatTheBookie
|
BeatTheBookie-master/src/Figure1.py
|
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot  # needed so that mpl.pyplot is available for the plotting calls below
dir_path = '../data/'
data = pd.read_csv(dir_path + "closing_odds.csv")
#data = np.genfromtxt(dir_path + "closing_odds.csv", delimiter=',')
# Fields:
# 1. match_table_id: unique identifier of the game
# 2. league of the game
# 3. match date
# 4. home team
# 5. 90-minute score of home team
# 6. away team
# 7. 90-minute score of away team
# 8. average closing odds home win
# 9. average closing odds draw
# 10. average closing odds away win
# 11. maximum offered closing odds home win
# 12. maximum offered closing odds draw
# 13. maximum offered closing odds away win
# 14. name of bookmaker offering maximum closing odds for home win
# 15. name of bookmaker offering maximum closing odds for draw
# 16. name of bookmaker offering maximum closing odds for away win
# 17. number of available closing odds for home win
# 18. number of available closing odds for draw
# 19. number of available closing odds for away win
n_games = data.shape[0]
# Dataframe column names:
# match_id, league, match_date, home_team, home_score, away_team, away_score,
# avg_odds_home_win, avg_odds_draw, avg_odds_away_win, max_odds_home_win,
# max_odds_draw, max_odds_away_win, top_bookie_home_win, top_bookie_draw,
# top_bookie_away_win, n_odds_home_win, n_odds_draw, n_odds_away_win
leagues = data['league']
n_leagues = pd.unique(data['league']).shape[0]
prior_home = float(sum(data['home_score'] > data['away_score'])) / n_games
prior_draw = float(sum(data['home_score'] == data['away_score'])) / n_games
prior_away = float(sum(data['home_score'] < data['away_score'])) / n_games
print('Total number of games: ' + str(n_games) + "\n")
print('Total number of Leagues:' + str(n_leagues) + "\n")
print('Proportion of Home victories: ' + str(prior_home) + "\n")
print('Proportion of Draws: ' + str(prior_draw) + "\n")
print(('Proportion of Away victories: ' + str(prior_away) + "\n"))
# Calculate accuracy of prediction as a function of the implicit probability
# contained in the odds
odds_bins = np.arange(0, 1, 0.0125) # probability bins
min_games = 100
# Home victory
p_home = 1 / data['avg_odds_home_win']
p_draw = 1 / data['avg_odds_draw']
p_away = 1 / data['avg_odds_away_win']
home_score = data['home_score']
away_score = data['away_score']
acc_home = []
acc_draw = []
acc_away = []
bin_odds_home_mean = []
bin_odds_draw_mean = []
bin_odds_away_mean = []
for bn in range(0, len(odds_bins) - 2):
#print("bin " + str(bn + 1) + " from" + str(len(odds_bins) -1) + "\n")
# Get the data from the bin
inds_home = np.where((p_home > odds_bins[bn]) & (p_home <= odds_bins[bn + 1]))[0]
inds_draw = np.where((p_draw > odds_bins[bn]) & (p_draw <= odds_bins[bn + 1]))[0]
inds_away = np.where((p_away > odds_bins[bn]) & (p_away <= odds_bins[bn + 1]))[0]
# Get accuracy for home, draw away
if (len(inds_home) >= min_games):
acc_home.append(float(sum(home_score[inds_home] > away_score[inds_home])) / len(inds_home))
bin_odds_home_mean.append(np.mean(p_home[inds_home]))
if (len(inds_draw) >= min_games):
acc_draw.append(float(sum(home_score[inds_draw] == away_score[inds_draw])) / len(inds_draw))
bin_odds_draw_mean.append(np.mean(p_draw[inds_draw]))
if (len(inds_away) >= min_games):
acc_away.append(float(sum(home_score[inds_away] < away_score[inds_away])) / len(inds_away))
bin_odds_away_mean.append(np.mean(p_away[inds_away]))
plt.plot(acc_home, bin_odds_home_mean, '.k')
plt.plot(acc_draw, bin_odds_draw_mean, '.r')
plt.plot(acc_away, bin_odds_away_mean, '.b')
plt.show()
# linear regression
from sklearn import linear_model
from sklearn.metrics import r2_score
home_regr = linear_model.LinearRegression()
draw_regr = linear_model.LinearRegression()
away_regr = linear_model.LinearRegression()
x_home = np.array(bin_odds_home_mean).reshape(-1, 1)
y_home = np.array(acc_home).reshape(-1, 1)
x_draw = np.array(bin_odds_draw_mean).reshape(-1, 1)
y_draw = np.array(acc_draw).reshape(-1, 1)
x_away = np.array(bin_odds_away_mean).reshape(-1, 1)
y_away = np.array(acc_away).reshape(-1, 1)
# fit a linear regression line to the data
home_regr.fit(x_home, y_home)
draw_regr.fit(x_draw, y_draw)
away_regr.fit(x_away, y_away)
home_preds = home_regr.predict(x_home)
draw_preds = draw_regr.predict(x_draw)
away_preds = away_regr.predict(x_away)
print('Home r2: %1.3f, slope: %1.3f, intercept: %1.3f \n' % (r2_score(y_home, home_preds),
home_regr.coef_[0][0], home_regr.intercept_[0]))
print('Draw r2: %1.3f, slope: %1.3f, intercept: %1.3f \n' % (r2_score(y_draw, draw_preds),
draw_regr.coef_[0][0], draw_regr.intercept_[0]))
print('Away r2: %1.3f, slope: %1.3f, intercept: %1.3f \n' % (r2_score(y_away, away_preds),
away_regr.coef_[0][0], away_regr.intercept_[0]))
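# Interpretation note: if closing odds were perfectly calibrated, binned accuracy
# would equal implied probability, i.e. slope ~ 1 and intercept ~ 0. A quick
# sanity check on synthetic, perfectly calibrated data (illustrative values only,
# independent of the dataset above):
ident_x = np.linspace(0.1, 0.9, 9).reshape(-1, 1)
ident_regr = linear_model.LinearRegression().fit(ident_x, ident_x)
print('Synthetic calibrated data -> slope: %1.3f, intercept: %1.3f \n' % (
    ident_regr.coef_[0][0], ident_regr.intercept_[0]))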
| 4,908 | 36.473282 | 100 |
py
|
DeepAligned-Clustering
|
DeepAligned-Clustering-main/pretrain.py
|
from util import *
from model import *
from dataloader import *
class PretrainModelManager:
def __init__(self, args, data):
set_seed(args.seed)
self.model = BertForModel.from_pretrained(args.bert_model, cache_dir = "", num_labels = data.n_known_cls)
if args.freeze_bert_parameters:
self.freeze_parameters(self.model)
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.model.to(self.device)
n_gpu = torch.cuda.device_count()
if n_gpu > 1:
self.model = torch.nn.DataParallel(self.model)
self.num_train_optimization_steps = int(len(data.train_labeled_examples) / args.train_batch_size) * args.num_train_epochs
self.optimizer = self.get_optimizer(args)
self.best_eval_score = 0
def eval(self, args, data):
self.model.eval()
total_labels = torch.empty(0,dtype=torch.long).to(self.device)
total_logits = torch.empty((0, data.n_known_cls)).to(self.device)
for batch in tqdm(data.eval_dataloader, desc="Iteration"):
batch = tuple(t.to(self.device) for t in batch)
input_ids, input_mask, segment_ids, label_ids = batch
with torch.set_grad_enabled(False):
_, logits = self.model(input_ids, segment_ids, input_mask, mode = 'eval')
total_labels = torch.cat((total_labels,label_ids))
total_logits = torch.cat((total_logits, logits))
total_probs, total_preds = F.softmax(total_logits.detach(), dim=1).max(dim = 1)
y_pred = total_preds.cpu().numpy()
y_true = total_labels.cpu().numpy()
acc = round(accuracy_score(y_true, y_pred) * 100, 2)
return acc
def train(self, args, data):
wait = 0
best_model = None
for epoch in trange(int(args.num_train_epochs), desc="Epoch"):
self.model.train()
tr_loss = 0
nb_tr_examples, nb_tr_steps = 0, 0
for step, batch in enumerate(tqdm(data.train_labeled_dataloader, desc="Iteration")):
batch = tuple(t.to(self.device) for t in batch)
input_ids, input_mask, segment_ids, label_ids = batch
with torch.set_grad_enabled(True):
loss = self.model(input_ids, segment_ids, input_mask, label_ids, mode = "train")
loss.backward()
tr_loss += loss.item()
self.optimizer.step()
self.optimizer.zero_grad()
nb_tr_examples += input_ids.size(0)
nb_tr_steps += 1
loss = tr_loss / nb_tr_steps
print('train_loss',loss)
eval_score = self.eval(args, data)
print('eval_score',eval_score)
if eval_score > self.best_eval_score:
best_model = copy.deepcopy(self.model)
wait = 0
self.best_eval_score = eval_score
else:
wait += 1
if wait >= args.wait_patient:
break
self.model = best_model
if args.save_model:
self.save_model(args)
def get_optimizer(self, args):
param_optimizer = list(self.model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = BertAdam(optimizer_grouped_parameters,
lr = args.lr_pre,
warmup = args.warmup_proportion,
t_total = self.num_train_optimization_steps)
return optimizer
def save_model(self, args):
if not os.path.exists(args.pretrain_dir):
os.makedirs(args.pretrain_dir)
model_to_save = self.model.module if hasattr(self.model, 'module') else self.model
model_file = os.path.join(args.pretrain_dir, WEIGHTS_NAME)
model_config_file = os.path.join(args.pretrain_dir, CONFIG_NAME)
torch.save(model_to_save.state_dict(), model_file)
with open(model_config_file, "w") as f:
f.write(model_to_save.config.to_json_string())
def freeze_parameters(self,model):
for name, param in model.bert.named_parameters():
param.requires_grad = False
if "encoder.layer.11" in name or "pooler" in name:
param.requires_grad = True
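# A minimal, self-contained sketch (toy nn.Linear, illustrative only) of the
# weight-decay grouping used in get_optimizer above: parameters whose names match
# the no_decay list (biases, LayerNorm weights/biases) get weight_decay = 0, all
# remaining parameters get weight_decay = 0.01.
if __name__ == '__main__':
    example_layer = nn.Linear(4, 4)
    example_no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    example_groups = [
        {'params': [p for n, p in example_layer.named_parameters() if not any(nd in n for nd in example_no_decay)], 'weight_decay': 0.01},
        {'params': [p for n, p in example_layer.named_parameters() if any(nd in n for nd in example_no_decay)], 'weight_decay': 0.0}
    ]
    print('decayed tensors:', len(example_groups[0]['params']),
          'non-decayed tensors:', len(example_groups[1]['params']))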
| 4,907 | 40.59322 | 129 |
py
|
DeepAligned-Clustering
|
DeepAligned-Clustering-main/DeepAligned.py
|
from model import *
from init_parameter import *
from dataloader import *
from pretrain import *
from util import *
class ModelManager:
def __init__(self, args, data, pretrained_model=None):
if pretrained_model is None:
pretrained_model = BertForModel.from_pretrained(args.bert_model, cache_dir = "", num_labels = data.n_known_cls)
if os.path.exists(args.pretrain_dir):
pretrained_model = self.restore_model(args, pretrained_model)
self.pretrained_model = pretrained_model
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if args.cluster_num_factor > 1:
self.num_labels = self.predict_k(args, data)
else:
self.num_labels = data.num_labels
self.model = BertForModel.from_pretrained(args.bert_model, cache_dir = "", num_labels = self.num_labels)
if args.pretrain:
self.load_pretrained_model(args)
if args.freeze_bert_parameters:
self.freeze_parameters(self.model)
self.model.to(self.device)
num_train_examples = len(data.train_labeled_examples) + len(data.train_unlabeled_examples)
self.num_train_optimization_steps = int(num_train_examples / args.train_batch_size) * args.num_train_epochs
self.optimizer = self.get_optimizer(args)
self.best_eval_score = 0
self.centroids = None
self.test_results = None
self.predictions = None
self.true_labels = None
def get_features_labels(self, dataloader, model, args):
model.eval()
total_features = torch.empty((0,args.feat_dim)).to(self.device)
total_labels = torch.empty(0,dtype=torch.long).to(self.device)
for batch in tqdm(dataloader, desc="Extracting representation"):
batch = tuple(t.to(self.device) for t in batch)
input_ids, input_mask, segment_ids, label_ids = batch
with torch.no_grad():
feature = model(input_ids, segment_ids, input_mask, feature_ext = True)
total_features = torch.cat((total_features, feature))
total_labels = torch.cat((total_labels, label_ids))
return total_features, total_labels
def predict_k(self, args, data):
feats, _ = self.get_features_labels(data.train_semi_dataloader, self.pretrained_model, args)
feats = feats.cpu().numpy()
km = KMeans(n_clusters = data.num_labels).fit(feats)
y_pred = km.labels_
pred_label_list = np.unique(y_pred)
drop_out = len(feats) / data.num_labels
print('drop',drop_out)
cnt = 0
for label in pred_label_list:
num = len(y_pred[y_pred == label])
if num < drop_out:
cnt += 1
num_labels = len(pred_label_list) - cnt
print('pred_num',num_labels)
return num_labels
def get_optimizer(self, args):
param_optimizer = list(self.model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = BertAdam(optimizer_grouped_parameters,
lr = args.lr,
warmup = args.warmup_proportion,
t_total = self.num_train_optimization_steps)
return optimizer
def evaluation(self, args, data):
feats, labels = self.get_features_labels(data.test_dataloader, self.model, args)
feats = feats.cpu().numpy()
km = KMeans(n_clusters = self.num_labels).fit(feats)
y_pred = km.labels_
y_true = labels.cpu().numpy()
results = clustering_score(y_true, y_pred)
print('results',results)
ind, _ = hungray_aligment(y_true, y_pred)
map_ = {i[0]:i[1] for i in ind}
y_pred = np.array([map_[idx] for idx in y_pred])
cm = confusion_matrix(y_true,y_pred)
print('confusion matrix',cm)
self.test_results = results
self.save_results(args)
def alignment(self, km, args):
if self.centroids is not None:
old_centroids = self.centroids.cpu().numpy()
new_centroids = km.cluster_centers_
DistanceMatrix = np.linalg.norm(old_centroids[:,np.newaxis,:]-new_centroids[np.newaxis,:,:],axis=2)
row_ind, col_ind = linear_sum_assignment(DistanceMatrix)
new_centroids = torch.tensor(new_centroids).to(self.device)
self.centroids = torch.empty(self.num_labels ,args.feat_dim).to(self.device)
alignment_labels = list(col_ind)
for i in range(self.num_labels):
label = alignment_labels[i]
self.centroids[i] = new_centroids[label]
pseudo2label = {label:i for i,label in enumerate(alignment_labels)}
pseudo_labels = np.array([pseudo2label[label] for label in km.labels_])
else:
self.centroids = torch.tensor(km.cluster_centers_).to(self.device)
pseudo_labels = km.labels_
pseudo_labels = torch.tensor(pseudo_labels, dtype=torch.long).to(self.device)
return pseudo_labels
def update_pseudo_labels(self, pseudo_labels, args, data):
train_data = TensorDataset(data.semi_input_ids, data.semi_input_mask, data.semi_segment_ids, pseudo_labels)
train_sampler = SequentialSampler(train_data)
train_dataloader = DataLoader(train_data, sampler = train_sampler, batch_size = args.train_batch_size)
return train_dataloader
def train(self, args, data):
best_score = 0
best_model = None
wait = 0
for epoch in trange(int(args.num_train_epochs), desc="Epoch"):
feats, _ = self.get_features_labels(data.train_semi_dataloader, self.model, args)
feats = feats.cpu().numpy()
km = KMeans(n_clusters = self.num_labels).fit(feats)
score = metrics.silhouette_score(feats, km.labels_)
print('score',score)
if score > best_score:
best_model = copy.deepcopy(self.model)
wait = 0
best_score = score
else:
wait += 1
if wait >= args.wait_patient:
self.model = best_model
break
pseudo_labels = self.alignment(km, args)
train_dataloader = self.update_pseudo_labels(pseudo_labels, args, data)
tr_loss = 0
nb_tr_examples, nb_tr_steps = 0, 0
self.model.train()
for batch in tqdm(train_dataloader, desc="Pseudo-Training"):
batch = tuple(t.to(self.device) for t in batch)
input_ids, input_mask, segment_ids, label_ids = batch
loss = self.model(input_ids, segment_ids, input_mask, label_ids, mode='train')
loss.backward()
tr_loss += loss.item()
nb_tr_examples += input_ids.size(0)
nb_tr_steps += 1
self.optimizer.step()
self.optimizer.zero_grad()
tr_loss = tr_loss / nb_tr_steps
print('train_loss',tr_loss)
def load_pretrained_model(self, args):
pretrained_dict = self.pretrained_model.state_dict()
classifier_params = ['classifier.weight','classifier.bias']
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k not in classifier_params}
self.model.load_state_dict(pretrained_dict, strict=False)
def restore_model(self, args, model):
output_model_file = os.path.join(args.pretrain_dir, WEIGHTS_NAME)
model.load_state_dict(torch.load(output_model_file))
return model
def freeze_parameters(self,model):
for name, param in model.bert.named_parameters():
param.requires_grad = False
if "encoder.layer.11" in name or "pooler" in name:
param.requires_grad = True
def save_results(self, args):
if not os.path.exists(args.save_results_path):
os.makedirs(args.save_results_path)
var = [args.dataset, args.method, args.known_cls_ratio, args.labeled_ratio, args.cluster_num_factor, args.seed, self.num_labels]
names = ['dataset', 'method', 'known_cls_ratio', 'labeled_ratio', 'cluster_num_factor','seed', 'K']
vars_dict = {k:v for k,v in zip(names, var) }
results = dict(self.test_results,**vars_dict)
keys = list(results.keys())
values = list(results.values())
file_name = 'results.csv'
results_path = os.path.join(args.save_results_path, file_name)
if not os.path.exists(results_path):
ori = []
ori.append(values)
df1 = pd.DataFrame(ori,columns = keys)
df1.to_csv(results_path,index=False)
else:
df1 = pd.read_csv(results_path)
new = pd.DataFrame(results,index=[1])
df1 = df1.append(new,ignore_index=True)
df1.to_csv(results_path,index=False)
data_diagram = pd.read_csv(results_path)
print('test_results', data_diagram)
if __name__ == '__main__':
print('Data and Parameters Initialization...')
parser = init_model()
args = parser.parse_args()
data = Data(args)
if args.pretrain:
print('Pre-training begin...')
manager_p = PretrainModelManager(args, data)
manager_p.train(args, data)
print('Pre-training finished!')
manager = ModelManager(args, data, manager_p.model)
else:
manager = ModelManager(args, data)
print('Training begin...')
manager.train(args,data)
print('Training finished!')
print('Evaluation begin...')
manager.evaluation(args, data)
print('Evaluation finished!')
manager.save_results(args)
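# A self-contained sketch (toy 2-D centroids, illustrative values only) of the
# centroid alignment performed in ModelManager.alignment above: new k-means
# centroids are matched to the previous epoch's centroids with the Hungarian
# algorithm on the pairwise distance matrix, so cluster ids stay stable across
# epochs.
if __name__ == '__main__':
    example_old = np.array([[0.0, 0.0], [5.0, 5.0]])
    example_new = np.array([[5.1, 4.9], [0.1, -0.1]]) # same clusters, swapped order
    example_dist = np.linalg.norm(example_old[:, np.newaxis, :] - example_new[np.newaxis, :, :], axis=2)
    example_row, example_col = linear_sum_assignment(example_dist)
    print('alignment (old index -> new index):', list(zip(example_row, example_col)))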
| 10,443 | 36.3 | 136 |
py
|
DeepAligned-Clustering
|
DeepAligned-Clustering-main/dataloader.py
|
from util import *
def set_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
class Data:
def __init__(self, args):
set_seed(args.seed)
max_seq_lengths = {'clinc':30, 'stackoverflow':45,'banking':55}
args.max_seq_length = max_seq_lengths[args.dataset]
processor = DatasetProcessor()
self.data_dir = os.path.join(args.data_dir, args.dataset)
self.all_label_list = processor.get_labels(self.data_dir)
self.n_known_cls = round(len(self.all_label_list) * args.known_cls_ratio)
self.known_label_list = list(np.random.choice(np.array(self.all_label_list), self.n_known_cls, replace=False))
self.num_labels = int(len(self.all_label_list) * args.cluster_num_factor)
self.train_labeled_examples, self.train_unlabeled_examples = self.get_examples(processor, args, 'train')
print('num_labeled_samples',len(self.train_labeled_examples))
print('num_unlabeled_samples',len(self.train_unlabeled_examples))
self.eval_examples = self.get_examples(processor, args, 'eval')
self.test_examples = self.get_examples(processor, args, 'test')
self.train_labeled_dataloader = self.get_loader(self.train_labeled_examples, args, 'train')
self.semi_input_ids, self.semi_input_mask, self.semi_segment_ids, self.semi_label_ids = self.get_semi(self.train_labeled_examples, self.train_unlabeled_examples, args)
self.train_semi_dataloader = self.get_semi_loader(self.semi_input_ids, self.semi_input_mask, self.semi_segment_ids, self.semi_label_ids, args)
self.eval_dataloader = self.get_loader(self.eval_examples, args, 'eval')
self.test_dataloader = self.get_loader(self.test_examples, args, 'test')
def get_examples(self, processor, args, mode = 'train'):
ori_examples = processor.get_examples(self.data_dir, mode)
if mode == 'train':
train_labels = np.array([example.label for example in ori_examples])
train_labeled_ids = []
for label in self.known_label_list:
num = round(len(train_labels[train_labels == label]) * args.labeled_ratio)
pos = list(np.where(train_labels == label)[0])
train_labeled_ids.extend(random.sample(pos, num))
train_labeled_examples, train_unlabeled_examples = [], []
for idx, example in enumerate(ori_examples):
if idx in train_labeled_ids:
train_labeled_examples.append(example)
else:
train_unlabeled_examples.append(example)
return train_labeled_examples, train_unlabeled_examples
elif mode == 'eval':
eval_examples = []
for example in ori_examples:
if example.label in self.known_label_list:
eval_examples.append(example)
return eval_examples
elif mode == 'test':
return ori_examples
raise ValueError('Unknown mode: %s' % mode)
def get_semi(self, labeled_examples, unlabeled_examples, args):
tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=True)
labeled_features = convert_examples_to_features(labeled_examples, self.known_label_list, args.max_seq_length, tokenizer)
unlabeled_features = convert_examples_to_features(unlabeled_examples, self.all_label_list, args.max_seq_length, tokenizer)
labeled_input_ids = torch.tensor([f.input_ids for f in labeled_features], dtype=torch.long)
labeled_input_mask = torch.tensor([f.input_mask for f in labeled_features], dtype=torch.long)
labeled_segment_ids = torch.tensor([f.segment_ids for f in labeled_features], dtype=torch.long)
labeled_label_ids = torch.tensor([f.label_id for f in labeled_features], dtype=torch.long)
unlabeled_input_ids = torch.tensor([f.input_ids for f in unlabeled_features], dtype=torch.long)
unlabeled_input_mask = torch.tensor([f.input_mask for f in unlabeled_features], dtype=torch.long)
unlabeled_segment_ids = torch.tensor([f.segment_ids for f in unlabeled_features], dtype=torch.long)
unlabeled_label_ids = torch.tensor([-1 for f in unlabeled_features], dtype=torch.long)
semi_input_ids = torch.cat([labeled_input_ids, unlabeled_input_ids])
semi_input_mask = torch.cat([labeled_input_mask, unlabeled_input_mask])
semi_segment_ids = torch.cat([labeled_segment_ids, unlabeled_segment_ids])
semi_label_ids = torch.cat([labeled_label_ids, unlabeled_label_ids])
return semi_input_ids, semi_input_mask, semi_segment_ids, semi_label_ids
def get_semi_loader(self, semi_input_ids, semi_input_mask, semi_segment_ids, semi_label_ids, args):
semi_data = TensorDataset(semi_input_ids, semi_input_mask, semi_segment_ids, semi_label_ids)
semi_sampler = SequentialSampler(semi_data)
semi_dataloader = DataLoader(semi_data, sampler=semi_sampler, batch_size = args.train_batch_size)
return semi_dataloader
def get_loader(self, examples, args, mode = 'train'):
tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=True)
if mode == 'train' or mode == 'eval':
features = convert_examples_to_features(examples, self.known_label_list, args.max_seq_length, tokenizer)
elif mode == 'test':
features = convert_examples_to_features(examples, self.all_label_list, args.max_seq_length, tokenizer)
input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)
data = TensorDataset(input_ids, input_mask, segment_ids, label_ids)
if mode == 'train':
sampler = RandomSampler(data)
dataloader = DataLoader(data, sampler=sampler, batch_size = args.train_batch_size)
elif mode == 'eval' or mode == 'test':
sampler = SequentialSampler(data)
dataloader = DataLoader(data, sampler=sampler, batch_size = args.eval_batch_size)
return dataloader
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, label_id):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with open(input_file, "r") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
if sys.version_info[0] == 2:
line = list(unicode(cell, 'utf-8') for cell in line)
lines.append(line)
return lines
class DatasetProcessor(DataProcessor):
def get_examples(self, data_dir, mode):
if mode == 'train':
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
elif mode == 'eval':
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "train")
elif mode == 'test':
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self, data_dir):
"""See base class."""
import pandas as pd
test = pd.read_csv(os.path.join(data_dir, "train.tsv"), sep="\t")
labels = np.unique(np.array(test['label']))
return labels
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
if len(line) != 2:
continue
guid = "%s-%s" % (set_type, i)
text_a = line[0]
label = line[1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):
"""Loads a data file into a list of `InputBatch`s."""
label_map = {}
for i, label in enumerate(label_list):
label_map[label] = i
features = []
for (ex_index, example) in enumerate(examples):
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambigiously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
segment_ids = [0] * len(tokens)
if tokens_b:
tokens += tokens_b + ["[SEP]"]
segment_ids += [1] * (len(tokens_b) + 1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
label_id = label_map[example.label]
# if ex_index < 5:
# logger.info("*** Example ***")
# logger.info("guid: %s" % (example.guid))
# logger.info("tokens: %s" % " ".join(
# [str(x) for x in tokens]))
# logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
# logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
# logger.info(
# "segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
# logger.info("label: %s (id = %d)" % (example.label, label_id))
features.append(
InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id))
return features
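# A minimal, self-contained sketch (hypothetical token ids, no real tokenizer) of
# the layout convert_examples_to_features builds for a single sentence with
# max_seq_length = 8: [CLS] tokens [SEP], then zero padding, with input_mask
# marking real tokens and segment_ids all zero for a single sequence.
_example_ids = [101, 2377, 2189, 102] # illustrative ids for "[CLS] play music [SEP]"
_example_mask = [1] * len(_example_ids)
_example_segments = [0] * len(_example_ids)
_example_pad = [0] * (8 - len(_example_ids))
_example_ids += _example_pad
_example_mask += _example_pad
_example_segments += _example_pad
assert len(_example_ids) == len(_example_mask) == len(_example_segments) == 8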
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop(0) # For dialogue context
else:
tokens_b.pop()
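# A small, self-contained usage sketch (made-up token lists) of the truncation
# heuristic above: the longer sequence is shortened one token at a time until the
# pair fits within max_length.
_example_a = ['a', 'b', 'c', 'd', 'e', 'f', 'g']
_example_b = ['x', 'y', 'z']
_truncate_seq_pair(_example_a, _example_b, 6)
assert _example_a == ['e', 'f', 'g'] and _example_b == ['x', 'y', 'z']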
| 13,724 | 44.598007 | 175 |
py
|
DeepAligned-Clustering
|
DeepAligned-Clustering-main/model.py
|
from util import *
class BertForModel(BertPreTrainedModel):
def __init__(self,config,num_labels):
super(BertForModel, self).__init__(config)
self.num_labels = num_labels
self.bert = BertModel(config)
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, num_labels)
self.apply(self.init_bert_weights)
def forward(self, input_ids = None, token_type_ids = None, attention_mask=None , labels = None, mode = None, centroids = None, labeled = False, feature_ext = False):
encoded_layer_12, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers = False)
pooled_output = self.dense(encoded_layer_12.mean(dim = 1))
pooled_output = self.activation(pooled_output)
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
if feature_ext:
return pooled_output
elif mode == 'train':
loss = nn.CrossEntropyLoss()(logits, labels)
return loss
else:
return pooled_output, logits
| 1,273 | 42.931034 | 169 |
py
|
DeepAligned-Clustering
|
DeepAligned-Clustering-main/util.py
|
import itertools
import subprocess
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import torch
import copy
import torch.nn.functional as F
import random
import csv
import sys
from torch import nn
from tqdm import tqdm_notebook, trange, tqdm
from pytorch_pretrained_bert.optimization import BertAdam
from pytorch_pretrained_bert.modeling import WEIGHTS_NAME,CONFIG_NAME,BertPreTrainedModel,BertModel
from pytorch_pretrained_bert.tokenization import BertTokenizer
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, TensorDataset)
from datetime import datetime
from sklearn.cluster import KMeans
from sklearn.metrics import confusion_matrix,normalized_mutual_info_score, adjusted_rand_score, accuracy_score
from scipy.optimize import linear_sum_assignment
from sklearn import metrics
def hungray_aligment(y_true, y_pred):
D = max(y_pred.max(), y_true.max()) + 1
w = np.zeros((D, D))
for i in range(y_pred.size):
w[y_pred[i], y_true[i]] += 1
ind = np.transpose(np.asarray(linear_sum_assignment(w.max() - w)))
return ind, w
def clustering_accuracy_score(y_true, y_pred):
ind, w = hungray_aligment(y_true, y_pred)
acc = sum([w[i, j] for i, j in ind]) / y_pred.size
return acc
def clustering_score(y_true, y_pred):
return {'ACC': round(clustering_accuracy_score(y_true, y_pred)*100, 2),
'ARI': round(adjusted_rand_score(y_true, y_pred)*100, 2),
'NMI': round(normalized_mutual_info_score(y_true, y_pred)*100, 2)}
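# A small usage sketch (made-up labels) of the clustering metrics above: the
# Hungarian alignment maps predicted cluster ids onto ground-truth ids, so a
# prediction that is the same partition under a different labelling scores 100.
if __name__ == '__main__':
    _y_true = np.array([0, 0, 1, 1, 2, 2])
    _y_pred = np.array([2, 2, 0, 0, 1, 1]) # identical partition, permuted ids
    print(clustering_score(_y_true, _y_pred)) # {'ACC': 100.0, 'ARI': 100.0, 'NMI': 100.0}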
| 1,544 | 31.87234 | 110 |
py
|
DeepAligned-Clustering
|
DeepAligned-Clustering-main/init_parameter.py
|
from argparse import ArgumentParser
def init_model():
parser = ArgumentParser()
parser.add_argument("--data_dir", default='data', type=str,
help="The input data dir. Should contain the .csv files (or other data files) for the task.")
parser.add_argument("--save_results_path", type=str, default='outputs', help="The path to save results.")
parser.add_argument("--pretrain_dir", default='pretrain_models', type=str,
help="The output directory where the model predictions and checkpoints will be written.")
parser.add_argument("--bert_model", default="uncased_L-12_H-768_A-12", type=str, help="The path for the pre-trained bert model.")
parser.add_argument("--max_seq_length", default=None, type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.")
parser.add_argument("--feat_dim", default=768, type=int, help="The feature dimension.")
parser.add_argument("--warmup_proportion", default=0.1, type=float)
parser.add_argument("--freeze_bert_parameters", action="store_true", help="Freeze the last parameters of BERT.")
parser.add_argument("--save_model", action="store_true", help="Save trained model.")
parser.add_argument("--pretrain", action="store_true", help="Pre-train the model with labeled data.")
parser.add_argument("--dataset", default=None, type=str, required=True,
help="The name of the dataset to train selected.")
parser.add_argument("--known_cls_ratio", default=0.75, type=float, required=True, help="The number of known classes.")
parser.add_argument("--cluster_num_factor", default=1.0, type=float, required=True, help="The factor (magnification) of the number of clusters K.")
parser.add_argument('--seed', type=int, default=0, help="Random seed for initialization.")
parser.add_argument("--method", type=str, default='DeepAligned',help="Which method to use.")
parser.add_argument("--labeled_ratio", default=0.1, type=float, help="The ratio of labeled samples in the training set.")
parser.add_argument("--gpu_id", type=str, default='0', help="Select the GPU id.")
parser.add_argument("--train_batch_size", default=128, type=int,
help="Batch size for training.")
parser.add_argument("--eval_batch_size", default=64, type=int,
help="Batch size for evaluation.")
parser.add_argument("--wait_patient", default=20, type=int,
help="Patient steps for Early Stop.")
parser.add_argument("--num_pretrain_epochs", default=100, type=float,
help="The pre-training epochs.")
parser.add_argument("--num_train_epochs", default=100, type=float,
help="The training epochs.")
parser.add_argument("--lr_pre", default=5e-5, type=float,
help="The learning rate for pre-training.")
parser.add_argument("--lr", default=5e-5, type=float,
help="The learning rate for training.")
return parser
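# A small usage sketch (hypothetical command-line arguments) of the parser above;
# --dataset, --known_cls_ratio and --cluster_num_factor are the required flags.
if __name__ == '__main__':
    example_parser = init_model()
    example_args = example_parser.parse_args(
        ['--dataset', 'clinc', '--known_cls_ratio', '0.75', '--cluster_num_factor', '1.0'])
    print(example_args.dataset, example_args.train_batch_size, example_args.num_train_epochs)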
| 3,278 | 46.521739 | 151 |
py
|
lm-intervention
|
lm-intervention-master/experiment_num_agreement.py
|
import torch
# import torch.nn as nn
import torch.nn.functional as F
import numpy as np
# import random
from functools import partial
from tqdm import tqdm
# from tqdm import tqdm_notebook
import math
import statistics
from utils_num_agreement import batch, convert_results_to_pd
from transformers import (
GPT2LMHeadModel, GPT2Tokenizer,
TransfoXLTokenizer,
XLNetTokenizer,
BertForMaskedLM, BertTokenizer
)
from transformers_modified.modeling_transfo_xl import TransfoXLLMHeadModel
from transformers_modified.modeling_xlnet import XLNetLMHeadModel
from attention_intervention_model import (
AttentionOverride, TXLAttentionOverride, XLNetAttentionOverride, BertAttentionOverride
)
# sns.set(style="ticks", color_codes=True)
np.random.seed(1)
torch.manual_seed(1)
# Padding text for XLNet (from examples/text-generation/run_generation.py)
PADDING_TEXT = """In 1991, the remains of Russian Tsar Nicholas II and his family
(except for Alexei and Maria) are discovered.
The voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the
remainder of the story. 1883 Western Siberia,
a young Grigori Rasputin is asked by his father and a group of men to perform magic.
Rasputin has a vision and denounces one of the men as a horse thief. Although his
father initially slaps him for making such an accusation, Rasputin watches as the
man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous,
with people, even a bishop, begging for his blessing. <eod> </s> <eos>"""
class Intervention():
'''
Wrapper for all the possible interventions
'''
def __init__(self,
tokenizer,
base_string: str,
substitutes: list,
candidates: list,
device='cpu'):
super()
self.device = device
self.enc = tokenizer
if isinstance(tokenizer, XLNetTokenizer):
base_string = PADDING_TEXT + ' ' + base_string
# All the initial strings
# First item should be neutral, others tainted
self.base_strings = [base_string.format(s)
for s in substitutes]
# Tokenized bases
#self.base_strings_tok = [self.enc.encode(s)
# for s in self.base_strings]
# print(self.base_strings_tok)
#self.base_strings_tok = torch.LongTensor(self.base_strings_tok)\
# .to(device)
self.base_strings_tok = [
self.enc.encode(s, add_special_tokens=False,
add_space_before_punct_symbol=True)
for s in self.base_strings
]
self.base_strings_tok = torch.LongTensor(self.base_strings_tok).to(device)
# Where to intervene
#self.position = base_string.split().index('{}')
if isinstance(tokenizer, XLNetTokenizer):
diff = len(base_string.split()) - base_string.split().index('{}')
self.position = len(self.base_strings_tok[0]) - diff
assert len(self.base_strings_tok[0]) == len(self.base_strings_tok[1])
else:
self.position = base_string.split().index('{}')
self.candidates = []
for c in candidates:
# 'a ' added to input so that tokenizer understand that first word
# follows a space.
# tokens = self.enc.tokenize('. ' + c)[1:]
tokens = self.enc.tokenize('a ' + c,
add_space_before_punct_symbol=True)[1:]
assert(len(tokens) == 1)
self.candidates.append(tokens)
for s in substitutes:
# 'a ' added to input so that tokenizer understand that first word
# follows a space.
tokens = self.enc.tokenize('a ' + s,
add_space_before_punct_symbol=True)[1:]
assert(len(tokens) == 1)
self.candidates_tok = [self.enc.convert_tokens_to_ids(tokens)
for tokens in self.candidates]
class Model():
'''
Wrapper for all model logic
'''
def __init__(self,
device='cpu',
output_attentions=False,
random_weights=False,
gpt2_version='gpt2'):
super()
# check what model architecture we're using
self.is_gpt2 = (gpt2_version.startswith('gpt2') or
gpt2_version.startswith('distilgpt2'))
self.is_txl = gpt2_version.startswith('transfo-xl')
self.is_xlnet = gpt2_version.startswith('xlnet')
self.is_bert = gpt2_version.startswith('bert')
assert (self.is_gpt2 or self.is_txl or self.is_xlnet or self.is_bert)
self.device = device
#self.model = GPT2LMHeadModel.from_pretrained(
# gpt2_version,
# output_attentions=output_attentions)
self.model = (GPT2LMHeadModel if self.is_gpt2 else
XLNetLMHeadModel if self.is_xlnet else
TransfoXLLMHeadModel if self.is_txl else
BertForMaskedLM).from_pretrained(
gpt2_version,
output_attentions=output_attentions
)
self.model.eval()
self.model.to(device)
if random_weights:
print('Randomizing weights')
self.model.init_weights()
# Options
self.top_k = 5
# 12 for GPT-2
# self.num_layers = len(self.model.transformer.h)
self.num_layers = self.model.config.num_hidden_layers
# 768 for GPT-2
# self.num_neurons = self.model.transformer.wte.weight.shape[1]
self.num_neurons = self.model.config.hidden_size
# 12 for GPT-2
# self.num_heads = self.model.transformer.h[0].attn.n_head
self.num_heads = self.model.config.num_attention_heads
self.masking_approach = 1
tokenizer = (GPT2Tokenizer if self.is_gpt2 else
TransfoXLTokenizer if self.is_txl else
XLNetTokenizer if self.is_xlnet else
BertTokenizer).from_pretrained(gpt2_version)
# Special token id's: (mask, cls, sep)
self.st_ids = (tokenizer.mask_token_id,
tokenizer.cls_token_id,
tokenizer.sep_token_id)
# To account for switched dimensions in model internals:
# Default: [batch_size, seq_len, hidden_dim],
# txl and xlnet: [seq_len, batch_size, hidden_dim]
self.order_dims = lambda a: a
if self.is_gpt2:
self.attention_layer = lambda layer: self.model.transformer.h[layer].attn
self.word_emb_layer = self.model.transformer.wte
self.neuron_layer = lambda layer: self.model.transformer.h[layer].mlp
elif self.is_txl:
self.attention_layer = lambda layer: self.model.transformer.layers[layer].dec_attn
self.word_emb_layer = self.model.transformer.word_emb
self.neuron_layer = lambda layer: self.model.transformer.layers[layer].pos_ff
self.order_dims = lambda a: (a[1], a[0], *a[2:])
elif self.is_xlnet:
self.attention_layer = lambda layer: self.model.transformer.layer[layer].rel_attn
self.word_emb_layer = self.model.transformer.word_embedding
self.neuron_layer = lambda layer: self.model.transformer.layer[layer].ff
self.order_dims = lambda a: (a[1], a[0], *a[2:])
elif self.is_bert:
self.attention_layer = lambda layer: self.model.bert.encoder.layer[layer].attention.self
self.word_emb_layer = self.model.bert.embeddings.word_embeddings
self.neuron_layer = lambda layer: self.model.bert.encoder.layer[layer].output
def mlm_inputs(self, context, candidate):
input_tokens = []
for i in range(len(candidate)):
combined = context + candidate[:i] + [self.st_ids[0]]
if self.masking_approach in [2, 5]:
combined = combined + candidate[i+1:]
elif self.masking_approach in [3, 6]:
combined = combined + [self.st_ids[0]] * len(candidate[i+1:])
if self.masking_approach > 3:
combined = [self.st_ids[1]] + combined + [self.st_ids[2]]
pred_idx = combined.index(self.st_ids[0])
input_tokens.append((combined, pred_idx))
return input_tokens
def xlnet_forward(self, batch, clen):
""" Return the outputs of XLNet's forward pass;
clen = length of the candidate """
bsz, seqlen = batch.shape
perm_mask = torch.triu(
torch.ones((bsz, seqlen, seqlen), device=self.device), diagonal=0)
perm_mask[:, :, :-clen] = 0
#if self.masking_approach == 2:
# perm_mask[:, -clen:, -clen:] = torch.eye(clen)
target_mapping = torch.zeros(
(bsz, clen, seqlen), dtype=torch.float, device=self.device)
target_mapping[:, :, -clen:] = torch.eye(clen)
return self.model(batch,
perm_mask=perm_mask,
target_mapping=target_mapping)
def get_representations(self, context, position):
# Hook for saving the representation
def extract_representation_hook(module,
input,
output,
position,
representations,
layer):
# representations[layer] = output[0][position]
if self.is_xlnet and output.shape[0] == 1: return output
representations[layer] = output[self.order_dims((0, position))]
handles = []
representation = {}
with torch.no_grad():
# construct all the hooks
# word embeddings will be layer -1
# handles.append(self.model.transformer.wte.register_forward_hook(
handles.append(self.word_emb_layer.register_forward_hook(
partial(extract_representation_hook,
position=position,
representations=representation,
layer=-1)))
# hidden layers
for layer in range(self.num_layers):
#handles.append(self.model.transformer.h[layer]\
# .mlp.register_forward_hook(
handles.append(self.neuron_layer(layer).register_forward_hook(
partial(extract_representation_hook,
position=position,
representations=representation,
layer=layer)))
# logits, past = self.model(context)
if self.is_xlnet:
self.xlnet_forward(context.unsqueeze(0), clen=1)
else:
self.model(context.unsqueeze(0))
for h in handles:
h.remove()
# print(representation[0][:5])
return representation
def get_probabilities_for_examples(self, context, candidates):
"""Return probabilities of single-token candidates given context"""
for c in candidates:
if len(c) > 1:
raise ValueError(f"Multiple tokens not allowed: {c}")
outputs = [c[0] for c in candidates]
# logits, past = self.model(context)[:2]
if self.is_xlnet:
logits = self.xlnet_forward(context, clen=1)[0]
else:
logits = self.model(context)[0]
logits = logits[:, -1, :]
probs = F.softmax(logits, dim=-1)
return probs[:, outputs].tolist()
def get_probabilities_for_examples_multitoken(self, context, candidates):
"""
Return probability of multi-token candidates given context.
Prob of each candidate is normalized by number of tokens.
Args:
context: Tensor of token ids in context
candidates: list of list of token ids in each candidate
Returns: list containing probability for each candidate
"""
# TODO: Combine into single batch
token_log_probs = []
mean_probs = []
context = context.tolist()
for candidate in candidates:
if self.is_bert:
mlm_inputs = self.mlm_inputs(context, candidate)
for i, c in enumerate(candidate):
combined, pred_idx = mlm_inputs[i]
batch = torch.tensor(combined).unsqueeze(dim=0).to(self.device)
logits = self.model(batch)[0]
log_probs = F.log_softmax(logits[-1, :, :], dim=-1)
token_log_probs.append(log_probs[pred_idx][c].item())
elif self.is_xlnet:
combined = context + candidate
batch = torch.tensor(combined).unsqueeze(dim=0).to(self.device)
logits = self.xlnet_forward(batch, clen=len(candidate))[0]
log_probs = F.log_softmax(logits[-1, :, :], dim=-1)
for i, next_token_id in enumerate(candidate):
token_log_probs.append(log_probs[i][next_token_id].item())
else:
combined = context + candidate
# Exclude last token position when predicting next token
batch = torch.tensor(combined[:-1]).unsqueeze(dim=0).to(self.device)
# Shape (batch_size, seq_len, vocab_size)
logits = self.model(batch)[0]
# Shape (seq_len, vocab_size)
log_probs = F.log_softmax(logits[-1, :, :], dim=-1)
context_end_pos = len(context) - 1
continuation_end_pos = context_end_pos + len(candidate)
# TODO: Vectorize this
# Up to but not including last token position
for i in range(context_end_pos, continuation_end_pos):
next_token_id = combined[i+1]
next_token_log_prob = log_probs[i][next_token_id].item()
token_log_probs.append(next_token_log_prob)
mean_token_log_prob = statistics.mean(token_log_probs)
mean_token_prob = math.exp(mean_token_log_prob)
mean_probs.append(mean_token_prob)
return mean_probs
def neuron_intervention(self,
context,
outputs,
rep,
layers,
neurons,
position,
intervention_type='diff',
alpha=1.):
# Hook for changing representation during forward pass
def intervention_hook(module,
input,
output,
position,
neurons,
intervention,
intervention_type):
# XLNet: ignore query stream
if self.is_xlnet and output.shape[0] == 1: return output
# Get the neurons to intervene on
neurons = torch.LongTensor(neurons).to(self.device)
# First grab the position across batch
# Then, for each element, get correct index w/ gather
#base = output[:, position, :].gather(
# 1, neurons)
base_slice = self.order_dims((slice(None), position, slice(None)))
base = output[base_slice].gather(1, neurons)
intervention_view = intervention.view_as(base)
if intervention_type == 'replace':
base = intervention_view
elif intervention_type == 'diff':
base += intervention_view
else:
raise ValueError(f"Invalid intervention_type: {intervention_type}")
# Overwrite values in the output
# First define mask where to overwrite
# scatter_mask = torch.zeros_like(output).byte()
scatter_mask = torch.zeros_like(output, dtype=torch.bool)
for i, v in enumerate(neurons):
# scatter_mask[i, position, v] = 1
scatter_mask[self.order_dims((i, position, v))] = 1
# Then take values from base and scatter
output.masked_scatter_(scatter_mask, base.flatten())
# Set up the context as batch
batch_size = len(neurons)
context = context.unsqueeze(0).repeat(batch_size, 1)
handle_list = []
for layer in set(layers):
neuron_loc = np.where(np.array(layers) == layer)[0]
n_list = []
for n in neurons:
unsorted_n_list = [n[i] for i in neuron_loc]
n_list.append(list(np.sort(unsorted_n_list)))
if self.is_txl: m_list = list(np.array(n_list).squeeze())
else: m_list = n_list
intervention_rep = alpha * rep[layer][m_list]
if layer == -1:
handle_list.append(self.word_emb_layer.register_forward_hook(
partial(intervention_hook,
position=position,
neurons=n_list,
intervention=intervention_rep,
intervention_type=intervention_type)))
else:
handle_list.append(self.neuron_layer(layer).register_forward_hook(
partial(intervention_hook,
position=position,
neurons=n_list,
intervention=intervention_rep,
intervention_type=intervention_type)))
new_probabilities = self.get_probabilities_for_examples(
context,
outputs)
for hndle in handle_list:
hndle.remove()
return new_probabilities
def head_pruning_intervention(self,
context,
outputs,
layer,
head):
# Recreate model and prune head
save_model = self.model
# TODO Make this more efficient
self.model = GPT2LMHeadModel.from_pretrained('gpt2')
self.model.prune_heads({layer: [head]})
self.model.eval()
# Compute probabilities without head
new_probabilities = self.get_probabilities_for_examples(
context,
outputs)
# Reinstate original model
# TODO Handle this in cleaner way
self.model = save_model
return new_probabilities
def attention_intervention(self,
context,
outputs,
attn_override_data):
""" Override attention values in specified layer
Args:
context: context text
outputs: candidate outputs
attn_override_data: list of dicts of form:
{
'layer': <index of layer on which to intervene>,
'attention_override': <values to override the computed attention weights.
Shape is [batch_size, num_heads, seq_len, seq_len]>,
'attention_override_mask': <indicates which attention weights to override.
Shape is [batch_size, num_heads, seq_len, seq_len]>
}
"""
def intervention_hook(module, input, outputs, attn_override, attn_override_mask):
#attention_override_module = AttentionOverride(
# module, attn_override, attn_override_mask)
attention_override_module = (AttentionOverride if self.is_gpt2 else
TXLAttentionOverride if self.is_txl else
XLNetAttentionOverride)(
module, attn_override, attn_override_mask
)
# outputs[:] = attention_override_module(*input)
return attention_override_module(*input)
with torch.no_grad():
hooks = []
for d in attn_override_data:
# use the statement in the line below for the `swap_number` intervention.
attn_override = d['attention_override']
# uncomment the line below to use the `zero` intervention.
# attn_override = torch.zeros_like(d['attention_override'])
attn_override_mask = d['attention_override_mask']
layer = d['layer']
hooks.append(self.attention_layer(layer).register_forward_hook(
partial(intervention_hook,
attn_override=attn_override,
attn_override_mask=attn_override_mask)))
new_probabilities = self.get_probabilities_for_examples_multitoken(
context,
outputs)
for hook in hooks:
hook.remove()
return new_probabilities
def neuron_intervention_experiment(self,
word2intervention,
intervention_type, layers_to_adj=[], neurons_to_adj=[],
alpha=1, intervention_loc='all'):
"""
run multiple intervention experiments
"""
# if you run into memory issues, use the `bsize` argument
# bsize=100 works for XLNet, bsize=1 for TransformerXL
word2intervention_results = {}
for word in tqdm(word2intervention, desc='words'):
word2intervention_results[word] = self.neuron_intervention_single_experiment(
word2intervention[word], intervention_type, layers_to_adj, neurons_to_adj,
alpha, intervention_loc=intervention_loc)
return word2intervention_results
def neuron_intervention_single_experiment(self,
intervention,
intervention_type, layers_to_adj=[], neurons_to_adj=[],
alpha=100,
bsize=800, intervention_loc='all'):
"""
run one full neuron intervention experiment
"""
if self.is_txl: bsize = 100
if self.is_xlnet or self.is_txl: bsize = 32
with torch.no_grad():
'''
Compute representations for base terms (one for each side of bias)
'''
if self.is_xlnet:
num_alts = intervention.base_strings_tok.shape[0]
masks = torch.tensor([self.st_ids[0]]).repeat(num_alts, 1).to(self.device)
intervention.base_strings_tok = torch.cat(
(intervention.base_strings_tok, masks), dim=1)
base_representations = self.get_representations(
intervention.base_strings_tok[0],
intervention.position)
complement_representations = self.get_representations(
intervention.base_strings_tok[1],
intervention.position)
if intervention_type == 'indirect':
context = intervention.base_strings_tok[0]
rep = complement_representations
replace_or_diff = 'replace'
elif intervention_type == 'direct':
context = intervention.base_strings_tok[1]
rep = base_representations
replace_or_diff = 'replace'
else:
raise ValueError(f"Invalid intervention_type: {intervention_type}")
# Probabilities without intervention (Base case)
candidate1_base_prob, candidate2_base_prob = self.get_probabilities_for_examples(
intervention.base_strings_tok[0].unsqueeze(0),
intervention.candidates_tok)[0]
candidate1_alt_prob, candidate2_alt_prob = self.get_probabilities_for_examples(
intervention.base_strings_tok[1].unsqueeze(0),
intervention.candidates_tok)[0]
# Now intervening on potentially biased example
if intervention_loc == 'all':
candidate1_probs = torch.zeros((self.num_layers + 1, self.num_neurons))
candidate2_probs = torch.zeros((self.num_layers + 1, self.num_neurons))
for layer in range(-1, self.num_layers):
for neurons in batch(range(self.num_neurons), bsize):
neurons_to_search = [[i] + neurons_to_adj for i in neurons]
layers_to_search = [layer] + layers_to_adj
probs = self.neuron_intervention(
context=context,
outputs=intervention.candidates_tok,
rep=rep,
layers=layers_to_search,
neurons=neurons_to_search,
position=intervention.position,
intervention_type=replace_or_diff,
alpha=alpha)
for neuron, (p1, p2) in zip(neurons, probs):
candidate1_probs[layer + 1][neuron] = p1
candidate2_probs[layer + 1][neuron] = p2
# Now intervening on potentially biased example
elif intervention_loc == 'layer':
layers_to_search = (len(neurons_to_adj) + 1)*[layers_to_adj]
candidate1_probs = torch.zeros((1, self.num_neurons))
candidate2_probs = torch.zeros((1, self.num_neurons))
for neurons in batch(range(self.num_neurons), bsize):
neurons_to_search = [[i] + neurons_to_adj for i in neurons]
probs = self.neuron_intervention(
context=context,
outputs=intervention.candidates_tok,
rep=rep,
layers=layers_to_search,
neurons=neurons_to_search,
position=intervention.position,
intervention_type=replace_or_diff,
alpha=alpha)
for neuron, (p1, p2) in zip(neurons, probs):
candidate1_probs[0][neuron] = p1
candidate2_probs[0][neuron] = p2
else:
probs = self.neuron_intervention(
context=context,
outputs=intervention.candidates_tok,
rep=rep,
layers=layers_to_adj,
neurons=neurons_to_adj,
position=intervention.position,
intervention_type=replace_or_diff,
alpha=alpha)
for neuron, (p1, p2) in zip(neurons_to_adj, probs):
candidate1_probs = p1
candidate2_probs = p2
return (candidate1_base_prob, candidate2_base_prob,
candidate1_alt_prob, candidate2_alt_prob,
candidate1_probs, candidate2_probs)
def attention_intervention_experiment(self, intervention, effect):
"""
Run one full attention intervention experiment
measuring indirect or direct effect.
"""
# E.g. The doctor asked the nurse a question. She
x = intervention.base_strings_tok[0]
# E.g. The doctor asked the nurse a question. He
x_alt = intervention.base_strings_tok[1]
if effect == 'indirect':
input = x_alt # Get attention for x_alt
elif effect == 'direct':
input = x # Get attention for x
else:
raise ValueError(f"Invalid effect: {effect}")
# batch = torch.tensor(input).unsqueeze(0).to(self.device)
# attention_override = self.model(batch)[-1]
if self.is_xlnet:
batch = input.clone().detach().unsqueeze(0).to(self.device)
target_mapping = torch.zeros(
(1, 1, len(input)), dtype=torch.float, device=self.device)
attention_override = self.model(
batch, target_mapping=target_mapping)[-1]
else:
batch = input.clone().detach().unsqueeze(0).to(self.device)
attention_override = self.model(batch)[-1]
batch_size = 1
seq_len = len(x)
seq_len_alt = len(x_alt)
assert seq_len == seq_len_alt
# assert len(attention_override) == self.num_layers
# assert attention_override[0].shape == (batch_size, self.num_heads, seq_len, seq_len)
with torch.no_grad():
candidate1_probs_head = torch.zeros((self.num_layers, self.num_heads))
candidate2_probs_head = torch.zeros((self.num_layers, self.num_heads))
candidate1_probs_layer = torch.zeros(self.num_layers)
candidate2_probs_layer = torch.zeros(self.num_layers)
if effect == 'indirect':
context = x
else:
context = x_alt
# Intervene at every layer and head by overlaying attention induced by x_alt
model_attn_override_data = [] # Save layer interventions for model-level intervention later
for layer in range(self.num_layers):
layer_attention_override = attention_override[layer]
if self.is_xlnet:
attention_override_mask = torch.ones_like(layer_attention_override[0], dtype=torch.uint8)
else:
attention_override_mask = torch.ones_like(layer_attention_override, dtype=torch.uint8)
layer_attn_override_data = [{
'layer': layer,
'attention_override': layer_attention_override,
'attention_override_mask': attention_override_mask
}]
candidate1_probs_layer[layer], candidate2_probs_layer[layer] = self.attention_intervention(
context=context,
outputs=intervention.candidates_tok,
attn_override_data = layer_attn_override_data)
model_attn_override_data.extend(layer_attn_override_data)
for head in range(self.num_heads):
if self.is_xlnet:
attention_override_mask = torch.zeros_like(layer_attention_override[0], dtype=torch.uint8)
else:
attention_override_mask = torch.zeros_like(layer_attention_override, dtype=torch.uint8)
attention_override_mask[0][head] = 1 # Set mask to 1 for single head only
head_attn_override_data = [{
'layer': layer,
'attention_override': layer_attention_override,
'attention_override_mask': attention_override_mask
}]
candidate1_probs_head[layer][head], candidate2_probs_head[layer][head] = self.attention_intervention(
context=context,
outputs=intervention.candidates_tok,
attn_override_data=head_attn_override_data)
# Intervene on entire model by overlaying attention induced by x_alt
candidate1_probs_model, candidate2_probs_model = self.attention_intervention(
context=context,
outputs=intervention.candidates_tok,
attn_override_data=model_attn_override_data)
return candidate1_probs_head, candidate2_probs_head, candidate1_probs_layer, candidate2_probs_layer,\
candidate1_probs_model, candidate2_probs_model
def attention_intervention_single_experiment(self, intervention, effect, layers_to_adj, heads_to_adj, search):
"""
Run one full attention intervention experiment
measuring indirect or direct effect.
"""
# E.g. The doctor asked the nurse a question. He
x = intervention.base_strings_tok[0]
# E.g. The doctor asked the nurse a question. She
x_alt = intervention.base_strings_tok[1]
if effect == 'indirect':
input = x_alt # Get attention for x_alt
elif effect == 'direct':
input = x # Get attention for x
else:
raise ValueError(f"Invalid effect: {effect}")
batch = input.clone().detach().unsqueeze(0).to(self.device)
attention_override = self.model(batch)[-1]
batch_size = 1
seq_len = len(x)
seq_len_alt = len(x_alt)
assert seq_len == seq_len_alt
assert len(attention_override) == self.num_layers
assert attention_override[0].shape == (batch_size, self.num_heads, seq_len, seq_len)
with torch.no_grad():
if search:
candidate1_probs_head = torch.zeros((self.num_layers, self.num_heads))
candidate2_probs_head = torch.zeros((self.num_layers, self.num_heads))
if effect == 'indirect':
context = x
else:
context = x_alt
model_attn_override_data = []
for layer in range(self.num_layers):
if layer in layers_to_adj:
layer_attention_override = attention_override[layer]
layer_ind = np.where(layers_to_adj == layer)[0]
heads_in_layer = heads_to_adj[layer_ind]
attention_override_mask = torch.zeros_like(layer_attention_override, dtype=torch.uint8)
# set multiple heads in layer to 1
for head in heads_in_layer:
attention_override_mask[0][head] = 1 # Set mask to 1 for single head only
# get head mask
head_attn_override_data = [{
'layer': layer,
'attention_override': layer_attention_override,
'attention_override_mask': attention_override_mask
}]
# should be the same length as the number of unique layers to adj
model_attn_override_data.extend(head_attn_override_data)
# basically generate the mask for the layers_to_adj and heads_to_adj
if search:
for layer in range(self.num_layers):
layer_attention_override = attention_override[layer]
layer_ind = np.where(layers_to_adj == layer)[0]
heads_in_layer = heads_to_adj[layer_ind]
for head in range(self.num_heads):
if head not in heads_in_layer:
model_attn_override_data_search = []
attention_override_mask = torch.zeros_like(layer_attention_override, dtype=torch.uint8)
heads_list = [head]
if len(heads_in_layer) > 0:
heads_list.extend(heads_in_layer)
for h in (heads_list):
attention_override_mask[0][h] = 1 # Set mask to 1 for single head only
head_attn_override_data = [{
'layer': layer,
'attention_override': layer_attention_override,
'attention_override_mask': attention_override_mask
}]
model_attn_override_data_search.extend(head_attn_override_data)
for override in model_attn_override_data:
if override['layer'] != layer:
model_attn_override_data_search.append(override)
candidate1_probs_head[layer][head], candidate2_probs_head[layer][head] = self.attention_intervention(
context=context,
outputs=intervention.candidates_tok,
attn_override_data=model_attn_override_data_search)
else:
candidate1_probs_head[layer][head] = -1
candidate2_probs_head[layer][head] = -1
else:
candidate1_probs_head, candidate2_probs_head = self.attention_intervention(
context=context,
outputs=intervention.candidates_tok,
attn_override_data=model_attn_override_data)
return candidate1_probs_head, candidate2_probs_head
def main():
DEVICE = 'cpu'
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
model = Model(device=DEVICE)
base_sentence = "The {}"
base_word = 'key'
intervention = Intervention(
tokenizer,
base_sentence,
[base_word, 'keys'],
["is", "are"],
device=DEVICE)
interventions = {base_word: intervention}
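# Run the neuron-intervention experiment for the direct effect on this single example
# and write the per-example results to CSV.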
for intervention_type in ['direct']:
intervention_results = model.neuron_intervention_experiment(
interventions, intervention_type)
df = convert_results_to_pd(
interventions, intervention_results)
print('more probable candidate per layer, across all neurons in the layer')
print(df[0:5])
df.to_csv(f'results/intervention_examples/results_{intervention_type}.csv')
if __name__ == "__main__":
main()
| 37950 | 44.724096 | 129 |
py
|
lm-intervention
|
lm-intervention-master/aggregate_total_effect_bar_plot.py
|
import pandas as pd
import matplotlib.pyplot as plt
import sys
import seaborn as sns
sns.set()
PATH = sys.argv[1]
FIGURES_PATH = sys.argv[2]
MODELS = ['Distil', 'Small', 'Medium', 'Large', 'XL']
EXAMPLE_TYPES = ['None', 'Distractor', 'Plural attractor',
'Singular attractor']
FORMAT = '.pdf'
def save_aggregate_total_effect_bar():
try: df = pd.read_feather(PATH + 'effects.feather')
except FileNotFoundError:
print(PATH + 'effects.feather not found. '
+ f'Run `make_feathers.py {PATH}` to generate.')
return
data = df[~df.Random & (df['Effect type'] == 'Indirect')]\
.groupby(['Model size', 'Intervening tokens', 'base_string',
'candidate1'])\
.mean().reset_index()
sns.FacetGrid(data,
row='Intervening tokens', row_order=EXAMPLE_TYPES,
height=5, aspect=2,
sharey=True, sharex=False)\
.map(sns.barplot, 'Model size', 'Total effect',
orient='v', order=MODELS)\
.set(yscale='log')
title = 'Total effects'
plt.suptitle(title)
plt.tight_layout(rect=[0, 0, 1, 0.95])
plt.savefig(FIGURES_PATH + f'{title.lower().replace(" ", "_")}' + FORMAT)
plt.show()
if __name__ == "__main__":
save_aggregate_total_effect_bar()
| 1324 | 31.317073 | 77 |
py
|
lm-intervention
|
lm-intervention-master/attention_utils.py
|
import torch
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
from tqdm import tqdm
import pandas as pd
import numpy as np
from scipy.stats import ttest_ind
def perform_intervention(intervention, model, effect_types=('indirect', 'direct')):
"""Perform intervention and return results for specified effects"""
x = intervention.base_strings_tok[0] # E.g. The doctor asked the nurse a question. She
x_alt = intervention.base_strings_tok[1] # E.g. The doctor asked the nurse a question. He
with torch.no_grad():
candidate1_base_prob, candidate2_base_prob = model.get_probabilities_for_examples_multitoken(
x,
intervention.candidates_tok)
candidate1_alt_prob, candidate2_alt_prob = model.get_probabilities_for_examples_multitoken(
x_alt,
intervention.candidates_tok)
candidate1 = ' '.join(intervention.candidates[0]).replace('Ġ', '')
candidate2 = ' '.join(intervention.candidates[1]).replace('Ġ', '')
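# Odds of candidate2 vs candidate1 under the base prompt x and the alternate prompt x';
# the total effect is the relative change in these odds.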
odds_base = candidate2_base_prob / candidate1_base_prob
odds_alt = candidate2_alt_prob / candidate1_alt_prob
total_effect = (odds_alt - odds_base) / odds_base
results = {
'base_string1': intervention.base_strings[0],
'base_string2': intervention.base_strings[1],
'candidate1': candidate1,
'candidate2': candidate2,
'candidate1_base_prob': candidate1_base_prob,
'candidate2_base_prob': candidate2_base_prob,
'odds_base': odds_base,
'candidate1_alt_prob': candidate1_alt_prob,
'candidate2_alt_prob': candidate2_alt_prob,
'odds_alt': odds_alt,
'total_effect': total_effect,
}
for effect_type in effect_types:
candidate1_probs_head, candidate2_probs_head, candidate1_probs_layer, candidate2_probs_layer,\
candidate1_probs_model, candidate2_probs_model = model.attention_intervention_experiment(
intervention, effect_type)
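# Odds under intervention at head, layer, and whole-model granularity;
# each effect below is the relative change from the baseline odds.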
odds_intervention_head = candidate2_probs_head / candidate1_probs_head
odds_intervention_layer = candidate2_probs_layer / candidate1_probs_layer
odds_intervention_model = candidate2_probs_model / candidate1_probs_model
effect_head = (odds_intervention_head - odds_base) / odds_base
effect_layer = (odds_intervention_layer - odds_base) / odds_base
effect_model = (odds_intervention_model - odds_base) / odds_base
results[effect_type + "_odds_head"] = odds_intervention_head.tolist()
results[effect_type + "_effect_head"] = effect_head.tolist()
results[effect_type + "_effect_layer"] = effect_layer.tolist()
results[effect_type + "_effect_model"] = effect_model
return results
def report_intervention(results, effect_types=('indirect', 'direct'), verbose=False):
"""Report results for single intervention"""
print(f"x : {results['base_string1']}")
print(f"x': {results['base_string2']}")
print(f"c1: {results['candidate1']}")
print(f"c2: {results['candidate2']}")
print(f"\np(c2|x) / p(c1|x) = {results['odds_base']:.5f}")
print(f"p(c2|x') / p(c1|x') = {results['odds_alt']:.5f}")
print(f"\nTOTAL Effect: (p(c2|x') / p(c1|x')) / (p(c2|x) / p(c1|x)) - 1 = {results['total_effect']:.3f}")
for effect_type in effect_types:
if verbose:
print(f'\n{effect_type.upper()} Effect')
if effect_type == 'indirect':
print(" Intervention: replace Attn(x) with Attn(x') in a specific layer/head")
print(f" Effect = (p(c2|x, Attn(x')) / p(c1|x, Attn(x')) / (p(c2|x) / p(c1|x)) - 1")
elif effect_type == 'direct':
print(" Intervention: replace x with x' while preserving Attn(x) in a specific layer/head")
print(f" Effect = (p(c2|x', Attn(x)) / p(c1|x', Attn(x)) / (p(c2|x) / p(c1|x)) - 1")
plt.figure(figsize=(9, 7))
ax = sns.heatmap(results[effect_type + '_effect_head'], annot=True, annot_kws={"size": 12}, fmt=".2f")
ax.set(xlabel='Head', ylabel='Layer', title=f'{effect_type.capitalize()} Effect')
def perform_interventions(interventions, model, effect_types=('indirect', 'direct')):
"""Perform multiple interventions"""
results_list = []
for intervention in tqdm(interventions):
results = perform_intervention(intervention, model, effect_types)
results_list.append(results)
return results_list
def report_interventions_summary_by_head(results, effect_types=('indirect', 'direct'), verbose=False, k=10,
show_head_examples=False):
"""Report summary results for multiple interventions by head"""
df = pd.DataFrame(results)
print('*** SUMMARY BY HEAD ***')
print(f"Num interventions: {len(df)}")
print(f"Mean total effect: {df.total_effect.mean():.3f}")
for effect_type in effect_types:
effect = np.stack(df[effect_type + '_effect_head'].to_numpy()) # Convert column to 3d ndarray (num_examples x num_layers x num_heads)
mean_effect = effect.mean(axis=0)
if effect_type == 'indirect':
ranking_metric = mean_effect
else:
ranking_metric = -mean_effect
top_indices = topk_indices(ranking_metric, k)  # avoid shadowing the topk_indices helper
# Compute significance levels
all_values = effect.flatten()
print(f'\n{effect_type.upper()} Effect (mean = {all_values.mean()})')
print(f"Top {k} heads:")
for ind in top_indices:
layer, head = np.unravel_index(ind, mean_effect.shape)
head_values = effect[:, layer, head].flatten()
tstatistic, pvalue = ttest_ind(head_values, all_values)
if effect_type == 'indirect':
assert tstatistic > 0
else:
assert tstatistic < 0
one_tailed_pvalue = pvalue / 2
print(f' {layer} {head}: {mean_effect[layer, head]:.3f} (p={one_tailed_pvalue:.4f})')
if effect_type == 'indirect' and show_head_examples:
top_results_for_head = sorted(results,
key=lambda result: result['indirect_effect_head'][layer][head],
reverse=True)
for result in top_results_for_head[:3]:
print(f' {result["indirect_effect_head"][layer][head]:.3f} '
f'{result["base_string1"]} | {result["candidate1"]} | {result["candidate2"]}')
if verbose:
if effect_type == 'indirect':
print(" Intervention: replace Attn(x) with Attn(x') in a specific layer/head")
print(f" Effect = (p(c2|x, Attn(x')) / p(c1|x, Attn(x')) / (p(c2|x) / p(c1|x)) - 1")
elif effect_type == 'direct':
print(" Intervention: replace x with x' while preserving Attn(x) in a specific layer/head")
print(f" Effect = (p(c2|x', Attn(x)) / p(c1|x', Attn(x)) / (p(c2|x) / p(c1|x)) - 1")
plt.figure(figsize=(14, 10))
ax = sns.heatmap(mean_effect, annot=True, annot_kws={"size": 12}, fmt=".2f")
ax.set(xlabel='Head', ylabel='Layer', title=f'Mean {effect_type.capitalize()} Effect')
def report_interventions_summary_by_layer(results, effect_types=('indirect', 'direct')):
"""Report summary results for multiple interventions by layer"""
df = pd.DataFrame(results)
print('*** SUMMARY BY LAYER ***')
print(f"Num interventions: {len(df)}")
print(f"Mean total effect: {df.total_effect.mean():.3f}")
for effect_type in effect_types:
effect = np.stack(df[effect_type + '_effect_layer'].to_numpy()) # Convert column to 2d ndarray (num_examples x num_layers)
mean_effect = effect.mean(axis=0)
n_layers = mean_effect.shape[0]
plt.figure(figsize=(9, 7))
ax = sns.barplot(x=mean_effect, y=list(range(n_layers)), color="blue", saturation=.3, orient="h")
ax.set(ylabel='Layer', title=f'Mean {effect_type.capitalize()} Effect')
def get_odds_ratio(intervention, model):
x = intervention.base_strings_tok[0]
x_alt = intervention.base_strings_tok[1]
with torch.no_grad():
candidate1_base_prob, candidate2_base_prob = model.get_probabilities_for_examples_multitoken(
x,
intervention.candidates_tok)
candidate1_alt_prob, candidate2_alt_prob = model.get_probabilities_for_examples_multitoken(
x_alt,
intervention.candidates_tok)
odds_base = candidate2_base_prob / candidate1_base_prob
odds_alt = candidate2_alt_prob / candidate1_alt_prob
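# A ratio above 1 means the alternate prompt shifts the odds toward candidate2 relative to candidate1.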
return odds_alt / odds_base
def topk_indices(arr, k):
"""Return indices of top-k values"""
return (-arr).argsort(axis=None)[:k]
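# The returned indices refer to the flattened array; callers recover (layer, head) coordinates
# with np.unravel_index, e.g. for a hypothetical 12x12 mean-effect grid:
#   layer, head = np.unravel_index(topk_indices(mean_effect, 1)[0], mean_effect.shape)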
if __name__ == "__main__":
from transformers import GPT2Tokenizer
from experiment_num_agreement import Intervention, Model
from pandas import DataFrame
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
model = Model(output_attentions=True)
# Test experiment
interventions = [
Intervention(
tokenizer,
"The doctor asked the nurse a question. {}",
["He", "She"],
["asked", "answered"]),
Intervention(
tokenizer,
"The doctor asked the nurse a question. {}",
["He", "She"],
["requested", "responded"])
]
results = perform_interventions(interventions, model)
report_interventions_summary_by_layer(results)
| 9550 | 44.265403 | 142 |
py
|
lm-intervention
|
lm-intervention-master/attention_figures3.py
|
"""Creates figures showing attention for specific examples, based on JSON files"""
import json
import math
from operator import itemgetter
import numpy as np
import seaborn as sns
import torch
from matplotlib import pyplot as plt
from transformers import GPT2Model, GPT2Tokenizer
BLACK = '#000000'
GRAY = '#303030'
def save_fig(prompts, heads, model, tokenizer, fname, device, highlight_indices=None):
palette = sns.color_palette('muted')
plt.rc('text', usetex=True)
fig, axs = plt.subplots(1, 2, sharey=False, figsize=(4.0, 3.5))
axs[0].yaxis.set_ticks_position('none')
plt.rcParams.update({'axes.titlesize': 'xx-large'})
attentions = []
max_attn = 0
seqs = []
for g_index in range(2):
prompt = prompts[g_index]
print(prompt)
input_ = tokenizer.encode(prompt)
print(input_)
batch = torch.tensor(input_).unsqueeze(0).to(device)
attention = model(batch)[-1]
seq = tokenizer.convert_ids_to_tokens(input_)
print(seq)
seq = [t.replace('Ġ', '') for t in seq]
seqs.append(seq)
seq_len = len(input_)
attention = torch.stack(attention)
attention = attention.squeeze(1)
assert torch.allclose(attention.sum(-1), torch.tensor([1.0]))
attentions.append(attention)
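# Track the largest attention mass that the final token assigns (summed over the selected heads),
# to be used as a shared x-axis upper limit for both panels.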
attn_sum = torch.Tensor([0])
for layer, head in heads:
attn_sum = attention[layer][head][-1] + attn_sum
if max(attn_sum) > max_attn:
max_attn = max(attn_sum)
xlim_upper = math.ceil(max_attn * 10) / 10
for g_index in range(2):
attention = attentions[g_index]
head_names = []
ax = axs[g_index]
seq = seqs[g_index]
formatted_seq = []
if highlight_indices:
for i, t in enumerate(seq):
formatted_t = t
for j in range(2):
if i in highlight_indices[j]:
if j == g_index:
formatted_t = f"\\textbf{{{t}}}"
else:
formatted_t = f"\\setul{{.15ex}}{{.2ex}}\\ul{{{t}}}"
break
formatted_seq.append(formatted_t)
formatted_seq[-1] = f"\\textbf{{{formatted_seq[-1]}}}"
else:
formatted_seq = seq
print('formatted', formatted_seq)
plts = []
left = None
for i, (layer, head) in enumerate(heads):
attn_last_word = attention[layer][head][-1].numpy()
seq_placeholders = [f'a{i}' for i in range(len(formatted_seq))]
if left is None:
print(attn_last_word)
p = ax.barh(seq_placeholders, attn_last_word, color=palette[i], linewidth=0)
else:
p = ax.barh(seq_placeholders, attn_last_word, left=left, color=palette[i], linewidth=0)
print(ax.get_yticklabels())
ax.set_yticklabels(formatted_seq)
if left is None:
left = np.zeros_like(attn_last_word)
left += attn_last_word
if highlight_indices:
for i in range(seq_len):
if i in highlight_indices[g_index]:
color = BLACK
else:
color = GRAY
ax.get_yticklabels()[i].set_color(color)
ax.get_yticklabels()[-1].set_color(BLACK)
plts.append(p)
head_names.append(f"{layer}-{head}")
ax.set_xlim([0, xlim_upper])
# ax.set_xlim([0, 0.5])
ax.set_xticks([0, xlim_upper])
ax.invert_yaxis()
plt.setp(ax.get_yticklabels(), fontsize=8, ha='right')
#ax.set_xticks([0, 0.5])
plt.setp(ax.get_xticklabels(), fontsize=7)
sns.despine(left=True, bottom=True)
ax.tick_params(axis='x', pad=0, length=0)
ax.tick_params(axis='y', pad=0)
ax.yaxis.labelpad = 0
ax.xaxis.labelpad = 0
lgd = plt.figlegend(plts, head_names,'lower center', fontsize=7, borderpad=0.3, handlelength=.6,
handletextpad=.2, labelspacing = 0.1, bbox_to_anchor=(0.86, 0.11))
plt.savefig(fname, format='pdf', bbox_extra_artists = (lgd,), bbox_inches = 'tight')
plt.close()
def main():
sns.set_context("paper")
sns.set_style("white")
device = 'cpu'
plt.rc('text', usetex=True)
plt.rcParams.update({
"text.latex.preamble": [
"\\usepackage{color}",
"\\usepackage{soul}",
"\\setulcolor{blue}"
]
})
top_heads = {
'gpt2':[(10, 9), (11, 11)],
'gpt2-medium': [(10, 9), (6, 15), (10,12)],
'gpt2-xl':[(16,15), (16, 24), (17,10)],
'gpt2-large':[(16,19), (16,5), (15,6)],
'distilgpt2': [(3,1), (2,6), (3,6)]
}
models = ['gpt2']#, 'gpt2-medium', 'gpt2-xl', 'gpt2-large', 'distilgpt2']
# structures = ['simple', 'distractor_1', 'distractor', 'within_rc_singular', 'singular', 'rc_singular']
structures = ['simple']
'''
examples_to_highlight = {
"The guard appreciated getting treatment from the nurse": [[7], [1]],
"The driver transported the housekeeper to the job because": [[4,5], [1]],
"The manager promised to give a raise to the teacher": [[9], [1]],
"The driver never drove the librarian anywhere": [[5,6], [1]],
"The nurse examined the farmer for injuries because": [[1], [4]],
"The CEO ordered the cleaner out of the room": [[4],[1]],
"The hairdresser shouted at the driver because": [[1,2,3,4],[8]],
"The chief demanded an apology from the teacher because": [[7], [1]],
"The physician examined the auditor for sores because": [[4],[1]],
"The laborer dug a hole for the assistant because": [[8],[1,2]]
}
'''
# split = 'dev'
testing = False
for model_version in models:
for structure in structures:
if structure.startswith('within_rc'):
highlight_indices = [[4], [1]]
elif structure.startswith('rc'):
highlight_indices = [[1], [4]]
elif structure == 'singular' or structure == 'plural':
highlight_indices = [[1], [4]]
else:
highlight_indices = None
heads = top_heads[model_version]
if model_version == 'distilgpt2':
filter = 'unfiltered' # In order to get canonical example
else:
filter = 'filtered'
fname = f"attention_results/{structure}/attention_intervention_{model_version}_{filter}.json"
with open(fname) as f:
data = json.load(f)
prompts = None
results = data['results']
results_by_ratio = sorted(results, key=itemgetter('total_effect'), reverse=True)
with torch.no_grad():
# Get attention and validate
model = GPT2Model.from_pretrained(model_version, output_attentions=True)
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
model.eval()
for result_index, result in enumerate(results_by_ratio):
prompts = (result['base_string1']+' '+result['candidate1'], result['base_string2']+' '+result['candidate2'])
#highlight_indices = None
#for example, indices in examples_to_highlight.items():
# if example in prompts[0]:
# highlight_indices = indices
# break
fname = f'attention_figures/qualitative/{structure}_{model_version}_{filter}_{result_index}.pdf'
save_fig(prompts, heads, model, tokenizer, fname, device, highlight_indices)
if result_index >= 5:
break
# For testing only:
if testing:
break
if testing:
break
if __name__ == '__main__':
main()
| 8088 | 38.847291 | 128 |
py
|
lm-intervention
|
lm-intervention-master/generate_sentences.py
|
import os
import csv
from vocab_utils import get_nouns, get_verbs
PATH = "vocab/"
def get_nouns2():
noun2_list = []
with open(os.path.join(PATH, "noun2.txt"), "r") as noun2_file:
for noun2 in noun2_file:
noun2s, noun2p = noun2.split()
noun2_list.append((noun2s, noun2p))
return noun2_list
def get_verbs2():
verb2_list = []
with open(os.path.join(PATH, "verb2.txt"), "r") as verb2_file:
for verb2 in verb2_file:
verb2s, verb2p = verb2.split()
verb2_list.append((verb2s, verb2p))
return verb2_list
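# noun2.txt and verb2.txt are expected to hold one singular/plural pair per line, separated by whitespace.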
def generate_rc(noun1_number, noun2_number, nouns1, nouns2, verbs1, verbs2, _id, complementizer=True):
if complementizer:
template = "The {} that the {} {} {}"
else:
template = "The {} the {} {} {}"
out_list = []
for (noun1s, noun1p) in nouns1:
for (noun2s, noun2p) in nouns2:
for (verb1s, verb1p) in verbs1:
for (verb2s, verb2p) in verbs2:
noun1 = noun1s if noun1_number == "singular" else noun1p
noun2 = noun2s if noun2_number == "singular" else noun2p
verb2 = verb2s if noun2_number == "singular" else verb2p
correct_verb = verb1s if noun1_number == "singular" else verb1p
incorrect_verb = verb1p if noun1_number == "singular" else verb1s
label = noun1_number + "_" + noun2_number
out_list.append([template.format(noun1, noun2, verb2, correct_verb), \
label, "correct", "id"+str(_id)])
out_list.append([template.format(noun1, noun2, verb2, incorrect_verb), \
label, "wrong", "id"+str(_id)])
_id += 1
return out_list, _id
def generate_within_rc(noun1_number, noun2_number, nouns1, nouns2, verbs1, _id, complementizer=True):
if complementizer:
template = "The {} that the {} {}"
else:
template = "The {} the {} {}"
out_list = []
for (noun1s, noun1p) in nouns1:
for (noun2s, noun2p) in nouns2:
for (verb1s, verb1p) in verbs1:
noun1 = noun1s if noun1_number == "singular" else noun1p
noun2 = noun2s if noun2_number == "singular" else noun2p
correct_verb = verb1s if noun2_number == "singular" else verb1p
incorrect_verb = verb1p if noun2_number == "singular" else verb1s
label = noun1_number + "_" + noun2_number
out_list.append([template.format(noun1, noun2, correct_verb), \
label, "correct", "id"+str(_id)])
out_list.append([template.format(noun1, noun2, incorrect_verb), \
label, "wrong", "id"+str(_id)])
_id += 1
return out_list, _id
nouns1 = get_nouns()
nouns2 = get_nouns2()
verbs1 = get_verbs()
verbs2 = get_verbs2()
out_list = []
out_list_no_that = []
_id = 1
_id_no_that = 1
for noun1_number in ("singular", "plural"):
for noun2_number in ("singular", "plural"):
this_list, _id = generate_within_rc(noun1_number, noun2_number, \
nouns1, nouns2, verbs1, _id)
this_list_no_that, _id_no_that = generate_within_rc(noun1_number, noun2_number, \
nouns1, nouns2, verbs1, _id_no_that, complementizer=False)
out_list.extend(this_list)
out_list_no_that.extend(this_list_no_that)
with open(os.path.join(PATH, "within_rc.txt"), "w") as rc_file:
writer = csv.writer(rc_file, delimiter="\t")
for out in out_list:
writer.writerow(out)
with open(os.path.join(PATH, "within_rc_no_that.txt"), "w") as rc_file:
writer = csv.writer(rc_file, delimiter="\t")
for out_no_that in out_list_no_that:
writer.writerow(out_no_that)
| 3852 | 37.148515 | 102 |
py
|
lm-intervention
|
lm-intervention-master/attention_figures4.py
|
"""Creates summary figure of various effects for attention intervention from JSON file"""
import json
import matplotlib as mpl
import numpy as np
import seaborn as sns
from matplotlib import pyplot as plt
sns.set()
import pandas as pd
import os
def main():
#models = ['distilgpt2', 'gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl']
models = ['gpt2']
model_to_name = {
'distilgpt2': 'distil',
'gpt2': 'small',
'gpt2-medium': 'medium',
'gpt2-large': 'large',
'gpt2-xl': 'xl'
}
sns.set_context("paper")
sns.set_style("white")
mpl.rcParams['hatch.linewidth'] = 0.3
palette = sns.color_palette()
filter = 'filtered'
#split = 'dev'
structures = ['simple', 'distractor', 'distractor_1', 'rc_singular', 'rc_plural']
te = []
nde_all = []
nie_all = []
nie_sum = []
model_names = []
for structure in structures:
for model_version in models:
fname = f"attention_results/{structure}/attention_intervention_{model_version}_{filter}.json"
with open(fname) as f:
data = json.load(f)
df = pd.DataFrame(data['results'])
# Convert to shape (num_examples X num_layers X num_heads)
indirect_by_head = np.stack(df['indirect_effect_head'].to_numpy())
mean_sum_indirect_effect = indirect_by_head.sum(axis=(1, 2)).mean()
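# NIE-sum: sum the head-level indirect effects within each example, then average over examples;
# TE, NDE-all and NIE-all are taken directly from the precomputed JSON summary.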
te.append(data['mean_total_effect'])
nde_all.append(data['mean_model_direct_effect'])
print(nde_all)
nie_all.append(data['mean_model_indirect_effect'])
print(nie_all)
nie_sum.append(mean_sum_indirect_effect)
print(nie_sum)
model_names.append(model_to_name[model_version])
# Plot stacked bar chart
plt.figure(num=1, figsize=(3, 1.2))
width = .29
inds = np.arange(len(models))
spacing = 0.015
p1 = plt.bar(inds, te, width, color=palette[2], linewidth=0, hatch='/////', edgecolor='darkgreen')
p2 = plt.bar(inds + width + spacing, nie_all, width, color=palette[4], linewidth=0, hatch='\\\\\\',
edgecolor='#4E456D')
p3 = plt.bar(inds + width + spacing, nde_all, width, bottom=nie_all, color=palette[1], linewidth=0,
hatch='----', edgecolor='#BB592D')
p4 = plt.bar(inds + 2 * (width + spacing), nie_sum, width, color=palette[3], linewidth=0, hatch='///',
edgecolor='darkred')
plt.gca().tick_params(axis='x', pad=0)
plt.gca().tick_params(axis='y', pad=0)
plt.gca().yaxis.labelpad = 3
plt.ylabel('Effect', size=9)
# plt.xticks(inds + .3, model_names, size=7)
for tick in plt.gca().xaxis.get_minor_ticks():
tick.label1.set_horizontalalignment('center')
plt.yticks(size=7)
leg = plt.legend((p1[0], p3[0], p2[0], p4[0]), ('TE', 'NDE-all', 'NIE-all', 'NIE-sum'), loc='upper left', fontsize=7)
for patch in leg.get_patches():
patch.set_height(7)
patch.set_y(-1)
sns.despine()
plt.subplots_adjust(left=0.08, right=0.99, top=0.99, bottom=0.15)
path = 'attention_figures/stacked_bars'
if not os.path.exists(path):
os.makedirs(path)
plt.savefig(f'{path}/{structure}_effect_bars.pdf', format='pdf')
plt.close()
if __name__ == '__main__':
main()
| 3438 | 35.2 | 125 |
py
|
lm-intervention
|
lm-intervention-master/neuron_experiment_multiple_templates_num_agreement.py
|
from datetime import datetime
import os
import sys
import random
from utils_num_agreement import convert_results_to_pd
from experiment_num_agreement import Intervention, Model
from transformers import (
GPT2Tokenizer, TransfoXLTokenizer, XLNetTokenizer, BertTokenizer
)
from vocab_utils import get_nouns, get_nouns2, get_verbs, get_verbs2, get_prepositions, \
get_preposition_nouns, get_adv1s, get_adv2s
import vocab_utils as vocab
'''
Run all the extraction for a model across many templates
'''
def get_intervention_types():
return ['indirect', 'direct']
def construct_templates():
# specify format of inputs. fill in with terminals later
templates = []
if attractor in ['singular', 'plural']:
for p in get_prepositions():
for ppns, ppnp in get_preposition_nouns():
ppn = ppns if attractor == 'singular' else ppnp
template = ' '.join(['The', '{}', p, 'the', ppn])
templates.append(template)
elif attractor in ('rc_singular', 'rc_plural', 'rc_singular_no_that', 'rc_plural_no_that'):
for noun2s, noun2p in get_nouns2():
noun2 = noun2s if attractor.startswith('rc_singular') else noun2p
for verb2s, verb2p in get_verbs2():
verb2 = verb2s if attractor.startswith('rc_singular') else verb2p
if attractor.endswith('no_that'):
template = ' '.join(['The', '{}', 'the', noun2, verb2])
else:
template = ' '.join(['The', '{}', 'that', 'the', noun2, verb2])
templates.append(template)
elif attractor in ('within_rc_singular', 'within_rc_plural', 'within_rc_singular_no_that', 'within_rc_plural_no_that'):
for ns, np in vocab.get_nouns():
noun = ns if attractor.startswith('within_rc_singular') else np
if attractor.endswith('no_that'):
template = ' '.join(['The', noun, 'the', '{}'])
else:
template = ' '.join(['The', noun, 'that', 'the', '{}'])
templates.append(template)
elif attractor == 'distractor':
for adv1 in get_adv1s():
for adv2 in get_adv2s():
templates.append(' '.join(['The', '{}', adv1, 'and', adv2]))
elif attractor == 'distractor_1':
for adv1 in get_adv1s():
templates.append(' '.join(['The', '{}', adv1]))
else: # defaults to simple agreement
templates = ['The {}']
return templates
def construct_interventions(tokenizer, DEVICE, attractor, seed, examples):
interventions = {}
all_word_count = 0
used_word_count = 0
templates = construct_templates()
for temp in templates:
if attractor.startswith('within_rc'):
for noun2s, noun2p in get_nouns2():
for v_singular, v_plural in vocab.get_verbs():
all_word_count += 1
try:
intervention_name = '_'.join([temp, noun2s, v_singular])
interventions[intervention_name] = Intervention(
tokenizer,
temp,
[noun2s, noun2p],
[v_singular, v_plural],
device=DEVICE)
used_word_count += 1
except Exception as e:
pass
else:
for ns, np in vocab.get_nouns():
for v_singular, v_plural in vocab.get_verbs():
all_word_count += 1
try:
intervention_name = '_'.join([temp, ns, v_singular])
interventions[intervention_name] = Intervention(
tokenizer,
temp,
[ns, np],
[v_singular, v_plural],
device=DEVICE)
used_word_count += 1
except Exception as e:
pass
print(f"\t Only used {used_word_count}/{all_word_count} nouns due to tokenizer")
if examples > 0 and len(interventions) >= examples: # randomly sample input sentences
random.seed(seed)
interventions = {k: v
for k, v in random.sample(interventions.items(), examples)}
return interventions
def run_all(model_type="gpt2", device="cuda", out_dir=".",
random_weights=False, attractor=None, seed=5, examples=100):
print("Model:", model_type)
# Set up all the potential combinations
intervention_types = get_intervention_types()
# Initialize Model and Tokenizer
# tokenizer = GPT2Tokenizer.from_pretrained(model_type)
model = Model(device=device, gpt2_version=model_type,
random_weights=random_weights)
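# Pick the tokenizer class matching the loaded architecture (GPT-2, Transformer-XL, XLNet, or BERT).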
tokenizer = (GPT2Tokenizer if model.is_gpt2 else
TransfoXLTokenizer if model.is_txl else
XLNetTokenizer if model.is_xlnet else
BertTokenizer).from_pretrained(model_type)
# Set up folder if it does not exist
dt_string = datetime.now().strftime("%Y%m%d")
folder_name = dt_string+"_neuron_intervention"
base_path = os.path.join(out_dir, "results", folder_name)
if random_weights:
base_path = os.path.join(base_path, "random")
if not os.path.exists(base_path):
os.makedirs(base_path)
interventions = construct_interventions(tokenizer, device, attractor, seed,
examples)
# Consider all the intervention types
for itype in intervention_types:
print("\t Running with intervention: {}".format(
itype))
# Run actual exp
intervention_results = model.neuron_intervention_experiment(
interventions, itype, alpha=1.0)
df = convert_results_to_pd(interventions, intervention_results)
# Generate file name
prefix = ['random'] if random_weights else []  # avoid shadowing the imported random module
fcomponents = prefix + [str(attractor), itype, model_type]
fname = "_".join(fcomponents)
# Finally, save each exp separately
df.to_csv(os.path.join(base_path, fname+".csv"))
if __name__ == "__main__":
if not (len(sys.argv) == 8):
print("USAGE: python ", sys.argv[0],
"<model> <device> <out_dir> <random_weights> <attractor> <seed> <examples>")
model = sys.argv[1] # distilgpt2, gpt2, gpt2-medium, gpt2-large, gpt2-xl
device = sys.argv[2] # cpu vs cuda
out_dir = sys.argv[3] # dir to write results
random_weights = sys.argv[4] == 'random' # true or false
attractor = sys.argv[5] # singular, plural or none
seed = int(sys.argv[6]) # to allow consistent sampling
examples = int(sys.argv[7]) # number of examples to try, 0 for all
run_all(model, device, out_dir, random_weights, attractor, seed, examples)
| 6895 | 42.64557 | 123 |
py
|
lm-intervention
|
lm-intervention-master/vocab_utils.py
|
#!/usr/bin/env python
# coding: utf-8
import pandas as pd
PATH = 'vocab/'
simple = pd.read_csv(PATH + 'simple.txt', sep=' |\t',
engine='python',
names=['The','noun','verb','number',
'grammaticality','id'])
nounpp = pd.read_csv(PATH + 'nounpp.txt', sep=' |\t|_',
engine='python',
names=['The', 'noun', 'preposition', 'the',
'pp_noun', 'verb', 'n_number',
'pp_number', 'grammaticality', 'id'])
adv = pd.read_csv(PATH + 'adv_conjunction.txt', sep=' |\t|_', engine='python',
names=['The','noun','adv1', 'and', 'adv2','verb','number',
'grammaticality','id'])
rc = pd.read_csv(PATH + 'rc.txt', sep=' |\t|_', engine='python',
names=['The','noun','that','the','noun2','verb2','verb','n1_number',
'n2_number', 'grammaticality','id'])
within_rc = pd.read_csv(PATH + 'within_rc.txt', sep=' |\t|_', engine='python',
names=['The','noun','that','the','noun2','verb','n1_number','n2_number',
'grammaticality','id'])
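# The vocabulary lists below are derived by de-duplicating columns of the template files read above.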
# Construct nouns
n_singular = nounpp['noun'][nounpp['n_number'] == 'singular']\
.drop_duplicates().reset_index(drop=True)
n_plural = nounpp['noun'][nounpp['n_number'] == 'plural']\
.drop_duplicates().reset_index(drop=True)
n_frame = {'n_singular':n_singular, 'n_plural':n_plural}
nouns = pd.DataFrame(n_frame)
n2_singular = within_rc['noun2'][within_rc['n2_number'] == 'singular']\
.drop_duplicates().reset_index(drop=True)
n2_plural = within_rc['noun2'][within_rc['n2_number'] == 'plural']\
.drop_duplicates().reset_index(drop=True)
n2_frame = {'n2_singular': n2_singular, 'n2_plural': n2_plural}
nouns2 = pd.DataFrame(n2_frame)
# Construct verbs
v_singular = nounpp['verb'][nounpp['n_number'] == 'singular']\
[nounpp['grammaticality'] == 'correct'].drop_duplicates()\
.reset_index(drop=True)
v_plural = nounpp['verb'][nounpp['n_number'] == 'singular']\
[nounpp['grammaticality'] == 'wrong'].drop_duplicates()\
.reset_index(drop=True)
v_frame = {'v_singular':v_singular, 'v_plural':v_plural}
verbs = pd.DataFrame(v_frame)
v2_singular = rc['verb2'][rc['n2_number'] == 'singular']\
.drop_duplicates().reset_index(drop=True)
v2_plural = rc['verb2'][rc['n2_number'] == 'plural']\
.drop_duplicates().reset_index(drop=True)
v2_frame = {'v2_singular': v2_singular, 'v2_plural': v2_plural}
verbs2 = pd.DataFrame(v2_frame)
# Construct prepositional nouns
ppn_singular = nounpp['pp_noun'][nounpp['pp_number'] == 'singular']\
.drop_duplicates().sort_values().reset_index(drop=True)
ppn_plural = nounpp['pp_noun'][nounpp['pp_number'] == 'plural']\
.drop_duplicates().sort_values().reset_index(drop=True)
ppn_frame = {'ppn_singular':ppn_singular, 'ppn_plural':ppn_plural}
ppns = pd.DataFrame(ppn_frame)
# Construct prepositions
prepositions = nounpp['preposition'].drop_duplicates()
# Construct adverbs
adv1s = adv['adv1'].drop_duplicates()
adv2s = adv['adv2'].drop_duplicates()
def get_nouns():
return [(s,p) for s, p in zip(nouns['n_singular'],
nouns['n_plural'])]
def get_nouns2():
return [(s,p) for s, p in zip(nouns2['n2_singular'],
nouns2['n2_plural'])]
def get_verbs():
return [(s,p) for s, p in zip(verbs['v_singular'],
verbs['v_plural'])]
def get_verbs2():
return [(s,p) for s, p in zip(verbs2['v2_singular'],
verbs2['v2_plural'])]
def get_preposition_nouns():
return [(s,p) for s, p in zip(ppns['ppn_singular'],
ppns['ppn_plural'])]
def get_prepositions():
return prepositions.tolist()
def make_template(noun, preposition, ppn):
return ' '.join([noun, preposition, 'the', ppn])
def get_adv1s():
return adv1s.tolist()
def get_adv2s():
return adv2s.tolist()
if __name__ == '__main__':
print(get_adv1s())
print(get_adv2s())
| 4098 | 32.876033 | 80 |
py
|