repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---|
pySDC | pySDC-master/pySDC/projects/parallelSDC/GeneralizedFisher_1D_FD_implicit_Jac.py | import scipy.sparse as sp
import numpy as np
from scipy.sparse.linalg import spsolve
from pySDC.implementations.problem_classes.GeneralizedFisher_1D_FD_implicit import generalized_fisher
# noinspection PyUnusedLocal
class generalized_fisher_jac(generalized_fisher):
def eval_jacobian(self, u):
"""
Evaluation of the Jacobian of the right-hand side
Args:
u: space values
Returns:
Jacobian matrix
"""
# noinspection PyTypeChecker
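        # Jacobian of the Fisher right-hand side f(u) = A u + lambda0^2 * u * (1 - u^nu):
        # dfdu = A + diag(lambda0^2 - lambda0^2 * (nu + 1) * u^nu)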
dfdu = self.A[1:-1, 1:-1] + sp.diags(
self.lambda0**2 - self.lambda0**2 * (self.nu + 1) * u**self.nu, offsets=0
)
return dfdu
def solve_system_jacobian(self, dfdu, rhs, factor, u0, t):
"""
Simple linear solver for (I-dtA)u = rhs
Args:
dfdu: the Jacobian of the RHS of the ODE
rhs: right-hand side for the linear system
factor: abbrev. for the node-to-node stepsize (or any other factor required)
u0: initial guess for the iterative solver (not used here so far)
t: current time (e.g. for time-dependent BCs)
Returns:
solution as mesh
"""
me = self.dtype_u((self.init[0], self.init[1], np.dtype('complex128')))
me[:] = spsolve(sp.eye(self.nvars) - factor * dfdu, rhs)
return me
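# --- usage sketch (not part of the original file) ---------------------------
# A minimal standalone check of the linear solve performed above; the 3x3
# Jacobian, right-hand side and factor below are made up for illustration.
if __name__ == "__main__":
    J_demo = sp.diags([-2.0, -2.0, -2.0])  # stand-in for dfdu
    rhs_demo = np.ones(3)
    factor_demo = 0.1  # stand-in for the node-to-node step size
    # solves (I - factor * dfdu) u = rhs, just like solve_system_jacobian
    u_demo = spsolve(sp.eye(3) - factor_demo * J_demo, rhs_demo)
    print(u_demo)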
| 1,368 | 28.76087 | 101 | py |
pySDC | pySDC-master/pySDC/projects/parallelSDC/ErrReductionHook.py | import numpy as np
from pySDC.core.Hooks import hooks
class err_reduction_hook(hooks):
def pre_iteration(self, step, level_number):
"""
Routine called before iteration starts
Args:
step (pySDC.Step.step): the current step
level_number (int): the current level number
"""
super(err_reduction_hook, self).pre_iteration(step, level_number)
L = step.levels[level_number]
if step.status.iter == 2 and np.isclose(L.time + L.dt, 0.1):
P = L.prob
err = []
for m in range(L.sweep.coll.num_nodes):
uex = P.u_exact(L.time + L.dt * L.sweep.coll.nodes[m])
err.append(abs(uex - L.u[m + 1]))
err_full = max(err)
self.add_to_stats(
process=step.status.slot,
time=L.time,
level=L.level_index,
iter=step.status.iter,
sweep=L.status.sweep,
type='error_pre_iteration',
value=err_full,
)
# print(L.time, step.status.iter, err_full)
def post_iteration(self, step, level_number):
"""
Routine called after each iteration
Args:
step (pySDC.Step.step): the current step
level_number (int): the current level number
"""
super(err_reduction_hook, self).post_iteration(step, level_number)
L = step.levels[level_number]
if step.status.iter == 2 and np.isclose(L.time + L.dt, 0.1):
P = L.prob
err = []
for m in range(L.sweep.coll.num_nodes):
uex = P.u_exact(L.time + L.dt * L.sweep.coll.nodes[m])
err.append(abs(uex - L.u[m + 1]))
err_full = max(err)
self.add_to_stats(
process=step.status.slot,
time=L.time,
level=L.level_index,
iter=step.status.iter,
sweep=L.status.sweep,
type='error_post_iteration',
value=err_full,
)
# print(L.time, step.status.iter, err_full)
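# --- usage note (not part of the original file) -----------------------------
# The hook is registered through the controller parameters, e.g.
#   controller_params['hook_class'] = err_reduction_hook
# and the recorded values are retrieved after the run via
#   get_sorted(stats, type='error_pre_iteration', sortby='iter')
#   get_sorted(stats, type='error_post_iteration', sortby='iter')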
| 2,170 | 31.402985 | 74 | py |
pySDC | pySDC-master/pySDC/projects/parallelSDC/BaseTransfer_MPI.py | import logging
import numpy as np
import scipy.sparse as sp
from pySDC.core.Errors import UnlockError
from pySDC.helpers.pysdc_helper import FrozenClass
# short helper class to add params as attributes
class _Pars(FrozenClass):
def __init__(self, pars):
self.finter = False
for k, v in pars.items():
setattr(self, k, v)
self._freeze()
class base_transfer_MPI(object):
"""
Standard base_transfer class
Attributes:
logger: custom logger for sweeper-related logging
        params (_Pars): parameter object containing the custom parameters passed by the user
fine (pySDC.Level.level): reference to the fine level
coarse (pySDC.Level.level): reference to the coarse level
"""
def __init__(self, fine_level, coarse_level, base_transfer_params, space_transfer_class, space_transfer_params):
"""
Initialization routine
Args:
fine_level (pySDC.Level.level): fine level connected with the base_transfer operations
coarse_level (pySDC.Level.level): coarse level connected with the base_transfer operations
base_transfer_params (dict): parameters for the base_transfer operations
space_transfer_class: class to perform spatial transfer
space_transfer_params (dict): parameters for the space_transfer operations
"""
self.params = _Pars(base_transfer_params)
# set up logger
self.logger = logging.getLogger('transfer')
# just copy by object
self.fine = fine_level
self.coarse = coarse_level
fine_grid = self.fine.sweep.coll.nodes
coarse_grid = self.coarse.sweep.coll.nodes
if len(fine_grid) == len(coarse_grid):
self.Pcoll = sp.eye(len(fine_grid)).toarray()
self.Rcoll = sp.eye(len(fine_grid)).toarray()
else:
raise NotImplementedError('require no reduction of collocation nodes')
# set up spatial transfer
self.space_transfer = space_transfer_class(
fine_prob=self.fine.prob, coarse_prob=self.coarse.prob, params=space_transfer_params
)
@staticmethod
def get_transfer_matrix_Q(f_nodes, c_nodes):
"""
Helper routine to quickly define transfer matrices between sets of nodes (fully Lagrangian)
Args:
f_nodes: fine nodes
c_nodes: coarse nodes
Returns:
matrix containing the interpolation weights
"""
nnodes_f = len(f_nodes)
nnodes_c = len(c_nodes)
tmat = np.zeros((nnodes_f, nnodes_c))
for i in range(nnodes_f):
xi = f_nodes[i]
for j in range(nnodes_c):
den = 1.0
num = 1.0
for k in range(nnodes_c):
if k == j:
continue
else:
den *= c_nodes[j] - c_nodes[k]
num *= xi - c_nodes[k]
tmat[i, j] = num / den
return tmat
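    # --- usage sketch (not part of the original file) -----------------------
    # The node values below are illustrative only:
    #   P = base_transfer_MPI.get_transfer_matrix_Q(np.array([0.2, 0.6, 1.0]),
    #                                               np.array([1.0 / 3.0, 1.0]))
    # Row i of P holds the Lagrange weights of the coarse nodes evaluated at
    # fine node i, so P @ u_coarse interpolates node values polynomially.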
def restrict(self):
"""
Space-time restriction routine
        The routine applies the spatial restriction operator to the fine values on the fine nodes, then reevaluates f
on the coarse level. This is used for the first part of the FAS correction tau via integration. The second part
is the integral over the fine values, restricted to the coarse level. Finally, possible tau corrections on the
fine level are restricted as well.
"""
# get data for easier access
F = self.fine
G = self.coarse
PG = G.prob
SF = F.sweep
SG = G.sweep
# only if the level is unlocked at least by prediction
if not F.status.unlocked:
raise UnlockError('fine level is still locked, cannot use data from there')
# restrict fine values in space
G.u[0] = self.space_transfer.restrict(F.u[0])
G.u[SG.rank + 1] = self.space_transfer.restrict(F.u[SF.rank + 1])
# re-evaluate f on coarse level
G.f[0] = PG.eval_f(G.u[0], G.time)
G.f[SG.rank + 1] = PG.eval_f(G.u[SG.rank + 1], G.time + G.dt * SG.coll.nodes[SG.rank])
# build coarse level tau correction part
tauG = G.sweep.integrate()
# build fine level tau correction part
tauF = F.sweep.integrate()
# restrict fine level tau correction part in space
tauFG = self.space_transfer.restrict(tauF)
# build tau correction
G.tau[SG.rank] = tauFG - tauG
if F.tau[SF.rank] is not None:
# restrict possible tau correction from fine in space
G.tau[SG.rank] += self.space_transfer.restrict(F.tau[SF.rank])
else:
pass
# save u and rhs evaluations for interpolation
G.uold[SG.rank + 1] = PG.dtype_u(G.u[SG.rank + 1])
G.fold[SG.rank + 1] = PG.dtype_f(G.f[SG.rank + 1])
# G.uold[0] = PG.dtype_u(G.u[0])
# G.fold[0] = PG.dtype_f(G.f[0])
# works as a predictor
G.status.unlocked = True
return None
def prolong(self):
"""
Space-time prolongation routine
This routine applies the spatial prolongation routine to the difference between the computed and the restricted
values on the coarse level and then adds this difference to the fine values as coarse correction.
"""
# get data for easier access
F = self.fine
G = self.coarse
PF = F.prob
SF = F.sweep
SG = G.sweep
        # only if the level is unlocked at least by prediction or restriction
if not G.status.unlocked:
raise UnlockError('coarse level is still locked, cannot use data from there')
# build coarse correction
# we need to update u0 here for the predictor step, since here the new values for the fine sweep are not
# received from the previous processor but interpolated from the coarse level.
# need to restrict F.u[0] again here, since it might have changed in PFASST
G.uold[0] = self.space_transfer.restrict(F.u[0])
# interpolate values in space first
F.u[SF.rank + 1] += self.space_transfer.prolong(G.u[SG.rank + 1] - G.uold[SG.rank + 1])
# re-evaluate f on fine level
F.f[0] = PF.eval_f(F.u[0], F.time)
F.f[SF.rank + 1] = PF.eval_f(F.u[SF.rank + 1], F.time + F.dt * SF.coll.nodes[SF.rank])
return None
def prolong_f(self):
"""
Space-time prolongation routine w.r.t. the rhs f
This routine applies the spatial prolongation routine to the difference between the computed and the restricted
values on the coarse level and then adds this difference to the fine values as coarse correction.
"""
# get data for easier access
F = self.fine
G = self.coarse
PG = G.prob
SF = F.sweep
SG = G.sweep
        # only if the level is unlocked at least by prediction or restriction
if not G.status.unlocked:
raise UnlockError('coarse level is still locked, cannot use data from there')
# build coarse correction
# need to restrict F.u[0] again here, since it might have changed in PFASST
G.uold[0] = self.space_transfer.restrict(F.u[0])
G.fold[0] = PG.eval_f(G.uold[0], G.time)
# interpolate values in space first
tmp_u = [self.space_transfer.prolong(G.u[0] - G.uold[0])]
tmp_f = [self.space_transfer.prolong(G.f[0] - G.fold[0])]
for m in range(1, SG.coll.num_nodes + 1):
tmp_u.append(self.space_transfer.prolong(G.u[m] - G.uold[m]))
tmp_f.append(self.space_transfer.prolong(G.f[m] - G.fold[m]))
# interpolate values in collocation
F.u[0] += tmp_u[0]
F.f[0] += tmp_f[0]
for n in range(1, SF.coll.num_nodes + 1):
for m in range(1, SG.coll.num_nodes + 1):
F.u[n] += self.Pcoll[n - 1, m - 1] * tmp_u[m]
F.f[n] += self.Pcoll[n - 1, m - 1] * tmp_f[m]
return None
| 8,162 | 33.588983 | 119 | py |
pySDC | pySDC-master/pySDC/projects/parallelSDC/preconditioner_playground.py | import os
import pickle
from collections import namedtuple
import numpy as np
import pySDC.helpers.plot_helper as plt_helper
from pySDC.helpers.stats_helper import get_sorted
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.implementations.problem_classes.AdvectionEquation_ND_FD import advectionNd
from pySDC.implementations.problem_classes.GeneralizedFisher_1D_FD_implicit import generalized_fisher
from pySDC.implementations.problem_classes.HeatEquation_ND_FD import heatNd_unforced
from pySDC.implementations.problem_classes.Van_der_Pol_implicit import vanderpol
from pySDC.implementations.sweeper_classes.generic_implicit import generic_implicit
ID = namedtuple('ID', ['setup', 'qd_type', 'param'])
def main():
# initialize level parameters (part I)
level_params = dict()
level_params['restol'] = 1e-08
# initialize sweeper parameters (part I)
sweeper_params = dict()
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['num_nodes'] = 3
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 100
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 30
# set up list of Q-delta types and setups
qd_list = ['LU', 'IE', 'IEpar', 'Qpar', 'MIN', 'MIN3', 'MIN_GT']
setup_list = [
('heat', 63, [10.0**i for i in range(-3, 3)]),
('advection', 64, [10.0**i for i in range(-3, 3)]),
('vanderpol', 2, [0.1 * 2**i for i in range(0, 10)]),
('fisher', 63, [2**i for i in range(-2, 3)]),
]
# setup_list = [('fisher', 63, [2 * i for i in range(1, 6)])]
# pre-fill results with lists of setups
results = dict()
for setup, nvars, param_list in setup_list:
results[setup] = (nvars, param_list)
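    # results now holds two kinds of keys: setup name -> (nvars, param_list),
    # and, filled in the loops below, ID namedtuples -> iteration counts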
# loop over all Q-delta matrix types
for qd_type in qd_list:
# assign implicit Q-delta matrix
sweeper_params['QI'] = qd_type
# loop over all setups
for setup, nvars, param_list in setup_list:
# initialize problem parameters (part I)
problem_params = dict()
if setup != 'vanderpol':
problem_params['nvars'] = nvars # number of degrees of freedom for each level
# loop over all parameters
for param in param_list:
# fill description for the controller
description = dict()
description['sweeper_class'] = generic_implicit # pass sweeper
description['sweeper_params'] = sweeper_params # pass sweeper parameters
description['step_params'] = step_params # pass step parameters
print('working on: %s - %s - %s' % (qd_type, setup, param))
# decide which setup to take
if setup == 'heat':
problem_params['nu'] = param
problem_params['freq'] = 2
problem_params['bc'] = 'dirichlet-zero' # boundary conditions
level_params['dt'] = 0.1
description['problem_class'] = heatNd_unforced
description['problem_params'] = problem_params
description['level_params'] = level_params # pass level parameters
elif setup == 'advection':
problem_params['c'] = param
problem_params['order'] = 2
problem_params['freq'] = 2
problem_params['stencil_type'] = 'center' # boundary conditions
problem_params['bc'] = 'periodic' # boundary conditions
level_params['dt'] = 0.1
description['problem_class'] = advectionNd
description['problem_params'] = problem_params
description['level_params'] = level_params # pass level parameters
elif setup == 'vanderpol':
problem_params['newton_tol'] = 1e-09
problem_params['newton_maxiter'] = 20
problem_params['mu'] = param
problem_params['u0'] = np.array([2.0, 0])
level_params['dt'] = 0.1
description['problem_class'] = vanderpol
description['problem_params'] = problem_params
description['level_params'] = level_params
elif setup == 'fisher':
problem_params['nu'] = 1
problem_params['lambda0'] = param
problem_params['newton_maxiter'] = 20
problem_params['newton_tol'] = 1e-10
problem_params['interval'] = (-5, 5)
level_params['dt'] = 0.01
description['problem_class'] = generalized_fisher
description['problem_params'] = problem_params
description['level_params'] = level_params
else:
print('Setup not implemented..', setup)
exit()
# instantiate controller
controller = controller_nonMPI(
num_procs=1, controller_params=controller_params, description=description
)
# get initial values on finest level
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(0)
# call main function to get things done...
uend, stats = controller.run(u0=uinit, t0=0, Tend=level_params['dt'])
# filter statistics by type (number of iterations)
iter_counts = get_sorted(stats, type='niter', sortby='time')
# just one time-step, grep number of iteration and store
niter = iter_counts[0][1]
id = ID(setup=setup, qd_type=qd_type, param=param)
results[id] = niter
assert len(results) == (6 + 6 + 10 + 5) * 7 + 4, 'ERROR: did not get all results, got %s' % len(results)
# write out for later visualization
file = open('data/parallelSDC_iterations_precond.pkl', 'wb')
pickle.dump(results, file)
assert os.path.isfile('data/parallelSDC_iterations_precond.pkl'), 'ERROR: pickle did not create file'
def plot_iterations():
"""
Helper routine to plot iteration counts
"""
file = open('data/parallelSDC_iterations_precond.pkl', 'rb')
results = pickle.load(file)
# find the lists/header required for plotting
qd_type_list = []
setup_list = []
for key in results.keys():
if isinstance(key, ID):
if key.qd_type not in qd_type_list:
qd_type_list.append(key.qd_type)
elif isinstance(key, str):
setup_list.append(key)
print('Found these type of preconditioners:', qd_type_list)
print('Found these setups:', setup_list)
    assert len(qd_type_list) == 7, 'ERROR: did not find seven preconditioners, got %s' % qd_type_list
    assert len(setup_list) == 4, 'ERROR: did not find four setups, got %s' % setup_list
qd_type_list = ['LU', 'IE', 'IEpar', 'Qpar', 'MIN', 'MIN3', 'MIN_GT']
marker_list = [None, None, 's', 'o', '^', 'd', 'x']
color_list = ['k', 'k', 'r', 'g', 'b', 'c', 'm']
plt_helper.setup_mpl()
# loop over setups and Q-delta types: one figure per setup, all Qds in one plot
for setup in setup_list:
plt_helper.newfig(textwidth=238.96, scale=0.89)
for qd_type, marker, color in zip(qd_type_list, marker_list, color_list):
niter = np.zeros(len(results[setup][1]))
for key in results.keys():
if isinstance(key, ID):
if key.setup == setup and key.qd_type == qd_type:
xvalue = results[setup][1].index(key.param)
niter[xvalue] = results[key]
if qd_type == 'LU':
ls = '--'
lw = 0.5
elif qd_type == 'IE':
ls = '-.'
lw = 0.5
else:
ls = '-'
lw = 1
plt_helper.plt.semilogx(
results[setup][1],
niter,
label=qd_type,
lw=lw,
linestyle=ls,
color=color,
marker=marker,
markeredgecolor='k',
)
if setup == 'heat':
xlabel = r'$\nu$'
elif setup == 'advection':
xlabel = r'$c$'
elif setup == 'fisher':
xlabel = r'$\lambda_0$'
elif setup == 'vanderpol':
xlabel = r'$\mu$'
else:
print('Setup not implemented..', setup)
exit()
plt_helper.plt.ylim([0, 60])
plt_helper.plt.legend(loc=2, ncol=1)
plt_helper.plt.ylabel('number of iterations')
plt_helper.plt.xlabel(xlabel)
plt_helper.plt.grid()
# save plot as PDF and PGF
fname = 'data/parallelSDC_preconditioner_' + setup
plt_helper.savefig(fname)
assert os.path.isfile(fname + '.pdf'), 'ERROR: plotting did not create PDF file'
# assert os.path.isfile(fname + '.pgf'), 'ERROR: plotting did not create PGF file'
assert os.path.isfile(fname + '.png'), 'ERROR: plotting did not create PNG file'
if __name__ == "__main__":
main()
plot_iterations()
| 9,443 | 37.080645 | 108 | py |
pySDC | pySDC-master/pySDC/projects/parallelSDC/nonlinear_playground.py | import os
import pickle
import numpy as np
import pySDC.helpers.plot_helper as plt_helper
from pySDC.helpers.stats_helper import get_sorted
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.implementations.sweeper_classes.generic_implicit import generic_implicit
from pySDC.projects.parallelSDC.GeneralizedFisher_1D_FD_implicit_Jac import generalized_fisher_jac
from pySDC.projects.parallelSDC.linearized_implicit_fixed_parallel import linearized_implicit_fixed_parallel
from pySDC.projects.parallelSDC.linearized_implicit_fixed_parallel_prec import linearized_implicit_fixed_parallel_prec
from pySDC.projects.parallelSDC.linearized_implicit_parallel import linearized_implicit_parallel
def main():
# initialize level parameters
level_params = dict()
level_params['restol'] = 1e-10
level_params['dt'] = 0.01
# This comes as read-in for the step class (this is optional!)
step_params = dict()
step_params['maxiter'] = 50
# This comes as read-in for the problem class
problem_params = dict()
problem_params['nu'] = 1
problem_params['nvars'] = 255
problem_params['lambda0'] = 5.0
problem_params['newton_maxiter'] = 50
problem_params['newton_tol'] = 1e-12
problem_params['interval'] = (-5, 5)
# This comes as read-in for the sweeper class
sweeper_params = dict()
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['num_nodes'] = 5
sweeper_params['QI'] = 'LU'
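    # node index at which the linearized sweepers freeze the Jacobian
    # (0 corresponds to the initial value of the time step)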
sweeper_params['fixed_time_in_jacobian'] = 0
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 30
# Fill description dictionary for easy hierarchy creation
description = dict()
description['problem_class'] = generalized_fisher_jac
description['problem_params'] = problem_params
description['sweeper_params'] = sweeper_params
description['level_params'] = level_params
description['step_params'] = step_params
sweeper_list = [
generic_implicit,
linearized_implicit_fixed_parallel_prec,
linearized_implicit_fixed_parallel,
linearized_implicit_parallel,
]
f = open('data/parallelSDC_nonlinear_out.txt', 'w')
uinit = None
uex = None
uend = None
P = None
# loop over the different sweepers and check results
for sweeper in sweeper_list:
description['sweeper_class'] = sweeper
# instantiate the controller
controller = controller_nonMPI(num_procs=1, controller_params=controller_params, description=description)
# setup parameters "in time"
t0 = 0
Tend = 0.1
# get initial values on finest level
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(t0)
# call main function to get things done...
uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
# compute exact solution and compare
uex = P.u_exact(Tend)
err = abs(uex - uend)
print('error at time %s: %s' % (Tend, err))
# filter statistics by type (number of iterations)
iter_counts = get_sorted(stats, type='niter', sortby='time')
# compute and print statistics
niters = np.array([item[1] for item in iter_counts])
out = ' Mean number of iterations: %4.2f' % np.mean(niters)
f.write(out + '\n')
print(out)
out = ' Range of values for number of iterations: %2i ' % np.ptp(niters)
f.write(out + '\n')
print(out)
out = ' Position of max/min number of iterations: %2i -- %2i' % (
int(np.argmax(niters)),
int(np.argmin(niters)),
)
f.write(out + '\n')
print(out)
out = ' Std and var for number of iterations: %4.2f -- %4.2f' % (float(np.std(niters)), float(np.var(niters)))
f.write(out + '\n')
print(out)
f.write('\n')
print()
assert err < 3.686e-05, 'ERROR: error is too high for sweeper %s, got %s' % (sweeper.__name__, err)
assert (
np.mean(niters) == 7.5 or np.mean(niters) == 4.0
), 'ERROR: mean number of iterations not as expected, got %s' % np.mean(niters)
f.close()
results = dict()
results['interval'] = problem_params['interval']
results['xvalues'] = np.array([(i + 1 - (P.nvars + 1) / 2) * P.dx for i in range(P.nvars)])
results['uinit'] = uinit
results['uend'] = uend
results['uex'] = uex
# write out for later visualization
file = open('data/parallelSDC_results_graphs.pkl', 'wb')
pickle.dump(results, file)
assert os.path.isfile('data/parallelSDC_results_graphs.pkl'), 'ERROR: pickle did not create file'
def plot_graphs():
"""
Helper function to plot graphs of initial and final values
"""
file = open('data/parallelSDC_results_graphs.pkl', 'rb')
results = pickle.load(file)
interval = results['interval']
xvalues = results['xvalues']
uinit = results['uinit']
uend = results['uend']
uex = results['uex']
plt_helper.setup_mpl()
# set up figure
plt_helper.newfig(textwidth=338.0, scale=1.0)
plt_helper.plt.xlabel('x')
plt_helper.plt.ylabel('f(x)')
plt_helper.plt.xlim((interval[0] - 0.01, interval[1] + 0.01))
plt_helper.plt.ylim((-0.1, 1.1))
plt_helper.plt.grid()
# plot
plt_helper.plt.plot(xvalues, uinit, 'r--', lw=1, label='initial')
plt_helper.plt.plot(xvalues, uend, 'bs', lw=1, markeredgecolor='k', label='computed')
plt_helper.plt.plot(xvalues, uex, 'g-', lw=1, label='exact')
plt_helper.plt.legend(loc=2, ncol=1)
# save plot as PDF, beautify
fname = 'data/parallelSDC_fisher'
plt_helper.savefig(fname)
assert os.path.isfile(fname + '.pdf'), 'ERROR: plotting did not create PDF file'
# assert os.path.isfile(fname + '.pgf'), 'ERROR: plotting did not create PGF file'
assert os.path.isfile(fname + '.png'), 'ERROR: plotting did not create PNG file'
if __name__ == "__main__":
# main()
plot_graphs()
| 6,097 | 32.505495 | 120 | py |
pySDC | pySDC-master/pySDC/projects/parallelSDC/generic_implicit_MPI.py | from mpi4py import MPI
from pySDC.core.Sweeper import sweeper
class generic_implicit_MPI(sweeper):
"""
Generic implicit sweeper, expecting lower triangular matrix type as input
Attributes:
QI: lower triangular matrix
"""
def __init__(self, params):
"""
Initialization routine for the custom sweeper
Args:
params: parameters for the sweeper
"""
if 'QI' not in params:
params['QI'] = 'IE'
# call parent's initialization routine
super(generic_implicit_MPI, self).__init__(params)
# get QI matrix
self.QI = self.get_Qdelta_implicit(self.coll, qd_type=self.params.QI)
self.rank = self.params.comm.Get_rank()
def integrate(self):
"""
Integrates the right-hand side
        Returns:
            dtype_u: the integral at this rank's collocation node
"""
# get current level and problem description
L = self.level
P = L.prob
me = P.dtype_u(P.init, val=0.0)
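        # each rank contributes dt * Q[m+1, rank+1] * f at its own node; the
        # reductions leave the m-th entry of Q F(u) on rank m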
for m in range(self.coll.num_nodes):
if m == self.rank:
self.params.comm.Reduce(
L.dt * self.coll.Qmat[m + 1, self.rank + 1] * L.f[self.rank + 1], me, root=m, op=MPI.SUM
)
else:
self.params.comm.Reduce(
L.dt * self.coll.Qmat[m + 1, self.rank + 1] * L.f[self.rank + 1], None, root=m, op=MPI.SUM
)
return me
def update_nodes(self):
"""
Update the u- and f-values at the collocation nodes -> corresponds to a single sweep over all nodes
Returns:
None
"""
# get current level and problem description
L = self.level
P = L.prob
# only if the level has been touched before
assert L.status.unlocked
# gather all terms which are known already (e.g. from the previous iteration)
# this corresponds to u0 + QF(u^k) - QdF(u^k) + tau
# get QF(u^k)
rhs = self.integrate()
rhs -= L.dt * self.QI[self.rank + 1, self.rank + 1] * L.f[self.rank + 1]
# add initial value
rhs += L.u[0]
# add tau if associated
if L.tau[self.rank] is not None:
rhs += L.tau[self.rank]
# build rhs, consisting of the known values from above and new values from previous nodes (at k+1)
# implicit solve with prefactor stemming from the diagonal of Qd
L.u[self.rank + 1] = P.solve_system(
rhs,
L.dt * self.QI[self.rank + 1, self.rank + 1],
L.u[self.rank + 1],
L.time + L.dt * self.coll.nodes[self.rank],
)
# update function values
L.f[self.rank + 1] = P.eval_f(L.u[self.rank + 1], L.time + L.dt * self.coll.nodes[self.rank])
# indicate presence of new values at this level
L.status.updated = True
return None
def compute_end_point(self):
"""
Compute u at the right point of the interval
        The value uend computed here is a full evaluation of the Picard formulation unless do_coll_update==False
Returns:
None
"""
# get current level and problem description
L = self.level
# check if Mth node is equal to right point and do_coll_update is false, perform a simple copy
if self.coll.right_is_node and not self.params.do_coll_update:
# a copy is sufficient
L.uend = self.params.comm.bcast(L.u[self.rank + 1], root=self.params.comm.Get_size() - 1)
else:
raise NotImplementedError('require last node to be identical with right interval boundary')
return None
def compute_residual(self, stage=None):
"""
Computation of the residual using the collocation matrix Q
Args:
stage (str): The current stage of the step the level belongs to
"""
# get current level and problem description
L = self.level
# Check if we want to skip the residual computation to gain performance
# Keep in mind that skipping any residual computation is likely to give incorrect outputs of the residual!
if stage in self.params.skip_residual_computation:
L.status.residual = 0.0 if L.status.residual is None else L.status.residual
return None
# check if there are new values (e.g. from a sweep)
# assert L.status.updated
# compute the residual for each node
# build QF(u)
res = self.integrate()
res += L.u[0] - L.u[self.rank + 1]
# add tau if associated
if L.tau[self.rank] is not None:
res += L.tau[self.rank]
# use abs function from data type here
res_norm = abs(res)
# find maximal residual over the nodes
L.status.residual = self.params.comm.allreduce(res_norm, op=MPI.MAX)
# indicate that the residual has seen the new values
L.status.updated = False
return None
def predict(self):
"""
Predictor to fill values at nodes before first sweep
Default prediction for the sweepers, only copies the values to all collocation nodes
and evaluates the RHS of the ODE there
"""
# get current level and problem description
L = self.level
P = L.prob
# evaluate RHS at left point
L.f[0] = P.eval_f(L.u[0], L.time)
if self.params.initial_guess == 'spread':
L.u[self.rank + 1] = P.dtype_u(L.u[0])
L.f[self.rank + 1] = P.eval_f(L.u[self.rank + 1], L.time + L.dt * self.coll.nodes[self.rank])
else:
L.u[self.rank + 1] = P.dtype_u(init=P.init, val=0.0)
L.f[self.rank + 1] = P.dtype_f(init=P.init, val=0.0)
# indicate that this level is now ready for sweeps
L.status.unlocked = True
L.status.updated = True
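# --- usage note (not part of the original file) -----------------------------
# This sweeper expects one MPI rank per collocation node; the communicator is
# passed in via the sweeper parameters, e.g.
#   sweeper_params['comm'] = MPI.COMM_WORLD
#   sweeper_params['num_nodes'] = MPI.COMM_WORLD.Get_size()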
| 6,050 | 30.515625 | 114 | py |
pySDC | pySDC-master/pySDC/projects/parallelSDC/AllenCahn_parallel.py | import subprocess
import os
import numpy as np
from mpi4py import MPI
from pySDC.helpers.stats_helper import get_sorted
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.implementations.problem_classes.AllenCahn_2D_FD import allencahn_fullyimplicit
from pySDC.implementations.sweeper_classes.generic_implicit import generic_implicit
from pySDC.implementations.transfer_classes.TransferMesh_FFT2D import mesh_to_mesh_fft2d
from pySDC.playgrounds.Allen_Cahn.AllenCahn_monitor import monitor
from pySDC.projects.parallelSDC.BaseTransfer_MPI import base_transfer_MPI
from pySDC.projects.parallelSDC.generic_implicit_MPI import generic_implicit_MPI
# http://www.personal.psu.edu/qud2/Res/Pre/dz09sisc.pdf
def run_variant(variant=None):
"""
Routine to run a particular variant
Args:
variant (str): string describing the variant
"""
# initialize level parameters
level_params = dict()
level_params['restol'] = 1e-07
level_params['dt'] = 1e-03 / 2
level_params['nsweeps'] = 1
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['num_nodes'] = 3
sweeper_params['initial_guess'] = 'zero'
# This comes as read-in for the problem class
problem_params = dict()
problem_params['nu'] = 2
problem_params['eps'] = 0.04
problem_params['newton_maxiter'] = 100
problem_params['newton_tol'] = 1e-08
problem_params['lin_tol'] = 1e-09
problem_params['lin_maxiter'] = 100
problem_params['radius'] = 0.25
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 50
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 30
controller_params['hook_class'] = monitor
# fill description dictionary for easy step instantiation
description = dict()
description['problem_class'] = allencahn_fullyimplicit
description['level_params'] = level_params # pass level parameters
description['step_params'] = step_params # pass step parameters
do_print = True
# add stuff based on variant
if variant == 'sl_serial':
maxmeaniters = 5.0
sweeper_params['QI'] = ['LU']
problem_params['nvars'] = [(128, 128)]
description['problem_params'] = problem_params # pass problem parameters
description['sweeper_class'] = generic_implicit # pass sweeper
description['sweeper_params'] = sweeper_params # pass sweeper parameters
elif variant == 'sl_parallel':
maxmeaniters = 5.12
assert MPI.COMM_WORLD.Get_size() == sweeper_params['num_nodes']
sweeper_params['QI'] = ['MIN3']
sweeper_params['comm'] = MPI.COMM_WORLD
problem_params['nvars'] = [(128, 128)]
description['problem_params'] = problem_params # pass problem parameters
description['sweeper_class'] = generic_implicit_MPI # pass sweeper
description['sweeper_params'] = sweeper_params # pass sweeper parameters
do_print = MPI.COMM_WORLD.Get_rank() == 0
elif variant == 'ml_serial':
maxmeaniters = 3.125
sweeper_params['QI'] = ['LU']
problem_params['nvars'] = [(128, 128), (64, 64)]
description['space_transfer_class'] = mesh_to_mesh_fft2d
description['problem_params'] = problem_params # pass problem parameters
description['sweeper_class'] = generic_implicit # pass sweeper
description['sweeper_params'] = sweeper_params # pass sweeper parameters
elif variant == 'ml_parallel':
assert MPI.COMM_WORLD.Get_size() == sweeper_params['num_nodes']
maxmeaniters = 4.25
sweeper_params['QI'] = ['MIN3']
sweeper_params['comm'] = MPI.COMM_WORLD
problem_params['nvars'] = [(128, 128), (64, 64)]
description['problem_params'] = problem_params # pass problem parameters
description['sweeper_class'] = generic_implicit_MPI # pass sweeper
description['sweeper_params'] = sweeper_params # pass sweeper parameters
description['space_transfer_class'] = mesh_to_mesh_fft2d
description['base_transfer_class'] = base_transfer_MPI
do_print = MPI.COMM_WORLD.Get_rank() == 0
else:
raise NotImplementedError('Wrong variant specified, got %s' % variant)
if do_print:
out = 'Working on %s variant...' % variant
print(out)
# setup parameters "in time"
t0 = 0
Tend = 0.004
# instantiate controller
controller = controller_nonMPI(num_procs=1, controller_params=controller_params, description=description)
# get initial values on finest level
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(t0)
# call main function to get things done...
uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
    # filter statistics by type (number of iterations)
iter_counts = get_sorted(stats, type='niter', sortby='time')
# compute and print statistics
niters = np.array([item[1] for item in iter_counts])
if do_print:
out = ' Mean number of iterations: %4.2f' % np.mean(niters)
assert np.mean(niters) <= maxmeaniters, 'ERROR: number of iterations is too high, got %s instead of %s' % (
np.mean(niters),
maxmeaniters,
)
print(out)
out = ' Range of values for number of iterations: %2i ' % np.ptp(niters)
print(out)
out = ' Position of max/min number of iterations: %2i -- %2i' % (
int(np.argmax(niters)),
int(np.argmin(niters)),
)
print(out)
out = ' Std and var for number of iterations: %4.2f -- %4.2f' % (float(np.std(niters)), float(np.var(niters)))
print(out)
print(' Iteration count (nonlinear/linear): %i / %i' % (P.newton_itercount, P.lin_itercount))
print(
' Mean Iteration count per call: %4.2f / %4.2f'
% (P.newton_itercount / max(P.newton_ncalls, 1), P.lin_itercount / max(P.lin_ncalls, 1))
)
timing = get_sorted(stats, type='timing_run', sortby='time')
print('Time to solution: %6.4f sec.' % timing[0][1])
return None
def main():
"""
Main driver
"""
run_variant(variant='sl_serial')
print()
run_variant(variant='ml_serial')
print()
my_env = os.environ.copy()
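    # make the local package importable and enable coverage collection in the
    # MPI subprocesses spawned below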
my_env['PYTHONPATH'] = '../../..:.'
my_env['COVERAGE_PROCESS_START'] = 'pyproject.toml'
cmd = (
"mpirun -np 3 python -c \"from pySDC.projects.parallelSDC.AllenCahn_parallel import *; "
"run_variant(\'sl_parallel\');\""
)
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, shell=True)
p.wait()
(output, err) = p.communicate()
print(output)
cmd = (
"mpirun -np 3 python -c \"from pySDC.projects.parallelSDC.AllenCahn_parallel import *; "
"run_variant(\'ml_parallel\');\""
)
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, shell=True)
p.wait()
(output, err) = p.communicate()
print(output)
if __name__ == "__main__":
main()
| 7,251 | 35.079602 | 120 | py |
pySDC | pySDC-master/pySDC/projects/parallelSDC/linearized_implicit_fixed_parallel_prec.py | import numpy as np
from pySDC.projects.parallelSDC.linearized_implicit_fixed_parallel import linearized_implicit_fixed_parallel
class linearized_implicit_fixed_parallel_prec(linearized_implicit_fixed_parallel):
"""
Custom sweeper class, implements Sweeper.py
Attributes:
D: eigenvalues of the QI
"""
def __init__(self, params):
"""
Initialization routine for the custom sweeper
Args:
params: parameters for the sweeper
"""
if 'fixed_time_in_jacobian' not in params:
params['fixed_time_in_jacobian'] = 0
        # deliberately call the grandparent's initialization routine, so that
        # D and V below are computed from the preconditioner QI rather than Q
super(linearized_implicit_fixed_parallel, self).__init__(params)
assert self.params.fixed_time_in_jacobian in range(self.coll.num_nodes + 1), (
"ERROR: fixed_time_in_jacobian is too small or too large, got %s" % self.params.fixed_time_in_jacobian
)
self.D, self.V = np.linalg.eig(self.QI[1:, 1:])
self.Vi = np.linalg.inv(self.V)
| 1,033 | 29.411765 | 114 | py |
pySDC | pySDC-master/pySDC/projects/parallelSDC/minimization.py | import matplotlib
import matplotlib.pylab as plt
import numpy as np
import scipy.optimize as opt
from pySDC.core.Collocation import CollBase
def main():
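    # spectral radius of I - diag(x) * Q: the quantity minimized to construct
    # diagonal ('MIN'-type) preconditioners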
def rho(x):
return max(abs(np.linalg.eigvals(np.eye(M) - np.diag([x[i] for i in range(M)]).dot(coll.Qmat[1:, 1:]))))
M = 2
coll = CollBase(M, 0, 1, quad_type='RADAU-RIGHT')
x0 = np.ones(M)
d = opt.minimize(rho, x0, method='Nelder-Mead')
print(d)
numsteps = 800
xdim = np.linspace(0, 8, numsteps)
ydim = np.linspace(0, 13, numsteps)
minfield = np.zeros((len(xdim), len(ydim)))
for idx, x in enumerate(xdim):
for idy, y in enumerate(ydim):
minfield[idx, idy] = max(abs(np.linalg.eigvals(np.eye(M) - np.diag([x, y]).dot(coll.Qmat[1:, 1:]))))
# Set up plotting parameters
params = {
'legend.fontsize': 20,
'figure.figsize': (12, 8),
'axes.labelsize': 20,
'axes.titlesize': 20,
'xtick.labelsize': 16,
'ytick.labelsize': 16,
'lines.linewidth': 3,
}
plt.rcParams.update(params)
matplotlib.style.use('classic')
plt.figure()
plt.pcolor(xdim, ydim, minfield.T, cmap='Reds', vmin=0, vmax=1)
plt.text(d.x[0], d.x[1], 'X', horizontalalignment='center', verticalalignment='center')
plt.xlim((min(xdim), max(xdim)))
plt.ylim((min(ydim), max(ydim)))
plt.xlabel('component 1')
plt.ylabel('component 2')
cbar = plt.colorbar()
cbar.set_label('spectral radius')
fname = 'data/parallelSDC_minimizer_full.png'
plt.savefig(fname, bbox_inches='tight')
plt.figure()
xdim_part = xdim[int(0.25 * numsteps) : int(0.75 * numsteps) + 1]
ydim_part = ydim[0 : int(0.25 * numsteps)]
minfield_part = minfield[int(0.25 * numsteps) : int(0.75 * numsteps) + 1, 0 : int(0.25 * numsteps)]
plt.pcolor(xdim_part, ydim_part, minfield_part.T, cmap='Reds', vmin=0, vmax=1)
plt.text(d.x[0], d.x[1], 'X', horizontalalignment='center', verticalalignment='center')
plt.xlim((min(xdim_part), max(xdim_part)))
plt.ylim((min(ydim_part), max(ydim_part)))
plt.xlabel('component 1')
plt.ylabel('component 2')
cbar = plt.colorbar()
cbar.set_label('spectral radius')
fname = 'data/parallelSDC_minimizer_zoom.png'
plt.savefig(fname, bbox_inches='tight')
if __name__ == "__main__":
main()
| 2,352 | 29.960526 | 112 | py |
pySDC | pySDC-master/pySDC/projects/parallelSDC/linearized_implicit_parallel.py | import numpy as np
from pySDC.implementations.sweeper_classes.generic_implicit import generic_implicit
class linearized_implicit_parallel(generic_implicit):
"""
Parallel sweeper using Newton for linearization
"""
def __init__(self, params):
"""
Initialization routine for the custom sweeper
Args:
params: parameters for the sweeper
"""
if 'fixed_time_in_jacobian' not in params:
params['fixed_time_in_jacobian'] = 0
# call parent's initialization routine
super(linearized_implicit_parallel, self).__init__(params)
self.D, self.V = np.linalg.eig(self.QI[1:, 1:])
self.Vi = np.linalg.inv(self.V)
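        # QI = V * diag(D) * V^{-1}: sweeping in the eigenbasis of QI
        # decouples the implicit solves across the collocation nodes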
def update_nodes(self):
"""
Update the u- and f-values at the collocation nodes -> corresponds to a single sweep over all nodes
Returns:
None
"""
# get current level and problem description
L = self.level
P = L.prob
# only if the level has been touched before
assert L.status.unlocked
# get number of collocation nodes for easier access
M = self.coll.num_nodes
# form Jacobian on each node
dfdu = []
for m in range(M + 1):
dfdu.append(P.eval_jacobian(L.u[m]))
# form collocation problem
Gu = self.integrate()
for m in range(M):
Gu[m] -= L.u[m + 1] - L.u[0]
# transform collocation problem forward
Guv = []
for m in range(M):
Guv.append(P.dtype_u((P.init[0], P.init[1], np.dtype('complex128')), val=0.0 + 0.0j))
for j in range(M):
Guv[m] += self.Vi[m, j] * Gu[j]
# solve implicit system with Jacobians
uv = []
for m in range(M): # hell yeah, this is parallel!!
uv.append(
P.solve_system_jacobian(
dfdu[m], Guv[m], L.dt * self.D[m], L.u[m + 1], L.time + L.dt * self.coll.nodes[m]
)
)
# transform solution backward
for m in range(M):
tmp = P.dtype_u((P.init[0], P.init[1], np.dtype('complex128')), val=0.0 + 0.0j)
for j in range(M):
tmp += self.V[m, j] * uv[j]
L.u[m + 1][:] += np.real(tmp)
# evaluate f
for m in range(M): # hell yeah, this is parallel!!
L.f[m + 1] = P.eval_f(L.u[m + 1], L.time + L.dt * self.coll.nodes[m])
# indicate presence of new values at this level
L.status.updated = True
return None
| 2,595 | 28.83908 | 107 | py |
pySDC | pySDC-master/pySDC/projects/parallelSDC/__init__.py | 0 | 0 | 0 | py |
|
pySDC | pySDC-master/pySDC/projects/parallelSDC/newton_vs_sdc.py | import os
import pickle
import pySDC.helpers.plot_helper as plt_helper
from pySDC.helpers.stats_helper import get_sorted
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.implementations.sweeper_classes.generic_implicit import generic_implicit
from pySDC.projects.parallelSDC.ErrReductionHook import err_reduction_hook
from pySDC.projects.parallelSDC.GeneralizedFisher_1D_FD_implicit_Jac import generalized_fisher_jac
from pySDC.projects.parallelSDC.linearized_implicit_fixed_parallel import linearized_implicit_fixed_parallel
from pySDC.projects.parallelSDC.linearized_implicit_fixed_parallel_prec import linearized_implicit_fixed_parallel_prec
def main():
# initialize level parameters
level_params = dict()
level_params['restol'] = 1e-12
# This comes as read-in for the step class (this is optional!)
step_params = dict()
step_params['maxiter'] = 20
# This comes as read-in for the problem class
problem_params = dict()
problem_params['nu'] = 1
problem_params['nvars'] = 2047
problem_params['lambda0'] = 5.0
problem_params['newton_maxiter'] = 50
problem_params['newton_tol'] = 1e-12
problem_params['interval'] = (-5, 5)
# This comes as read-in for the sweeper class
sweeper_params = dict()
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['num_nodes'] = 5
sweeper_params['QI'] = 'LU'
sweeper_params['fixed_time_in_jacobian'] = 0
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 30
controller_params['hook_class'] = err_reduction_hook
# Fill description dictionary for easy hierarchy creation
description = dict()
description['problem_class'] = generalized_fisher_jac
description['problem_params'] = problem_params
description['sweeper_params'] = sweeper_params
description['step_params'] = step_params
# setup parameters "in time"
t0 = 0
Tend = 0.1
sweeper_list = [generic_implicit, linearized_implicit_fixed_parallel, linearized_implicit_fixed_parallel_prec]
dt_list = [Tend / 2**i for i in range(1, 5)]
results = dict()
results['sweeper_list'] = [sweeper.__name__ for sweeper in sweeper_list]
results['dt_list'] = dt_list
# loop over the different sweepers and check results
for sweeper in sweeper_list:
description['sweeper_class'] = sweeper
error_reduction = []
for dt in dt_list:
print('Working with sweeper %s and dt = %s...' % (sweeper.__name__, dt))
level_params['dt'] = dt
description['level_params'] = level_params
# instantiate the controller
controller = controller_nonMPI(num_procs=1, controller_params=controller_params, description=description)
# get initial values on finest level
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(t0)
# call main function to get things done...
uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
# filter statistics
error_pre = get_sorted(stats, type='error_pre_iteration', sortby='iter')[0][1]
error_post = get_sorted(stats, type='error_post_iteration', sortby='iter')[0][1]
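            # the ratio of the errors after and before iteration 2 measures
            # the contraction achieved by a single sweep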
error_reduction.append(error_post / error_pre)
print('error and reduction rate at time %s: %6.4e -- %6.4e' % (Tend, error_post, error_reduction[-1]))
results[sweeper.__name__] = error_reduction
print()
file = open('data/error_reduction_data.pkl', 'wb')
pickle.dump(results, file)
file.close()
def plot_graphs(cwd=''):
"""
Helper function to plot graphs of initial and final values
Args:
cwd (str): current working directory
"""
plt_helper.mpl.style.use('classic')
file = open(cwd + 'data/error_reduction_data.pkl', 'rb')
results = pickle.load(file)
sweeper_list = results['sweeper_list']
dt_list = results['dt_list']
color_list = ['red', 'blue', 'green']
marker_list = ['o', 's', 'd']
label_list = []
for sweeper in sweeper_list:
if sweeper == 'generic_implicit':
label_list.append('SDC')
elif sweeper == 'linearized_implicit_fixed_parallel':
label_list.append('Simplified Newton')
elif sweeper == 'linearized_implicit_fixed_parallel_prec':
label_list.append('Inexact Newton')
setups = zip(sweeper_list, color_list, marker_list, label_list)
plt_helper.setup_mpl()
plt_helper.newfig(textwidth=238.96, scale=0.89)
for sweeper, color, marker, label in setups:
plt_helper.plt.loglog(
dt_list, results[sweeper], lw=1, ls='-', color=color, marker=marker, markeredgecolor='k', label=label
)
plt_helper.plt.loglog(dt_list, [dt * 2 for dt in dt_list], lw=0.5, ls='--', color='k', label='linear')
plt_helper.plt.loglog(
dt_list, [dt * dt / dt_list[0] * 2 for dt in dt_list], lw=0.5, ls='-.', color='k', label='quadratic'
)
plt_helper.plt.xlabel('dt')
plt_helper.plt.ylabel('error reduction')
plt_helper.plt.grid()
# ax.set_xticks(dt_list, dt_list)
plt_helper.plt.xticks(dt_list, dt_list)
plt_helper.plt.legend(loc=1, ncol=1)
plt_helper.plt.gca().invert_xaxis()
plt_helper.plt.xlim([dt_list[0] * 1.1, dt_list[-1] / 1.1])
plt_helper.plt.ylim([4e-03, 1e0])
# save plot, beautify
fname = 'data/parallelSDC_fisher_newton'
plt_helper.savefig(fname)
assert os.path.isfile(fname + '.pdf'), 'ERROR: plotting did not create PDF file'
# assert os.path.isfile(fname + '.pgf'), 'ERROR: plotting did not create PGF file'
assert os.path.isfile(fname + '.png'), 'ERROR: plotting did not create PNG file'
if __name__ == "__main__":
# main()
plot_graphs()
| 5,861 | 34.101796 | 118 | py |
pySDC | pySDC-master/pySDC/projects/parallelSDC/preconditioner_playground_MPI.py | import os
import pickle
from collections import namedtuple
import numpy as np
from mpi4py import MPI
import pySDC.helpers.plot_helper as plt_helper
from pySDC.helpers.stats_helper import get_sorted
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.implementations.problem_classes.AdvectionEquation_ND_FD import advectionNd
from pySDC.implementations.problem_classes.GeneralizedFisher_1D_FD_implicit import generalized_fisher
from pySDC.implementations.problem_classes.HeatEquation_ND_FD import heatNd_unforced
from pySDC.implementations.problem_classes.Van_der_Pol_implicit import vanderpol
from pySDC.projects.parallelSDC.generic_implicit_MPI import generic_implicit_MPI
# from pySDC.projects.parallelSDC.BaseTransfer_MPI import base_transfer_mpi
ID = namedtuple('ID', ['setup', 'qd_type', 'param'])
def main(comm=None):
# initialize level parameters (part I)
level_params = dict()
level_params['restol'] = 1e-08
# initialize sweeper parameters (part I)
sweeper_params = dict()
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['num_nodes'] = comm.Get_size()
sweeper_params['comm'] = comm
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 100
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 30
# set up list of Q-delta types and setups
qd_list = ['IEpar', 'Qpar', 'MIN', 'MIN3', 'MIN_GT']
setup_list = [
('heat', 63, [10.0**i for i in range(-3, 3)]),
('advection', 64, [10.0**i for i in range(-3, 3)]),
('vanderpol', 2, [0.1 * 2**i for i in range(0, 10)]),
('fisher', 63, [2**i for i in range(-2, 3)]),
]
# setup_list = [('fisher', 63, [2 * i for i in range(1, 6)])]
# pre-fill results with lists of setups
results = dict()
for setup, nvars, param_list in setup_list:
results[setup] = (nvars, param_list)
# loop over all Q-delta matrix types
for qd_type in qd_list:
# assign implicit Q-delta matrix
sweeper_params['QI'] = qd_type
# loop over all setups
for setup, nvars, param_list in setup_list:
# initialize problem parameters (part I)
problem_params = dict()
if setup != 'vanderpol':
problem_params['nvars'] = nvars # number of degrees of freedom for each level
# loop over all parameters
for param in param_list:
# fill description for the controller
description = dict()
description['sweeper_class'] = generic_implicit_MPI # pass sweeper
description['sweeper_params'] = sweeper_params # pass sweeper parameters
description['step_params'] = step_params # pass step parameters
# description['base_transfer_class'] = base_transfer_mpi
print('working on: %s - %s - %s' % (qd_type, setup, param))
# decide which setup to take
if setup == 'heat':
problem_params['nu'] = param
problem_params['freq'] = 2
problem_params['bc'] = 'dirichlet-zero' # boundary conditions
level_params['dt'] = 0.1
description['problem_class'] = heatNd_unforced
description['problem_params'] = problem_params
description['level_params'] = level_params # pass level parameters
elif setup == 'advection':
problem_params['c'] = param
problem_params['order'] = 2
problem_params['freq'] = 2
problem_params['stencil_type'] = 'center' # boundary conditions
problem_params['bc'] = 'periodic' # boundary conditions
level_params['dt'] = 0.1
description['problem_class'] = advectionNd
description['problem_params'] = problem_params
description['level_params'] = level_params # pass level parameters
elif setup == 'vanderpol':
problem_params['newton_tol'] = 1e-09
problem_params['newton_maxiter'] = 20
problem_params['mu'] = param
problem_params['u0'] = np.array([2.0, 0])
level_params['dt'] = 0.1
description['problem_class'] = vanderpol
description['problem_params'] = problem_params
description['level_params'] = level_params
elif setup == 'fisher':
problem_params['nu'] = 1
problem_params['lambda0'] = param
problem_params['newton_maxiter'] = 20
problem_params['newton_tol'] = 1e-10
problem_params['interval'] = (-5, 5)
level_params['dt'] = 0.01
description['problem_class'] = generalized_fisher
description['problem_params'] = problem_params
description['level_params'] = level_params
else:
print('Setup not implemented..', setup)
exit()
# instantiate controller
controller = controller_nonMPI(
num_procs=1, controller_params=controller_params, description=description
)
# get initial values on finest level
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(0)
# call main function to get things done...
uend, stats = controller.run(u0=uinit, t0=0, Tend=level_params['dt'])
# filter statistics by type (number of iterations)
iter_counts = get_sorted(stats, type='niter', sortby='time')
# just one time-step, grep number of iteration and store
niter = iter_counts[0][1]
id = ID(setup=setup, qd_type=qd_type, param=param)
results[id] = niter
assert len(results) == (6 + 6 + 10 + 5) * 5 + 4, 'ERROR: did not get all results, got %s' % len(results)
if comm.Get_rank() == 0:
# write out for later visualization
file = open('data/parallelSDC_iterations_precond_MPI.pkl', 'wb')
pickle.dump(results, file)
assert os.path.isfile('data/parallelSDC_iterations_precond_MPI.pkl'), 'ERROR: pickle did not create file'
def plot_iterations():
"""
Helper routine to plot iteration counts
"""
file = open('data/parallelSDC_iterations_precond_MPI.pkl', 'rb')
results = pickle.load(file)
# find the lists/header required for plotting
qd_type_list = []
setup_list = []
for key in results.keys():
if isinstance(key, ID):
if key.qd_type not in qd_type_list:
qd_type_list.append(key.qd_type)
elif isinstance(key, str):
setup_list.append(key)
print('Found these type of preconditioners:', qd_type_list)
print('Found these setups:', setup_list)
    assert len(qd_type_list) == 5, 'ERROR: did not find five preconditioners, got %s' % qd_type_list
    assert len(setup_list) == 4, 'ERROR: did not find four setups, got %s' % setup_list
qd_type_list = ['IEpar', 'Qpar', 'MIN', 'MIN3', 'MIN_GT']
marker_list = ['s', 'o', '^', 'v', 'x']
color_list = ['r', 'g', 'b', 'c', 'm']
plt_helper.setup_mpl()
# loop over setups and Q-delta types: one figure per setup, all Qds in one plot
for setup in setup_list:
plt_helper.newfig(textwidth=238.96, scale=0.89)
for qd_type, marker, color in zip(qd_type_list, marker_list, color_list):
niter = np.zeros(len(results[setup][1]))
for key in results.keys():
if isinstance(key, ID):
if key.setup == setup and key.qd_type == qd_type:
xvalue = results[setup][1].index(key.param)
niter[xvalue] = results[key]
ls = '-'
lw = 1
plt_helper.plt.semilogx(
results[setup][1],
niter,
label=qd_type,
lw=lw,
linestyle=ls,
color=color,
marker=marker,
markeredgecolor='k',
)
if setup == 'heat':
xlabel = r'$\nu$'
elif setup == 'advection':
xlabel = r'$c$'
elif setup == 'fisher':
xlabel = r'$\lambda_0$'
elif setup == 'vanderpol':
xlabel = r'$\mu$'
else:
print('Setup not implemented..', setup)
exit()
plt_helper.plt.ylim([0, 60])
plt_helper.plt.legend(loc=2, ncol=1)
plt_helper.plt.ylabel('number of iterations')
plt_helper.plt.xlabel(xlabel)
plt_helper.plt.grid()
# save plot as PDF and PGF
fname = 'data/parallelSDC_preconditioner_MPI_' + setup
plt_helper.savefig(fname)
assert os.path.isfile(fname + '.pdf'), 'ERROR: plotting did not create PDF file'
# assert os.path.isfile(fname + '.pgf'), 'ERROR: plotting did not create PGF file'
assert os.path.isfile(fname + '.png'), 'ERROR: plotting did not create PNG file'
if __name__ == "__main__":
comm = MPI.COMM_WORLD
main(comm=comm)
if comm.Get_rank() == 0:
plot_iterations()
| 9,562 | 37.405622 | 113 | py |
pySDC | pySDC-master/pySDC/projects/parallelSDC/Van_der_Pol_implicit_Jac.py | import numpy as np
import scipy.sparse as sp
from scipy.sparse.linalg import spsolve
from pySDC.implementations.problem_classes.Van_der_Pol_implicit import vanderpol
# noinspection PyUnusedLocal
class vanderpol_jac(vanderpol):
def eval_jacobian(self, u):
"""
Evaluation of the Jacobian of the right-hand side
Args:
u: space values
Returns:
Jacobian matrix
"""
x1 = u[0]
x2 = u[1]
dfdu = np.array([[0, 1], [-2 * self.params.mu * x1 * x2 - 1, self.params.mu * (1 - x1**2)]])
return dfdu
def solve_system_jacobian(self, dfdu, rhs, factor, u0, t):
"""
Simple linear solver for (I-dtA)u = rhs
Args:
dfdu: the Jacobian of the RHS of the ODE
rhs: right-hand side for the linear system
factor: abbrev. for the node-to-node stepsize (or any other factor required)
u0: initial guess for the iterative solver (not used here so far)
t: current time (e.g. for time-dependent BCs)
Returns:
solution as mesh
"""
me = self.dtype_u(2)
me[:] = spsolve(sp.eye(2) - factor * dfdu, rhs)
return me
| 1,228 | 25.717391 | 100 | py |
pySDC | pySDC-master/pySDC/projects/parallelSDC/linearized_implicit_fixed_parallel.py | import numpy as np
from pySDC.projects.parallelSDC.linearized_implicit_parallel import linearized_implicit_parallel
class linearized_implicit_fixed_parallel(linearized_implicit_parallel):
"""
Custom sweeper class, implements Sweeper.py
Generic implicit sweeper, expecting lower triangular matrix QI as input
Attributes:
D: eigenvalues of the QI
"""
def __init__(self, params):
"""
Initialization routine for the custom sweeper
Args:
params: parameters for the sweeper
"""
if 'fixed_time_in_jacobian' not in params:
params['fixed_time_in_jacobian'] = 0
# call parent's initialization routine
super(linearized_implicit_fixed_parallel, self).__init__(params)
assert self.params.fixed_time_in_jacobian in range(self.coll.num_nodes + 1), (
"ERROR: fixed_time_in_jacobian is too small or too large, got %s" % self.params.fixed_time_in_jacobian
)
self.D, self.V = np.linalg.eig(self.coll.Qmat[1:, 1:])
self.Vi = np.linalg.inv(self.V)
def update_nodes(self):
"""
Update the u- and f-values at the collocation nodes -> corresponds to a single sweep over all nodes
Returns:
None
"""
# get current level and problem description
L = self.level
P = L.prob
# only if the level has been touched before
assert L.status.unlocked
# get number of collocation nodes for easier access
M = self.coll.num_nodes
# form Jacobian at fixed time
jtime = self.params.fixed_time_in_jacobian
dfdu = P.eval_jacobian(L.u[jtime])
# form collocation problem
Gu = self.integrate()
for m in range(M):
Gu[m] -= L.u[m + 1] - L.u[0]
# transform collocation problem forward
Guv = []
for m in range(M):
Guv.append(P.dtype_u((P.init[0], P.init[1], np.dtype('complex128')), val=0.0 + 0.0j))
for j in range(M):
Guv[m] += self.Vi[m, j] * Gu[j]
# solve implicit system with Jacobian (just this one, does not change with the nodes)
uv = []
for m in range(M): # hell yeah, this is parallel!!
uv.append(
P.solve_system_jacobian(dfdu, Guv[m], L.dt * self.D[m], L.u[m + 1], L.time + L.dt * self.coll.nodes[m])
)
        # transform solution backward
for m in range(M):
tmp = P.dtype_u((P.init[0], P.init[1], np.dtype('complex128')), val=0.0 + 0.0j)
for j in range(M):
tmp += self.V[m, j] * uv[j]
L.u[m + 1][:] += np.real(tmp)
# evaluate f
for m in range(M): # hell yeah, this is parallel!!
L.f[m + 1] = P.eval_f(L.u[m + 1], L.time + L.dt * self.coll.nodes[m])
# indicate presence of new values at this level
L.status.updated = True
return None
| 2,978 | 31.032258 | 119 | py |
pySDC | pySDC-master/pySDC/projects/AsympConv/conv_test_to0.py | import matplotlib
import numpy as np
import scipy.linalg as LA
matplotlib.use('Agg')
import matplotlib.pylab as plt
from matplotlib import rc
from pySDC.core.Collocation import CollBase
def compute_and_plot_specrad(Nnodes, lam):
"""
Compute and plot the spectral radius of the smoother for different step-sizes
Args:
Nnodes: number of collocation nodes
lam: test parameter representing the spatial problem
"""
coll = CollBase(Nnodes, 0, 1, quad_type='RADAU-RIGHT')
Qmat = coll.Qmat[1:, 1:]
# do LU decomposition of QT (St. Martin's trick)
QT = coll.Qmat[1:, 1:].T
[_, _, U] = LA.lu(QT, overwrite_a=True)
QDmat = U.T
Nmat = np.zeros((Nnodes, Nnodes))
Nmat[:, -1] = 1
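    # Nmat copies the value at the last collocation node into the next time
    # step; Emat (filled per setup below) is the corresponding step-shift matrix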
Nsteps_list = [64, 256]
color_list = ['red', 'blue']
marker_list = ['s', 'o']
setup_list = zip(Nsteps_list, color_list, marker_list)
xlist = [0.1**i for i in range(11)]
rc('font', **{"sans-serif": ["Arial"], "size": 24})
plt.subplots(figsize=(15, 10))
for Nsteps, color, marker in setup_list:
Emat = np.zeros((Nsteps, Nsteps))
np.fill_diagonal(Emat[1:, :], 1)
Prho_list = []
predict_list = []
for x in xlist:
mat = np.linalg.inv(np.eye(Nnodes * Nsteps) - x * lam * np.kron(np.eye(Nsteps), QDmat)).dot(
x * lam * np.kron(np.eye(Nsteps), (Qmat - QDmat)) + np.kron(Emat, Nmat)
)
Prho_list.append(max(abs(np.linalg.eigvals(mat))))
# predict_list.append((1 + x) ** (1.0 - 1.0 / (Nnodes * Nsteps)) * x ** (1.0 / (Nnodes * Nsteps)))
predict_list.append(x ** (1.0 / (Nsteps)))
if len(predict_list) > 1:
print(
x,
predict_list[-1],
Prho_list[-1],
Prho_list[-2] / Prho_list[-1],
predict_list[-2] / predict_list[-1],
)
plt.loglog(
xlist,
Prho_list,
linestyle='-',
linewidth=3,
color=color,
marker=marker,
markersize=10,
label='spectral radius, L=' + str(Nsteps),
)
plt.loglog(
xlist,
predict_list,
linestyle='--',
linewidth=2,
color=color,
marker=marker,
markersize=10,
label='estimate, L=' + str(Nsteps),
)
ax = plt.gca()
ax.invert_xaxis()
plt.xlabel('time-step size')
plt.ylabel('spectral radius')
plt.legend(loc=3, numpoints=1)
plt.grid()
plt.ylim([1e-02, 1e01])
if type(lam) is complex:
fname = 'data/smoother_specrad_to0_L64+256_M' + str(Nnodes) + 'LU_imag.png'
else:
fname = 'data/smoother_specrad_to0_L64+256_M' + str(Nnodes) + 'LU_real.png'
plt.savefig(fname, transparent=True, bbox_inches='tight')
if __name__ == "__main__":
compute_and_plot_specrad(Nnodes=3, lam=-1)
compute_and_plot_specrad(Nnodes=3, lam=1j)
| 3,041 | 27.429907 | 110 | py |
pySDC | pySDC-master/pySDC/projects/AsympConv/PFASST_conv_Linf.py | import csv
import os
# import matplotlib
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from pySDC.helpers.stats_helper import get_sorted
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.implementations.problem_classes.AdvectionEquation_ND_FD import advectionNd
from pySDC.implementations.problem_classes.HeatEquation_ND_FD import heatNd_unforced
from pySDC.implementations.sweeper_classes.generic_implicit import generic_implicit
from pySDC.implementations.transfer_classes.TransferMesh import mesh_to_mesh
def main():
"""
Main driver running diffusion and advection tests
"""
QI = 'LU'
run_diffusion(QI=QI)
run_advection(QI=QI)
QI = 'LU2'
run_diffusion(QI=QI)
run_advection(QI=QI)
plot_results()
def run_diffusion(QI, max_proc_exp=13):
"""
A simple test program to test PFASST convergence for the heat equation with random initial data
Args:
QI: preconditioner
max_proc_exp: max number of processors will be 2^max_proc_exp
"""
# initialize level parameters
level_params = dict()
level_params['restol'] = 1e-08
level_params['nsweeps'] = [3, 1]
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['num_nodes'] = [3]
sweeper_params['QI'] = [QI, 'LU']
sweeper_params['initial_guess'] = 'zero'
# initialize problem parameters
problem_params = dict()
problem_params['nu'] = 0.1 # diffusion coefficient
problem_params['freq'] = 2 # frequency for the test value
problem_params['nvars'] = [127, 63] # number of degrees of freedom for each level
problem_params['bc'] = 'dirichlet-zero' # boundary conditions
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 200
# initialize space transfer parameters
space_transfer_params = dict()
space_transfer_params['rorder'] = 2
space_transfer_params['iorder'] = 2
space_transfer_params['periodic'] = False
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 30
# fill description dictionary for easy step instantiation
description = dict()
description['problem_class'] = heatNd_unforced # pass problem class
description['problem_params'] = problem_params # pass problem parameters
description['sweeper_class'] = generic_implicit # pass sweeper (see part B)
description['sweeper_params'] = sweeper_params # pass sweeper parameters
description['step_params'] = step_params # pass step parameters
description['space_transfer_class'] = mesh_to_mesh # pass spatial transfer class
    description['space_transfer_params'] = space_transfer_params # pass parameters for spatial transfer
# set time parameters
t0 = 0.0
Tend = 1.0
# set up number of parallel time-steps to run PFASST with
fname = 'data/results_conv_diffusion_Linf_QI' + str(QI) + '.txt'
file = open(fname, 'w')
writer = csv.writer(file)
writer.writerow(('num_proc', 'niter'))
file.close()
for i in range(0, max_proc_exp):
num_proc = 2**i
level_params['dt'] = (Tend - t0) / num_proc
description['level_params'] = level_params # pass level parameters
out = 'Working on num_proc = %5i' % num_proc
print(out)
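        # note: for the heat equation this is the dimensionless diffusion number nu * dt / dx^2
        # rather than a CFL number in the strict sense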
cfl = problem_params['nu'] * level_params['dt'] / (1.0 / (problem_params['nvars'][0] + 1)) ** 2
out = ' CFL number: %4.2e' % cfl
print(out)
# instantiate controller
controller = controller_nonMPI(num_procs=num_proc, controller_params=controller_params, description=description)
# get initial values on finest level
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(t0)
# call main function to get things done...
uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
# filter statistics by type (number of iterations)
iter_counts = get_sorted(stats, type='niter', sortby='time')
niters = np.array([item[1] for item in iter_counts])
out = ' Mean number of iterations: %4.2f' % np.mean(niters)
print(out)
file = open(fname, 'a')
writer = csv.writer(file)
writer.writerow((num_proc, np.mean(niters)))
file.close()
assert os.path.isfile(fname), 'ERROR: pickle did not create file'
def run_advection(QI, max_proc_exp=7):
"""
A simple test program to test PFASST convergence for the periodic advection equation
Args:
QI: preconditioner
max_proc_exp: max number of processors will be 2^max_proc_exp
"""
# initialize level parameters
level_params = dict()
level_params['restol'] = 1e-08
level_params['nsweeps'] = [3, 1]
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['num_nodes'] = [3]
    sweeper_params['QI'] = [QI, 'LU'] # preconditioners per level; the LU-trick is used on the coarse level
sweeper_params['initial_guess'] = 'zero'
# initialize problem parameters
problem_params = dict()
problem_params['freq'] = 64 # frequency for the test value
problem_params['nvars'] = [128, 64] # number of degrees of freedom for each level
problem_params['order'] = 2
problem_params['stencil_type'] = 'center'
problem_params['c'] = 0.1
problem_params['bc'] = 'periodic' # boundary conditions
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 200
# initialize space transfer parameters
space_transfer_params = dict()
space_transfer_params['rorder'] = 2
space_transfer_params['iorder'] = 2
space_transfer_params['periodic'] = True
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 30
# fill description dictionary for easy step instantiation
description = dict()
description['problem_class'] = advectionNd # pass problem class
description['problem_params'] = problem_params # pass problem parameters
description['sweeper_class'] = generic_implicit # pass sweeper (see part B)
description['sweeper_params'] = sweeper_params # pass sweeper parameters
description['step_params'] = step_params # pass step parameters
description['space_transfer_class'] = mesh_to_mesh # pass spatial transfer class
    description['space_transfer_params'] = space_transfer_params # pass parameters for spatial transfer
# set time parameters
t0 = 0.0
Tend = 1.0
# set up number of parallel time-steps to run PFASST with
fname = 'data/results_conv_advection_Linf_QI' + str(QI) + '.txt'
file = open(fname, 'w')
writer = csv.writer(file)
writer.writerow(('num_proc', 'niter'))
file.close()
for i in range(0, max_proc_exp):
num_proc = 2**i
level_params['dt'] = (Tend - t0) / num_proc
description['level_params'] = level_params # pass level parameters
out = 'Working on num_proc = %5i' % num_proc
print(out)
cfl = problem_params['c'] * level_params['dt'] / (1.0 / problem_params['nvars'][0])
out = ' CFL number: %4.2e' % cfl
print(out)
# instantiate controller
controller = controller_nonMPI(num_procs=num_proc, controller_params=controller_params, description=description)
# get initial values on finest level
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(t0)
# call main function to get things done...
uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
# filter statistics by type (number of iterations)
iter_counts = get_sorted(stats, type='niter', sortby='time')
niters = np.array([item[1] for item in iter_counts])
out = ' Mean number of iterations: %4.2f' % np.mean(niters)
print(out)
file = open(fname, 'a')
writer = csv.writer(file)
writer.writerow((num_proc, np.mean(niters)))
file.close()
assert os.path.isfile(fname), 'ERROR: pickle did not create file'
def plot_results(cwd=''):
"""
Plotting routine for iteration counts
Args:
cwd: current working directory
"""
setups = [('diffusion', 'LU', 'LU2'), ('advection', 'LU', 'LU2')]
for type, QI1, QI2 in setups:
fname = cwd + 'data/results_conv_' + type + '_Linf_QI' + QI1 + '.txt'
file = open(fname, 'r')
reader = csv.DictReader(file, delimiter=',')
xvalues_1 = []
niter_1 = []
for row in reader:
xvalues_1.append(int(row['num_proc']))
niter_1.append(float(row['niter']))
file.close()
fname = cwd + 'data/results_conv_' + type + '_Linf_QI' + QI2 + '.txt'
file = open(fname, 'r')
reader = csv.DictReader(file, delimiter=',')
xvalues_2 = []
niter_2 = []
for row in reader:
xvalues_2.append(int(row['num_proc']))
niter_2.append(float(row['niter']))
file.close()
# set up plotting parameters
params = {
'legend.fontsize': 20,
'figure.figsize': (12, 8),
'axes.labelsize': 20,
'axes.titlesize': 20,
'xtick.labelsize': 16,
'ytick.labelsize': 16,
'lines.linewidth': 3,
}
plt.rcParams.update(params)
# set up figure
plt.figure()
plt.xlabel('number of time-steps (L)')
plt.ylabel('no. of iterations')
plt.xlim(min(xvalues_1 + xvalues_2) / 2.0, max(xvalues_1 + xvalues_2) * 2.0)
plt.ylim(min(niter_1 + niter_2) - 1, max(niter_1 + niter_2) + 1)
plt.grid()
# plot
plt.semilogx(xvalues_1, niter_1, 'r-', marker='s', markersize=10, label=QI1)
plt.semilogx(xvalues_2, niter_2, 'b-', marker='o', markersize=10, label=QI2)
plt.legend(loc=2, ncol=1, numpoints=1)
# save plot, beautify
fname = 'data/conv_test_niter_Linf_' + type + '.png'
plt.savefig(fname, bbox_inches='tight')
assert os.path.isfile(fname), 'ERROR: plotting did not create file'
if __name__ == "__main__":
main()
| 10,334 | 33.108911 | 120 | py |
pySDC | pySDC-master/pySDC/projects/AsympConv/smoother_specrad_heatmap.py | import matplotlib
import numpy as np
import scipy.linalg as LA
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from pySDC.core.Collocation import CollBase
def compute_and_plot_specrad():
"""
Compute and plot spectral radius of smoother iteration matrix for a whole range of eigenvalues
"""
# setup_list = [('LU', 'to0'), ('LU', 'toinf'), ('IE', 'to0'), ('IE', 'toinf')]
# setup_list = [('LU', 'to0'), ('LU', 'toinf')]
# setup_list = [('IE', 'to0'), ('IE', 'toinf')]
# setup_list = [('LU', 'toinf'), ('IE', 'toinf')]
setup_list = [('IE', 'full'), ('LU', 'full')]
# setup_list = [('EX', 'to0'), ('PIC', 'to0')]
# set up plotting parameters
params = {
'legend.fontsize': 24,
'figure.figsize': (12, 8),
'axes.labelsize': 24,
'axes.titlesize': 24,
'xtick.labelsize': 24,
'ytick.labelsize': 24,
'lines.linewidth': 3,
}
plt.rcParams.update(params)
Nnodes = 3
Nsteps = 4
coll = CollBase(Nnodes, 0, 1, quad_type='RADAU-RIGHT')
Qmat = coll.Qmat[1:, 1:]
Nmat = np.zeros((Nnodes, Nnodes))
Nmat[:, -1] = 1
Emat = np.zeros((Nsteps, Nsteps))
np.fill_diagonal(Emat[1:, :], 1)
for qd_type, conv_type in setup_list:
if qd_type == 'LU':
QT = coll.Qmat[1:, 1:].T
[_, _, U] = LA.lu(QT, overwrite_a=True)
QDmat = U.T
elif qd_type == 'IE':
QI = np.zeros(np.shape(coll.Qmat))
for m in range(coll.num_nodes + 1):
QI[m, 1 : m + 1] = coll.delta_m[0:m]
QDmat = QI[1:, 1:]
elif qd_type == 'EE':
QE = np.zeros(np.shape(coll.Qmat))
for m in range(coll.num_nodes + 1):
QE[m, 0:m] = coll.delta_m[0:m]
QDmat = QE[1:, 1:]
elif qd_type == 'PIC':
QDmat = np.zeros(np.shape(coll.Qmat[1:, 1:]))
elif qd_type == 'EX':
QT = coll.Qmat[1:, 1:].T
[_, _, U] = LA.lu(QT, overwrite_a=True)
QDmat = np.tril(U.T, k=-1)
print(QDmat)
else:
raise NotImplementedError('qd_type %s is not implemented' % qd_type)
# lim_specrad = max(abs(np.linalg.eigvals(np.eye(Nnodes) - np.linalg.inv(QDmat).dot(Qmat))))
# print('qd_type: %s -- lim_specrad: %6.4e -- conv_type: %s' % (qd_type, lim_specrad, conv_type))
if conv_type == 'to0':
ilim_left = -4
ilim_right = 2
rlim_left = 2
rlim_right = -4
elif conv_type == 'toinf':
ilim_left = 0
ilim_right = 11
rlim_left = 6
rlim_right = 0
elif conv_type == 'full':
ilim_left = -10
ilim_right = 11
rlim_left = 10
rlim_right = -11
else:
raise NotImplementedError('conv_type %s is not implemented' % conv_type)
ilam_list = 1j * np.logspace(ilim_left, ilim_right, 201)
rlam_list = -1 * np.logspace(rlim_left, rlim_right, 201)
assert (rlim_right - rlim_left + 1) % 5 == 0
assert (ilim_right - ilim_left - 1) % 5 == 0
assert (len(rlam_list) - 1) % 5 == 0
assert (len(ilam_list) - 1) % 5 == 0
Prho = np.zeros((len(rlam_list), len(ilam_list)))
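        # scan the complex dt*lambda plane: assemble the multi-step smoother iteration matrix at
        # each grid point, apply Nnodes sweeps via matrix_power and record the spectral radius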
for idr, rlam in enumerate(rlam_list):
for idi, ilam in enumerate(ilam_list):
dxlam = rlam + ilam
mat = np.linalg.inv(np.eye(Nnodes * Nsteps) - dxlam * np.kron(np.eye(Nsteps), QDmat)).dot(
dxlam * np.kron(np.eye(Nsteps), (Qmat - QDmat)) + np.kron(Emat, Nmat)
)
mat = np.linalg.matrix_power(mat, Nnodes)
Prho[idr, idi] = max(abs(np.linalg.eigvals(mat)))
print(np.amax(Prho))
fig, ax = plt.subplots(figsize=(15, 10))
ax.set_xticks([i + 0.5 for i in range(0, len(rlam_list), int(len(rlam_list) / 5))])
ax.set_xticklabels(
[r'-$10^{%d}$' % i for i in range(rlim_left, rlim_right, int((rlim_right - rlim_left + 1) / 5))]
)
ax.set_yticks([i + 0.5 for i in range(0, len(ilam_list), int(len(ilam_list) / 5))])
ax.set_yticklabels(
[r'$10^{%d}i$' % i for i in range(ilim_left, ilim_right, int((ilim_right - ilim_left - 1) / 5))]
)
cmap = plt.get_cmap('Reds')
pcol = plt.pcolor(Prho.T, cmap=cmap, norm=LogNorm(vmin=1e-09, vmax=1e-00))
plt.colorbar(pcol)
plt.xlabel(r'$Re(\Delta t\lambda)$')
plt.ylabel(r'$Im(\Delta t\lambda)$')
fname = (
'data/heatmap_smoother_' + conv_type + '_Nsteps' + str(Nsteps) + '_M' + str(Nnodes) + '_' + qd_type + '.png'
)
plt.savefig(fname, transparent=True, bbox_inches='tight')
if __name__ == "__main__":
compute_and_plot_specrad()
| 4,896 | 31.430464 | 120 | py |
pySDC | pySDC-master/pySDC/projects/AsympConv/__init__.py | 0 | 0 | 0 | py |
|
pySDC | pySDC-master/pySDC/projects/AsympConv/PFASST_conv_tests.py | import os
import pickle
import matplotlib
import numpy as np
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from pySDC.implementations.problem_classes.HeatEquation_ND_FD import heatNd_unforced
from pySDC.implementations.problem_classes.AdvectionEquation_ND_FD import advectionNd
from pySDC.implementations.sweeper_classes.generic_implicit import generic_implicit
from pySDC.implementations.transfer_classes.TransferMesh import mesh_to_mesh
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.helpers.stats_helper import get_sorted
def main():
"""
Main driver running diffusion and advection tests
"""
nsweeps = 3
run_diffusion(nsweeps=nsweeps)
run_advection(nsweeps=nsweeps)
plot_results(nsweeps=nsweeps)
nsweeps = 2
run_diffusion(nsweeps=nsweeps)
run_advection(nsweeps=nsweeps)
plot_results(nsweeps=nsweeps)
def run_diffusion(nsweeps):
"""
A simple test program to test PFASST convergence for the heat equation with random initial data
Args:
nsweeps: number of fine sweeps to perform
"""
# initialize level parameters
level_params = dict()
level_params['restol'] = 1e-08
level_params['dt'] = 0.25
level_params['nsweeps'] = [nsweeps, 1]
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['num_nodes'] = [3]
sweeper_params['QI'] = ['LU']
sweeper_params['initial_guess'] = 'zero'
# initialize problem parameters
problem_params = dict()
problem_params['freq'] = 1 # frequency for the test value
problem_params['nvars'] = [127, 63] # number of degrees of freedom for each level
problem_params['bc'] = 'dirichlet-zero' # boundary conditions
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 50
# initialize space transfer parameters
space_transfer_params = dict()
space_transfer_params['rorder'] = 2
space_transfer_params['iorder'] = 2
space_transfer_params['periodic'] = False
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 30
# fill description dictionary for easy step instantiation
description = dict()
description['problem_class'] = heatNd_unforced # pass problem class
description['sweeper_class'] = generic_implicit # pass sweeper (see part B)
description['sweeper_params'] = sweeper_params # pass sweeper parameters
description['level_params'] = level_params # pass level parameters
description['step_params'] = step_params # pass step parameters
description['space_transfer_class'] = mesh_to_mesh # pass spatial transfer class
    description['space_transfer_params'] = space_transfer_params # pass parameters for spatial transfer
# set time parameters
t0 = 0.0
Tend = 4 * level_params['dt']
# set up number of parallel time-steps to run PFASST with
num_proc = 4
results = dict()
for i in range(-3, 10):
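        # scale nu such that the dimensionless parameter cfl = nu * dt / dx^2 runs through 10^-3 ... 10^9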
ratio = level_params['dt'] / (1.0 / (problem_params['nvars'][0] + 1)) ** 2
problem_params['nu'] = 10.0**i / ratio # diffusion coefficient
description['problem_params'] = problem_params # pass problem parameters
        out = 'Working on nu = %6.4e' % problem_params['nu']
print(out)
cfl = ratio * problem_params['nu']
out = ' CFL number: %4.2e' % cfl
print(out)
# instantiate controller
controller = controller_nonMPI(num_procs=num_proc, controller_params=controller_params, description=description)
# get initial values on finest level
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(t0)
# call main function to get things done...
uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
# filter statistics by type (number of iterations)
iter_counts = get_sorted(stats, type='niter', sortby='time')
niters = np.array([item[1] for item in iter_counts])
out = ' Mean number of iterations: %4.2f' % np.mean(niters)
print(out)
if nsweeps == 3 and (i == -3 or i == 9):
            assert np.mean(niters) <= 2, 'ERROR: too many iterations for diffusive asymptotics, got %s' % np.mean(
niters
)
results[cfl] = np.mean(niters)
fname = 'data/results_conv_diffusion_NS' + str(nsweeps) + '.pkl'
file = open(fname, 'wb')
pickle.dump(results, file)
file.close()
assert os.path.isfile(fname), 'ERROR: pickle did not create file'
def run_advection(nsweeps):
"""
A simple test program to test PFASST convergence for the periodic advection equation
Args:
nsweeps: number of fine sweeps to perform
"""
# initialize level parameters
level_params = dict()
level_params['restol'] = 1e-08
level_params['dt'] = 0.25
level_params['nsweeps'] = [nsweeps, 1]
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['num_nodes'] = [3]
    sweeper_params['QI'] = ['LU'] # the LU-trick is used for the implicit preconditioner
sweeper_params['initial_guess'] = 'zero'
# initialize problem parameters
problem_params = dict()
problem_params['freq'] = 64 # frequency for the test value
problem_params['nvars'] = [128, 64] # number of degrees of freedom for each level
problem_params['order'] = 2
problem_params['stencil_type'] = 'center'
problem_params['bc'] = 'periodic' # boundary conditions
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 50
# initialize space transfer parameters
space_transfer_params = dict()
space_transfer_params['rorder'] = 2
space_transfer_params['iorder'] = 2
space_transfer_params['periodic'] = True
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 30
# fill description dictionary for easy step instantiation
description = dict()
description['problem_class'] = advectionNd # pass problem class
description['sweeper_class'] = generic_implicit # pass sweeper (see part B)
description['sweeper_params'] = sweeper_params # pass sweeper parameters
description['level_params'] = level_params # pass level parameters
description['step_params'] = step_params # pass step parameters
description['space_transfer_class'] = mesh_to_mesh # pass spatial transfer class
    description['space_transfer_params'] = space_transfer_params # pass parameters for spatial transfer
# set time parameters
t0 = 0.0
Tend = 4 * level_params['dt']
# set up number of parallel time-steps to run PFASST with
num_proc = 4
results = dict()
for i in range(-3, 10):
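        # scale c such that the CFL number c * dt / dx runs through 10^-3 ... 10^9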
ratio = level_params['dt'] / (1.0 / (problem_params['nvars'][0] + 1))
        problem_params['c'] = 10.0**i / ratio # advection speed
description['problem_params'] = problem_params # pass problem parameters
        out = 'Working on c = %6.4e' % problem_params['c']
print(out)
cfl = ratio * problem_params['c']
out = ' CFL number: %4.2e' % cfl
print(out)
# instantiate controller
controller = controller_nonMPI(num_procs=num_proc, controller_params=controller_params, description=description)
# get initial values on finest level
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(t0)
# call main function to get things done...
uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
# filter statistics by type (number of iterations)
iter_counts = get_sorted(stats, type='niter', sortby='time')
niters = np.array([item[1] for item in iter_counts])
out = ' Mean number of iterations: %4.2f' % np.mean(niters)
print(out)
if nsweeps == 3 and (i == -3 or i == 9):
            assert np.mean(niters) <= 2, 'ERROR: too many iterations for advective asymptotics, got %s' % np.mean(
niters
)
results[cfl] = np.mean(niters)
fname = 'data/results_conv_advection_NS' + str(nsweeps) + '.pkl'
file = open(fname, 'wb')
pickle.dump(results, file)
file.close()
assert os.path.isfile(fname), 'ERROR: pickle did not create file'
def plot_results(nsweeps):
"""
Plotting routine for iteration counts
Args:
nsweeps: number of fine sweeps used
"""
fname = 'data/results_conv_diffusion_NS' + str(nsweeps) + '.pkl'
file = open(fname, 'rb')
results_diff = pickle.load(file)
file.close()
fname = 'data/results_conv_advection_NS' + str(nsweeps) + '.pkl'
file = open(fname, 'rb')
results_adv = pickle.load(file)
file.close()
xvalues_diff = sorted(results_diff.keys())
niter_diff = []
for x in xvalues_diff:
niter_diff.append(results_diff[x])
xvalues_adv = sorted(results_adv.keys())
niter_adv = []
for x in xvalues_adv:
niter_adv.append(results_adv[x])
# set up plotting parameters
params = {
'legend.fontsize': 20,
'figure.figsize': (12, 8),
'axes.labelsize': 20,
'axes.titlesize': 20,
'xtick.labelsize': 16,
'ytick.labelsize': 16,
'lines.linewidth': 3,
}
plt.rcParams.update(params)
# set up figure
plt.figure()
plt.xlabel(r'$\mu$')
plt.ylabel('no. of iterations')
plt.xlim(min(xvalues_diff + xvalues_adv) / 10.0, max(xvalues_diff + xvalues_adv) * 10.0)
plt.ylim(min(niter_diff + niter_adv) - 1, max(niter_diff + niter_adv) + 1)
plt.grid()
# plot
plt.semilogx(xvalues_diff, niter_diff, 'r-', marker='s', markersize=10, label='diffusion')
plt.semilogx(xvalues_adv, niter_adv, 'b-', marker='o', markersize=10, label='advection')
plt.legend(loc=1, ncol=1, numpoints=1)
# plt.show()
# save plot, beautify
fname = 'data/conv_test_niter_NS' + str(nsweeps) + '.png'
plt.savefig(fname, bbox_inches='tight')
assert os.path.isfile(fname), 'ERROR: plotting did not create file'
if __name__ == "__main__":
main()
| 10,273 | 32.141935 | 120 | py |
pySDC | pySDC-master/pySDC/projects/AsympConv/conv_test_toinf.py | import matplotlib
import numpy as np
import scipy.linalg as LA
matplotlib.use('Agg')
import matplotlib.pylab as plt
from matplotlib import rc
from pySDC.core.Collocation import CollBase
def compute_and_plot_specrad(Nnodes, lam):
"""
Compute and plot the spectral radius of the smoother for different step-sizes
Args:
Nnodes: number of collocation nodes
lam: test parameter representing the spatial problem
"""
Nsteps = 1
coll = CollBase(Nnodes, 0, 1, quad_type='RADAU-RIGHT')
Qmat = coll.Qmat[1:, 1:]
# do LU decomposition of QT (St. Martin's trick)
QT = coll.Qmat[1:, 1:].T
[_, _, U] = LA.lu(QT, overwrite_a=True)
QDmat = U.T
Nmat = np.zeros((Nnodes, Nnodes))
Nmat[:, -1] = 1
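    # Nmat would transport the last-node value to the next step; with Nsteps = 1 the coupling
    # term E kron N vanishes, so only the single-step smoother is analyzed here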
Nsweep_list = [1, Nnodes - 1, Nnodes]
color_list = ['red', 'blue', 'green']
marker_list = ['s', 'o', 'd']
setup_list = zip(Nsweep_list, color_list, marker_list)
xlist = [10**i for i in range(11)]
rc('font', **{"sans-serif": ["Arial"], "size": 24})
plt.subplots(figsize=(15, 10))
for Nsweeps, color, marker in setup_list:
Emat = np.zeros((Nsteps, Nsteps))
np.fill_diagonal(Emat[1:, :], 1)
Prho_list = []
predict_list = []
for x in xlist:
mat = np.linalg.inv(np.eye(Nnodes * Nsteps) - x * lam * np.kron(np.eye(Nsteps), QDmat)).dot(
x * lam * np.kron(np.eye(Nsteps), (Qmat - QDmat)) + np.kron(Emat, Nmat)
)
mat = np.linalg.matrix_power(mat, Nsweeps)
Prho_list.append(max(abs(np.linalg.eigvals(mat))))
predict_list.append(1.0 / x)
if len(predict_list) > 1:
print(
x,
predict_list[-1],
Prho_list[-1],
Prho_list[-2] / Prho_list[-1],
predict_list[-2] / predict_list[-1],
)
plt.loglog(
xlist,
Prho_list,
linestyle='-',
linewidth=3,
color=color,
marker=marker,
markersize=10,
label='spectral radius, L=' + str(Nsteps),
)
plt.loglog(
xlist,
[item / predict_list[0] for item in predict_list],
linestyle='--',
linewidth=2,
color='k',
label='estimate',
)
plt.xlabel('time-step size')
plt.ylabel('spectral radius')
plt.legend(loc=3, numpoints=1)
plt.grid()
plt.ylim([1e-16, 1e00])
if type(lam) is complex:
fname = 'data/smoother_specrad_toinf_M' + str(Nnodes) + '_LU_imag.png'
else:
fname = 'data/smoother_specrad_toinf_M' + str(Nnodes) + '_LU_real.png'
plt.savefig(fname, transparent=True, bbox_inches='tight')
if __name__ == "__main__":
compute_and_plot_specrad(Nnodes=3, lam=-1)
compute_and_plot_specrad(Nnodes=3, lam=1j)
compute_and_plot_specrad(Nnodes=7, lam=-1)
compute_and_plot_specrad(Nnodes=7, lam=1j)
| 2,989 | 26.943925 | 104 | py |
pySDC | pySDC-master/pySDC/projects/Hamiltonian/solar_system.py | import os
from collections import defaultdict
from mpl_toolkits.mplot3d import Axes3D
import dill
import numpy as np
import pySDC.helpers.plot_helper as plt_helper
from pySDC.helpers.stats_helper import get_sorted, filter_stats
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.implementations.problem_classes.FullSolarSystem import full_solar_system
from pySDC.implementations.problem_classes.OuterSolarSystem import outer_solar_system
from pySDC.implementations.sweeper_classes.verlet import verlet
from pySDC.implementations.transfer_classes.TransferParticles_NoCoarse import particles_to_particles
from pySDC.projects.Hamiltonian.hamiltonian_output import hamiltonian_output
def setup_outer_solar_system():
"""
Helper routine for setting up everything for the outer solar system problem
Returns:
description (dict): description of the controller
controller_params (dict): controller parameters
"""
# initialize level parameters
level_params = dict()
level_params['restol'] = 1e-10
level_params['dt'] = 100.0
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['quad_type'] = 'LOBATTO'
sweeper_params['num_nodes'] = [5, 3]
sweeper_params['initial_guess'] = 'spread'
    # initialize problem parameters for the outer solar system
problem_params = dict()
problem_params['sun_only'] = [False, True]
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 50
# initialize controller parameters
controller_params = dict()
controller_params['hook_class'] = hamiltonian_output # specialized hook class for more statistics and output
controller_params['logger_level'] = 30
# Fill description dictionary for easy hierarchy creation
description = dict()
description['problem_class'] = outer_solar_system
description['problem_params'] = problem_params
description['sweeper_class'] = verlet
description['sweeper_params'] = sweeper_params
description['level_params'] = level_params
description['step_params'] = step_params
description['space_transfer_class'] = particles_to_particles
return description, controller_params
def setup_full_solar_system():
"""
Helper routine for setting up everything for the full solar system problem
Returns:
description (dict): description of the controller
controller_params (dict): controller parameters
"""
# initialize level parameters
level_params = dict()
level_params['restol'] = 1e-10
level_params['dt'] = 10.0
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['quad_type'] = 'LOBATTO'
sweeper_params['num_nodes'] = [5, 3]
sweeper_params['initial_guess'] = 'spread'
    # initialize problem parameters for the full solar system
problem_params = dict()
problem_params['sun_only'] = [False, True]
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 50
# initialize controller parameters
controller_params = dict()
controller_params['hook_class'] = hamiltonian_output # specialized hook class for more statistics and output
controller_params['logger_level'] = 30
# Fill description dictionary for easy hierarchy creation
description = dict()
description['problem_class'] = full_solar_system
description['problem_params'] = problem_params
description['sweeper_class'] = verlet
description['sweeper_params'] = sweeper_params
description['level_params'] = level_params
description['step_params'] = step_params
description['space_transfer_class'] = particles_to_particles
return description, controller_params
def run_simulation(prob=None):
"""
Routine to run the simulation of a second order problem
Args:
prob (str): name of the problem
"""
if prob == 'outer_solar_system':
description, controller_params = setup_outer_solar_system()
# set time parameters
t0 = 0.0
Tend = 10000.0
num_procs = 100
maxmeaniter = 6.0
elif prob == 'full_solar_system':
description, controller_params = setup_full_solar_system()
# set time parameters
t0 = 0.0
Tend = 1000.0
num_procs = 100
maxmeaniter = 19.0
else:
raise NotImplementedError('Problem type not implemented, got %s' % prob)
f = open('data/' + prob + '_out.txt', 'w')
out = 'Running ' + prob + ' problem with %s processors...' % num_procs
f.write(out + '\n')
print(out)
# instantiate the controller
controller = controller_nonMPI(num_procs=num_procs, controller_params=controller_params, description=description)
# get initial values on finest level
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(t=t0)
# call main function to get things done...
uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
# filter statistics by type (number of iterations)
iter_counts = get_sorted(stats, type='niter', sortby='time')
# compute and print statistics
# for item in iter_counts:
# out = 'Number of iterations for time %4.2f: %2i' % item
# f.write(out)
# print(out)
niters = np.array([item[1] for item in iter_counts])
out = ' Mean number of iterations: %4.2f' % np.mean(niters)
f.write(out + '\n')
print(out)
out = ' Range of values for number of iterations: %2i ' % np.ptp(niters)
f.write(out + '\n')
print(out)
out = ' Position of max/min number of iterations: %2i -- %2i' % (int(np.argmax(niters)), int(np.argmin(niters)))
f.write(out + '\n')
print(out)
out = ' Std and var for number of iterations: %4.2f -- %4.2f' % (float(np.std(niters)), float(np.var(niters)))
f.write(out + '\n')
print(out)
f.close()
assert np.mean(niters) <= maxmeaniter, 'Mean number of iterations is too high, got %s' % np.mean(niters)
fname = 'data/' + prob + '.dat'
f = open(fname, 'wb')
dill.dump(stats, f)
f.close()
assert os.path.isfile(fname), 'Run for %s did not create stats file' % prob
def show_results(prob=None, cwd=''):
"""
Helper function to plot the error of the Hamiltonian
Args:
prob (str): name of the problem
cwd (str): current working directory
"""
# read in the dill data
f = open(cwd + 'data/' + prob + '.dat', 'rb')
stats = dill.load(f)
f.close()
plt_helper.mpl.style.use('classic')
plt_helper.setup_mpl()
# extract error in hamiltonian and prepare for plotting
extract_stats = filter_stats(stats, type='err_hamiltonian')
result = defaultdict(list)
for k, v in extract_stats.items():
result[k.iter].append((k.time, v))
for k, _ in result.items():
result[k] = sorted(result[k], key=lambda x: x[0])
plt_helper.newfig(textwidth=238.96, scale=0.89)
# Rearrange data for easy plotting
err_ham = 1
for k, v in result.items():
time = [item[0] for item in v]
ham = [item[1] for item in v]
err_ham = ham[-1]
plt_helper.plt.semilogy(time, ham, '-', lw=1, label='Iter ' + str(k))
assert err_ham < 2.4e-14, 'Error in the Hamiltonian is too large for %s, got %s' % (prob, err_ham)
plt_helper.plt.xlabel('Time')
plt_helper.plt.ylabel('Error in Hamiltonian')
plt_helper.plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
fname = 'data/' + prob + '_hamiltonian'
plt_helper.savefig(fname)
assert os.path.isfile(fname + '.pdf'), 'ERROR: plotting did not create PDF file'
# assert os.path.isfile(fname + '.pgf'), 'ERROR: plotting did not create PGF file'
assert os.path.isfile(fname + '.png'), 'ERROR: plotting did not create PNG file'
# extract positions and prepare for plotting
result = get_sorted(stats, type='position', sortby='time')
fig = plt_helper.plt.figure()
ax = fig.add_subplot(111, projection='3d')
# Rearrange data for easy plotting
nparts = len(result[1][1][0])
ndim = len(result[1][1])
nsteps = len(result)
pos = np.zeros((nparts, ndim, nsteps))
for idx, item in enumerate(result):
for n in range(nparts):
for m in range(ndim):
pos[n, m, idx] = item[1][m][n]
for n in range(nparts):
if ndim == 2:
ax.plot(pos[n, 0, :], pos[n, 1, :])
elif ndim == 3:
ax.plot(pos[n, 0, :], pos[n, 1, :], pos[n, 2, :])
else:
raise NotImplementedError('Wrong number of dimensions for plotting, got %s' % ndim)
fname = 'data/' + prob + '_positions'
plt_helper.savefig(fname)
assert os.path.isfile(fname + '.pdf'), 'ERROR: plotting did not create PDF file'
# assert os.path.isfile(fname + '.pgf'), 'ERROR: plotting did not create PGF file'
assert os.path.isfile(fname + '.png'), 'ERROR: plotting did not create PNG file'
def main():
prob = 'outer_solar_system'
run_simulation(prob)
show_results(prob)
prob = 'full_solar_system'
run_simulation(prob)
show_results(prob)
if __name__ == "__main__":
main()
| 9,197 | 32.205776 | 118 | py |
pySDC | pySDC-master/pySDC/projects/Hamiltonian/hamiltonian_output.py | from pySDC.core.Hooks import hooks
class hamiltonian_output(hooks):
def __init__(self):
"""
Initialization of particles output
"""
super(hamiltonian_output, self).__init__()
self.ham_init = None
def pre_run(self, step, level_number):
# some abbreviations
L = step.levels[0]
P = L.prob
super(hamiltonian_output, self).pre_run(step, level_number)
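        # store the Hamiltonian of the initial condition; post_iteration logs the drift
        # |H(u_init) - H(u_end)| relative to it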
self.ham_init = P.eval_hamiltonian(L.u[0])
def post_iteration(self, step, level_number):
"""
Overwrite standard post iteration hook
Args:
step (pySDC.Step.step): the current step
level_number (int): the current level number
"""
super(hamiltonian_output, self).post_iteration(step, level_number)
# some abbreviations
L = step.levels[0]
P = L.prob
L.sweep.compute_end_point()
H = P.eval_hamiltonian(L.uend)
self.add_to_stats(
process=step.status.slot,
time=L.time,
level=-1,
iter=step.status.iter,
sweep=L.status.sweep,
type='hamiltonian',
value=H,
)
self.add_to_stats(
process=step.status.slot,
time=L.time,
level=-1,
iter=step.status.iter,
sweep=L.status.sweep,
type='err_hamiltonian',
value=abs(self.ham_init - H),
)
return None
def post_step(self, step, level_number):
"""
        Overwrite standard post step hook
Args:
step (pySDC.Step.step): the current step
level_number (int): the current level number
"""
super(hamiltonian_output, self).post_step(step, level_number)
# some abbreviations
L = step.levels[0]
self.add_to_stats(
process=step.status.slot,
time=L.time,
level=-1,
iter=step.status.iter,
sweep=L.status.sweep,
type='position',
value=L.uend.pos,
)
return None
| 2,130 | 24.987805 | 74 | py |
pySDC | pySDC-master/pySDC/projects/Hamiltonian/simple_problems.py | import os
from collections import defaultdict
import dill
import numpy as np
import pySDC.helpers.plot_helper as plt_helper
from pySDC.helpers.stats_helper import get_sorted, filter_stats
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.implementations.problem_classes.HarmonicOscillator import harmonic_oscillator
from pySDC.implementations.problem_classes.HenonHeiles import henon_heiles
from pySDC.implementations.sweeper_classes.verlet import verlet
from pySDC.implementations.transfer_classes.TransferParticles_NoCoarse import particles_to_particles
from pySDC.projects.Hamiltonian.hamiltonian_output import hamiltonian_output
def setup_harmonic():
"""
Helper routine for setting up everything for the harmonic oscillator
Returns:
description (dict): description of the controller
controller_params (dict): controller parameters
"""
# initialize level parameters
level_params = dict()
level_params['restol'] = 1e-10
level_params['dt'] = 0.5
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['quad_type'] = 'LOBATTO'
sweeper_params['num_nodes'] = [5, 3]
sweeper_params['initial_guess'] = 'zero'
    # initialize problem parameters for the harmonic oscillator
problem_params = dict()
problem_params['k'] = 1.0
problem_params['phase'] = 0.0
problem_params['amp'] = 1.0
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 50
# initialize controller parameters
controller_params = dict()
controller_params['hook_class'] = hamiltonian_output # specialized hook class for more statistics and output
controller_params['logger_level'] = 30
# Fill description dictionary for easy hierarchy creation
description = dict()
description['problem_class'] = harmonic_oscillator
description['problem_params'] = problem_params
description['sweeper_class'] = verlet
description['sweeper_params'] = sweeper_params
description['level_params'] = level_params
description['step_params'] = step_params
description['space_transfer_class'] = particles_to_particles
return description, controller_params
def setup_henonheiles():
"""
Helper routine for setting up everything for the Henon Heiles problem
Returns:
description (dict): description of the controller
controller_params (dict): controller parameters
"""
# initialize level parameters
level_params = dict()
level_params['restol'] = 1e-10
level_params['dt'] = 0.25
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['quad_type'] = 'LOBATTO'
sweeper_params['num_nodes'] = [5, 3]
sweeper_params['initial_guess'] = 'zero'
    # initialize problem parameters for the Henon-Heiles problem
problem_params = dict()
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 50
# initialize controller parameters
controller_params = dict()
controller_params['hook_class'] = hamiltonian_output # specialized hook class for more statistics and output
controller_params['logger_level'] = 30
# Fill description dictionary for easy hierarchy creation
description = dict()
description['problem_class'] = henon_heiles
description['problem_params'] = problem_params
description['sweeper_class'] = verlet
description['sweeper_params'] = sweeper_params
description['level_params'] = level_params
description['step_params'] = step_params
description['space_transfer_class'] = particles_to_particles
return description, controller_params
def run_simulation(prob=None):
"""
Routine to run the simulation of a second order problem
Args:
prob (str): name of the problem
"""
# check what problem type we have and set up corresponding description and variables
if prob == 'harmonic':
description, controller_params = setup_harmonic()
# set time parameters
t0 = 0.0
Tend = 50.0
num_procs = 100
maxmeaniter = 6.5
elif prob == 'henonheiles':
description, controller_params = setup_henonheiles()
# set time parameters
t0 = 0.0
Tend = 25.0
num_procs = 100
maxmeaniter = 5.0
else:
raise NotImplementedError('Problem type not implemented, got %s' % prob)
# instantiate the controller
controller = controller_nonMPI(num_procs=num_procs, controller_params=controller_params, description=description)
# get initial values on finest level
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(t=t0)
# call main function to get things done...
uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
# filter statistics by type (number of iterations)
iter_counts = get_sorted(stats, type='niter', sortby='time')
# compute and print statistics
# for item in iter_counts:
# out = 'Number of iterations for time %4.2f: %2i' % item
# print(out)
niters = np.array([item[1] for item in iter_counts])
out = ' Mean number of iterations: %4.2f' % np.mean(niters)
print(out)
out = ' Range of values for number of iterations: %2i ' % np.ptp(niters)
print(out)
out = ' Position of max/min number of iterations: %2i -- %2i' % (int(np.argmax(niters)), int(np.argmin(niters)))
print(out)
out = ' Std and var for number of iterations: %4.2f -- %4.2f' % (float(np.std(niters)), float(np.var(niters)))
print(out)
assert np.mean(niters) <= maxmeaniter, 'Mean number of iterations is too high, got %s' % np.mean(niters)
fname = 'data/' + prob + '.dat'
f = open(fname, 'wb')
dill.dump(stats, f)
f.close()
assert os.path.isfile(fname), 'Run for %s did not create stats file' % prob
def show_results(prob=None, cwd=''):
"""
Helper function to plot the error of the Hamiltonian
Args:
prob (str): name of the problem
cwd (str): current working directory
"""
# read in the dill data
f = open(cwd + 'data/' + prob + '.dat', 'rb')
stats = dill.load(f)
f.close()
# extract error in hamiltonian and prepare for plotting
extract_stats = filter_stats(stats, type='err_hamiltonian')
result = defaultdict(list)
for k, v in extract_stats.items():
result[k.iter].append((k.time, v))
for k, _ in result.items():
result[k] = sorted(result[k], key=lambda x: x[0])
plt_helper.mpl.style.use('classic')
plt_helper.setup_mpl()
plt_helper.newfig(textwidth=238.96, scale=0.89)
# Rearrange data for easy plotting
err_ham = 1
for k, v in result.items():
time = [item[0] for item in v]
ham = [item[1] for item in v]
err_ham = ham[-1]
plt_helper.plt.semilogy(time, ham, '-', lw=1, label='Iter ' + str(k))
assert err_ham < 3.7e-08, 'Error in the Hamiltonian is too large for %s, got %s' % (prob, err_ham)
plt_helper.plt.xlabel('Time')
plt_helper.plt.ylabel('Error in Hamiltonian')
plt_helper.plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
fname = 'data/' + prob + '_hamiltonian'
plt_helper.savefig(fname)
assert os.path.isfile(fname + '.pdf'), 'ERROR: plotting did not create PDF file'
# assert os.path.isfile(fname + '.pgf'), 'ERROR: plotting did not create PGF file'
assert os.path.isfile(fname + '.png'), 'ERROR: plotting did not create PNG file'
def main():
prob = 'harmonic'
run_simulation(prob)
show_results(prob)
prob = 'henonheiles'
run_simulation(prob)
show_results(prob)
if __name__ == "__main__":
main()
| 7,709 | 31.808511 | 118 | py |
pySDC | pySDC-master/pySDC/projects/Hamiltonian/harmonic_oscillator.py | import os
import dill
import numpy as np
import pySDC.helpers.plot_helper as plt_helper
from pySDC.helpers.stats_helper import get_sorted
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.implementations.problem_classes.HarmonicOscillator import harmonic_oscillator
from pySDC.implementations.sweeper_classes.verlet import verlet
from pySDC.implementations.transfer_classes.TransferParticles_NoCoarse import particles_to_particles
from pySDC.projects.Hamiltonian.stop_at_error_hook import stop_at_error_hook
def run_simulation():
"""
Routine to run the simulation of a second order problem
"""
# initialize level parameters
level_params = dict()
level_params['restol'] = 0.0
level_params['dt'] = 1.0
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['quad_type'] = 'LOBATTO'
sweeper_params['num_nodes'] = [5, 3]
sweeper_params['initial_guess'] = 'zero'
    # initialize problem parameters for the harmonic oscillator
problem_params = dict()
problem_params['k'] = None # will be defined later
problem_params['phase'] = 0.0
problem_params['amp'] = 1.0
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 50
# initialize controller parameters
controller_params = dict()
controller_params['hook_class'] = stop_at_error_hook
controller_params['logger_level'] = 30
# Fill description dictionary for easy hierarchy creation
description = dict()
description['problem_class'] = harmonic_oscillator
description['sweeper_class'] = verlet
description['level_params'] = level_params
description['step_params'] = step_params
description['space_transfer_class'] = particles_to_particles
# set time parameters
t0 = 0.0
Tend = 4.0
num_procs = 4
rlim_left = 0
rlim_right = 16.0
nstep = 34
ks = np.linspace(rlim_left, rlim_right, nstep)[1:]
# qd_combinations = [('IE', 'EE'), ('IE', 'PIC'),
# ('LU', 'EE'), ('LU', 'PIC'),
# # ('MIN3', 'PIC'), ('MIN3', 'EE'),
# ('PIC', 'EE'), ('PIC', 'PIC')]
qd_combinations = [('IE', 'EE'), ('PIC', 'PIC')]
results = dict()
results['ks'] = ks
for qd in qd_combinations:
print('Working on combination (%s, %s)...' % qd)
niters = np.zeros(len(ks))
for i, k in enumerate(ks):
problem_params['k'] = k
description['problem_params'] = problem_params
sweeper_params['QI'] = qd[0]
sweeper_params['QE'] = qd[1]
description['sweeper_params'] = sweeper_params
# instantiate the controller
controller = controller_nonMPI(
num_procs=num_procs, controller_params=controller_params, description=description
)
# get initial values on finest level
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(t=t0)
# call main function to get things done...
uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
uex = P.u_exact(Tend)
print('Error after run: %s' % abs(uex - uend))
# filter statistics by type (number of iterations)
iter_counts = get_sorted(stats, type='niter', sortby='time')
niters[i] = np.mean(np.array([item[1] for item in iter_counts]))
# print('Worked on k = %s, took %s iterations' % (k, results[i]))
results[qd] = niters
fname = 'data/harmonic_k.dat'
f = open(fname, 'wb')
dill.dump(results, f)
f.close()
assert os.path.isfile(fname), 'Run did not create stats file'
def show_results(cwd=''):
"""
Helper function to plot the error of the Hamiltonian
Args:
cwd (str): current working directory
"""
plt_helper.mpl.style.use('classic')
plt_helper.setup_mpl()
plt_helper.newfig(textwidth=238.96, scale=0.89)
# read in the dill data
f = open(cwd + 'data/harmonic_k.dat', 'rb')
results = dill.load(f)
f.close()
ks = results['ks']
for qd in results:
if qd != 'ks':
plt_helper.plt.plot(ks, results[qd], label=qd)
plt_helper.plt.xlabel('k')
plt_helper.plt.ylabel('Number of iterations')
plt_helper.plt.legend(
loc='upper left',
)
plt_helper.plt.ylim([0, 15])
fname = 'data/harmonic_qd_iterations'
plt_helper.savefig(fname)
assert os.path.isfile(fname + '.pdf'), 'ERROR: plotting did not create PDF file'
# assert os.path.isfile(fname + '.pgf'), 'ERROR: plotting did not create PGF file'
assert os.path.isfile(fname + '.png'), 'ERROR: plotting did not create PNG file'
def main():
run_simulation()
show_results()
if __name__ == "__main__":
main()
| 4,865 | 28.313253 | 100 | py |
pySDC | pySDC-master/pySDC/projects/Hamiltonian/__init__.py | 0 | 0 | 0 | py |
|
pySDC | pySDC-master/pySDC/projects/Hamiltonian/stop_at_error_hook.py | from pySDC.core.Hooks import hooks
class stop_at_error_hook(hooks):
def post_sweep(self, step, level_number):
"""
        Overwrite standard post sweep hook
Args:
step (pySDC.Step.step): the current step
level_number (int): the current level number
"""
super(stop_at_error_hook, self).post_sweep(step, level_number)
# some abbreviations
L = step.levels[0]
P = L.prob
L.sweep.compute_end_point()
uex = P.u_exact(L.time + L.dt)
# print(abs(uex - L.uend))
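        # once the error w.r.t. the exact solution drops below the threshold, flag the step as
        # done so the controller stops iterating early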
if abs(uex - L.uend) < 1e-02:
print('Stop iterating at %s' % step.status.iter)
step.status.force_done = True
return None
| 734 | 24.344828 | 70 | py |
pySDC | pySDC-master/pySDC/projects/Hamiltonian/fput.py | import os
from collections import defaultdict
import dill
import numpy as np
import pySDC.helpers.plot_helper as plt_helper
from pySDC.helpers.stats_helper import get_sorted, filter_stats
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.implementations.problem_classes.FermiPastaUlamTsingou import fermi_pasta_ulam_tsingou
from pySDC.implementations.sweeper_classes.verlet import verlet
from pySDC.implementations.transfer_classes.TransferParticles_NoCoarse import particles_to_particles
from pySDC.projects.Hamiltonian.hamiltonian_and_energy_output import hamiltonian_and_energy_output
def setup_fput():
"""
Helper routine for setting up everything for the Fermi-Pasta-Ulam-Tsingou problem
Returns:
description (dict): description of the controller
controller_params (dict): controller parameters
"""
# initialize level parameters
level_params = dict()
level_params['restol'] = 1e-12
level_params['dt'] = 2.0
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['quad_type'] = 'LOBATTO'
sweeper_params['num_nodes'] = [5, 3]
sweeper_params['initial_guess'] = 'zero'
    # initialize problem parameters for the Fermi-Pasta-Ulam-Tsingou problem
problem_params = dict()
problem_params['npart'] = 2048
problem_params['alpha'] = 0.25
problem_params['k'] = 1.0
problem_params['energy_modes'] = [[1, 2, 3, 4]]
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 50
# initialize controller parameters
controller_params = dict()
controller_params['hook_class'] = hamiltonian_and_energy_output
controller_params['logger_level'] = 30
# Fill description dictionary for easy hierarchy creation
description = dict()
description['problem_class'] = fermi_pasta_ulam_tsingou
description['problem_params'] = problem_params
description['sweeper_class'] = verlet
description['sweeper_params'] = sweeper_params
description['level_params'] = level_params
description['step_params'] = step_params
description['space_transfer_class'] = particles_to_particles
return description, controller_params
def run_simulation():
"""
Routine to run the simulation of a second order problem
"""
description, controller_params = setup_fput()
# set time parameters
t0 = 0.0
# set this to 10000 to reproduce the picture in
# http://www.scholarpedia.org/article/Fermi-Pasta-Ulam_nonlinear_lattice_oscillations
Tend = 1000.0
num_procs = 1
f = open('data/fput_out.txt', 'w')
out = 'Running fput problem with %s processors...' % num_procs
f.write(out + '\n')
print(out)
# instantiate the controller
controller = controller_nonMPI(num_procs=num_procs, controller_params=controller_params, description=description)
# get initial values on finest level
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(t=t0)
# call main function to get things done...
uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
# filter statistics by type (number of iterations)
iter_counts = get_sorted(stats, type='niter', sortby='time')
# compute and print statistics
# for item in iter_counts:
# out = 'Number of iterations for time %4.2f: %2i' % item
# f.write(out + '\n')
# print(out)
niters = np.array([item[1] for item in iter_counts])
out = ' Mean number of iterations: %4.2f' % np.mean(niters)
f.write(out + '\n')
print(out)
out = ' Range of values for number of iterations: %2i ' % np.ptp(niters)
f.write(out + '\n')
print(out)
out = ' Position of max/min number of iterations: %2i -- %2i' % (int(np.argmax(niters)), int(np.argmin(niters)))
f.write(out + '\n')
print(out)
out = ' Std and var for number of iterations: %4.2f -- %4.2f' % (float(np.std(niters)), float(np.var(niters)))
f.write(out + '\n')
print(out)
# get runtime
timing_run = get_sorted(stats, type='timing_run', sortby='time')[0][1]
out = '... took %6.4f seconds to run this.' % timing_run
f.write(out + '\n')
print(out)
f.close()
# assert np.mean(niters) <= 3.46, 'Mean number of iterations is too high, got %s' % np.mean(niters)
fname = 'data/fput.dat'
f = open(fname, 'wb')
dill.dump(stats, f)
f.close()
    assert os.path.isfile(fname), 'Run did not create stats file'
def show_results(cwd=''):
"""
Helper function to plot the error of the Hamiltonian
Args:
cwd (str): current working directory
"""
# read in the dill data
f = open(cwd + 'data/fput.dat', 'rb')
stats = dill.load(f)
f.close()
plt_helper.mpl.style.use('classic')
plt_helper.setup_mpl()
# HAMILTONIAN PLOTTING #
# extract error in hamiltonian and prepare for plotting
extract_stats = filter_stats(stats, type='err_hamiltonian')
result = defaultdict(list)
for k, v in extract_stats.items():
result[k.iter].append((k.time, v))
for k, _ in result.items():
result[k] = sorted(result[k], key=lambda x: x[0])
plt_helper.newfig(textwidth=238.96, scale=0.89)
# Rearrange data for easy plotting
err_ham = 1
for k, v in result.items():
time = [item[0] for item in v]
ham = [item[1] for item in v]
err_ham = ham[-1]
plt_helper.plt.semilogy(time, ham, '-', lw=1, label='Iter ' + str(k))
print(err_ham)
# assert err_ham < 6E-10, 'Error in the Hamiltonian is too large, got %s' % err_ham
plt_helper.plt.xlabel('Time')
plt_helper.plt.ylabel('Error in Hamiltonian')
plt_helper.plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
fname = 'data/fput_hamiltonian'
plt_helper.savefig(fname)
assert os.path.isfile(fname + '.pdf'), 'ERROR: plotting did not create PDF file'
# assert os.path.isfile(fname + '.pgf'), 'ERROR: plotting did not create PGF file'
assert os.path.isfile(fname + '.png'), 'ERROR: plotting did not create PNG file'
# ENERGY PLOTTING #
# extract error in hamiltonian and prepare for plotting
result = get_sorted(stats, type='energy_step', sortby='time')
plt_helper.newfig(textwidth=238.96, scale=0.89)
# Rearrange data for easy plotting
for mode in result[0][1].keys():
time = [item[0] for item in result]
energy = [item[1][mode] for item in result]
plt_helper.plt.plot(time, energy, label=str(mode) + 'th mode')
plt_helper.plt.xlabel('Time')
plt_helper.plt.ylabel('Energy')
plt_helper.plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
fname = 'data/fput_energy'
plt_helper.savefig(fname)
assert os.path.isfile(fname + '.pdf'), 'ERROR: plotting did not create PDF file'
# assert os.path.isfile(fname + '.pgf'), 'ERROR: plotting did not create PGF file'
assert os.path.isfile(fname + '.png'), 'ERROR: plotting did not create PNG file'
# POSITION PLOTTING #
# extract positions and prepare for plotting
result = get_sorted(stats, type='position', sortby='time')
plt_helper.newfig(textwidth=238.96, scale=0.89)
# Rearrange data for easy plotting
nparts = len(result[0][1])
nsteps = len(result)
pos = np.zeros((nparts, nsteps))
time = np.zeros(nsteps)
for idx, item in enumerate(result):
time[idx] = item[0]
for n in range(nparts):
pos[n, idx] = item[1][n]
for n in range(min(nparts, 16)):
plt_helper.plt.plot(time, pos[n, :])
fname = 'data/fput_positions'
plt_helper.savefig(fname)
assert os.path.isfile(fname + '.pdf'), 'ERROR: plotting did not create PDF file'
# assert os.path.isfile(fname + '.pgf'), 'ERROR: plotting did not create PGF file'
assert os.path.isfile(fname + '.png'), 'ERROR: plotting did not create PNG file'
def main():
run_simulation()
show_results()
if __name__ == "__main__":
main()
| 7,980 | 32.254167 | 118 | py |
pySDC | pySDC-master/pySDC/projects/Hamiltonian/hamiltonian_and_energy_output.py | from pySDC.core.Hooks import hooks
class hamiltonian_and_energy_output(hooks):
def __init__(self):
"""
Initialization of particles output
"""
super(hamiltonian_and_energy_output, self).__init__()
self.ham_init = None
self.energy_init = None
def pre_run(self, step, level_number):
# some abbreviations
L = step.levels[0]
P = L.prob
super(hamiltonian_and_energy_output, self).pre_run(step, level_number)
self.ham_init = P.eval_hamiltonian(L.u[0])
self.energy_init = P.eval_mode_energy(L.u[0])
def post_iteration(self, step, level_number):
"""
Overwrite standard post iteration hook
Args:
step (pySDC.Step.step): the current step
level_number (int): the current level number
"""
super(hamiltonian_and_energy_output, self).post_iteration(step, level_number)
# some abbreviations
L = step.levels[0]
P = L.prob
L.sweep.compute_end_point()
H = P.eval_hamiltonian(L.uend)
E = P.eval_mode_energy(L.uend)
self.add_to_stats(
process=step.status.slot,
time=L.time,
level=-1,
iter=step.status.iter,
sweep=L.status.sweep,
type='hamiltonian',
value=H,
)
self.add_to_stats(
process=step.status.slot,
time=L.time,
level=-1,
iter=step.status.iter,
sweep=L.status.sweep,
type='err_hamiltonian',
value=abs(self.ham_init - H),
)
self.add_to_stats(
process=step.status.slot,
time=L.time,
level=-1,
iter=step.status.iter,
sweep=L.status.sweep,
type='energy_iter',
value=E,
)
return None
def post_step(self, step, level_number):
"""
        Overwrite standard post step hook
Args:
step (pySDC.Step.step): the current step
level_number (int): the current level number
"""
super(hamiltonian_and_energy_output, self).post_step(step, level_number)
# some abbreviations
L = step.levels[0]
P = L.prob
E = P.eval_mode_energy(L.uend)
self.add_to_stats(
process=step.status.slot,
time=L.time,
level=-1,
iter=step.status.iter,
sweep=L.status.sweep,
type='position',
value=L.uend.pos,
)
self.add_to_stats(
process=step.status.slot,
time=L.time,
level=-1,
iter=step.status.iter,
sweep=L.status.sweep,
type='energy_step',
value=E,
)
return None
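# Usage sketch (an illustration, not part of the original module): the hook is
# registered through the controller parameters like any pySDC hook, e.g.
#
#   controller_params['hook_class'] = hamiltonian_and_energy_output
#
# after which entries of type 'hamiltonian', 'err_hamiltonian', 'energy_iter',
# 'position' and 'energy_step' can be read back via get_sorted(stats, type=...).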
| 2,859 | 25.481481 | 85 | py |
pySDC | pySDC-master/pySDC/projects/compression/compression_convergence_controller.py | from pySDC.core.ConvergenceController import ConvergenceController
import numpy as np
np.bool = np.bool_  # restore the deprecated numpy alias that the libpressio bindings still reference
import libpressio
class Compression(ConvergenceController):
def setup(self, controller, params, description, **kwargs):
default_compressor_args = {
# configure which compressor to use
"compressor_id": "sz3",
# configure the set of metrics to be gathered
"early_config": {"pressio:metric": "composite", "composite:plugins": ["time", "size", "error_stat"]},
# configure SZ
"compressor_config": {
"pressio:abs": 1e-10,
},
}
defaults = {
'control_order': 0,
**super().setup(controller, params, description, **kwargs),
'compressor_args': {**default_compressor_args, **params.get('compressor_args', {})},
'min_buffer_length': 12,
}
self.compressor = libpressio.PressioCompressor.from_config(defaults['compressor_args'])
return defaults
def post_iteration_processing(self, controller, S, **kwargs):
"""
Replace the solution by the compressed value
"""
assert len(S.levels) == 1
lvl = S.levels[0]
prob = lvl.prob
nodes = np.append(0, lvl.sweep.coll.nodes)
encode_buffer = np.zeros(max([len(lvl.u[0]), self.params.min_buffer_length]))
decode_buffer = np.zeros_like(encode_buffer)
for i in range(len(lvl.u)):
encode_buffer[: len(lvl.u[i])] = lvl.u[i][:]
comp_data = self.compressor.encode(encode_buffer)
decode_buffer = self.compressor.decode(comp_data, decode_buffer)
lvl.u[i][:] = decode_buffer[: len(lvl.u[i])]
lvl.f[i] = prob.eval_f(lvl.u[i], lvl.time + lvl.dt * nodes[i])
# metrics = self.compressor.get_metrics()
# print(metrics)
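# Usage sketch (hedged; mirrors how this controller is attached in order.py in
# this project): add it to the description dictionary of a run, e.g.
#
#   description['convergence_controllers'] = {
#       Compression: {'compressor_args': {'compressor_config': {'pressio:abs': 1e-10}}}
#   }
#
# so that after each iteration the solution at all collocation nodes is
# compressed and decompressed once, emulating lossy-compression noise of the
# given absolute error bound.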
| 1,910 | 34.388889 | 113 | py |
pySDC | pySDC-master/pySDC/projects/compression/order.py | import numpy as np
import matplotlib.pyplot as plt
from pySDC.projects.Resilience.advection import run_advection
from pySDC.helpers.stats_helper import get_sorted
from pySDC.helpers.plot_helper import figsize_by_journal
import pySDC.implementations.hooks.log_errors as error_hooks
from pySDC.projects.compression.compression_convergence_controller import Compression
MACHINEPRECISION = (
1e-8 # generous tolerance below which we ascribe errors to floating point rounding errors rather than compression
)
LOGGER_LEVEL = 30
def single_run(problem, description=None, thresh=1e-10, Tend=2e-1, useMPI=False, num_procs=1):
description = {} if description is None else description
compressor_args = {}
compressor_args['compressor_config'] = {'pressio:abs': thresh}
if thresh > 0:
description['convergence_controllers'] = {Compression: {'compressor_args': compressor_args}}
controller_params = {
'mssdc_jac': False,
'logger_level': LOGGER_LEVEL,
}
error_hook = error_hooks.LogGlobalErrorPostRunMPI if useMPI else error_hooks.LogGlobalErrorPostRun
stats, _, _ = problem(
custom_description=description,
hook_class=error_hook,
Tend=Tend,
use_MPI=useMPI,
num_procs=num_procs,
custom_controller_params=controller_params,
)
if useMPI:
from mpi4py import MPI
comm = MPI.COMM_WORLD
else:
comm = None
e = min([me[1] for me in get_sorted(stats, type='e_global_post_run', comm=comm)])
return e
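# Minimal usage sketch (hypothetical parameter values): one advection run with a
# compression error bound of 1e-10,
#
#   e = single_run(run_advection, description={'level_params': {'dt': 1e-2}}, thresh=1e-10)
#
# returns the global error at the end of the run.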
def multiple_runs(problem, values, expected_order, mode='dt', thresh=1e-10, useMPI=False, num_procs=1, **kwargs):
errors = np.zeros_like(values)
description = {
'level_params': {},
        'problem_params': {},
'step_params': {},
}
if mode == 'dt':
description['step_params'] = {'maxiter': expected_order}
elif mode == 'nvars':
description['problem_params'] = {'order': expected_order}
for i in range(len(values)):
if mode == 'dt':
description['level_params']['dt'] = values[i]
Tend = values[i] * (5 if num_procs == 1 else 2 * num_procs)
elif mode == 'nvars':
description['problem_params']['nvars'] = values[i]
Tend = 2e-1
errors[i] = single_run(problem, description, thresh=thresh, Tend=Tend, useMPI=useMPI, num_procs=num_procs)
return values, errors
def get_order(values, errors, thresh=1e-16, expected_order=None):
values = np.array(values)
idx = np.argsort(values)
local_orders = np.log(errors[idx][1:] / errors[idx][:-1]) / np.log(values[idx][1:] / values[idx][:-1])
order = np.mean(local_orders[errors[idx][1:] > max([thresh, MACHINEPRECISION])])
if expected_order is not None:
assert np.isclose(order, expected_order, atol=0.5), f"Expected order {expected_order}, but got {order:.2f}!"
return order
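# Worked example (hypothetical numbers): for a third-order method, halving the
# step size reduces the error by about 2**3, so
#
#   get_order(np.array([1e-1, 5e-2]), np.array([8e-5, 1e-5]))
#
# evaluates log(8e-5 / 1e-5) / log(1e-1 / 5e-2) = 3.0.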
def plot_order(values, errors, ax, thresh=1e-16, color='black', expected_order=None, **kwargs):
values = np.array(values)
order = get_order(values, errors, thresh=thresh, expected_order=expected_order)
ax.scatter(values, errors, color=color, **kwargs)
ax.loglog(values, errors[0] * (values / values[0]) ** order, color=color, label=f'p={order:.2f}', **kwargs)
def plot_order_in_time(ax, thresh, useMPI=False, num_procs=1):
problem = run_advection
base_configs_dt = {
'values': np.array([2.0 ** (-i) for i in [2, 3, 4, 5, 6, 7, 8, 9]]),
'mode': 'dt',
'ax': ax,
'thresh': thresh,
}
configs_dt = {}
configs_dt[2] = {**base_configs_dt, 'color': 'black'}
configs_dt[3] = {**base_configs_dt, 'color': 'magenta'}
configs_dt[4] = {**base_configs_dt, 'color': 'teal'}
configs_dt[5] = {**base_configs_dt, 'color': 'orange'}
# configs_dt[6] = {**base_configs_dt, 'color': 'blue'}
for key in configs_dt.keys():
values, errors = multiple_runs(
problem, expected_order=key, useMPI=useMPI, **configs_dt[key], num_procs=num_procs
)
plot_order(
values,
errors,
ax=configs_dt[key]['ax'],
thresh=configs_dt[key]['thresh'] * 1e2,
color=configs_dt[key]['color'],
expected_order=key + 1,
)
base_configs_dt['ax'].set_xlabel(r'$\Delta t$')
base_configs_dt['ax'].set_ylabel('local error')
base_configs_dt['ax'].axhline(
base_configs_dt['thresh'], color='grey', ls='--', label=rf'$\|\delta\|={{{thresh:.0e}}}$'
)
base_configs_dt['ax'].legend(frameon=False)
def order_in_time_different_error_bounds():
fig, axs = plt.subplots(
2, 2, figsize=figsize_by_journal('Springer_Numerical_Algorithms', 1.0, 1.0), sharex=True, sharey=True
)
threshs = [1e-6, 1e-8, 1e-10, 1e-12]
for i in range(len(threshs)):
ax = axs.flatten()[i]
plot_order_in_time(ax, threshs[i])
if i != 2:
ax.set_ylabel('')
ax.set_xlabel('')
fig.suptitle('Order in time for advection problem')
fig.tight_layout()
fig.savefig('compression-order-time.pdf')
if __name__ == '__main__':
order_in_time_different_error_bounds()
# base_configs_nvars = {
# 'values': [128, 256, 512, 1024],
# # 'values': np.array([2**(i) for i in [4, 5, 6, 7, 8, 9]]),
# 'mode': 'nvars',
# }
# configs_nvars = {}
# configs_nvars[2] = {**base_configs_nvars, 'color': 'black'}
# configs_nvars[4] = {**base_configs_nvars, 'color': 'magenta'}
# for key in configs_nvars.keys():
# values, errors = multiple_runs(problem, expected_order=key, **configs_nvars[key])
# plot_order(values, errors, axs[1], color=configs_nvars[key]['color'])
plt.show()
| 5,766 | 33.532934 | 118 | py |
pySDC | pySDC-master/pySDC/projects/matrixPFASST/compare_to_matrixbased.py | import numpy as np
from pathlib import Path
from pySDC.helpers.stats_helper import get_sorted
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.implementations.problem_classes.AdvectionEquation_ND_FD import advectionNd
from pySDC.implementations.problem_classes.HeatEquation_ND_FD import heatNd_unforced
from pySDC.implementations.problem_classes.TestEquation_0D import testequation0d
from pySDC.implementations.sweeper_classes.generic_implicit import generic_implicit
from pySDC.implementations.transfer_classes.TransferMesh import mesh_to_mesh
from pySDC.implementations.transfer_classes.TransferMesh_NoCoarse import mesh_to_mesh as mesh_to_mesh_nocoarse
from pySDC.projects.matrixPFASST.controller_matrix_nonMPI import controller_matrix_nonMPI
def diffusion_setup(par=0.0):
"""
    Setup routine for diffusion test
Args:
par (float): parameter for controlling stiffness
"""
# initialize level parameters
level_params = dict()
level_params['restol'] = 1e-08
level_params['dt'] = 0.25
level_params['nsweeps'] = [3, 1]
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['num_nodes'] = 3
sweeper_params['QI'] = 'LU' # For the IMEX sweeper, the LU-trick can be activated for the implicit part
sweeper_params['initial_guess'] = 'spread'
# initialize problem parameters
problem_params = dict()
problem_params['nu'] = par # diffusion coefficient
problem_params['freq'] = 4 # frequency for the test value
problem_params['nvars'] = [127, 63] # number of degrees of freedom for each level
problem_params['bc'] = 'dirichlet-zero' # boundary conditions
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 50
# initialize space transfer parameters
space_transfer_params = dict()
space_transfer_params['rorder'] = 2
space_transfer_params['iorder'] = 2
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 30
controller_params['all_to_done'] = True
# fill description dictionary for easy step instantiation
description = dict()
description['problem_class'] = heatNd_unforced # pass problem class
description['problem_params'] = problem_params # pass problem parameters
description['sweeper_class'] = generic_implicit # pass sweeper
description['sweeper_params'] = sweeper_params # pass sweeper parameters
description['level_params'] = level_params # pass level parameters
description['step_params'] = step_params # pass step parameters
description['space_transfer_class'] = mesh_to_mesh # pass spatial transfer class
    description['space_transfer_params'] = space_transfer_params  # pass parameters for spatial transfer
return description, controller_params
def advection_setup(par=0.0):
"""
Setup routine for advection test
Args:
par (float): parameter for controlling stiffness
"""
# initialize level parameters
level_params = dict()
level_params['restol'] = 1e-08
level_params['dt'] = 0.25
level_params['nsweeps'] = [3, 1]
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['num_nodes'] = [3]
sweeper_params['QI'] = ['LU'] # For the IMEX sweeper, the LU-trick can be activated for the implicit part
sweeper_params['initial_guess'] = 'spread'
# initialize problem parameters
problem_params = dict()
problem_params['c'] = par
problem_params['freq'] = 4 # frequency for the test value
problem_params['nvars'] = [128, 64] # number of degrees of freedom for each level
problem_params['order'] = 2
problem_params['stencil_type'] = 'center'
problem_params['bc'] = 'periodic' # boundary conditions
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 50
# initialize space transfer parameters
space_transfer_params = dict()
space_transfer_params['rorder'] = 2
space_transfer_params['iorder'] = 2
space_transfer_params['periodic'] = True
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 30
controller_params['all_to_done'] = True
# fill description dictionary for easy step instantiation
description = dict()
description['problem_class'] = advectionNd # pass problem class
description['problem_params'] = problem_params
description['sweeper_class'] = generic_implicit # pass sweeper (see part B)
description['sweeper_params'] = sweeper_params # pass sweeper parameters
description['level_params'] = level_params # pass level parameters
description['step_params'] = step_params # pass step parameters
description['space_transfer_class'] = mesh_to_mesh # pass spatial transfer class
    description['space_transfer_params'] = space_transfer_params  # pass parameters for spatial transfer
return description, controller_params
def testequation_setup():
"""
Setup routine for the test equation
"""
# initialize level parameters
level_params = dict()
level_params['restol'] = 1e-08
level_params['dt'] = 0.25
level_params['nsweeps'] = [3, 1]
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['num_nodes'] = [3, 2]
sweeper_params['QI'] = 'LU'
sweeper_params['initial_guess'] = 'spread'
# initialize problem parameters
problem_params = dict()
problem_params['u0'] = 1.0 # initial value (for all instances)
# use single values like this...
# problem_params['lambdas'] = [[-1.0]]
# .. or a list of values like this ...
# problem_params['lambdas'] = [[-1.0, -2.0, 1j, -1j]]
# .. or a whole block of values like this
ilim_left = -11
ilim_right = 0
rlim_left = 0
rlim_right = 11
ilam = 1j * np.logspace(ilim_left, ilim_right, 11)
rlam = -1 * np.logspace(rlim_left, rlim_right, 11)
lambdas = []
for rl in rlam:
for il in ilam:
lambdas.append(rl + il)
problem_params['lambdas'] = [lambdas]
# note: PFASST will do all of those at once, but without interaction (realized via diagonal matrix).
# The propagation matrix will be diagonal too, corresponding to the respective lambda value.
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 50
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 30
controller_params['all_to_done'] = True
# fill description dictionary for easy step instantiation
description = dict()
description['problem_class'] = testequation0d # pass problem class
description['problem_params'] = problem_params # pass problem parameters
description['sweeper_class'] = generic_implicit # pass sweeper
description['sweeper_params'] = sweeper_params # pass sweeper parameters
description['level_params'] = level_params # pass level parameters
description['step_params'] = step_params # pass step parameters
description['space_transfer_class'] = mesh_to_mesh_nocoarse # pass spatial transfer class
    description['space_transfer_params'] = dict()  # pass parameters for spatial transfer
return description, controller_params
def compare_controllers(type=None, par=0.0, f=None):
"""
A simple test program to compare PFASST runs with matrix-based and matrix-free controllers
Args:
type (str): setup type
        par (float): parameter for controlling stiffness
f: file handler
"""
# set time parameters
t0 = 0.0
Tend = 1.0
if type == 'diffusion':
description, controller_params = diffusion_setup(par)
elif type == 'advection':
description, controller_params = advection_setup(par)
elif type == 'testequation':
description, controller_params = testequation_setup()
else:
        raise ValueError('No valid setup type provided, aborting..')
out = '\nWorking with %s setup and parameter %3.1e..' % (type, par)
f.write(out + '\n')
print(out)
# instantiate controller
controller_mat = controller_matrix_nonMPI(num_procs=4, controller_params=controller_params, description=description)
controller_nomat = controller_nonMPI(num_procs=4, controller_params=controller_params, description=description)
# get initial values on finest level
P = controller_nomat.MS[0].levels[0].prob
uinit = P.u_exact(t0)
uex = P.u_exact(Tend)
# this is where the iteration is happening
uend_mat, stats_mat = controller_mat.run(u0=uinit, t0=t0, Tend=Tend)
uend_nomat, stats_nomat = controller_nomat.run(u0=uinit, t0=t0, Tend=Tend)
diff = abs(uend_mat - uend_nomat)
err_mat = abs(uend_mat - uex)
err_nomat = abs(uend_nomat - uex)
out = ' Error (mat/nomat) vs. exact solution: %6.4e -- %6.4e' % (err_mat, err_nomat)
f.write(out + '\n')
print(out)
out = ' Difference between both results: %6.4e' % diff
f.write(out + '\n')
print(out)
assert diff < 2.3e-15, 'ERROR: difference between matrix-based and matrix-free result is too large, got %s' % diff
# get and convert statistics to list of iterations count, sorted by process
iter_counts_mat = get_sorted(stats_mat, type='niter', sortby='time')
iter_counts_nomat = get_sorted(stats_nomat, type='niter', sortby='time')
out = ' Iteration counts for matrix-based version: %s' % iter_counts_mat
f.write(out + '\n')
print(out)
out = ' Iteration counts for matrix-free version: %s' % iter_counts_nomat
f.write(out + '\n')
print(out)
assert (
iter_counts_nomat == iter_counts_mat
    ), 'ERROR: the numbers of iterations differ between matrix-based and matrix-free controller'
def main():
par_list = [1e-02, 1.0, 1e02]
Path("data").mkdir(parents=True, exist_ok=True)
f = open('data/comparison_matrix_vs_nomat_detail.txt', 'w')
for par in par_list:
compare_controllers(type='diffusion', par=par, f=f)
compare_controllers(type='advection', par=par, f=f)
compare_controllers(type='testequation', par=0.0, f=f)
f.close()
if __name__ == "__main__":
main()
| 10,484 | 36.580645 | 120 | py |
pySDC | pySDC-master/pySDC/projects/matrixPFASST/compare_to_propagator.py | import numpy as np
from pathlib import Path
from pySDC.helpers.stats_helper import get_sorted
from pySDC.implementations.problem_classes.AdvectionEquation_ND_FD import advectionNd
from pySDC.implementations.problem_classes.HeatEquation_ND_FD import heatNd_unforced
from pySDC.implementations.problem_classes.TestEquation_0D import testequation0d
from pySDC.implementations.sweeper_classes.generic_implicit import generic_implicit
from pySDC.implementations.transfer_classes.TransferMesh import mesh_to_mesh
from pySDC.implementations.transfer_classes.TransferMesh_NoCoarse import mesh_to_mesh as mesh_to_mesh_nocoarse
from pySDC.projects.matrixPFASST.controller_matrix_nonMPI import controller_matrix_nonMPI
def diffusion_setup(par=0.0):
"""
    Setup routine for diffusion test
Args:
par (float): parameter for controlling stiffness
"""
# initialize level parameters
level_params = dict()
level_params['restol'] = 1e-08
level_params['dt'] = 0.25
level_params['nsweeps'] = [3, 1]
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['num_nodes'] = 3
sweeper_params['QI'] = 'LU'
sweeper_params['initial_guess'] = 'spread'
# initialize problem parameters
problem_params = dict()
problem_params['nu'] = par # diffusion coefficient
problem_params['freq'] = 4 # frequency for the test value
problem_params['nvars'] = [127] # number of degrees of freedom for each level
problem_params['bc'] = 'dirichlet-zero' # boundary conditions
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 50
# initialize space transfer parameters
space_transfer_params = dict()
space_transfer_params['rorder'] = 2
space_transfer_params['iorder'] = 2
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 30
# fill description dictionary for easy step instantiation
description = dict()
description['problem_class'] = heatNd_unforced # pass problem class
description['problem_params'] = problem_params # pass problem parameters
description['sweeper_class'] = generic_implicit # pass sweeper
description['sweeper_params'] = sweeper_params # pass sweeper parameters
description['level_params'] = level_params # pass level parameters
description['step_params'] = step_params # pass step parameters
description['space_transfer_class'] = mesh_to_mesh # pass spatial transfer class
    description['space_transfer_params'] = space_transfer_params  # pass parameters for spatial transfer
return description, controller_params
def advection_setup(par=0.0):
"""
Setup routine for advection test
Args:
par (float): parameter for controlling stiffness
"""
# initialize level parameters
level_params = dict()
level_params['restol'] = 1e-08
level_params['dt'] = 0.25
level_params['nsweeps'] = [3, 1]
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['num_nodes'] = [3]
sweeper_params['QI'] = ['LU']
sweeper_params['initial_guess'] = 'spread'
# initialize problem parameters
problem_params = dict()
problem_params['c'] = par
problem_params['freq'] = 4 # frequency for the test value
problem_params['nvars'] = [128, 64] # number of degrees of freedom for each level
problem_params['order'] = 2
problem_params['stencil_type'] = 'center'
problem_params['bc'] = 'periodic' # boundary conditions
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 50
# initialize space transfer parameters
space_transfer_params = dict()
space_transfer_params['rorder'] = 2
space_transfer_params['iorder'] = 2
space_transfer_params['periodic'] = True
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 30
# fill description dictionary for easy step instantiation
description = dict()
description['problem_class'] = advectionNd # pass problem class
description['problem_params'] = problem_params
description['sweeper_class'] = generic_implicit # pass sweeper (see part B)
description['sweeper_params'] = sweeper_params # pass sweeper parameters
description['level_params'] = level_params # pass level parameters
description['step_params'] = step_params # pass step parameters
description['space_transfer_class'] = mesh_to_mesh # pass spatial transfer class
    description['space_transfer_params'] = space_transfer_params  # pass parameters for spatial transfer
return description, controller_params
def scalar_equation_setup():
"""
Setup routine for the test equation
"""
# initialize level parameters
level_params = dict()
level_params['restol'] = 1e-08
level_params['dt'] = 0.25
level_params['nsweeps'] = [3, 1]
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['num_nodes'] = [3, 2]
sweeper_params['QI'] = 'LU'
sweeper_params['initial_guess'] = 'spread'
# initialize problem parameters
problem_params = dict()
problem_params['u0'] = 1.0 # initial value (for all instances)
# use single values like this...
# problem_params['lambdas'] = [[-1.0]]
# .. or a list of values like this ...
# problem_params['lambdas'] = [[-1.0, -2.0, 1j, -1j]]
# .. or a whole block of values like this
ilim_left = -11
ilim_right = 0
rlim_left = 0
rlim_right = 11
ilam = 1j * np.logspace(ilim_left, ilim_right, 11)
rlam = -1 * np.logspace(rlim_left, rlim_right, 11)
lambdas = []
for rl in rlam:
for il in ilam:
lambdas.append(rl + il)
problem_params['lambdas'] = [lambdas]
# note: PFASST will do all of those at once, but without interaction (realized via diagonal matrix).
# The propagation matrix will be diagonal too, corresponding to the respective lambda value.
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 50
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 30
# fill description dictionary for easy step instantiation
description = dict()
description['problem_class'] = testequation0d # pass problem class
description['problem_params'] = problem_params # pass problem parameters
description['sweeper_class'] = generic_implicit # pass sweeper
description['sweeper_params'] = sweeper_params # pass sweeper parameters
description['level_params'] = level_params # pass level parameters
description['step_params'] = step_params # pass step parameters
description['space_transfer_class'] = mesh_to_mesh_nocoarse # pass spatial transfer class
    description['space_transfer_params'] = dict()  # pass parameters for spatial transfer
return description, controller_params
def compare_controllers(type=None, par=0.0, f=None):
"""
A simple test program to compare PFASST runs with matrix-based and matrix-free controllers
Args:
type (str): setup type
par (float): parameter for controlling stiffness
f: file handler
"""
# set time parameters
t0 = 0.0
Tend = 1.0
if type == 'diffusion':
description, controller_params = diffusion_setup(par)
elif type == 'advection':
description, controller_params = advection_setup(par)
elif type == 'testequation':
description, controller_params = scalar_equation_setup()
else:
        raise ValueError('No valid setup type provided, aborting..')
out = '\nWorking with %s setup and parameter %3.1e..' % (type, par)
f.write(out + '\n')
print(out)
# instantiate controller
controller = controller_matrix_nonMPI(num_procs=4, controller_params=controller_params, description=description)
# get initial values on finest level
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(t0)
uex = P.u_exact(Tend)
# this is where the iteration is happening
uend_mat, stats_mat = controller.run(u0=uinit, t0=t0, Tend=Tend)
# filter statistics by type (number of iterations)
iter_counts_mat = get_sorted(stats_mat, type='niter', sortby='time')
out = ' Iteration counts for matrix-based version: %s' % iter_counts_mat
f.write(out + '\n')
print(out)
# filter only iteration counts and check for equality
niters = [item[1] for item in iter_counts_mat]
assert niters.count(niters[0]) == len(niters), 'ERROR: not all time-steps have the same number of iterations'
niter = niters[0]
# build propagation matrix using the prescribed number of iterations (or any other, if needed)
prop = controller.build_propagation_matrix(niter=niter)
err_prop_ex = np.linalg.norm(prop.dot(uinit) - uex)
err_mat_ex = np.linalg.norm(uend_mat - uex)
out = ' Error (mat/prop) vs. exact solution: %6.4e -- %6.4e' % (err_mat_ex, err_prop_ex)
f.write(out + '\n')
print(out)
err_mat_prop = np.linalg.norm(prop.dot(uinit) - uend_mat)
out = ' Difference between matrix-PFASST and propagator: %6.4e' % err_mat_prop
f.write(out + '\n')
print(out)
assert err_mat_prop < 2.0e-14, (
'ERROR: difference between matrix-based and propagator result is too large, got %s' % err_mat_prop
)
def main():
par_list = [1e-02, 1.0, 1e02]
Path("data").mkdir(parents=True, exist_ok=True)
f = open('data/comparison_matrix_vs_propagator_detail.txt', 'w')
for par in par_list:
compare_controllers(type='diffusion', par=par, f=f)
compare_controllers(type='advection', par=par, f=f)
compare_controllers(type='testequation', par=0.0, f=f)
f.close()
if __name__ == "__main__":
main()
| 10,054 | 36.103321 | 116 | py |
pySDC | pySDC-master/pySDC/projects/matrixPFASST/controller_matrix_nonMPI.py | import numpy as np
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.implementations.sweeper_classes.generic_implicit import generic_implicit
class controller_matrix_nonMPI(controller_nonMPI):
"""
PFASST controller, running serial matrix-based versions
"""
def __init__(self, num_procs, controller_params, description):
"""
Initialization routine for PFASST controller
Args:
num_procs: number of parallel time steps (still serial, though), can be 1
controller_params: parameter set for the controller and the steps
description: all the parameters to set up the rest (levels, problems, transfer, ...)
"""
assert description['sweeper_class'] is generic_implicit, (
'ERROR: matrix version will only work with generic_implicit sweeper, got %s' % description['sweeper_class']
)
# call parent's initialization routine
super(controller_matrix_nonMPI, self).__init__(
num_procs=num_procs, controller_params=controller_params, description=description
)
self.nsteps = len(self.MS)
self.nlevels = len(self.MS[0].levels)
self.nnodes = self.MS[0].levels[0].sweep.coll.num_nodes
self.nspace = self.MS[0].levels[0].prob.init[0]
self.dt = self.MS[0].levels[0].dt
self.tol = self.MS[0].levels[0].params.restol
self.maxiter = self.MS[0].params.maxiter
prob = self.MS[0].levels[0].prob
assert isinstance(self.nspace, int), 'ERROR: can only handle 1D data, got %s' % self.nspace
assert [level.sweep.coll.right_is_node for step in self.MS for level in step.levels].count(
True
) == self.nlevels * self.nsteps, 'ERROR: all collocation nodes have to be of Gauss-Radau type'
assert self.nlevels <= 2, 'ERROR: cannot use matrix-PFASST with more than 2 levels' # TODO: fixme
assert [level.dt for step in self.MS for level in step.levels].count(
self.dt
) == self.nlevels * self.nsteps, 'ERROR: dt must be equal for all steps and all levels'
# assert [level.sweep.coll.num_nodes for step in self.MS for level in step.levels].count(self.nnodes) == \
# self.nlevels * self.nsteps, 'ERROR: nnodes must be equal for all steps and all levels'
assert [type(level.prob) for step in self.MS for level in step.levels].count(
type(prob)
        ) == self.nlevels * self.nsteps, 'ERROR: all problem classes have to be the same'
assert self.params.predict_type is None, 'ERROR: no predictor for matrix controller yet' # TODO: fixme
assert hasattr(prob, 'A'), 'ERROR: need system matrix A for this (and linear problems!)'
A = prob.A.todense()
Q = self.MS[0].levels[0].sweep.coll.Qmat[1:, 1:]
Qd = self.MS[0].levels[0].sweep.QI[1:, 1:]
E = np.zeros((self.nsteps, self.nsteps))
np.fill_diagonal(E[1:, :], 1)
N = np.zeros((self.nnodes, self.nnodes))
N[:, -1] = 1
self.C = (
np.eye(self.nsteps * self.nnodes * self.nspace)
- self.dt * np.kron(np.eye(self.nsteps), np.kron(Q, A))
- np.kron(E, np.kron(N, np.eye(self.nspace)))
)
self.C = np.array(self.C)
self.P = np.eye(self.nsteps * self.nnodes * self.nspace) - self.dt * np.kron(
np.eye(self.nsteps), np.kron(Qd, A)
)
self.P = np.array(self.P)
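        # Descriptive note (added for readability): C is the composite collocation
        # matrix of the all-at-once system,
        #   C = I - dt * kron(I_steps, kron(Q, A)) - kron(E, kron(N, I_space)),
        # where E transfers the last node value of one step to the next, while
        #   P = I - dt * kron(I_steps, kron(Qd, A))
        # is the block-diagonal preconditioner built from the QI approximation Qd.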
if self.nlevels > 1:
prob_c = self.MS[0].levels[1].prob
self.nspace_c = prob_c.init[0]
Ac = prob_c.A.todense()
Qdc = self.MS[0].levels[1].sweep.QI[1:, 1:]
nnodesc = self.MS[0].levels[1].sweep.coll.num_nodes
Nc = np.zeros((nnodesc, nnodesc))
Nc[:, -1] = 1
if hasattr(self.MS[0].base_transfer.space_transfer, 'Pspace'):
TcfA = self.MS[0].base_transfer.space_transfer.Pspace.todense()
else:
TcfA = np.eye(self.nspace_c)
if hasattr(self.MS[0].base_transfer.space_transfer, 'Rspace'):
TfcA = self.MS[0].base_transfer.space_transfer.Rspace.todense()
else:
TfcA = np.eye(self.nspace)
TcfQ = self.MS[0].base_transfer.Pcoll
TfcQ = self.MS[0].base_transfer.Rcoll
self.Tcf = np.array(np.kron(np.eye(self.nsteps), np.kron(TcfQ, TcfA)))
self.Tfc = np.array(np.kron(np.eye(self.nsteps), np.kron(TfcQ, TfcA)))
self.Pc = (
np.eye(self.nsteps * nnodesc * self.nspace_c)
- self.dt * np.kron(np.eye(self.nsteps), np.kron(Qdc, Ac))
- np.kron(E, np.kron(Nc, np.eye(self.nspace_c)))
)
self.Pc = np.array(self.Pc)
self.u = np.zeros(self.nsteps * self.nnodes * self.nspace)
self.res = np.zeros(self.nsteps * self.nnodes * self.nspace)
self.u0 = np.zeros(self.nsteps * self.nnodes * self.nspace)
def run(self, u0, t0, Tend):
"""
Main driver for running the serial matrix version of SDC, MSSDC, MLSDC and PFASST
Args:
u0: initial values
t0: starting time
Tend: ending time
Returns:
end values on the finest level
stats object containing statistics for each step, each level and each iteration
"""
# some initializations and reset of statistics
uend = None
num_procs = len(self.MS)
for hook in self.hooks:
hook.reset_stats()
assert (
(Tend - t0) / self.dt
        ).is_integer(), 'ERROR: dt, t0, Tend were not chosen correctly: dt does not evenly divide the time interval'
assert int((Tend - t0) / self.dt) % num_procs == 0, 'ERROR: num_procs not chosen correctly'
# initial ordering of the steps: 0,1,...,Np-1
slots = list(range(num_procs))
# initialize time variables of each step
time = [t0 + sum(self.dt for _ in range(p)) for p in slots]
# initialize block of steps with u0
self.restart_block(slots, time, u0)
# call pre-run hook
for S in self.MS:
for hook in self.hooks:
hook.pre_run(step=S, level_number=0)
nblocks = int((Tend - t0) / self.dt / num_procs)
for _ in range(nblocks):
self.MS = self.pfasst(self.MS)
for p in slots:
time[p] += num_procs * self.dt
# uend is uend of the last active step in the list
uend = self.MS[-1].levels[0].uend
self.restart_block(slots, time, uend)
# call post-run hook
for S in self.MS:
for hook in self.hooks:
hook.post_run(step=S, level_number=0)
return uend, self.return_stats()
def build_propagation_matrix(self, niter):
"""
Helper routine to create propagation matrix if requested
Args:
niter: number of iterations
Returns:
mat: propagation matrix
"""
# build smoother iteration matrix and preconditioner using nsweeps
Pinv = np.linalg.inv(self.P)
precond_smoother = Pinv.copy()
iter_mat_smoother = np.eye(self.nsteps * self.nnodes * self.nspace) - precond_smoother.dot(self.C)
for k in range(1, self.MS[0].levels[0].params.nsweeps):
precond_smoother += np.linalg.matrix_power(iter_mat_smoother, k).dot(Pinv)
iter_mat_smoother = np.linalg.matrix_power(iter_mat_smoother, self.MS[0].levels[0].params.nsweeps)
# add coarse-grid correction (single sweep, though!)
if self.nlevels > 1:
precond_cgc = self.Tcf.dot(np.linalg.inv(self.Pc)).dot(self.Tfc)
iter_mat_cgc = np.eye(self.nsteps * self.nnodes * self.nspace) - precond_cgc.dot(self.C)
iter_mat = iter_mat_smoother.dot(iter_mat_cgc)
precond = precond_smoother + precond_cgc - precond_smoother.dot(self.C).dot(precond_cgc)
else:
iter_mat = iter_mat_smoother
precond = precond_smoother
# form span and reduce matrices and add to operator
Tspread = np.kron(np.concatenate([[1] * (self.nsteps * self.nnodes)]), np.eye(self.nspace)).T
Tnospread = np.kron(
np.concatenate([[1], [0] * (self.nsteps - 1)]), np.kron(np.ones(self.nnodes), np.eye(self.nspace))
).T
Treduce = np.kron(np.concatenate([[0] * (self.nsteps * self.nnodes - 1), [1]]), np.eye(self.nspace))
if self.MS[0].levels[0].sweep.params.initial_guess == 'spread':
mat = np.linalg.matrix_power(iter_mat, niter).dot(Tspread)
# mat = iter_mat_smoother.dot(Tspread) + precond_smoother.dot(Tnospread)
else:
mat = np.linalg.matrix_power(iter_mat, niter).dot(Tnospread)
# mat = iter_mat_smoother.dot(Tnospread) + precond_smoother.dot(Tnospread) # No, the latter is not a typo!
# build propagation matrix
# mat = np.linalg.matrix_power(iter_mat, niter - 1).dot(mat)
for k in range(niter):
mat += np.linalg.matrix_power(iter_mat, k).dot(precond).dot(Tnospread)
mat = Treduce.dot(mat)
return mat
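    # Usage sketch (hedged; mirrors compare_to_propagator.py): after a run with a
    # fixed iteration count, the propagation matrix reproduces the PFASST result:
    #
    #   prop = controller.build_propagation_matrix(niter=niter)
    #   err = np.linalg.norm(prop.dot(uinit) - uend)  # expected to be ~1e-14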
def restart_block(self, slots, time, u0):
"""
Helper routine to reset/restart block of steps
Args:
slots: list of steps
time: list of new times
u0: initial value to distribute across the steps
"""
# loop over steps
for p in slots:
# store current slot number for diagnostics
self.MS[p].status.slot = p
for p in slots:
for lvl in self.MS[p].levels:
lvl.status.time = time[p]
P = lvl.prob
for m in range(1, lvl.sweep.coll.num_nodes + 1):
lvl.u[m] = P.dtype_u(init=P.init, val=0.0)
lvl.f[m] = P.dtype_f(init=P.init, val=0.0)
self.u0 = np.kron(np.concatenate([[1], [0] * (self.nsteps - 1)]), np.kron(np.ones(self.nnodes), u0))
if self.MS[0].levels[0].sweep.params.initial_guess == 'spread':
self.u = np.kron(np.ones(self.nsteps * self.nnodes), u0)
else:
self.u = self.u0.copy()
self.res = np.zeros(self.nsteps * self.nnodes * self.nspace)
@staticmethod
def update_data(MS, u, res, niter, level, stage):
for S in MS:
S.status.stage = stage
S.status.iter = niter
L = S.levels[level]
P = L.prob
nnodes = L.sweep.coll.num_nodes
nspace = P.init[0]
first = S.status.slot * nnodes * nspace
last = (S.status.slot + 1) * nnodes * nspace
L.status.residual = np.linalg.norm(res[first:last], np.inf)
for m in range(1, nnodes + 1):
mstart = first + (m - 1) * nspace
mend = first + m * nspace
L.u[m][:] = u[mstart:mend]
L.f[m] = P.eval_f(L.u[m], L.time + L.dt * L.sweep.coll.nodes[m - 1])
S.levels[level].sweep.compute_end_point()
return MS
def pfasst(self, MS):
"""
Main function including the stages of SDC, MLSDC and PFASST (the "controller")
Args:
MS: all active steps
Returns:
all active steps
"""
niter = 0
self.res = self.u0 - self.C.dot(self.u)
MS = self.update_data(MS=MS, u=self.u, res=self.res, niter=niter, level=0, stage='PRE_STEP')
for S in MS:
for hook in self.hooks:
hook.pre_step(step=S, level_number=0)
while np.linalg.norm(self.res, np.inf) > self.tol and niter < self.maxiter:
niter += 1
MS = self.update_data(MS=MS, u=self.u, res=self.res, niter=niter, level=0, stage='PRE_ITERATION')
for S in MS:
for hook in self.hooks:
hook.pre_iteration(step=S, level_number=0)
if self.nlevels > 1:
for _ in range(MS[0].levels[1].params.nsweeps):
MS = self.update_data(MS=MS, u=self.u, res=self.res, niter=niter, level=1, stage='PRE_COARSE_SWEEP')
for S in MS:
for hook in self.hooks:
hook.pre_sweep(step=S, level_number=1)
self.u += self.Tcf.dot(np.linalg.solve(self.Pc, self.Tfc.dot(self.res)))
self.res = self.u0 - self.C.dot(self.u)
MS = self.update_data(
MS=MS, u=self.u, res=self.res, niter=niter, level=1, stage='POST_COARSE_SWEEP'
)
for S in MS:
for hook in self.hooks:
hook.post_sweep(step=S, level_number=1)
for _ in range(MS[0].levels[0].params.nsweeps):
MS = self.update_data(MS=MS, u=self.u, res=self.res, niter=niter, level=0, stage='PRE_FINE_SWEEP')
for S in MS:
for hook in self.hooks:
hook.pre_sweep(step=S, level_number=0)
self.u += np.linalg.solve(self.P, self.res)
self.res = self.u0 - self.C.dot(self.u)
MS = self.update_data(MS=MS, u=self.u, res=self.res, niter=niter, level=0, stage='POST_FINE_SWEEP')
for S in MS:
for hook in self.hooks:
hook.post_sweep(step=S, level_number=0)
MS = self.update_data(MS=MS, u=self.u, res=self.res, niter=niter, level=0, stage='POST_ITERATION')
for S in MS:
for hook in self.hooks:
hook.post_iteration(step=S, level_number=0)
MS = self.update_data(MS=MS, u=self.u, res=self.res, niter=niter, level=0, stage='POST_STEP')
for S in MS:
for hook in self.hooks:
hook.post_step(step=S, level_number=0)
return MS
| 14,111 | 38.418994 | 120 | py |
pySDC | pySDC-master/pySDC/projects/matrixPFASST/__init__.py | 0 | 0 | 0 | py |
|
pySDC | pySDC-master/pySDC/projects/Resilience/vdp.py | # script to run a van der Pol problem
import numpy as np
import matplotlib.pyplot as plt
from pySDC.helpers.stats_helper import get_sorted, get_list_of_types
from pySDC.implementations.problem_classes.Van_der_Pol_implicit import vanderpol
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.implementations.convergence_controller_classes.adaptivity import Adaptivity
from pySDC.core.Errors import ProblemError, ConvergenceError
from pySDC.projects.Resilience.hook import LogData, hook_collection
from pySDC.projects.Resilience.strategies import merge_descriptions
from pySDC.projects.Resilience.sweepers import generic_implicit_efficient
def plot_step_sizes(stats, ax, e_em_key='error_embedded_estimate'):
"""
Plot solution and step sizes to visualize the dynamics in the van der Pol equation.
Args:
stats (pySDC.stats): The stats object of the run
ax: Somewhere to plot
Returns:
None
"""
# convert filtered statistics to list of iterations count, sorted by process
u = np.array([me[1][0] for me in get_sorted(stats, type='u', recomputed=False, sortby='time')])
p = np.array([me[1][1] for me in get_sorted(stats, type='u', recomputed=False, sortby='time')])
t = np.array([me[0] for me in get_sorted(stats, type='u', recomputed=False, sortby='time')])
e_em = np.array(get_sorted(stats, type=e_em_key, recomputed=False, sortby='time'))[:, 1]
dt = np.array(get_sorted(stats, type='dt', recomputed=False, sortby='time'))
restart = np.array(get_sorted(stats, type='restart', recomputed=None, sortby='time'))
ax.plot(t, u, label=r'$u$')
ax.plot(t, p, label=r'$p$')
dt_ax = ax.twinx()
dt_ax.plot(dt[:, 0], dt[:, 1], color='black')
dt_ax.plot(t, e_em, color='magenta')
dt_ax.set_yscale('log')
dt_ax.set_ylim((5e-10, 3e-1))
ax.plot([None], [None], label=r'$\Delta t$', color='black')
ax.plot([None], [None], label=r'$\epsilon_\mathrm{embedded}$', color='magenta')
ax.plot([None], [None], label='restart', color='grey', ls='-.')
for i in range(len(restart)):
if restart[i, 1] > 0:
ax.axvline(restart[i, 0], color='grey', ls='-.')
ax.legend(frameon=False)
ax.set_xlabel('time')
def plot_avoid_restarts(stats, ax, avoid_restarts):
"""
    Make a plot that shows how many iterations were required to advance to each point in time in the simulation.
Also restarts are shown as vertical lines.
Args:
stats (pySDC.stats): The stats object of the run
ax: Somewhere to plot
avoid_restarts (bool): Whether the `avoid_restarts` option was set in order to choose a color
Returns:
None
"""
sweeps = get_sorted(stats, type='sweeps', recomputed=None)
restarts = get_sorted(stats, type='restart', recomputed=None)
color = 'blue' if avoid_restarts else 'red'
ls = ':' if not avoid_restarts else '-.'
label = 'with' if avoid_restarts else 'without'
ax.plot([me[0] for me in sweeps], np.cumsum([me[1] for me in sweeps]), color=color, label=f'{label} avoid_restarts')
[ax.axvline(me[0], color=color, ls=ls) for me in restarts if me[1]]
ax.set_xlabel(r'$t$')
ax.set_ylabel(r'$k$')
ax.legend(frameon=False)
def run_vdp(
custom_description=None,
num_procs=1,
Tend=10.0,
hook_class=LogData,
fault_stuff=None,
custom_controller_params=None,
use_MPI=False,
**kwargs,
):
"""
Run a van der Pol problem with default parameters.
Args:
custom_description (dict): Overwrite presets
num_procs (int): Number of steps for MSSDC
Tend (float): Time to integrate to
hook_class (pySDC.Hook): A hook to store data
fault_stuff (dict): A dictionary with information on how to add faults
custom_controller_params (dict): Overwrite presets
use_MPI (bool): Whether or not to use MPI
Returns:
dict: The stats object
controller: The controller
Tend: The time that was supposed to be integrated to
"""
# initialize level parameters
level_params = {}
level_params['dt'] = 1e-2
# initialize sweeper parameters
sweeper_params = {}
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['num_nodes'] = 3
sweeper_params['QI'] = 'LU'
problem_params = {
'mu': 5.0,
'newton_tol': 1e-9,
'newton_maxiter': 99,
'u0': np.array([2.0, 0.0]),
}
# initialize step parameters
step_params = {}
step_params['maxiter'] = 4
# initialize controller parameters
controller_params = {}
controller_params['logger_level'] = 30
controller_params['hook_class'] = hook_collection + (hook_class if type(hook_class) == list else [hook_class])
controller_params['mssdc_jac'] = False
if custom_controller_params is not None:
controller_params = {**controller_params, **custom_controller_params}
# fill description dictionary for easy step instantiation
description = {}
description['problem_class'] = vanderpol
description['problem_params'] = problem_params
description['sweeper_class'] = generic_implicit_efficient
description['sweeper_params'] = sweeper_params
description['level_params'] = level_params
description['step_params'] = step_params
if custom_description is not None:
description = merge_descriptions(description, custom_description)
# set time parameters
t0 = 0.0
# instantiate controller
if use_MPI:
from mpi4py import MPI
from pySDC.implementations.controller_classes.controller_MPI import controller_MPI
comm = kwargs.get('comm', MPI.COMM_WORLD)
controller = controller_MPI(controller_params=controller_params, description=description, comm=comm)
# get initial values on finest level
P = controller.S.levels[0].prob
uinit = P.u_exact(t0)
else:
controller = controller_nonMPI(
num_procs=num_procs, controller_params=controller_params, description=description
)
# get initial values on finest level
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(t0)
# insert faults
if fault_stuff is not None:
from pySDC.projects.Resilience.fault_injection import prepare_controller_for_faults
rnd_args = {'iteration': 3}
# args = {'time': 0.9, 'target': 0}
args = {'time': 5.25, 'target': 0}
prepare_controller_for_faults(controller, fault_stuff, rnd_args, args)
# call main function to get things done...
try:
uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
except (ProblemError, ConvergenceError):
print('Warning: Premature termination!')
stats = controller.return_stats()
return stats, controller, Tend
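# Usage sketch (hypothetical parameters, along the lines of the tests below):
#
#   desc = {'convergence_controllers': {Adaptivity: {'e_tol': 1e-7}}}
#   stats, controller, Tend = run_vdp(custom_description=desc, Tend=1.0)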
def fetch_test_data(stats, comm=None, use_MPI=False):
"""
Get data to perform tests on from stats
Args:
stats (pySDC.stats): The stats object of the run
comm (mpi4py.MPI.Comm): MPI communicator, or `None` for the non-MPI version
use_MPI (bool): Whether or not MPI was used when generating stats
Returns:
dict: Key values to perform tests on
"""
types = ['error_embedded_estimate', 'restart', 'dt', 'sweeps', 'residual_post_step']
data = {}
for type in types:
if type not in get_list_of_types(stats):
raise ValueError(f"Can't read type \"{type}\" from stats, only got", get_list_of_types(stats))
if comm is None or use_MPI is False:
data[type] = [me[1] for me in get_sorted(stats, type=type, recomputed=None, sortby='time')]
else:
data[type] = [me[1] for me in get_sorted(stats, type=type, recomputed=None, sortby='time', comm=comm)]
return data
def check_if_tests_match(data_nonMPI, data_MPI):
"""
Check if the data matches between MPI and nonMPI versions
Args:
data_nonMPI (dict): Key values to perform tests on obtained without MPI
data_MPI (dict): Key values to perform tests on obtained with MPI
Returns:
None
"""
ops = [np.mean, np.min, np.max, len, sum]
for type in data_nonMPI.keys():
for op in ops:
val_nonMPI = op(data_nonMPI[type])
val_MPI = op(data_MPI[type])
assert np.isclose(val_nonMPI, val_MPI), (
f"Mismatch in operation {op.__name__} on type \"{type}\": with {data_MPI['size'][0]} ranks: "
f"nonMPI: {val_nonMPI}, MPI: {val_MPI}"
)
print(f'Passed with {data_MPI["size"][0]} ranks')
def mpi_vs_nonMPI(MPI_ready, comm):
"""
Check if MPI and non-MPI versions give the same output.
Args:
MPI_ready (bool): Whether or not we can use MPI at all
comm (mpi4py.MPI.Comm): MPI communicator
Returns:
None
"""
if MPI_ready:
size = comm.size
rank = comm.rank
use_MPI = [True, False]
else:
size = 1
rank = 0
use_MPI = [False, False]
if rank == 0:
print(f"Running with {size} ranks")
custom_description = {'convergence_controllers': {}}
custom_description['convergence_controllers'][Adaptivity] = {'e_tol': 1e-7, 'avoid_restarts': False}
data = [{}, {}]
for i in range(2):
if use_MPI[i] or rank == 0:
stats, controller, Tend = run_vdp(
custom_description=custom_description,
num_procs=size,
use_MPI=use_MPI[i],
Tend=1.0,
comm=comm,
)
data[i] = fetch_test_data(stats, comm, use_MPI=use_MPI[i])
data[i]['size'] = [size]
if rank == 0:
check_if_tests_match(data[1], data[0])
def check_adaptivity_with_avoid_restarts(comm=None, size=1):
"""
    Test whether adaptivity with the option to avoid restarts based on a contraction factor estimate works as
    expected.
To this end, we run the same test of the van der Pol equation twice with the only difference being this option
turned off or on.
We recorded how many iterations we expect to avoid by avoiding restarts and check against this value.
Also makes a figure comparing the number of iterations over time.
In principle there is an option to test MSSDC here, but this is only preliminary and needs to be checked further.
Args:
comm (mpi4py.MPI.Comm): MPI communicator, or `None` for the non-MPI version
size (int): Number of steps for MSSDC, is overridden by communicator size if applicable
Returns:
None
"""
fig, ax = plt.subplots()
custom_description = {'convergence_controllers': {}, 'level_params': {'dt': 1.0e-2}}
custom_controller_params = {'all_to_done': False}
results = {'e': {}, 'sweeps': {}, 'restarts': {}}
size = comm.size if comm is not None else size
for avoid_restarts in [True, False]:
custom_description['convergence_controllers'][Adaptivity] = {'e_tol': 1e-7, 'avoid_restarts': avoid_restarts}
stats, controller, Tend = run_vdp(
custom_description=custom_description,
num_procs=size,
use_MPI=comm is not None,
custom_controller_params=custom_controller_params,
Tend=10.0e0,
comm=comm,
)
plot_avoid_restarts(stats, ax, avoid_restarts)
# check error
u = get_sorted(stats, type='u', recomputed=False)[-1]
if comm is None:
u_exact = controller.MS[0].levels[0].prob.u_exact(t=u[0])
else:
u_exact = controller.S.levels[0].prob.u_exact(t=u[0])
results['e'][avoid_restarts] = abs(u[1] - u_exact)
# check iteration counts
results['sweeps'][avoid_restarts] = sum(
[me[1] for me in get_sorted(stats, type='sweeps', recomputed=None, comm=comm)]
)
results['restarts'][avoid_restarts] = sum([me[1] for me in get_sorted(stats, type='restart', comm=comm)])
fig.tight_layout()
fig.savefig(f'data/vdp-{size}procs{"-use_MPI" if comm is not None else ""}-avoid_restarts.png')
assert np.isclose(results['e'][True], results['e'][False], rtol=5.0), (
'Errors don\'t match with avoid_restarts and without, got '
f'{results["e"][True]:.2e} and {results["e"][False]:.2e}'
)
if size == 1:
assert results['sweeps'][True] - results['sweeps'][False] == 1301 - 1344, (
            'Expected to save 43 iterations '
f"with avoid_restarts, got {results['sweeps'][False] - results['sweeps'][True]}"
)
assert results['restarts'][True] - results['restarts'][False] == 0 - 10, (
            'Expected to save 10 restarts '
f"with avoid_restarts, got {results['restarts'][False] - results['restarts'][True]}"
)
print('Passed avoid_restarts tests with 1 process')
if size == 4:
assert results['sweeps'][True] - results['sweeps'][False] == 2916 - 3008, (
            'Expected to save 92 iterations '
f"with avoid_restarts, got {results['sweeps'][False] - results['sweeps'][True]}"
)
assert results['restarts'][True] - results['restarts'][False] == 0 - 18, (
            'Expected to save 18 restarts '
f"with avoid_restarts, got {results['restarts'][False] - results['restarts'][True]}"
)
print('Passed avoid_restarts tests with 4 processes')
def check_step_size_limiter(size=4, comm=None):
"""
Check the step size limiter convergence controller.
    First we run without step size limits and then enforce limits that are slightly tighter than the step sizes the
    unconstrained run produced. Then we run again and check that the limits are respected.
Args:
size (int): Number of steps for MSSDC
comm (mpi4py.MPI.Comm): MPI communicator, or `None` for the non-MPI version
Returns:
None
"""
from pySDC.implementations.convergence_controller_classes.step_size_limiter import StepSizeLimiter
custom_description = {'convergence_controllers': {}, 'level_params': {'dt': 1.0e-2}}
expect = {}
params = {'e_tol': 1e-6}
for limit_step_sizes in [False, True]:
if limit_step_sizes:
params['dt_max'] = expect['dt_max'] * 0.9
params['dt_min'] = np.inf
params['dt_slope_max'] = expect['dt_slope_max'] * 0.9
params['dt_slope_min'] = expect['dt_slope_min'] * 1.1
custom_description['convergence_controllers'][StepSizeLimiter] = {'dt_min': expect['dt_min'] * 1.1}
else:
for k in ['dt_max', 'dt_min', 'dt_slope_max', 'dt_slope_min']:
params.pop(k, None)
custom_description['convergence_controllers'].pop(StepSizeLimiter, None)
custom_description['convergence_controllers'][Adaptivity] = params
stats, controller, Tend = run_vdp(
custom_description=custom_description,
num_procs=size,
use_MPI=comm is not None,
Tend=5.0e0,
comm=comm,
)
# plot the step sizes
dt = get_sorted(stats, type='dt', recomputed=None, comm=comm)
# make sure that the convergence controllers are only added once
convergence_controller_classes = [type(me) for me in controller.convergence_controllers]
for c in convergence_controller_classes:
assert convergence_controller_classes.count(c) == 1, f'Convergence controller {c} added multiple times'
dt_numpy = np.array([me[1] for me in dt])
if not limit_step_sizes:
expect['dt_max'] = max(dt_numpy)
expect['dt_min'] = min(dt_numpy)
expect['dt_slope_max'] = max(dt_numpy[:-2] / dt_numpy[1:-1])
expect['dt_slope_min'] = min(dt_numpy[:-2] / dt_numpy[1:-1])
else:
dt_max = max(dt_numpy)
dt_min = min(dt_numpy[size:-size]) # The first and last step might fall below the limits
dt_slope_max = max(dt_numpy[:-2] / dt_numpy[1:-1])
dt_slope_min = min(dt_numpy[:-2] / dt_numpy[1:-1])
assert (
dt_max <= expect['dt_max']
), f"Exceeded maximum allowed step size! Got {dt_max:.4e}, allowed {params['dt_max']:.4e}."
assert (
dt_min >= expect['dt_min']
), f"Exceeded minimum allowed step size! Got {dt_min:.4e}, allowed {params['dt_min']:.4e}."
assert (
dt_slope_max <= expect['dt_slope_max']
), f"Exceeded maximum allowed step size slope! Got {dt_slope_max:.4e}, allowed {params['dt_slope_max']:.4e}."
assert (
dt_slope_min >= expect['dt_slope_min']
), f"Exceeded minimum allowed step size slope! Got {dt_slope_min:.4e}, allowed {params['dt_slope_min']:.4e}."
if comm is None:
print(f'Passed step size limiter test with {size} ranks in nonMPI implementation')
else:
if comm.rank == 0:
print(f'Passed step size limiter test with {size} ranks in MPI implementation')
def interpolation_stuff(): # pragma: no cover
"""
    Plot the van der Pol problem with interpolation after a restart and compare it to other modes of adaptivity.
"""
from pySDC.implementations.convergence_controller_classes.interpolate_between_restarts import (
InterpolateBetweenRestarts,
)
from pySDC.implementations.hooks.log_errors import LogLocalErrorPostStep
from pySDC.implementations.hooks.log_work import LogWork
from pySDC.helpers.plot_helper import figsize_by_journal
fig, axs = plt.subplots(4, 1, figsize=figsize_by_journal('Springer_Numerical_Algorithms', 1.0, 1.0), sharex=True)
restart_ax = axs[2].twinx()
colors = ['black', 'red', 'blue']
labels = ['interpolate', 'regular', 'keep iterating']
for i in range(3):
convergence_controllers = {
Adaptivity: {'e_tol': 1e-7, 'dt_max': 9.0e-1},
}
if i == 0:
convergence_controllers[InterpolateBetweenRestarts] = {}
if i == 2:
convergence_controllers[Adaptivity]['avoid_restarts'] = True
problem_params = {
'mu': 5,
}
sweeper_params = {
'QI': 'LU',
}
custom_description = {
'convergence_controllers': convergence_controllers,
'problem_params': problem_params,
'sweeper_params': sweeper_params,
}
stats, controller, _ = run_vdp(
custom_description=custom_description,
hook_class=[LogLocalErrorPostStep, LogData, LogWork] + hook_collection,
)
k = get_sorted(stats, type='work_newton')
restarts = get_sorted(stats, type='restart')
u = get_sorted(stats, type='u', recomputed=False)
e_loc = get_sorted(stats, type='e_local_post_step', recomputed=False)
dt = get_sorted(stats, type='dt', recomputed=False)
axs[0].plot([me[0] for me in u], [me[1][1] for me in u], color=colors[i], label=labels[i])
axs[1].plot([me[0] for me in e_loc], [me[1] for me in e_loc], color=colors[i])
axs[2].plot([me[0] for me in k], np.cumsum([me[1] for me in k]), color=colors[i])
restart_ax.plot([me[0] for me in restarts], np.cumsum([me[1] for me in restarts]), color=colors[i], ls='--')
axs[3].plot([me[0] for me in dt], [me[1] for me in dt], color=colors[i])
for ax in [axs[1], axs[3]]:
ax.set_yscale('log')
axs[0].set_ylabel(r'$u$')
axs[1].set_ylabel(r'$e_\mathrm{local}$')
axs[2].set_ylabel(r'Newton iterations')
restart_ax.set_ylabel(r'restarts (dashed)')
axs[3].set_ylabel(r'$\Delta t$')
axs[3].set_xlabel(r'$t$')
axs[0].legend(frameon=False)
fig.tight_layout()
plt.show()
if __name__ == "__main__":
import sys
try:
from mpi4py import MPI
MPI_ready = True
comm = MPI.COMM_WORLD
size = comm.size
except ModuleNotFoundError:
MPI_ready = False
comm = None
size = 1
if len(sys.argv) == 1:
mpi_vs_nonMPI(MPI_ready, comm)
check_step_size_limiter(size, comm)
if size == 1:
check_adaptivity_with_avoid_restarts(comm=None, size=1)
elif 'mpi_vs_nonMPI' in sys.argv:
mpi_vs_nonMPI(MPI_ready, comm)
elif 'check_step_size_limiter' in sys.argv:
        check_step_size_limiter(size, comm)
    elif 'check_adaptivity_with_avoid_restarts' in sys.argv and size == 1:
check_adaptivity_with_avoid_restarts(comm=None, size=1)
else:
raise NotImplementedError('Your test is not implemented!')
| 21,069 | 37.101266 | 121 | py |
pySDC | pySDC-master/pySDC/projects/Resilience/dahlquist.py | # script to run a simple advection problem
from pySDC.implementations.problem_classes.TestEquation_0D import testequation0d
from pySDC.implementations.sweeper_classes.generic_implicit import generic_implicit
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.core.Hooks import hooks
from pySDC.helpers.stats_helper import get_sorted
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
from mpl_toolkits.axes_grid1 import make_axes_locatable
from pySDC.implementations.hooks.log_solution import LogSolutionAfterIteration
from pySDC.implementations.hooks.log_step_size import LogStepSize
from pySDC.projects.Resilience.strategies import merge_descriptions
class LogLambdas(hooks):
"""
Store the lambda values at the beginning of the run
"""
def pre_run(self, step, level_number):
super().pre_run(step, level_number)
L = step.levels[level_number]
self.add_to_stats(process=0, time=0, level=0, iter=0, sweep=0, type='lambdas', value=L.prob.lambdas)
hooks = [LogLambdas, LogSolutionAfterIteration, LogStepSize]
def run_dahlquist(
custom_description=None,
num_procs=1,
Tend=1.0,
hook_class=hooks,
fault_stuff=None,
custom_controller_params=None,
**kwargs,
):
"""
Run a Dahlquist problem with default parameters.
Args:
custom_description (dict): Overwrite presets
num_procs (int): Number of steps for MSSDC
Tend (float): Time to integrate to
hook_class (pySDC.Hook): A hook to store data
fault_stuff (dict): A dictionary with information on how to add faults
custom_controller_params (dict): Overwrite presets
Returns:
dict: The stats object
controller: The controller
Tend: The time that was supposed to be integrated to
"""
# initialize level parameters
level_params = dict()
level_params['dt'] = 1.0
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['num_nodes'] = 3
sweeper_params['QI'] = 'IE'
sweeper_params['initial_guess'] = 'random'
# build lambdas
re = np.linspace(-30, 30, 400)
im = np.linspace(-50, 50, 400)
lambdas = np.array([[complex(re[i], im[j]) for i in range(len(re))] for j in range(len(im))]).reshape(
(len(re) * len(im))
)
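    # each entry of the flattened array is one complex lambda; the 0D test equation treats
    # them as independent degrees of freedom, so a single run sweeps the whole grid at once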
problem_params = {
'lambdas': lambdas,
'u0': 1.0 + 0.0j,
}
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 5
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 30
controller_params['hook_class'] = hook_class
controller_params['mssdc_jac'] = False
if custom_controller_params is not None:
controller_params = {**controller_params, **custom_controller_params}
# fill description dictionary for easy step instantiation
description = dict()
description['problem_class'] = testequation0d # pass problem class
description['problem_params'] = problem_params # pass problem parameters
description['sweeper_class'] = generic_implicit # pass sweeper
description['sweeper_params'] = sweeper_params # pass sweeper parameters
description['level_params'] = level_params # pass level parameters
description['step_params'] = step_params
if custom_description is not None:
description = merge_descriptions(description, custom_description)
# set time parameters
t0 = 0.0
# instantiate controller
controller = controller_nonMPI(num_procs=num_procs, controller_params=controller_params, description=description)
# insert faults
if fault_stuff is not None:
raise NotImplementedError('No fault stuff here...')
# get initial values on finest level
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(t0)
# call main function to get things done...
uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
return stats, controller, Tend
def plot_stability(stats, ax=None, iter=None, colors=None, crosshair=True, fill=False, **kwargs):
"""
Plot the domain of stability by checking if the solution grows.
Args:
stats (pySDC.stats): The stats object of the run
ax: Somewhere to plot
iter (list): Check the stability for different numbers of iterations
colors (list): Colors for the different iterations
crosshair (bool): Whether to highlight the axes
fill (bool): Fill the contours or not
Returns:
bool: If the method is A-stable or not
"""
lambdas = get_sorted(stats, type='lambdas')[0][1]
u = get_sorted(stats, type='u', sortby='iter')
if ax is None:
fig, ax = plt.subplots(1, 1)
# decorate
if crosshair:
ax.axhline(0, color='black', alpha=1.0)
ax.axvline(0, color='black', alpha=1.0)
iter = [1] if iter is None else iter
colors = ['blue', 'red', 'violet', 'green'] if colors is None else colors
for i in iter:
# isolate the solutions from the iteration you want
U = np.reshape([me[1] for me in u if me[0] == i], (len(np.unique(lambdas.real)), len(np.unique(lambdas.imag))))
# get a grid for plotting
X, Y = np.meshgrid(np.unique(lambdas.real), np.unique(lambdas.imag))
if fill:
ax.contourf(X, Y, abs(U), levels=[-np.inf, 1 - np.finfo(float).eps], colors=colors[i - 1], alpha=0.5)
ax.contour(X, Y, abs(U), levels=[1], colors=colors[i - 1])
ax.plot([None], [None], color=colors[i - 1], label=f'k={i}')
# check if the method is A-stable
unstable = abs(U) > 1.0
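        # A-stability requires |u| <= 1 everywhere in the left half-plane, so the method
        # is A-stable iff no unstable point has negative real part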
Astable = not any(X[unstable] < 0)
ax.legend(frameon=False)
return Astable
def plot_contraction(stats, fig=None, ax=None, iter=None, plot_increase=False, cbar=True, **kwargs):
"""
Plot the contraction of the error.
Args:
stats (pySDC.stats): The stats object of the run
fig: Figure of the plot, needed for a colorbar
ax: Somewhere to plot
iter (list): Plot the contraction for different numbers of iterations
plot_increase (bool): Whether to also include increasing errors.
cbar (bool): Plot a color bar or not
Returns:
The plot
"""
lambdas = get_sorted(stats, type='lambdas')[0][1]
real = np.unique(lambdas.real)
imag = np.unique(lambdas.imag)
u = get_sorted(stats, type='u', sortby='iter')
t = get_sorted(stats, type='u', sortby='time')[0][0]
u_exact = np.exp(lambdas * t)
kwargs['cmap'] = kwargs.get('cmap', 'seismic' if plot_increase else 'jet')
# decide which iterations to look at
iter = [0, 1] if iter is None else iter
assert len(iter) > 1, 'Need to compute the contraction factor across multiple iterations!'
# get solution for the specified iterations
us = [me[1] for me in u if me[0] in iter]
    if 0 in iter:  # initial conditions are not stored in stats, so we have to add them manually
us = np.append([np.ones_like(lambdas)], us, axis=0)
# get error for each iteration
e = abs(us - u_exact)
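    # replace exact zeros by machine epsilon so the ratios and the log below stay finite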
e[e == 0] = np.finfo(float).eps
# get contraction rates for each iteration
rho = e[1:, :] / e[:-1, :]
rho_avg = np.mean(rho, axis=0)
rho_log = np.log(np.reshape(rho_avg, (len(imag), len(real))))
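    # log(rho) < 0 means the error contracts for that lambda; the contour plots below show this quantity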
    # get spatially averaged contraction factor
# rho_avg_space = np.mean(rho, axis=1)
# e_tot = np.sum(e, axis=1)
# rho_tot = e_tot[1:] / e_tot[:-1]
if ax is None:
fig, ax = plt.subplots(1, 1)
# get a grid for plotting
X, Y = np.meshgrid(real, imag)
if plot_increase:
ax.contour(X, Y, rho_log, levels=[0.0])
lim = max(np.abs([rho_log.min(), rho_log.max()]))
kwargs['vmin'] = kwargs.get('vmin', -lim)
kwargs['vmax'] = kwargs.get('vmax', lim)
cs = ax.contourf(X, Y, rho_log, **kwargs)
else:
cs = ax.contourf(X, Y, np.where(rho_log <= 0, rho_log, None), levels=500, **kwargs)
# decorate
ax.axhline(0, color='black')
ax.axvline(0, color='black')
# fix pdf plotting
ax.set_rasterized(True)
if cbar:
divider = make_axes_locatable(ax)
cbar_ax = divider.append_axes('right', 0.2, pad=0.1)
cb = fig.colorbar(cs, cbar_ax)
cb.set_label(r'$\rho$')
cbar_ax.set_rasterized(True)
return cs
def plot_increment(stats, fig=None, ax=None, iter=None, cbar=True, **kwargs):
"""
Plot the increment between iterations.
Args:
stats (pySDC.stats): The stats object of the run
fig: Figure of the plot, needed for a colorbar
ax: Somewhere to plot
iter (list): Plot the contraction for different numbers of iterations
cbar (bool): Plot a color bar or not
Returns:
None
"""
lambdas = get_sorted(stats, type='lambdas')[0][1]
u = get_sorted(stats, type='u', sortby='iter')
kwargs['cmap'] = kwargs.get('cmap', 'jet')
# decide which iterations to look at
iter = [0, 1] if iter is None else iter
assert len(iter) > 1, 'Need to compute the increment across multiple iterations!'
# get solution for the specified iterations
u_iter = [me[1] for me in u if me[0] in iter]
    if 0 in iter:  # initial conditions are not stored in stats, so we have to add them manually
u_iter = np.append(np.ones_like(lambdas), u_iter)
us = np.reshape(u_iter, (len(iter), len(lambdas)))
# get contraction rates for each iteration
rho = abs(us[1:, :] / us[:-1, :])
rho_avg = np.mean(rho, axis=0)
rho_log = np.log(np.reshape(rho_avg, (len(np.unique(lambdas.real)), len(np.unique(lambdas.imag)))))
if ax is None:
fig, ax = plt.subplots(1, 1)
# get a grid for plotting
X, Y = np.meshgrid(np.unique(lambdas.real), np.unique(lambdas.imag))
cs = ax.contourf(X, Y, rho_log, levels=500, **kwargs)
# outline the region where the increment is 0
ax.contour(X, Y, rho_log, levels=[-15], colors=['red'])
# decorate
ax.axhline(0, color='black')
ax.axvline(0, color='black')
# fix pdf plotting
ax.set_rasterized(True)
if cbar:
divider = make_axes_locatable(ax)
cbar_ax = divider.append_axes('right', 0.2, pad=0.1)
cb = fig.colorbar(cs, cbar_ax)
cb.set_label('increment')
cbar_ax.set_rasterized(True)
def compare_contraction():
"""
Make a plot comparing contraction factors between trapezoidal rule and implicit Euler.
"""
fig, axs = plt.subplots(1, 2, figsize=(12, 5.5), gridspec_kw={'width_ratios': [0.8, 1]})
precons = ['TRAP', 'IE']
norm = Normalize(vmin=-7, vmax=0)
cbar = True
for i in range(len(precons)):
custom_description = {'sweeper_params': {'QI': precons[i]}}
stats, controller, Tend = run_dahlquist(custom_description=custom_description, res=(400, 400))
plot_contraction(stats, fig=fig, ax=axs[i], cbar=cbar, norm=norm, cmap='jet')
cbar = False
axs[i].set_title(precons[i])
fig.tight_layout()
if __name__ == '__main__':
custom_description = None
stats, controller, Tend = run_dahlquist(custom_description=custom_description)
plot_stability(stats, iter=[1, 2, 3])
plot_contraction(stats, iter=[0, 4])
plt.show()
| 11,348 | 33.081081 | 119 | py |
pySDC | pySDC-master/pySDC/projects/Resilience/hook.py | from pySDC.core.Hooks import hooks
from pySDC.implementations.hooks.log_solution import LogSolution
from pySDC.implementations.hooks.log_embedded_error_estimate import LogEmbeddedErrorEstimate
from pySDC.implementations.hooks.log_extrapolated_error_estimate import LogExtrapolationErrorEstimate
from pySDC.implementations.hooks.log_step_size import LogStepSize
hook_collection = [LogSolution, LogEmbeddedErrorEstimate, LogExtrapolationErrorEstimate, LogStepSize]
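# scripts in this project typically extend this collection rather than replace it,
# e.g. hook_class=hook_collection + [LogData]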
class LogData(hooks):
"""
Record data required for analysis of problems in the resilience project
"""
def pre_run(self, step, level_number):
"""
Record initial conditions
"""
super().pre_run(step, level_number)
L = step.levels[level_number]
self.add_to_stats(process=0, time=0, level=0, iter=0, sweep=0, type='u0', value=L.u[0])
def post_step(self, step, level_number):
"""
Record final solutions as well as step size and error estimates
"""
super().post_step(step, level_number)
L = step.levels[level_number]
self.add_to_stats(
process=step.status.slot,
time=L.time,
level=L.level_index,
iter=step.status.iter,
sweep=L.status.sweep,
type='restart',
value=int(step.status.get('restart')),
)
# add the following with two names because I use both in different projects -.-
self.increment_stats(
process=step.status.slot,
time=L.time,
level=L.level_index,
iter=step.status.iter,
sweep=L.status.sweep,
type='sweeps',
value=step.status.iter,
)
self.increment_stats(
process=step.status.slot,
time=L.time + L.dt,
level=L.level_index,
iter=step.status.iter,
sweep=L.status.sweep,
type='k',
value=step.status.iter,
)
class LogUold(hooks):
"""
Log last iterate at the end of the step. Since the hook comes after override of uold, we need to do this in each
iteration. But we don't know which will be the last, so we just do `iter=-1` to override the previous value.
"""
def post_iteration(self, step, level_number):
super().post_iteration(step, level_number)
L = step.levels[level_number]
self.add_to_stats(
process=step.status.slot,
time=L.time + L.dt,
level=L.level_index,
iter=-1,
sweep=L.status.sweep,
type='uold',
value=L.uold[-1],
)
class LogUAllIter(hooks):
"""
Log solution and errors after each iteration
"""
def post_iteration(self, step, level_number):
super(LogUAllIter, self).post_iteration(step, level_number)
L = step.levels[level_number]
L.sweep.compute_end_point()
self.add_to_stats(
process=step.status.slot,
time=L.time,
level=L.level_index,
iter=step.status.iter,
sweep=L.status.sweep,
type='u',
value=L.uend,
)
self.add_to_stats(
process=step.status.slot,
time=L.time,
level=L.level_index,
iter=step.status.iter,
sweep=L.status.sweep,
type='error_embedded_estimate',
value=L.status.get('error_embedded_estimate'),
)
self.add_to_stats(
process=step.status.slot,
time=L.time,
level=L.level_index,
iter=step.status.iter,
sweep=L.status.sweep,
type='error_extrapolation_estimate',
value=L.status.get('error_extrapolation_estimate'),
)
| 3,815 | 30.53719 | 116 | py |
pySDC | pySDC-master/pySDC/projects/Resilience/fault_stats.py | import numpy as np
import pickle
import matplotlib.pyplot as plt
from mpi4py import MPI
import pySDC.helpers.plot_helper as plot_helper
from pySDC.helpers.stats_helper import get_sorted
from pySDC.projects.Resilience.hook import hook_collection, LogUAllIter, LogData
from pySDC.projects.Resilience.fault_injection import get_fault_injector_hook
from pySDC.implementations.convergence_controller_classes.hotrod import HotRod
from pySDC.implementations.convergence_controller_classes.adaptivity import Adaptivity
from pySDC.implementations.hooks.log_errors import LogLocalErrorPostStep
from pySDC.implementations.hooks.log_work import LogWork
# these problems are available for testing
from pySDC.projects.Resilience.advection import run_advection
from pySDC.projects.Resilience.vdp import run_vdp
from pySDC.projects.Resilience.piline import run_piline
from pySDC.projects.Resilience.Lorenz import run_Lorenz
from pySDC.projects.Resilience.Schroedinger import run_Schroedinger
from pySDC.projects.Resilience.quench import run_quench
from pySDC.projects.Resilience.strategies import BaseStrategy, AdaptivityStrategy, IterateStrategy, HotRodStrategy
plot_helper.setup_mpl(reset=True)
class FaultStats:
'''
Class to generate and analyse fault statistics
'''
def __init__(
self,
prob=None,
strategies=None,
faults=None,
reload=True,
recovery_thresh=1 + 1e-3,
recovery_thresh_abs=0.0,
num_procs=1,
mode='combination',
stats_path='data/stats',
**kwargs,
):
'''
Initialization routine
Args:
prob: A function that runs a pySDC problem, see imports for available problems
strategies (list): List of resilience strategies
faults (list): List of booleans that describe whether to use faults or not
reload (bool): Load previously computed statistics and continue from there or start from scratch
recovery_thresh (float): Relative threshold for recovery
num_procs (int): Number of processes
mode (str): Mode for fault generation: Either 'random' or 'combination'
'''
self.prob = prob
self.strategies = [None] if strategies is None else strategies
self.faults = [False, True] if faults is None else faults
self.reload = reload
self.recovery_thresh = recovery_thresh
self.recovery_thresh_abs = recovery_thresh_abs
self.num_procs = num_procs
self.mode = mode
self.stats_path = stats_path
self.kwargs = {
'fault_frequency_iter': 500,
**kwargs,
}
def get_Tend(self):
'''
Get the final time of runs for fault stats based on the problem
Returns:
float: Tend to put into the run
'''
return self.strategies[0].get_Tend(self.prob, self.num_procs)
def run_stats_generation(self, runs=1000, step=None, comm=None, kwargs_range=None, _reload=False, _runs_partial=0):
'''
Run the generation of stats for all strategies in the `self.strategies` variable
Args:
runs (int): Number of runs you want to do
step (int): Number of runs you want to do between saving
comm (MPI.Communicator): Communicator for distributing runs
            kwargs_range (dict): Range for the parameters
_reload, _runs_partial: Variables only used for recursion. Do not change!
Returns:
None
'''
for key, val in kwargs_range.items() if kwargs_range is not None else {}:
if type(val) == int:
self.kwargs[key] = val
else:
for me in val:
kwargs_range_me = {**kwargs_range, key: me}
self.run_stats_generation(runs=runs, step=step, comm=comm, kwargs_range=kwargs_range_me)
return None
comm = MPI.COMM_WORLD if comm is None else comm
step = (runs if step is None else step) if comm.size == 1 else comm.size
_runs_partial = step if _runs_partial == 0 else _runs_partial
reload = self.reload or _reload
max_runs = self.get_max_combinations() if self.mode == 'combination' else runs
if reload:
# sort the strategies to do some load balancing
sorting_index = None
if comm.rank == 0:
already_completed = np.array(
[self.load(strategy=strategy, faults=True).get('runs', 0) for strategy in self.strategies]
)
sorting_index_ = np.argsort(already_completed)
sorting_index = sorting_index_[already_completed[sorting_index_] < max_runs]
# tell all ranks what strategies to use
sorting_index = comm.bcast(sorting_index, root=0)
strategies = [self.strategies[i] for i in sorting_index]
if len(strategies) == 0: # check if we are already done
return None
else:
strategies = self.strategies
strategy_comm = comm.Split(comm.rank % len(strategies))
for j in range(0, len(strategies), comm.size):
for f in self.faults:
if f:
runs_partial = min(_runs_partial, max_runs)
else:
runs_partial = min([5, _runs_partial])
self.generate_stats(
                strategy=strategies[comm.rank % len(strategies)],
runs=runs_partial,
faults=f,
reload=reload,
comm=strategy_comm,
)
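        # recurse with _reload=True to pick up the runs we just stored and continue in
        # chunks of `step` until all strategies have reached the requested number of runs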
self.run_stats_generation(runs=runs, step=step, comm=comm, _reload=True, _runs_partial=_runs_partial + step)
return None
def generate_stats(self, strategy=None, runs=1000, reload=True, faults=True, comm=None):
'''
Generate statistics for recovery from bit flips
-----------------------------------------------
Every run is given a different random seed such that we have different faults and the results are then stored
Args:
strategy (Strategy): Resilience strategy
runs (int): Number of runs you want to do
reload (bool): Load previously computed statistics and continue from there or start from scratch
faults (bool): Whether to do stats with faults or without
comm (MPI.Communicator): Communicator for distributing runs
Returns:
None
'''
comm = MPI.COMM_WORLD if comm is None else comm
# initialize dictionary to store the stats in
dat = {
'level': np.zeros(runs),
'iteration': np.zeros(runs),
'node': np.zeros(runs),
'problem_pos': [],
'bit': np.zeros(runs),
'error': np.zeros(runs),
'total_iteration': np.zeros(runs),
'total_newton_iteration': np.zeros(runs),
'restarts': np.zeros(runs),
'target': np.zeros(runs),
}
# store arguments for storing and loading
identifier_args = {
'faults': faults,
'strategy': strategy,
}
# reload previously recorded stats and write them to dat
if reload:
already_completed_ = None
if comm.rank == 0:
already_completed_ = self.load(**identifier_args)
already_completed = comm.bcast(already_completed_, root=0)
if already_completed['runs'] > 0 and already_completed['runs'] <= runs and comm.rank == 0:
for k in dat.keys():
dat[k][: min([already_completed['runs'], runs])] = already_completed.get(k, [])
else:
already_completed = {'runs': 0}
# prepare a message
involved_ranks = comm.gather(MPI.COMM_WORLD.rank, root=0)
msg = f'{comm.size} rank(s) ({involved_ranks}) doing {strategy.name}{" with faults" if faults else ""} from {already_completed["runs"]} to {runs}'
if comm.rank == 0 and already_completed['runs'] < runs:
print(msg, flush=True)
space_comm = comm.Split(comm.rank)
# perform the remaining experiments
for i in range(already_completed['runs'], runs):
if i % comm.size != comm.rank:
continue
# perform a single experiment with the correct random seed
stats, controller, Tend = self.single_run(strategy=strategy, run=i, faults=faults, space_comm=space_comm)
# get the data from the stats
faults_run = get_sorted(stats, type='bitflip')
t, u = get_sorted(stats, type='u', recomputed=False)[-1]
# check if we ran to the end
if t < Tend:
error = np.inf
else:
error = self.get_error(u, t, controller, strategy)
total_iteration = sum([k[1] for k in get_sorted(stats, type='k')])
total_newton_iteration = sum([k[1] for k in get_sorted(stats, type='work_newton')])
# record the new data point
if faults:
if len(faults_run) > 0:
dat['level'][i] = faults_run[0][1][0]
dat['iteration'][i] = faults_run[0][1][1]
dat['node'][i] = faults_run[0][1][2]
dat['problem_pos'] += [faults_run[0][1][3]]
dat['bit'][i] = faults_run[0][1][4]
dat['target'][i] = faults_run[0][1][5]
else:
                    assert self.mode == 'regular', f'No faults were recorded in run {i} of strategy {strategy.name}!'
dat['error'][i] = error
dat['total_iteration'][i] = total_iteration
dat['total_newton_iteration'][i] = total_newton_iteration
dat['restarts'][i] = sum([me[1] for me in get_sorted(stats, type='restarts')])
dat_full = {}
for k in dat.keys():
dat_full[k] = comm.reduce(dat[k], op=MPI.SUM)
# store the completed stats
dat_full['runs'] = runs
if already_completed['runs'] < runs:
if comm.rank == 0:
self.store(dat_full, **identifier_args)
if self.faults:
try:
self.get_recovered(strategy=strategy)
except KeyError:
print('Warning: Can\'t compute recovery rate right now')
return None
def get_error(self, u, t, controller, strategy):
"""
Compute the error.
Args:
u (dtype_u): The solution at the end of the run
t (float): Time at which `u` was recorded
controller (pySDC.controller.controller): The controller
strategy (Strategy): The resilience strategy
Returns:
float: Error
"""
return abs(u - controller.MS[0].levels[0].prob.u_exact(t=t))
def single_run(self, strategy, run=0, faults=False, force_params=None, hook_class=None, space_comm=None, Tend=None):
'''
Run the problem once with the specified parameters
Args:
strategy (Strategy): The resilience strategy you plan on using
run (int): Index for fault generation
faults (bool): Whether or not to put faults in
force_params (dict): Change parameters in the description of the problem
space_comm (MPI.Communicator): A communicator for space parallelisation
Returns:
dict: Stats object containing statistics for each step, each level and each iteration
pySDC.Controller: The controller of the run
float: The time the problem should have run to
'''
hook_class = hook_collection + [LogWork] + ([LogData] if hook_class is None else hook_class)
force_params = {} if force_params is None else force_params
# build the custom description
custom_description = strategy.get_custom_description(self.prob, self.num_procs)
for k in force_params.keys():
custom_description[k] = {**custom_description.get(k, {}), **force_params[k]}
custom_controller_params = force_params.get('controller_params', {})
if faults:
fault_stuff = {
'rng': None,
'args': strategy.get_fault_args(self.prob, self.num_procs),
'rnd_params': strategy.get_fault_args(self.prob, self.num_procs),
}
# make parameters for faults:
if self.mode == 'random':
fault_stuff['rng'] = np.random.RandomState(run)
elif self.mode == 'combination':
fault_stuff['rng'] = run
elif self.mode == 'regular':
fault_stuff['rng'] = np.random.RandomState(run)
fault_stuff['fault_frequency_iter'] = self.kwargs['fault_frequency_iter']
fault_stuff['rnd_params'] = {
'bit': 12,
'min_node': 1,
}
else:
raise NotImplementedError(f'Don\'t know how to add faults in mode {self.mode}')
else:
fault_stuff = None
return self.prob(
custom_description=custom_description,
num_procs=self.num_procs,
hook_class=hook_class,
fault_stuff=fault_stuff,
Tend=self.get_Tend() if Tend is None else Tend,
custom_controller_params=custom_controller_params,
space_comm=space_comm,
)
def compare_strategies(self, run=0, faults=False, ax=None): # pragma: no cover
'''
Take a closer look at how the strategies compare for a specific run
Args:
run (int): The number of the run to get the appropriate random generator
faults (bool): Whether or not to include faults
ax (Matplotlib.axes): Somewhere to plot
Returns:
None
'''
if ax is None:
fig, ax = plt.subplots(1, 1)
store = True
else:
store = False
k_ax = ax.twinx()
ls = ['-.' if type(strategy) == HotRodStrategy else '-' for strategy in self.strategies]
[self.scrutinize_visual(self.strategies[i], run, faults, ax, k_ax, ls[i]) for i in range(len(self.strategies))]
# make a legend
[k_ax.plot([None], [None], label=strategy.label, color=strategy.color) for strategy in self.strategies]
k_ax.legend(frameon=True)
if store:
fig.tight_layout()
plt.savefig(f'data/{self.get_name()}-comparison.pdf', transparent=True)
def scrutinize_visual(
self, strategy, run, faults, ax=None, k_ax=None, ls='-', plot_restarts=False
): # pragma: no cover
'''
Take a closer look at a specific run with a plot
Args:
strategy (Strategy): The resilience strategy you plan on using
run (int): The number of the run to get the appropriate random generator
faults (bool): Whether or not to include faults
ax (Matplotlib.axes): Somewhere to plot the error
k_ax (Matplotlib.axes): Somewhere to plot the iterations
plot_restarts (bool): Make vertical lines wherever restarts happened
Returns:
None
'''
if ax is None:
fig, ax = plt.subplots(1, 1)
store = True
else:
store = False
force_params = {}
stats, controller, Tend = self.single_run(
strategy=strategy,
run=run,
faults=faults,
force_params=force_params,
hook_class=hook_collection + [LogLocalErrorPostStep, LogData],
)
# plot the local error
e_loc = get_sorted(stats, type='e_local_post_step', recomputed=False)
ax.plot([me[0] for me in e_loc], [me[1] for me in e_loc], color=strategy.color, ls=ls)
# plot the iterations
k_ax = ax.twinx() if k_ax is None else k_ax
k = get_sorted(stats, type='k')
k_ax.plot([me[0] for me in k], np.cumsum([me[1] for me in k]), color=strategy.color, ls='--')
# plot the faults
faults = get_sorted(stats, type='bitflip')
for fault_time in [me[0] for me in faults]:
ax.axvline(fault_time, color='grey', ls=':')
# plot restarts
if plot_restarts:
restarts = get_sorted(stats, type='restarts')
[ax.axvline(me[0], color='black', ls='-.') if me[1] else '' for me in restarts]
# decorate
ax.set_yscale('log')
ax.set_ylabel(r'$\epsilon$')
k_ax.set_ylabel('cumulative iterations (dashed)')
ax.set_xlabel(r'$t$')
if store:
fig.tight_layout()
plt.savefig(f'data/{self.get_name()}-{strategy.name}-details.pdf', transparent=True)
def scrutinize(self, strategy, run, faults=True):
'''
Take a closer look at a specific run
Args:
strategy (Strategy): The resilience strategy you plan on using
run (int): The number of the run to get the appropriate random generator
faults (bool): Whether or not to include faults
Returns:
None
'''
force_params = {}
force_params['controller_params'] = {'logger_level': 15}
stats, controller, Tend = self.single_run(strategy=strategy, run=run, faults=faults, force_params=force_params)
t, u = get_sorted(stats, type='u')[-1]
print(max(u))
k = [me[1] for me in get_sorted(stats, type='k')]
print(k)
print(f'\nOverview for {strategy.name} strategy')
        # see if we can determine if the faults were recovered
no_faults = self.load(strategy=strategy, faults=False)
e_star = np.mean(no_faults.get('error', [0]))
if t < Tend:
error = np.inf
print(f'Final time was not reached! Code crashed at t={t:.2f} instead of reaching Tend={Tend:.2f}')
else:
error = self.get_error(u, t, controller, strategy)
recovery_thresh = self.get_thresh(strategy)
print(
f'e={error:.2e}, e^*={e_star:.2e}, thresh: {recovery_thresh:.2e} -> recovered: \
{error < recovery_thresh}'
)
print(f'k: sum: {np.sum(k)}, min: {np.min(k)}, max: {np.max(k)}, mean: {np.mean(k):.2f},')
_newton_iter = get_sorted(stats, type='work_newton')
if len(_newton_iter) > 0:
newton_iter = [me[1] for me in _newton_iter]
print(
f'Newton: k: sum: {np.sum(newton_iter)}, min: {np.min(newton_iter)}, max: {np.max(newton_iter)}, mean: {np.mean(newton_iter):.2f},'
)
# checkout the step size
dt = [me[1] for me in get_sorted(stats, type='dt')]
print(f'dt: min: {np.min(dt):.2e}, max: {np.max(dt):.2e}, mean: {np.mean(dt):.2e}')
# restarts
restarts = [me[1] for me in get_sorted(stats, type='restarts')]
print(f'restarts: {sum(restarts)}, without faults: {no_faults["restarts"][0]}')
# print faults
faults = get_sorted(stats, type='bitflip')
print('\nfaults:')
print(' t | level | iter | node | bit | trgt | pos')
print('--------+-------+------+------+-----+------+----')
for f in faults:
print(f' {f[0]:6.2f} | {f[1][0]:5d} | {f[1][1]:4d} | {f[1][2]:4d} | {f[1][4]:3d} | {f[1][5]:4d} |', f[1][3])
return None
def convert_faults(self, faults):
'''
Make arrays of useable data from an entry in the stats object returned by pySDC
Args:
faults (list): The entry for faults returned by the pySDC run
Returns:
list: The times when the faults happened
list: The levels in which the faults happened
list: The iterations in which the faults happened
list: The nodes in which the faults happened
list: The problem positions in which the faults happened
list: The bits in which the faults happened
'''
time = [faults[i][0] for i in range(len(faults))]
level = [faults[i][1][0] for i in range(len(faults))]
iteration = [faults[i][1][1] for i in range(len(faults))]
node = [faults[i][1][2] for i in range(len(faults))]
problem_pos = [faults[i][1][3] for i in range(len(faults))]
bit = [faults[i][1][4] for i in range(len(faults))]
return time, level, iteration, node, problem_pos, bit
def get_path(self, **kwargs):
'''
Get the path to where the stats are stored
Args:
strategy (Strategy): The resilience strategy
faults (bool): Whether or not faults have been activated
Returns:
str: The path to what you are looking for
'''
return f'{self.stats_path}/{self.get_name(**kwargs)}.pickle'
def get_name(self, strategy=None, faults=True, mode=None):
'''
Function to get a unique name for a kind of statistics based on the problem and strategy that was used
Args:
strategy (Strategy): Resilience strategy
            faults (bool): Whether or not faults were inserted
Returns:
str: The unique identifier
'''
if self.prob == run_advection:
prob_name = 'advection'
elif self.prob == run_vdp:
prob_name = 'vdp'
elif self.prob == run_piline:
prob_name = 'piline'
elif self.prob == run_Lorenz:
prob_name = 'Lorenz'
elif self.prob == run_Schroedinger:
prob_name = 'Schroedinger'
elif self.prob == run_quench:
prob_name = 'Quench'
else:
raise NotImplementedError(f'Name not implemented for problem {self.prob}')
if faults:
fault_name = '-faults'
else:
fault_name = ''
if strategy is not None:
strategy_name = f'-{strategy.name}'
else:
strategy_name = ''
mode = self.mode if mode is None else mode
if mode == 'regular':
mode_thing = f'-regular{self.kwargs["fault_frequency_iter"] if faults else ""}'
else:
mode_thing = ''
return f'{prob_name}{strategy_name}{fault_name}-{self.num_procs}procs{mode_thing}'
def store(self, dat, **kwargs):
'''
Stores the data for a run at a predefined path
Args:
dat (dict): The data of the recorded statistics
Returns:
None
'''
with open(self.get_path(**kwargs), 'wb') as f:
pickle.dump(dat, f)
return None
def load(self, **kwargs):
'''
        Loads the stats belonging to a specific strategy and whether or not faults were inserted.
When no data has been generated yet, a dictionary is returned which only contains the number of completed runs,
which is 0 of course.
Returns:
dict: Data from previous run or if it is not available a placeholder dictionary
'''
kwargs['strategy'] = kwargs.get('strategy', self.strategies[MPI.COMM_WORLD.rank % len(self.strategies)])
try:
with open(self.get_path(**kwargs), 'rb') as f:
dat = pickle.load(f)
except FileNotFoundError:
return {'runs': 0}
return dat
def get_thresh(self, strategy=None):
"""
Get recovery threshold based on relative and absolute tolerances
Args:
strategy (Strategy): The resilience strategy
"""
fault_free = self.load(strategy=strategy, faults=False)
assert fault_free['error'].std() / fault_free['error'].mean() < 1e-5
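        # a run counts as recovered if its error stays below an absolute offset plus a
        # relative factor times the mean fault-free error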
return self.recovery_thresh_abs + self.recovery_thresh * fault_free["error"].mean()
def get_recovered(self, **kwargs):
'''
Determine the recovery rate for a specific strategy and store it to disk.
Returns:
None
'''
if 'strategy' not in kwargs.keys():
[self.get_recovered(strategy=strat, **kwargs) for strat in self.strategies]
else:
try:
with_faults = self.load(faults=True, **kwargs)
with_faults['recovered'] = with_faults['error'] < self.get_thresh(kwargs['strategy'])
self.store(faults=True, dat=with_faults, **kwargs)
except KeyError:
print("Can\'t compute recovery rate right now")
return None
def crash_rate(self, dat, no_faults, thingA, mask):
'''
Determine the rate of runs that crashed
Args:
dat (dict): The data of the recorded statistics with faults
no_faults (dict): The data of the corresponding fault-free stats
thingA (str): Some key stored in the stats that will go on the y-axis
mask (Numpy.ndarray of shape (n)): Arbitrary mask to apply before determining the rate
Returns:
int: Ratio of the runs which crashed and fall under the specific criteria set by the mask
'''
if len(dat[thingA][mask]) > 0:
crash = dat['error'] == np.inf
return len(dat[thingA][mask & crash]) / len(dat[thingA][mask])
else:
return None
def rec_rate(self, dat, no_faults, thingA, mask):
'''
Operation for plotting which returns the recovery rate for a given mask.
Which thingA you apply this to actually does not matter here since we compute a rate.
Args:
dat (dict): The recorded statistics
no_faults (dict): The corresponding fault-free stats
thingA (str): Some key stored in the stats
mask (Numpy.ndarray of shape (n)): Arbitrary mask for filtering
Returns:
float: Recovery rate
'''
if len(dat[thingA][mask]) > 0:
return len(dat[thingA][mask & dat['recovered']]) / len(dat[thingA][mask])
else:
return None
def mean(self, dat, no_faults, thingA, mask):
'''
Operation for plotting which returns the mean of thingA after applying the mask
Args:
dat (dict): The recorded statistics
no_faults (dict): The corresponding fault-free stats
thingA (str): Some key stored in the stats
mask (Numpy.ndarray of shape (n)): Arbitrary mask for filtering
Returns:
float: Mean of thingA after applying mask
'''
return np.mean(dat[thingA][mask])
def extra_mean(self, dat, no_faults, thingA, mask):
'''
Operation for plotting which returns the difference in mean of thingA between runs with and without faults after
applying the mask
Args:
dat (dict): The recorded statistics
no_faults (dict): The corresponding fault-free stats
thingA (str): Some key stored in the stats
mask (Numpy.ndarray of shape (n)): Arbitrary mask for filtering
Returns:
float: Difference in mean of thingA between runs with and without faults after applying mask
'''
if True in mask or int in [type(me) for me in mask]:
return np.mean(dat[thingA][mask]) - np.mean(no_faults[thingA])
else:
return None
def plot_thingA_per_thingB(
self, strategy, thingA, thingB, ax=None, mask=None, recovered=False, op=None
): # pragma: no cover
'''
Plot thingA vs. thingB for a single strategy
Args:
strategy (Strategy): The resilience strategy you want to plot
thingA (str): Some key stored in the stats that will go on the y-axis
thingB (str): Some key stored in the stats that will go on the x-axis
ax (Matplotlib.axes): Somewhere to plot
mask (Numpy.ndarray of shape (n)): Arbitrary mask to apply to both axes
recovered (bool): Show the plot for both all runs and only the recovered ones
op (function): Operation that is applied to thingA before plotting default is recovery rate
Returns:
None
'''
op = self.rec_rate if op is None else op
dat = self.load(strategy=strategy, faults=True)
no_faults = self.load(strategy=strategy, faults=False)
if mask is None:
mask = np.ones_like(dat[thingB], dtype=bool)
        admissible_thingB = np.unique(dat[thingB][mask])
        me = np.zeros(len(admissible_thingB))
        me_recovered = np.zeros_like(me)
        for i in range(len(me)):
            _mask = (dat[thingB] == admissible_thingB[i]) & mask
if _mask.any():
me[i] = op(dat, no_faults, thingA, _mask)
me_recovered[i] = op(dat, no_faults, thingA, _mask & dat['recovered'])
if recovered:
ax.plot(
                admissible_thingB,
me_recovered,
label=f'{strategy.label} (only recovered)',
color=strategy.color,
marker=strategy.marker,
ls='--',
linewidth=3,
)
ax.plot(
            admissible_thingB, me, label=f'{strategy.label}', color=strategy.color, marker=strategy.marker, linewidth=2
)
ax.legend(frameon=False)
ax.set_xlabel(thingB)
ax.set_ylabel(thingA)
return None
def plot_things_per_things(
self,
thingA='bit',
thingB='bit',
recovered=False,
mask=None,
op=None,
args=None,
strategies=None,
name=None,
store=True,
ax=None,
fig=None,
): # pragma: no cover
'''
Plot thingA vs thingB for multiple strategies
Args:
thingA (str): Some key stored in the stats that will go on the y-axis
thingB (str): Some key stored in the stats that will go on the x-axis
recovered (bool): Show the plot for both all runs and only the recovered ones
mask (Numpy.ndarray of shape (n)): Arbitrary mask to apply to both axes
op (function): Operation that is applied to thingA before plotting default is recovery rate
args (dict): Parameters for how the plot should look
strategies (list): List of the strategies you want to plot, if None, all will be plotted
name (str): Optional name for the plot
store (bool): Store the plot at a predefined path or not (for jupyter notebooks)
ax (Matplotlib.axes): Somewhere to plot
fig (Matplotlib.figure): Figure of the ax
Returns
None
'''
strategies = self.strategies if strategies is None else strategies
args = {} if args is None else args
# make sure we have something to plot in
if ax is None:
fig, ax = plt.subplots(1, 1)
elif fig is None:
store = False
# execute the plots for all strategies
for s in strategies:
self.plot_thingA_per_thingB(s, thingA=thingA, thingB=thingB, recovered=recovered, ax=ax, mask=mask, op=op)
# set the parameters
[plt.setp(ax, k, v) for k, v in args.items()]
if store:
fig.tight_layout()
plt.savefig(f'data/{self.get_name()}-{thingA if name is None else name}_per_{thingB}.pdf', transparent=True)
plt.close(fig)
return None
def plot_recovery_thresholds(self, strategies=None, thresh_range=None, ax=None): # pragma: no cover
'''
Plot the recovery rate for a range of thresholds
Args:
strategies (list): List of the strategies you want to plot, if None, all will be plotted
thresh_range (list): thresholds for deciding whether to accept as recovered
ax (Matplotlib.axes): Somewhere to plot
Returns:
None
'''
# fill default values if nothing is specified
strategies = self.strategies if strategies is None else strategies
thresh_range = 1 + np.linspace(-4e-2, 4e-2, 100) if thresh_range is None else thresh_range
if ax is None:
fig, ax = plt.subplots(1, 1)
        rec_rates = [[None] * len(thresh_range) for _ in range(len(strategies))]
for strategy_idx in range(len(strategies)):
strategy = strategies[strategy_idx]
# load the stats
fault_free = self.load(strategy=strategy, faults=False)
with_faults = self.load(strategy=strategy, faults=True)
for thresh_idx in range(len(thresh_range)):
rec_mask = with_faults['error'] < thresh_range[thresh_idx] * fault_free['error'].mean()
rec_rates[strategy_idx][thresh_idx] = len(with_faults['error'][rec_mask]) / len(with_faults['error'])
ax.plot(thresh_range, rec_rates[strategy_idx], color=strategy.color, label=strategy.label)
ax.legend(frameon=False)
ax.set_ylabel('recovery rate')
ax.set_xlabel('threshold as ratio to fault-free error')
return None
def analyse_adaptivity(self, mask): # pragma: no cover
'''
Analyse a set of runs with adaptivity
Args:
mask (Numpy.ndarray of shape (n)): The mask you want to know about
Returns:
None
'''
index = self.get_index(mask)
dat = self.load()
# make a header
print(' run | bit | node | iter | e_em^* | e_em | e_glob^* | e_glob ')
print('-------+-----+------+------+----------+----------+----------+----------')
for i in index:
e_em, e_glob = self.analyse_adaptivity_single(int(i))
print(
f' {i:5d} | {dat["bit"][i]:3.0f} | {dat["node"][i]:4.0f} | {dat["iteration"][i]:4.0f} | {e_em[1]:.2e}\
| {e_em[0]:.2e} | {e_glob[1]:.2e} | {e_glob[0]:.2e}'
)
e_tol = AdaptivityStrategy().get_custom_description(self.prob, self.num_procs)['convergence_controllers'][
Adaptivity
]['e_tol']
print(f'We only restart when e_em > e_tol = {e_tol:.2e}!')
return None
def analyse_adaptivity_single(self, run): # pragma: no cover
'''
Compute what the difference in embedded and global error are for a specific run with adaptivity
Args:
run (int): The run you want to know about
Returns:
list: Embedded error with fault and without for the last iteration in the step with a fault
list: Global error with and without fault at the end of the run
'''
# perform one run with and one without faults
stats = []
controllers = []
for faults in [True, False]:
s, c, _ = self.single_run(
strategy=AdaptivityStrategy(), run=run, faults=faults, hook_class=hook_collection + [LogUAllIter]
)
stats += [s]
controllers += [c]
# figure out when the fault happened
t_fault = get_sorted(stats[0], type='bitflip')[0][0]
# get embedded error
e_em = [
[me[1] for me in get_sorted(stat, type='error_embedded_estimate', time=t_fault, sortby='iter')]
for stat in stats
]
# compute the global error
u_end = [get_sorted(stat, type='u')[-1] for stat in stats]
e_glob = [abs(u_end[i][1] - controllers[i].MS[0].levels[0].prob.u_exact(t=u_end[i][0])) for i in [0, 1]]
return [e_em[i][-1] for i in [0, 1]], e_glob
def analyse_HotRod(self, mask): # pragma: no cover
'''
Analyse a set of runs with Hot Rod
Args:
mask (Numpy.ndarray of shape (n)): The mask you want to know about
Returns:
None
'''
index = self.get_index(mask)
dat = self.load()
# make a header
print(
' run | bit | node | iter | e_ex^* | e_ex | e_em^* | e_em | diff* | diff | e_glob^* \
| e_glob '
)
print(
'-------+-----+------+------+----------+----------+----------+----------+----------+----------+----------\
+----------'
)
for i in index:
e_em, e_ex, e_glob = self.analyse_HotRod_single(int(i))
print(
f' {i:5d} | {dat["bit"][i]:3.0f} | {dat["node"][i]:4.0f} | {dat["iteration"][i]:4.0f} | {e_ex[1]:.2e}\
| {e_ex[0]:.2e} | {e_em[1]:.2e} | {e_em[0]:.2e} | {abs(e_em[1]-e_ex[1]):.2e} | {abs(e_em[0]-e_ex[0]):.2e} | \
{e_glob[1]:.2e} | {e_glob[0]:.2e}'
)
tol = HotRodStrategy().get_custom_description(self.prob, self.num_procs)['convergence_controllers'][HotRod][
'HotRod_tol'
]
print(f'We only restart when diff > tol = {tol:.2e}!')
return None
def analyse_HotRod_single(self, run): # pragma: no cover
'''
Compute what the difference in embedded, extrapolated and global error are for a specific run with Hot Rod
Args:
run (int): The run you want to know about
Returns:
list: Embedded error with fault and without for the last iteration in the step with a fault
list: Extrapolation error with fault and without for the last iteration in the step with a fault
list: Global error with and without fault at the end of the run
'''
# perform one run with and one without faults
stats = []
controllers = []
for faults in [True, False]:
s, c, _ = self.single_run(
strategy=HotRodStrategy(), run=run, faults=faults, hook_class=hook_collection + [LogUAllIter]
)
stats += [s]
controllers += [c]
# figure out when the fault happened
t_fault = get_sorted(stats[0], type='bitflip')[0][0]
# get embedded error
e_em = [
[me[1] for me in get_sorted(stat, type='error_embedded_estimate', time=t_fault, sortby='iter')]
for stat in stats
]
# get extrapolated error
e_ex = [
[me[1] for me in get_sorted(stat, type='error_extrapolation_estimate', time=t_fault, sortby='iter')]
for stat in stats
]
# compute the global error
u_end = [get_sorted(stat, type='u')[-1] for stat in stats]
e_glob = [abs(u_end[i][1] - controllers[i].MS[0].levels[0].prob.u_exact(t=u_end[i][0])) for i in [0, 1]]
return [e_em[i][-1] for i in [0, 1]], [e_ex[i][-1] for i in [0, 1]], e_glob
def print_faults(self, mask=None): # pragma: no cover
'''
Print all faults that happened within a certain mask
Args:
mask (Numpy.ndarray of shape (n)): The mask you want to know the contents of
Returns:
None
'''
index = self.get_index(mask)
dat = self.load()
# make a header
print(' run | bit | node | iter | space pos')
print('-------+-----+------+------+-----------')
for i in index:
print(
f' {i:5d} | {dat["bit"][i]:3.0f} | {dat["node"][i]:4.0f} | {dat["iteration"][i]:4.0f} | \
{dat["problem_pos"][i]}'
)
return None
def get_mask(self, strategy=None, key=None, val=None, op='eq', old_mask=None, compare_faults=False):
'''
Make a mask to apply to stored data to filter out certain things
Args:
strategy (Strategy): The resilience strategy you want to apply the mask to. Most masks are the same for all
strategies so None is fine
key (str): The key in the stored statistics that you want to filter for some value
val (str, float, int, bool): A value that you want to use for filtering. Dtype depends on key
op (str): Operation that is applied for filtering
old_mask (Numpy.ndarray of shape (n)): Apply this mask on top of the filter
compare_faults (bool): instead of comparing to val, compare to the mean value for fault free runs
Returns:
Numpy.ndarray with boolean entries that can be used as a mask
'''
strategy = self.strategies[0] if strategy is None else strategy
dat = self.load(strategy=strategy, faults=True)
if compare_faults:
if val is not None:
raise ValueError('Can\'t use val and compare_faults in get_mask at the same time!')
else:
vals = self.load(strategy=strategy, faults=False)[key]
val = sum(vals) / len(vals)
if None in [key, val] and op not in ['isfinite']:
mask = dat['bit'] == dat['bit']
else:
if op == 'uneq':
mask = dat[key] != val
elif op == 'eq':
mask = dat[key] == val
elif op == 'leq':
mask = dat[key] <= val
elif op == 'geq':
mask = dat[key] >= val
elif op == 'lt':
mask = dat[key] < val
elif op == 'gt':
mask = dat[key] > val
elif op == 'isfinite':
mask = np.isfinite(dat[key])
else:
raise NotImplementedError(f'Please implement op={op}!')
if old_mask is not None:
return mask & old_mask
else:
return mask
def get_fixable_faults_only(self, strategy):
"""
Return a mask of only faults that can be fixed with a given strategy.
Args:
strategy (Strategy): The resilience strategy you want to look at. In normal use it's the same for all
Returns:
Numpy.ndarray with boolean entries that can be used as a mask
"""
fixable = strategy.get_fixable_params(
maxiter=strategy.get_custom_description(self.prob, self.num_procs)['step_params']['maxiter']
)
mask = self.get_mask(strategy=strategy)
for kwargs in fixable:
mask = self.get_mask(strategy=strategy, **kwargs, old_mask=mask)
return mask
def get_index(self, mask=None):
'''
        Get the indices of all runs in mask
Args:
mask (Numpy.ndarray of shape (n)): The mask you want to know the contents of
Returns:
            Numpy.ndarray: Array of indices
'''
if mask is None:
dat = self.load()
return np.arange(len(dat['iteration']))
else:
return np.arange(len(mask))[mask]
def get_statistics_info(self, mask=None, strategy=None, print_all=False, ax=None): # pragma: no cover
'''
Get information about how many data points for faults we have given a particular mask
Args:
mask (Numpy.ndarray of shape (n)): The mask you want to apply before counting
strategy (Strategy): The resilience strategy you want to look at. In normal use it's the same for all
strategies, so you don't need to supply this argument
print_all (bool): Whether to add information that is normally not useful to the table
ax (Matplotlib.axes): Somewhere to plot the combinations histogram
Returns:
None
'''
        # load some data from which to infer the number of occurrences of some event
        strategy = self.strategies[0] if strategy is None else strategy
        dat = self.load(strategy=strategy, faults=True)
# make a dummy mask in case none is supplied
if mask is None:
mask = np.ones_like(dat['error'], dtype=bool)
# print a header
print(f' tot: {len(dat["error"][mask]):6} | avg. counts | mean deviation | unique entries')
print('-------------------------------------------------------------')
# make a list of all keys that you want to look at
keys = ['iteration', 'bit', 'node']
if print_all:
keys += ['problem_pos', 'level', 'target']
# print the table
for key in keys:
counts, dev, unique = self.count_occurrences(dat[key][mask])
print(f' {key:11} | {counts:11.1f} | {dev:14.2f} | {unique:14}')
return None
def combinations_histogram(self, dat=None, keys=None, mask=None, ax=None): # pragma: no cover
'''
        Make a histogram out of the occurrences of combinations
Args:
dat (dict): The data of the recorded statistics
keys (list): The keys in dat that you want to know the combinations of
mask (Numpy.ndarray of shape (n)): The mask you want to apply before counting
Returns:
Matplotlib.axes: The plot
'''
if ax is None:
fig, ax = plt.subplots(1, 1)
occurrences, bins = self.get_combination_histogram(dat, keys, mask)
ax.bar(bins[:-1], occurrences)
ax.set_xlabel('Occurrence of combinations')
return ax
def get_combination_histogram(self, dat=None, keys=None, mask=None): # pragma: no cover
'''
Check how many combinations of values we expect and how many we find to see if we need to do more experiments.
It is assumed that each allowed value for each key appears at least once in dat after the mask was applied
Args:
dat (dict): The data of the recorded statistics
keys (list): The keys in dat that you want to know the combinations of
mask (Numpy.ndarray of shape (n)): The mask you want to apply before counting
Returns:
Numpy.ndarray: Number of occurrences of combinations
Numpy.ndarray: Bins
'''
# load default values
dat = self.load(strategy=self.strategies[0], faults=True) if dat is None else dat
keys = ['iteration', 'bit', 'node'] if keys is None else keys
if mask is None:
mask = np.ones_like(dat['error'], dtype=bool)
# get unique values and compute how many combinations you expect
num_unique = [len(np.unique(dat[key][mask])) for key in keys]
expected_number_of_combinations = np.prod(num_unique)
# test what you actually get
combination_counts = self.get_combination_counts(dat, keys, mask)
# make a histogram with the result
occurrences, bins = np.histogram(combination_counts, bins=np.arange(max(combination_counts) + 1))
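        # combinations that never occurred are missing from the counts, so they are put in the zero-bin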
occurrences[0] = expected_number_of_combinations - len(combination_counts)
return occurrences, bins
def get_max_combinations(self, dat=None):
'''
Count how many combinations of parameters for faults are possible
Args:
dat (dict): The recorded statistics
Returns:
int: Number of possible combinations
'''
stats, controller, Tend = self.single_run(strategy=self.strategies[0], run=0, faults=True)
faultHook = get_fault_injector_hook(controller)
ranges = [
(0, faultHook.rnd_params['level_number']),
(0, faultHook.rnd_params['node'] + 1),
(1, faultHook.rnd_params['iteration'] + 1),
(0, faultHook.rnd_params['bit']),
]
ranges += [(0, i) for i in faultHook.rnd_params['problem_pos']]
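        # the number of possible faults is the product of the sizes of all parameter ranges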
return np.prod([me[1] - me[0] for me in ranges], dtype=int)
def get_combination_counts(self, dat, keys, mask):
'''
Get counts of how often all combinations of values of keys appear. This is done recursively to support arbitrary
numbers of keys
Args:
dat (dict): The data of the recorded statistics
keys (list): The keys in dat that you want to know the combinations of
mask (Numpy.ndarray of shape (n)): The mask you want to apply before counting
Returns:
list: Occurrences of all combinations
'''
key = keys[0]
unique_vals = np.unique(dat[key][mask])
res = []
for i in range(len(unique_vals)):
inner_mask = self.get_mask(key=key, val=unique_vals[i], op='eq', old_mask=mask)
if len(keys) > 1:
res += self.get_combination_counts(dat, keys[1:], inner_mask)
else:
res += [self.count_occurrences(dat[key][inner_mask])[0]]
return res
def count_occurrences(self, vals):
'''
Count the occurrences of unique values in vals and compute average deviation from mean
Args:
vals (list): Values you want to check
Returns:
float: Mean of number of occurrences of unique values in vals
float: Average deviation from mean number of occurrences
int: Number of unique entries
'''
unique_vals, counts = np.unique(vals, return_counts=True)
if len(counts) > 0:
return counts.mean(), sum(abs(counts - counts.mean())) / len(counts), len(counts)
else:
return None, None, 0
def bar_plot_thing(
self, x=None, thing=None, ax=None, mask=None, store=False, faults=False, name=None, op=None, args=None
): # pragma: no cover
'''
Make a bar plot about something!
Args:
x (Numpy.ndarray of dimension 1): x values for bar plot
thing (str): Some key stored in the stats that will go on the y-axis
mask (Numpy.ndarray of shape (n)): The mask you want to apply before plotting
store (bool): Store the plot at a predefined path or not (for jupyter notebooks)
faults (bool): Whether to load stats with faults or without
name (str): Optional name for the plot
op (function): Operation that is applied to thing before plotting default is recovery rate
args (dict): Parameters for how the plot should look
Returns:
None
'''
if ax is None:
fig, ax = plt.subplots(1, 1)
store = True
op = self.mean if op is None else op
# get the values for the bars
height = np.zeros(len(self.strategies))
for strategy_idx in range(len(self.strategies)):
strategy = self.strategies[strategy_idx]
# load the values
dat = self.load(strategy=strategy, faults=faults)
no_faults = self.load(strategy=strategy, faults=False)
# check if we have a mask
if mask is None:
mask = np.ones_like(dat[thing], dtype=bool)
height[strategy_idx] = op(dat, no_faults, thing, mask)
# get some x values
x = np.arange(len(self.strategies)) if x is None else x
# prepare labels
ticks = [strategy.bar_plot_x_label for strategy in self.strategies]
ax.bar(x, height, tick_label=ticks)
# set the parameters
ax.set_ylabel(thing)
args = {} if args is None else args
[plt.setp(ax, k, v) for k, v in args.items()]
if store:
fig.tight_layout()
plt.savefig(f'data/{self.get_name()}-{thing if name is None else name}-barplot.pdf', transparent=True)
plt.close(fig)
return None
def fault_frequency_plot(self, ax, iter_ax, kwargs_range, strategy=None): # pragma: no cover
func_args = locals()
func_args.pop('self', None)
if strategy is None:
for strat in self.strategies:
args = {**func_args, 'strategy': strat}
self.fault_frequency_plot(**args)
return None
# load data
all_data = {}
for me in kwargs_range['fault_frequency_iter']:
self.kwargs['fault_frequency_iter'] = me
self.get_recovered()
all_data[me] = self.load(strategy=strategy, faults=True, mode='regular')
# get_recovery_rate
results = {}
results['frequencies'] = list(all_data.keys())
results['recovery_rate'] = [
            len(all_data[key]['recovered'][all_data[key]['recovered']]) / len(all_data[key]['recovered'])
for key in all_data.keys()
]
# results['iterations'] = [np.mean(all_data[key]['total_iteration']) for key in all_data.keys()]
results['iterations'] = [
np.mean(all_data[key]['total_iteration'][all_data[key]['error'] != np.inf]) for key in all_data.keys()
]
ax.plot(results['frequencies'], results['recovery_rate'], **strategy.style)
iter_ax.plot(results['frequencies'], results['iterations'], **{**strategy.style, 'ls': '--'})
ax.set_xscale('log')
ax.set_xlabel('iterations between fault')
ax.set_ylabel('recovery rate')
iter_ax.set_ylabel('average total iterations if not crashed (dashed)')
ax.legend(frameon=False)
return None
def check_local_error(): # pragma: no cover
"""
Make a plot of the resolution over time for all problems
"""
problems = [run_vdp, run_Lorenz, run_Schroedinger, run_quench]
problems = [run_quench]
strategies = [BaseStrategy(), AdaptivityStrategy(), IterateStrategy()]
for i in range(len(problems)):
stats_analyser = FaultStats(
prob=problems[i],
strategies=strategies,
faults=[False],
reload=True,
recovery_thresh=1.1,
num_procs=1,
mode='random',
)
stats_analyser.compare_strategies()
plt.show()
def main():
stats_analyser = FaultStats(
prob=run_vdp,
strategies=[BaseStrategy(), AdaptivityStrategy(), IterateStrategy(), HotRodStrategy()],
faults=[False, True],
reload=True,
recovery_thresh=1.1,
# recovery_thresh_abs=1e-5,
num_procs=1,
mode='random',
stats_path='data/stats-jusuf',
)
########################
# msk = stats_analyser.get_mask(AdaptivityStrategy(), val=False, key='recovered')
# stats_analyser.print_faults(msk)
fig, ax = plt.subplots()
iter_ax = ax.twinx()
kwargs_range = {'fault_frequency_iter': (10, 100, 1000, 10000)}
stats_analyser.run_stats_generation(runs=10, kwargs_range=kwargs_range)
stats_analyser.fault_frequency_plot(ax=ax, iter_ax=iter_ax, kwargs_range=kwargs_range)
# stats_analyser.scrutinize(AdaptivityStrategy(), 4, True)
plt.show()
return None
########################
stats_analyser.run_stats_generation(runs=5000)
if MPI.COMM_WORLD.rank > 0: # make sure only one rank accesses the data
return None
stats_analyser.get_recovered()
mask = None
# stats_analyser.compare_strategies()
stats_analyser.plot_things_per_things(
'recovered', 'node', False, op=stats_analyser.rec_rate, mask=mask, args={'ylabel': 'recovery rate'}
)
stats_analyser.plot_things_per_things(
'recovered', 'iteration', False, op=stats_analyser.rec_rate, mask=mask, args={'ylabel': 'recovery rate'}
)
stats_analyser.plot_things_per_things(
'recovered', 'bit', False, op=stats_analyser.rec_rate, mask=mask, args={'ylabel': 'recovery rate'}
)
# make a plot for only the faults that can be recovered
fig, ax = plt.subplots(1, 1)
for strategy in stats_analyser.strategies:
fixable = stats_analyser.get_fixable_faults_only(strategy=strategy)
stats_analyser.plot_things_per_things(
'recovered',
'bit',
False,
strategies=[strategy],
op=stats_analyser.rec_rate,
mask=fixable,
args={'ylabel': 'recovery rate'},
name='fixable_recovery',
ax=ax,
)
fig.tight_layout()
fig.savefig(f'data/{stats_analyser.get_name()}-recoverable.pdf', transparent=True)
fig, ax = plt.subplots(1, 1, figsize=(13, 4))
stats_analyser.plot_recovery_thresholds(ax=ax, thresh_range=np.logspace(-1, 1, 1000))
ax.axvline(stats_analyser.get_thresh(BaseStrategy()), color='grey', ls=':', label='recovery threshold')
ax.set_xscale('log')
ax.legend(frameon=False)
fig.tight_layout()
fig.savefig(f'data/{stats_analyser.get_name()}-threshold.pdf', transparent=True)
stats_analyser.plot_things_per_things(
'total_iteration',
'bit',
True,
op=stats_analyser.mean,
mask=mask,
args={'yscale': 'log', 'ylabel': 'total iterations'},
)
stats_analyser.plot_things_per_things(
'total_iteration',
'bit',
True,
op=stats_analyser.extra_mean,
mask=mask,
args={'yscale': 'linear', 'ylabel': 'extra iterations'},
name='extra_iter',
)
stats_analyser.plot_things_per_things(
'error', 'bit', True, op=stats_analyser.mean, mask=mask, args={'yscale': 'log'}
)
stats_analyser.plot_recovery_thresholds()
if __name__ == "__main__":
# check_local_error()
main()
| 57,376 | 37.482227 | 154 | py |
pySDC | pySDC-master/pySDC/projects/Resilience/collocation_adaptivity.py | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import TABLEAU_COLORS
from pySDC.helpers.stats_helper import get_sorted
from pySDC.projects.Resilience.vdp import run_vdp
from pySDC.projects.Resilience.advection import run_advection
from pySDC.projects.Resilience.heat import run_heat
from pySDC.projects.Resilience.hook import LogData
from pySDC.projects.Resilience.accuracy_check import get_accuracy_order
from pySDC.implementations.convergence_controller_classes.adaptive_collocation import AdaptiveCollocation
from pySDC.implementations.convergence_controller_classes.estimate_embedded_error import (
EstimateEmbeddedErrorCollocation,
)
from pySDC.core.Hooks import hooks
from pySDC.implementations.hooks.log_errors import LogLocalErrorPostIter
from pySDC.implementations.hooks.log_embedded_error_estimate import LogEmbeddedErrorEstimatePostIter
# define global parameters for running problems and plotting
CMAP = list(TABLEAU_COLORS.values())
Tend = 0.015
base_params = {
'step_params': {'maxiter': 99},
'sweeper_params': {
'QI': 'LU',
'num_nodes': 4,
},
'level_params': {'restol': 1e-9, 'dt': Tend},
}
coll_params_inexact = {
'num_nodes': [2, 3, 4],
'restol': [1e-4, 1e-7, 1e-9],
}
coll_params_refinement = {
'num_nodes': [1, 2, 3, 4],
}
coll_params_reduce = {
'num_nodes': [4, 3, 2, 1],
}
coll_params_type = {
# 'quad_type': ['RADAU-RIGHT', 'GAUSS'],
'quad_type': ['GAUSS', 'RADAU-RIGHT', 'LOBATTO'],
}
special_params = {
'inexact': {EstimateEmbeddedErrorCollocation: {'adaptive_coll_params': coll_params_inexact}},
'refinement': {EstimateEmbeddedErrorCollocation: {'adaptive_coll_params': coll_params_refinement}},
'reduce': {EstimateEmbeddedErrorCollocation: {'adaptive_coll_params': coll_params_reduce}},
'standard': {},
'type': {EstimateEmbeddedErrorCollocation: {'adaptive_coll_params': coll_params_type}},
}
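# Each entry in special_params maps a label to the convergence controllers passed to the run;
# e.g. 'refinement' starts from a single collocation node and adds nodes as the collocation
# problems converge, while 'reduce' goes the other way (see the `expect` dict in plot_residual).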
def get_collocation_order(quad_type, num_nodes, node_type):
"""
    Compute the maximal order achievable by a given collocation method (placeholder, not yet implemented)
"""
pass
# define a few hooks
class LogSweeperParams(hooks):
"""
Log the sweeper parameters after every iteration to check if the adaptive collocation convergence controller is
doing what it's supposed to.
"""
def post_iteration(self, step, level_number):
"""
Args:
step (pySDC.Step.step): the current step
level_number (int): the current level number
Returns:
None
"""
super().post_iteration(step, level_number)
L = step.levels[level_number]
self.add_to_stats(
process=step.status.slot,
time=L.time,
level=L.level_index,
iter=step.status.iter,
sweep=L.status.sweep,
type='sweeper_params',
value=L.sweep.params.__dict__,
)
self.add_to_stats(
process=step.status.slot,
time=L.time,
level=L.level_index,
iter=step.status.iter,
sweep=L.status.sweep,
type='coll_order',
value=L.sweep.coll.order,
)
# plotting functions
def compare_adaptive_collocation(prob):
"""
Run a problem with various modes of adaptive collocation.
Args:
prob (function): A problem from the resilience project to run
Returns:
None
"""
fig, ax = plt.subplots()
node_ax = ax.twinx()
for i in range(len(special_params.keys())):
key = list(special_params.keys())[i]
custom_description = {**base_params, 'convergence_controllers': special_params[key]}
custom_controller_parameters = {'logger_level': 30}
stats, _, _ = prob(
Tend=Tend,
custom_description=custom_description,
custom_controller_params=custom_controller_parameters,
hook_class=[LogData, LogSweeperParams],
)
plot_residual(stats, ax, node_ax, label=key, color=CMAP[i])
def plot_residual(stats, ax, node_ax, **kwargs):
"""
Plot residual and nodes vs. iteration.
    A test is also performed to check that previously obtained results are reproduced.
Args:
stats (pySDC.stats): The stats object of the run
ax (Matplotlib.pyplot.axes): Somewhere to plot
node_ax (Matplotlib.pyplot.axes): Somewhere to plot
Returns:
None
"""
sweeper_params = get_sorted(stats, type='sweeper_params', sortby='iter')
residual = get_sorted(stats, type='residual_post_iteration', sortby='iter')
    # extract the number of collocation nodes used in each iteration
nodes = [me[1]['num_nodes'] for me in sweeper_params]
# test if the expected outcome was achieved
label = kwargs['label']
expect = {
'inexact': [2, 2, 3, 3, 4, 4],
'refinement': [1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 4, 4],
'reduce': [4, 4, 4, 4, 4, 4, 3, 3, 2, 2, 2, 1],
'standard': [4, 4, 4, 4, 4, 4],
'type': [4, 4, 4, 4, 4, 4, 4],
}
assert np.allclose(
nodes, expect[label]
), f"Unexpected distribution of nodes vs. iteration in {label}! Expected {expect[label]}, got {nodes}"
ax.plot([me[0] for me in residual], [me[1] for me in residual], **kwargs)
ax.set_yscale('log')
ax.legend(frameon=False)
ax.set_xlabel(r'$k$')
ax.set_ylabel(r'residual')
node_ax.plot([me[0] for me in sweeper_params], nodes, **kwargs, ls='--')
node_ax.set_ylabel(r'nodes')
def check_order(prob, coll_name, ax, k_ax):
"""
Make plot of the order of the collocation problems and check if they are as expected.
Args:
prob (function): A problem from the resilience project to run
coll_name (str): The name of the collocation refinement strategy
ax (Matplotlib.pyplot.axes): Somewhere to plot
k_ax (Matplotlib.pyplot.axes): Somewhere to plot
Returns:
None
"""
dt_range = [2.0 ** (-i) for i in range(2, 11)]
res = []
label_keys = {
'type': 'quad_type',
}
for i in range(len(dt_range)):
new_params = {
'level_params': {'restol': 1e-9, 'dt': dt_range[i]},
'sweeper_params': {'num_nodes': 2, 'QI': 'IE'},
}
custom_description = {**base_params, 'convergence_controllers': special_params[coll_name], **new_params}
custom_controller_parameters = {'logger_level': 30}
stats, _, _ = prob(
Tend=dt_range[i],
custom_description=custom_description,
custom_controller_params=custom_controller_parameters,
hook_class=[LogData, LogSweeperParams, LogLocalErrorPostIter, LogEmbeddedErrorEstimatePostIter],
)
sweeper_params = get_sorted(stats, type='sweeper_params', sortby='iter')
converged_solution = [
sweeper_params[i][1] != sweeper_params[i + 1][1] for i in range(len(sweeper_params) - 1)
] + [True]
idx = np.arange(len(converged_solution))[converged_solution]
labels = [sweeper_params[i][1][label_keys.get(coll_name, 'num_nodes')] for i in idx]
e_loc = np.array([me[1] for me in get_sorted(stats, type='e_local_post_iteration', sortby='iter')])[
converged_solution
]
e_em_raw = [
me[1] for me in get_sorted(stats, type='error_embedded_estimate_collocation_post_iteration', sortby='iter')
]
e_em = np.array((e_em_raw + [None] if coll_name == 'refinement' else [None] + e_em_raw))
coll_order = np.array([me[1] for me in get_sorted(stats, type='coll_order', sortby='iter')])[converged_solution]
res += [(dt_range[i], e_loc, idx[1:] - idx[:-1], labels, coll_order, e_em)]
    # assemble something we can compute the order from
result = {'dt': [me[0] for me in res]}
embedded_errors = {'dt': [me[0] for me in res]}
num_sols = len(res[0][1])
for i in range(num_sols):
result[i] = [me[1][i] for me in res]
embedded_errors[i] = [me[5][i] for me in res]
label = res[0][3][i]
expected_order = res[0][4][i] + 1
ax.scatter(result['dt'], embedded_errors[i], color=CMAP[i])
for me in [result, embedded_errors]:
if None in me[i]:
continue
order = get_accuracy_order(me, key=i, thresh=1e-9)
assert np.isclose(
np.mean(order), expected_order, atol=0.3
), f"Expected order: {expected_order}, got {np.mean(order):.2f}!"
ax.loglog(result['dt'], result[i], label=f'{label} nodes: order: {np.mean(order):.1f}', color=CMAP[i])
if i > 0:
extra_iter = [me[2][i - 1] for me in res]
k_ax.plot(result['dt'], extra_iter, ls='--', color=CMAP[i])
ax.legend(frameon=False)
ax.set_xlabel(r'$\Delta t$')
ax.set_ylabel(r'$e_\mathrm{local}$ (lines), $e_\mathrm{embedded}$ (dots)')
k_ax.set_ylabel(r'extra iterations')
def order_stuff(prob):
fig, axs = plt.subplots(1, 3, figsize=(14, 4), sharex=True, sharey=True)
k_axs = []
modes = ['type', 'refinement', 'reduce']
for i in range(len(modes)):
k_axs += [axs.flatten()[i].twinx()]
check_order(prob, modes[i], axs.flatten()[i], k_axs[-1])
axs.flatten()[i].set_title(modes[i])
for i in range(2):
k_axs[i].set_ylabel('')
for ax in axs[1:]:
ax.set_xlabel('')
ax.set_ylabel('')
fig.tight_layout()
def adaptivity_collocation(plotting=False):
from pySDC.implementations.convergence_controller_classes.adaptivity import AdaptivityCollocation
e_tol = 1e-7
adaptive_coll_params = {
'num_nodes': [2, 3],
}
convergence_controllers = {}
convergence_controllers[AdaptivityCollocation] = {'adaptive_coll_params': adaptive_coll_params, 'e_tol': e_tol}
step_params = {}
step_params['maxiter'] = 99
level_params = {}
level_params['restol'] = 1e-8
description = {}
description['convergence_controllers'] = convergence_controllers
description['step_params'] = step_params
description['level_params'] = level_params
controller_params = {'logger_level': 30}
stats, controller, _ = run_vdp(custom_description=description, custom_controller_params=controller_params)
e_em = get_sorted(stats, type='error_embedded_estimate_collocation', recomputed=False)
assert (
max([me[1] for me in e_em]) <= e_tol
), "Exceeded threshold for local tolerance when using collocation based adaptivity"
assert (
min([me[1] for me in e_em][1:-1]) >= e_tol / 10
), "Over resolved problem when using collocation based adaptivity"
if plotting:
from pySDC.projects.Resilience.vdp import plot_step_sizes
fig, ax = plt.subplots()
plot_step_sizes(stats, ax, 'error_embedded_estimate_collocation')
def main(plotting=False):
adaptivity_collocation(plotting)
order_stuff(run_advection)
compare_adaptive_collocation(run_vdp)
if __name__ == "__main__":
main(True)
plt.show()
| 11,057 | 32.610942 | 120 | py |
pySDC | pySDC-master/pySDC/projects/Resilience/Schroedinger.py | from mpi4py import MPI
import numpy as np
from pySDC.helpers.stats_helper import get_sorted
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.implementations.sweeper_classes.imex_1st_order import imex_1st_order
from pySDC.implementations.problem_classes.NonlinearSchroedinger_MPIFFT import nonlinearschroedinger_imex
from pySDC.implementations.transfer_classes.TransferMesh_MPIFFT import fft_to_fft
from pySDC.projects.Resilience.hook import LogData, hook_collection
from pySDC.projects.Resilience.strategies import merge_descriptions
from pySDC.core.Hooks import hooks
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
class live_plotting_with_error(hooks): # pragma: no cover
def __init__(self):
super().__init__()
self.fig, self.axs = plt.subplots(1, 2, sharex=True, sharey=True, figsize=(12, 7))
divider = make_axes_locatable(self.axs[1])
self.cax_right = divider.append_axes('right', size='5%', pad=0.05)
divider = make_axes_locatable(self.axs[0])
self.cax_left = divider.append_axes('right', size='5%', pad=0.05)
def post_step(self, step, level_number):
lvl = step.levels[level_number]
lvl.sweep.compute_end_point()
self.axs[0].cla()
im1 = self.axs[0].imshow(np.abs(lvl.uend), vmin=0, vmax=2.0)
self.fig.colorbar(im1, cax=self.cax_left)
self.axs[1].cla()
im = self.axs[1].imshow(np.abs(lvl.prob.u_exact(lvl.time + lvl.dt) - lvl.uend))
self.fig.colorbar(im, cax=self.cax_right)
self.fig.suptitle(f't={lvl.time:.2f}')
self.axs[0].set_title('solution')
self.axs[1].set_title('error')
plt.pause(1e-9)
class live_plotting(hooks): # pragma: no cover
def __init__(self):
super().__init__()
self.fig, self.ax = plt.subplots()
divider = make_axes_locatable(self.ax)
self.cax = divider.append_axes('right', size='5%', pad=0.05)
def post_step(self, step, level_number):
lvl = step.levels[level_number]
lvl.sweep.compute_end_point()
self.ax.cla()
im = self.ax.imshow(np.abs(lvl.uend), vmin=0.2, vmax=1.8)
self.ax.set_title(f't={lvl.time + lvl.dt:.2f}')
self.fig.colorbar(im, cax=self.cax)
plt.pause(1e-9)
def run_Schroedinger(
custom_description=None,
num_procs=1,
Tend=1.0,
hook_class=LogData,
fault_stuff=None,
custom_controller_params=None,
use_MPI=False,
space_comm=None,
**kwargs,
):
"""
Run a Schroedinger problem with default parameters.
Args:
custom_description (dict): Overwrite presets
num_procs (int): Number of steps for MSSDC
Tend (float): Time to integrate to
hook_class (pySDC.Hook): A hook to store data
fault_stuff (dict): A dictionary with information on how to add faults
custom_controller_params (dict): Overwrite presets
use_MPI (bool): Whether or not to use MPI
Returns:
dict: The stats object
controller: The controller
Tend: The time that was supposed to be integrated to
"""
from mpi4py import MPI
space_comm = MPI.COMM_SELF if space_comm is None else space_comm
rank = space_comm.Get_rank()
# initialize level parameters
level_params = dict()
level_params['restol'] = 1e-8
level_params['dt'] = 1e-01 / 2
level_params['nsweeps'] = 1
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['num_nodes'] = 3
sweeper_params['QI'] = 'IE'
sweeper_params['initial_guess'] = 'spread'
# initialize problem parameters
problem_params = dict()
problem_params['nvars'] = (128, 128)
problem_params['spectral'] = False
problem_params['c'] = 1.0
problem_params['comm'] = space_comm
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 50
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 30 if rank == 0 else 99
controller_params['hook_class'] = hook_collection + (hook_class if type(hook_class) == list else [hook_class])
controller_params['mssdc_jac'] = False
# fill description dictionary for easy step instantiation
if custom_controller_params is not None:
controller_params = {**controller_params, **custom_controller_params}
description = dict()
description['problem_params'] = problem_params
description['problem_class'] = nonlinearschroedinger_imex
description['sweeper_class'] = imex_1st_order
description['sweeper_params'] = sweeper_params
description['level_params'] = level_params
description['step_params'] = step_params
if custom_description is not None:
description = merge_descriptions(description, custom_description)
# set time parameters
t0 = 0.0
# instantiate controller
controller_args = {
'controller_params': controller_params,
'description': description,
}
if use_MPI:
from pySDC.implementations.controller_classes.controller_MPI import controller_MPI
comm = kwargs.get('comm', MPI.COMM_WORLD)
controller = controller_MPI(**controller_args, comm=comm)
P = controller.S.levels[0].prob
else:
controller = controller_nonMPI(**controller_args, num_procs=num_procs)
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(t0)
# insert faults
if fault_stuff is not None:
from pySDC.projects.Resilience.fault_injection import prepare_controller_for_faults
nvars = [me / 2 for me in problem_params['nvars']]
nvars[0] += 1
rnd_args = {'iteration': 5, 'problem_pos': nvars, 'min_node': 1}
args = {'time': 0.3, 'target': 0}
prepare_controller_for_faults(controller, fault_stuff, rnd_args, args)
# call main function to get things done...
uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
return stats, controller, Tend
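# Example usage (illustrative): to parallelize in space, launch the script with MPI, e.g.
#   mpirun -np 4 python Schroedinger.py
# main() below passes MPI.COMM_WORLD as the space communicator.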
def main():
stats, _, _ = run_Schroedinger(space_comm=MPI.COMM_WORLD, hook_class=live_plotting)
plt.show()
if __name__ == "__main__":
main()
| 6,270 | 32.534759 | 114 | py |
pySDC | pySDC-master/pySDC/projects/Resilience/advection.py | # script to run a simple advection problem
from pySDC.implementations.problem_classes.AdvectionEquation_ND_FD import advectionNd
from pySDC.implementations.sweeper_classes.generic_implicit import generic_implicit
from pySDC.helpers.stats_helper import get_sorted
from pySDC.projects.Resilience.hook import LogData, hook_collection
from pySDC.projects.Resilience.fault_injection import prepare_controller_for_faults
from pySDC.projects.Resilience.strategies import merge_descriptions
def plot_embedded(stats, ax):
u = get_sorted(stats, type='u', recomputed=False)
uold = get_sorted(stats, type='uold', recomputed=False)
t = [me[0] for me in u]
e_em = get_sorted(stats, type='error_embedded_estimate', recomputed=False)
e_em_semi_glob = [abs(u[i][1] - uold[i][1]) for i in range(len(u))]
ax.plot(t, e_em_semi_glob, label=r'$\|u^{\left(k-1\right)}-u^{\left(k\right)}\|$')
ax.plot([me[0] for me in e_em], [me[1] for me in e_em], linestyle='--', label=r'$\epsilon$')
ax.set_xlabel(r'$t$')
ax.legend(frameon=False)
def run_advection(
custom_description=None,
num_procs=1,
Tend=2e-1,
hook_class=LogData,
fault_stuff=None,
custom_controller_params=None,
use_MPI=False,
**kwargs,
):
"""
Run an advection problem with default parameters.
Args:
custom_description (dict): Overwrite presets
num_procs (int): Number of steps for MSSDC
Tend (float): Time to integrate to
hook_class (pySDC.Hook): A hook to store data
fault_stuff (dict): A dictionary with information on how to add faults
custom_controller_params (dict): Overwrite presets
use_MPI (bool): Whether or not to use MPI
Returns:
dict: The stats object
controller: The controller
Tend: The time that was supposed to be integrated to
"""
# initialize level parameters
level_params = {}
level_params['dt'] = 0.05
# initialize sweeper parameters
sweeper_params = {}
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['num_nodes'] = 3
sweeper_params['QI'] = 'IE'
problem_params = {'freq': 2, 'nvars': 2**9, 'c': 1.0, 'stencil_type': 'center', 'order': 4, 'bc': 'periodic'}
# initialize step parameters
step_params = {}
step_params['maxiter'] = 5
# initialize controller parameters
controller_params = {}
controller_params['logger_level'] = 30
controller_params['hook_class'] = hook_collection + (hook_class if type(hook_class) == list else [hook_class])
controller_params['mssdc_jac'] = False
if custom_controller_params is not None:
controller_params = {**controller_params, **custom_controller_params}
# fill description dictionary for easy step instantiation
description = {}
description['problem_class'] = advectionNd
description['problem_params'] = problem_params
description['sweeper_class'] = generic_implicit
description['sweeper_params'] = sweeper_params
description['level_params'] = level_params
description['step_params'] = step_params
if custom_description is not None:
description = merge_descriptions(description, custom_description)
# set time parameters
t0 = 0.0
# instantiate controller
if use_MPI:
from mpi4py import MPI
from pySDC.implementations.controller_classes.controller_MPI import controller_MPI
comm = kwargs.get('comm', MPI.COMM_WORLD)
controller = controller_MPI(controller_params=controller_params, description=description, comm=comm)
# get initial values on finest level
P = controller.S.levels[0].prob
uinit = P.u_exact(t0)
else:
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
controller = controller_nonMPI(
num_procs=num_procs, controller_params=controller_params, description=description
)
# get initial values on finest level
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(t0)
# insert faults
if fault_stuff is not None:
rnd_args = {
'iteration': 5,
}
args = {
'time': 1e-1,
'target': 0,
}
prepare_controller_for_faults(controller, fault_stuff, rnd_args, args)
# call main function to get things done...
uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
return stats, controller, Tend
if __name__ == '__main__':
import matplotlib.pyplot as plt
from pySDC.implementations.convergence_controller_classes.adaptivity import Adaptivity
from pySDC.projects.Resilience.hook import LogUold
adaptivity_params = {}
adaptivity_params['e_tol'] = 1e-8
convergence_controllers = {}
convergence_controllers[Adaptivity] = adaptivity_params
description = {}
description['convergence_controllers'] = convergence_controllers
fig, axs = plt.subplots(1, 2, figsize=(12, 4), sharex=True, sharey=True)
plot_embedded(run_advection(description, 1, hook_class=LogUold)[0], axs[0])
plot_embedded(run_advection(description, 4, hook_class=LogUold)[0], axs[1])
axs[0].set_title('1 process')
axs[1].set_title('4 processes')
fig.tight_layout()
plt.show()
| 5,282 | 34.456376 | 114 | py |
pySDC | pySDC-master/pySDC/projects/Resilience/strategies.py | import numpy as np
from matplotlib.colors import TABLEAU_COLORS
cmap = TABLEAU_COLORS
def merge_descriptions(descA, descB):
"""
Merge two dictionaries that may contain dictionaries, which happens when merging descriptions, for instance.
    Keys that occur in both dictionaries are overwritten by the values from `descB`; note that `descA` is modified in
    place, not copied!
Args:
descA (dict): Dictionary that you want to merge into
descB (dict): Dictionary you want to merge from
Returns:
        dict: descA with updated parameters
"""
for key in descB.keys():
if type(descB[key]) == dict:
descA[key] = merge_descriptions(descA.get(key, {}), descB[key])
else:
descA[key] = descB[key]
return descA
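# Example:
# >>> merge_descriptions({'level_params': {'dt': 0.1}}, {'level_params': {'restol': 1e-8}})
# {'level_params': {'dt': 0.1, 'restol': 1e-8}}
# Note that the first argument is modified in place.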
class Strategy:
'''
Abstract class for resilience strategies
'''
def __init__(self, useMPI=False, skip_residual_computation='none'):
'''
Initialization routine
'''
self.useMPI = useMPI
# set default values for plotting
self.linestyle = '-'
self.marker = '.'
self.name = ''
self.bar_plot_x_label = ''
self.color = list(cmap.values())[0]
# parameters for computational efficiency
if skip_residual_computation == 'all':
self.skip_residual_computation = ('IT_CHECK', 'IT_DOWN', 'IT_UP', 'IT_FINE', 'IT_COARSE')
elif skip_residual_computation == 'most':
self.skip_residual_computation = ('IT_DOWN', 'IT_UP', 'IT_FINE', 'IT_COARSE')
else:
self.skip_residual_computation = ()
# setup custom descriptions
self.custom_description = {}
self.custom_description['sweeper_params'] = {'skip_residual_computation': self.skip_residual_computation}
# prepare parameters for masks to identify faults that cannot be fixed by this strategy
self.fixable = []
self.fixable += [
{
'key': 'node',
'op': 'gt',
'val': 0,
}
]
self.fixable += [
{
'key': 'error',
'op': 'isfinite',
}
]
# stuff for work-precision diagrams
self.precision_parameter = None
self.precision_parameter_loc = []
def get_fixable_params(self, **kwargs):
"""
Return a list containing dictionaries which can be passed to `FaultStats.get_mask` as keyword arguments to
obtain a mask of faults that can be fixed
Returns:
list: Dictionary of parameters
"""
return self.fixable
def get_fault_args(self, problem, num_procs):
'''
Routine to get arguments for the faults that are exempt from randomization
Args:
problem: A function that runs a pySDC problem, see imports for available problems
num_procs (int): Number of processes you intend to run with
Returns:
dict: Arguments for the faults that are exempt from randomization
'''
return {}
def get_random_params(self, problem, num_procs):
'''
Routine to get parameters for the randomization of faults
Args:
problem: A function that runs a pySDC problem, see imports for available problems
num_procs (int): Number of processes you intend to run with
Returns:
dict: Randomization parameters
'''
return {}
@property
def style(self):
"""
Get the plotting parameters for the strategy.
Supply them to a plotting function using `**`
Returns:
(dict): The plotting parameters as a dictionary
"""
return {
'marker': self.marker,
'label': self.label,
'color': self.color,
'ls': self.linestyle,
}
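    # Example (illustrative): ax.plot(x, y, **strategy.style) passes marker, label, color and
    # linestyle straight to matplotlib.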
@property
def label(self):
"""
Get a label for plotting
"""
return self.name
def get_Tend(self, problem, num_procs=1):
'''
Get the final time of runs for fault stats based on the problem
Args:
problem (function): A problem to run
num_procs (int): Number of processes
Returns:
float: Tend to put into the run
'''
if problem.__name__ == "run_vdp":
return 11.5
# return 2.3752559741400825 # old stuff
elif problem.__name__ == "run_piline":
return 20.0
elif problem.__name__ == "run_Lorenz":
return 1.5
elif problem.__name__ == "run_Schroedinger":
return 1.0
elif problem.__name__ == "run_quench":
return 500.0
else:
raise NotImplementedError('I don\'t have a final time for your problem!')
def get_custom_description(self, problem, num_procs=1):
'''
Get a custom description based on the problem
Args:
problem (function): A problem to run
num_procs (int): Number of processes
Returns:
dict: Custom description
'''
custom_description = {}
if problem.__name__ == "run_vdp":
custom_description['step_params'] = {'maxiter': 3}
custom_description['problem_params'] = {
'u0': np.array([2, 0], dtype=np.float64),
# 'u0': np.array([0.99995, -0.00999985], dtype=np.float64), # old stuff
'crash_at_maxiter': False,
'newton_tol': 1e-11,
}
custom_description['level_params'] = {'dt': 1e-2}
elif problem.__name__ == "run_Lorenz":
custom_description['step_params'] = {'maxiter': 5}
custom_description['level_params'] = {'dt': 1e-2}
elif problem.__name__ == "run_Schroedinger":
custom_description['step_params'] = {'maxiter': 5}
custom_description['level_params'] = {'dt': 1e-2, 'restol': -1}
elif problem.__name__ == "run_quench":
custom_description['level_params'] = {'restol': -1, 'dt': 8.0}
custom_description['step_params'] = {'maxiter': 5}
custom_description['problem_params'] = {'newton_iter': 99, 'newton_tol': 1e-11}
return merge_descriptions(custom_description, self.custom_description)
def get_reference_value(self, problem, key, op, num_procs=1):
"""
Get a reference value for a given problem for testing in CI.
Args:
problem: A function that runs a pySDC problem, see imports for available problems
key (str): The name of the variable you want to compare
op (function): The operation you want to apply to the data
num_procs (int): Number of processes
Returns:
The reference value
"""
raise NotImplementedError('The reference value you are looking for is not implemented for this strategy!')
class BaseStrategy(Strategy):
'''
Do a fixed iteration count
'''
def __init__(self, useMPI=False, skip_residual_computation='all'):
'''
Initialization routine
'''
super().__init__(useMPI=useMPI, skip_residual_computation=skip_residual_computation)
self.color = list(cmap.values())[0]
self.marker = 'o'
self.name = 'base'
self.bar_plot_x_label = 'base'
self.precision_parameter = 'dt'
self.precision_parameter_loc = ['level_params', 'dt']
@property
def label(self):
return r'fixed'
def get_reference_value(self, problem, key, op, num_procs=1):
"""
Get a reference value for a given problem for testing in CI.
Args:
problem: A function that runs a pySDC problem, see imports for available problems
key (str): The name of the variable you want to compare
op (function): The operation you want to apply to the data
num_procs (int): Number of processes
Returns:
The reference value
"""
if problem.__name__ == "run_vdp":
if key == 'work_newton' and op == sum:
return 12453
elif key == 'e_global_post_run' and op == max:
return 4.3956128381594795e-06
raise NotImplementedError('The reference value you are looking for is not implemented for this strategy!')
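# Example of the strategy interface (illustrative; assumes run_vdp from pySDC.projects.Resilience.vdp):
#   strategy = BaseStrategy()
#   description = strategy.get_custom_description(run_vdp, num_procs=1)
#   stats, controller, _ = run_vdp(custom_description=description, Tend=strategy.get_Tend(run_vdp))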
class AdaptivityStrategy(Strategy):
'''
Adaptivity as a resilience strategy
'''
def __init__(self, useMPI=False, skip_residual_computation='all'):
'''
Initialization routine
'''
from pySDC.implementations.convergence_controller_classes.adaptivity import Adaptivity
super().__init__(useMPI=useMPI, skip_residual_computation=skip_residual_computation)
self.color = list(cmap.values())[1]
self.marker = '*'
self.name = 'adaptivity'
self.bar_plot_x_label = 'adaptivity'
self.precision_parameter = 'e_tol'
self.precision_parameter_loc = ['convergence_controllers', Adaptivity, 'e_tol']
def get_fixable_params(self, maxiter, **kwargs):
"""
Here faults occurring in the last iteration cannot be fixed.
Args:
maxiter (int): Max. iterations until convergence is declared
Returns:
(list): Contains dictionaries of keyword arguments for `FaultStats.get_mask`
"""
self.fixable += [
{
'key': 'iteration',
'op': 'lt',
'val': maxiter,
}
]
return self.fixable
def get_custom_description(self, problem, num_procs):
'''
Routine to get a custom description that adds adaptivity
Args:
problem: A function that runs a pySDC problem, see imports for available problems
num_procs (int): Number of processes you intend to run with
Returns:
The custom descriptions you can supply to the problem when running it
'''
from pySDC.implementations.convergence_controller_classes.adaptivity import Adaptivity
custom_description = {}
custom_description['convergence_controllers'] = {}
dt_max = np.inf
dt_min = 1e-5
dt_slope_max = np.inf
if problem.__name__ == "run_piline":
e_tol = 1e-7
dt_min = 1e-2
elif problem.__name__ == "run_vdp":
e_tol = 2e-5
dt_min = 1e-3
elif problem.__name__ == "run_Lorenz":
e_tol = 2e-5
dt_min = 1e-3
elif problem.__name__ == "run_Schroedinger":
e_tol = 4e-6
dt_min = 1e-3
elif problem.__name__ == "run_quench":
e_tol = 1e-5
dt_min = 1e-3
# dt_max = 25
# dt_slope_max = 4.
from pySDC.implementations.convergence_controller_classes.basic_restarting import BasicRestarting
flavor = 'MPI' if self.useMPI else 'nonMPI'
custom_description['convergence_controllers'][BasicRestarting.get_implementation(flavor)] = {
'max_restarts': 15
}
else:
raise NotImplementedError(
                'I don\'t have a tolerance for adaptivity for your problem. Please add one to the strategy'
)
custom_description['convergence_controllers'][Adaptivity] = {
'e_tol': e_tol,
'dt_min': dt_min,
'dt_max': dt_max,
'dt_slope_max': dt_slope_max,
}
return merge_descriptions(super().get_custom_description(problem, num_procs), custom_description)
def get_reference_value(self, problem, key, op, num_procs=1):
"""
Get a reference value for a given problem for testing in CI.
Args:
problem: A function that runs a pySDC problem, see imports for available problems
key (str): The name of the variable you want to compare
op (function): The operation you want to apply to the data
num_procs (int): Number of processes
Returns:
The reference value
"""
if problem.__name__ == "run_vdp":
if key == 'work_newton' and op == sum:
return 3825
elif key == 'e_global_post_run' and op == max:
return 1.3370376368393444e-05
raise NotImplementedError('The reference value you are looking for is not implemented for this strategy!')
class AdaptiveHotRodStrategy(Strategy):
'''
Adaptivity + Hot Rod as a resilience strategy
'''
def __init__(self, useMPI=False, skip_residual_computation='all'):
'''
Initialization routine
'''
from pySDC.implementations.convergence_controller_classes.adaptivity import Adaptivity
super().__init__(useMPI=useMPI, skip_residual_computation=skip_residual_computation)
self.color = list(cmap.values())[4]
self.marker = '.'
self.name = 'adaptive Hot Rod'
self.bar_plot_x_label = 'adaptive\nHot Rod'
self.precision_parameter = 'e_tol'
self.precision_parameter_loc = ['convergence_controllers', Adaptivity, 'e_tol']
def get_custom_description(self, problem, num_procs):
'''
Routine to get a custom description that adds adaptivity and Hot Rod
Args:
problem: A function that runs a pySDC problem, see imports for available problems
num_procs (int): Number of processes you intend to run with
Returns:
The custom description you can supply to the problem when running it
'''
from pySDC.implementations.convergence_controller_classes.hotrod import HotRod
from pySDC.implementations.convergence_controller_classes.adaptivity import Adaptivity
if problem.__name__ == "run_vdp":
e_tol = 3e-7
dt_min = 1e-3
maxiter = 4
HotRod_tol = 2e-6
else:
raise NotImplementedError(
                'I don\'t have a tolerance for adaptive Hot Rod for your problem. Please add one to the strategy'
)
no_storage = num_procs > 1
custom_description = {
'convergence_controllers': {
HotRod: {'HotRod_tol': HotRod_tol, 'no_storage': no_storage},
Adaptivity: {'e_tol': e_tol, 'dt_min': dt_min, 'embedded_error_flavor': 'linearized'},
},
'step_params': {'maxiter': maxiter},
}
return merge_descriptions(super().get_custom_description(problem, num_procs), custom_description)
def get_reference_value(self, problem, key, op, num_procs=1):
"""
Get a reference value for a given problem for testing in CI.
Args:
problem: A function that runs a pySDC problem, see imports for available problems
key (str): The name of the variable you want to compare
op (function): The operation you want to apply to the data
num_procs (int): Number of processes
Returns:
The reference value
"""
if problem.__name__ == "run_vdp":
if key == 'work_newton' and op == sum:
return 4466
elif key == 'e_global_post_run' and op == max:
return 2.1455229857747504e-06
raise NotImplementedError('The reference value you are looking for is not implemented for this strategy!')
class IterateStrategy(Strategy):
'''
Iterate for as much as you want
'''
def __init__(self, useMPI=False, skip_residual_computation='most'):
'''
Initialization routine
'''
super().__init__(useMPI=useMPI, skip_residual_computation=skip_residual_computation)
self.color = list(cmap.values())[2]
self.marker = 'v'
self.name = 'iterate'
self.bar_plot_x_label = 'iterate'
self.precision_parameter = 'restol'
self.precision_parameter_loc = ['level_params', 'restol']
@property
def label(self):
return r'$k$ adaptivity'
def get_custom_description(self, problem, num_procs):
'''
Routine to get a custom description that allows for adaptive iteration counts
Args:
problem: A function that runs a pySDC problem, see imports for available problems
num_procs (int): Number of processes you intend to run with
Returns:
The custom description you can supply to the problem when running it
'''
restol = -1
e_tol = -1
if problem.__name__ == "run_piline":
restol = 2.3e-8
elif problem.__name__ == "run_vdp":
restol = 9e-7
elif problem.__name__ == "run_Lorenz":
restol = 16e-7
elif problem.__name__ == "run_Schroedinger":
restol = 6.5e-7
elif problem.__name__ == "run_quench":
restol = 1e-7
else:
raise NotImplementedError(
                'I don\'t have a residual tolerance for your problem. Please add one to the strategy'
)
custom_description = {
'step_params': {'maxiter': 99},
'level_params': {'restol': restol, 'e_tol': e_tol},
}
if problem.__name__ == "run_quench":
custom_description['level_params']['dt'] = 1
return merge_descriptions(super().get_custom_description(problem, num_procs), custom_description)
def get_reference_value(self, problem, key, op, num_procs=1):
"""
Get a reference value for a given problem for testing in CI.
Args:
problem: A function that runs a pySDC problem, see imports for available problems
key (str): The name of the variable you want to compare
op (function): The operation you want to apply to the data
num_procs (int): Number of processes
Returns:
The reference value
"""
if problem.__name__ == "run_vdp":
if key == 'work_newton' and op == sum:
return 8534
elif key == 'e_global_post_run' and op == max:
return 0.0005961192269257065
raise NotImplementedError('The reference value you are looking for is not implemented for this strategy!')
class HotRodStrategy(Strategy):
'''
Hot Rod as a resilience strategy
'''
def __init__(self, useMPI=False, skip_residual_computation='all'):
'''
Initialization routine
'''
super().__init__(useMPI=useMPI, skip_residual_computation=skip_residual_computation)
self.color = list(cmap.values())[3]
self.marker = '^'
self.name = 'Hot Rod'
self.bar_plot_x_label = 'Hot Rod'
self.precision_parameter = 'dt'
self.precision_parameter_loc = ['level_params', 'dt']
def get_custom_description(self, problem, num_procs):
'''
Routine to get a custom description that adds Hot Rod
Args:
problem: A function that runs a pySDC problem, see imports for available problems
num_procs (int): Number of processes you intend to run with
Returns:
The custom description you can supply to the problem when running it
'''
from pySDC.implementations.convergence_controller_classes.hotrod import HotRod
from pySDC.implementations.convergence_controller_classes.basic_restarting import BasicRestartingNonMPI
if problem.__name__ == "run_vdp":
HotRod_tol = 5e-7
maxiter = 4
elif problem.__name__ == "run_Lorenz":
HotRod_tol = 4e-7
maxiter = 6
elif problem.__name__ == "run_Schroedinger":
HotRod_tol = 3e-7
maxiter = 6
elif problem.__name__ == "run_quench":
HotRod_tol = 3e-5
maxiter = 6
else:
raise NotImplementedError(
                'I don\'t have a tolerance for Hot Rod for your problem. Please add one to the strategy'
)
no_storage = num_procs > 1
custom_description = {
'convergence_controllers': {
HotRod: {'HotRod_tol': HotRod_tol, 'no_storage': no_storage},
BasicRestartingNonMPI: {'max_restarts': 2, 'crash_after_max_restarts': False},
},
'step_params': {'maxiter': maxiter},
}
return merge_descriptions(super().get_custom_description(problem, num_procs), custom_description)
def get_reference_value(self, problem, key, op, num_procs=1):
"""
Get a reference value for a given problem for testing in CI.
Args:
problem: A function that runs a pySDC problem, see imports for available problems
key (str): The name of the variable you want to compare
op (function): The operation you want to apply to the data
num_procs (int): Number of processes
Returns:
The reference value
"""
if problem.__name__ == "run_vdp":
if key == 'work_newton' and op == sum:
return 15230
elif key == 'e_global_post_run' and op == max:
return 4.3956128381594795e-06
raise NotImplementedError('The reference value you are looking for is not implemented for this strategy!')
class AdaptivityCollocationStrategy(Strategy):
'''
Adaptivity based on collocation as a resilience strategy
'''
def __init__(self, useMPI=False, skip_residual_computation='most'):
'''
Initialization routine
'''
from pySDC.implementations.convergence_controller_classes.adaptivity import AdaptivityCollocation
super().__init__(useMPI=useMPI, skip_residual_computation=skip_residual_computation)
self.color = list(cmap.values())[1]
self.marker = '*'
self.name = 'adaptivity_coll'
self.bar_plot_x_label = 'adaptivity collocation'
self.precision_parameter = 'e_tol'
self.adaptive_coll_params = {}
self.precision_parameter_loc = ['convergence_controllers', AdaptivityCollocation, 'e_tol']
self.restol = None
self.maxiter = 99
def get_custom_description(self, problem, num_procs):
'''
Routine to get a custom description that adds adaptivity
Args:
problem: A function that runs a pySDC problem, see imports for available problems
num_procs (int): Number of processes you intend to run with
Returns:
The custom descriptions you can supply to the problem when running it
'''
from pySDC.implementations.convergence_controller_classes.adaptivity import AdaptivityCollocation
custom_description = {}
custom_description['step_params'] = {'maxiter': self.maxiter}
dt_max = np.inf
dt_min = 1e-5
if problem.__name__ == "run_piline":
e_tol = 1e-7
dt_min = 1e-2
elif problem.__name__ == "run_vdp":
e_tol = 2e-5
dt_min = 1e-3
elif problem.__name__ == "run_Lorenz":
e_tol = 2e-5
dt_min = 1e-3
elif problem.__name__ == "run_Schroedinger":
e_tol = 4e-6
dt_min = 1e-3
elif problem.__name__ == "run_quench":
e_tol = 1e-5
dt_min = 1e-3
dt_max = 1e2
else:
raise NotImplementedError(
                'I don\'t have a tolerance for adaptivity for your problem. Please add one to the strategy'
)
custom_description['level_params'] = {'restol': e_tol / 10 if self.restol is None else self.restol}
custom_description['convergence_controllers'] = {
AdaptivityCollocation: {
'e_tol': e_tol,
'dt_min': dt_min,
'dt_max': dt_max,
'adaptive_coll_params': self.adaptive_coll_params,
}
}
return merge_descriptions(super().get_custom_description(problem, num_procs), custom_description)
class AdaptivityCollocationTypeStrategy(AdaptivityCollocationStrategy):
def __init__(self, useMPI=False, skip_residual_computation='most'):
super().__init__(useMPI=useMPI, skip_residual_computation=skip_residual_computation)
self.color = list(cmap.values())[4]
self.marker = '.'
self.adaptive_coll_params = {
'quad_type': ['RADAU-RIGHT', 'GAUSS'],
'do_coll_update': [False, True],
}
@property
def label(self):
return 'adaptivity type'
def get_reference_value(self, problem, key, op, num_procs=1):
"""
Get a reference value for a given problem for testing in CI.
Args:
problem: A function that runs a pySDC problem, see imports for available problems
key (str): The name of the variable you want to compare
op (function): The operation you want to apply to the data
num_procs (int): Number of processes
Returns:
The reference value
"""
if problem.__name__ == "run_vdp":
if key == 'work_newton' and op == sum:
return 2694
elif key == 'e_global_post_run' and op == max:
return 2.1707816100224875e-06
raise NotImplementedError('The reference value you are looking for is not implemented for this strategy!')
class AdaptivityCollocationRefinementStrategy(AdaptivityCollocationStrategy):
def __init__(self, useMPI=False, skip_residual_computation='most'):
super().__init__(useMPI=useMPI, skip_residual_computation=skip_residual_computation)
self.color = list(cmap.values())[5]
self.marker = '^'
self.adaptive_coll_params = {
'num_nodes': [2, 3],
'quad_type': ['GAUSS', 'RADAU-RIGHT'],
'do_coll_update': [True, False],
}
@property
def label(self):
return 'adaptivity refinement'
def get_reference_value(self, problem, key, op, num_procs=1):
"""
Get a reference value for a given problem for testing in CI.
Args:
problem: A function that runs a pySDC problem, see imports for available problems
key (str): The name of the variable you want to compare
op (function): The operation you want to apply to the data
num_procs (int): Number of processes
Returns:
The reference value
"""
if problem.__name__ == "run_vdp":
if key == 'work_newton' and op == sum:
return 1881
elif key == 'e_global_post_run' and op == max:
return 3.3428689244496823e-06
raise NotImplementedError('The reference value you are looking for is not implemented for this strategy!')
class AdaptivityCollocationDerefinementStrategy(AdaptivityCollocationStrategy):
def __init__(self, useMPI=False, skip_residual_computation='most'):
super().__init__(useMPI=useMPI, skip_residual_computation=skip_residual_computation)
self.color = list(cmap.values())[6]
self.marker = '^'
self.adaptive_coll_params = {'num_nodes': [4, 3]}
@property
def label(self):
return 'adaptivity de-refinement'
def get_reference_value(self, problem, key, op, num_procs=1):
"""
Get a reference value for a given problem for testing in CI.
Args:
problem: A function that runs a pySDC problem, see imports for available problems
key (str): The name of the variable you want to compare
op (function): The operation you want to apply to the data
num_procs (int): Number of processes
Returns:
The reference value
"""
if problem.__name__ == "run_vdp":
if key == 'work_newton' and op == sum:
return 3421
elif key == 'e_global_post_run' and op == max:
return 2.1130961994131336e-05
raise NotImplementedError('The reference value you are looking for is not implemented for this strategy!')
class DIRKStrategy(AdaptivityStrategy):
'''
DIRK4(3)
'''
def __init__(self, useMPI=False, skip_residual_computation='all'):
'''
Initialization routine
'''
from pySDC.implementations.convergence_controller_classes.adaptivity import AdaptivityRK
super().__init__(useMPI=useMPI, skip_residual_computation=skip_residual_computation)
self.color = list(cmap.values())[7]
self.marker = '^'
self.name = 'DIRK'
self.bar_plot_x_label = 'DIRK4(3)'
self.precision_parameter = 'e_tol'
self.precision_parameter_loc = ['convergence_controllers', AdaptivityRK, 'e_tol']
@property
def label(self):
return 'DIRK4(3)'
def get_custom_description(self, problem, num_procs):
'''
Routine to get a custom description that adds adaptivity
Args:
problem: A function that runs a pySDC problem, see imports for available problems
num_procs (int): Number of processes you intend to run with
Returns:
The custom descriptions you can supply to the problem when running it
'''
from pySDC.implementations.convergence_controller_classes.adaptivity import AdaptivityRK, Adaptivity
from pySDC.implementations.sweeper_classes.Runge_Kutta import DIRK43
adaptivity_description = super().get_custom_description(problem, num_procs)
e_tol = adaptivity_description['convergence_controllers'][Adaptivity]['e_tol']
adaptivity_description['convergence_controllers'].pop(Adaptivity, None)
adaptivity_description.pop('sweeper_params', None)
rk_params = {
'step_params': {'maxiter': 1},
'sweeper_class': DIRK43,
'convergence_controllers': {AdaptivityRK: {'e_tol': e_tol}},
}
custom_description = merge_descriptions(adaptivity_description, rk_params)
return custom_description
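    # Design note: the adaptivity setup is reused and only the sweeper is swapped for DIRK43 with
    # maxiter=1, since a single "sweep" of a Runge-Kutta sweeper already computes the full step.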
def get_reference_value(self, problem, key, op, num_procs=1):
"""
Get a reference value for a given problem for testing in CI.
Args:
problem: A function that runs a pySDC problem, see imports for available problems
key (str): The name of the variable you want to compare
op (function): The operation you want to apply to the data
num_procs (int): Number of processes
Returns:
The reference value
"""
if problem.__name__ == "run_vdp":
if key == 'work_newton' and op == sum:
return 2168
elif key == 'e_global_post_run' and op == max:
return 0.00024166437265116247
raise NotImplementedError('The reference value you are looking for is not implemented for this strategy!')
class ERKStrategy(DIRKStrategy):
"""
Explicit embedded RK using Cash-Karp's method
"""
def __init__(self, useMPI=False, skip_residual_computation='all'):
'''
Initialization routine
'''
super().__init__(useMPI=useMPI, skip_residual_computation=skip_residual_computation)
self.color = list(cmap.values())[9]
self.marker = 'x'
self.name = 'ERK'
self.bar_plot_x_label = 'ERK5(4)'
@property
def label(self):
return 'CP5(4)'
"""
Explicit Cash-Karp's method
"""
def get_custom_description(self, problem, num_procs=1):
from pySDC.implementations.sweeper_classes.Runge_Kutta import Cash_Karp
desc = super().get_custom_description(problem, num_procs)
desc['sweeper_class'] = Cash_Karp
return desc
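    # Cash-Karp is fully explicit, which is why the Newton work in the reference values below is zero.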
def get_reference_value(self, problem, key, op, num_procs=1):
"""
Get a reference value for a given problem for testing in CI.
Args:
problem: A function that runs a pySDC problem, see imports for available problems
key (str): The name of the variable you want to compare
op (function): The operation you want to apply to the data
num_procs (int): Number of processes
Returns:
The reference value
"""
if problem.__name__ == "run_vdp":
if key == 'work_newton' and op == sum:
return 0
elif key == 'e_global_post_run' and op == max:
return 2.0606132165701396e-05
raise NotImplementedError('The reference value you are looking for is not implemented for this strategy!')
class DoubleAdaptivityStrategy(AdaptivityStrategy):
'''
Adaptivity based both on embedded estimate and on residual
'''
def __init__(self, useMPI=False, skip_residual_computation='all'):
'''
Initialization routine
'''
from pySDC.implementations.convergence_controller_classes.adaptivity import Adaptivity
super().__init__(useMPI=useMPI, skip_residual_computation=skip_residual_computation)
self.color = list(cmap.values())[7]
self.marker = '^'
self.name = 'double_adaptivity'
self.bar_plot_x_label = 'double adaptivity'
self.precision_parameter = 'e_tol'
self.precision_parameter_loc = ['convergence_controllers', Adaptivity, 'e_tol']
self.residual_e_tol_ratio = 1.0
self.residual_e_tol_abs = None
@property
def label(self):
return 'double adaptivity'
def get_custom_description(self, problem, num_procs):
'''
Routine to get a custom description that adds adaptivity
Args:
problem: A function that runs a pySDC problem, see imports for available problems
num_procs (int): Number of processes you intend to run with
Returns:
The custom descriptions you can supply to the problem when running it
'''
from pySDC.implementations.convergence_controller_classes.adaptivity import AdaptivityResidual, Adaptivity
from pySDC.implementations.convergence_controller_classes.basic_restarting import BasicRestarting
custom_description = super().get_custom_description(problem, num_procs)
if self.residual_e_tol_abs:
e_tol = self.residual_e_tol_abs
else:
e_tol = custom_description['convergence_controllers'][Adaptivity]['e_tol'] * self.residual_e_tol_ratio
custom_description['convergence_controllers'][AdaptivityResidual] = {
'e_tol': e_tol,
'allowed_modifications': ['decrease'],
}
flavor = 'MPI' if self.useMPI else 'nonMPI'
custom_description['convergence_controllers'][BasicRestarting.get_implementation(flavor)] = {'max_restarts': 15}
return custom_description
def get_reference_value(self, problem, key, op, num_procs=1):
"""
Get a reference value for a given problem for testing in CI.
Args:
problem: A function that runs a pySDC problem, see imports for available problems
key (str): The name of the variable you want to compare
op (function): The operation you want to apply to the data
num_procs (int): Number of processes
Returns:
The reference value
"""
if problem.__name__ == "run_vdp":
if key == 'work_newton' and op == sum:
return 3825
elif key == 'e_global_post_run' and op == max:
return 1.3370376368393444e-05
raise NotImplementedError('The reference value you are looking for is not implemented for this strategy!')
class AdaptivityAvoidRestartsStrategy(AdaptivityStrategy):
"""
Adaptivity with the avoid restarts option
"""
@property
def label(self):
return 'adaptivity (avoid restarts)'
def get_custom_description(self, problem, num_procs):
'''
Routine to get a custom description that adds adaptivity
Args:
problem: A function that runs a pySDC problem, see imports for available problems
num_procs (int): Number of processes you intend to run with
Returns:
The custom descriptions you can supply to the problem when running it
'''
from pySDC.implementations.convergence_controller_classes.adaptivity import Adaptivity
from pySDC.implementations.convergence_controller_classes.basic_restarting import BasicRestarting
custom_description = super().get_custom_description(problem, num_procs)
custom_description['convergence_controllers'][Adaptivity]['avoid_restarts'] = True
flavor = 'MPI' if self.useMPI else 'nonMPI'
custom_description['convergence_controllers'][BasicRestarting.get_implementation(flavor)] = {'max_restarts': 15}
return custom_description
def get_reference_value(self, problem, key, op, num_procs=1):
"""
Get a reference value for a given problem for testing in CI.
Args:
problem: A function that runs a pySDC problem, see imports for available problems
key (str): The name of the variable you want to compare
op (function): The operation you want to apply to the data
num_procs (int): Number of processes
Returns:
The reference value
"""
if problem.__name__ == "run_vdp":
if key == 'work_newton' and op == sum:
return 2955
elif key == 'e_global_post_run' and op == max:
return 5.274015506540053e-07
raise NotImplementedError('The reference value you are looking for is not implemented for this strategy!')
class AdaptivityInterpolationStrategy(AdaptivityStrategy):
"""
Adaptivity with interpolation between restarts
"""
@property
def label(self):
return 'adaptivity+interpolation'
def get_custom_description(self, problem, num_procs):
'''
Routine to get a custom description that adds adaptivity
Args:
problem: A function that runs a pySDC problem, see imports for available problems
num_procs (int): Number of processes you intend to run with
Returns:
The custom descriptions you can supply to the problem when running it
'''
from pySDC.implementations.convergence_controller_classes.adaptivity import Adaptivity
from pySDC.implementations.convergence_controller_classes.interpolate_between_restarts import (
InterpolateBetweenRestarts,
)
from pySDC.implementations.convergence_controller_classes.basic_restarting import BasicRestarting
custom_description = super().get_custom_description(problem, num_procs)
custom_description['convergence_controllers'][Adaptivity]['avoid_restarts'] = False
custom_description['convergence_controllers'][InterpolateBetweenRestarts] = {}
flavor = 'MPI' if self.useMPI else 'nonMPI'
custom_description['convergence_controllers'][BasicRestarting.get_implementation(flavor)] = {'max_restarts': 15}
return custom_description
def get_reference_value(self, problem, key, op, num_procs=1):
"""
Get a reference value for a given problem for testing in CI.
Args:
problem: A function that runs a pySDC problem, see imports for available problems
key (str): The name of the variable you want to compare
op (function): The operation you want to apply to the data
num_procs (int): Number of processes
Returns:
The reference value
"""
if problem.__name__ == "run_vdp":
if key == 'work_newton' and op == sum:
return 6659
elif key == 'e_global_post_run' and op == max:
return 2.9780002756552015e-06
raise NotImplementedError('The reference value you are looking for is not implemented for this strategy!')
class AdaptivityExtrapolationWithinQStrategy(Strategy):
'''
Adaptivity based on extrapolation between collocation nodes as a resilience strategy
'''
def __init__(self, useMPI=False):
'''
Initialization routine
'''
from pySDC.implementations.convergence_controller_classes.adaptivity import AdaptivityExtrapolationWithinQ
super().__init__(useMPI=useMPI)
self.color = list(cmap.values())[8]
self.marker = '*'
self.name = 'adaptivity_extraQ'
self.bar_plot_x_label = 'adaptivity Q'
self.precision_parameter = 'e_tol'
self.adaptive_coll_params = {}
self.precision_parameter_loc = ['convergence_controllers', AdaptivityExtrapolationWithinQ, 'e_tol']
self.restol = None
self.maxiter = 99
def get_custom_description(self, problem, num_procs):
'''
Routine to get a custom description that adds adaptivity
Args:
problem: A function that runs a pySDC problem, see imports for available problems
num_procs (int): Number of processes you intend to run with
Returns:
The custom descriptions you can supply to the problem when running it
'''
from pySDC.implementations.convergence_controller_classes.adaptivity import AdaptivityExtrapolationWithinQ
custom_description = {}
custom_description['step_params'] = {'maxiter': self.maxiter}
dt_max = np.inf
dt_min = 1e-5
if problem.__name__ == "run_vdp":
e_tol = 2e-5
dt_min = 1e-3
# elif problem.__name__ == "run_piline":
# e_tol = 1e-7
# dt_min = 1e-2
# elif problem.__name__ == "run_Lorenz":
# e_tol = 2e-5
# dt_min = 1e-3
# elif problem.__name__ == "run_Schroedinger":
# e_tol = 4e-6
# dt_min = 1e-3
# elif problem.__name__ == "run_quench":
# e_tol = 1e-5
# dt_min = 1e-3
# dt_max = 1e2
else:
raise NotImplementedError(
                'I don\'t have a tolerance for adaptivity for your problem. Please add one to the strategy'
)
custom_description['level_params'] = {'restol': e_tol / 10 if self.restol is None else self.restol}
custom_description['convergence_controllers'] = {
AdaptivityExtrapolationWithinQ: {
'e_tol': e_tol,
'dt_min': dt_min,
'dt_max': dt_max,
}
}
return merge_descriptions(super().get_custom_description(problem, num_procs), custom_description)
def get_reference_value(self, problem, key, op, num_procs=1):
"""
Get a reference value for a given problem for testing in CI.
Args:
problem: A function that runs a pySDC problem, see imports for available problems
key (str): The name of the variable you want to compare
op (function): The operation you want to apply to the data
num_procs (int): Number of processes
Returns:
The reference value
"""
if problem.__name__ == "run_vdp":
if key == 'work_newton' and op == sum:
return 2259
elif key == 'e_global_post_run' and op == max:
return 9.319882663172407e-06
raise NotImplementedError('The reference value you are looking for is not implemented for this strategy!')
| 43,776 | 35.179339 | 120 | py |
pySDC | pySDC-master/pySDC/projects/Resilience/sweepers.py | import numpy as np
from pySDC.implementations.sweeper_classes.generic_implicit import generic_implicit
from pySDC.implementations.sweeper_classes.imex_1st_order import imex_1st_order
class generic_implicit_efficient(generic_implicit):
"""
    This sweeper has the same functionality as the `generic_implicit` sweeper, but saves a few operations at the expense
of readability.
"""
def integrate(self, Q=None):
"""
Integrates the right-hand side. Depending on `Q`, this may or may not be consistent with an integral
approximation.
Args:
Q (numpy.ndarray): Some sort of quadrature rule
Returns:
list of dtype_u: containing the integral as values
"""
# get current level and problem description
L = self.level
P = L.prob
Q = self.coll.Qmat if Q is None else Q
me = []
# integrate RHS over all collocation nodes
for m in range(1, self.coll.num_nodes + 1):
# new instance of dtype_u, initialize values with 0
me.append(P.dtype_u(P.init, val=0.0))
for j in range(1, self.coll.num_nodes + 1):
me[-1] += L.dt * Q[m, j] * L.f[j]
return me
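    # Note: update_nodes() below calls integrate(Q=self.coll.Qmat - self.QI), i.e. the
    # preconditioned part (Q - Q_Delta) F(u^k) is accumulated in a single pass over the nodes
    # instead of integrating twice and subtracting -- this is the efficiency gain.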
def update_nodes(self):
"""
Update the u- and f-values at the collocation nodes -> corresponds to a single sweep over all nodes
Returns:
None
"""
# get current level and problem description
L = self.level
P = L.prob
# only if the level has been touched before
assert L.status.unlocked
# get number of collocation nodes for easier access
M = self.coll.num_nodes
# gather all terms which are known already (e.g. from the previous iteration)
# this corresponds to u0 + QF(u^k) - QdF(u^k) + tau
# get QF(u^k)
integral = self.integrate(Q=self.coll.Qmat - self.QI)
for m in range(M):
# add initial value
integral[m] += L.u[0]
# add tau if associated
if L.tau[m] is not None:
integral[m] += L.tau[m]
# do the sweep
for m in range(0, M):
# build rhs, consisting of the known values from above and new values from previous nodes (at k+1)
rhs = P.dtype_u(integral[m])
for j in range(1, m + 1):
rhs += L.dt * self.QI[m + 1, j] * L.f[j]
# implicit solve with prefactor stemming from the diagonal of Qd
L.u[m + 1] = P.solve_system(
rhs, L.dt * self.QI[m + 1, m + 1], L.u[m + 1], L.time + L.dt * self.coll.nodes[m]
)
# update function values
L.f[m + 1] = P.eval_f(L.u[m + 1], L.time + L.dt * self.coll.nodes[m])
# indicate presence of new values at this level
L.status.updated = True
return None
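# Editorial sketch, not part of the sweeper API: a minimal numpy demonstration of the
# identity the efficient sweepers exploit, namely that a single `integrate` call with
# the combined matrix Q - QI is equivalent to two separate quadrature evaluations.
# All arrays below are random stand-ins for the collocation matrix, the preconditioner
# and the right-hand side values.
def _demo_combined_preconditioner_integral():
    rng = np.random.default_rng(0)
    Q = rng.random((4, 4))
    QI = np.tril(rng.random((4, 4)))
    F = rng.random(4)
    assert np.allclose((Q - QI) @ F, Q @ F - QI @ F)
    return (Q - QI) @ F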
class imex_1st_order_efficient(imex_1st_order):
"""
Duplicate of `imex_1st_order` sweeper which is slightly more efficient at the cost of code readability.
"""
def integrate(self, Q=None, QI=None, QE=None):
"""
Integrates the right-hand side (here impl + expl)
Args:
Q (numpy.ndarray): Full quadrature rule
QI (numpy.ndarray): Implicit preconditioner
QE (numpy.ndarray): Explicit preconditioner
Returns:
list of dtype_u: containing the integral as values
"""
Q = self.coll.Qmat if Q is None else Q
QI = np.zeros_like(Q) if QI is None else QI
QE = np.zeros_like(Q) if QE is None else QE
# get current level and problem description
L = self.level
me = []
# integrate RHS over all collocation nodes
for m in range(1, self.coll.num_nodes + 1):
me.append(L.dt * ((Q - QI)[m, 1] * L.f[1].impl + (Q - QE)[m, 1] * L.f[1].expl))
            # accumulate the contributions of the remaining nodes (the first node initialized the entry above)
for j in range(2, self.coll.num_nodes + 1):
me[m - 1] += L.dt * ((Q - QI)[m, j] * L.f[j].impl + (Q - QE)[m, j] * L.f[j].expl)
return me
def update_nodes(self):
"""
Update the u- and f-values at the collocation nodes -> corresponds to a single sweep over all nodes
Returns:
None
"""
# get current level and problem description
L = self.level
P = L.prob
# only if the level has been touched before
assert L.status.unlocked
# get number of collocation nodes for easier access
M = self.coll.num_nodes
# gather all terms which are known already (e.g. from the previous iteration)
# this corresponds to u0 + QF(u^k) - QIFI(u^k) - QEFE(u^k) + tau
        # get (Q - QI)F_impl(u^k) + (Q - QE)F_expl(u^k) in a single integration pass
integral = self.integrate(Q=self.coll.Qmat, QI=self.QI, QE=self.QE)
for m in range(M):
# add initial value
integral[m] += L.u[0]
# add tau if associated
if L.tau[m] is not None:
integral[m] += L.tau[m]
# do the sweep
for m in range(0, M):
# build rhs, consisting of the known values from above and new values from previous nodes (at k+1)
rhs = P.dtype_u(integral[m])
for j in range(1, m + 1):
rhs += L.dt * (self.QI[m + 1, j] * L.f[j].impl + self.QE[m + 1, j] * L.f[j].expl)
# implicit solve with prefactor stemming from QI
L.u[m + 1] = P.solve_system(
rhs, L.dt * self.QI[m + 1, m + 1], L.u[m + 1], L.time + L.dt * self.coll.nodes[m]
)
# update function values
L.f[m + 1] = P.eval_f(L.u[m + 1], L.time + L.dt * self.coll.nodes[m])
# indicate presence of new values at this level
L.status.updated = True
return None
| 5,880 | 32.414773 | 120 | py |
pySDC | pySDC-master/pySDC/projects/Resilience/quench.py | # script to run a quench problem
from pySDC.implementations.problem_classes.Quench import Quench, QuenchIMEX
from pySDC.implementations.sweeper_classes.generic_implicit import generic_implicit
from pySDC.implementations.sweeper_classes.imex_1st_order import imex_1st_order
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.core.Hooks import hooks
from pySDC.helpers.stats_helper import get_sorted
from pySDC.projects.Resilience.hook import hook_collection, LogData
from pySDC.projects.Resilience.strategies import merge_descriptions
import numpy as np
import matplotlib.pyplot as plt
from pySDC.core.Errors import ConvergenceError
class live_plot(hooks): # pragma: no cover
"""
    This hook plots the solution and the non-linear part of the right hand side after every step. Keep in mind that using adaptivity will result in restarts, which are not marked in these plots. Prepare to see the temperature profile jumping back again after a restart.
"""
def _plot_state(self, step, level_number): # pragma: no cover
"""
Plot the solution at all collocation nodes and the non-linear part of the right hand side
Args:
step (pySDC.Step.step): The current step
level_number (int): Number of current level
Returns:
None
"""
L = step.levels[level_number]
for ax in self.axs:
ax.cla()
# [self.axs[0].plot(L.prob.xv, L.u[i], label=f"node {i}") for i in range(len(L.u))]
self.axs[0].plot(L.prob.xv, L.u[-1])
self.axs[0].axhline(L.prob.u_thresh, color='black')
self.axs[1].plot(L.prob.xv, L.prob.eval_f_non_linear(L.u[-1], L.time))
self.axs[0].set_ylim(0, 0.025)
self.fig.suptitle(f"t={L.time:.2e}, k={step.status.iter}")
plt.pause(1e-1)
def pre_run(self, step, level_number): # pragma: no cover
"""
Setup a figure to plot into
Args:
step (pySDC.Step.step): The current step
level_number (int): Number of current level
Returns:
None
"""
self.fig, self.axs = plt.subplots(1, 2, figsize=(10, 4))
def post_step(self, step, level_number): # pragma: no cover
"""
Call the plotting function after the step
Args:
step (pySDC.Step.step): The current step
level_number (int): Number of current level
Returns:
None
"""
self._plot_state(step, level_number)
def run_quench(
custom_description=None,
num_procs=1,
Tend=6e2,
hook_class=LogData,
fault_stuff=None,
custom_controller_params=None,
imex=False,
u0=None,
t0=None,
use_MPI=False,
**kwargs,
):
"""
Run a toy problem of a superconducting magnet with a temperature leak with default parameters.
Args:
custom_description (dict): Overwrite presets
num_procs (int): Number of steps for MSSDC
Tend (float): Time to integrate to
hook_class (pySDC.Hook): A hook to store data
fault_stuff (dict): A dictionary with information on how to add faults
custom_controller_params (dict): Overwrite presets
imex (bool): Solve the problem IMEX or fully implicit
u0 (dtype_u): Initial value
t0 (float): Starting time
use_MPI (bool): Whether or not to use MPI
Returns:
dict: The stats object
controller: The controller
Tend: The time that was supposed to be integrated to
"""
# initialize level parameters
level_params = {}
level_params['dt'] = 10.0
# initialize sweeper parameters
sweeper_params = {}
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['num_nodes'] = 3
sweeper_params['QI'] = 'IE'
sweeper_params['QE'] = 'PIC'
problem_params = {
'newton_tol': 1e-9,
}
# initialize step parameters
step_params = {}
step_params['maxiter'] = 5
# initialize controller parameters
controller_params = {}
controller_params['logger_level'] = 30
controller_params['hook_class'] = hook_collection + (hook_class if type(hook_class) == list else [hook_class])
controller_params['mssdc_jac'] = False
if custom_controller_params is not None:
controller_params = {**controller_params, **custom_controller_params}
# fill description dictionary for easy step instantiation
description = {}
description['problem_class'] = QuenchIMEX if imex else Quench
description['problem_params'] = problem_params
description['sweeper_class'] = imex_1st_order if imex else generic_implicit
description['sweeper_params'] = sweeper_params
description['level_params'] = level_params
description['step_params'] = step_params
if custom_description is not None:
description = merge_descriptions(description, custom_description)
# set time parameters
t0 = 0.0 if t0 is None else t0
# instantiate controller
controller_args = {
'controller_params': controller_params,
'description': description,
}
if use_MPI:
from mpi4py import MPI
from pySDC.implementations.controller_classes.controller_MPI import controller_MPI
comm = kwargs.get('comm', MPI.COMM_WORLD)
controller = controller_MPI(**controller_args, comm=comm)
P = controller.S.levels[0].prob
else:
controller = controller_nonMPI(**controller_args, num_procs=num_procs)
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(t0) if u0 is None else u0
# insert faults
if fault_stuff is not None:
from pySDC.projects.Resilience.fault_injection import prepare_controller_for_faults
rnd_args = {'iteration': 1, 'min_node': 1}
args = {'time': 31.0, 'target': 0}
prepare_controller_for_faults(controller, fault_stuff, rnd_args, args)
# call main function to get things done...
try:
uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
except ConvergenceError:
print('Warning: Premature termination!')
stats = controller.return_stats()
return stats, controller, Tend
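# Editorial usage sketch with arbitrary parameter values: run the quench problem with a
# fixed step size and extract the mean temperature over time from the recorded stats.
def _demo_run_quench():
    description = {'level_params': {'dt': 5.0}, 'step_params': {'maxiter': 5}}
    stats, _, _ = run_quench(custom_description=description, Tend=50.0)
    u = get_sorted(stats, type='u', recomputed=False)
    return [(t, np.mean(temp)) for t, temp in u]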
def faults(seed=0): # pragma: no cover
import matplotlib.pyplot as plt
from pySDC.implementations.convergence_controller_classes.adaptivity import Adaptivity
fig, ax = plt.subplots(1, 1)
rng = np.random.RandomState(seed)
fault_stuff = {'rng': rng, 'args': {}, 'rnd_args': {}}
controller_params = {'logger_level': 30}
description = {'level_params': {'dt': 1e1}, 'step_params': {'maxiter': 5}}
stats, controller, _ = run_quench(custom_controller_params=controller_params, custom_description=description)
plot_solution_faults(stats, controller, ax, plot_lines=True, label='ref')
stats, controller, _ = run_quench(
fault_stuff=fault_stuff,
custom_controller_params=controller_params,
)
plot_solution_faults(stats, controller, ax, label='fixed')
description['convergence_controllers'] = {Adaptivity: {'e_tol': 1e-7, 'dt_max': 1e2, 'dt_min': 1e-3}}
stats, controller, _ = run_quench(
fault_stuff=fault_stuff, custom_controller_params=controller_params, custom_description=description
)
plot_solution_faults(stats, controller, ax, label='adaptivity', ls='--')
plt.show()
def plot_solution_faults(stats, controller, ax, plot_lines=False, **kwargs): # pragma: no cover
u_ax = ax
u = get_sorted(stats, type='u', recomputed=False)
u_ax.plot([me[0] for me in u], [np.mean(me[1]) for me in u], **kwargs)
if plot_lines:
P = controller.MS[0].levels[0].prob
u_ax.axhline(P.u_thresh, color='grey', ls='-.', label=r'$T_\mathrm{thresh}$')
u_ax.axhline(P.u_max, color='grey', ls=':', label=r'$T_\mathrm{max}$')
[ax.axvline(me[0], color='grey', label=f'fault at t={me[0]:.2f}') for me in get_sorted(stats, type='bitflip')]
u_ax.legend()
u_ax.set_xlabel(r'$t$')
u_ax.set_ylabel(r'$T$')
def get_crossing_time(stats, controller, num_points=5, inter_points=50, temperature_error_thresh=1e-5):
"""
Compute the time when the temperature threshold is crossed based on interpolation.
Args:
stats (dict): The stats from a pySDC run
controller (pySDC.Controller.controller): The controller
num_points (int): The number of points in the solution you want to use for interpolation
inter_points (int): The resolution of the interpolation
temperature_error_thresh (float): The temperature error compared to the actual threshold you want to allow
Returns:
float: The time when the temperature threshold is crossed
"""
from pySDC.core.Lagrange import LagrangeApproximation
from pySDC.core.Collocation import CollBase
P = controller.MS[0].levels[0].prob
u_thresh = P.u_thresh
u = get_sorted(stats, type='u', recomputed=False)
temp = np.array([np.mean(me[1]) for me in u])
t = np.array([me[0] for me in u])
crossing_index = np.arange(len(temp))[temp > u_thresh][0]
# interpolation stuff
num_points = min([num_points, crossing_index * 2, len(temp) - crossing_index])
idx = np.arange(num_points) - num_points // 2 + crossing_index
t_grid = t[idx]
u_grid = temp[idx]
t_inter = np.linspace(t_grid[0], t_grid[-1], inter_points)
interpolator = LagrangeApproximation(points=t_grid)
u_inter = interpolator.getInterpolationMatrix(t_inter) @ u_grid
crossing_inter = np.arange(len(u_inter))[u_inter > u_thresh][0]
temperature_error = abs(u_inter[crossing_inter] - u_thresh)
assert temperature_error < temp[crossing_index], "Temperature error is rising due to interpolation!"
if temperature_error > temperature_error_thresh and inter_points < 300:
return get_crossing_time(stats, controller, num_points + 4, inter_points + 15, temperature_error_thresh)
return t_inter[crossing_inter]
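# Editorial toy sketch of the crossing-time idea above: refine a monotone toy
# "temperature" on a coarse grid with plain numpy interpolation (instead of pySDC's
# LagrangeApproximation) and report the first time the threshold is exceeded.
def _demo_crossing_time(u_thresh=0.5):
    t = np.linspace(0, 1, 11)
    temp = t**2
    t_fine = np.linspace(0, 1, 1001)
    u_fine = np.interp(t_fine, t, temp)
    return t_fine[u_fine > u_thresh][0]  # close to sqrt(0.5) ~ 0.707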
def plot_solution(stats, controller): # pragma: no cover
import matplotlib.pyplot as plt
fig, ax = plt.subplots(1, 1)
u_ax = ax
dt_ax = u_ax.twinx()
u = get_sorted(stats, type='u', recomputed=False)
u_ax.plot([me[0] for me in u], [np.mean(me[1]) for me in u], label=r'$T$')
dt = get_sorted(stats, type='dt', recomputed=False)
dt_ax.plot([me[0] for me in dt], [me[1] for me in dt], color='black', ls='--')
u_ax.plot([None], [None], color='black', ls='--', label=r'$\Delta t$')
if controller.useMPI:
P = controller.S.levels[0].prob
else:
P = controller.MS[0].levels[0].prob
u_ax.axhline(P.u_thresh, color='grey', ls='-.', label=r'$T_\mathrm{thresh}$')
u_ax.axhline(P.u_max, color='grey', ls=':', label=r'$T_\mathrm{max}$')
[ax.axvline(me[0], color='grey', label=f'fault at t={me[0]:.2f}') for me in get_sorted(stats, type='bitflip')]
u_ax.legend()
u_ax.set_xlabel(r'$t$')
u_ax.set_ylabel(r'$T$')
dt_ax.set_ylabel(r'$\Delta t$')
def compare_imex_full(plotting=False, leak_type='linear'):
"""
    Compare the results of IMEX and fully implicit runs. For IMEX we need to limit the step size in order to achieve
    convergence, but for the fully implicit scheme, adaptivity can handle the step size on its own.
    Args:
        plotting (bool): Plot the solution or not
        leak_type (str): How the leak grows with temperature, see the `Quench` problem class
"""
from pySDC.implementations.convergence_controller_classes.adaptivity import Adaptivity
from pySDC.implementations.hooks.log_work import LogWork
from pySDC.implementations.hooks.log_errors import LogGlobalErrorPostRun
maxiter = 5
num_nodes = 3
newton_iter_max = 99
res = {}
rhs = {}
error = {}
custom_description = {}
custom_description['problem_params'] = {
'newton_tol': 1e-10,
'newton_iter': newton_iter_max,
'nvars': 2**9,
'leak_type': leak_type,
}
custom_description['step_params'] = {'maxiter': maxiter}
custom_description['sweeper_params'] = {'num_nodes': num_nodes}
custom_description['convergence_controllers'] = {
Adaptivity: {'e_tol': 1e-6, 'dt_max': 50},
}
custom_controller_params = {'logger_level': 30}
for imex in [False, True]:
stats, controller, _ = run_quench(
custom_description=custom_description,
custom_controller_params=custom_controller_params,
imex=imex,
Tend=4.3e2,
use_MPI=False,
hook_class=[LogWork, LogGlobalErrorPostRun],
)
res[imex] = get_sorted(stats, type='u')[-1][1]
newton_iter = [me[1] for me in get_sorted(stats, type='work_newton')]
rhs[imex] = np.mean([me[1] for me in get_sorted(stats, type='work_rhs')]) // 1
error[imex] = get_sorted(stats, type='e_global_post_run')[-1][1]
if imex:
assert all(me == 0 for me in newton_iter), "IMEX is not supposed to do Newton iterations!"
else:
assert (
max(newton_iter) / num_nodes / maxiter <= newton_iter_max
), "Took more Newton iterations than allowed!"
if plotting: # pragma: no cover
plot_solution(stats, controller)
diff = abs(res[True] - res[False])
thresh = 4e-3
assert (
diff < thresh
), f"Difference between IMEX and fully-implicit too large! Got {diff:.2e}, allowed is only {thresh:.2e}!"
prob = controller.MS[0].levels[0].prob
assert (
max(res[True]) > prob.u_max
), f"Expected runaway to happen, but maximum temperature is {max(res[True]):.2e} < u_max={prob.u_max:.2e}!"
assert (
rhs[True] == rhs[False]
), f"Expected IMEX and fully implicit schemes to take the same number of right hand side evaluations per step, but got {rhs[True]} and {rhs[False]}!"
assert error[True] < 1e-4, f'Expected error of IMEX version to be less than 1e-4, but got e={error[True]:.2e}!'
assert (
error[False] < 7.7e-5
), f'Expected error of fully implicit version to be less than 7.7e-5, but got e={error[False]:.2e}!'
def compare_reference_solutions_single():
from pySDC.implementations.hooks.log_errors import LogGlobalErrorPostStep, LogLocalErrorPostStep
from pySDC.implementations.hooks.log_solution import LogSolution
    types = ['scipy']  # 'DIRK' and 'SDC' reference solutions are available as well
fig, ax = plt.subplots()
error_ax = ax.twinx()
Tend = 500
colors = ['black', 'teal', 'magenta']
from pySDC.projects.Resilience.strategies import AdaptivityStrategy, merge_descriptions, DoubleAdaptivityStrategy
from pySDC.implementations.convergence_controller_classes.adaptivity import Adaptivity
strategy = DoubleAdaptivityStrategy()
controller_params = {'logger_level': 15}
for j in range(len(types)):
description = {}
description['level_params'] = {'dt': 5.0, 'restol': 1e-10}
description['sweeper_params'] = {'QI': 'IE', 'num_nodes': 3}
description['problem_params'] = {
'leak_type': 'linear',
'leak_transition': 'step',
'nvars': 2**10,
'reference_sol_type': types[j],
'newton_tol': 1e-12,
}
description['level_params'] = {'dt': 5.0, 'restol': -1}
description = merge_descriptions(description, strategy.get_custom_description(run_quench, 1))
description['step_params'] = {'maxiter': 5}
description['convergence_controllers'][Adaptivity]['e_tol'] = 1e-4
stats, controller, _ = run_quench(
custom_description=description,
hook_class=[LogGlobalErrorPostStep, LogLocalErrorPostStep, LogSolution],
Tend=Tend,
imex=False,
custom_controller_params=controller_params,
)
e_glob = get_sorted(stats, type='e_global_post_step', recomputed=False)
e_loc = get_sorted(stats, type='e_local_post_step', recomputed=False)
u = get_sorted(stats, type='u', recomputed=False)
ax.plot([me[0] for me in u], [max(me[1]) for me in u], color=colors[j], label=f'{types[j]} reference')
error_ax.plot([me[0] for me in e_glob], [me[1] for me in e_glob], color=colors[j], ls='--')
error_ax.plot([me[0] for me in e_loc], [me[1] for me in e_loc], color=colors[j], ls=':')
prob = controller.MS[0].levels[0].prob
ax.axhline(prob.u_thresh, ls='-.', color='grey')
ax.axhline(prob.u_max, ls='-.', color='grey')
ax.plot([None], [None], ls='--', label=r'$e_\mathrm{global}$', color='grey')
ax.plot([None], [None], ls=':', label=r'$e_\mathrm{local}$', color='grey')
error_ax.set_yscale('log')
ax.legend(frameon=False)
ax.set_xlabel(r'$t$')
ax.set_ylabel('solution')
error_ax.set_ylabel('error')
ax.set_title('Fully implicit quench problem')
fig.tight_layout()
fig.savefig('data/quench_refs_single.pdf', bbox_inches='tight')
def compare_reference_solutions():
from pySDC.implementations.hooks.log_errors import LogGlobalErrorPostRun, LogLocalErrorPostStep
types = ['DIRK', 'SDC', 'scipy']
fig, ax = plt.subplots()
Tend = 500
dt_list = [Tend / 2.0**me for me in [2, 3, 4, 5, 6, 7, 8, 9, 10]]
# dt_list = [Tend / 2.**me for me in [2, 3, 4, 5, 6, 7]]
for j in range(len(types)):
errors = [None] * len(dt_list)
for i in range(len(dt_list)):
description = {}
description['level_params'] = {'dt': dt_list[i], 'restol': 1e-10}
description['sweeper_params'] = {'QI': 'IE', 'num_nodes': 3}
description['problem_params'] = {
'leak_type': 'linear',
'leak_transition': 'step',
'nvars': 2**10,
'reference_sol_type': types[j],
}
stats, controller, _ = run_quench(
custom_description=description,
hook_class=[LogGlobalErrorPostRun, LogLocalErrorPostStep],
Tend=Tend,
imex=False,
)
# errors[i] = get_sorted(stats, type='e_global_post_run')[-1][1]
errors[i] = max([me[1] for me in get_sorted(stats, type='e_local_post_step', recomputed=False)])
print(errors)
ax.loglog(dt_list, errors, label=f'{types[j]} reference')
ax.legend(frameon=False)
ax.set_xlabel(r'$\Delta t$')
ax.set_ylabel('global error')
ax.set_title('Fully implicit quench problem')
fig.tight_layout()
fig.savefig('data/quench_refs.pdf', bbox_inches='tight')
def check_order(reference_sol_type='scipy'):
from pySDC.implementations.hooks.log_errors import LogGlobalErrorPostRun
from pySDC.implementations.convergence_controller_classes.estimate_embedded_error import EstimateEmbeddedError
Tend = 500
maxiter_list = [1, 2, 3, 4, 5]
dt_list = [Tend / 2.0**me for me in [4, 5, 6, 7, 8, 9]]
# dt_list = [Tend / 2.**me for me in [6, 7, 8]]
fig, ax = plt.subplots()
from pySDC.implementations.sweeper_classes.Runge_Kutta import DIRK43
colors = ['black', 'teal', 'magenta', 'orange', 'red']
for j in range(len(maxiter_list)):
errors = [None] * len(dt_list)
for i in range(len(dt_list)):
description = {}
description['level_params'] = {'dt': dt_list[i]}
description['step_params'] = {'maxiter': maxiter_list[j]}
description['sweeper_params'] = {'QI': 'IE', 'num_nodes': 3}
description['problem_params'] = {
'leak_type': 'linear',
'leak_transition': 'step',
'nvars': 2**10,
'reference_sol_type': reference_sol_type,
}
description['convergence_controllers'] = {EstimateEmbeddedError: {}}
# if maxiter_list[j] == 5:
# description['sweeper_class'] = DIRK43
# description['sweeper_params'] = {'maxiter': 1}
stats, controller, _ = run_quench(
custom_description=description, hook_class=[LogGlobalErrorPostRun], Tend=Tend, imex=False
)
# errors[i] = max([me[1] for me in get_sorted(stats, type='error_embedded_estimate')])
errors[i] = get_sorted(stats, type='e_global_post_run')[-1][1]
print(errors)
ax.loglog(dt_list, errors, color=colors[j], label=f'{maxiter_list[j]} iterations')
ax.loglog(
dt_list, [errors[0] * (me / dt_list[0]) ** maxiter_list[j] for me in dt_list], color=colors[j], ls='--'
)
dt_list = np.array(dt_list)
errors = np.array(errors)
orders = np.log(errors[1:] / errors[:-1]) / np.log(dt_list[1:] / dt_list[:-1])
print(orders, np.mean(orders))
# ax.loglog(dt_list, local_errors)
ax.legend(frameon=False)
ax.set_xlabel(r'$\Delta t$')
ax.set_ylabel('global error')
# ax.set_ylabel('max. local error')
ax.set_title('Fully implicit quench problem')
fig.tight_layout()
fig.savefig(f'data/order_quench_{reference_sol_type}.pdf', bbox_inches='tight')
if __name__ == '__main__':
compare_reference_solutions_single()
# for reference_sol_type in ['DIRK', 'SDC', 'scipy']:
# check_order(reference_sol_type=reference_sol_type)
    # faults(19)
    # get_crossing_time()
# compare_imex_full(plotting=True)
plt.show()
| 21,262 | 37.105735 | 268 | py |
pySDC | pySDC-master/pySDC/projects/Resilience/heat.py | # script to run a simple heat problem
from pySDC.implementations.problem_classes.HeatEquation_ND_FD import heatNd_unforced
from pySDC.implementations.sweeper_classes.generic_implicit import generic_implicit
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.core.Hooks import hooks
from pySDC.helpers.stats_helper import get_sorted
from pySDC.projects.Resilience.hook import hook_collection, LogData
import numpy as np
from pySDC.projects.Resilience.strategies import merge_descriptions
def run_heat(
custom_description=None,
num_procs=1,
Tend=2e-1,
hook_class=LogData,
fault_stuff=None,
custom_controller_params=None,
):
"""
Run a heat problem with default parameters.
Args:
custom_description (dict): Overwrite presets
num_procs (int): Number of steps for MSSDC
Tend (float): Time to integrate to
hook_class (pySDC.Hook): A hook to store data
fault_stuff (dict): A dictionary with information on how to add faults
custom_controller_params (dict): Overwrite presets
Returns:
dict: The stats object
controller: The controller
Tend: The time that was supposed to be integrated to
"""
# initialize level parameters
level_params = dict()
level_params['dt'] = 0.05
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['num_nodes'] = 3
sweeper_params['QI'] = 'IE'
problem_params = {
'freq': 2,
'nvars': 2**9,
'nu': 1.0,
'stencil_type': 'center',
'order': 6,
'bc': 'periodic',
'solver_type': 'direct',
'lintol': None,
'liniter': None,
}
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 5
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 30
controller_params['hook_class'] = hook_collection + (hook_class if type(hook_class) == list else [hook_class])
controller_params['mssdc_jac'] = False
if custom_controller_params is not None:
controller_params = {**controller_params, **custom_controller_params}
# fill description dictionary for easy step instantiation
description = dict()
description['problem_class'] = heatNd_unforced # pass problem class
description['problem_params'] = problem_params # pass problem parameters
description['sweeper_class'] = generic_implicit # pass sweeper
description['sweeper_params'] = sweeper_params # pass sweeper parameters
description['level_params'] = level_params # pass level parameters
description['step_params'] = step_params
if custom_description is not None:
description = merge_descriptions(description, custom_description)
# set time parameters
t0 = 0.0
# instantiate controller
controller = controller_nonMPI(num_procs=num_procs, controller_params=controller_params, description=description)
# insert faults
if fault_stuff is not None:
raise NotImplementedError("The parameters have not been adapted to this equation yet!")
# get initial values on finest level
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(t0)
# call main function to get things done...
uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
return stats, controller, Tend
if __name__ == '__main__':
run_heat()
| 3,513 | 32.150943 | 117 | py |
pySDC | pySDC-master/pySDC/projects/Resilience/Lorenz.py | # script to run a Lorenz attractor problem
import numpy as np
import matplotlib.pyplot as plt
from pySDC.helpers.stats_helper import get_sorted
from pySDC.implementations.problem_classes.Lorenz import LorenzAttractor
from pySDC.implementations.sweeper_classes.generic_implicit import generic_implicit
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.implementations.convergence_controller_classes.adaptivity import Adaptivity
from pySDC.core.Errors import ConvergenceError
from pySDC.projects.Resilience.hook import LogData, hook_collection
from pySDC.projects.Resilience.strategies import merge_descriptions
def run_Lorenz(
custom_description=None,
num_procs=1,
Tend=1.0,
hook_class=LogData,
fault_stuff=None,
custom_controller_params=None,
use_MPI=False,
**kwargs,
):
"""
Run a Lorenz attractor problem with default parameters.
Args:
custom_description (dict): Overwrite presets
num_procs (int): Number of steps for MSSDC
Tend (float): Time to integrate to
hook_class (pySDC.Hook): A hook to store data
fault_stuff (dict): A dictionary with information on how to add faults
custom_controller_params (dict): Overwrite presets
use_MPI (bool): Whether or not to use MPI
Returns:
dict: The stats object
controller: The controller
Tend: The time that was supposed to be integrated to
"""
# initialize level parameters
level_params = dict()
level_params['dt'] = 1e-2
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['num_nodes'] = 3
sweeper_params['QI'] = 'IE'
problem_params = {
'newton_tol': 1e-9,
'newton_maxiter': 99,
}
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 4
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 30
controller_params['hook_class'] = hook_collection + (hook_class if type(hook_class) == list else [hook_class])
controller_params['mssdc_jac'] = False
if custom_controller_params is not None:
controller_params = {**controller_params, **custom_controller_params}
# fill description dictionary for easy step instantiation
description = dict()
description['problem_class'] = LorenzAttractor
description['problem_params'] = problem_params
description['sweeper_class'] = generic_implicit
description['sweeper_params'] = sweeper_params
description['level_params'] = level_params
description['step_params'] = step_params
if custom_description is not None:
description = merge_descriptions(description, custom_description)
# set time parameters
t0 = 0.0
# instantiate controller
if use_MPI:
from mpi4py import MPI
from pySDC.implementations.controller_classes.controller_MPI import controller_MPI
comm = kwargs.get('comm', MPI.COMM_WORLD)
controller = controller_MPI(controller_params=controller_params, description=description, comm=comm)
P = controller.S.levels[0].prob
else:
controller = controller_nonMPI(
num_procs=num_procs, controller_params=controller_params, description=description
)
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(t0)
# insert faults
if fault_stuff is not None:
from pySDC.projects.Resilience.fault_injection import prepare_controller_for_faults
rnd_args = {'iteration': 5}
args = {'time': 0.3, 'target': 0}
prepare_controller_for_faults(controller, fault_stuff, rnd_args, args)
# call main function to get things done...
try:
uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
except ConvergenceError:
stats = controller.return_stats()
return stats, controller, Tend
def plot_solution(stats): # pragma: no cover
"""
Plot the solution in 3D.
Args:
stats (dict): The stats object of the run
Returns:
None
"""
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
u = get_sorted(stats, type='u')
ax.plot([me[1][0] for me in u], [me[1][1] for me in u], [me[1][2] for me in u])
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
plt.show()
def check_solution(stats, controller, thresh=5e-4):
"""
    Check if the global error wrt. a scipy reference solution is tolerable.
This is also a check for the global error hook.
Args:
stats (dict): The stats object of the run
controller (pySDC.Controller.controller): The controller
thresh (float): Threshold for accepting the accuracy
Returns:
None
"""
u = get_sorted(stats, type='u')
u_exact = controller.MS[0].levels[0].prob.u_exact(t=u[-1][0])
error = np.linalg.norm(u[-1][1] - u_exact, np.inf)
error_hook = get_sorted(stats, type='e_global_post_run')[-1][1]
assert error == error_hook, f'Expected errors to match, got {error:.2e} and {error_hook:.2e}!'
assert error < thresh, f"Error too large, got e={error:.2e}"
def main(plotting=True):
"""
Make a test run and see if the accuracy checks out.
Args:
plotting (bool): Plot the solution or not
Returns:
None
"""
from pySDC.implementations.hooks.log_errors import LogGlobalErrorPostRun
custom_description = {}
custom_description['convergence_controllers'] = {Adaptivity: {'e_tol': 1e-5}}
custom_controller_params = {'logger_level': 30}
stats, controller, _ = run_Lorenz(
custom_description=custom_description,
custom_controller_params=custom_controller_params,
Tend=10.0,
hook_class=[LogData, LogGlobalErrorPostRun],
)
check_solution(stats, controller, 5e-4)
if plotting: # pragma: no cover
plot_solution(stats)
if __name__ == "__main__":
main()
| 6,044 | 30.984127 | 114 | py |
pySDC | pySDC-master/pySDC/projects/Resilience/piline.py | import numpy as np
import matplotlib.pyplot as plt
from pySDC.helpers.stats_helper import get_sorted
from pySDC.implementations.problem_classes.Piline import piline
from pySDC.implementations.sweeper_classes.imex_1st_order import imex_1st_order
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.implementations.convergence_controller_classes.adaptivity import Adaptivity
from pySDC.implementations.convergence_controller_classes.hotrod import HotRod
from pySDC.projects.Resilience.hook import LogData, hook_collection
from pySDC.projects.Resilience.strategies import merge_descriptions
def run_piline(
custom_description=None,
num_procs=1,
Tend=20.0,
hook_class=LogData,
fault_stuff=None,
custom_controller_params=None,
):
"""
Run a Piline problem with default parameters.
Args:
custom_description (dict): Overwrite presets
num_procs (int): Number of steps for MSSDC
Tend (float): Time to integrate to
hook_class (pySDC.Hook): A hook to store data
fault_stuff (dict): A dictionary with information on how to add faults
custom_controller_params (dict): Overwrite presets
Returns:
dict: The stats object
controller: The controller
Tend: The time that was supposed to be integrated to
"""
# initialize level parameters
level_params = dict()
level_params['dt'] = 5e-2
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['num_nodes'] = 3
sweeper_params['QI'] = 'IE'
sweeper_params['QE'] = 'PIC'
problem_params = {
'Vs': 100.0,
'Rs': 1.0,
'C1': 1.0,
'Rpi': 0.2,
'C2': 1.0,
'Lpi': 1.0,
'Rl': 5.0,
}
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 4
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 30
controller_params['hook_class'] = hook_collection + (hook_class if type(hook_class) == list else [hook_class])
controller_params['mssdc_jac'] = False
if custom_controller_params is not None:
controller_params = {**controller_params, **custom_controller_params}
# fill description dictionary for easy step instantiation
description = dict()
description['problem_class'] = piline # pass problem class
description['problem_params'] = problem_params # pass problem parameters
description['sweeper_class'] = imex_1st_order # pass sweeper
description['sweeper_params'] = sweeper_params # pass sweeper parameters
description['level_params'] = level_params # pass level parameters
description['step_params'] = step_params
if custom_description is not None:
description = merge_descriptions(description, custom_description)
# set time parameters
t0 = 0.0
# instantiate controller
controller = controller_nonMPI(num_procs=num_procs, controller_params=controller_params, description=description)
# insert faults
if fault_stuff is not None:
from pySDC.projects.Resilience.fault_injection import prepare_controller_for_faults
rnd_args = {'iteration': 4}
args = {'time': 2.5, 'target': 0}
prepare_controller_for_faults(controller, fault_stuff, rnd_args, args)
# get initial values on finest level
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(t0)
# call main function to get things done...
uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
return stats, controller, Tend
def get_data(stats, recomputed=False):
"""
Extract useful data from the stats.
Args:
stats (pySDC.stats): The stats object of the run
recomputed (bool): Whether to exclude values that don't contribute to the final solution or not
Returns:
dict: Data
"""
data = {
'v1': np.array([me[1][0] for me in get_sorted(stats, type='u', recomputed=recomputed)]),
'v2': np.array([me[1][1] for me in get_sorted(stats, type='u', recomputed=recomputed)]),
'p3': np.array([me[1][2] for me in get_sorted(stats, type='u', recomputed=recomputed)]),
't': np.array([me[0] for me in get_sorted(stats, type='u', recomputed=recomputed)]),
'dt': np.array([me[1] for me in get_sorted(stats, type='dt', recomputed=recomputed)]),
't_dt': np.array([me[0] for me in get_sorted(stats, type='dt', recomputed=recomputed)]),
'e_em': np.array(get_sorted(stats, type='error_embedded_estimate', recomputed=recomputed))[:, 1],
'e_ex': np.array(get_sorted(stats, type='error_extrapolation_estimate', recomputed=recomputed))[:, 1],
'restarts': np.array(get_sorted(stats, type='restart', recomputed=None))[:, 1],
't_restarts': np.array(get_sorted(stats, type='restart', recomputed=None))[:, 0],
'sweeps': np.array(get_sorted(stats, type='sweeps', recomputed=None))[:, 1],
}
data['ready'] = np.logical_and(data['e_ex'] != np.array(None), data['e_em'] != np.array(None))
data['restart_times'] = data['t_restarts'][data['restarts'] > 0]
return data
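# Editorial sketch: the dictionary returned by `get_data` can be post-processed directly,
# e.g. to summarize the mean step size and the total number of restarts of a run.
def _demo_summarize_data(data):
    return {'mean_dt': np.mean(data['dt']), 'restarts': data['restarts'].sum()}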
def plot_error(data, ax, use_adaptivity=True, plot_restarts=False):
"""
Plot the embedded and extrapolated error estimates.
Args:
        data (dict): Data prepared from stats by `get_data`
        ax: Somewhere to plot
use_adaptivity (bool): Whether adaptivity was used
plot_restarts (bool): Whether to plot vertical lines for restarts
Returns:
None
"""
setup_mpl_from_accuracy_check()
ax.plot(data['t_dt'], data['dt'], color='black')
e_ax = ax.twinx()
e_ax.plot(data['t'], data['e_em'], label=r'$\epsilon_\mathrm{embedded}$')
e_ax.plot(data['t'][data['ready']], data['e_ex'][data['ready']], label=r'$\epsilon_\mathrm{extrapolated}$', ls='--')
e_ax.plot(
data['t'][data['ready']],
abs(data['e_em'][data['ready']] - data['e_ex'][data['ready']]),
label='difference',
ls='-.',
)
if plot_restarts:
[ax.axvline(t_restart, ls='-.', color='black', alpha=0.5) for t_restart in data['restart_times']]
e_ax.plot([None, None], label=r'$\Delta t$', color='black')
e_ax.set_yscale('log')
if use_adaptivity:
e_ax.legend(frameon=False, loc='upper left')
else:
e_ax.legend(frameon=False, loc='upper right')
e_ax.set_ylim((7.367539795147197e-12, 1.109667868425781e-05))
ax.set_ylim((0.012574322653781072, 0.10050387672423527))
    ax.set_xlabel('Time')
    ax.set_ylabel(r'$\Delta t$')
def setup_mpl_from_accuracy_check():
"""
Change matplotlib parameters to conform to LaTeX style.
"""
from pySDC.projects.Resilience.accuracy_check import setup_mpl
setup_mpl()
def plot_solution(data, ax):
"""
Plot the solution.
Args:
data (dict): Data prepared from stats by `get_data`
ax: Somewhere to plot
Returns:
None
"""
setup_mpl_from_accuracy_check()
ax.plot(data['t'], data['v1'], label='v1', ls='-')
ax.plot(data['t'], data['v2'], label='v2', ls='--')
ax.plot(data['t'], data['p3'], label='p3', ls='-.')
ax.legend(frameon=False)
ax.set_xlabel('Time')
def check_solution(data, use_adaptivity, num_procs, generate_reference=False):
"""
Check the solution against a hard coded reference.
Args:
data (dict): Data prepared from stats by `get_data`
use_adaptivity (bool): Whether adaptivity was used
num_procs (int): Number of steps for MSSDC
generate_reference (bool): Instead of comparing to reference, print a new reference to the console
Returns:
None
"""
if use_adaptivity and num_procs == 1:
error_msg = 'Error when using adaptivity in serial:'
expected = {
'v1': 83.88330442715265,
'v2': 80.62692930055763,
'p3': 16.13594155613822,
'e_em': 4.922608098922865e-09,
'e_ex': 4.4120077421613226e-08,
'dt': 0.05,
'restarts': 1.0,
'sweeps': 2416.0,
't': 20.03656747407325,
}
elif use_adaptivity and num_procs == 4:
error_msg = 'Error when using adaptivity in parallel:'
expected = {
'v1': 83.88400082289273,
'v2': 80.62656229801286,
'p3': 16.134850400599763,
'e_em': 2.3681899108396465e-08,
'e_ex': 3.6491178375304526e-08,
'dt': 0.08265581329617167,
'restarts': 36.0,
'sweeps': 2528.0,
't': 19.999999999999996,
}
elif not use_adaptivity and num_procs == 4:
error_msg = 'Error with fixed step size in parallel:'
expected = {
'v1': 83.88400128006428,
'v2': 80.62656202423844,
'p3': 16.134849781053525,
'e_em': 4.277040943634347e-09,
'e_ex': 4.9707053288253756e-09,
'dt': 0.05,
'restarts': 0.0,
'sweeps': 1600.0,
't': 20.00000000000015,
}
elif not use_adaptivity and num_procs == 1:
error_msg = 'Error with fixed step size in serial:'
expected = {
'v1': 83.88400149770143,
'v2': 80.62656173487008,
'p3': 16.134849851184736,
'e_em': 4.977994905175365e-09,
'e_ex': 5.048084913047097e-09,
'dt': 0.05,
'restarts': 0.0,
'sweeps': 1600.0,
't': 20.00000000000015,
}
got = {
'v1': data['v1'][-1],
'v2': data['v2'][-1],
'p3': data['p3'][-1],
'e_em': data['e_em'][-1],
'e_ex': data['e_ex'][data['e_ex'] != [None]][-1],
'dt': data['dt'][-1],
'restarts': data['restarts'].sum(),
'sweeps': data['sweeps'].sum(),
't': data['t'][-1],
}
if generate_reference:
print(f'Adaptivity: {use_adaptivity}, num_procs={num_procs}')
print('expected = {')
for k in got.keys():
v = got[k]
if type(v) in [list, np.ndarray]:
print(f' \'{k}\': {v[v!=[None]][-1]},')
else:
print(f' \'{k}\': {v},')
print('}')
for k in expected.keys():
assert np.isclose(
expected[k], got[k], rtol=1e-4
), f'{error_msg} Expected {k}={expected[k]:.4e}, got {k}={got[k]:.4e}'
def residual_adaptivity(plot=False):
"""
Make a run with adaptivity based on the residual.
"""
from pySDC.implementations.convergence_controller_classes.adaptivity import AdaptivityResidual
max_res = 1e-8
custom_description = {'convergence_controllers': {}}
custom_description['convergence_controllers'][AdaptivityResidual] = {
'e_tol': max_res,
'e_tol_low': max_res / 10,
}
stats, _, _ = run_piline(custom_description, num_procs=1)
residual = get_sorted(stats, type='residual_post_step', recomputed=False)
dt = get_sorted(stats, type='dt', recomputed=False)
if plot:
fig, ax = plt.subplots()
dt_ax = ax.twinx()
ax.plot([me[0] for me in residual], [me[1] for me in residual])
dt_ax.plot([me[0] for me in dt], [me[1] for me in dt], color='black')
plt.show()
max_residual = max([me[1] for me in residual])
assert max_residual < max_res, f'Max. allowed residual is {max_res:.2e}, but got {max_residual:.2e}!'
dt_std = np.std([me[1] for me in dt])
assert dt_std != 0, f'Expected the step size to change, but standard deviation is {dt_std:.2e}!'
def main():
"""
Make a variety of tests to see if Hot Rod and Adaptivity work in serial as well as MSSDC.
"""
generate_reference = False
for use_adaptivity in [True, False]:
custom_description = {'convergence_controllers': {}}
if use_adaptivity:
custom_description['convergence_controllers'][Adaptivity] = {
'e_tol': 1e-7,
'embedded_error_flavor': 'linearized',
}
for num_procs in [1, 4]:
custom_description['convergence_controllers'][HotRod] = {'HotRod_tol': 1, 'no_storage': num_procs > 1}
stats, _, _ = run_piline(custom_description, num_procs=num_procs)
data = get_data(stats, recomputed=False)
fig, ax = plt.subplots(1, 1, figsize=(3.5, 3))
plot_error(data, ax, use_adaptivity)
if use_adaptivity:
fig.savefig(f'data/piline_hotrod_adaptive_{num_procs}procs.png', bbox_inches='tight', dpi=300)
else:
fig.savefig(f'data/piline_hotrod_{num_procs}procs.png', bbox_inches='tight', dpi=300)
if use_adaptivity and num_procs == 4:
sol_fig, sol_ax = plt.subplots(1, 1, figsize=(3.5, 3))
plot_solution(data, sol_ax)
sol_fig.savefig('data/piline_solution_adaptive.png', bbox_inches='tight', dpi=300)
plt.close(sol_fig)
check_solution(data, use_adaptivity, num_procs, generate_reference)
plt.close(fig)
if __name__ == "__main__":
residual_adaptivity()
main()
| 13,257 | 34.639785 | 120 | py |
pySDC | pySDC-master/pySDC/projects/Resilience/extrapolation_within_Q.py | import matplotlib.pyplot as plt
import numpy as np
from pySDC.implementations.convergence_controller_classes.estimate_extrapolation_error import (
EstimateExtrapolationErrorWithinQ,
)
from pySDC.implementations.hooks.log_errors import LogLocalErrorPostStep
from pySDC.helpers.stats_helper import get_sorted
from pySDC.projects.Resilience.piline import run_piline
from pySDC.projects.Resilience.advection import run_advection
from pySDC.projects.Resilience.vdp import run_vdp
def multiple_runs(prob, dts, num_nodes, quad_type='RADAU-RIGHT'):
"""
Make multiple runs of a specific problem and record vital error information
Args:
prob (function): A problem from the resilience project to run
dts (list): The step sizes to run with
num_nodes (int): Number of nodes
quad_type (str): Type of nodes
Returns:
dict: Errors for multiple runs
int: Order of the collocation problem
"""
description = {}
description['level_params'] = {'restol': 1e-10}
description['step_params'] = {'maxiter': 99}
description['sweeper_params'] = {'num_nodes': num_nodes, 'quad_type': quad_type}
description['convergence_controllers'] = {EstimateExtrapolationErrorWithinQ: {}}
if prob.__name__ == 'run_advection':
description['problem_params'] = {'order': 6, 'stencil_type': 'center'}
res = {}
for dt in dts:
description['level_params']['dt'] = dt
stats, controller, _ = prob(custom_description=description, Tend=5.0 * dt, hook_class=LogLocalErrorPostStep)
res[dt] = {}
res[dt]['e_loc'] = max([me[1] for me in get_sorted(stats, type='e_local_post_step')])
res[dt]['e_ex'] = max([me[1] for me in get_sorted(stats, type='error_extrapolation_estimate')])
coll_order = controller.MS[0].levels[0].sweep.coll.order
return res, coll_order
def plot_and_compute_order(ax, res, num_nodes, coll_order):
"""
Plot and compute the order from the multiple runs ran with `multiple_runs`. Also, it is tested if the expected order
is reached for the respective errors.
Args:
ax (Matplotlib.pyplot.axes): Somewhere to plot
res (dict): Result from `multiple_runs`
num_nodes (int): Number of nodes
coll_order (int): Order of the collocation problem
Returns:
None
"""
dts = np.array(list(res.keys()))
keys = list(res[dts[0]].keys())
# local error is one order higher than global error
expected_order = {
'e_loc': coll_order + 1,
'e_ex': num_nodes + 1,
}
for key in keys:
errors = np.array([res[dt][key] for dt in dts])
mask = np.logical_and(errors < 1e-3, errors > 1e-10)
order = np.log(errors[mask][1:] / errors[mask][:-1]) / np.log(dts[mask][1:] / dts[mask][:-1])
if ax is not None:
ax.loglog(dts, errors, label=f'{key}: order={np.mean(order):.2f}')
assert np.isclose(
np.mean(order), expected_order[key], atol=0.5
), f'Expected order {expected_order[key]} for {key}, but got {np.mean(order):.2e}!'
if ax is not None:
ax.legend(frameon=False)
def check_order(ax, prob, dts, num_nodes, quad_type):
"""
Check the order by calling `multiple_runs` and then `plot_and_compute_order`.
Args:
ax (Matplotlib.pyplot.axes): Somewhere to plot
prob (function): A problem from the resilience project to run
dts (list): The step sizes to run with
num_nodes (int): Number of nodes
quad_type (str): Type of nodes
"""
res, coll_order = multiple_runs(prob, dts, num_nodes, quad_type)
plot_and_compute_order(ax, res, num_nodes, coll_order)
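# Editorial sketch with synthetic data: errors behaving like e = C * dt**p recover the
# order p from consecutive log-ratios, exactly as done in `plot_and_compute_order`.
def _demo_order_estimate():
    dts = np.array([1e-1, 5e-2, 2.5e-2])
    errors = 3.0 * dts**4
    order = np.log(errors[1:] / errors[:-1]) / np.log(dts[1:] / dts[:-1])
    return np.mean(order)  # ~4.0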
def main():
fig, ax = plt.subplots()
num_nodes = 3
quad_type = 'RADAU-RIGHT'
check_order(ax, run_advection, [5e-1, 1e-1, 5e-2, 1e-2], num_nodes, quad_type)
plt.show()
if __name__ == "__main__":
main()
| 3,936 | 32.364407 | 120 | py |
pySDC | pySDC-master/pySDC/projects/Resilience/fault_injection.py | import struct
import numpy as np
from pySDC.core.Hooks import hooks
from pySDC.implementations.datatype_classes.mesh import mesh
from pySDC.helpers.pysdc_helper import FrozenClass
class Fault(FrozenClass):
'''
Class for storing all the data that belongs to a fault, i.e. when and where it happens
'''
def __init__(self, params=None):
'''
Initialization routine for faults
Args:
params (dict): Parameters regarding when the fault will be inserted
'''
params = {} if params is None else params
self.time = None
self.timestep = None
self.level_number = None
self.iteration = None
self.node = None
self.problem_pos = None
self.bit = None
self.target = 0
self.when = 'after' # before or after an iteration?
for k, v in params.items():
setattr(self, k, v)
        self._freeze()
@classmethod
def random(cls, args, rnd_params, random_generator=None):
'''
Classmethod to initialize a random fault
Args:
args (dict): Supply variables that will be exempt from randomization here
rnd_params (dict): Supply attributes to the randomization such as maximum values here
random_generator (numpy.random.RandomState): Give a random generator to ensure repeatability
        Returns:
            Fault: Randomly generated fault
'''
if random_generator is None:
random_generator = np.random.RandomState(2187)
random = {
'level_number': random_generator.randint(low=0, high=rnd_params['level_number']),
'node': random_generator.randint(low=rnd_params.get('min_node', 0), high=rnd_params['node'] + 1),
'iteration': random_generator.randint(low=1, high=rnd_params['iteration'] + 1),
'problem_pos': [random_generator.randint(low=0, high=i) for i in rnd_params['problem_pos']],
'bit': random_generator.randint(low=0, high=rnd_params['bit']),
}
return cls({**random, **args})
@classmethod
def index_to_combination(cls, args, rnd_params, generator=None):
'''
Classmethod to initialize a fault based on an index to translate to a combination of fault parameters, in order
to loop through all combinations. Probably only makes sense for ODEs.
        The index is decoded like a mixed-radix number: each fault parameter receives the digit
        (index // product of the sizes of the previous parameter ranges) % (size of its own range),
        plus an offset where needed to land in a sensible value range.
Args:
args (dict): Supply variables that will be exempt from randomization here.
rnd_params (dict): Supply attributes to the randomization such as maximum values here
generator (int): Index for specific combination
Returns:
Fault: Generated from a specific combination of parameters
'''
ranges = [
(0, rnd_params['level_number']),
(rnd_params.get('min_node', 0), rnd_params['node'] + 1),
(1, rnd_params['iteration'] + 1),
(0, rnd_params['bit']),
]
ranges += [(0, i) for i in rnd_params['problem_pos']]
# get values for taking modulo later
mods = [me[1] - me[0] for me in ranges]
if len(np.unique(mods)) < len(mods):
raise NotImplementedError(
                'I can\'t deal with combinations when parameters have the same admissible number of values yet!'
)
coeff = [(generator // np.prod(mods[:i], dtype=int)) % mods[i] for i in range(len(mods))]
combinations = {
'level_number': coeff[0],
'node': coeff[1],
'iteration': coeff[2] + 1,
'bit': coeff[3],
'problem_pos': [coeff[4 + i] for i in range(len(rnd_params['problem_pos']))],
}
return cls({**combinations, **args})
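# Editorial sketch of the mixed-radix decoding used in `Fault.index_to_combination`,
# with made-up moduli: index 11 and moduli (2, 3, 4) decode to the digits
# (11 % 2, (11 // 2) % 3, (11 // 6) % 4) = (1, 2, 1).
def _demo_index_decoding(index=11, mods=(2, 3, 4)):
    return [(index // int(np.prod(mods[:i]))) % mods[i] for i in range(len(mods))]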
class FaultInjector(hooks):
'''
    Class to use as a base for hooks classes, instead of the abstract hooks class, in order to insert faults via hooks
'''
def __init__(self):
'''
Initialization routine
'''
super(FaultInjector, self).__init__()
self.fault_frequency_time = np.inf
self.fault_frequency_iter = np.inf
self.faults = []
self.fault_init = [] # add faults to this list when the random parameters have not been set up yet
self.rnd_params = {}
self.random_generator = np.random.RandomState(2187) # number of the cell in which Princess Leia is held
@classmethod
def generate_fault_stuff_single_fault(cls, bit=0, iteration=1, problem_pos=None, level_number=0, node=1, time=None):
"""
Generate a fault stuff object which will insert a single fault at the supplied parameters. Because there will
be some parameter set for everything, there is no randomization anymore.
Args:
bit (int): Which bit to flip
iteration (int): After which iteration to flip
problem_pos: Where in the problem to flip a bit, type depends on the problem
level_number (int): In which level you want to flip
node (int): In which node to flip
time (float): The bitflip will occur in the time step after this time is reached
Returns:
dict: Can be supplied to the run functions in the resilience project to generate the single fault
"""
assert problem_pos is not None, "Please supply a spatial position for the fault as `problem_pos`!"
assert time is not None, "Please supply a time for the fault as `time`!"
fault_stuff = {
'rng': np.random.RandomState(0),
'args': {
'bit': bit,
'iteration': iteration,
'level_number': level_number,
'problem_pos': problem_pos,
'node': node,
'time': time,
},
}
fault_stuff['rnd_args'] = fault_stuff['args']
return fault_stuff
def add_fault(self, args, rnd_args):
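        '''
        Add a fault to the list of faults to be injected, dispatching on the type of
        `self.random_generator`: an integer selects one specific parameter combination,
        while a `numpy.random.RandomState` generates a random fault.
        Args:
            args (dict): Parameters for fault initialization that are not randomized
            rnd_args (dict): Special parameters for the randomization
        Returns:
            None
        '''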
if type(self.random_generator) == int:
self.add_fault_from_combination(args, rnd_args)
elif type(self.random_generator) == np.random.RandomState:
self.add_random_fault(args, rnd_args)
else:
raise NotImplementedError(
                f'Don\'t know how to add fault with generator of type {type(self.random_generator)}'
)
def add_stored_faults(self):
'''
Method to add faults that are recorded for later adding in the pre run hook
Returns:
None
'''
for f in self.fault_init:
if f['kind'] == 'random':
self.add_random_fault(args=f['args'], rnd_args=f['rnd_args'])
elif f['kind'] == 'combination':
self.add_fault_from_combination(args=f['args'], rnd_args=f['rnd_args'])
else:
raise NotImplementedError(f'I don\'t know how to add stored fault of kind {f["kind"]}')
def add_random_fault(self, args=None, rnd_args=None):
'''
Method to generate a random fault and add it to the list of faults to be injected at some point
Args:
args (dict): parameters for fault initialization that should not be randomized
rnd_args (dict): special parameters for randomization other than the default ones
Returns:
None
'''
# replace args and rnd_args with empty dict if we didn't specify anything
args = {} if args is None else args
rnd_args = {} if rnd_args is None else rnd_args
# check if we can add the fault directly, or if we have to store its parameters and add it in the pre_run hook
if self.rnd_params == {}:
self.fault_init += [{'args': args, 'rnd_args': rnd_args, 'kind': 'random'}]
else:
self.faults += [
Fault.random(
args=args, rnd_params={**self.rnd_params, **rnd_args}, random_generator=self.random_generator
)
]
return None
def add_fault_from_combination(self, args=None, rnd_args=None):
'''
        Method to generate a fault from a specific combination of parameters and add it to the list of faults to be injected at some point
Args:
args (dict): parameters for fault initialization that override the combinations
rnd_args (dict): possible values that the parameters can take
Returns:
None
'''
# replace args and rnd_args with empty dict if we didn't specify anything
args = {} if args is None else args
rnd_args = {} if rnd_args is None else rnd_args
# check if we can add the fault directly, or if we have to store its parameters and add it in the pre_run hook
if self.rnd_params == {}:
self.fault_init += [{'args': args, 'rnd_args': rnd_args, 'kind': 'combination'}]
else:
self.faults += [
Fault.index_to_combination(
args=args, rnd_params={**self.rnd_params, **rnd_args}, generator=self.random_generator
)
]
return None
def inject_fault(self, step, f):
'''
Method to inject a fault into a step.
Args:
step (pySDC.Step.step): Step to inject the fault into
f (Fault): fault that should be injected
Returns:
None
'''
L = step.levels[f.level_number]
_abs_before = None
_abs_after = None
# insert the fault in some target
if f.target == 0:
'''
Target 0 means we flip a bit in the solution.
To make sure the faults have some impact, we have to reevaluate the right hand side. Otherwise the fault is
fixed automatically in this implementation, as the right hand side is assembled only from f(t, u) and u is
tempered with after computing f(t, u).
To be fair to iteration based resilience strategies, we also reevaluate the residual. Otherwise, when a
fault happens in the last iteration, it will not show up in the residual and the iteration is wrongly
stopped.
'''
_abs_before = abs(L.u[f.node][tuple(f.problem_pos)])
L.u[f.node][tuple(f.problem_pos)] = self.flip_bit(L.u[f.node][tuple(f.problem_pos)], f.bit)
L.f[f.node] = L.prob.eval_f(L.u[f.node], L.time + L.dt * L.sweep.coll.nodes[max([0, f.node - 1])])
L.sweep.compute_residual()
_abs_after = abs(L.u[f.node][tuple(f.problem_pos)])
else:
raise NotImplementedError(f'Target {f.target} for faults not implemented!')
# log what happened to stats and screen
self.logger.info(
f'Flipping bit {f.bit} {f.when} iteration {f.iteration} in node {f.node}. Target: {f.target}. Abs: {_abs_before:.2e} -> {_abs_after:.2e}'
)
self.add_to_stats(
process=step.status.slot,
time=L.time,
level=L.level_index,
iter=step.status.iter,
sweep=L.status.sweep,
type='bitflip',
value=(f.level_number, f.iteration, f.node, f.problem_pos, f.bit, f.target),
)
# remove the fault from the list to make sure it happens only once
self.faults.remove(f)
return None
def pre_run(self, step, level_number):
'''
Setup random parameters and add the faults that we couldn't before here
Args:
step (pySDC.Step.step): the current step
level_number (int): the current level number
Returns:
None
'''
super(FaultInjector, self).pre_run(step, level_number)
if not type(step.levels[level_number].u[0]) == mesh:
raise NotImplementedError(
                f'Fault insertion is only implemented for type mesh, not {type(step.levels[level_number].u[0])}'
)
dtype = step.levels[level_number].prob.u_exact(t=0).dtype
if dtype in [float, np.float64]:
bit = 64
elif dtype in [complex]:
bit = 128
else:
raise NotImplementedError(f'Don\'t know how many bits type {dtype} has')
# define parameters for randomization
self.rnd_params = {
'level_number': len(step.levels),
'node': step.levels[0].sweep.params.num_nodes,
'iteration': step.params.maxiter,
'problem_pos': step.levels[level_number].u[0].shape,
'bit': bit, # change manually if you ever have something else
**self.rnd_params,
}
        # initialize the faults that were added before we knew the random parameters
self.add_stored_faults()
if self.rnd_params['level_number'] > 1:
raise NotImplementedError('I don\'t know how to insert faults in this multi-level madness :(')
# initialize parameters for periodic fault injection
self.timestep_idx = 0
self.iter_idx = 0
return None
def pre_step(self, step, level_number):
'''
Deal with periodic fault injection here:
- Increment the index for counting time steps
- Add a random fault in this time step if it is time for it based on the frequency
Args:
step (pySDC.Step.step): the current step
level_number (int): the current level number
Returns:
None
'''
super(FaultInjector, self).pre_step(step, level_number)
self.timestep_idx += 1
if self.timestep_idx % self.fault_frequency_time == 0 and not self.timestep_idx == 0:
self.add_random_fault(args={'timestep': self.timestep_idx})
return None
def pre_iteration(self, step, level_number):
'''
Check if we have a fault that should be inserted here and deal with periodic injection per iteration count
Args:
step (pySDC.Step.step): the current step
level_number (int): the current level number
Returns:
None
'''
super(FaultInjector, self).pre_iteration(step, level_number)
# check if the fault-free iteration count period has elapsed
if self.iter_idx % self.fault_frequency_iter == 0 and not self.iter_idx == 0:
self.add_random_fault(args={'timestep': self.timestep_idx, 'iteration': step.status.iter})
        # loop through all pending faults and check if they are scheduled now
for f in [me for me in self.faults if me.when == 'before']:
# based on iteration number
if self.timestep_idx == f.timestep and step.status.iter == f.iteration:
self.inject_fault(step, f)
# based on time
elif f.time is not None:
if step.time > f.time and step.status.iter == f.iteration:
self.inject_fault(step, f)
self.iter_idx += 1
return None
def post_iteration(self, step, level_number):
'''
Check if we have a fault that should be inserted here
Args:
step (pySDC.Step.step): the current step
level_number (int): the current level number
Returns:
None
'''
super(FaultInjector, self).post_iteration(step, level_number)
        # loop through all pending faults and check if they are scheduled now
for f in [me for me in self.faults if me.when == 'after']:
# based on iteration number
if self.timestep_idx == f.timestep and step.status.iter == f.iteration:
self.inject_fault(step, f)
# based on time
elif f.time is not None:
if step.time > f.time and step.status.iter == f.iteration:
self.inject_fault(step, f)
return None
@classmethod
def to_binary(cls, f):
'''
        Converts a single float into a string containing its binary representation in memory following IEEE754
The struct.pack function returns the input with the applied conversion code in 8 bit blocks, which are then
concatenated as a string. Complex numbers will be returned as two consecutive strings.
Args:
f (float, np.float64, np.float32): number to be converted to binary representation
Returns:
(str) Binary representation of f following IEEE754 as a string
'''
if type(f) in [np.float64, float]:
conversion_code = '>d' # big endian, double
elif type(f) in [np.float32]:
conversion_code = '>f' # big endian, float
elif type(f) in [np.complex128]:
return f'{cls.to_binary(f.real)}{cls.to_binary(f.imag)}'
else:
raise NotImplementedError(f'Don\'t know how to convert number of type {type(f)} to binary')
return ''.join('{:0>8b}'.format(c) for c in struct.pack(conversion_code, f))
@classmethod
def to_float(cls, s):
'''
        Converts a string containing an IEEE754 binary representation into a float. The string is converted to an integer with base 2
and converted to bytes, which can be unpacked into a Python float by the struct module.
Args:
s (str): binary representation of a float number of 32 or 64 bit length following IEEE754
Returns:
(float) floating point representation of the binary string
'''
if len(s) == 64:
conversion_code = '>d' # big endian, double
byte_count = 8
elif len(s) == 32:
conversion_code = '>f' # big endian, float
byte_count = 4
elif len(s) == 128: # complex floats
real = s[0:64]
imag = s[64:128]
return cls.to_float(real) + cls.to_float(imag) * 1j
else:
raise NotImplementedError(f'Don\'t know how to convert string of length {len(s)} to float')
return struct.unpack(conversion_code, int(s, 2).to_bytes(byte_count, 'big'))[0]
@classmethod
def flip_bit(cls, target, bit):
'''
Flips a bit at position bit in a target using the bitwise xor operator
Args:
target (float, np.float64, np.float32): the floating point number in which you want to flip a bit
bit (int): the bit which you intend to flip
Returns:
(float) The floating point number resulting from flipping the respective bit in target
'''
binary = cls.to_binary(target)
return cls.to_float(f'{binary[:bit]}{int(binary[bit]) ^ 1}{binary[bit+1:]}')
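# A minimal usage sketch of the conversion helpers above (values chosen for
# illustration): 1.0 is stored with sign bit 0, exponent 0b01111111111 and an
# all-zero mantissa, so
#   FaultInjector.to_binary(1.0) == '0' + '01111111111' + '0' * 52
# and flipping the sign bit therefore yields
#   FaultInjector.flip_bit(1.0, 0) == -1.0
# while to_float(to_binary(x)) == x round-trips any double-precision number.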
def prepare_controller_for_faults(controller, fault_stuff, rnd_args, args):
"""
Prepare the controller for a run with faults. That means the fault injection hook is added and supplied with the
relevant parameters.
Args:
controller (pySDC.controller): The controller
fault_stuff (dict): A dictionary with information on how to add faults
rnd_args (dict): Default arguments for how to add random faults in a specific problem
args (dict): Default arguments for where to add faults in a specific problem
Returns:
None
"""
faultHook = get_fault_injector_hook(controller)
faultHook.random_generator = fault_stuff['rng']
for key in ['fault_frequency_iter']:
if key in fault_stuff.keys():
faultHook.__dict__[key] = fault_stuff[key]
for key, val in fault_stuff.get('rnd_params', {}).items():
faultHook.rnd_params[key] = val
if not len(faultHook.rnd_params.keys()) > 0:
faultHook.add_fault(
rnd_args={**rnd_args, **fault_stuff.get('rnd_params', {})},
args={**args, **fault_stuff.get('args', {})},
)
def get_fault_injector_hook(controller):
"""
Get the fault injector hook from the list of hooks in the controller.
If there is not one already, it is added here.
Args:
controller (pySDC.controller): The controller
Returns:
pySDC.hook.FaultInjector: The fault injecting hook
"""
hook_types = [type(me) for me in controller.hooks]
if FaultInjector not in hook_types:
controller.add_hook(FaultInjector)
return get_fault_injector_hook(controller)
else:
hook_idx = [i for i in range(len(hook_types)) if hook_types[i] == FaultInjector]
assert len(hook_idx) == 1, f'Expected exactly one FaultInjector, got {len(hook_idx)}!'
return controller.hooks[hook_idx[0]]
| 20,710 | 36.724954 | 149 | py |
pySDC | pySDC-master/pySDC/projects/Resilience/__init__.py | 0 | 0 | 0 | py |
|
pySDC | pySDC-master/pySDC/projects/Resilience/work_precision.py | from mpi4py import MPI
import numpy as np
import matplotlib.pyplot as plt
import pickle
from pySDC.projects.Resilience.strategies import merge_descriptions
from pySDC.projects.Resilience.Lorenz import run_Lorenz
from pySDC.projects.Resilience.vdp import run_vdp
from pySDC.projects.Resilience.Schroedinger import run_Schroedinger
from pySDC.projects.Resilience.quench import run_quench
from pySDC.helpers.stats_helper import get_sorted
from pySDC.helpers.plot_helper import setup_mpl, figsize_by_journal
setup_mpl(reset=True)
LOGGER_LEVEL = 30
VERBOSE = True
MAPPINGS = {
'e_global': ('e_global_post_run', max, False),
'e_global_rel': ('e_global_rel_post_run', max, False),
't': ('timing_run', max, False),
# 'e_local_max': ('e_local_post_step', max, False),
'k_SDC': ('k', sum, None),
'k_SDC_no_restart': ('k', sum, False),
'k_Newton': ('work_newton', sum, None),
'k_Newton_no_restart': ('work_newton', sum, False),
'k_rhs': ('work_rhs', sum, None),
'restart': ('restart', sum, None),
'dt_mean': ('dt', np.mean, False),
'dt_max': ('dt', max, False),
'e_embedded_max': ('error_embedded_estimate', max, False),
}
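# Each entry above maps an output key to (recorded stats type, reduction applied to
# the recorded values, value passed as the ``recomputed`` filter to get_sorted):
# e.g. 'k_SDC' sums the recorded 'k' entries over all steps, while 't' keeps the
# largest recorded run time.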
def single_run(problem, strategy, data, custom_description, num_procs=1, comm_world=None, problem_args=None):
"""
Make a single run of a particular problem with a certain strategy.
Args:
problem (function): A problem to run
strategy (Strategy): SDC strategy
data (dict): Put the results in here
custom_description (dict): Overwrite presets
num_procs (int): Number of processes for the time communicator
        comm_world (mpi4py.MPI.Intracomm): Communicator that is available for the entire script
        problem_args (dict): Extra arguments to pass to the problem
Returns:
None
"""
from pySDC.implementations.hooks.log_errors import LogGlobalErrorPostRunMPI
from pySDC.implementations.hooks.log_work import LogWork
from pySDC.projects.Resilience.hook import LogData
comm = comm_world.Split(comm_world.rank < num_procs)
if comm_world.rank >= num_procs:
return None
strategy_description = strategy.get_custom_description(problem, num_procs)
description = merge_descriptions(strategy_description, custom_description)
controller_params = {'logger_level': LOGGER_LEVEL}
problem_args = {} if problem_args is None else problem_args
stats, controller, _ = problem(
custom_description=description,
Tend=strategy.get_Tend(problem, num_procs),
hook_class=[LogData, LogWork, LogGlobalErrorPostRunMPI],
custom_controller_params=controller_params,
use_MPI=True,
comm=comm,
**problem_args,
)
# record all the metrics
for key, mapping in MAPPINGS.items():
me = get_sorted(stats, type=mapping[0], recomputed=mapping[2], comm=comm)
if len(me) == 0:
data[key] += [np.nan]
else:
data[key] += [mapping[1]([you[1] for you in me])]
return None
def get_parameter(dictionary, where):
"""
Get a parameter at a certain position in a dictionary of dictionaries.
Args:
dictionary (dict): The dictionary
where (list): The list of keys leading to the value you want
Returns:
The value of the dictionary
"""
if len(where) == 1:
return dictionary[where[0]]
else:
return get_parameter(dictionary[where[0]], where[1:])
def set_parameter(dictionary, where, parameter):
"""
Set a parameter at a certain position in a dictionary of dictionaries
Args:
dictionary (dict): The dictionary
where (list): The list of keys leading to the value you want to set
parameter: Whatever you want to set the parameter to
Returns:
None
"""
if len(where) == 1:
dictionary[where[0]] = parameter
else:
set_parameter(dictionary[where[0]], where[1:], parameter)
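# Minimal sketch of the two nested-dictionary helpers above (hypothetical dict):
#   desc = {'level_params': {'dt': 0.1}}
#   set_parameter(desc, ['level_params', 'dt'], 0.05)
#   get_parameter(desc, ['level_params', 'dt'])  # -> 0.05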
def get_path(problem, strategy, num_procs, handle='', base_path='data/work_precision'):
"""
Get the path to a certain data.
Args:
problem (function): A problem to run
strategy (Strategy): SDC strategy
num_procs (int): Number of processes for the time communicator
handle (str): The name of the configuration
base_path (str): Some path where all the files are stored
Returns:
str: The path to the data you are looking for
"""
return f'{base_path}/{problem.__name__}-{strategy.__class__.__name__}-{handle}{"-wp" if handle else "wp"}-{num_procs}procs.pickle'
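# For illustration, with a strategy instance such as AdaptivityStrategy(),
#   get_path(run_vdp, AdaptivityStrategy(), 4, handle='order 5')
# resolves to 'data/work_precision/run_vdp-AdaptivityStrategy-order 5-wp-4procs.pickle';
# an empty handle drops the extra dash and gives '...-AdaptivityStrategy-wp-4procs.pickle'.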
def record_work_precision(
problem,
strategy,
num_procs=1,
custom_description=None,
handle='',
runs=1,
comm_world=None,
problem_args=None,
param_range=None,
):
"""
Run problem with strategy and record the cost parameters.
Args:
problem (function): A problem to run
strategy (Strategy): SDC strategy
num_procs (int): Number of processes for the time communicator
custom_description (dict): Overwrite presets
handle (str): The name of the configuration
runs (int): Number of runs you want to do
        comm_world (mpi4py.MPI.Intracomm): Communicator that is available for the entire script
        problem_args (dict): Extra arguments to pass to the problem
        param_range (list): Precision parameter values to run with, overriding the defaults
Returns:
None
"""
data = {}
# prepare precision parameters
param = strategy.precision_parameter
description = merge_descriptions(
strategy.get_custom_description(problem, num_procs),
{} if custom_description is None else custom_description,
)
if param == 'e_tol':
power = 10.0
set_parameter(description, strategy.precision_parameter_loc[:-1] + ['dt_min'], 0)
exponents = [-3, -2, -1, 0, 1, 2, 3]
if problem.__name__ == 'run_vdp':
exponents = [-4, -3, -2, -1, 0, 1, 2]
elif param == 'dt':
power = 2.0
exponents = [-1, 0, 1, 2, 3]
elif param == 'restol':
power = 10.0
exponents = [-2, -1, 0, 1, 2, 3]
if problem.__name__ == 'run_vdp':
exponents = [-4, -3, -2, -1, 0, 1]
else:
raise NotImplementedError(f"I don't know how to get default value for parameter \"{param}\"")
where = strategy.precision_parameter_loc
default = get_parameter(description, where)
param_range = [default * power**i for i in exponents] if param_range is None else param_range
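    # e.g. for var == 'dt' with power 2.0 and exponents [-1, 0, 1, 2, 3], this gives
    # param_range = [dt / 2, dt, 2 * dt, 4 * dt, 8 * dt] around the preset step size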
if problem.__name__ == 'run_quench':
if param == 'restol':
param_range = [1e-5, 1e-6, 1e-7, 1e-8, 1e-9]
elif param == 'e_tol':
param_range = [1e-2 / 2.0**me for me in [4, 5, 6, 7, 8, 9, 10]]
elif param == 'dt':
param_range = [500 / 2.0**me for me in [5, 6, 7, 8]]
# run multiple times with different parameters
for i in range(len(param_range)):
set_parameter(description, where, param_range[i])
if strategy.name == 'adaptivity_coll':
# set_parameter(description, ['level_params', 'restol'], 1e-9)
set_parameter(description, ['level_params', 'restol'], param_range[i] / 10.0)
data[param_range[i]] = {key: [] for key in MAPPINGS.keys()}
data[param_range[i]]['param'] = [param_range[i]]
data[param_range[i]][param] = [param_range[i]]
for _j in range(runs):
single_run(
problem,
strategy,
data[param_range[i]],
custom_description=description,
comm_world=comm_world,
problem_args=problem_args,
num_procs=num_procs,
)
comm_world.Barrier()
if VERBOSE and comm_world.rank == 0:
print(
f'{problem.__name__} {handle} {num_procs} procs, {param}={param_range[i]:.2e}: e={data[param_range[i]]["e_global"][-1]}, t={data[param_range[i]]["t"][-1]}, k={data[param_range[i]]["k_SDC"][-1]}'
)
if comm_world.rank == 0:
import socket
import time
data['meta'] = {
'hostname': socket.gethostname(),
            'time': time.time(),
'runs': runs,
}
with open(get_path(problem, strategy, num_procs, handle), 'wb') as f:
pickle.dump(data, f)
def plot_work_precision(
problem,
strategy,
num_procs,
ax,
work_key='k_SDC',
precision_key='e_global',
handle='',
plotting_params=None,
comm_world=None,
): # pragma: no cover
"""
Plot data from running a problem with a strategy.
Args:
problem (function): A problem to run
strategy (Strategy): SDC strategy
num_procs (int): Number of processes for the time communicator
ax (matplotlib.pyplot.axes): Somewhere to plot
work_key (str): The key in the recorded data you want on the x-axis
precision_key (str): The key in the recorded data you want on the y-axis
handle (str): The name of the configuration
plotting_params (dict): Will be passed when plotting
comm_world (mpi4py.MPI.Intracomm): Communicator that is available for the entire script
Returns:
None
"""
if comm_world.rank > 0:
return None
with open(get_path(problem, strategy, num_procs, handle=handle), 'rb') as f:
data = pickle.load(f)
keys = [key for key in data.keys() if key not in ['meta']]
work = [np.nanmean(data[key][work_key]) for key in keys]
precision = [np.nanmean(data[key][precision_key]) for key in keys]
for key in [work_key, precision_key]:
rel_variance = [np.std(data[me][key]) / max([np.nanmean(data[me][key]), 1.0]) for me in keys]
if not all(me < 1e-1 or not np.isfinite(me) for me in rel_variance):
print(
f"WARNING: Variance in \"{key}\" for {get_path(problem, strategy, num_procs, handle)} too large! Got {rel_variance}"
)
style = merge_descriptions(
{**strategy.style, 'label': f'{strategy.style["label"]}{f" {handle}" if handle else ""}'},
plotting_params if plotting_params else {},
)
ax.loglog(work, precision, **style)
if 't' in [work_key, precision_key]:
meta = data.get('meta', {})
if meta.get('hostname', None) in ['thomas-work']:
ax.text(0.1, 0.1, "Laptop timings!", transform=ax.transAxes)
if meta.get('runs', None) == 1:
ax.text(0.1, 0.2, "No sampling!", transform=ax.transAxes)
def decorate_panel(ax, problem, work_key, precision_key, num_procs=1, title_only=False): # pragma: no cover
"""
Decorate a plot
Args:
ax (matplotlib.pyplot.axes): Somewhere to plot
problem (function): A problem to run
work_key (str): The key in the recorded data you want on the x-axis
precision_key (str): The key in the recorded data you want on the y-axis
num_procs (int): Number of processes for the time communicator
title_only (bool): Put only the title on top, or do the whole shebang
Returns:
None
"""
labels = {
'k_SDC': 'SDC iterations',
'k_SDC_no_restart': 'SDC iterations (restarts excluded)',
'k_Newton': 'Newton iterations',
'k_Newton_no_restart': 'Newton iterations (restarts excluded)',
'k_rhs': 'right hand side evaluations',
't': 'wall clock time / s',
'e_global': 'global error',
'e_global_rel': 'relative global error',
'e_local_max': 'max. local error',
'restart': 'restarts',
'dt_max': r'$\Delta t_\mathrm{max}$',
'dt_mean': r'$\bar{\Delta t}$',
'param': 'parameter',
}
if not title_only:
ax.set_xlabel(labels.get(work_key, 'work'))
ax.set_ylabel(labels.get(precision_key, 'precision'))
# ax.legend(frameon=False)
titles = {
'run_vdp': 'Van der Pol',
'run_Lorenz': 'Lorenz attractor',
'run_Schroedinger': r'Schr\"odinger',
'run_quench': 'Quench',
}
ax.set_title(titles.get(problem.__name__, ''))
def execute_configurations(
problem,
configurations,
work_key,
precision_key,
num_procs,
ax,
decorate,
record,
runs,
comm_world,
plotting,
):
"""
Run for multiple configurations.
Args:
problem (function): A problem to run
configurations (dict): The configurations you want to run with
work_key (str): The key in the recorded data you want on the x-axis
precision_key (str): The key in the recorded data you want on the y-axis
num_procs (int): Number of processes for the time communicator
ax (matplotlib.pyplot.axes): Somewhere to plot
decorate (bool): Whether to decorate fully or only put the title
record (bool): Whether to only plot or also record the data first
runs (int): Number of runs you want to do
comm_world (mpi4py.MPI.Intracomm): Communicator that is available for the entire script
plotting (bool): Whether to plot something
Returns:
None
"""
for _, config in configurations.items():
for strategy in config['strategies']:
shared_args = {
'problem': problem,
'strategy': strategy,
'handle': config.get('handle', ''),
'num_procs': config.get('num_procs', num_procs),
}
if record:
record_work_precision(
**shared_args,
custom_description=config.get('custom_description', {}),
runs=runs,
comm_world=comm_world,
problem_args=config.get('problem_args', {}),
param_range=config.get('param_range', None),
)
if plotting and comm_world.rank == 0:
plot_work_precision(
**shared_args,
work_key=work_key,
precision_key=precision_key,
ax=ax,
plotting_params=config.get('plotting_params', {}),
comm_world=comm_world,
)
decorate_panel(
ax=ax,
problem=problem,
work_key=work_key,
precision_key=precision_key,
num_procs=num_procs,
title_only=not decorate,
)
def get_configs(mode, problem):
"""
Get configurations for work-precision plots. These are dictionaries containing strategies and handles and so on.
Args:
        mode (str): The mode of the configurations you want to retrieve
problem (function): A problem to run
Returns:
dict: Configurations
"""
configurations = {}
if mode == 'regular':
from pySDC.projects.Resilience.strategies import AdaptivityStrategy, BaseStrategy, IterateStrategy
handle = 'regular'
configurations[0] = {
'handle': handle,
'strategies': [AdaptivityStrategy(useMPI=True), BaseStrategy(useMPI=True), IterateStrategy(useMPI=True)],
}
elif mode == 'step_size_limiting':
from pySDC.implementations.convergence_controller_classes.step_size_limiter import StepSizeLimiter
from pySDC.projects.Resilience.strategies import AdaptivityStrategy
configurations[0] = {
'custom_description': {'convergence_controllers': {StepSizeLimiter: {'dt_max': 25}}},
'handle': 'step limiter',
'strategies': [AdaptivityStrategy(useMPI=True)],
'plotting_params': {'color': 'teal', 'marker': 'v'},
}
configurations[1] = {
'custom_description': {'convergence_controllers': {StepSizeLimiter: {'dt_slope_max': 2}}},
'handle': 'slope limiter',
'strategies': [AdaptivityStrategy(useMPI=True)],
'plotting_params': {'color': 'magenta', 'marker': 'x'},
}
configurations[2] = {
'custom_description': {},
'handle': 'no limits',
'plotting_params': {'label': 'adaptivity'},
'strategies': [AdaptivityStrategy(useMPI=True)],
}
elif mode == 'compare_strategies':
from pySDC.projects.Resilience.strategies import AdaptivityStrategy, BaseStrategy, IterateStrategy
description_high_order = {'step_params': {'maxiter': 5}}
description_low_order = {'step_params': {'maxiter': 3}}
dashed = {'ls': '--'}
configurations[0] = {
'custom_description': description_high_order,
'handle': r'high order',
'strategies': [AdaptivityStrategy(useMPI=True), BaseStrategy(useMPI=True)],
}
configurations[1] = {
'custom_description': description_low_order,
'handle': r'low order',
'strategies': [AdaptivityStrategy(useMPI=True), BaseStrategy(useMPI=True)],
'plotting_params': dashed,
}
description_large_step = {'level_params': {'dt': 5.0 if problem.__name__ == 'run_quench' else 3e-2}}
description_small_step = {'level_params': {'dt': 1.0 if problem.__name__ == 'run_quench' else 1e-2}}
configurations[2] = {
'custom_description': description_large_step,
'handle': r'large step',
'strategies': [IterateStrategy(useMPI=True)],
'plotting_params': dashed,
}
configurations[3] = {
'custom_description': description_small_step,
'handle': r'small step',
'strategies': [IterateStrategy(useMPI=True)],
}
elif mode == 'RK':
from pySDC.projects.Resilience.strategies import AdaptivityStrategy, DIRKStrategy, ERKStrategy
# from pySDC.implementations.sweeper_classes.explicit import explicit
# configurations[3] = {
# 'custom_description': {
# 'step_params': {'maxiter': 5},
# 'sweeper_params': {'QE': 'EE'},
# 'sweeper_class': explicit,
# },
# 'handle': 'explicit order 4',
# 'strategies': [AdaptivityStrategy(useMPI=True)],
# 'plotting_params': {'ls': ':', 'label': 'explicit SDC5(4)'},
# }
configurations[0] = {
'strategies': [ERKStrategy(useMPI=True), DIRKStrategy(useMPI=True)],
}
configurations[1] = {
'custom_description': {'step_params': {'maxiter': 5}},
'handle': 'order 5',
'strategies': [AdaptivityStrategy(useMPI=True)],
'plotting_params': {'label': 'SDC5(4)'},
}
configurations[2] = {
'custom_description': {'step_params': {'maxiter': 4}},
'handle': 'order 4',
'strategies': [AdaptivityStrategy(useMPI=True)],
'plotting_params': {'ls': '--', 'label': 'SDC4(3)'},
}
elif mode == 'parallel_efficiency':
from pySDC.projects.Resilience.strategies import AdaptivityStrategy, BaseStrategy, IterateStrategy, ERKStrategy
desc = {}
desc['sweeper_params'] = {'num_nodes': 3, 'QI': 'IE'}
desc['step_params'] = {'maxiter': 5}
descIterate = {}
descIterate['sweeper_params'] = {'num_nodes': 3, 'QI': 'IE'}
ls = {
1: '-',
2: '--',
3: '-.',
4: ':',
            5: (0, (3, 10, 1, 10)),  # 'loosely dashdotted' as a valid matplotlib dash tuple
}
# configurations[-1] = {
# 'strategies': [ERKStrategy(useMPI=False)], 'num_procs':1,
# }
for num_procs in [4, 2, 1]:
plotting_params = {'ls': ls[num_procs], 'label': f'adaptivity {num_procs} procs'}
configurations[num_procs] = {
'strategies': [AdaptivityStrategy(True)],
'custom_description': desc,
'num_procs': num_procs,
'plotting_params': plotting_params,
}
plotting_params = {'ls': ls[num_procs], 'label': fr'$k$ adaptivity {num_procs} procs'}
configurations[num_procs + 100] = {
'strategies': [IterateStrategy(True)],
'custom_description': descIterate,
'num_procs': num_procs,
'plotting_params': plotting_params,
}
elif mode[:13] == 'vdp_stiffness':
from pySDC.projects.Resilience.strategies import AdaptivityStrategy, ERKStrategy, DIRKStrategy
mu = float(mode[14:])
problem_desc = {'problem_params': {'mu': mu}}
desc = {}
desc['sweeper_params'] = {'num_nodes': 3, 'QI': 'IE'}
desc['step_params'] = {'maxiter': 5}
desc['problem_params'] = problem_desc['problem_params']
ls = {
1: '-',
2: '--',
3: '-.',
4: ':',
            5: (0, (3, 10, 1, 10)),  # 'loosely dashdotted' as a valid matplotlib dash tuple
}
for num_procs in [4, 1]:
plotting_params = {'ls': ls[num_procs], 'label': f'SDC {num_procs} procs'}
configurations[num_procs] = {
'strategies': [AdaptivityStrategy(True)],
'custom_description': desc,
'num_procs': num_procs,
'plotting_params': plotting_params,
'handle': mode,
}
configurations[2] = {
'strategies': [ERKStrategy(useMPI=True)],
'num_procs': 1,
'handle': mode,
'plotting_params': {'label': 'CP5(4)'},
'custom_description': problem_desc,
#'param_range': [1e-2],
}
configurations[3] = {
'strategies': [DIRKStrategy(useMPI=True)],
'num_procs': 1,
'handle': mode,
'plotting_params': {'label': 'DIRK4(3)'},
'custom_description': problem_desc,
}
elif mode == 'compare_adaptivity':
# TODO: configurations not final!
from pySDC.projects.Resilience.strategies import (
AdaptivityCollocationTypeStrategy,
AdaptivityCollocationRefinementStrategy,
AdaptivityStrategy,
AdaptivityExtrapolationWithinQStrategy,
)
strategies = [
AdaptivityCollocationTypeStrategy(useMPI=True),
AdaptivityCollocationRefinementStrategy(useMPI=True),
]
restol = None
for strategy in strategies:
strategy.restol = restol
configurations[1] = {
'custom_description': {'step_params': {'maxiter': 99}, 'level_params': {'restol': 1e-11}},
'strategies': [AdaptivityExtrapolationWithinQStrategy(useMPI=True)],
}
configurations[2] = {'strategies': strategies}
configurations[3] = {
'custom_description': {'step_params': {'maxiter': 5}},
'strategies': [AdaptivityStrategy(useMPI=True)],
}
# strategies2 = [AdaptivityCollocationTypeStrategy(useMPI=True), AdaptivityCollocationRefinementStrategy(useMPI=True)]
# restol = 1e-6
# for strategy in strategies2:
# strategy.restol = restol
# configurations[3] = {'strategies':strategies2, 'handle': 'low restol', 'plotting_params': {'ls': '--'}}
elif mode == 'quench':
from pySDC.projects.Resilience.strategies import (
AdaptivityStrategy,
DoubleAdaptivityStrategy,
IterateStrategy,
BaseStrategy,
)
dumbledoresarmy = DoubleAdaptivityStrategy(useMPI=True)
# dumbledoresarmy.residual_e_tol_ratio = 1e2
dumbledoresarmy.residual_e_tol_abs = 1e-3
strategies = [
AdaptivityStrategy(useMPI=True),
IterateStrategy(useMPI=True),
BaseStrategy(useMPI=True),
dumbledoresarmy,
]
configurations[1] = {'strategies': strategies}
configurations[2] = {
'strategies': strategies,
'problem_args': {'imex': True},
'handle': 'IMEX',
'plotting_params': {'ls': '--'},
}
inexact = {'problem_params': {'newton_iter': 30}}
configurations[3] = {
'strategies': strategies,
'custom_description': inexact,
'handle': 'inexact',
'plotting_params': {'ls': ':'},
}
LU = {'sweeper_params': {'QI': 'LU'}}
configurations[4] = {
'strategies': strategies,
'custom_description': LU,
'handle': 'LU',
'plotting_params': {'ls': '-.'},
}
elif mode == 'preconditioners':
from pySDC.projects.Resilience.strategies import AdaptivityStrategy, IterateStrategy, BaseStrategy
strategies = [AdaptivityStrategy(useMPI=True), IterateStrategy(useMPI=True), BaseStrategy(useMPI=True)]
precons = ['IE', 'LU', 'MIN']
ls = ['-', '--', '-.', ':']
for i in range(len(precons)):
configurations[i] = {
'strategies': strategies,
'custom_description': {'sweeper_params': {'QI': precons[i]}},
'handle': precons[i],
'plotting_params': {'ls': ls[i]},
}
elif mode == 'newton_tol':
from pySDC.projects.Resilience.strategies import AdaptivityStrategy, BaseStrategy, IterateStrategy
tol_range = [1e-7, 1e-9, 1e-11]
ls = ['-', '--', '-.', ':']
for i in range(len(tol_range)):
configurations[i] = {
'strategies': [AdaptivityStrategy(useMPI=True), BaseStrategy(useMPI=True)],
'custom_description': {
'problem_params': {'newton_tol': tol_range[i]},
'step_params': {'maxiter': 5},
},
'handle': f"Newton tol={tol_range[i]:.1e}",
'plotting_params': {'ls': ls[i]},
}
configurations[i + len(tol_range)] = {
'strategies': [IterateStrategy(useMPI=True)],
'custom_description': {
'problem_params': {'newton_tol': tol_range[i]},
},
'handle': f"Newton tol={tol_range[i]:.1e}",
'plotting_params': {'ls': ls[i]},
}
elif mode == 'avoid_restarts':
from pySDC.projects.Resilience.strategies import (
AdaptivityStrategy,
AdaptivityAvoidRestartsStrategy,
AdaptivityInterpolationStrategy,
)
desc = {'sweeper_params': {'QI': 'IE'}, 'step_params': {'maxiter': 3}}
param_range = [1e-3, 1e-5]
configurations[0] = {
'strategies': [AdaptivityInterpolationStrategy(useMPI=True)],
'plotting_params': {'ls': '--'},
'custom_description': desc,
'param_range': param_range,
}
configurations[1] = {
'strategies': [AdaptivityAvoidRestartsStrategy(useMPI=True)],
'plotting_params': {'ls': '-.'},
'custom_description': desc,
'param_range': param_range,
}
configurations[2] = {
'strategies': [AdaptivityStrategy(useMPI=True)],
'custom_description': desc,
'param_range': param_range,
}
else:
raise NotImplementedError(f'Don\'t know the mode "{mode}"!')
return configurations
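# Usage sketch: every numbered entry bundles strategies with optional overrides, e.g.
#   configurations = get_configs('regular', run_vdp)
# which execute_configurations later unpacks, running each strategy of each entry.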
def get_fig(x=1, y=1, **kwargs): # pragma: no cover
"""
Get a figure to plot in.
Args:
x (int): How many panels in horizontal direction you want
y (int): How many panels in vertical direction you want
Returns:
matplotlib.pyplot.Figure
"""
width = 1.0
ratio = 1.0 if y == 2 else 0.5
keyword_arguments = {
'figsize': figsize_by_journal('Springer_Numerical_Algorithms', width, ratio),
'layout': 'constrained',
**kwargs,
}
return plt.subplots(y, x, **keyword_arguments)
def save_fig(
fig, name, work_key, precision_key, legend=True, format='pdf', base_path='data', **kwargs
): # pragma: no cover
"""
Save a figure with a legend on the bottom.
Args:
fig (matplotlib.pyplot.Figure): Figure you want to save
name (str): Name of the plot to put in the path
work_key (str): The key in the recorded data you want on the x-axis
precision_key (str): The key in the recorded data you want on the y-axis
legend (bool): Put a legend or not
format (str): Format to store the figure with
Returns:
None
"""
handles, labels = fig.get_axes()[0].get_legend_handles_labels()
order = np.argsort([me[0] for me in labels])
    if legend:
        fig.legend(
            [handles[i] for i in order],
            [labels[i] for i in order],
            loc='outside lower center',
            ncols=3 if len(handles) % 3 == 0 else 4,
            frameon=False,
            fancybox=True,
        )
path = f'{base_path}/wp-{name}-{work_key}-{precision_key}.{format}'
fig.savefig(path, bbox_inches='tight', **kwargs)
print(f'Stored figure \"{path}\"')
def all_problems(mode='compare_strategies', plotting=True, base_path='data', **kwargs): # pragma: no cover
"""
Make a plot comparing various strategies for all problems.
    Args:
        mode (str): The mode of configurations you want to run with
        plotting (bool): Whether to plot something
        base_path (str): Where to store the figure
Returns:
None
"""
fig, axs = get_fig(2, 2)
shared_params = {
'work_key': 'k_SDC',
'precision_key': 'e_global',
'num_procs': 1,
'runs': 1,
'comm_world': MPI.COMM_WORLD,
'record': False,
'plotting': plotting,
**kwargs,
}
problems = [run_vdp, run_Lorenz, run_Schroedinger, run_quench]
for i in range(len(problems)):
execute_configurations(
**shared_params,
problem=problems[i],
ax=axs.flatten()[i],
decorate=True,
configurations=get_configs(mode, problems[i]),
)
if plotting and shared_params['comm_world'].rank == 0:
save_fig(
fig=fig,
name=mode,
work_key=shared_params['work_key'],
precision_key=shared_params['precision_key'],
legend=True,
base_path=base_path,
)
def ODEs(mode='compare_strategies', plotting=True, base_path='data', **kwargs): # pragma: no cover
"""
Make a plot comparing various strategies for the two ODEs.
    Args:
        mode (str): The mode of configurations you want to run with
        plotting (bool): Whether to plot something
        base_path (str): Where to store the figure
Returns:
None
"""
fig, axs = get_fig(x=2, y=1)
shared_params = {
'work_key': 'k_SDC',
'precision_key': 'e_global',
'num_procs': 1,
'runs': 1,
'comm_world': MPI.COMM_WORLD,
'record': False,
'plotting': plotting,
**kwargs,
}
problems = [run_vdp, run_Lorenz]
for i in range(len(problems)):
execute_configurations(
**shared_params,
problem=problems[i],
ax=axs.flatten()[i],
decorate=i == 0,
configurations=get_configs(mode, problems[i]),
)
if plotting and shared_params['comm_world'].rank == 0:
save_fig(
fig=fig,
name=f'ODEs-{mode}',
work_key=shared_params['work_key'],
precision_key=shared_params['precision_key'],
legend=True,
base_path=base_path,
)
def single_problem(mode, problem, plotting=True, base_path='data', **kwargs): # pragma: no cover
"""
Make a plot for a single problem
Args:
mode (str): What you want to look at
problem (function): A problem to run
"""
fig, ax = get_fig(1, 1, figsize=figsize_by_journal('Springer_Numerical_Algorithms', 1, 0.5))
params = {
'work_key': 'k_SDC',
'precision_key': 'e_global',
'num_procs': 1,
'runs': 1,
'comm_world': MPI.COMM_WORLD,
'record': False,
'plotting': plotting,
**kwargs,
}
execute_configurations(**params, problem=problem, ax=ax, decorate=True, configurations=get_configs(mode, problem))
if plotting:
save_fig(
fig=fig,
name=f'{problem.__name__}-{mode}',
work_key=params['work_key'],
precision_key=params['precision_key'],
legend=False,
base_path=base_path,
)
def vdp_stiffness_plot(base_path='data', format='pdf', **kwargs): # pragma: no cover
fig, axs = get_fig(2, 2, sharex=True)
mus = [0, 5, 10, 15]
for i in range(len(mus)):
params = {
'runs': 1,
'problem': run_vdp,
'record': False,
'work_key': 't',
'precision_key': 'e_global_rel',
'comm_world': MPI.COMM_WORLD,
**kwargs,
}
params['num_procs'] = min(params['comm_world'].size, 5)
params['plotting'] = params['comm_world'].rank == 0
configurations = get_configs(mode=f'vdp_stiffness-{mus[i]}', problem=run_vdp)
execute_configurations(**params, ax=axs.flatten()[i], decorate=True, configurations=configurations)
axs.flatten()[i].set_title(rf'$\mu={{{mus[i]}}}$')
fig.suptitle('Van der Pol')
if params['comm_world'].rank == 0:
save_fig(
fig=fig,
name='vdp-stiffness',
work_key=params['work_key'],
precision_key=params['precision_key'],
legend=False,
base_path=base_path,
format=format,
)
if __name__ == "__main__":
comm_world = MPI.COMM_WORLD
params = {
'mode': 'compare_adaptivity',
'runs': 1,
'num_procs': min(comm_world.size, 5),
'plotting': comm_world.rank == 0,
}
params_single = {
**params,
'problem': run_vdp,
}
record = True
single_problem(**params_single, work_key='t', precision_key='e_global_rel', record=record)
# single_problem(**params_single, work_key='k_Newton_no_restart', precision_key='e_global_rel', record=False)
# single_problem(**params_single, work_key='param', precision_key='e_global_rel', record=False)
# ODEs(**params, work_key='t', precision_key='e_global_rel', record=record)
all_params = {
'record': False,
'runs': 1,
'work_key': 't',
'precision_key': 'e_global_rel',
'plotting': comm_world.rank == 0,
}
for _mode in ['parallel_efficiency']: # , 'preconditioners', 'compare_adaptivity']:
# all_problems(**all_params, mode=mode)
comm_world.Barrier()
if comm_world.rank == 0:
# parallel_efficiency(**params_single, work_key='k_SDC', precision_key='e_global_rel')
plt.show()
| 34,698 | 33.733734 | 214 | py |
pySDC | pySDC-master/pySDC/projects/Resilience/FDeigenvalues.py | import numpy as np
from pySDC.helpers.problem_helper import get_finite_difference_stencil
def get_finite_difference_eigenvalues(derivative, order, stencil_type=None, steps=None, dx=None, L=1.0):
"""
Compute the eigenvalues of the finite difference (FD) discretization using Fourier transform.
In Fourier space, the offsets in the FD discretizations manifest as multiplications by
exp(2 * pi * j * n / N * offset).
Then, all you need to do is sum up the contributions from all entries in the stencil and Bob's your uncle,
you have computed the eigenvalues.
There are going to be as many eigenvalues as there are space elements.
Please be aware that these are in general complex.
Args:
derivative (int): The order of the derivative
order (int): The order of accuracy of the derivative
stencil_type (str): The type of stencil, i.e. 'forward', 'backward', or 'center'
steps (list): If you want an exotic stencil like upwind, you can give the offsets here
dx (float): The mesh spacing
L (float): The length of the interval in space
Returns:
numpy.ndarray: The complex (!) eigenvalues.
"""
# prepare variables
N = int(L // dx)
eigenvalues = np.zeros(N, dtype=complex)
# get the stencil
weights, offsets = get_finite_difference_stencil(
derivative=derivative, order=order, stencil_type=stencil_type, steps=steps
)
# get the impact of the stencil in Fourier space
for n in range(N):
for i in range(len(weights)):
eigenvalues[n] += weights[i] * np.exp(2 * np.pi * 1j * n / N * offsets[i]) * 1.0 / (dx**derivative)
return eigenvalues
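# Sanity-check sketch: the second-order centred stencil for the second derivative has
# weights [1, -2, 1] at offsets [-1, 0, 1], so the loop above reproduces the classical
# eigenvalues 2 * (np.cos(2 * np.pi * n / N) - 1) / dx**2, e.g. via
#   eigs = get_finite_difference_eigenvalues(2, 2, stencil_type='center', dx=0.1)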
| 1,702 | 37.704545 | 111 | py |
pySDC | pySDC-master/pySDC/projects/Resilience/accuracy_check.py | import matplotlib as mpl
import matplotlib.pylab as plt
import numpy as np
from pySDC.helpers.stats_helper import get_sorted
from pySDC.implementations.convergence_controller_classes.estimate_embedded_error import EstimateEmbeddedError
from pySDC.implementations.convergence_controller_classes.estimate_extrapolation_error import (
EstimateExtrapolationErrorNonMPI,
)
from pySDC.core.Hooks import hooks
from pySDC.implementations.hooks.log_errors import LogLocalErrorPostStep
from pySDC.projects.Resilience.strategies import merge_descriptions
import pySDC.helpers.plot_helper as plt_helper
from pySDC.projects.Resilience.piline import run_piline
class DoNothing(hooks):
pass
def setup_mpl(font_size=8):
"""
    Set up matplotlib to fit in with TeX script.
    Args:
        font_size (int): Font size
Returns:
None
"""
plt_helper.setup_mpl(reset=True)
# Set up plotting parameters
style_options = {
"axes.labelsize": 12, # LaTeX default is 10pt font.
"legend.fontsize": 13, # Make the legend/label fonts a little smaller
"axes.xmargin": 0.03,
"axes.ymargin": 0.03,
}
mpl.rcParams.update(style_options)
def get_results_from_stats(stats, var, val, hook_class=LogLocalErrorPostStep):
"""
    Extract results from the stats that are used to compute the order.
Args:
stats (dict): The stats object from a pySDC run
var (str): The variable to compute the order against
val (float): The value of var corresponding to this run
hook_class (pySDC.Hook): A hook such that we know what information is available
Returns:
dict: The information needed for the order plot
"""
results = {
'e_embedded': 0.0,
'e_extrapolated': 0.0,
'e': 0.0,
var: val,
}
if hook_class == LogLocalErrorPostStep:
e_extrapolated = np.array(get_sorted(stats, type='error_extrapolation_estimate'))[:, 1]
e_embedded = np.array(get_sorted(stats, type='error_embedded_estimate'))[:, 1]
e_local = np.array(get_sorted(stats, type='e_local_post_step'))[:, 1]
if len(e_extrapolated[e_extrapolated != [None]]) > 0:
results['e_extrapolated'] = e_extrapolated[e_extrapolated != [None]][-1]
if len(e_local[e_local != [None]]) > 0:
results['e'] = max([e_local[e_local != [None]][-1], np.finfo(float).eps])
if len(e_embedded[e_embedded != [None]]) > 0:
results['e_embedded'] = e_embedded[e_embedded != [None]][-1]
return results
def multiple_runs(
k=5,
serial=True,
Tend_fixed=None,
custom_description=None,
prob=run_piline,
dt_list=None,
hook_class=LogLocalErrorPostStep,
custom_controller_params=None,
var='dt',
avoid_restarts=False,
embedded_error_flavor=None,
):
"""
A simple test program to compute the order of accuracy.
Args:
k (int): Number of SDC sweeps
serial (bool): Whether to do regular SDC or Multi-step SDC with 5 processes
Tend_fixed (float): The time you want to solve the equation to. If left at `None`, the local error will be
computed since a fixed number of steps will be performed.
custom_description (dict): Custom parameters to pass to the problem
prob (function): A function that can accept suitable arguments and run a problem (see the Resilience project)
dt_list (list): A list of values to check the order with
hook_class (pySDC.Hook): A hook for recording relevant information
custom_controller_params (dict): Custom parameters to pass to the problem
var (str): The variable to check the order against
avoid_restarts (bool): Mode of running adaptivity if applicable
embedded_error_flavor (str): Flavor for the estimation of embedded error
Returns:
dict: The errors for different values of var
"""
# assemble list of dt
if dt_list is not None:
pass
elif Tend_fixed:
dt_list = 0.1 * 10.0 ** -(np.arange(3) / 2)
else:
dt_list = 0.01 * 10.0 ** -(np.arange(20) / 10.0)
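    # e.g. Tend_fixed yields dt_list = [0.1, 0.1 / sqrt(10), 0.01], while the default
    # branch sweeps 20 logarithmically spaced steps from 0.01 down to about 1.3e-4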
num_procs = 1 if serial else 5
embedded_error_flavor = (
embedded_error_flavor if embedded_error_flavor else 'standard' if avoid_restarts else 'linearized'
)
# perform rest of the tests
for i in range(0, len(dt_list)):
desc = {
'step_params': {'maxiter': k},
'convergence_controllers': {
EstimateEmbeddedError.get_implementation(flavor=embedded_error_flavor, useMPI=False): {},
EstimateExtrapolationErrorNonMPI: {'no_storage': not serial},
},
}
# setup the variable we check the order against
if var == 'dt':
desc['level_params'] = {'dt': dt_list[i]}
elif var == 'e_tol':
from pySDC.implementations.convergence_controller_classes.adaptivity import Adaptivity
desc['convergence_controllers'][Adaptivity] = {
'e_tol': dt_list[i],
'avoid_restarts': avoid_restarts,
'embedded_error_flavor': embedded_error_flavor,
}
if custom_description is not None:
desc = merge_descriptions(desc, custom_description)
Tend = Tend_fixed if Tend_fixed else 30 * dt_list[i]
stats, controller, _ = prob(
custom_description=desc,
num_procs=num_procs,
Tend=Tend,
hook_class=hook_class,
custom_controller_params=custom_controller_params,
)
level = controller.MS[-1].levels[-1]
e_glob = abs(level.prob.u_exact(t=level.time + level.dt) - level.u[-1])
e_local = abs(level.prob.u_exact(t=level.time + level.dt, u_init=level.u[0], t_init=level.time) - level.u[-1])
res_ = get_results_from_stats(stats, var, dt_list[i], hook_class)
res_['e_glob'] = e_glob
res_['e_local'] = e_local
if i == 0:
res = res_.copy()
for key in res.keys():
res[key] = [res[key]]
else:
for key in res_.keys():
res[key].append(res_[key])
return res
def plot_order(res, ax, k):
"""
Plot the order using results from `multiple_runs`.
Args:
res (dict): The results from `multiple_runs`
ax: Somewhere to plot
k (int): Number of iterations
Returns:
None
"""
color = plt.rcParams['axes.prop_cycle'].by_key()['color'][k - 2]
key = 'e_local'
order = get_accuracy_order(res, key=key, thresh=1e-11)
label = f'k={k}, p={np.mean(order):.2f}'
ax.loglog(res['dt'], res[key], color=color, ls='-', label=label)
ax.set_xlabel(r'$\Delta t$')
ax.set_ylabel(r'$\epsilon$')
ax.legend(frameon=False, loc='lower right')
def plot(res, ax, k, var='dt', keys=None):
"""
Plot the order of various errors using the results from `multiple_runs`.
Args:
        res (dict): The dictionary containing the errors
ax: Somewhere to plot
k (int): Number of SDC sweeps
var (str): The variable to compute the order against
keys (list): List of keys to plot from the results
Returns:
None
"""
keys = keys if keys else ['e_embedded', 'e_extrapolated', 'e']
ls = ['-', ':', '-.']
color = plt.rcParams['axes.prop_cycle'].by_key()['color'][k - 2]
for i in range(len(keys)):
if all(me == 0.0 for me in res[keys[i]]):
continue
order = get_accuracy_order(res, key=keys[i], var=var)
if keys[i] == 'e_embedded':
label = rf'$k={{{np.mean(order):.2f}}}$'
expect_order = k if var == 'dt' else 1.0
            assert np.isclose(
                np.mean(order), expect_order, atol=4e-1
            ), f'Expected embedded error estimate to have order {expect_order} but got {np.mean(order):.2f}'
elif keys[i] == 'e_extrapolated':
label = None
expect_order = k + 1 if var == 'dt' else 1 + 1 / k
            assert np.isclose(
                np.mean(order), expect_order, rtol=3e-1
            ), f'Expected extrapolation error estimate to have order {expect_order} but got {np.mean(order):.2f}'
else:
label = None
ax.loglog(res[var], res[keys[i]], color=color, ls=ls[i], label=label)
if var == 'dt':
ax.set_xlabel(r'$\Delta t$')
elif var == 'e_tol':
ax.set_xlabel(r'$\epsilon_\mathrm{TOL}$')
else:
ax.set_xlabel(var)
ax.set_ylabel(r'$\epsilon$')
ax.legend(frameon=False, loc='lower right')
def get_accuracy_order(results, key='e_embedded', thresh=1e-14, var='dt'):
"""
Routine to compute the order of accuracy in time
Args:
results (dict): the dictionary containing the errors
key (str): The key in the dictionary corresponding to a specific error
thresh (float): A threshold below which values are not entered into the order computation
var (str): The variable to compute the order against
Returns:
the list of orders
"""
# retrieve the list of dt from results
assert var in results, f'ERROR: expecting the list of {var} in the results dictionary'
dt_list = sorted(results[var], reverse=True)
order = []
# loop over two consecutive errors/dt pairs
for i in range(1, len(dt_list)):
# compute order as log(prev_error/this_error)/log(this_dt/old_dt) <-- depends on the sorting of the list!
try:
if results[key][i] > thresh and results[key][i - 1] > thresh:
order.append(np.log(results[key][i] / results[key][i - 1]) / np.log(dt_list[i] / dt_list[i - 1]))
except TypeError:
print('Type Warning', results[key])
return order
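# Worked example of the formula above: halving dt from 1e-2 to 5e-3 while the error
# drops from 1e-4 to 2.5e-5 gives log(2.5e-5 / 1e-4) / log(5e-3 / 1e-2) = 2, i.e.
# second-order accuracy.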
def plot_orders(
ax,
ks,
serial,
Tend_fixed=None,
custom_description=None,
prob=run_piline,
dt_list=None,
custom_controller_params=None,
embedded_error_flavor=None,
):
"""
Plot only the local error.
Args:
ax: Somewhere to plot
ks (list): List of sweeps
serial (bool): Whether to do regular SDC or Multi-step SDC with 5 processes
        Tend_fixed (float): The time you want to solve the equation to. If left at `None`, the local error will be
            computed since a fixed number of steps will be performed.
custom_description (dict): Custom parameters to pass to the problem
prob (function): A function that can accept suitable arguments and run a problem (see the Resilience project)
dt_list (list): A list of values to check the order with
custom_controller_params (dict): Custom parameters to pass to the problem
embedded_error_flavor (str): Flavor for the estimation of embedded error
Returns:
None
"""
for i in range(len(ks)):
k = ks[i]
res = multiple_runs(
k=k,
serial=serial,
Tend_fixed=Tend_fixed,
custom_description=custom_description,
prob=prob,
dt_list=dt_list,
hook_class=DoNothing,
custom_controller_params=custom_controller_params,
embedded_error_flavor=embedded_error_flavor,
)
plot_order(res, ax, k)
def plot_all_errors(
ax,
ks,
serial,
Tend_fixed=None,
custom_description=None,
prob=run_piline,
dt_list=None,
custom_controller_params=None,
var='dt',
avoid_restarts=False,
embedded_error_flavor=None,
keys=None,
):
"""
Make tests for plotting the error and plot a bunch of error estimates
Args:
ax: Somewhere to plot
ks (list): List of sweeps
serial (bool): Whether to do regular SDC or Multi-step SDC with 5 processes
        Tend_fixed (float): The time you want to solve the equation to. If left at `None`, the local error will be
            computed since a fixed number of steps will be performed.
custom_description (dict): Custom parameters to pass to the problem
prob (function): A function that can accept suitable arguments and run a problem (see the Resilience project)
dt_list (list): A list of values to check the order with
custom_controller_params (dict): Custom parameters to pass to the problem
var (str): The variable to compute the order against
avoid_restarts (bool): Mode of running adaptivity if applicable
embedded_error_flavor (str): Flavor for the estimation of embedded error
keys (list): List of keys to plot from the results
Returns:
None
"""
for i in range(len(ks)):
k = ks[i]
res = multiple_runs(
k=k,
serial=serial,
Tend_fixed=Tend_fixed,
custom_description=custom_description,
prob=prob,
dt_list=dt_list,
custom_controller_params=custom_controller_params,
var=var,
avoid_restarts=avoid_restarts,
embedded_error_flavor=embedded_error_flavor,
)
# visualize results
plot(res, ax, k, var=var, keys=keys)
ax.plot([None, None], color='black', label=r'$\epsilon_\mathrm{embedded}$', ls='-')
ax.plot([None, None], color='black', label=r'$\epsilon_\mathrm{extrapolated}$', ls=':')
ax.plot([None, None], color='black', label=r'$e$', ls='-.')
ax.legend(frameon=False, loc='lower right')
def check_order_with_adaptivity():
"""
Test the order when running adaptivity.
Since we replace the step size with the tolerance, we check the order against this.
Irrespective of the number of sweeps we do, the embedded error estimate should scale linearly with the tolerance,
since it is supposed to match it as closely as possible.
    The error estimate for the error of the last sweep, however, will depend on the number of sweeps we do. The order
we expect is 1 + 1/k.
"""
setup_mpl()
ks = [3, 2]
for serial in [True, False]:
fig, ax = plt.subplots(1, 1, figsize=(3.5, 3))
plot_all_errors(
ax,
ks,
serial,
Tend_fixed=5e-1,
var='e_tol',
dt_list=[1e-5, 5e-6],
avoid_restarts=False,
custom_controller_params={'logger_level': 30},
)
if serial:
fig.savefig('data/error_estimate_order_adaptivity.png', dpi=300, bbox_inches='tight')
else:
fig.savefig('data/error_estimate_order_adaptivity_parallel.png', dpi=300, bbox_inches='tight')
plt.close(fig)
def check_order_against_step_size():
"""
Check the order versus the step size for different numbers of sweeps.
"""
setup_mpl()
ks = [4, 3, 2]
for serial in [True, False]:
fig, ax = plt.subplots(1, 1, figsize=(3.5, 3))
plot_all_errors(ax, ks, serial, Tend_fixed=1.0)
if serial:
fig.savefig('data/error_estimate_order.png', dpi=300, bbox_inches='tight')
else:
fig.savefig('data/error_estimate_order_parallel.png', dpi=300, bbox_inches='tight')
plt.close(fig)
def main():
"""Run various tests"""
check_order_with_adaptivity()
check_order_against_step_size()
if __name__ == "__main__":
main()
| 15,223 | 33.288288 | 118 | py |
pySDC | pySDC-master/pySDC/projects/Resilience/paper_plots.py | # script to make pretty plots for papers or talks
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from pySDC.projects.Resilience.fault_stats import (
FaultStats,
BaseStrategy,
AdaptivityStrategy,
IterateStrategy,
HotRodStrategy,
run_Lorenz,
run_Schroedinger,
run_vdp,
run_quench,
)
from pySDC.helpers.plot_helper import setup_mpl, figsize_by_journal
from pySDC.helpers.stats_helper import get_sorted
cm = 1 / 2.54  # conversion factor from cm to inches (1 in = 2.54 cm)
TEXTWIDTH = 11.9446244611 * cm
JOURNAL = 'Springer_Numerical_Algorithms'
BASE_PATH = 'data/paper'
def get_stats(problem, path='data/stats-jusuf'):
"""
Create a FaultStats object for a given problem to use for the plots.
    Note that the statistics need to be already generated somewhere else; this function will only load them.
Args:
problem (function): A problem to run
path (str): Path to the associated stats for the problem
Returns:
FaultStats: Object to analyse resilience statistics from
"""
if problem in [run_Lorenz, run_vdp]:
mode = 'combination'
else:
mode = 'random'
recovery_thresh_abs = {
run_quench: 5e-3,
}
strategies = [BaseStrategy(), AdaptivityStrategy(), IterateStrategy()]
if JOURNAL not in ['JSC_beamer']:
strategies += [HotRodStrategy()]
stats_analyser = FaultStats(
prob=problem,
strategies=strategies,
faults=[False, True],
reload=True,
recovery_thresh=1.1,
recovery_thresh_abs=recovery_thresh_abs.get(problem, 0),
num_procs=1,
mode=mode,
stats_path=path,
)
stats_analyser.get_recovered()
return stats_analyser
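# Typical use, assuming the statistics were generated beforehand (e.g. on JUSUF):
#   stats_analyser = get_stats(run_vdp)
#   plot_recovery_rate(stats_analyser)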
def my_setup_mpl(**kwargs):
setup_mpl(reset=True, font_size=8)
mpl.rcParams.update({'lines.markersize': 6})
def savefig(fig, name, format='pdf', tight_layout=True): # pragma: no cover
"""
Save a figure to some predefined location.
Args:
fig (Matplotlib.Figure): The figure of the plot
name (str): The name of the plot
tight_layout (bool): Apply tight layout or leave as is
Returns:
None
"""
if tight_layout:
fig.tight_layout()
path = f'{BASE_PATH}/{name}.{format}'
fig.savefig(path, bbox_inches='tight', transparent=True, dpi=200)
print(f'saved "{path}"')
def analyse_resilience(problem, path='data/stats', **kwargs): # pragma: no cover
"""
Generate some stats for resilience / load them if already available and make some plots.
Args:
problem (function): A problem to run
path (str): Path to the associated stats for the problem
Returns:
None
"""
stats_analyser = get_stats(problem, path)
stats_analyser.get_recovered()
strategy = IterateStrategy()
not_fixed = stats_analyser.get_mask(strategy=strategy, key='recovered', val=False)
not_overflow = stats_analyser.get_mask(strategy=strategy, key='bit', val=1, op='uneq', old_mask=not_fixed)
stats_analyser.print_faults(not_overflow)
# special = stats_analyser.get_mask(strategy=strategy, key='bit', val=10, op='eq')
# stats_analyser.print_faults(special)
# Adaptivity: 19, ...
# stats_analyser.scrutinize(strategy, run=19, faults=True)
compare_strategies(stats_analyser, **kwargs)
plot_recovery_rate(stats_analyser, **kwargs)
def compare_strategies(stats_analyser, **kwargs): # pragma: no cover
"""
Make a plot showing local error and iteration number of time for all strategies
Args:
stats_analyser (FaultStats): Fault stats object, which contains some stats
Returns:
None
"""
my_setup_mpl()
fig, ax = plt.subplots(figsize=(TEXTWIDTH, 5 * cm))
stats_analyser.compare_strategies(ax=ax)
savefig(fig, 'compare_strategies', **kwargs)
def plot_recovery_rate(stats_analyser, **kwargs): # pragma: no cover
"""
Make a plot showing recovery rate for all faults and only for those that can be recovered.
Args:
stats_analyser (FaultStats): Fault stats object, which contains some stats
Returns:
None
"""
my_setup_mpl()
fig, axs = plt.subplots(1, 2, figsize=(TEXTWIDTH, 5 * cm), sharex=True, sharey=True)
stats_analyser.plot_things_per_things(
'recovered', 'bit', False, op=stats_analyser.rec_rate, args={'ylabel': 'recovery rate'}, ax=axs[0]
)
plot_recovery_rate_recoverable_only(stats_analyser, fig, axs[1], ylabel='')
axs[1].get_legend().remove()
axs[0].set_title('All faults')
axs[1].set_title('Only recoverable faults')
savefig(fig, 'recovery_rate_compared', **kwargs)
def plot_recovery_rate_recoverable_only(stats_analyser, fig, ax, **kwargs): # pragma: no cover
"""
Plot the recovery rate considering only faults that can be recovered theoretically.
Args:
stats_analyser (FaultStats): Fault stats object, which contains some stats
fig (matplotlib.pyplot.figure): Figure in which to plot
ax (matplotlib.pyplot.axes): Somewhere to plot
Returns:
None
"""
for i in range(len(stats_analyser.strategies)):
fixable = stats_analyser.get_fixable_faults_only(strategy=stats_analyser.strategies[i])
stats_analyser.plot_things_per_things(
'recovered',
'bit',
False,
op=stats_analyser.rec_rate,
mask=fixable,
args={**kwargs},
ax=ax,
fig=fig,
strategies=[stats_analyser.strategies[i]],
)
def compare_recovery_rate_problems(): # pragma: no cover
"""
    Compare the recovery rate for the van der Pol, Lorenz, Schroedinger and quench problems.
Only faults that can be recovered are shown.
Returns:
None
"""
stats = [
get_stats(run_vdp),
get_stats(run_Lorenz),
get_stats(run_Schroedinger),
get_stats(run_quench),
]
titles = ['Van der Pol', 'Lorenz attractor', r'Schr\"odinger', 'Quench']
my_setup_mpl()
fig, axs = plt.subplots(2, 2, figsize=figsize_by_journal(JOURNAL, 1, 0.8), sharey=True)
[
plot_recovery_rate_recoverable_only(stats[i], fig, axs.flatten()[i], ylabel='', title=titles[i])
for i in range(len(stats))
]
for ax in axs.flatten():
ax.get_legend().remove()
axs[1, 1].legend(frameon=False)
axs[1, 0].set_ylabel('recovery rate')
axs[0, 0].set_ylabel('recovery rate')
savefig(fig, 'compare_equations')
def plot_efficiency_polar_vdp(problem, path='data/stats'): # pragma: no cover
stats_analyser = get_stats(problem, path)
fig, ax = plt.subplots(
subplot_kw={'projection': 'polar'}, figsize=figsize_by_journal(JOURNAL, 0.7, 0.5), layout='constrained'
)
theta, norms = plot_efficiency_polar_single(stats_analyser, ax)
labels = ['fail rate', 'extra iterations\nfor recovery', 'iterations for solution']
ax.set_xticks(theta[:-1], [f'{labels[i]}\nmax={norms[i]:.2f}' for i in range(len(labels))])
ax.set_rlabel_position(90)
fig.legend(frameon=False, loc='outside right', ncols=1)
savefig(fig, 'efficiency', tight_layout=False)
def plot_efficiency_polar_other(): # pragma: no cover
problems = [run_Lorenz, run_Schroedinger, run_quench]
paths = ['./data/stats/', './data/stats-jusuf', './data/stats-jusuf']
titles = ['Lorenz attractor', r'Schr\"odinger', 'Quench']
fig, axs = plt.subplots(
1, 3, subplot_kw={'projection': 'polar'}, figsize=figsize_by_journal(JOURNAL, 0.7, 0.5), layout='constrained'
)
for i in range(len(problems)):
stats_analyser = get_stats(problems[i], paths[i])
ax = axs[i]
theta, norms = plot_efficiency_polar_single(stats_analyser, ax)
labels = ['fail rate', 'extra iterations\nfor recovery', 'iterations for solution']
ax.set_rlabel_position(90)
# ax.set_xticks(theta[:-1], [f'max={norms[i]:.2f}' for i in range(len(labels))])
ax.set_xticks(theta[:-1], ['' for i in range(len(labels))])
ax.set_title(titles[i])
handles, labels = fig.get_axes()[0].get_legend_handles_labels()
fig.legend(handles=handles, labels=labels, frameon=False, loc='outside lower center', ncols=4)
savefig(fig, 'efficiency_other', tight_layout=False)
def plot_efficiency_polar_single(stats_analyser, ax): # pragma: no cover
"""
Plot the recovery rate and the computational cost in a polar plot.
Shown are three axes, where lower is better in all cases.
First is the fail rate, which is averaged across all faults, not just ones that can be fixed.
Then, there is the number of iterations, which we use as a measure for how expensive the scheme is to run.
And finally, there is an axis of how many extra iterations we need in case a fault is fixed by the resilience
scheme.
All quantities are plotted relative to their maximum.
    Args:
        stats_analyser (FaultStats): Fault stats object, which contains some stats
        ax (matplotlib.pyplot.axes): Somewhere to plot
    Returns:
        tuple: The angles of the axes and the norms used for rescaling the data
    """
mask = stats_analyser.get_mask() # get empty mask, potentially put in some other mask later
my_setup_mpl()
res = {}
for strategy in stats_analyser.strategies:
dat = stats_analyser.load(strategy=strategy, faults=True)
dat_no_faults = stats_analyser.load(strategy=strategy, faults=False)
mask = stats_analyser.get_fixable_faults_only(strategy=strategy)
fail_rate = 1.0 - stats_analyser.rec_rate(dat, dat_no_faults, 'recovered', mask)
iterations_no_faults = np.mean(dat_no_faults['total_iteration'])
detected = stats_analyser.get_mask(strategy=strategy, key='total_iteration', op='gt', val=iterations_no_faults)
rec_mask = stats_analyser.get_mask(strategy=strategy, key='recovered', op='eq', val=True, old_mask=detected)
if rec_mask.any():
extra_iterations = np.mean(dat['total_iteration'][rec_mask]) - iterations_no_faults
else:
extra_iterations = 0
res[strategy.name] = [fail_rate, extra_iterations, iterations_no_faults]
# normalize
# for strategy in stats_analyser.strategies:
norms = [max([res[k][i] for k in res.keys()]) for i in range(len(res['base']))]
norms[1] = norms[2] # use same norm for all iterations
    res_norm = {key: val.copy() for key, val in res.items()}  # copy inner lists so res itself is not mutated
for k in res_norm.keys():
for i in range(3):
res_norm[k][i] /= norms[i]
theta = np.array([30, 150, 270, 30]) * 2 * np.pi / 360
for s in stats_analyser.strategies:
ax.plot(theta, res_norm[s.name] + [res_norm[s.name][0]], label=s.label, color=s.color, marker=s.marker)
return theta, norms
def plot_adaptivity_stuff(): # pragma: no cover
"""
Plot the solution for a van der Pol problem as well as the local error and cost associated with the base scheme and
adaptivity in k and dt in order to demonstrate that adaptivity is useful.
Returns:
None
"""
from pySDC.implementations.convergence_controller_classes.estimate_embedded_error import EstimateEmbeddedError
from pySDC.implementations.hooks.log_errors import LogLocalErrorPostStep
from pySDC.projects.Resilience.hook import LogData
stats_analyser = get_stats(run_vdp, 'data/stats')
my_setup_mpl()
scale = 0.5 if JOURNAL == 'JSC_beamer' else 1.0
fig, axs = plt.subplots(3, 1, figsize=figsize_by_journal(JOURNAL, scale, 1), sharex=True, sharey=False)
def plot_error(stats, ax, iter_ax, strategy, **kwargs):
"""
Plot global error and cumulative sum of iterations
Args:
stats (dict): Stats from pySDC run
ax (Matplotlib.pyplot.axes): Somewhere to plot the error
iter_ax (Matplotlib.pyplot.axes): Somewhere to plot the iterations
strategy (pySDC.projects.Resilience.fault_stats.Strategy): The resilience strategy
Returns:
None
"""
markevery = 40
e = get_sorted(stats, type='e_local_post_step', recomputed=False)
ax.plot([me[0] for me in e], [me[1] for me in e], markevery=markevery, **strategy.style, **kwargs)
k = get_sorted(stats, type='k')
iter_ax.plot(
[me[0] for me in k], np.cumsum([me[1] for me in k]), **strategy.style, markevery=markevery, **kwargs
)
ax.set_yscale('log')
ax.set_ylabel('local error')
iter_ax.set_ylabel(r'SDC iterations')
force_params = {'convergence_controllers': {EstimateEmbeddedError: {}}}
# force_params = {'convergence_controllers': {EstimateEmbeddedError: {}}, 'step_params': {'maxiter': 5}, 'level_params': {'dt': 4e-2}}
for strategy in [BaseStrategy, AdaptivityStrategy, IterateStrategy]:
stats, _, _ = stats_analyser.single_run(
strategy=strategy(), force_params=force_params, hook_class=[LogLocalErrorPostStep, LogData]
)
plot_error(stats, axs[1], axs[2], strategy())
if strategy == BaseStrategy:
u = get_sorted(stats, type='u', recomputed=False)
axs[0].plot([me[0] for me in u], [me[1][0] for me in u], color='black', label=r'$u$')
# axs[0].plot([me[0] for me in u], [me[1][1] for me in u], color='black', ls='--', label=r'$u_t$')
# axs[0].legend(frameon=False)
axs[2].set_xlabel(r'$t$')
axs[0].set_ylabel('solution')
axs[2].legend(frameon=JOURNAL == 'JSC_beamer')
savefig(fig, 'adaptivity')
def plot_fault_vdp(bit=0): # pragma: no cover
"""
Make a plot showing the impact of a fault on van der Pol without any resilience.
The faults are inserted in the last iteration in the last node in u_t such that you can best see the impact.
Args:
bit (int): The bit that you want to flip
Returns:
None
"""
from pySDC.projects.Resilience.fault_stats import (
FaultStats,
BaseStrategy,
)
from pySDC.projects.Resilience.hook import LogData
stats_analyser = FaultStats(
prob=run_vdp,
strategies=[BaseStrategy()],
faults=[False, True],
reload=True,
recovery_thresh=1.1,
num_procs=1,
mode='combination',
)
my_setup_mpl()
fig, ax = plt.subplots(figsize=figsize_by_journal(JOURNAL, 0.8, 0.5))
colors = ['blue', 'red', 'magenta']
ls = ['--', '-']
markers = ['*', '^']
do_faults = [False, True]
superscripts = ['*', '']
subscripts = ['', 't', '']
run = 779 + 12 * bit # for faults in u_t
# run = 11 + 12 * bit # for faults in u
for i in range(len(do_faults)):
stats, controller, Tend = stats_analyser.single_run(
strategy=BaseStrategy(),
run=run,
faults=do_faults[i],
hook_class=[LogData],
)
u = get_sorted(stats, type='u')
faults = get_sorted(stats, type='bitflip')
for j in [0, 1]:
ax.plot(
[me[0] for me in u],
[me[1][j] for me in u],
ls=ls[i],
color=colors[j],
label=rf'$u^{{{superscripts[i]}}}_{{{subscripts[j]}}}$',
marker=markers[j],
markevery=60,
)
for idx in range(len(faults)):
ax.axvline(faults[idx][0], color='black', label='Fault', ls=':')
print(
f'Fault at t={faults[idx][0]:.2e}, iter={faults[idx][1][1]}, node={faults[idx][1][2]}, space={faults[idx][1][3]}, bit={faults[idx][1][4]}'
)
ax.set_title(f'Fault in bit {faults[idx][1][4]}')
ax.legend(frameon=True, loc='lower left')
ax.set_xlabel(r'$t$')
savefig(fig, f'fault_bit_{bit}')
def plot_quench_solution(): # pragma: no cover
"""
Plot the solution of Quench problem over time
Returns:
None
"""
my_setup_mpl()
if JOURNAL == 'JSC_beamer':
fig, ax = plt.subplots(figsize=figsize_by_journal(JOURNAL, 0.5, 0.9))
else:
fig, ax = plt.subplots(figsize=figsize_by_journal(JOURNAL, 1.0, 0.45))
strategy = BaseStrategy()
custom_description = strategy.get_custom_description(run_quench)
stats, controller, _ = run_quench(custom_description=custom_description, Tend=strategy.get_Tend(run_quench))
prob = controller.MS[0].levels[0].prob
u = get_sorted(stats, type='u')
ax.plot([me[0] for me in u], [max(me[1]) for me in u], color='black', label='$T$')
    ax.axhline(prob.u_thresh, label=r'$T_\mathrm{thresh}$', ls='--', color='grey', zorder=-1)
    ax.axhline(prob.u_max, label=r'$T_\mathrm{max}$', ls=':', color='grey', zorder=-1)
ax.set_xlabel(r'$t$')
ax.legend(frameon=False)
savefig(fig, 'quench_sol')
def plot_Lorenz_solution(): # pragma: no cover
"""
Plot the solution of Lorenz attractor problem over time
Returns:
None
"""
from pySDC.implementations.convergence_controller_classes.adaptivity import Adaptivity
from pySDC.projects.Resilience.strategies import AdaptivityStrategy
from pySDC.implementations.hooks.log_errors import LogGlobalErrorPostRun
strategy = AdaptivityStrategy()
my_setup_mpl()
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
# if JOURNAL == 'JSC_beamer':
# fig, ax = plt.subplots(figsize=figsize_by_journal(JOURNAL, 0.5, 0.9))
# else:
# fig, ax = plt.subplots(figsize=figsize_by_journal(JOURNAL, 1.0, 0.33))
custom_description = strategy.get_custom_description(run_Lorenz, 1)
custom_description['convergence_controllers'] = {Adaptivity: {'e_tol': 1e-10}}
stats, _, _ = run_Lorenz(
custom_description=custom_description,
Tend=strategy.get_Tend(run_Lorenz, 1) * 20,
hook_class=LogGlobalErrorPostRun,
)
u = get_sorted(stats, type='u')
e = get_sorted(stats, type='e_global_post_run')[-1]
print(u[-1], e)
ax.plot([me[1][0] for me in u], [me[1][1] for me in u], [me[1][2] for me in u])
##################
from pySDC.projects.Resilience.strategies import DIRKStrategy, ERKStrategy, IterateStrategy
from pySDC.implementations.convergence_controller_classes.adaptivity import AdaptivityRK
strategy = ERKStrategy()
custom_description = strategy.get_custom_description(run_Lorenz, 1)
custom_description['convergence_controllers'] = {Adaptivity: {'e_tol': 1e-10}}
stats, _, _ = run_Lorenz(
custom_description=custom_description,
Tend=strategy.get_Tend(run_Lorenz, 1) * 20,
hook_class=LogGlobalErrorPostRun,
)
u = get_sorted(stats, type='u')
e = get_sorted(stats, type='e_global_post_run')[-1]
print(u[-1], e)
ax.plot([me[1][0] for me in u], [me[1][1] for me in u], [me[1][2] for me in u], ls='--')
################
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
savefig(fig, 'lorenz_sol')
def plot_vdp_solution(): # pragma: no cover
"""
Plot the solution of van der Pol problem over time to illustrate the varying time scales.
Returns:
None
"""
from pySDC.implementations.convergence_controller_classes.adaptivity import Adaptivity
my_setup_mpl()
if JOURNAL == 'JSC_beamer':
fig, ax = plt.subplots(figsize=figsize_by_journal(JOURNAL, 0.5, 0.9))
else:
fig, ax = plt.subplots(figsize=figsize_by_journal(JOURNAL, 1.0, 0.33))
custom_description = {'convergence_controllers': {Adaptivity: {'e_tol': 1e-7}}}
stats, _, _ = run_vdp(custom_description=custom_description, Tend=28.6)
u = get_sorted(stats, type='u')
ax.plot([me[0] for me in u], [me[1][0] for me in u], color='black')
ax.set_ylabel(r'$u$')
ax.set_xlabel(r'$t$')
savefig(fig, 'vdp_sol')
def work_precision(): # pragma: no cover
from pySDC.projects.Resilience.work_precision import (
all_problems,
single_problem,
ODEs,
get_fig,
execute_configurations,
save_fig,
get_configs,
MPI,
vdp_stiffness_plot,
)
all_params = {
'record': False,
'work_key': 't',
'precision_key': 'e_global_rel',
'plotting': True,
'base_path': 'data/paper',
}
for mode in ['compare_strategies', 'parallel_efficiency']:
all_problems(**all_params, mode=mode)
# Quench stuff
fig, axs = get_fig(x=3, y=1, figsize=figsize_by_journal('Springer_Numerical_Algorithms', 1, 0.47))
quench_params = {
**all_params,
'problem': run_quench,
'decorate': True,
'configurations': get_configs('step_size_limiting', run_quench),
'num_procs': 1,
'runs': 1,
'comm_world': MPI.COMM_WORLD,
}
quench_params.pop('base_path', None)
execute_configurations(**{**quench_params, 'work_key': 'k_SDC', 'precision_key': 'k_Newton'}, ax=axs[2])
execute_configurations(**{**quench_params, 'work_key': 'param', 'precision_key': 'restart'}, ax=axs[1])
execute_configurations(**{**quench_params, 'work_key': 't', 'precision_key': 'e_global_rel'}, ax=axs[0])
axs[1].set_yscale('linear')
axs[2].set_yscale('linear')
axs[2].set_xscale('linear')
axs[1].set_xlabel(r'$e_\mathrm{tol}$')
for ax in axs:
ax.set_title(ax.get_ylabel())
ax.set_ylabel('')
fig.suptitle('Quench')
save_fig(
fig=fig,
name=f'{run_quench.__name__}',
work_key='step-size',
precision_key='limiting',
legend=True,
base_path=all_params["base_path"],
)
vdp_stiffness_plot(base_path='data/paper')
def make_plots_for_TIME_X_website(): # pragma: no cover
global JOURNAL, BASE_PATH
JOURNAL = 'JSC_beamer'
BASE_PATH = 'data/paper/time-x_website'
fig, ax = plt.subplots(figsize=figsize_by_journal(JOURNAL, 0.5, 2.0 / 3.0))
plot_recovery_rate_recoverable_only(get_stats(run_vdp), fig, ax)
savefig(fig, 'recovery_rate', format='png')
from pySDC.projects.Resilience.work_precision import vdp_stiffness_plot
vdp_stiffness_plot(base_path=BASE_PATH, format='png')
def make_plots_for_SIAM_CSE23(): # pragma: no cover
"""
Make plots for the SIAM talk
"""
global JOURNAL, BASE_PATH
JOURNAL = 'JSC_beamer'
BASE_PATH = 'data/paper/SIAMCSE23'
fig, ax = plt.subplots(figsize=figsize_by_journal(JOURNAL, 0.5, 3.0 / 4.0))
plot_recovery_rate_recoverable_only(get_stats(run_vdp), fig, ax)
savefig(fig, 'recovery_rate')
plot_adaptivity_stuff()
compare_recovery_rate_problems()
plot_vdp_solution()
def make_plots_for_paper(): # pragma: no cover
"""
Make plots that are supposed to go in the paper.
"""
global JOURNAL, BASE_PATH
JOURNAL = 'Springer_Numerical_Algorithms'
BASE_PATH = 'data/paper'
plot_vdp_solution()
plot_quench_solution()
plot_recovery_rate(get_stats(run_vdp))
plot_fault_vdp(0)
plot_fault_vdp(13)
plot_adaptivity_stuff()
compare_recovery_rate_problems()
work_precision()
def make_plots_for_notes(): # pragma: no cover
"""
Make plots for the notes for the website / GitHub
"""
global JOURNAL, BASE_PATH
JOURNAL = 'Springer_Numerical_Algorithms'
BASE_PATH = 'notes/Lorenz'
analyse_resilience(run_Lorenz, format='png')
analyse_resilience(run_quench, format='png')
if __name__ == "__main__":
# make_plots_for_notes()
# make_plots_for_SIAM_CSE23()
# make_plots_for_TIME_X_website()
make_plots_for_paper()
| 23,448 | 32.885838 | 154 | py |
pySDC | pySDC-master/pySDC/projects/RDC/equidistant_RDC.py | import numpy as np
from scipy.special import roots_legendre
from scipy.interpolate import BarycentricInterpolator
from pySDC.core.Errors import CollocationError, ParameterError
from pySDC.core.Collocation import CollBase
class MyBarycentricInterpolator(BarycentricInterpolator):
"""
    Overwrite BarycentricInterpolator to inject custom weights
"""
def __init__(self, xi, yi=None, weights=None, axis=0):
super(MyBarycentricInterpolator, self).__init__(xi, yi, axis)
self.wi = weights
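        # overwrite the barycentric weights computed by the parent class with the supplied ones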
class Equidistant_RDC(CollBase):
"""
Implements equidistant nodes with blended barycentric interpolation
Attributes:
fh_weights: blended FH weights for barycentric interpolation
"""
def __init__(self, num_nodes, tleft=0, tright=1, **kwargs):
"""
Initialization
Args:
num_nodes: number of nodes
tleft (float): left interval boundary (usually 0)
tright (float): right interval boundary (usually 1)
"""
if type(num_nodes) is int:
max_d = 15
nnodes = num_nodes
else:
if type(num_nodes) is not tuple:
raise ParameterError('Expecting int or tuple for num_nodes parameter, got %s' % type(num_nodes))
if len(num_nodes) != 2:
raise ParameterError('Expecting 1 or 2 arguments for num_nodes, got %s' % num_nodes)
if type(num_nodes[0]) is not int:
raise ParameterError('Expecting int type for first num_nodes argument, got %s' % type(num_nodes[0]))
if type(num_nodes[1]) is not int:
raise ParameterError('Expecting int type for second num_nodes argument, got %s' % type(num_nodes[1]))
max_d = num_nodes[1]
nnodes = num_nodes[0]
if nnodes < 2:
raise CollocationError("Number of nodes should be at least 2 for equidistant, but is %d" % num_nodes)
try:
super(Equidistant_RDC, self).__init__(num_nodes=nnodes, node_type='EQUID', quad_type='LOBATTO', **kwargs)
except AttributeError:
pass
self.order = self.num_nodes
self.nodes = self._getNodes
d = min(self.num_nodes - 1, max_d)
self.fh_weights = self._getFHWeights(d)
self.weights = self._getWeights(tleft, tright)
self.Qmat = self._gen_Qmatrix
self.Smat = self._gen_Smatrix
self.delta_m = self._gen_deltas
self.left_is_node = True
self.right_is_node = True
def _getFHWeights(self, d):
"""
Computes blended FH weights for barycentric interpolation
This method is ported from Georges Klein's matlab function
Args:
d (int): blending parameter
Returns:
numpy.ndarray: weights
"""
n = self.num_nodes - 1
w = np.zeros(n + 1)
for k in range(0, n + 1):
ji = max(k - d, 0)
jf = min(k, n - d)
sumcoeff = []
for i in range(ji, jf + 1):
prodterm = []
for j in range(i, i + d + 1):
if j == k:
prodterm.append(1)
else:
prodterm.append(self.nodes[k] - self.nodes[j])
product = 1.0 / np.prod(prodterm)
sumcoeff.append((-1) ** (i - 1) * product)
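            # sum contributions from smallest to largest magnitude to reduce round-off errors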
y = sorted(sumcoeff, key=abs)
w[k] = np.sum(y)
return w
def _getWeights(self, a, b):
"""
Computes weights using custom barycentric interpolation
Args:
a (float): left interval boundary
b (float): right interval boundary
Returns:
numpy.ndarray: weights of the collocation formula given by the nodes
"""
if self.nodes is None:
raise CollocationError("Need nodes before computing weights, got %s" % self.nodes)
circ_one = np.zeros(self.num_nodes)
circ_one[0] = 1.0
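        # np.roll(circ_one, i) is 1 at node i and 0 elsewhere, i.e. the data of the i-th cardinal basis function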
tcks = []
for i in range(self.num_nodes):
tcks.append(MyBarycentricInterpolator(self.nodes, np.roll(circ_one, i), self.fh_weights))
# Generate evaluation points for quadrature
tau, omega = roots_legendre(self.num_nodes)
phi = (b - a) / 2 * tau + (b + a) / 2
weights = [np.sum((b - a) / 2 * omega * p(phi)) for p in tcks]
weights = np.array(weights)
return weights
@property
def _gen_Qmatrix(self):
"""
Compute tleft-to-node integration matrix for later use in collocation formulation
Returns:
numpy.ndarray: matrix containing the weights for tleft to node
"""
if self.nodes is None:
raise CollocationError(f"Need nodes before computing weights, got {self.nodes}")
M = self.num_nodes
Q = np.zeros([M + 1, M + 1])
# Generate Lagrange polynomials associated to the nodes
circ_one = np.zeros(self.num_nodes)
circ_one[0] = 1.0
tcks = []
for i in range(M):
tcks.append(MyBarycentricInterpolator(self.nodes, np.roll(circ_one, i), self.fh_weights))
# Generate evaluation points for quadrature
a, b = self.tleft, self.nodes[:, None]
tau, omega = roots_legendre(self.num_nodes)
tau, omega = tau[None, :], omega[None, :]
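        # map the Gauss-Legendre nodes from [-1, 1] onto each interval [tleft, tau_m] (one row per collocation node)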
phi = (b - a) / 2 * tau + (b + a) / 2
# Compute quadrature
intQ = np.array([np.sum((b - a) / 2 * omega * p(phi), axis=-1) for p in tcks])
# Store into Q matrix
Q[1:, 1:] = intQ.T
return Q
| 5,629 | 32.313609 | 117 | py |
pySDC | pySDC-master/pySDC/projects/RDC/vanderpol_error_test.py | import matplotlib
matplotlib.use('Agg')
import matplotlib.pylab as plt
import numpy as np
import pickle
import os
from pySDC.implementations.problem_classes.Van_der_Pol_implicit import vanderpol
from pySDC.implementations.sweeper_classes.generic_implicit import generic_implicit
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.projects.RDC.equidistant_RDC import Equidistant_RDC
def compute_RDC_errors():
"""
Van der Pol's oscillator with RDC
"""
# initialize level parameters
level_params = dict()
level_params['restol'] = 0
level_params['dt'] = 10.0 / 40.0
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['collocation_class'] = Equidistant_RDC
sweeper_params['num_nodes'] = 41
sweeper_params['QI'] = 'IE'
# initialize problem parameters
problem_params = dict()
problem_params['newton_tol'] = 1e-14
problem_params['newton_maxiter'] = 50
problem_params['mu'] = 10
problem_params['u0'] = (2.0, 0)
# initialize step parameters
step_params = dict()
step_params['maxiter'] = None
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 30
# Fill description dictionary for easy hierarchy creation
description = dict()
description['problem_class'] = vanderpol
description['problem_params'] = problem_params
description['sweeper_class'] = generic_implicit
description['sweeper_params'] = sweeper_params
description['level_params'] = level_params
description['step_params'] = step_params
# instantiate the controller
controller_rdc = controller_nonMPI(num_procs=1, controller_params=controller_params, description=description)
# set time parameters
t0 = 0.0
Tend = 10.0
# get initial values on finest level
P = controller_rdc.MS[0].levels[0].prob
uinit = P.u_exact(t0)
ref_sol = np.load('data/vdp_ref.npy')
maxiter_list = range(1, 11)
results = dict()
results['maxiter_list'] = maxiter_list
for maxiter in maxiter_list:
# ugly, but much faster than re-initializing the controller over and over again
controller_rdc.MS[0].params.maxiter = maxiter
# call main function to get things done...
uend_rdc, stats_rdc = controller_rdc.run(u0=uinit, t0=t0, Tend=Tend)
err = np.linalg.norm(uend_rdc - ref_sol, np.inf) / np.linalg.norm(ref_sol, np.inf)
print('Maxiter = %2i -- Error: %8.4e' % (controller_rdc.MS[0].params.maxiter, err))
results[maxiter] = err
fname = 'data/vdp_results.pkl'
file = open(fname, 'wb')
pickle.dump(results, file)
file.close()
assert os.path.isfile(fname), 'ERROR: pickle did not create file'
def plot_RDC_results(cwd=''):
"""
Routine to visualize the errors
Args:
cwd (string): current working directory
"""
file = open(cwd + 'data/vdp_results.pkl', 'rb')
results = pickle.load(file, encoding='latin-1')
file.close()
    # retrieve the list of maxiters from results
assert 'maxiter_list' in results, 'ERROR: expecting the list of maxiters in the results dictionary'
maxiter_list = sorted(results['maxiter_list'])
# Set up plotting parameters
params = {
'legend.fontsize': 20,
'figure.figsize': (12, 8),
'axes.labelsize': 20,
'axes.titlesize': 20,
'xtick.labelsize': 16,
'ytick.labelsize': 16,
'lines.linewidth': 3,
}
plt.rcParams.update(params)
# create new figure
plt.figure()
    # take x-axis limits from maxiter_list + some spacing left and right
plt.xlim([min(maxiter_list) - 1, max(maxiter_list) + 1])
plt.xlabel('maxiter')
plt.ylabel('rel. error')
plt.grid()
min_err = 1e99
max_err = 0e00
err_list = []
    # loop over maxiters, get errors and find min/max error for y-axis limits
for maxiter in maxiter_list:
err = results[maxiter]
min_err = min(err, min_err)
max_err = max(err, max_err)
err_list.append(err)
plt.semilogy(maxiter_list, err_list, ls='-', marker='o', markersize=10, label='RDC')
# adjust y-axis limits, add legend
plt.ylim([min_err / 10, max_err * 10])
plt.legend(loc=1, ncol=1, numpoints=1)
# plt.show()
# save plot as PNG, beautify
fname = 'data/RDC_errors_vdp.png'
plt.savefig(fname, bbox_inches='tight')
assert os.path.isfile(fname), 'ERROR: plot was not created'
return None
if __name__ == "__main__":
compute_RDC_errors()
plot_RDC_results()
| 4,621 | 28.069182 | 113 | py |
pySDC | pySDC-master/pySDC/projects/RDC/vanderpol_MLSDC_PFASST_test.py | import numpy as np
from pySDC.helpers.stats_helper import get_sorted
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.implementations.problem_classes.Van_der_Pol_implicit import vanderpol
from pySDC.implementations.sweeper_classes.generic_implicit import generic_implicit
from pySDC.implementations.transfer_classes.TransferMesh_NoCoarse import mesh_to_mesh
from pySDC.projects.RDC.equidistant_RDC import Equidistant_RDC
def run_RDC(cwd=''):
"""
Van der Pol's oscillator with RDC, MLRDC and PFASST
Args:
cwd (string): current working directory
Returns:
list: list of errors and mean number of iterations (for testing)
"""
# set time parameters
t0 = 0.0
Tend = 10.0
# initialize level parameters
level_params = dict()
level_params['restol'] = 1e-10
level_params['dt'] = 0.25
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['collocation_class'] = Equidistant_RDC
sweeper_params['num_nodes'] = 20
sweeper_params['QI'] = 'IE'
# initialize problem parameters
problem_params = dict()
problem_params['newton_tol'] = 1e-12
problem_params['newton_maxiter'] = 50
problem_params['mu'] = 10
problem_params['u0'] = (2.0, 0)
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 50
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 50
base_transfer_params = dict()
# base_transfer_params['finter'] = True
# base_transfer_params['coll_iorder'] = 2
# base_transfer_params['coll_rorder'] = 2
# Fill description dictionary for easy hierarchy creation
description = dict()
description['problem_class'] = vanderpol
description['problem_params'] = problem_params
description['sweeper_class'] = generic_implicit
description['sweeper_params'] = sweeper_params
description['level_params'] = level_params
description['step_params'] = step_params
description['space_transfer_class'] = mesh_to_mesh
description['base_transfer_params'] = base_transfer_params
results = []
ref_sol = np.load(cwd + 'data/vdp_ref.npy')
# instantiate the controller
controller_rdc = controller_nonMPI(num_procs=1, controller_params=controller_params, description=description)
# get initial values on finest level
P = controller_rdc.MS[0].levels[0].prob
uinit = P.u_exact(t0)
# call main function to get things done...
uend_rdc, stats_rdc = controller_rdc.run(u0=uinit, t0=t0, Tend=Tend)
# filter statistics by type (number of iterations)
iter_counts = get_sorted(stats_rdc, type='niter', sortby='time')
mean_niter = np.mean(np.array([item[1] for item in iter_counts]))
err = np.linalg.norm(uend_rdc - ref_sol, np.inf) / np.linalg.norm(ref_sol, np.inf)
print('RDC : Mean number of iterations: %6.3f -- Error: %8.4e' % (mean_niter, err))
results.append((err, mean_niter))
sweeper_params['num_nodes'] = [sweeper_params['num_nodes'], 10]
controller_mlrdc = controller_nonMPI(num_procs=1, controller_params=controller_params, description=description)
uend_mlrdc, stats_mlrdc = controller_mlrdc.run(u0=uinit, t0=t0, Tend=Tend)
# filter statistics by type (number of iterations)
iter_counts = get_sorted(stats_mlrdc, type='niter', sortby='time')
mean_niter = np.mean(np.array([item[1] for item in iter_counts]))
err = np.linalg.norm(uend_mlrdc - ref_sol, np.inf) / np.linalg.norm(ref_sol, np.inf)
print('MLRDC : Mean number of iterations: %6.3f -- Error: %8.4e' % (mean_niter, err))
results.append((err, mean_niter))
controller_pfasst = controller_nonMPI(num_procs=10, controller_params=controller_params, description=description)
uend_pfasst, stats_pfasst = controller_pfasst.run(u0=uinit, t0=t0, Tend=Tend)
# filter statistics by type (number of iterations)
iter_counts = get_sorted(stats_pfasst, type='niter', sortby='time')
mean_niter = np.mean(np.array([item[1] for item in iter_counts]))
err = np.linalg.norm(uend_pfasst - ref_sol, np.inf) / np.linalg.norm(ref_sol, np.inf)
print('PFASST(10): Mean number of iterations: %6.3f -- Error: %8.4e' % (mean_niter, err))
results.append((err, mean_niter))
return results
if __name__ == "__main__":
results = run_RDC()
| 4,417 | 36.12605 | 117 | py |
pySDC | pySDC-master/pySDC/projects/RDC/vanderpol_reference.py | import numpy as np
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.implementations.problem_classes.Van_der_Pol_implicit import vanderpol
from pySDC.implementations.sweeper_classes.generic_implicit import generic_implicit
def main():
"""
Van der Pol's oscillator reference solution
"""
# set time parameters
t0 = 0.0
Tend = 10.0
# initialize level parameters
level_params = dict()
level_params['restol'] = 1e-12
level_params['dt'] = (Tend - t0) / 2000.0
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['num_nodes'] = 5
sweeper_params['QI'] = 'IE'
# initialize problem parameters
problem_params = dict()
problem_params['newton_tol'] = 1e-14
problem_params['newton_maxiter'] = 50
problem_params['mu'] = 10
problem_params['u0'] = (2.0, 0)
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 50
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 20
# Fill description dictionary for easy hierarchy creation
description = dict()
description['problem_class'] = vanderpol
description['problem_params'] = problem_params
description['sweeper_class'] = generic_implicit
description['sweeper_params'] = sweeper_params
description['level_params'] = level_params
description['step_params'] = step_params
# instantiate the controller
controller_ref = controller_nonMPI(num_procs=1, controller_params=controller_params, description=description)
# get initial values on finest level
P = controller_ref.MS[0].levels[0].prob
uinit = P.u_exact(t0)
uend_ref, stats_ref = controller_ref.run(u0=uinit, t0=t0, Tend=Tend)
np.save('data/vdp_ref.npy', uend_ref)
if __name__ == "__main__":
main()
| 1,947 | 28.074627 | 113 | py |
pySDC | pySDC-master/pySDC/projects/RDC/__init__.py | 0 | 0 | 0 | py |
|
pySDC | pySDC-master/pySDC/projects/Performance/__init__.py | 0 | 0 | 0 | py |
|
pySDC | pySDC-master/pySDC/projects/Performance/run_simple_forcing_benchmark.py | from argparse import ArgumentParser
import numpy as np
from mpi4py import MPI
from pySDC.helpers.stats_helper import get_sorted
from pySDC.implementations.controller_classes.controller_MPI import controller_MPI
from pySDC.implementations.sweeper_classes.imex_1st_order import imex_1st_order
from pySDC.implementations.problem_classes.AllenCahn_MPIFFT import allencahn_imex, allencahn_imex_timeforcing
from pySDC.implementations.transfer_classes.TransferMesh_MPIFFT import fft_to_fft
# from pySDC.projects.AllenCahn_Bayreuth.AllenCahn_dump import dump
# from pySDC.projects.Performance.controller_MPI_scorep import controller_MPI
def run_simulation(name=None, nprocs_space=None):
"""
A simple test program to do PFASST runs for the AC equation
"""
# set MPI communicator
comm = MPI.COMM_WORLD
world_rank = comm.Get_rank()
world_size = comm.Get_size()
# split world communicator to create space-communicators
if nprocs_space is not None:
color = int(world_rank / nprocs_space)
else:
color = int(world_rank / 1)
space_comm = comm.Split(color=color)
space_comm.Set_name('Space-Comm')
space_size = space_comm.Get_size()
space_rank = space_comm.Get_rank()
# split world communicator to create time-communicators
if nprocs_space is not None:
color = int(world_rank % nprocs_space)
else:
color = int(world_rank / world_size)
time_comm = comm.Split(color=color)
time_comm.Set_name('Time-Comm')
time_size = time_comm.Get_size()
time_rank = time_comm.Get_rank()
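    # Example (sketch): with 8 MPI ranks and nprocs_space=4, ranks 0-3 and 4-7 form the two
    # space communicators, while ranks {0,4}, {1,5}, {2,6}, {3,7} form the four time communicators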
# print(time_size, space_size, world_size)
# initialize level parameters
level_params = dict()
level_params['restol'] = 1e-08
level_params['dt'] = 1e-03
level_params['nsweeps'] = [3, 1]
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['num_nodes'] = [3]
sweeper_params['QI'] = ['LU'] # For the IMEX sweeper, the LU-trick can be activated for the implicit part
sweeper_params['initial_guess'] = 'zero'
# initialize problem parameters
problem_params = dict()
problem_params['L'] = 4.0
# problem_params['L'] = 16.0
problem_params['nvars'] = [(48 * 12, 48 * 12), (8 * 12, 8 * 12)]
# problem_params['nvars'] = [(48 * 48, 48 * 48), (8 * 48, 8 * 48)]
problem_params['eps'] = [0.04]
problem_params['radius'] = 0.25
problem_params['comm'] = space_comm
problem_params['name'] = name
problem_params['init_type'] = 'circle_rand'
problem_params['spectral'] = False
if name == 'AC-bench-constforce':
problem_params['dw'] = [-23.59]
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 50
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 30 if space_rank == 0 else 99 # set level depending on rank
controller_params['predict_type'] = 'fine_only'
# controller_params['hook_class'] = dump # activate to get data output at each step
# fill description dictionary for easy step instantiation
description = dict()
description['problem_params'] = problem_params # pass problem parameters
description['sweeper_class'] = imex_1st_order
description['sweeper_params'] = sweeper_params # pass sweeper parameters
description['level_params'] = level_params # pass level parameters
description['step_params'] = step_params # pass step parameters
description['space_transfer_class'] = fft_to_fft
if name == 'AC-bench-noforce' or name == 'AC-bench-constforce':
description['problem_class'] = allencahn_imex
elif name == 'AC-bench-timeforce':
description['problem_class'] = allencahn_imex_timeforcing
else:
raise NotImplementedError(f'{name} is not implemented')
# set time parameters
t0 = 0.0
Tend = 240 * 0.001
if space_rank == 0 and time_rank == 0:
out = f'---------> Running {name} with {time_size} process(es) in time and {space_size} process(es) in space...'
print(out)
# instantiate controller
controller = controller_MPI(controller_params=controller_params, description=description, comm=time_comm)
# get initial values on finest level
P = controller.S.levels[0].prob
uinit = P.u_exact(t0)
# call main function to get things done...
uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
timing = get_sorted(stats, type='timing_setup', sortby='time')
max_timing_setup = time_comm.allreduce(timing[0][1], MPI.MAX)
timing = get_sorted(stats, type='timing_run', sortby='time')
max_timing = time_comm.allreduce(timing[0][1], MPI.MAX)
if space_rank == 0 and time_rank == time_size - 1:
print()
out = f'Setup time: {max_timing_setup:.4f} sec.'
print(out)
out = f'Time to solution: {max_timing:.4f} sec.'
print(out)
iter_counts = get_sorted(stats, type='niter', sortby='time')
niters = np.array([item[1] for item in iter_counts])
out = f'Mean number of iterations: {np.mean(niters):.4f}'
print(out)
if __name__ == "__main__":
# Add parser to get number of processors in space and setup (have to do this here to enable automatic testing)
parser = ArgumentParser()
parser.add_argument(
"-s",
"--setup",
help='Specifies the setup',
type=str,
default='AC-bench-noforce',
choices=['AC-bench-noforce', 'AC-bench-constforce', 'AC-bench-timeforce'],
)
parser.add_argument("-n", "--nprocs_space", help='Specifies the number of processors in space', type=int)
args = parser.parse_args()
run_simulation(name=args.setup, nprocs_space=args.nprocs_space)
| 5,791 | 35.658228 | 120 | py |
pySDC | pySDC-master/pySDC/projects/Performance/visualize.py | import glob
import json
import numpy as np
import pySDC.helpers.plot_helper as plt_helper
def joint_plots(list_of_result_paths):
# get result data from JUBE tables
results = []
for result_path in list_of_result_paths:
results.append(np.genfromtxt(result_path, names=True, skip_header=1, delimiter='|', dtype=float, comments='--'))
# fill arrays with data
ncores = np.concatenate(results)['ntasks'] * np.concatenate(results)['nnodes']
timings_space = results[0]['timing_pat']
timings_spacetime = results[1]['timing_pat']
ideal = [timings_space[0] / (c / ncores[0]) for c in np.unique(ncores)]
# setup and fill plots
plt_helper.setup_mpl()
plt_helper.newfig(textwidth=238.96, scale=1.0)
plt_helper.plt.loglog(np.unique(ncores), ideal, 'k--', label='ideal')
plt_helper.plt.loglog(
ncores[0 : len(results[0])],
timings_space,
lw=1,
ls='-',
color='b',
marker='o',
markersize=4,
markeredgecolor='k',
label='parallel-in-space',
)
plt_helper.plt.loglog(
ncores[-len(results[1]) :],
timings_spacetime,
lw=1,
ls='-',
color='r',
marker='d',
markersize=4,
markeredgecolor='k',
label='parallel-in-space-time',
)
plt_helper.plt.grid()
plt_helper.plt.legend(loc=3, ncol=1)
plt_helper.plt.xlabel('Number of cores')
plt_helper.plt.ylabel('Time [s]')
# save plot, beautify
fname = 'data/scaling'
plt_helper.savefig(fname)
def plot_data(name=''):
"""
Visualization using numpy arrays (written via MPI I/O) and json description
Produces one png file per time-step, combine as movie via e.g.
> ffmpeg -i data/name_%08d.png name.mp4
Args:
name (str): name of the simulation (expects data to be in data path)
"""
# get data and json files
json_files = sorted(glob.glob(f'./data/{name}_*.json'))
data_files = sorted(glob.glob(f'./data/{name}_*.dat'))
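    # each .dat file holds the raw field written via MPI I/O; the matching .json file stores dtype, shape and time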
# setup plotting
plt_helper.setup_mpl()
for json_file, data_file in zip(json_files, data_files):
with open(json_file, 'r') as fp:
obj = json.load(fp)
index = json_file.split('_')[1].split('.')[0]
print(f'Working on step {index}...')
# get data and format
array = np.fromfile(data_file, dtype=obj['datatype'])
array = array.reshape(obj['shape'], order='C')
# plot
plt_helper.newfig(textwidth=238.96, scale=1.0)
plt_helper.plt.imshow(array, vmin=0, vmax=1, extent=[-2, 2, -2, 2], origin='lower')
plt_helper.plt.yticks(range(-2, 3))
cbar = plt_helper.plt.colorbar()
cbar.set_label('concentration')
# plt_helper.plt.title(f"Time: {obj['time']:6.4f}")
# save plot, beautify
fname = f'data/{name}_{index}'
plt_helper.savefig(fname, save_pgf=False, save_png=False)
if __name__ == '__main__':
list_of_result_paths = [
'data/bench_run_SPxTS/000004/result/result.dat',
'data/bench_run_SPxTP/000002/result/result.dat',
]
# joint_plots(list_of_result_paths)
plot_data(name='AC-bench-noforce')
| 3,204 | 28.675926 | 120 | py |
pySDC | pySDC-master/pySDC/projects/Performance/controller_MPI_scorep.py | import numpy as np
from mpi4py import MPI
from pySDC.core.Controller import controller
from pySDC.core.Errors import ControllerError
from pySDC.core.Step import step
from pySDC.implementations.convergence_controller_classes.check_convergence import CheckConvergence
import scorep.user as spu
class controller_MPI(controller):
"""
PFASST controller, running parallel version of PFASST in blocks (MG-style)
"""
def __init__(self, controller_params, description, comm):
"""
Initialization routine for PFASST controller
Args:
controller_params: parameter set for the controller and the step class
description: all the parameters to set up the rest (levels, problems, transfer, ...)
comm: MPI communicator
"""
# call parent's initialization routine
super(controller_MPI, self).__init__(controller_params)
# create single step per processor
self.S = step(description)
# pass communicator for future use
self.comm = comm
# add request handler for status send
self.req_status = None
num_procs = self.comm.Get_size()
rank = self.comm.Get_rank()
# insert data on time communicator to the steps (helpful here and there)
self.S.status.time_size = num_procs
if self.params.dump_setup and rank == 0:
self.dump_setup(step=self.S, controller_params=controller_params, description=description)
num_levels = len(self.S.levels)
# add request handle container for isend
self.req_send = [None] * num_levels
if num_procs > 1 and num_levels > 1:
for L in self.S.levels:
if not L.sweep.coll.right_is_node or L.sweep.params.do_coll_update:
raise ControllerError("For PFASST to work, we assume uend^k = u_M^k")
if num_levels == 1 and self.params.predict_type is not None:
self.logger.warning(
'you have specified a predictor type but only a single level.. ' 'predictor will be ignored'
)
def run(self, u0, t0, Tend):
"""
Main driver for running the parallel version of SDC, MSSDC, MLSDC and PFASST
Args:
u0: initial values
t0: starting time
Tend: ending time
Returns:
end values on the finest level
stats object containing statistics for each step, each level and each iteration
"""
# reset stats to prevent double entries from old runs
self.hooks.reset_stats()
# find active processes and put into new communicator
rank = self.comm.Get_rank()
num_procs = self.comm.Get_size()
all_dt = self.comm.allgather(self.S.dt)
all_time = [t0 + sum(all_dt[0:i]) for i in range(num_procs)]
time = all_time[rank]
        all_active = np.array(all_time) < Tend - 10 * np.finfo(float).eps
if not any(all_active):
raise ControllerError('Nothing to do, check t0, dt and Tend')
active = all_active[rank]
if not all(all_active):
comm_active = self.comm.Split(active)
rank = comm_active.Get_rank()
num_procs = comm_active.Get_size()
else:
comm_active = self.comm
self.S.status.slot = rank
# initialize block of steps with u0
self.restart_block(num_procs, time, u0)
uend = u0
# call post-setup hook
self.hooks.post_setup(step=None, level_number=None)
# call pre-run hook
self.hooks.pre_run(step=self.S, level_number=0)
comm_active.Barrier()
# while any process still active...
while active:
while not self.S.status.done:
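                # wrap each PFASST stage in a Score-P user region so the stages show up separately in traces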
name = f'REGION -- {self.S.status.stage} -- {self.S.status.slot}'
spu.region_begin(name)
self.pfasst(comm_active, num_procs)
spu.region_end(name)
time += self.S.dt
            # broadcast uend, set new times and find active processes
tend = comm_active.bcast(time, root=num_procs - 1)
uend = self.S.levels[0].uend.bcast(root=num_procs - 1, comm=comm_active)
all_dt = comm_active.allgather(self.S.dt)
all_time = [tend + sum(all_dt[0:i]) for i in range(num_procs)]
time = all_time[rank]
            all_active = np.array(all_time) < Tend - 10 * np.finfo(float).eps
active = all_active[rank]
if not all(all_active):
comm_active = comm_active.Split(active)
rank = comm_active.Get_rank()
num_procs = comm_active.Get_size()
self.S.status.slot = rank
# initialize block of steps with u0
self.restart_block(num_procs, time, uend)
# call post-run hook
self.hooks.post_run(step=self.S, level_number=0)
comm_active.Free()
return uend, self.hooks.return_stats()
def restart_block(self, size, time, u0):
"""
Helper routine to reset/restart block of (active) steps
Args:
size: number of active time steps
time: current time
u0: initial value to distribute across the steps
Returns:
block of (all) steps
"""
# store link to previous step
self.S.prev = self.S.status.slot - 1
self.S.next = self.S.status.slot + 1
# resets step
self.S.reset_step()
# determine whether I am the first and/or last in line
self.S.status.first = self.S.prev == -1
self.S.status.last = self.S.next == size
        # initialize step with u0
self.S.init_step(u0)
# reset some values
self.S.status.done = False
self.S.status.iter = 0
self.S.status.stage = 'SPREAD'
for l in self.S.levels:
l.tag = None
self.req_status = None
self.req_send = [None] * len(self.S.levels)
self.S.status.prev_done = False
self.S.status.time_size = size
for lvl in self.S.levels:
lvl.status.time = time
lvl.status.sweep = 1
@staticmethod
def recv(target, source, tag=None, comm=None):
"""
Receive function
Args:
target: level which will receive the values
source: level which initiated the send
tag: identifier to check if this message is really for me
comm: communicator
"""
target.u[0].recv(source=source, tag=tag, comm=comm)
# re-evaluate f on left interval boundary
target.f[0] = target.prob.eval_f(target.u[0], target.time)
def predictor(self, comm):
"""
        Predictor function, extracted from the stepwise implementation (will also be used by matrix sweepers)
Args:
comm: communicator
"""
if self.params.predict_type is None:
pass
elif self.params.predict_type == 'fine_only':
# do a fine sweep only
self.S.levels[0].sweep.update_nodes()
elif self.params.predict_type == 'libpfasst_style':
# restrict to coarsest level
for l in range(1, len(self.S.levels)):
self.S.transfer(source=self.S.levels[l - 1], target=self.S.levels[l])
self.hooks.pre_comm(step=self.S, level_number=len(self.S.levels) - 1)
if not self.S.status.first:
self.logger.debug(
'recv data predict: process %s, stage %s, time, %s, source %s, tag %s'
% (self.S.status.slot, self.S.status.stage, self.S.time, self.S.prev, self.S.status.iter)
)
self.recv(target=self.S.levels[-1], source=self.S.prev, tag=self.S.status.iter, comm=comm)
self.hooks.post_comm(step=self.S, level_number=len(self.S.levels) - 1)
# do the sweep with new values
self.S.levels[-1].sweep.update_nodes()
self.S.levels[-1].sweep.compute_end_point()
self.hooks.pre_comm(step=self.S, level_number=len(self.S.levels) - 1)
if not self.S.status.last:
self.logger.debug(
'send data predict: process %s, stage %s, time, %s, target %s, tag %s'
% (self.S.status.slot, self.S.status.stage, self.S.time, self.S.next, self.S.status.iter)
)
self.S.levels[-1].uend.send(dest=self.S.next, tag=self.S.status.iter, comm=comm)
self.hooks.post_comm(step=self.S, level_number=len(self.S.levels) - 1, add_to_stats=True)
# go back to fine level, sweeping
for l in range(len(self.S.levels) - 1, 0, -1):
# prolong values
self.S.transfer(source=self.S.levels[l], target=self.S.levels[l - 1])
# on middle levels: do sweep as usual
if l - 1 > 0:
self.S.levels[l - 1].sweep.update_nodes()
# end with a fine sweep
self.S.levels[0].sweep.update_nodes()
elif self.params.predict_type == 'pfasst_burnin':
# restrict to coarsest level
for l in range(1, len(self.S.levels)):
self.S.transfer(source=self.S.levels[l - 1], target=self.S.levels[l])
for p in range(self.S.status.slot + 1):
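                # burn-in: the process in slot p runs p+1 coarse sweep/communication rounds, staggering the pipeline start-up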
self.hooks.pre_comm(step=self.S, level_number=len(self.S.levels) - 1)
if not p == 0 and not self.S.status.first:
self.logger.debug(
'recv data predict: process %s, stage %s, time, %s, source %s, tag %s, phase %s'
% (self.S.status.slot, self.S.status.stage, self.S.time, self.S.prev, self.S.status.iter, p)
)
self.recv(target=self.S.levels[-1], source=self.S.prev, tag=self.S.status.iter, comm=comm)
self.hooks.post_comm(step=self.S, level_number=len(self.S.levels) - 1)
# do the sweep with new values
self.S.levels[-1].sweep.update_nodes()
self.S.levels[-1].sweep.compute_end_point()
self.hooks.pre_comm(step=self.S, level_number=len(self.S.levels) - 1)
if not self.S.status.last:
self.logger.debug(
'send data predict: process %s, stage %s, time, %s, target %s, tag %s, phase %s'
% (self.S.status.slot, self.S.status.stage, self.S.time, self.S.next, self.S.status.iter, p)
)
self.S.levels[-1].uend.send(dest=self.S.next, tag=self.S.status.iter, comm=comm)
self.hooks.post_comm(
step=self.S, level_number=len(self.S.levels) - 1, add_to_stats=(p == self.S.status.slot)
)
# interpolate back to finest level
for l in range(len(self.S.levels) - 1, 0, -1):
self.S.transfer(source=self.S.levels[l], target=self.S.levels[l - 1])
# end this with a fine sweep
self.S.levels[0].sweep.update_nodes()
elif self.params.predict_type == 'fmg':
# TODO: implement FMG predictor
raise NotImplementedError('FMG predictor is not yet implemented')
else:
raise ControllerError('Wrong predictor type, got %s' % self.params.predict_type)
def pfasst(self, comm, num_procs):
"""
Main function including the stages of SDC, MLSDC and PFASST (the "controller")
For the workflow of this controller, check out one of our PFASST talks
Args:
comm: communicator
num_procs (int): number of parallel processes
"""
stage = self.S.status.stage
self.logger.debug(stage + ' - process ' + str(self.S.status.slot))
if stage == 'SPREAD':
# (potentially) serial spreading phase
# first stage: spread values
self.hooks.pre_step(step=self.S, level_number=0)
# call predictor from sweeper
self.S.levels[0].sweep.predict()
# update stage
if len(self.S.levels) > 1: # MLSDC or PFASST with predict
self.S.status.stage = 'PREDICT'
else:
self.S.status.stage = 'IT_CHECK'
elif stage == 'PREDICT':
# call predictor (serial)
self.hooks.pre_predict(step=self.S, level_number=0)
self.predictor(comm)
self.hooks.post_predict(step=self.S, level_number=0)
# update stage
# self.hooks.pre_iteration(step=self.S, level_number=0)
self.S.status.stage = 'IT_CHECK'
elif stage == 'IT_CHECK':
# check whether to stop iterating (parallel)
self.hooks.pre_comm(step=self.S, level_number=0)
if self.req_send[0] is not None:
self.req_send[0].wait()
self.S.levels[0].sweep.compute_end_point()
if not self.S.status.last and self.params.fine_comm:
self.logger.debug(
'isend data: process %s, stage %s, time %s, target %s, tag %s, iter %s'
% (self.S.status.slot, self.S.status.stage, self.S.time, self.S.next, 0, self.S.status.iter)
)
self.req_send[0] = self.S.levels[0].uend.isend(dest=self.S.next, tag=self.S.status.iter, comm=comm)
if not self.S.status.first and not self.S.status.prev_done and self.params.fine_comm:
self.logger.debug(
'recv data: process %s, stage %s, time %s, source %s, tag %s, iter %s'
% (self.S.status.slot, self.S.status.stage, self.S.time, self.S.prev, 0, self.S.status.iter)
)
self.recv(target=self.S.levels[0], source=self.S.prev, tag=self.S.status.iter, comm=comm)
self.hooks.post_comm(step=self.S, level_number=0)
self.S.levels[0].sweep.compute_residual()
self.S.status.done = CheckConvergence.check_convergence(self.S)
if self.params.all_to_done:
self.hooks.pre_comm(step=self.S, level_number=0)
self.S.status.done = comm.allreduce(sendobj=self.S.status.done, op=MPI.LAND)
self.hooks.post_comm(step=self.S, level_number=0, add_to_stats=True)
else:
self.hooks.pre_comm(step=self.S, level_number=0)
# check if an open request of the status send is pending
if self.req_status is not None:
self.req_status.wait()
# recv status
if not self.S.status.first and not self.S.status.prev_done:
self.S.status.prev_done = comm.recv(source=self.S.prev, tag=99)
self.logger.debug(
'recv status: status %s, process %s, time %s, target %s, tag %s, iter %s'
% (
self.S.status.prev_done,
self.S.status.slot,
self.S.time,
self.S.next,
99,
self.S.status.iter,
)
)
self.S.status.done = self.S.status.done and self.S.status.prev_done
# send status forward
if not self.S.status.last:
self.logger.debug(
'isend status: status %s, process %s, time %s, target %s, tag %s, iter %s'
% (self.S.status.done, self.S.status.slot, self.S.time, self.S.next, 99, self.S.status.iter)
)
self.req_status = comm.isend(self.S.status.done, dest=self.S.next, tag=99)
self.hooks.post_comm(step=self.S, level_number=0, add_to_stats=True)
if self.S.status.iter > 0:
self.hooks.post_iteration(step=self.S, level_number=0)
            # if not done yet, keep iterating
if not self.S.status.done:
# increment iteration count here (and only here)
self.S.status.iter += 1
self.hooks.pre_iteration(step=self.S, level_number=0)
if len(self.S.levels) > 1: # MLSDC or PFASST
self.S.status.stage = 'IT_UP'
else:
if num_procs == 1 or self.params.mssdc_jac: # SDC or parallel MSSDC (Jacobi-like)
self.S.status.stage = 'IT_FINE'
else:
self.S.status.stage = 'IT_COARSE' # serial MSSDC (Gauss-like)
else:
                # Need to finish all pending isend requests. These will occur for the first active process, since
# in the last iteration the wait statement will not be called ("send and forget")
for req in self.req_send:
if req is not None:
req.wait()
if self.req_status is not None:
self.req_status.wait()
self.hooks.post_step(step=self.S, level_number=0)
self.S.status.stage = 'DONE'
elif stage == 'IT_FINE':
nsweeps = self.S.levels[0].params.nsweeps
self.S.levels[0].status.sweep = 0
# do fine sweep
for k in range(nsweeps):
self.S.levels[0].status.sweep += 1
self.hooks.pre_comm(step=self.S, level_number=0)
if self.req_send[0] is not None:
self.req_send[0].wait()
self.S.levels[0].sweep.compute_end_point()
if not self.S.status.last and self.params.fine_comm:
self.logger.debug(
'isend data: process %s, stage %s, time %s, target %s, tag %s, iter %s'
% (self.S.status.slot, self.S.status.stage, self.S.time, self.S.next, 0, self.S.status.iter)
)
self.req_send[0] = self.S.levels[0].uend.isend(dest=self.S.next, tag=self.S.status.iter, comm=comm)
if not self.S.status.first and not self.S.status.prev_done and self.params.fine_comm:
self.logger.debug(
'recv data: process %s, stage %s, time %s, source %s, tag %s, iter %s'
% (self.S.status.slot, self.S.status.stage, self.S.time, self.S.prev, 0, self.S.status.iter)
)
self.recv(target=self.S.levels[0], source=self.S.prev, tag=self.S.status.iter, comm=comm)
self.hooks.post_comm(step=self.S, level_number=0, add_to_stats=(k == nsweeps - 1))
self.hooks.pre_sweep(step=self.S, level_number=0)
self.S.levels[0].sweep.update_nodes()
self.S.levels[0].sweep.compute_residual()
self.hooks.post_sweep(step=self.S, level_number=0)
# update stage
self.S.status.stage = 'IT_CHECK'
elif stage == 'IT_UP':
# go up the hierarchy from finest to coarsest level (parallel)
self.S.transfer(source=self.S.levels[0], target=self.S.levels[1])
# sweep and send on middle levels (not on finest, not on coarsest, though)
for l in range(1, len(self.S.levels) - 1):
nsweeps = self.S.levels[l].params.nsweeps
for _ in range(nsweeps):
self.hooks.pre_comm(step=self.S, level_number=l)
if self.req_send[l] is not None:
self.req_send[l].wait()
self.S.levels[l].sweep.compute_end_point()
if not self.S.status.last and self.params.fine_comm:
self.logger.debug(
'isend data: process %s, stage %s, time %s, target %s, tag %s, iter %s'
% (self.S.status.slot, self.S.status.stage, self.S.time, self.S.next, l, self.S.status.iter)
)
self.req_send[l] = self.S.levels[l].uend.isend(
dest=self.S.next, tag=self.S.status.iter, comm=comm
)
if not self.S.status.first and not self.S.status.prev_done and self.params.fine_comm:
self.logger.debug(
'recv data: process %s, stage %s, time %s, source %s, tag %s, iter %s'
% (self.S.status.slot, self.S.status.stage, self.S.time, self.S.prev, l, self.S.status.iter)
)
self.recv(target=self.S.levels[l], source=self.S.prev, tag=self.S.status.iter, comm=comm)
self.hooks.post_comm(step=self.S, level_number=l)
self.hooks.pre_sweep(step=self.S, level_number=l)
self.S.levels[l].sweep.update_nodes()
self.S.levels[l].sweep.compute_residual()
self.hooks.post_sweep(step=self.S, level_number=l)
# transfer further up the hierarchy
self.S.transfer(source=self.S.levels[l], target=self.S.levels[l + 1])
# update stage
self.S.status.stage = 'IT_COARSE'
elif stage == 'IT_COARSE':
# sweeps on coarsest level (serial/blocking)
# receive from previous step (if not first)
self.hooks.pre_comm(step=self.S, level_number=len(self.S.levels) - 1)
if not self.S.status.first and not self.S.status.prev_done:
self.logger.debug(
'recv data: process %s, stage %s, time %s, source %s, tag %s, iter %s'
% (
self.S.status.slot,
self.S.status.stage,
self.S.time,
self.S.prev,
len(self.S.levels) - 1,
self.S.status.iter,
)
)
self.recv(target=self.S.levels[-1], source=self.S.prev, tag=self.S.status.iter, comm=comm)
self.hooks.post_comm(step=self.S, level_number=len(self.S.levels) - 1)
# do the sweep
self.hooks.pre_sweep(step=self.S, level_number=len(self.S.levels) - 1)
assert self.S.levels[-1].params.nsweeps == 1, (
'ERROR: this controller can only work with one sweep on the coarse level, got %s'
% self.S.levels[-1].params.nsweeps
)
self.S.levels[-1].sweep.update_nodes()
self.S.levels[-1].sweep.compute_residual()
self.hooks.post_sweep(step=self.S, level_number=len(self.S.levels) - 1)
self.S.levels[-1].sweep.compute_end_point()
# send to next step
self.hooks.pre_comm(step=self.S, level_number=len(self.S.levels) - 1)
if not self.S.status.last:
self.logger.debug(
'send data: process %s, stage %s, time %s, target %s, tag %s, iter %s'
% (
self.S.status.slot,
self.S.status.stage,
self.S.time,
self.S.next,
len(self.S.levels) - 1,
self.S.status.iter,
)
)
self.S.levels[-1].uend.send(dest=self.S.next, tag=self.S.status.iter, comm=comm)
self.hooks.post_comm(step=self.S, level_number=len(self.S.levels) - 1, add_to_stats=True)
# update stage
if len(self.S.levels) > 1: # MLSDC or PFASST
self.S.status.stage = 'IT_DOWN'
else:
self.S.status.stage = 'IT_CHECK' # MSSDC
elif stage == 'IT_DOWN':
# prolong corrections down to finest level (parallel)
# receive and sweep on middle levels (except for coarsest level)
for l in range(len(self.S.levels) - 1, 0, -1):
# prolong values
self.S.transfer(source=self.S.levels[l], target=self.S.levels[l - 1])
# on middle levels: do sweep as usual
if l - 1 > 0:
nsweeps = self.S.levels[l - 1].params.nsweeps
for k in range(nsweeps):
self.hooks.pre_comm(step=self.S, level_number=l - 1)
if self.req_send[l - 1] is not None:
self.req_send[l - 1].wait()
self.S.levels[l - 1].sweep.compute_end_point()
if not self.S.status.last and self.params.fine_comm:
self.logger.debug(
'isend data: process %s, stage %s, time %s, target %s, tag %s, iter %s'
% (
self.S.status.slot,
self.S.status.stage,
self.S.time,
self.S.next,
l - 1,
self.S.status.iter,
)
)
self.req_send[l - 1] = self.S.levels[l - 1].uend.isend(
dest=self.S.next, tag=self.S.status.iter, comm=comm
)
if not self.S.status.first and not self.S.status.prev_done and self.params.fine_comm:
self.logger.debug(
'recv data: process %s, stage %s, time %s, source %s, tag %s, iter %s'
% (
self.S.status.slot,
self.S.status.stage,
self.S.time,
self.S.prev,
l - 1,
self.S.status.iter,
)
)
self.recv(
target=self.S.levels[l - 1], source=self.S.prev, tag=self.S.status.iter, comm=comm
)
self.hooks.post_comm(step=self.S, level_number=l - 1, add_to_stats=(k == nsweeps - 1))
self.hooks.pre_sweep(step=self.S, level_number=l - 1)
self.S.levels[l - 1].sweep.update_nodes()
self.S.levels[l - 1].sweep.compute_residual()
self.hooks.post_sweep(step=self.S, level_number=l - 1)
# update stage
self.S.status.stage = 'IT_FINE'
else:
raise ControllerError('Weird stage, got %s' % self.S.status.stage)
| 27,025 | 41.29421 | 120 | py |
pySDC | pySDC-master/pySDC/projects/soft_failure/generate_statistics.py | from __future__ import division
import dill
import numpy as np
from pySDC.helpers.stats_helper import get_sorted
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.implementations.problem_classes.GeneralizedFisher_1D_FD_implicit import generalized_fisher
from pySDC.implementations.problem_classes.HeatEquation_ND_FD import heatNd_unforced
from pySDC.implementations.problem_classes.Van_der_Pol_implicit import vanderpol
from pySDC.projects.soft_failure.FaultHooks import fault_hook
from pySDC.projects.soft_failure.implicit_sweeper_faults import implicit_sweeper_faults
from pySDC.projects.soft_failure.visualization_helper import (
show_residual_across_simulation,
show_min_max_residual_across_simulation,
show_iter_hist,
)
def diffusion_setup():
"""
Setup routine for diffusion test
"""
# initialize level parameters
level_params = dict()
level_params['restol'] = 1e-10
level_params['dt'] = 0.25
level_params['nsweeps'] = 1
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['num_nodes'] = 3
    sweeper_params['QI'] = 'LU'  # the LU-trick can be activated for the implicit part
sweeper_params['initial_guess'] = 'spread'
sweeper_params['detector_threshold'] = 1e-10
# initialize problem parameters
problem_params = dict()
problem_params['nu'] = 0.1 # diffusion coefficient
problem_params['freq'] = 4 # frequency for the test value
problem_params['nvars'] = 127 # number of degrees of freedom for each level
problem_params['bc'] = 'dirichlet-zero' # boundary conditions
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 50
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 30
# fill description dictionary for easy step instantiation
description = dict()
description['problem_class'] = heatNd_unforced # pass problem class
description['problem_params'] = problem_params # pass problem parameters
description['sweeper_class'] = implicit_sweeper_faults # pass sweeper
description['sweeper_params'] = sweeper_params # pass sweeper parameters
description['level_params'] = level_params # pass level parameters
description['step_params'] = step_params # pass step parameters
return description, controller_params
def reaction_setup():
"""
Setup routine for diffusion-reaction test with Newton solver
"""
# initialize level parameters
level_params = dict()
level_params['restol'] = 1e-10
level_params['dt'] = 0.25
level_params['nsweeps'] = 1
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['num_nodes'] = 3
    sweeper_params['QI'] = 'LU'  # the LU-trick can be activated for the implicit part
sweeper_params['initial_guess'] = 'spread'
sweeper_params['detector_threshold'] = 1e-10
# initialize problem parameters
problem_params = dict()
problem_params['nu'] = 1.0
problem_params['lambda0'] = 2.0
problem_params['newton_maxiter'] = 20
problem_params['newton_tol'] = 1e-10
problem_params['stop_at_nan'] = False
problem_params['interval'] = (-5, 5)
problem_params['nvars'] = 127
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 50
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 30
# fill description dictionary for easy step instantiation
description = dict()
description['problem_class'] = generalized_fisher # pass problem class
description['problem_params'] = problem_params # pass problem parameters
description['sweeper_class'] = implicit_sweeper_faults # pass sweeper
description['sweeper_params'] = sweeper_params # pass sweeper parameters
description['level_params'] = level_params # pass level parameters
description['step_params'] = step_params # pass step parameters
return description, controller_params
def vanderpol_setup():
"""
Van der Pol's oscillator
"""
# initialize level parameters
level_params = dict()
level_params['restol'] = 1e-10
level_params['dt'] = 0.25
level_params['nsweeps'] = 1
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['num_nodes'] = 3
sweeper_params['QI'] = 'LU'
sweeper_params['detector_threshold'] = 1e-10
# initialize problem parameters
problem_params = dict()
problem_params['newton_tol'] = 1e-10
problem_params['newton_maxiter'] = 50
problem_params['stop_at_nan'] = False
problem_params['mu'] = 18
problem_params['u0'] = (1.0, 0.0)
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 50
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 30
# Fill description dictionary for easy hierarchy creation
description = dict()
description['problem_class'] = vanderpol
description['problem_params'] = problem_params
description['sweeper_class'] = implicit_sweeper_faults
description['sweeper_params'] = sweeper_params
description['level_params'] = level_params
description['step_params'] = step_params
return description, controller_params
def run_clean_simulations(type=None):
"""
A simple code to run fault-free simulations
Args:
type (str): setup type
"""
if type == 'diffusion':
description, controller_params = diffusion_setup()
elif type == 'reaction':
description, controller_params = reaction_setup()
elif type == 'vanderpol':
description, controller_params = vanderpol_setup()
else:
raise ValueError('No valid setup type provided, aborting..')
# set time parameters
t0 = 0.0
Tend = description['level_params']['dt']
# instantiate controller
controller = controller_nonMPI(num_procs=1, controller_params=controller_params, description=description)
# get initial values on finest level
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(t0)
# this is where the iteration is happening
uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
# filter statistics by type (number of iterations)
iter_counts = get_sorted(stats, type='niter', sortby='time')
# print('This clean run took %s iterations!' % iter_counts[0][1])
return iter_counts[0][1]
def run_faulty_simulations(type=None, niters=None, cwd=''):
"""
A simple program to run faulty simulations
Args:
type (str): setup type
niters (int): number of iterations in clean run
        cwd (str): current working directory
"""
if type == 'diffusion':
description, controller_params = diffusion_setup()
elif type == 'reaction':
description, controller_params = reaction_setup()
elif type == 'vanderpol':
description, controller_params = vanderpol_setup()
else:
raise ValueError('No valid setup type provided, aborting..')
# set time parameters
t0 = 0.0
Tend = description['level_params']['dt']
filehandle_injections = open(cwd + 'data/dump_injections_' + type + '.txt', 'w')
controller_params['hook_class'] = fault_hook
description['sweeper_params']['allow_fault_correction'] = True
description['sweeper_params']['dump_injections_filehandle'] = filehandle_injections
description['sweeper_params']['niters'] = niters
# instantiate controller
controller = controller_nonMPI(num_procs=1, controller_params=controller_params, description=description)
# get initial values on finest level
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(t0)
# number of runs
nruns = 500
results = []
for _ in range(nruns):
# this is where the iteration is happening
uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
results.append(stats)
filehandle_injections.close()
dill.dump(results, open(cwd + "data/results_" + type + ".pkl", "wb"))
def process_statistics(type=None, cwd=''):
results = dill.load(open(cwd + "data/results_" + type + ".pkl", "rb"))
# get minimal length of residual vector
minlen = 1000
nruns = 0
for stats in results:
residuals = get_sorted(stats, type='residual_post_iteration', sortby='iter')
minlen = min(minlen, len(residuals))
nruns += 1
# initialize minimal residual vector
minres = np.zeros(minlen)
minres[:] = 1000
# initialize maximal residual vector
maxres = np.zeros(minlen)
# initialize mean residual vector
meanres = np.zeros(minlen)
# initialize median residual vector
medianres = np.zeros(minlen)
# initialize helper list
median_list = [[] for _ in range(minlen)]
for stats in results:
# Some black magic to extract fault stats out of monstrous stats object
# fault_stats = get_sorted(stats, type='fault_stats', sortby='type')[0][1]
# Some black magic to extract residuals dependent on iteration out of monstrous stats object
residuals_iter = get_sorted(stats, type='residual_post_iteration', sortby='iter')
        # extract residuals out of residuals_iter
residuals = np.array([item[1] for item in residuals_iter])
# calculate minimal, maximal, mean residual vectors
for i in range(minlen):
if np.isnan(residuals[i]) or np.isinf(residuals[i]):
residuals[i] = 1000
minres[i] = min(minres[i], residuals[i])
maxres[i] = max(maxres[i], residuals[i])
meanres[i] += residuals[i]
median_list[i].append(residuals[i])
# Example output of what we now can do
# print(fault_stats.nfaults_injected_u, fault_stats.nfaults_injected_f, fault_stats.nfaults_detected,
# fault_stats.nfalse_positives, fault_stats.nfalse_positives_in_correction,
# fault_stats.nfaults_missed, fault_stats.nclean_steps)
# print()
    # call helper routine to produce residual plot (uses the stats of the last run from the loop above)
# fname = 'residuals.png'
fname = cwd + 'data/' + type + '_' + str(nruns) + '_' + 'runs' + '_' + 'residuals.png'
show_residual_across_simulation(stats=stats, fname=fname)
meanres /= nruns
# print(minres)
# print(maxres)
# print(meanres)
# calculate median residual vector
for i in range(minlen):
medianres[i] = np.median(median_list[i])
# print(median_list)
# print(medianres)
# call helper routine to produce residual plot of minres, maxres, meanres and medianres
# fname = 'min_max_residuals.png'
fname = cwd + 'data/' + type + '_' + str(nruns) + '_' + 'runs' + '_' + 'min_max_residuals.png'
show_min_max_residual_across_simulation(
fname=fname, minres=minres, maxres=maxres, meanres=meanres, medianres=medianres, maxiter=minlen
)
# calculate maximum number of iterations per test run
maxiter = []
for stats in results:
residuals = get_sorted(stats, type='residual_post_iteration', sortby='iter')
maxiters = max(np.array([item[0] for item in residuals]))
maxiter.append(maxiters)
# print(maxiter)
# call helper routine to produce histogram of maxiter
# fname = 'iter_hist.png'
fname = cwd + 'data/' + type + '_' + str(nruns) + '_' + 'runs' + '_' + 'iter_hist.png'
show_iter_hist(fname=fname, maxiter=maxiter, nruns=nruns)
# initialize sum of nfaults_detected
nfd = 0
# initialize sum of nfalse_positives
nfp = 0
# initialize sum of nfaults_missed
nfm = 0
# initialize sum of nfalse_positives_in_correction
nfpc = 0
# calculate sum of nfaults_detected, sum of nfalse_positives, sum of nfaults_missed
for stats in results:
# Some black magic to extract fault stats out of monstrous stats object
fault_stats = get_sorted(stats, type='fault_stats', sortby='type')[0][1]
nfd += fault_stats.nfaults_detected
nfp += fault_stats.nfalse_positives
nfm += fault_stats.nfaults_missed
nfpc += fault_stats.nfalse_positives_in_correction
g = open(cwd + 'data/' + type + '_' + str(nruns) + '_' + 'runs' + '_' + 'Statistics.txt', 'w')
out = 'Type: ' + type + ' ' + str(nruns) + ' runs'
g.write(out + '\n')
# detector metrics (Sloan, Kumar, Bronevetsky 2012)
# nfaults_detected
out = 'true positives: ' + str(nfd)
g.write(out + '\n')
# nfaults_positives
out = 'false positives: ' + str(nfp)
g.write(out + '\n')
# nfaults_missed
out = 'false negatives: ' + str(nfm)
g.write(out + '\n')
# nfalse_positives_in_correction
out = 'false positives in correction: ' + str(nfpc)
g.write(out + '\n')
# F-Score
f_score = 2 * nfd / (2 * nfd + nfp + nfm)
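    # sanity-check example (hypothetical counts): nfd=450, nfp=10, nfm=40 gives F = 900/950 ~= 0.947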
out = 'F-Score: ' + str(f_score)
g.write(out + '\n')
    # false discovery rate (FDR): share of raised alarms that were false positives
    # (note: this is not the classical FPR, which would require the true negative count)
    fdr = nfp / (nfd + nfp)
    out = 'False discovery rate: ' + str(fdr)
    g.write(out + '\n')
    # precision (positive predictive value): share of raised alarms that were real faults
    # (the classical TPR/recall would be nfd / (nfd + nfm))
    precision = nfd / (nfd + nfp)
    out = 'Precision: ' + str(precision)
    g.write(out + '\n')
g.close()
def main():
# type = 'diffusion'
# niters = run_clean_simulations(type=type)
# run_faulty_simulations(type=type, niters=niters)
# process_statistics(type=type)
# type = 'reaction'
# niters = run_clean_simulations(type=type)
# run_faulty_simulations(type=type, niters=niters)
# process_statistics(type=type)
type = 'vanderpol'
niters = run_clean_simulations(type=type)
run_faulty_simulations(type=type, niters=niters)
process_statistics(type=type)
if __name__ == "__main__":
main()
| 14,023 | 34.324937 | 109 | py |
pySDC | pySDC-master/pySDC/projects/soft_failure/visualization_helper.py | import os
import matplotlib
import numpy as np
from matplotlib import rc
from pySDC.helpers.stats_helper import filter_stats
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def show_residual_across_simulation(stats, fname):
"""
Helper routine to visualize the residuals dependent on the number of iterations across the simulation
Args:
stats (dict): statistics object
fname (str): filename
"""
# get residuals of the run
extract_stats = filter_stats(stats, type='residual_post_iteration')
# find boundaries for x-,y-axis as well as arrays
maxiter = 0
for k, _ in extract_stats.items():
maxiter = max(maxiter, k.iter)
# grep residuals and put into array
residual = np.zeros(maxiter)
residual[:] = -99
for k, v in extract_stats.items():
if k.iter != -1:
residual[k.iter - 1] = np.log10(v)
# Set up latex stuff and fonts
rc('font', **{"sans-serif": ["Arial"], "size": 30})
rc('legend', fontsize='small')
rc('xtick', labelsize='small')
rc('ytick', labelsize='small')
# create plot and save
fig, ax = plt.subplots(figsize=(15, 10))
ax.set_xlabel('iteration')
ax.set_ylabel('log10(residual)')
plt.axis([0, 14, -12, 3])
plt.plot(np.linspace(1, maxiter, num=maxiter), residual)
plt.savefig(fname)
assert os.path.isfile(fname), 'ERROR: plotting did not create PNG file'
def show_min_max_residual_across_simulation(fname, minres, maxres, meanres, medianres, maxiter):
"""
Helper routine to visualize the minimal, maximal, mean, median residual vectors dependent on the
number of iterations across the simulation
Args:
        fname (str): filename
minres: minimal residual vector
maxres: maximal residual vector
meanres: mean residual vector
medianres: median residual vector
maxiter (int): length of residual vectors, maximal iteration index
"""
# Set up latex stuff and fonts
rc('font', **{"sans-serif": ["Arial"], "size": 30})
rc('legend', fontsize='small')
rc('xtick', labelsize='small')
rc('ytick', labelsize='small')
# create plot and save
fig, ax = plt.subplots(figsize=(15, 10))
ax.set_xlabel('iteration')
ax.set_ylabel('log10(residual)')
plt.plot(np.linspace(1, maxiter, num=maxiter), np.log10(minres), 'ob--', label='min')
plt.plot(np.linspace(1, maxiter, num=maxiter), np.log10(maxres), 'og--', label='max')
plt.plot(np.linspace(1, maxiter, num=maxiter), np.log10(meanres), 'or--', label='mean')
plt.plot(np.linspace(1, maxiter, num=maxiter), np.log10(medianres), 'oy--', label='median')
plt.fill_between(
np.linspace(1, maxiter, num=maxiter), np.log10(minres), np.log10(maxres), color='grey', alpha=0.3, label='range'
)
plt.axis([0, 14, -12, 3])
plt.legend()
plt.savefig(fname)
assert os.path.isfile(fname), 'ERROR: plotting did not create PNG file'
def show_iter_hist(fname, maxiter, nruns):
"""
Helper routine to visualize the maximal iteration number across the simulation in a histogram
Args:
        fname (str): filename
maxiter: maximal iterations per run
nruns: number of runs
"""
# create plot and save
fig, ax = plt.subplots(figsize=(15, 10))
plt.hist(maxiter, bins=np.arange(min(maxiter), max(maxiter) + 2, 1), align='left', rwidth=0.9)
    # when fault correction is allowed, use fixed axis limits instead of explicit xticks:
# plt.axis([12, 51, 0, nruns+1])
plt.xticks([13, 15, 20, 25, 30, 35, 40, 45, 50])
ax.set_xlabel('iterations until convergence')
plt.hlines(nruns, min(maxiter), max(maxiter), colors='red', linestyle='dashed')
    # when fault correction is allowed, drop the log scale
plt.yscale('log')
plt.savefig(fname)
assert os.path.isfile(fname), 'ERROR: plotting did not create PNG file'
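# Minimal usage sketch (hypothetical data, not from an actual run): assumes a
# 'data' directory exists; plots a histogram of per-run iteration counts.
if __name__ == "__main__":
    show_iter_hist(fname='data/demo_iter_hist.png', maxiter=[13, 14, 14, 15, 20], nruns=5)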
| 3,949 | 29.620155 | 120 | py |
pySDC | pySDC-master/pySDC/projects/soft_failure/__init__.py | 0 | 0 | 0 | py |
|
pySDC | pySDC-master/pySDC/projects/soft_failure/implicit_sweeper_faults.py | import struct
from datetime import datetime
import numpy as np
from pySDC.helpers.pysdc_helper import FrozenClass
from pySDC.implementations.sweeper_classes.generic_implicit import generic_implicit
class _fault_stats(FrozenClass):
def __init__(self):
self.nfaults_called = 0
self.nfaults_injected_u = 0
self.nfaults_injected_f = 0
self.nfaults_detected = 0
self.ncorrection_attempts = 0
self.nfaults_missed = 0
self.nfalse_positives = 0
self.nfalse_positives_in_correction = 0
self.nclean_steps = 0
self._freeze()
class implicit_sweeper_faults(generic_implicit):
"""
LU sweeper using LU decomposition of the Q matrix for the base integrator, special type of generic implicit sweeper
"""
def __init__(self, params):
"""
Initialization routine for the custom sweeper
Args:
params: parameters for the sweeper
"""
if 'allow_fault_correction' not in params:
params['allow_fault_correction'] = False
if 'detector_threshold' not in params:
params['detector_threshold'] = 1.0
if 'dump_injections_filehandle' not in params:
params['dump_injections_filehandle'] = None
# call parent's initialization routine
super(implicit_sweeper_faults, self).__init__(params)
self.fault_stats = _fault_stats()
self.fault_injected = False
self.fault_detected = False
self.in_correction = False
self.fault_iteration = False
def reset_fault_stats(self):
"""
Helper method to reset all fault related stats and flags. Will be called after the run in post-processing.
"""
self.fault_stats = _fault_stats()
self.fault_injected = False
self.fault_detected = False
self.in_correction = False
self.fault_iteration = False
@staticmethod
def bitsToFloat(b):
"""
Static helper method to get a number from bit into float representation
Args:
b: bit representation of a number
Returns:
float representation of b
"""
s = struct.pack('>q', b)
return struct.unpack('>d', s)[0]
@staticmethod
def floatToBits(f):
"""
Static helper method to get a number from float into bit representation
Args:
f: float representation of a number
Returns:
bit representation of f
"""
s = struct.pack('>d', f)
return struct.unpack('>q', s)[0]
def do_bitflip(self, a, pos):
"""
Method to do a bit flip
Args:
a: float representation of a number
pos (int between 0 and 63): position of bit flip
Returns:
float representation of a number after bit flip at pos
"""
# flip of mantissa (fraction) bit (pos between 0 and 51) or of exponent bit (pos between 52 and 62)
if pos < 63:
b = self.floatToBits(a)
# mask: bit representation with 1 at pos and 0 elsewhere
mask = 1 << pos
# ^: bitwise xor-operator --> bit flip at pos
c = b ^ mask
return self.bitsToFloat(c)
# "flip" of sign bit (pos = 63)
elif pos == 63:
return -a
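    # Worked example: for a = 1.0 (bits 0x3FF0000000000000), flipping the highest
    # mantissa bit gives do_bitflip(1.0, 51) == 1.5, flipping the lowest exponent
    # bit gives do_bitflip(1.0, 52) == 0.5, and pos=63 yields do_bitflip(1.0, 63) == -1.0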
def inject_fault(self, type=None, target=None):
"""
Main method to inject a fault
Args:
            type (str): string describing whether u or f should be affected
target: data to be modified
"""
pos = 0
bitflip_entry = 0
# do bitflip in u
if type == 'u':
# do something to target = u here!
# do a bitflip at random vector entry of u at random position in bit representation
ulen = len(target)
bitflip_entry = np.random.randint(ulen)
pos = np.random.randint(64)
tmp = target[bitflip_entry]
target[bitflip_entry] = self.do_bitflip(target[bitflip_entry], pos)
# print(' fault in u injected')
self.fault_stats.nfaults_injected_u += 1
# do bitflip in f
elif type == 'f':
# do something to target = f here!
# do a bitflip at random vector entry of f at random position in bit representation
flen = len(target)
bitflip_entry = np.random.randint(flen)
pos = np.random.randint(64)
tmp = target[bitflip_entry]
target[bitflip_entry] = self.do_bitflip(target[bitflip_entry], pos)
# print(' fault in f injected')
self.fault_stats.nfaults_injected_f += 1
        else:
            raise ValueError('wrong fault type specified, got %s' % type)
self.fault_injected = True
if self.params.dump_injections_filehandle is not None:
out = str(datetime.now())
out += ' --- '
out += type + ' ' + str(bitflip_entry) + ' ' + str(pos)
out += ' --- '
out += str(tmp) + ' ' + str(target[bitflip_entry]) + ' ' + str(np.abs(tmp - target[bitflip_entry]))
out += '\n'
self.params.dump_injections_filehandle.write(out)
def detect_fault(self, current_node=None, rhs=None):
"""
Main method to detect a fault
Args:
current_node (int): current node we are working with at the moment
rhs: right-hand side vector for usage in detector
"""
# get current level for further use
L = self.level
# calculate solver residual
res = L.u[current_node] - L.dt * self.QI[current_node, current_node] * L.f[current_node] - rhs
res_norm = np.linalg.norm(res, np.inf)
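        # note: a bit flip whose induced residual stays at or below detector_threshold
        # is not flagged here and will be counted as a missed fault below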
if np.isnan(res_norm) or res_norm > self.params.detector_threshold:
# print(' FAULT DETECTED!')
self.fault_detected = True
else:
self.fault_detected = False
# update statistics
# fault injected and fault detected -> yeah!
if self.fault_injected and self.fault_detected:
self.fault_stats.nfaults_detected += 1
# no fault injected but fault detected -> meh!
elif not self.fault_injected and self.fault_detected:
self.fault_stats.nfalse_positives += 1
# in correction mode and fault detected -> meeeh!
if self.in_correction:
self.fault_stats.nfalse_positives_in_correction += 1
# fault injected but no fault detected -> meh!
elif self.fault_injected and not self.fault_detected:
self.fault_stats.nfaults_missed += 1
# no fault injected and no fault detected -> yeah!
else:
self.fault_stats.nclean_steps += 1
def correct_fault(self):
"""
Main method to correct a fault or issue a restart
"""
# do correction magic or issue restart here... could be empty!
# we need to make sure that not another fault is injected here.. could also temporarily lower the probability
self.in_correction = True
# print(' doing correction...')
self.fault_stats.ncorrection_attempts += 1
self.fault_detected = False
def update_nodes(self):
"""
Update the u- and f-values at the collocation nodes -> corresponds to a single sweep over all nodes
Returns:
None
"""
# get current level and problem description
L = self.level
P = L.prob
# only if the level has been touched before
assert L.status.unlocked
# get number of collocation nodes for easier access
M = self.coll.num_nodes
# gather all terms which are known already (e.g. from the previous iteration)
# this corresponds to u0 + QF(u^k) - QdF(u^k) + tau
# get QF(u^k)
integral = self.integrate()
for m in range(M):
# get -QdF(u^k)_m
for j in range(M + 1):
integral[m] -= L.dt * self.QI[m + 1, j] * L.f[j]
# add initial value
integral[m] += L.u[0]
# add tau if associated
if L.tau[m] is not None:
integral[m] += L.tau[m]
fault_node = np.random.randint(M)
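        # np.random.randint(M) draws one node index in 0..M-1; the fault (if any) is injected there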
# do the sweep
m = 0
while m < M:
# see if there will be a fault
self.fault_injected = False
fault_at_u = False
fault_at_f = False
if not self.in_correction and m == fault_node and self.fault_iteration:
if np.random.randint(2) == 0:
fault_at_u = True
else:
fault_at_f = True
# build rhs, consisting of the known values from above and new values from previous nodes (at k+1)
# this is what needs to be protected separately!
rhs = P.dtype_u(integral[m])
for j in range(m + 1):
rhs += L.dt * self.QI[m + 1, j] * L.f[j]
if fault_at_u:
# implicit solve with prefactor stemming from the diagonal of Qd
L.u[m + 1] = P.solve_system(
rhs, L.dt * self.QI[m + 1, m + 1], L.u[m + 1], L.time + L.dt * self.coll.nodes[m]
)
# inject fault at some u value
self.inject_fault(type='u', target=L.u[m + 1])
# update function values
L.f[m + 1] = P.eval_f(L.u[m + 1], L.time + L.dt * self.coll.nodes[m])
elif fault_at_f:
# implicit solve with prefactor stemming from the diagonal of Qd
L.u[m + 1] = P.solve_system(
rhs, L.dt * self.QI[m + 1, m + 1], L.u[m + 1], L.time + L.dt * self.coll.nodes[m]
)
# update function values
L.f[m + 1] = P.eval_f(L.u[m + 1], L.time + L.dt * self.coll.nodes[m])
# inject fault at some f value
self.inject_fault(type='f', target=L.f[m + 1])
else:
# implicit solve with prefactor stemming from the diagonal of Qd
L.u[m + 1] = P.solve_system(
rhs, L.dt * self.QI[m + 1, m + 1], L.u[m + 1], L.time + L.dt * self.coll.nodes[m]
)
# update function values
L.f[m + 1] = P.eval_f(L.u[m + 1], L.time + L.dt * self.coll.nodes[m])
# see if our detector finds something
self.detect_fault(current_node=m + 1, rhs=rhs)
# if we are allowed to try correction, do so, otherwise proceed with sweep
if not self.in_correction and self.fault_detected and self.params.allow_fault_correction:
self.correct_fault()
else:
self.in_correction = False
m += 1
# indicate presence of new values at this level
L.status.updated = True
return None
| 11,102 | 32.85061 | 119 | py |
pySDC | pySDC-master/pySDC/projects/soft_failure/FaultHooks.py | from __future__ import division
import numpy as np
from pySDC.core.Hooks import hooks
class fault_hook(hooks):
def __init__(self):
"""
Initialization of fault hooks
"""
super(fault_hook, self).__init__()
self.fault_iteration = None
def pre_run(self, step, level_number):
super(fault_hook, self).pre_run(step, level_number)
L = step.levels[level_number]
L.sweep.reset_fault_stats()
self.fault_iteration = np.random.randint(1, L.sweep.params.niters)
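        # note: np.random.randint draws from [low, high), so the fault hits one of
        # iterations 1, ..., niters - 1 and never the final iteration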
def pre_iteration(self, step, level_number):
super(fault_hook, self).pre_iteration(step, level_number)
L = step.levels[level_number]
L.sweep.fault_iteration = self.fault_iteration == step.status.iter
def post_run(self, step, level_number):
super(fault_hook, self).post_run(step, level_number)
L = step.levels[level_number]
self.add_to_stats(
process=step.status.slot,
time=L.time,
level=L.level_index,
iter=step.status.iter,
sweep=L.status.sweep,
type='fault_stats',
value=L.sweep.fault_stats,
)
| 1,187 | 24.276596 | 74 | py |
pySDC | pySDC-master/pySDC/projects/soft_failure/data/__init__.py | 0 | 0 | 0 | py |
|
pySDC | pySDC-master/pySDC/projects/DAE/sweepers/fully_implicit_DAE.py | import numpy as np
from scipy import optimize
from pySDC.core.Errors import ParameterError
from pySDC.core.Sweeper import sweeper
class fully_implicit_DAE(sweeper):
"""
Custom sweeper class, implements Sweeper.py
Sweeper to solve first order differential equations in fully implicit form
Primarily implemented to be used with differential algebraic equations
Based on the concepts outlined in "Arbitrary order Krylov deferred correction methods for differential algebraic equations" by Huang et al.
Attributes:
QI: implicit Euler integration matrix
"""
def __init__(self, params):
"""
Initialization routine for the custom sweeper
Args:
params: parameters for the sweeper
"""
if 'QI' not in params:
params['QI'] = 'IE'
# call parent's initialization routine
super(fully_implicit_DAE, self).__init__(params)
msg = f"Quadrature type {self.params.quad_type} is not implemented yet. Use 'RADAU-RIGHT' instead!"
if self.coll.left_is_node:
raise ParameterError(msg)
self.QI = self.get_Qdelta_implicit(coll=self.coll, qd_type=self.params.QI)
# TODO: hijacking this function to return solution from its gradient i.e. fundamental theorem of calculus.
# This works well since (ab)using level.f to store the gradient. Might need to change this for release?
def integrate(self):
"""
Returns the solution by integrating its gradient (fundamental theorem of calculus)
Note that level.f stores the gradient values in the fully implicit case, rather than the evaluation of the rhs as in the ODE case
Returns:
list of dtype_u: containing the integral as values
"""
# get current level and problem description
L = self.level
P = L.prob
M = self.coll.num_nodes
me = []
# integrate gradient over all collocation nodes
for m in range(1, M + 1):
# new instance of dtype_u, initialize values with 0
me.append(P.dtype_u(P.init, val=0.0))
for j in range(1, M + 1):
me[-1] += L.dt * self.coll.Qmat[m, j] * L.f[j]
return me
def update_nodes(self):
"""
Update the u- and f-values at the collocation nodes -> corresponds to a single iteration of the preconditioned Richardson iteration in "ordinary" SDC
Returns:
None
"""
# get current level and problem description
L = self.level
# in the fully implicit case L.prob.eval_f() evaluates the function F(u, u', t)
P = L.prob
# only if the level has been touched before
assert L.status.unlocked
# get number of collocation nodes for easier access
M = self.coll.num_nodes
u_0 = L.u[0]
# get QU^k where U = u'
# note that for multidimensional functions the required Kronecker product is achieved since
# e.g. L.f[j] is a mesh object and multiplication with a number distributes over the mesh
integral = self.integrate()
# build the rest of the known solution u_0 + del_t(Q - Q_del)U_k
for m in range(1, M + 1):
for j in range(1, M + 1):
integral[m - 1] -= L.dt * self.QI[m, j] * L.f[j]
# add initial value
integral[m - 1] += u_0
# do the sweep
for m in range(1, M + 1):
# build implicit function, consisting of the known values from above and new values from previous nodes (at k+1)
u_approx = P.dtype_u(integral[m - 1])
# add the known components from current sweep del_t*Q_del*U_k+1
for j in range(1, m):
u_approx += L.dt * self.QI[m, j] * L.f[j]
# params contains U = u'
def impl_fn(params):
# make params into a mesh object
params_mesh = P.dtype_f(P.init)
params_mesh[:] = params
# build parameters to pass to implicit function
local_u_approx = u_approx
# note that derivatives of algebraic variables are taken into account here too
# these do not directly affect the output of eval_f but rather indirectly via QI
local_u_approx += L.dt * self.QI[m, m] * params_mesh
return P.eval_f(local_u_approx, params_mesh, L.time + L.dt * self.coll.nodes[m - 1])
# get U_k+1
# note: not using solve_system here because this solve step is the same for any problem
# See link for how different methods use the default tol parameter
# https://github.com/scipy/scipy/blob/8a6f1a0621542f059a532953661cd43b8167fce0/scipy/optimize/_root.py#L220
# options['xtol'] = P.params.newton_tol
# options['eps'] = 1e-16
opt = optimize.root(
impl_fn,
L.f[m],
method='hybr',
tol=P.newton_tol
# callback= lambda x, f: print("solution:", x, " residual: ", f)
)
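            # opt.x holds the updated stage derivative U_m^{k+1}; opt.success / opt.message
            # could additionally be inspected to monitor the nonlinear solve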
# update gradient (recall L.f is being used to store the gradient)
L.f[m][:] = opt.x
# Update solution approximation
integral = self.integrate()
for m in range(M):
L.u[m + 1] = u_0 + integral[m]
# indicate presence of new values at this level
L.status.updated = True
return None
def predict(self):
"""
Predictor to fill values at nodes before first sweep
Default prediction for the sweepers, only copies the values to all collocation nodes
This function overrides the base implementation by always initialising level.f to zero
This is necessary since level.f stores the solution derivative in the fully implicit case, which is not initially known
"""
# get current level and problem description
L = self.level
P = L.prob
# set initial guess for gradient to zero
L.f[0] = P.dtype_f(init=P.init, val=0.0)
for m in range(1, self.coll.num_nodes + 1):
# copy u[0] to all collocation nodes and set f (the gradient) to zero
if self.params.initial_guess == 'spread':
L.u[m] = P.dtype_u(L.u[0])
L.f[m] = P.dtype_f(init=P.init, val=0.0)
# start with zero everywhere
elif self.params.initial_guess == 'zero':
L.u[m] = P.dtype_u(init=P.init, val=0.0)
L.f[m] = P.dtype_f(init=P.init, val=0.0)
# start with random initial guess
elif self.params.initial_guess == 'random':
L.u[m] = P.dtype_u(init=P.init, val=np.random.rand(1)[0])
L.f[m] = P.dtype_f(init=P.init, val=np.random.rand(1)[0])
else:
raise ParameterError(f'initial_guess option {self.params.initial_guess} not implemented')
# indicate that this level is now ready for sweeps
L.status.unlocked = True
L.status.updated = True
def compute_residual(self, stage=None):
"""
Overrides the base implementation
Uses the absolute value of the implicit function ||F(u', u, t)|| as the residual
Args:
stage (str): The current stage of the step the level belongs to
Returns:
None
"""
# get current level and problem description
L = self.level
P = L.prob
# Check if we want to skip the residual computation to gain performance
# Keep in mind that skipping any residual computation is likely to give incorrect outputs of the residual!
if stage in self.params.skip_residual_computation:
L.status.residual = 0.0 if L.status.residual is None else L.status.residual
return None
# check if there are new values (e.g. from a sweep)
# assert L.status.updated
# compute the residual for each node
res_norm = []
for m in range(self.coll.num_nodes):
# use abs function from data type here
res_norm.append(abs(P.eval_f(L.u[m + 1], L.f[m + 1], L.time + L.dt * self.coll.nodes[m])))
# find maximal residual over the nodes
if L.params.residual_type == 'full_abs':
L.status.residual = max(res_norm)
elif L.params.residual_type == 'last_abs':
L.status.residual = res_norm[-1]
elif L.params.residual_type == 'full_rel':
L.status.residual = max(res_norm) / abs(L.u[0])
elif L.params.residual_type == 'last_rel':
L.status.residual = res_norm[-1] / abs(L.u[0])
else:
raise ParameterError(
f'residual_type = {L.params.residual_type} not implemented, choose '
f'full_abs, last_abs, full_rel or last_rel instead'
)
# indicate that the residual has seen the new values
L.status.updated = False
return None
def compute_end_point(self):
"""
Compute u at the right point of the interval
        The value uend computed here is a full evaluation of the Picard formulation unless do_coll_update==False
Returns:
None
"""
# get current level and problem description
L = self.level
P = L.prob
# check if Mth node is equal to right point and do_coll_update is false, perform a simple copy
if self.coll.right_is_node and not self.params.do_coll_update:
# a copy is sufficient
L.uend = P.dtype_u(L.u[-1])
else:
# start with u0 and add integral over the full interval (using coll.weights)
L.uend = P.dtype_u(L.u[0])
for m in range(self.coll.num_nodes):
L.uend += L.dt * self.coll.weights[m] * L.f[m + 1]
return None
| 9,959 | 38.367589 | 157 | py |
pySDC | pySDC-master/pySDC/projects/DAE/plotting/loglog_plot.py | import os
import sys
import numpy as np
import pickle
import pySDC.helpers.plot_helper as plt_helper
def plot_convergence(): # pragma: no cover
'''
    Loads pickled error data for multiple preconditioners and collocation node counts and plots it with respect to the time step size on a log-log axis.
A new plot is generated for each preconditioner. Different collocation node counts are plotted on the same axes
'''
data = pickle.load(open("data/dae_conv_data.p", "rb"))
# Configure specific line and symbol style_lists
# These must match the data being loaded
# General style_list settings e.g. font, should be changed in pySDC.helpers.plot_helper
# num_nodes_list = [3, 4, 5]
# color_list = ["r", "blue", "g"]
# shape_list = ["o", "d", "s"]
# style_list = [":", "-.", "--"]
# order_list = [5, 7, 9]
num_nodes_list = [3]
color_list = ["blue"]
shape_list = ["d"]
style_list = ["-."]
order_list = [5]
# Some nasty hacking to get the iteration numbers positioned correctly individually in each plot
num_data_points = len(data[next(iter(data))][num_nodes_list[0]]['error'])
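    # (reads the common array length off the first preconditioner entry; assumes all
    # entries hold the same number of dt samples)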
# adjust positions
data['LU'][3]['position'] = ['center'] * num_data_points
data['LU'][4]['position'] = ['center'] * num_data_points
data['LU'][5]['position'] = ['center'] * num_data_points
data['IE'][3]['position'] = ['center'] * num_data_points
data['IE'][4]['position'] = ['center'] * num_data_points
data['IE'][5]['position'] = ['center'] * num_data_points
# data['MIN'][3]['position'] = ['center']*num_data_points
# data['MIN'][4]['position'] = ['center']*num_data_points
# data['MIN'][5]['position'] = ['center']*num_data_points
# adjust offsets
data['LU'][3]['offset'] = [(0, 10)] * num_data_points
data['LU'][4]['offset'] = [(0, -10)] * 4
data['LU'][4]['offset'].extend([(0, 10)] * (num_data_points - 4))
data['LU'][5]['offset'] = [(0, -14)] * num_data_points
data['IE'][3]['offset'] = [(0, 10)] * num_data_points
data['IE'][4]['offset'] = [(0, -10)] * 4
data['IE'][4]['offset'].extend([(0, 10)] * (num_data_points - 4))
data['IE'][5]['offset'] = [(0, -14)] * num_data_points
# data['MIN'][3]['offset'] = [(0, 10)]*num_data_points
# data['MIN'][4]['offset'] = [(0, -10)]*4
# data['MIN'][4]['offset'].extend([(0, 10)]*(num_data_points-4))
# data['MIN'][5]['offset'] = [(0, -14)]*num_data_points
plt_helper.setup_mpl()
for qd_type in data.keys():
fig, ax = plt_helper.newfig(textwidth=500, scale=0.89) # Create a figure containing a single axes.
# Init ylim to span largest possible interval. Refined in each iteration to fit data
ylim = (sys.float_info.max, sys.float_info.min)
for num_nodes, color, shape, style, order in zip(
num_nodes_list, color_list, shape_list, style_list, order_list
):
# Plot convergence data
ax.loglog(
data[qd_type][num_nodes]['dt'][:],
data[qd_type][num_nodes]['error'][:],
label="node count = {}".format(num_nodes),
color=color,
marker=shape,
# ls=':',
lw=1,
# alpha=0.4
)
# Plot reference lines
start_point = 3
ax.loglog(
data[qd_type][num_nodes]['dt'][start_point:],
data[qd_type][num_nodes]['error'][start_point]
* (data[qd_type][num_nodes]['dt'][start_point:] / data[qd_type][num_nodes]['dt'][start_point]) ** order,
color="black",
ls=style,
lw=0.7,
label="{}. order ref.".format(order),
)
# Write iteration count to each data point
for niter, error, dt, position, offset in zip(
data[qd_type][num_nodes]['niter'],
data[qd_type][num_nodes]['error'],
data[qd_type][num_nodes]['dt'],
data[qd_type][num_nodes]['position'],
data[qd_type][num_nodes]['offset'],
):
ax.annotate(
niter,
(dt, error),
textcoords="offset points", # how to position the text
xytext=offset, # distance from text to points (x,y)
ha=position,
)
# Update the current y limits of the data
# Ensures that final plot fits the data but cuts off the excess reference lines
ylim = (
min(np.append(data[qd_type][num_nodes]['error'][:], ylim[0])),
max(np.append(data[qd_type][num_nodes]['error'][:], ylim[1])),
)
ax.set(ylim=((1e-2 * ylim[0], 5e1 * ylim[1])))
ax.set(xlabel=r'$dt$', ylabel=r'$||u_5-\tilde{u}_5||_\infty$')
ax.grid(visible=True)
# reorder legend entries to place reference lines at end
handles, labels = ax.get_legend_handles_labels()
legend_order = range(len(handles))
legend_order = np.concatenate(
(list(filter(lambda x: x % 2 == 0, legend_order)), list(filter(lambda x: x % 2 == 1, legend_order)))
)
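        # e.g. for six entries this yields [0, 2, 4, 1, 3, 5]: all data lines first,
        # followed by all reference lines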
ax.legend([handles[idx] for idx in legend_order], [labels[idx] for idx in legend_order])
# plt_helper.plt.show()
fname = 'data/simple_dae_SDC_' + qd_type
plt_helper.savefig(fname)
assert os.path.isfile(fname + '.png')
if __name__ == "__main__":
plot_convergence()
| 5,579 | 39.729927 | 151 | py |
pySDC | pySDC-master/pySDC/projects/DAE/plotting/linear_plot.py | import pySDC.helpers.plot_helper as plt_helper
import pickle
def linear_plot(): # pragma: no cover
    '''Loads solution data from a pickled file and plots specified components with respect to each other on a linear axis'''
data = pickle.load(open("data/dae_conv_data.p", "rb"))
plt_helper.setup_mpl()
fig, ax = plt_helper.newfig(textwidth=500, scale=0.89) # Create a figure containing a single axes.
# ax.plot(data['dt'], data['ue'], label=r'$U_e$', lw=0.6, color='r')
# ax.plot(data['dt'], data['solution'][:, 7], label=r'$U_8$', marker='x', markersize=2, lw=0.6, color='b')
# ax.plot(data['dt'], data['solution'][0], label=r'$x$', lw=0.6, marker='x', markersize=3)
# ax.plot(data['dt'], data['solution'][1], label=r'$y$', lw=0.6, marker='x', markersize=3)
ax.plot(data['dt'], data['solution'][2], label=r'$dx$', lw=0.6, marker='x', markersize=3)
ax.plot(data['dt'], data['solution'][3], label=r'$dy$', lw=0.6, marker='x', markersize=3)
# ax.plot(data['dt'], data['solution'][4], label=r'$lambda$', lw=0.6, marker='x', markersize=3)
# title='Convergence plot two stage implicit Runge-Kutta with Gauss nodes'
# ax.set(xlabel=r'time (s)', ylabel=r'voltage (V)')
ax.set(xlabel=r'$x$')
ax.grid(visible=True)
fig.tight_layout()
ax.legend(loc='upper left')
fname = 'data/lin_plot_1'
plt_helper.savefig(fname)
# plt.savefig('../results/problematic_good.png')
if __name__ == "__main__":
linear_plot()
| 1,483 | 42.647059 | 122 | py |
pySDC | pySDC-master/pySDC/projects/DAE/plotting/semilogy_plot.py | import os
import pickle
import pySDC.helpers.plot_helper as plt_helper
def plot_convergence(): # pragma: no cover
'''
Loads pickled error data for multiple preconditioners and collocation node counts and plots it with respect to the max. iteration count
The y axis is logarithmically scaled. The x axis is linearly scaled.
A new plot is generated for each preconditioner. Different collocation node counts are plotted on the same axes
'''
data = pickle.load(open("data/dae_conv_data.p", "rb"))
# Configure specific line and symbol style_lists
# These must match the data being loaded
# General style_list settings e.g. font, should be changed in pySDC.helpers.plot_helper
num_nodes_list = [3, 4, 5]
color_list = ["r", "blue", "g"]
shape_list = ["o", "d", "s"]
start = 0
end = 35
plt_helper.setup_mpl()
for qd_type in data.keys():
fig, ax1 = plt_helper.newfig(textwidth=500, scale=0.89) # Create a figure containing a single axes.
ax2 = ax1.twinx()
lns1 = list()
lns2 = list()
for num_nodes, color, shape in zip(num_nodes_list, color_list, shape_list):
# Plot convergence data
lns1.append(
ax1.semilogy(
data[qd_type][num_nodes]['niter'][start:end],
data[qd_type][num_nodes]['error'][start:end],
label="Error {} nodes".format(num_nodes),
color=color,
marker=shape,
# ls=':',
lw=1,
alpha=0.4,
)[0]
)
lns2.append(
ax2.semilogy(
data[qd_type][num_nodes]['niter'][start:end],
data[qd_type][num_nodes]['residual'][start:end],
label="Residual {} nodes".format(num_nodes),
color=color,
marker=shape,
ls=':',
lw=1,
# alpha=0.4
)[0]
)
ax1.set(xlabel='Iter. count', ylabel=r'$||u_1-\tilde{u}_1||_\infty$')
ax1.grid(visible=True)
        ax2.set(ylabel=r"$||F\left(\tilde{u}, \tilde{u}', t\right)||_\infty$")
lns = lns1 + lns2
labs = [l.get_label() for l in lns]
ax1.legend(lns, labs, loc='upper right')
# plt_helper.plt.show()
fname = 'data/simple_dae_SDC_' + qd_type
plt_helper.savefig(fname)
assert os.path.isfile(fname + '.png')
if __name__ == "__main__":
plot_convergence()
| 2,605 | 34.216216 | 139 | py |
pySDC | pySDC-master/pySDC/projects/DAE/misc/HookClass_DAE.py | from pySDC.core.Hooks import hooks
class approx_solution_hook(hooks):
"""
Hook class to add the approximate solution to the output generated by the sweeper after each time step
"""
def __init__(self):
"""
Initialization routine for the custom hook
"""
super(approx_solution_hook, self).__init__()
def post_step(self, step, level_number):
"""
Default routine called after each step
Args:
step: the current step
level_number: the current level number
"""
super(approx_solution_hook, self).post_step(step, level_number)
# some abbreviations
L = step.levels[level_number]
# TODO: is it really necessary to recompute the end point? Hasn't this been done already?
L.sweep.compute_end_point()
self.add_to_stats(
process=step.status.slot,
time=L.time + L.dt,
level=L.level_index,
iter=step.status.iter,
sweep=L.status.sweep,
type='approx_solution',
value=L.uend,
)
class error_hook(hooks):
"""
    Hook class to add the error of the approximate solution (w.r.t. the exact solution) to the output generated by the sweeper after each time step
"""
def __init__(self):
"""
Initialization routine for the custom hook
"""
super(error_hook, self).__init__()
def post_step(self, step, level_number):
"""
Default routine called after each step
Args:
step: the current step
level_number: the current level number
"""
super(error_hook, self).post_step(step, level_number)
# some abbreviations
L = step.levels[level_number]
P = L.prob
# TODO: is it really necessary to recompute the end point? Hasn't this been done already?
L.sweep.compute_end_point()
# compute and save errors
# Note that the component from which the error is measured is specified here
upde = P.u_exact(step.time + step.dt)
err = abs(upde[0] - L.uend[0])
# err = abs(upde[4] - L.uend[4])
self.add_to_stats(
process=step.status.slot,
time=L.time + L.dt,
level=L.level_index,
iter=step.status.iter,
sweep=L.status.sweep,
type='error_post_step',
value=err,
)
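# Usage sketch: these hooks are registered via the controller parameters, e.g.
#   controller_params['hook_class'] = [error_hook, approx_solution_hook]
# and their entries are read back from the stats object afterwards, e.g. via
#   get_sorted(stats, type='error_post_step', sortby='time')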
| 2,423 | 27.517647 | 106 | py |
pySDC | pySDC-master/pySDC/projects/DAE/misc/ProblemDAE.py | import numpy as np
from pySDC.core.Problem import ptype
from pySDC.implementations.datatype_classes.mesh import mesh
class ptype_dae(ptype):
"""
Interface class for DAE problems. Ensures that all parameters are passed that are needed by DAE sweepers
"""
dtype_u = mesh
dtype_f = mesh
def __init__(self, nvars, newton_tol):
"""
Initialization routine
Args:
            nvars (int): number of unknowns of the DAE system
            newton_tol (float): tolerance for the implicit (nonlinear) solver
"""
super().__init__((nvars, None, np.dtype('float64')))
self._makeAttributeAndRegister('nvars', 'newton_tol', localVars=locals(), readOnly=True)
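# Minimal usage sketch (hypothetical subclass, not part of the original module):
# a scalar linear DAE, 0 = u' - u, written in the implicit form F(u, u', t) = 0
# that the fully implicit DAE sweeper expects from eval_f.
class linear_test_dae(ptype_dae):
    def eval_f(self, u, du, t):
        # residual of the implicit system; u and du are mesh objects of size nvars
        f = self.dtype_f(self.init)
        f[0] = du[0] - u[0]
        return f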
| 781 | 29.076923 | 108 | py |
pySDC | pySDC-master/pySDC/projects/DAE/run/fully_implicit_dae_playground.py | from pathlib import Path
import numpy as np
import pickle
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.projects.DAE.problems.simple_DAE import problematic_f
from pySDC.projects.DAE.sweepers.fully_implicit_DAE import fully_implicit_DAE
from pySDC.projects.DAE.misc.HookClass_DAE import approx_solution_hook
from pySDC.projects.DAE.misc.HookClass_DAE import error_hook
from pySDC.helpers.stats_helper import get_sorted
def main():
"""
A simple test program to see the fully implicit SDC solver in action
"""
# initialize level parameters
level_params = dict()
level_params['restol'] = 1e-6
level_params['dt'] = 1e-1
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['num_nodes'] = 3
# initialize problem parameters
problem_params = dict()
    problem_params['newton_tol'] = 1e-12  # tolerance for implicit solver
problem_params['nvars'] = 2
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 40
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 30
controller_params['hook_class'] = [approx_solution_hook, error_hook]
# Fill description dictionary for easy hierarchy creation
description = dict()
description['problem_class'] = problematic_f
description['problem_params'] = problem_params
description['sweeper_class'] = fully_implicit_DAE
description['sweeper_params'] = sweeper_params
description['level_params'] = level_params
description['step_params'] = step_params
Path("data").mkdir(parents=True, exist_ok=True)
# instantiate the controller
controller = controller_nonMPI(num_procs=1, controller_params=controller_params, description=description)
# set time parameters
t0 = 0.0
Tend = 1.0
# get initial values on finest level
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(t0)
# call main function to get things done...
uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
# check error
err = get_sorted(stats, type='error_post_step', sortby='time')
err = np.linalg.norm([err[i][1] for i in range(len(err))], np.inf)
print(f"Error is {err}")
assert np.isclose(err, 0.0, atol=1e-4), "Error too large."
# store results
    sol = get_sorted(stats, type='approx_solution', sortby='time')  # matches the type registered by approx_solution_hook
sol_dt = np.array([sol[i][0] for i in range(len(sol))])
sol_data = np.array([[sol[j][1][i] for j in range(len(sol))] for i in range(problem_params['nvars'])])
data = dict()
data['dt'] = sol_dt
data['solution'] = sol_data
pickle.dump(data, open("data/dae_conv_data.p", 'wb'))
print("Done")
if __name__ == "__main__":
main()
| 2,854 | 31.816092 | 109 | py |
pySDC | pySDC-master/pySDC/projects/DAE/run/run_convergence_test.py | import numpy as np
import statistics
import pickle
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.projects.DAE.problems.simple_DAE import simple_dae_1
from pySDC.projects.DAE.sweepers.fully_implicit_DAE import fully_implicit_DAE
from pySDC.projects.DAE.misc.HookClass_DAE import error_hook
from pySDC.helpers.stats_helper import get_sorted
from pySDC.helpers.stats_helper import filter_stats
def setup():
"""
Routine to initialise convergence test parameters
"""
# initialize level parameters
level_params = dict()
level_params['restol'] = 1e-10
# This comes as read-in for the sweeper class
sweeper_params = dict()
sweeper_params['quad_type'] = 'RADAU-RIGHT'
# This comes as read-in for the problem class
problem_params = dict()
    problem_params['newton_tol'] = 1e-3  # tolerance for implicit solver
problem_params['nvars'] = 3
# This comes as read-in for the step class
step_params = dict()
step_params['maxiter'] = 30
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 30
controller_params['hook_class'] = error_hook
# Fill description dictionary for easy hierarchy creation
description = dict()
description['problem_class'] = simple_dae_1
description['problem_params'] = problem_params
description['sweeper_class'] = fully_implicit_DAE
description['sweeper_params'] = sweeper_params
description['level_params'] = level_params
description['step_params'] = step_params
# set simulation parameters
num_samples = 2
run_params = dict()
run_params['t0'] = 0.0
run_params['tend'] = 0.1
run_params['dt_list'] = np.logspace(-2, -3, num=num_samples)
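    # with num_samples=2 this yields dt_list == [1e-2, 1e-3]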
run_params['qd_list'] = ['IE', 'LU']
run_params['num_nodes_list'] = [3]
return description, controller_params, run_params
def run(description, controller_params, run_params):
"""
Routine to run simulation
"""
conv_data = dict()
for qd_type in run_params['qd_list']:
description['sweeper_params']['QI'] = qd_type
conv_data[qd_type] = dict()
for num_nodes in run_params['num_nodes_list']:
description['sweeper_params']['num_nodes'] = num_nodes
conv_data[qd_type][num_nodes] = dict()
conv_data[qd_type][num_nodes]['error'] = np.zeros_like(run_params['dt_list'])
conv_data[qd_type][num_nodes]['niter'] = np.zeros_like(run_params['dt_list'], dtype='int')
conv_data[qd_type][num_nodes]['dt'] = run_params['dt_list']
for j, dt in enumerate(run_params['dt_list']):
print('Working on Qdelta=%s -- num. nodes=%i -- dt=%f' % (qd_type, num_nodes, dt))
description['level_params']['dt'] = dt
# instantiate the controller
controller = controller_nonMPI(
num_procs=1, controller_params=controller_params, description=description
)
# get initial values
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(run_params['t0'])
# call main function to get things done...
uend, stats = controller.run(u0=uinit, t0=run_params['t0'], Tend=run_params['tend'])
# compute exact solution and compare
err = get_sorted(stats, type='error_post_step', sortby='time')
niter = filter_stats(stats, type='niter')
conv_data[qd_type][num_nodes]['error'][j] = np.linalg.norm([err[j][1] for j in range(len(err))], np.inf)
conv_data[qd_type][num_nodes]['niter'][j] = round(statistics.mean(niter.values()))
print("Error is", conv_data[qd_type][num_nodes]['error'][j])
return conv_data
if __name__ == "__main__":
"""
Routine to run convergence tests for the fully implicit solver using specified example with various preconditioners, time step sizes and collocation node counts
Error data is stored in a dictionary and then pickled for use with the loglog_plot.py routine
"""
description, controller_params, run_params = setup()
conv_data = run(description, controller_params, run_params)
pickle.dump(conv_data, open("data/dae_conv_data.p", 'wb'))
print("Done")
| 4,361 | 38.297297 | 164 | py |
pySDC | pySDC-master/pySDC/projects/DAE/run/run_iteration_test.py | import numpy as np
import statistics
import pickle
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.projects.DAE.problems.simple_DAE import simple_dae_1
from pySDC.projects.DAE.problems.transistor_amplifier import one_transistor_amplifier
from pySDC.projects.DAE.sweepers.fully_implicit_DAE import fully_implicit_DAE
from pySDC.projects.DAE.misc.HookClass_DAE import error_hook
from pySDC.helpers.stats_helper import get_sorted
from pySDC.helpers.stats_helper import filter_stats
def setup():
"""
Routine to initialise iteration test parameters
"""
# initialize level parameters
level_params = dict()
level_params['restol'] = -1
    # Step size used for generating the first set of plots; chosen because the three
    # collocation methods investigated had converged in the convergence plots.
    # Results for other step sizes showed no real difference.
# level_params['dt'] = 1e-3
level_params['dt'] = 1e-4
# This comes as read-in for the sweeper class
sweeper_params = dict()
sweeper_params['quad_type'] = 'RADAU-RIGHT'
# This comes as read-in for the problem class
problem_params = dict()
    # Absolute termination tolerance for the implicit solver
# Exactly how this is used can be adjusted in update_nodes() in the fully implicit sweeper
problem_params['newton_tol'] = 1e-7
problem_params['nvars'] = 3
# This comes as read-in for the step class
step_params = dict()
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 30
controller_params['hook_class'] = error_hook
# Fill description dictionary for easy hierarchy creation
description = dict()
description['problem_class'] = simple_dae_1
description['problem_params'] = problem_params
description['sweeper_class'] = fully_implicit_DAE
description['sweeper_params'] = sweeper_params
description['level_params'] = level_params
description['step_params'] = step_params
# set simulation parameters
run_params = dict()
run_params['t0'] = 0.0
run_params['tend'] = 0.1
max_iter_low = 4
max_iter_high = 6
run_params['max_iter_list'] = list(range(max_iter_low, max_iter_high))
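    # i.e. max_iter_list == [4, 5], since range excludes the upper bound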
run_params['qd_list'] = ['IE', 'LU']
run_params['num_nodes_list'] = [3]
return description, controller_params, run_params
def run(description, controller_params, run_params):
"""
Routine to run simulation
"""
conv_data = dict()
for qd_type in run_params['qd_list']:
description['sweeper_params']['QI'] = qd_type
conv_data[qd_type] = dict()
for num_nodes in run_params['num_nodes_list']:
description['sweeper_params']['num_nodes'] = num_nodes
conv_data[qd_type][num_nodes] = dict()
conv_data[qd_type][num_nodes]['error'] = np.zeros_like(run_params['max_iter_list'], dtype=float)
conv_data[qd_type][num_nodes]['residual'] = np.zeros_like(run_params['max_iter_list'], dtype=float)
conv_data[qd_type][num_nodes]['niter'] = np.zeros_like(run_params['max_iter_list'], dtype='int')
conv_data[qd_type][num_nodes]['max_iter'] = run_params['max_iter_list']
for i, max_iter in enumerate(run_params['max_iter_list']):
print('Working on Qdelta=%s -- num. nodes=%i -- max. iter.=%i' % (qd_type, num_nodes, max_iter))
description['step_params']['maxiter'] = max_iter
# instantiate the controller
controller = controller_nonMPI(
num_procs=1, controller_params=controller_params, description=description
)
# get initial values
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(run_params['t0'])
# call main function to get things done...
uend, stats = controller.run(u0=uinit, t0=run_params['t0'], Tend=run_params['tend'])
# compute exact solution and compare
err = get_sorted(stats, type='error_post_step', sortby='time')
residual = get_sorted(stats, type='residual_post_step', sortby='time')
niter = filter_stats(stats, type='niter')
conv_data[qd_type][num_nodes]['error'][i] = np.linalg.norm([err[j][1] for j in range(len(err))], np.inf)
conv_data[qd_type][num_nodes]['residual'][i] = np.linalg.norm(
[residual[j][1] for j in range(len(residual))], np.inf
)
conv_data[qd_type][num_nodes]['niter'][i] = round(statistics.mean(niter.values()))
print(
"Error=",
conv_data[qd_type][num_nodes]['error'][i],
" Residual=",
conv_data[qd_type][num_nodes]['residual'][i],
)
return conv_data
if __name__ == "__main__":
"""
Routine to run simple differential-algebraic-equation example with various max iters, preconditioners and collocation node counts
In contrast to run_convergence_test.py, in which max iters is set large enough to not be the limiting factor, max iters is varied for a fixed time step and the improvement in the error is measured
Error data is stored in a dictionary and then pickled for use with the loglog_plot.py routine
"""
description, controller_params, run_params = setup()
conv_data = run(description, controller_params, run_params)
pickle.dump(conv_data, open("data/dae_iter_data.p", 'wb'))
print("Done")
| 5,652 | 42.152672 | 252 | py |
pySDC | pySDC-master/pySDC/projects/DAE/run/synchronous_machine_playground.py | from pathlib import Path
import numpy as np
import pickle
import statistics
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.projects.DAE.problems.synchronous_machine import synchronous_machine_infinite_bus
from pySDC.projects.DAE.sweepers.fully_implicit_DAE import fully_implicit_DAE
from pySDC.projects.DAE.misc.HookClass_DAE import approx_solution_hook
from pySDC.projects.DAE.misc.HookClass_DAE import error_hook
from pySDC.helpers.stats_helper import get_sorted
from pySDC.helpers.stats_helper import filter_stats
def main():
"""
A testing ground for the synchronous machine model
"""
# initialize level parameters
level_params = dict()
level_params['restol'] = 1e-7
level_params['dt'] = 1e-1
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['num_nodes'] = 3
sweeper_params['QI'] = 'LU'
# initialize problem parameters
problem_params = dict()
    problem_params['newton_tol'] = 1e-3  # tolerance for implicit solver
problem_params['nvars'] = 14
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 100
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 30
controller_params['hook_class'] = [error_hook, approx_solution_hook]
# Fill description dictionary for easy hierarchy creation
description = dict()
description['problem_class'] = synchronous_machine_infinite_bus
description['problem_params'] = problem_params
description['sweeper_class'] = fully_implicit_DAE
description['sweeper_params'] = sweeper_params
description['level_params'] = level_params
description['step_params'] = step_params
Path("data").mkdir(parents=True, exist_ok=True)
# instantiate the controller
controller = controller_nonMPI(num_procs=1, controller_params=controller_params, description=description)
# set time parameters
t0 = 0.0
Tend = 1.0
# get initial values on finest level
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(t0)
# call main function to get things done...
uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
# check error (only available if reference solution was provided)
# err = get_sorted(stats, type='error_post_step', sortby='time')
# err = np.linalg.norm([err[i][1] for i in range(len(err))], np.inf)
# print(f"Error is {err}")
uend_ref = [
8.30823565e-01,
-4.02584174e-01,
1.16966755e00,
9.47592808e-01,
-3.68076863e-01,
-3.87492326e-01,
-7.77837831e-01,
-1.67347611e-01,
1.34810867e00,
5.46223705e-04,
1.29690691e-02,
-8.00823474e-02,
3.10281509e-01,
9.94039645e-01,
]
err = np.linalg.norm(uend - uend_ref, np.inf)
assert np.isclose(err, 0, atol=1e-4), "Error too large."
# store results
sol = get_sorted(stats, type='approx_solution', sortby='time')
sol_dt = np.array([sol[i][0] for i in range(len(sol))])
sol_data = np.array([[sol[j][1][i] for j in range(len(sol))] for i in range(problem_params['nvars'])])
niter = filter_stats(stats, type='niter')
niter = np.fromiter(niter.values(), int)
data = dict()
data['dt'] = sol_dt
data['solution'] = sol_data
data['niter'] = round(statistics.mean(niter))
pickle.dump(data, open("data/dae_conv_data.p", 'wb'))
print("Done")
if __name__ == "__main__":
main()
| 3,592 | 31.369369 | 109 | py |
pySDC | pySDC-master/pySDC/projects/DAE/problems/synchronous_machine.py | import numpy as np
import warnings
from scipy.interpolate import interp1d
from pySDC.projects.DAE.misc.ProblemDAE import ptype_dae
from pySDC.implementations.datatype_classes.mesh import mesh
class synchronous_machine_infinite_bus(ptype_dae):
r"""
Synchronous machine model from Kundur (equiv. circuits fig. 3.18 in [1]_) attached to infinite bus. The machine can be
represented as two different circuits at the direct-axis and the quadrature-axis. Detailed information can be found in
[1]_. The system of differential-algebraic equations (DAEs) consists of the equations for
- the stator voltage equations
.. math::
\frac{d \Psi_d (t)}{dt} = \omega_b (v_d + R_a i_d (t) + \omega_r \Psi_q (t)),
.. math::
\frac{d \Psi_q (t)}{dt} = \omega_b (v_q + R_a i_q (t) - \omega_r \Psi_d (t)),
.. math::
\frac{d \Psi_0 (t)}{dt} = \omega_b (v_0 + R_a i_0 (t)),
- the rotor voltage equations
.. math::
\frac{d \Psi_F (t)}{dt} = \omega_b (v_F - R_F i_F (t)),
.. math::
\frac{d \Psi_D (t)}{dt} = -\omega_b (R_D i_D (t)),
.. math::
\frac{d \Psi_{Q1} (t)}{dt} = -\omega_b (R_{Q1} i_{Q1} (t)),
.. math::
\frac{d \Psi_{Q2} (t)}{dt} = -\omega_b (R_{Q2} i_{Q2} (t)),
- the stator flux linkage equations
.. math::
\Psi_d (t) = L_d i_d (t) + L_{md} i_F (t) + L_{md} i_D (t),
.. math::
\Psi_q (t) = L_q i_q (t) + L_{mq} i_{Q1} (t) + L_{mq} i_{Q2} (t),
.. math::
\Psi_0 (t) = L_0 i_0 (t)
- the rotor flux linkage equations
.. math::
        \Psi_F = L_F i_F (t) + L_{md} i_D (t) + L_{md} i_d (t),
    .. math::
        \Psi_D = L_{md} i_F (t) + L_D i_D (t) + L_{md} i_d (t),
.. math::
\Psi_{Q1} = L_{Q1} i_{Q1} (t) + L_{mq} i_{Q2} + L_{mq} i_q (t),
.. math::
\Psi_{Q2} = L_{mq} i_{Q1} (t) + L_{Q2} i_{Q2} + L_{mq} i_q (t),
- the swing equations
.. math::
\frac{d \delta (t)}{dt} = \omega_b (\omega_r (t) - 1),
.. math::
\frac{d \omega_r (t)}{dt} = \frac{1}{2 H}(T_m - T_e - K_D \omega_b (\omega_r (t) - 1)).
The voltages :math:`v_d`, :math:`v_q` can be updated via the following procedure. The stator currents are mapped
to the complex-valued external reference frame current :math:`I` with
.. math::
\Re(I) = i_d (t) \sin(\delta (t)) + i_q (t) \cos(\delta (t)),
.. math::
\Im(I) = -i_d (t) \cos(\delta (t)) + i_q (t) \sin(\delta (t)).
The voltage :math:`V` across the stator terminals can then be computed as a complex value via
.. math::
V_{comp} = E_B + Z_{line} (\Re(I) + i \Im(I))
with impedance :math:`Z_{line}\in\mathbb{C}`. Then, :math:`v_d`, :math:`v_q` can be computed via the network equations
.. math::
v_d = \Re(V_{comp}) \sin(\delta (t)) - \Im(V_{comp}) \cos(\delta (t)),
.. math::
v_q = \Re(V_{comp}) \cos(\delta (t)) + \Im(V_{comp}) \sin(\delta (t)),
which describes the connection between the machine and the infinite bus.
Parameters
----------
nvars : int
Number of unknowns of the system of DAEs.
newton_tol : float
Tolerance for Newton solver.
Attributes
----------
L_d: float
    Inductance of inductor :math:`L_d`, see [1]_.
L_q: float
    Inductance of inductor :math:`L_q`, see [1]_.
L_F: float
    Inductance of inductor :math:`L_F`, see [1]_.
L_D: float
    Inductance of inductor :math:`L_D`, see [1]_.
L_Q1: float
    Inductance of inductor :math:`L_{Q1}`, see [1]_.
L_Q2: float
    Inductance of inductor :math:`L_{Q2}`, see [1]_.
L_md: float
    Inductance of inductor :math:`L_{md}`, see [1]_.
L_mq: float
    Inductance of inductor :math:`L_{mq}`, see [1]_.
R_s: float
Resistance of resistor :math:`R_s`, see [1]_.
R_F: float
Resistance of resistor :math:`R_F`, see [1]_.
R_D: float
Resistance of resistor :math:`R_D`, see [1]_.
R_Q1: float
Resistance of resistor :math:`R_{Q1}`, see [1]_.
R_Q2: float
Resistance of resistor :math:`R_{Q2}`, see [1]_.
omega_b: float
Base frequency of the rotor in mechanical :math:`rad/s`.
H_: float
Defines the per unit inertia constant.
K_D: float
Factor that accounts for damping losses.
Z_line: complex
Impedance of the transmission line that connects the infinite bus to the generator.
E_B: float
Voltage of infinite bus.
v_F: float
Voltage at the field winding.
T_m: float
Defines the mechanical torque applied to the rotor shaft.
References
----------
.. [1] P. Kundur, N. J. Balu, M. G. Lauby. Power system stability and control. The EPRI power system series (1994).
"""
def __init__(self, nvars, newton_tol):
super(synchronous_machine_infinite_bus, self).__init__(nvars, newton_tol)
# load reference solution
# the data file must be generated and stored under misc/data; when it is available, set self.t_end = t[-1]
# data = np.load(r'pySDC/projects/DAE/misc/data/synch_gen.npy')
# x = data[:, 0]
# y = data[:, 1:]
# self.u_ref = interp1d(x, y, kind='cubic', axis=0, fill_value='extrapolate')
self.t_end = 0.0
self.L_d = 1.8099
self.L_q = 1.76
self.L_F = 1.8247
self.L_D = 1.8312
self.L_Q1 = 2.3352
self.L_Q2 = 1.735
self.L_md = 1.6599
self.L_mq = 1.61
self.R_s = 0.003
self.R_F = 0.0006
self.R_D = 0.0284
self.R_Q1 = 0.0062
self.R_Q2 = 0.0237
self.omega_b = 376.9911184307752
self.H_ = 3.525
self.K_D = 0.0
# Line impedance
self.Z_line = -0.2688022164909709 - 0.15007173591230372j
# Infinite bus voltage
self.E_B = 0.7
# Rotor (field) operating voltages
# These are modelled as constants. Intuition: permanent magnet as rotor
self.v_F = 8.736809687330562e-4
self.T_m = 0.854
def eval_f(self, u, du, t):
r"""
Routine to evaluate the implicit representation of the problem, i.e., :math:`F(u, u', t)`.
Parameters
----------
u : dtype_u
Current values of the numerical solution at time t.
du : dtype_u
Current values of the derivative of the numerical solution at time t.
t : float
Current time of the numerical solution.
Returns
-------
f : dtype_f
Current value of the right-hand side of f (which includes 14 components).
"""
# simulate torque change at t = 0.05
if t >= 0.05:
self.T_m = 0.354
f = self.dtype_f(self.init)
# u = [psi_d, psi_q, psi_F, psi_D, psi_Q1, psi_Q2,
#      i_d, i_q, i_F, i_D, i_Q1, i_Q2,
#      delta_r, omega_m]
# extract variables for readability
# unknowns at time t (differential states and algebraic variables)
psi_d, psi_q, psi_F, psi_D, psi_Q1, psi_Q2 = u[0], u[1], u[2], u[3], u[4], u[5]
i_d, i_q, i_F, i_D, i_Q1, i_Q2 = u[6], u[7], u[8], u[9], u[10], u[11]
delta_r = u[12]
omega_m = u[13]
# differential components
# these result directly from the voltage equations, introduced e.g. pg. 145 Krause
dpsi_d, dpsi_q, dpsi_F, dpsi_D, dpsi_Q1, dpsi_Q2 = du[0], du[1], du[2], du[3], du[4], du[5]
ddelta_r = du[12]
domega_m = du[13]
# Network current
I_Re = i_d * np.sin(delta_r) + i_q * np.cos(delta_r)
I_Im = -i_d * np.cos(delta_r) + i_q * np.sin(delta_r)
# Machine terminal voltages in network coordinates
# terminal voltage = infinite bus voltage plus the drop across the line impedance
V_comp = self.E_B - self.Z_line * (-1) * (I_Re + 1j * I_Im)
# Terminal voltages in dq0 coordinates
v_d = np.real(V_comp) * np.sin(delta_r) - np.imag(V_comp) * np.cos(delta_r)
v_q = np.real(V_comp) * np.cos(delta_r) + np.imag(V_comp) * np.sin(delta_r)
# algebraic variables are i_d, i_q, i_F, i_D, i_Q1, i_Q2
f[:] = (
# differential generator
-dpsi_d + self.omega_b * (v_d - self.R_s * i_d + omega_m * psi_q),
-dpsi_q + self.omega_b * (v_q - self.R_s * i_q - omega_m * psi_d),
-dpsi_F + self.omega_b * (self.v_F - self.R_F * i_F),
-dpsi_D - self.omega_b * self.R_D * i_D,
-dpsi_Q1 - self.omega_b * self.R_Q1 * i_Q1,
-dpsi_Q2 - self.omega_b * self.R_Q2 * i_Q2,
-ddelta_r + self.omega_b * (omega_m - 1),
-domega_m
+ 1 / (2 * self.H_) * (self.T_m - (psi_q * i_d - psi_d * i_q) - self.K_D * self.omega_b * (omega_m - 1)),
# algebraic generator
-psi_d + self.L_d * i_d + self.L_md * i_F + self.L_md * i_D,
-psi_q + self.L_q * i_q + self.L_mq * i_Q1 + self.L_mq * i_Q2,
-psi_F + self.L_md * i_d + self.L_F * i_F + self.L_md * i_D,
-psi_D + self.L_md * i_d + self.L_md * i_F + self.L_D * i_D,
-psi_Q1 + self.L_mq * i_q + self.L_Q1 * i_Q1 + self.L_mq * i_Q2,
-psi_Q2 + self.L_mq * i_q + self.L_mq * i_Q1 + self.L_Q2 * i_Q2,
)
return f
def u_exact(self, t):
"""
Approximation of the exact solution generated by spline interpolation of an extremely accurate numerical reference solution.
Parameters
----------
t : float
The time of the reference solution.
Returns
-------
me : dtype_u
The reference solution as mesh object. It contains fixed initial conditions at initial time (which includes
14 components).
"""
me = self.dtype_u(self.init)
if t == 0:
psi_d = 0.7770802016688648
psi_q = -0.6337183129426077
psi_F = 1.152966888216155
psi_D = 0.9129958488040036
psi_Q1 = -0.5797082294536264
psi_Q2 = -0.579708229453273
i_d = -0.9061043142342473
i_q = -0.36006722326230495
i_F = 1.45613494788927
i_D = 0.0
i_Q1 = 0.0
i_Q2 = 0.0
delta_r = 39.1 * np.pi / 180
omega_0 = 2 * np.pi * 60
omega_b = 2 * np.pi * 60
omega_m = omega_0 / omega_b  # = omega_r for a two-pole machine (pf = 2)
me[:] = (psi_d, psi_q, psi_F, psi_D, psi_Q1, psi_Q2, i_d, i_q, i_F, i_D, i_Q1, i_Q2, delta_r, omega_m)
elif t < self.t_end:
me[:] = self.u_ref(t)
else:
warnings.warn("Requested time exceeds domain of the reference solution. Returning zero.")
me[:] = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
return me
# class synchronous_machine_pi_line(ptype_dae):
# """
# Synchronous machine model from Kundur (equiv. circuits fig. 3.18)
# attached to pi line with resistive load
# This model does not work yet but is included as a starting point for developing similar models and in the hope that somebody will figure out why it does not work
# """
# def __init__(self, problem_params, dtype_u=mesh, dtype_f=mesh):
# super(synchronous_machine_pi_line, self).__init__(problem_params, dtype_u, dtype_f)
# # load reference solution
# # data file must be generated and stored under misc/data and self.t_end = t[-1]
# # data = np.load(r'pySDC/projects/DAE/misc/data/synch_gen.npy')
# # x = data[:, 0]
# # y = data[:, 1:]
# # self.u_ref = interp1d(x, y, kind='cubic', axis=0, fill_value='extrapolate')
# self.t_end = 0.0
# self.L_d = 1.8099
# self.L_q = 1.76
# self.L_F = 1.8247
# self.L_D = 1.8312
# self.L_Q1 = 2.3352
# self.L_Q2 = 1.735
# self.L_md = 1.6599
# self.L_mq = 1.61
# self.R_s = 0.003
# self.R_F = 0.0006
# self.R_D = 0.0284
# self.R_Q1 = 0.0062
# self.R_Q2 = 0.0237
# self.omega_b = 376.9911184307752
# self.H_ = 3.525
# self.K_D = 0.0
# # pi line
# self.C_pi = 0.000002
# self.R_pi = 0.02
# self.L_pi = 0.00003
# # load
# self.R_L = 0.75
# self.v_F = 8.736809687330562e-4
# self.v_D = 0
# self.v_Q1 = 0
# self.v_Q2 = 0
# self.T_m = 0.854
# def eval_f(self, u, du, t):
# """
# Routine to evaluate the implicit representation of the problem i.e. F(u', u, t)
# Args:
# u (dtype_u): the current values. This parameter has been "hijacked" to contain [u', u] in this case to enable evaluation of the implicit representation
# t (float): current time
# Returns:
# Current value of F(), 21 components
# """
# # simulate torque change at t = 0.05
# if t >= 0.05:
# self.T_m = 0.354
# f = self.dtype_f(self.init)
# # u = [psi_d, psi_q, psi_F, psi_D, psi_Q1, psi_Q2,
# # i_d, i_q, i_F, i_D, i_Q1, i_Q2
# # omega_m,
# # v_d, v_q,
# # iz_d, iz_q, il_d, il_q, vl_d, vl_q]
# # extract variables for readability
# # algebraic components
# psi_d, psi_q, psi_F, psi_D, psi_Q1, psi_Q2 = u[0], u[1], u[2], u[3], u[4], u[5]
# i_d, i_q, i_F, i_D, i_Q1, i_Q2 = u[6], u[7], u[8], u[9], u[10], u[11]
# # delta_r = u[12]
# omega_m = u[12]
# v_d, v_q = u[13], u[14]
# iz_d, iz_q, il_d, il_q, vl_d, vl_q = u[15], u[16], u[17], u[18], u[19], u[20]
# # differential components
# # these result directly from the voltage equations, introduced e.g. pg. 145 Krause
# dpsi_d, dpsi_q, dpsi_F, dpsi_D, dpsi_Q1, dpsi_Q2 = du[0], du[1], du[2], du[3], du[4], du[5]
# # ddelta_r = du[12]
# domega_m = du[12]
# dv_d, dv_q = du[13], du[14]
# diz_d, diz_q, dvl_d, dvl_q = du[15], du[16],du[19], du[20]
# # algebraic variables are i_d, i_q, i_F, i_D, i_Q1, i_Q2, il_d, il_q
# f[:] = (
# # differential generator
# dpsi_d + self.omega_b * (v_d - self.R_s * i_d + omega_m * psi_q),
# dpsi_q + self.omega_b * (v_q - self.R_s * i_q - omega_m * psi_d),
# dpsi_F + self.omega_b * (self.v_F - self.R_F * i_F),
# dpsi_D + self.omega_b * (self.v_D - self.R_D * i_D),
# dpsi_Q1 + self.omega_b * (self.v_Q1 - self.R_Q1 * i_Q1),
# dpsi_Q2 + self.omega_b * (self.v_Q2 - self.R_Q2 * i_Q2),
# -domega_m + 1 / (2 * self.H_) * (self.T_m - (psi_q * i_d - psi_d * i_q) - self.K_D * self.omega_b * (omega_m-1)),
# # differential pi line
# -dv_d + omega_m * v_q + 2/self.C_pi * (i_d - iz_d),
# -dv_q - omega_m * v_d + 2/self.C_pi * (i_q - iz_q),
# -dvl_d + omega_m * vl_q + 2/self.C_pi * (iz_d - il_d),
# -dvl_q - omega_m * vl_d + 2/self.C_pi * (iz_q - il_q),
# -diz_d - self.R_pi/self.L_pi * iz_d + omega_m * iz_q + (v_d - vl_d) / self.L_pi,
# -diz_q - self.R_pi/self.L_pi * iz_q - omega_m * iz_d + (v_q - vl_q) / self.L_pi,
# # algebraic generator
# psi_d + self.L_d * i_d + self.L_md * i_F + self.L_md * i_D,
# psi_q + self.L_q * i_q + self.L_mq * i_Q1 + self.L_mq * i_Q2,
# psi_F + self.L_md * i_d + self.L_F * i_F + self.L_md * i_D,
# psi_D + self.L_md * i_d + self.L_md * i_F + self.L_D * i_D,
# psi_Q1 + self.L_mq * i_q + self.L_Q1 * i_Q1 + self.L_mq * i_Q2,
# psi_Q2 + self.L_mq * i_q + self.L_mq * i_Q1 + self.L_Q2 * i_Q2,
# # algebraic pi line
# -il_d + vl_d/self.R_L,
# -il_q + vl_q/self.R_L,
# )
# return f
# def u_exact(self, t):
# """
# Approximation of the exact solution generated by spline interpolation of an extremely accurate numerical reference solution.
# Args:
# t (float): current time
# Returns:
# Mesh containing fixed initial value, 5 components
# """
# me = self.dtype_u(self.init)
# if t == 0:
# psi_d = 0.3971299
# psi_q = 0.9219154
# psi_F = 0.8374232
# psi_D = 0.5795112
# psi_Q1 = 0.8433430
# psi_Q2 = 0.8433430
# i_d = -1.215876
# i_q = 0.5238156
# i_F = 1.565
# i_D = 0
# i_Q1 = 0
# i_Q2 = 0
# v_d = -0.9362397
# v_q = 0.4033005
# omega_m = 1.0
# # pi line
# iz_d = -1.215875
# iz_q = 0.5238151
# il_d = -1.215875
# il_q = 0.5238147
# vl_d = -0.9119063
# vl_q = 0.3928611
# me[:] = (psi_d, psi_q, psi_F, psi_D, psi_Q1, psi_Q2,
# i_d, i_q, i_F, i_D, i_Q1, i_Q2,
# omega_m,
# v_d, v_q,
# iz_d, iz_q, il_d, il_q, vl_d, vl_q)
# elif t < self.t_end:
# me[:] = self.u_ref(t)
# else:
# warnings.warn("Requested time exceeds domain of the reference solution. Returning zero.")
# me[:] = (0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)
# return me
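# Illustrative sketch (plain numpy, standalone; helper name is hypothetical): the
# rotation between the machine's dq frame and the network frame used in eval_f
# above, evaluated at a single operating point. The default arguments reuse the
# initial values from u_exact(0).
def _demo_network_coupling(i_d=-0.9061043142342473, i_q=-0.36006722326230495, delta_r=39.1 * np.pi / 180):
    E_B = 0.7
    Z_line = -0.2688022164909709 - 0.15007173591230372j
    # dq currents -> complex current in the external reference frame
    I_Re = i_d * np.sin(delta_r) + i_q * np.cos(delta_r)
    I_Im = -i_d * np.cos(delta_r) + i_q * np.sin(delta_r)
    # terminal voltage: infinite bus voltage plus the drop over the line impedance
    V_comp = E_B + Z_line * (I_Re + 1j * I_Im)
    # network voltage -> dq terminal voltages via the inverse rotation
    v_d = np.real(V_comp) * np.sin(delta_r) - np.imag(V_comp) * np.cos(delta_r)
    v_q = np.real(V_comp) * np.cos(delta_r) + np.imag(V_comp) * np.sin(delta_r)
    return v_d, v_q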
| 17,645 | 36.624733 | 167 | py |
pySDC | pySDC-master/pySDC/projects/DAE/problems/simple_DAE.py | import warnings
import numpy as np
from scipy.interpolate import interp1d
from pySDC.projects.DAE.misc.ProblemDAE import ptype_dae
class pendulum_2d(ptype_dae):
r"""
Example implementing the well known 2D pendulum as a first order differential-algebraic equation (DAE) of index 3.
The DAE system is given by the equations
.. math::
x' = u,
.. math::
\frac{d}{dt} \frac{\partial}{\partial u} L = \frac{\partial L}{\partial x} + f + G^{T} \lambda,
.. math::
0 = \phi.
The pendulum is used in most introductory literature on DAEs, for example on page 8 of [1]_.
Parameters
----------
nvars : int
Number of unknowns of the system of DAEs.
newton_tol : float
Tolerance for Newton solver.
Attributes
----------
t_end: float
The end time at which the reference solution is determined.
References
----------
.. [1] E. Hairer, C. Lubich, M. Roche. The numerical solution of differential-algebraic systems by Runge-Kutta methods.
Lect. Notes Math. (1989).
"""
def __init__(self, nvars, newton_tol):
"""Initialization routine"""
super().__init__(nvars, newton_tol)
# load reference solution
# the data file must be generated and stored under misc/data; when it is available, set self.t_end = t[-1]
# data = np.load(r'pySDC/projects/DAE/misc/data/pendulum.npy')
# t = data[:, 0]
# solution = data[:, 1:]
# self.u_ref = interp1d(t, solution, kind='cubic', axis=0, fill_value='extrapolate')
self.t_end = 0.0
def eval_f(self, u, du, t):
r"""
Routine to evaluate the implicit representation of the problem, i.e., :math:`F(u, u', t)`.
Parameters
----------
u : dtype_u
Current values of the numerical solution at time t.
du : dtype_u
Current values of the derivative of the numerical solution at time t.
t : float
Current time of the numerical solution.
Returns
-------
f : dtype_f
Current value of the right-hand side of f (which includes five components).
"""
g = 9.8
# The last element of u is a Lagrange multiplier; it represents the constraint force that keeps the
# pendulum on the unit circle, while gravity enters separately through g
f = self.dtype_f(self.init)
f[:] = (du[0] - u[2], du[1] - u[3], du[2] + u[4] * u[0], du[3] + u[4] * u[1] + g, u[0] ** 2 + u[1] ** 2 - 1)
return f
def u_exact(self, t):
"""
Approximation of the exact solution generated by spline interpolation of an extremely accurate numerical reference solution.
Parameters
----------
t : float
The time of the reference solution.
Returns
-------
me : dtype_u
The reference solution as mesh object. It contains fixed initial conditions at initial time.
"""
me = self.dtype_u(self.init)
if t == 0:
me[:] = (-1, 0, 0, 0, 0)
elif t < self.t_end:
me[:] = self.u_ref(t)
else:
warnings.warn("Requested time exceeds domain of the reference solution. Returning zero.")
me[:] = (0, 0, 0, 0, 0)
return me
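# Quick consistency sketch (illustration only, helper name is hypothetical): the
# initial values returned by pendulum_2d.u_exact(0) satisfy the position constraint
# as well as its first two time derivatives, the "hidden" constraints of the
# index-3 formulation obtained by differentiating x^2 + y^2 - 1 = 0.
def _check_pendulum_consistency():
    g = 9.8
    x, y, u, v, lamb = -1.0, 0.0, 0.0, 0.0, 0.0
    pos = x**2 + y**2 - 1  # position constraint
    vel = x * u + y * v  # first hidden constraint (velocity level)
    acc = u**2 + v**2 - lamb * (x**2 + y**2) - g * y  # second hidden constraint (acceleration level)
    return pos, vel, acc  # all three vanish for these initial conditions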
class simple_dae_1(ptype_dae):
r"""
Example implementing a smooth linear index-2 differential-algebraic equation (DAE) with known analytical solution.
The DAE system is given by
.. math::
\frac{d u_1 (t)}{dt} = (\alpha - \frac{1}{2 - t}) u_1 (t) + (2-t) \alpha z (t) + \frac{3 - t}{2 - t},
.. math::
\frac{d u_2 (t)}{dt} = \frac{1 - \alpha}{t - 2} u_1 (t) - u_2 (t) + (\alpha - 1) z (t) + 2 e^{t},
.. math::
0 = (t + 2) u_1 (t) + (t^{2} - 4) u_2 (t) - (t^{2} + t - 2) e^{t}.
The exact solution of this system is
.. math::
u_1 (t) = u_2 (t) = e^{t},
.. math::
z (t) = -\frac{e^{t}}{2 - t}.
This example is commonly used to test that numerical implementations are functioning correctly. See, for example,
page 267 of [1]_.
Parameters
----------
nvars : int
Number of unknowns of the system of DAEs.
newton_tol : float
Tolerance for Newton solver.
References
----------
.. [1] U. Ascher, L. R. Petzold. Computer method for ordinary differential equations and differential-algebraic
equations. Society for Industrial and Applied Mathematics (1998).
"""
def eval_f(self, u, du, t):
r"""
Routine to evaluate the implicit representation of the problem, i.e., :math:`F(u, u', t)`.
Parameters
----------
u : dtype_u
Current values of the numerical solution at time t.
du : dtype_u
Current values of the derivative of the numerical solution at time t.
t : float
Current time of the numerical solution.
Returns
-------
f : dtype_f
Current value of the right-hand side of f (which includes three components).
"""
# Smooth index-2 DAE pg. 267 Ascher and Petzold (also the first example in KDC Minion paper)
a = 10.0
f = self.dtype_f(self.init)
f[:] = (
-du[0] + (a - 1 / (2 - t)) * u[0] + (2 - t) * a * u[2] + np.exp(t) * (3 - t) / (2 - t),
-du[1] + (1 - a) / (t - 2) * u[0] - u[1] + (a - 1) * u[2] + 2 * np.exp(t),
(t + 2) * u[0] + (t**2 - 4) * u[1] - (t**2 + t - 2) * np.exp(t),
)
return f
def u_exact(self, t):
"""
Routine for the exact solution.
Parameters
----------
t : float
The time of the reference solution.
Returns
-------
me : dtype_u
The reference solution as mesh object containing three components.
"""
me = self.dtype_u(self.init)
me[:] = (np.exp(t), np.exp(t), -np.exp(t) / (2 - t))
return me
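# Sanity-check sketch (illustration only, helper name is hypothetical): verify with
# plain numpy that u_1 = u_2 = e^t, z = -e^t / (2 - t) annihilates the residual of
# simple_dae_1.eval_f, using the derivative z'(t) = -e^t (3 - t) / (2 - t)^2.
def _check_simple_dae_1(t=0.5, a=10.0):
    u = (np.exp(t), np.exp(t), -np.exp(t) / (2 - t))
    du = (np.exp(t), np.exp(t), -np.exp(t) * (3 - t) / (2 - t) ** 2)
    res = (
        -du[0] + (a - 1 / (2 - t)) * u[0] + (2 - t) * a * u[2] + np.exp(t) * (3 - t) / (2 - t),
        -du[1] + (1 - a) / (t - 2) * u[0] - u[1] + (a - 1) * u[2] + 2 * np.exp(t),
        (t + 2) * u[0] + (t**2 - 4) * u[1] - (t**2 + t - 2) * np.exp(t),
    )
    return res  # every component is zero up to round-off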
class problematic_f(ptype_dae):
r"""
Standard example of a very simple fully implicit index-2 differential algebraic equation (DAE) that is not
numerically solvable for certain choices of the parameter :math:`\eta`. The DAE system is given by
.. math::
y (t) + \eta t z (t) = f(t),
.. math::
\frac{d y(t)}{dt} + \eta t \frac{d z(t)}{dt} + (1 + \eta) z (t) = g (t).
See, for example, page 264 of [1]_.
Parameters
----------
nvars : int
Number of unknowns of the system of DAEs.
newton_tol : float
Tolerance for Newton solver.
Attributes
----------
eta: float
Specific parameter of the problem.
References
----------
.. [1] U. Ascher, L. R. Petzold. Computer method for ordinary differential equations and differential-algebraic
equations. Society for Industrial and Applied Mathematics (1998).
"""
def __init__(self, nvars, newton_tol, eta=1):
"""Initialization routine"""
super().__init__(nvars, newton_tol)
self._makeAttributeAndRegister('eta', localVars=locals())
def eval_f(self, u, du, t):
r"""
Routine to evaluate the implicit representation of the problem, i.e., :math:`F(u, u', t)`.
Parameters
----------
u : dtype_u
Current values of the numerical solution at time t.
du : dtype_u
Current values of the derivative of the numerical solution at time t.
t : float
Current time of the numerical solution.
Returns
-------
f : dtype_f
Current value of the right-hand side of f (which includes two components).
"""
f = self.dtype_f(self.init)
f[:] = (
u[0] + self.eta * t * u[1] - np.sin(t),
du[0] + self.eta * t * du[1] + (1 + self.eta) * u[1] - np.cos(t),
)
return f
def u_exact(self, t):
"""
Routine for the exact solution.
Parameters
----------
t : float
The time of the reference solution.
Returns
-------
me : dtype_u
The reference solution as mesh object containing two components.
"""
me = self.dtype_u(self.init)
me[:] = (np.sin(t), 0)
return me
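# Sanity-check sketch (illustration only, helper name is hypothetical): the exact
# solution y = sin(t), z = 0 of problematic_f satisfies both equations for any eta,
# since y + eta*t*z - sin(t) = 0 and y' + eta*t*z' + (1 + eta)*z - cos(t) = 0.
def _check_problematic_f(t=0.3, eta=1.0):
    y, z = np.sin(t), 0.0
    dy, dz = np.cos(t), 0.0
    return (y + eta * t * z - np.sin(t), dy + eta * t * dz + (1 + eta) * z - np.cos(t))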
| 8,320 | 30.281955 | 132 | py |
pySDC | pySDC-master/pySDC/projects/DAE/problems/transistor_amplifier.py | import warnings
import numpy as np
from scipy.interpolate import interp1d
from pySDC.projects.DAE.misc.ProblemDAE import ptype_dae
# Helper function
def _transistor(u_in):
return 1e-6 * (np.exp(u_in / 0.026) - 1)
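# Small illustration (helper name and sample values are arbitrary): the transistor
# characteristic is a shifted exponential, the main source of stiffness in both
# amplifier models below -- every additional 0.1 V multiplies the current by
# roughly a factor of exp(0.1 / 0.026), i.e. about 47.
def _demo_transistor_characteristic():
    u_in = np.array([0.0, 0.1, 0.2, 0.3])
    return _transistor(u_in)  # approx. [0.0, 4.6e-05, 2.2e-03, 1.0e-01]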
class one_transistor_amplifier(ptype_dae):
r"""
The one transistor amplifier example from pg. 404 in [1]_. The problem is an index-1 differential-algebraic equation
(DAE) having the equations
.. math::
\frac{U_e (t)}{R_0} - \frac{U_1 (t)}{R_0} + C_1 (\frac{d U_2 (t)}{dt} - \frac{d U_1 (t)}{dt}) = 0,
.. math::
\frac{U_b}{R_2} - U_2 (t) (\frac{1}{R_1} + \frac{1}{R_2}) + C_1 (\frac{d U_1 (t)}{dt} - \frac{d U_2 (t)}{dt}) - 0.01 f(U_2 (t) - U_3 (t)) = 0,
.. math::
f(U_2 (t) - U_3 (t)) - \frac{U_3 (t)}{R_3} - C_2 \frac{d U_3 (t)}{dt} = 0,
.. math::
\frac{U_b}{R_4} - \frac{U_4 (t)}{R_4} + C_3 (\frac{d U_5 (t)}{dt} - \frac{d U_4 (t)}{dt}) - 0.99 f(U_2 (t) - U_3 (t)) = 0,
.. math::
-\frac{U_5 (t)}{R_5} + C_3 (\frac{d U_4 (t)}{dt} - \frac{d U_5 (t)}{dt}) = 0,
with
.. math::
f(U(t)) = 10^{-6} (exp(\frac{U (t)}{0.026}) - 1).
The initial signal :math:`U_e (t)` is defined as
.. math::
U_e (t) = 0.4 \sin(200 \pi t).
Constants are fixed as :math:`U_b = 6`, :math:`R_0 = 1000`, :math:`R_k = 9000` for :math:`k=1,..,5`,
:math:`C_j = j \cdot 10^{-6}` for :math:`j=1,2,3`. They are also defined in the method `eval_f`.
Parameters
----------
nvars : int
Number of unknowns of the system of DAEs.
newton_tol : float
Tolerance for Newton solver.
Attributes
----------
t_end: float
The end time at which the reference solution is determined.
References
----------
.. [1] E. Hairer, G. Wanner. Solving ordinary differential equations II: Stiff and differential-algebraic problems.
Springer (2009).
"""
def __init__(self, nvars, newton_tol):
super().__init__(nvars, newton_tol)
# load reference solution
# the data file must be generated and stored under misc/data; when it is available, set self.t_end = t[-1]
# data = np.load(r'pySDC/projects/DAE/misc/data/one_trans_amp.npy')
# x = data[:, 0]
# # The last column contains the input signal
# y = data[:, 1:-1]
# self.u_ref = interp1d(x, y, kind='cubic', axis=0, fill_value='extrapolate')
self.t_end = 0.0
def eval_f(self, u, du, t):
r"""
Routine to evaluate the implicit representation of the problem, i.e., :math:`F(u, u', t)`.
Parameters
----------
u : dtype_u
Current values of the numerical solution at time t.
du : dtype_u
Current values of the derivative of the numerical solution at time t.
t : float
Current time of the numerical solution.
Returns
-------
f : dtype_f
Current value of the right-hand side of f (which includes five components).
"""
u_b = 6.0
u_e = 0.4 * np.sin(200 * np.pi * t)
alpha = 0.99
r_0 = 1000
r_k = 9000
c_1, c_2, c_3 = 1e-6, 2e-6, 3e-6
f = self.dtype_f(self.init)
f[:] = (
(u_e - u[0]) / r_0 + c_1 * (du[1] - du[0]),
(u_b - u[1]) / r_k - u[1] / r_k + c_1 * (du[0] - du[1]) - (1 - alpha) * _transistor(u[1] - u[2]),
_transistor(u[1] - u[2]) - u[2] / r_k - c_2 * du[2],
(u_b - u[3]) / r_k + c_3 * (du[4] - du[3]) - alpha * _transistor(u[1] - u[2]),
-u[4] / r_k + c_3 * (du[3] - du[4]),
)
return f
def u_exact(self, t):
"""
Approximation of the exact solution generated by spline interpolation of an extremely accurate numerical
reference solution.
Parameters
----------
t : float
The time of the reference solution.
Returns
-------
me : dtype_u
The reference solution as mesh object containing five components and fixed initial conditions.
"""
me = self.dtype_u(self.init)
if t == 0:
me[:] = (0, 3, 3, 6, 0)
elif t < self.t_end:
me[:] = self.u_ref(t)
else:
warnings.warn("Requested time exceeds domain of the reference solution. Returning zero.")
me[:] = (0, 0, 0, 0, 0)
return me
class two_transistor_amplifier(ptype_dae):
r"""
The two transistor amplifier example from page 108 in [1]_. The problem is an index-1 differential-algebraic equation
(DAE) having the equations
.. math::
\frac{U_e (t)}{R_0} - \frac{U_1 (t)}{R_0} + C_1 (\frac{d U_2 (t)}{dt} - \frac{d U_1 (t)}{dt}) = 0,
.. math::
\frac{U_b}{R_2} - U_2 (t) (\frac{1}{R_1} + \frac{1}{R_2}) + C_1 (\frac{d U_1 (t)}{dt} - \frac{d U_2 (t)}{dt}) + (\alpha - 1) f(U_2 (t) - U_3 (t)) = 0,
.. math::
f(U_2 (t) - U_3 (t)) - \frac{U_3 (t)}{R_3} - C_2 \frac{d U_3 (t)}{dt} = 0,
.. math::
\frac{U_b}{R_4} - \frac{U_4 (t)}{R_4} + C_3 (\frac{d U_5 (t)}{dt} - \frac{d U_4 (t)}{dt}) - \alpha f(U_2 (t) - U_3 (t)) = 0,
.. math::
\frac{U_b}{R_6} - U_5 (t) (\frac{1}{R_5} + \frac{1}{R_6}) + C_3 (\frac{d U_4 (t)}{dt} - \frac{d U_5 (t)}{dt}) + (\alpha - 1) f(U_5 (t) - U_6 (t)) = 0,
.. math::
f(U_5 (t) - U_6 (t)) - \frac{U_6 (t)}{R_7} - C_4 \frac{d U_6 (t)}{dt} = 0,
.. math::
\frac{U_b}{R_8} - \frac{U_7 (t)}{R_8} - C_5 (\frac{d U_7 (t)}{dt} - \frac{d U_8 (t)}{dt}) - \alpha f(U_5 (t) - U_6 (t)) = 0,
.. math::
-\frac{U_8 (t)}{R_9} + C_5 (\frac{d U_7 (t)}{dt} - \frac{d U_8 (t)}{dt}) = 0,
with
.. math::
f(U_i (t) - U_j (t)) = \beta (\exp(\frac{U_i (t) - U_j (t)}{U_F}) - 1).
The initial signal :math:`U_e (t)` is defined as
.. math::
U_e (t) = 0.1 \sin(200 \pi t).
Constants are fixed as :math:`U_b = 6`, :math:`U_F = 0.026`, :math:`\alpha = 0.99`, :math:`\beta = 10^{-6}`, :math:`R_0 = 1000`,
:math:`R_k = 9000` for :math:`k=1,..,9`, and :math:`C_j = j \cdot 10^{-6}` for :math:`j=1,..,5`. They are also defined in the
method `eval_f`.
Parameters
----------
nvars : int
Number of unknowns of the system of DAEs.
newton_tol : float
Tolerance for Newton solver.
Attributes
----------
t_end: float
The end time at which the reference solution is determined.
References
----------
.. [1] E. Hairer, C. Lubich, M. Roche. The numerical solution of differential-algebraic systems by Runge-Kutta methods.
Lect. Notes Math. (1989).
"""
def __init__(self, nvars, newton_tol):
super().__init__(nvars, newton_tol)
# load reference solution
# the data file must be generated and stored under misc/data; when it is available, set self.t_end = t[-1]
# data = np.load(r'pySDC/projects/DAE/misc/data/two_trans_amp.npy')
# x = data[:, 0]
# The last column contains the input signal
# y = data[:, 1:-1]
# self.u_ref = interp1d(x, y, kind='cubic', axis=0, fill_value='extrapolate')
self.t_end = 0.0
def eval_f(self, u, du, t):
r"""
Routine to evaluate the implicit representation of the problem, i.e., :math:`F(u, u', t)`.
Parameters
----------
u : dtype_u
Current values of the numerical solution at time t.
du : dtype_u
Current values of the derivative of the numerical solution at time t.
t : float
Current time of the numerical solution.
Returns
-------
f : dtype_f
Current value of the right-hand side of f (which includes eight components).
"""
u_b = 6.0
u_e = 0.1 * np.sin(200 * np.pi * t)
alpha = 0.99
r_0 = 1000.0
r_k = 9000.0
c_1, c_2, c_3, c_4, c_5 = 1e-6, 2e-6, 3e-6, 4e-6, 5e-6
f = self.dtype_f(self.init)
f[:] = (
(u_e - u[0]) / r_0 - c_1 * (du[0] - du[1]),
(u_b - u[1]) / r_k - u[1] / r_k + c_1 * (du[0] - du[1]) + (alpha - 1) * _transistor(u[1] - u[2]),
_transistor(u[1] - u[2]) - u[2] / r_k - c_2 * du[2],
(u_b - u[3]) / r_k - c_3 * (du[3] - du[4]) - alpha * _transistor(u[1] - u[2]),
(u_b - u[4]) / r_k - u[4] / r_k + c_3 * (du[3] - du[4]) + (alpha - 1) * _transistor(u[4] - u[5]),
_transistor(u[4] - u[5]) - u[5] / r_k - c_4 * du[5],
(u_b - u[6]) / r_k - c_5 * (du[6] - du[7]) - alpha * _transistor(u[4] - u[5]),
-u[7] / r_k + c_5 * (du[6] - du[7]),
)
return f
def u_exact(self, t):
"""
Dummy exact solution that should only be used to get initial conditions for the problem. This makes
initialisation compatible with problems that have a known analytical solution. It could also return a
reference solution if one has been generated and is available.
Parameters
----------
t : float
The time of the reference solution.
Returns
-------
me : dtype_u
The reference solution as mesh object containing eight components and fixed initial conditions.
"""
me = self.dtype_u(self.init)
if t == 0:
me[:] = (0, 3, 3, 6, 3, 3, 6, 0)
elif t < self.t_end:
me[:] = self.u_ref(t)
else:
warnings.warn("Requested time exceeds domain of the reference solution. Returning zero.")
me[:] = (0, 0, 0, 0, 0, 0, 0, 0)
return me
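# Initialisation sketch (illustration only, helper name is hypothetical): for this
# index-1 DAE the initial derivatives follow from the initial values. For example,
# the third equation, f(U_2 - U_3) - U_3 / R_3 - C_2 dU_3/dt = 0, fixes the initial
# slope of U_3 at the operating point used in u_exact(0).
def _initial_du3(u_2=3.0, u_3=3.0, r_k=9000.0, c_2=2e-6):
    return (_transistor(u_2 - u_3) - u_3 / r_k) / c_2  # approx. -166.7 here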
| 9,565 | 34.298893 | 158 | py |
pySDC | pySDC-master/pySDC/projects/AllenCahn_Bayreuth/run_temp_forcing_benchmark.py | from argparse import ArgumentParser
import numpy as np
from mpi4py import MPI
from pySDC.helpers.stats_helper import get_sorted
from pySDC.implementations.controller_classes.controller_MPI import controller_MPI
from pySDC.implementations.sweeper_classes.imex_1st_order import imex_1st_order
from pySDC.implementations.problem_classes.AllenCahn_Temp_MPIFFT import allencahn_temp_imex
from pySDC.implementations.transfer_classes.TransferMesh_MPIFFT import fft_to_fft
from pySDC.projects.AllenCahn_Bayreuth.AllenCahn_dump import dump
def run_simulation(name=None, nprocs_space=None):
"""
A simple test program to do PFASST runs for the AC equation
"""
# set MPI communicator
comm = MPI.COMM_WORLD
world_rank = comm.Get_rank()
world_size = comm.Get_size()
# split world communicator to create space-communicators
if nprocs_space is not None:
color = int(world_rank / nprocs_space)
else:
color = int(world_rank / 1)
space_comm = comm.Split(color=color)
space_size = space_comm.Get_size()
space_rank = space_comm.Get_rank()
# split world communicator to create time-communicators
if nprocs_space is not None:
color = int(world_rank % nprocs_space)
else:
color = int(world_rank / world_size)
time_comm = comm.Split(color=color)
time_size = time_comm.Get_size()
time_rank = time_comm.Get_rank()
# initialize level parameters
level_params = dict()
level_params['restol'] = 1e-08
level_params['dt'] = 1e-03
level_params['nsweeps'] = [3, 1]
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['num_nodes'] = [3]
sweeper_params['QI'] = ['LU'] # For the IMEX sweeper, the LU-trick can be activated for the implicit part
sweeper_params['initial_guess'] = 'zero'
# initialize problem parameters
problem_params = dict()
problem_params['L'] = 16.0
problem_params['nvars'] = [(48 * 48, 48 * 48), (8 * 48, 8 * 48)]
problem_params['eps'] = [0.04]
problem_params['radius'] = 0.25
problem_params['TM'] = 1.0
problem_params['D'] = 1.0
problem_params['dw'] = [300.0]
problem_params['comm'] = space_comm
problem_params['init_type'] = 'circle_rand'
problem_params['spectral'] = True
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 50
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 20 if space_rank == 0 else 99 # set level depending on rank
controller_params['hook_class'] = dump
controller_params['predict_type'] = 'fine_only'
# fill description dictionary for easy step instantiation
description = dict()
description['problem_params'] = problem_params # pass problem parameters
description['sweeper_class'] = imex_1st_order
description['sweeper_params'] = sweeper_params # pass sweeper parameters
description['level_params'] = level_params # pass level parameters
description['step_params'] = step_params # pass step parameters
description['space_transfer_class'] = fft_to_fft
description['problem_class'] = allencahn_temp_imex
# set time parameters
t0 = 0.0
Tend = 100 * 0.001
if space_rank == 0 and time_rank == 0:
out = f'---------> Running {name} with {time_size} process(es) in time and {space_size} process(es) in space...'
print(out)
# instantiate controller
controller = controller_MPI(controller_params=controller_params, description=description, comm=time_comm)
# get initial values on finest level
P = controller.S.levels[0].prob
uinit = P.u_exact(t0)
# call main function to get things done...
uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
if space_rank == 0:
print()
# convert filtered statistics to a list of iteration counts, sorted by time
iter_counts = get_sorted(stats, type='niter', sortby='time')
niters = np.array([item[1] for item in iter_counts])
out = f'Mean number of iterations on rank {time_rank}: {np.mean(niters):.4f}'
print(out)
timing = get_sorted(stats, type='timing_setup', sortby='time')
out = f'Setup time on rank {time_rank}: {timing[0][1]:.4f} sec.'
print(out)
timing = get_sorted(stats, type='timing_run', sortby='time')
out = f'Time to solution on rank {time_rank}: {timing[0][1]:.4f} sec.'
print(out)
if __name__ == "__main__":
# Add parser to get number of processors in space and setup (have to do this here to enable automatic testing)
parser = ArgumentParser()
parser.add_argument("-n", "--nprocs_space", help='Specifies the number of processors in space', type=int)
args = parser.parse_args()
name = 'AC-bench-tempforce'
run_simulation(name=name, nprocs_space=args.nprocs_space)
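# Illustrative sketch (pure Python, no MPI required; helper name is hypothetical):
# the color arithmetic used in run_simulation maps world ranks onto a space x time
# process grid. With 8 world ranks and nprocs_space=4, ranks 0-3 share space
# communicator 0, while ranks {0, 4}, {1, 5}, ... each share a time communicator.
def _demo_split_colors(world_size=8, nprocs_space=4):
    space_colors = [rank // nprocs_space for rank in range(world_size)]
    time_colors = [rank % nprocs_space for rank in range(world_size)]
    return space_colors, time_colors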
| 4,936 | 35.57037 | 120 | py |
pySDC | pySDC-master/pySDC/projects/AllenCahn_Bayreuth/AllenCahn_monitor_and_dump.py | import numpy as np
import json
from mpi4py import MPI
from pySDC.core.Hooks import hooks
class monitor_and_dump(hooks):
def __init__(self):
"""
Initialization of Allen-Cahn monitoring
"""
super(monitor_and_dump, self).__init__()
self.init_radius = None
self.init_vol = None
self.ndim = None
self.corr_rad = None
self.corr_vol = None
self.comm = None
self.rank = None
self.size = None
self.amode = MPI.MODE_WRONLY | MPI.MODE_CREATE
self.time_step = None
def pre_run(self, step, level_number):
"""
Overwrite standard pre run hook
Args:
step (pySDC.Step.step): the current step
level_number (int): the current level number
"""
super(monitor_and_dump, self).pre_run(step, level_number)
L = step.levels[0]
# get space-communicator and data
self.comm = L.prob.params.comm
if self.comm is not None:
self.rank = self.comm.Get_rank()
self.size = self.comm.Get_size()
else:
self.rank = 0
self.size = 1
# get real space values
if L.prob.params.spectral:
tmp = L.prob.fft.backward(L.u[0])
else:
tmp = L.u[0][:]
self.ndim = len(tmp.shape)
# compute numerical radius and volume
# c_local = np.count_nonzero(tmp >= 0.5)
c_local = float(tmp[:].sum())
if self.comm is not None:
c_global = self.comm.allreduce(sendobj=c_local, op=MPI.SUM)
else:
c_global = c_local
if self.ndim == 3:
vol = c_global * L.prob.dx**3
radius = (vol / (np.pi * 4.0 / 3.0)) ** (1.0 / 3.0)
self.init_vol = np.pi * 4.0 / 3.0 * L.prob.params.radius**3
elif self.ndim == 2:
vol = c_global * L.prob.dx**2
radius = np.sqrt(vol / np.pi)
self.init_vol = np.pi * L.prob.params.radius**2
else:
raise NotImplementedError('Can use this only for 2 or 3D problems')
self.init_radius = L.prob.params.radius
self.corr_rad = self.init_radius / radius
self.corr_vol = self.init_vol / vol
radius *= self.corr_rad
vol *= self.corr_vol
# write to stats
if L.time == 0.0:
self.add_to_stats(
process=step.status.slot,
time=L.time,
level=-1,
iter=step.status.iter,
sweep=L.status.sweep,
type='computed_radius',
value=radius,
)
self.add_to_stats(
process=step.status.slot,
time=L.time,
level=-1,
iter=step.status.iter,
sweep=L.status.sweep,
type='exact_radius',
value=self.init_radius,
)
self.add_to_stats(
process=step.status.slot,
time=L.time,
level=-1,
iter=step.status.iter,
sweep=L.status.sweep,
type='computed_volume',
value=vol,
)
self.add_to_stats(
process=step.status.slot,
time=L.time,
level=-1,
iter=step.status.iter,
sweep=L.status.sweep,
type='exact_volume',
value=self.init_vol,
)
# compute local offset for I/O
nbytes_local = tmp.nbytes
if self.comm is not None:
nbytes_global = self.comm.allgather(nbytes_local)
else:
nbytes_global = [nbytes_local]
local_offset = sum(nbytes_global[: self.rank])
# dump initial data
fname = f"./data/{L.prob.params.name}_{0:08d}"
fh = MPI.File.Open(self.comm, fname + ".dat", self.amode)
fh.Write_at_all(local_offset, tmp)
fh.Close()
# write json description
if self.rank == 0 and step.status.slot == 0:
json_obj = dict()
json_obj['type'] = 'dataset'
json_obj['datatype'] = str(tmp.dtype)
json_obj['endian'] = str(tmp.dtype.byteorder)
json_obj['time'] = L.time
json_obj['space_comm_size'] = self.size
json_obj['time_comm_size'] = step.status.time_size
json_obj['shape'] = L.prob.params.nvars
json_obj['elementsize'] = tmp.dtype.itemsize
with open(fname + '.json', 'w') as fp:
json.dump(json_obj, fp)
# set step count
self.time_step = 1
def post_step(self, step, level_number):
"""
Overwrite standard post step hook
Args:
step (pySDC.Step.step): the current step
level_number (int): the current level number
"""
super(monitor_and_dump, self).post_step(step, level_number)
# some abbreviations
L = step.levels[0]
# get real space values
if L.prob.params.spectral:
tmp = L.prob.fft.backward(L.uend)
else:
tmp = L.uend[:]
# compute numerical radius and volume
# c_local = np.count_nonzero(tmp >= 0.5)
# c_local = float(tmp[tmp > 2 * L.prob.params.eps].sum())
c_local = float(tmp[:].sum())
if self.comm is not None:
c_global = self.comm.allreduce(sendobj=c_local, op=MPI.SUM)
else:
c_global = c_local
if self.ndim == 3:
vol = c_global * L.prob.dx**3
radius = (vol / (np.pi * 4.0 / 3.0)) ** (1.0 / 3.0)
exact_vol = np.pi * 4.0 / 3.0 * (max(self.init_radius**2 - 4.0 * (L.time + L.dt), 0)) ** (3.0 / 2.0)
exact_radius = (exact_vol / (np.pi * 4.0 / 3.0)) ** (1.0 / 3.0)
elif self.ndim == 2:
vol = c_global * L.prob.dx**2
radius = np.sqrt(vol / np.pi)
exact_vol = np.pi * max(self.init_radius**2 - 2.0 * (L.time + L.dt), 0)
exact_radius = np.sqrt(exact_vol / np.pi)
else:
raise NotImplementedError('Can use this only for 2 or 3D problems')
radius *= self.corr_rad
vol *= self.corr_vol
# write to stats
self.add_to_stats(
process=step.status.slot,
time=L.time + L.dt,
level=-1,
iter=step.status.iter,
sweep=L.status.sweep,
type='computed_radius',
value=radius,
)
self.add_to_stats(
process=step.status.slot,
time=L.time + L.dt,
level=-1,
iter=step.status.iter,
sweep=L.status.sweep,
type='exact_radius',
value=exact_radius,
)
self.add_to_stats(
process=step.status.slot,
time=L.time + L.dt,
level=-1,
iter=step.status.iter,
sweep=L.status.sweep,
type='computed_volume',
value=vol,
)
self.add_to_stats(
process=step.status.slot,
time=L.time + L.dt,
level=-1,
iter=step.status.iter,
sweep=L.status.sweep,
type='exact_volume',
value=exact_vol,
)
# compute local offset for I/O
nbytes_local = tmp.nbytes
if self.comm is not None:
nbytes_global = self.comm.allgather(nbytes_local)
else:
nbytes_global = [nbytes_local]
local_offset = sum(nbytes_global[: self.rank])
# dump initial data
fname = f"./data/{L.prob.params.name}_{self.time_step + step.status.slot:08d}"
fh = MPI.File.Open(self.comm, fname + ".dat", self.amode)
fh.Write_at_all(local_offset, tmp)
fh.Close()
# write json description
if self.rank == 0:
json_obj = dict()
json_obj['type'] = 'dataset'
json_obj['datatype'] = str(tmp.dtype)
json_obj['endian'] = str(tmp.dtype.byteorder)
json_obj['time'] = L.time + L.dt
json_obj['space_comm_size'] = self.size
json_obj['time_comm_size'] = step.status.time_size
json_obj['shape'] = L.prob.params.nvars
json_obj['elementsize'] = tmp.dtype.itemsize
with open(fname + '.json', 'w') as fp:
json.dump(json_obj, fp)
# update step count
self.time_step += step.status.time_size
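# Illustrative sketch of the exact law monitored above (helper name is hypothetical):
# under mean curvature flow a circular interface of initial radius r0 shrinks as
# r(t)^2 = r0^2 - 2 (d - 1) t, i.e. -2t in 2D and -4t in 3D, which is exactly the
# exact_radius/exact_volume reference computed in post_step.
def _exact_radius(t, r0=0.25, ndim=2):
    return np.sqrt(max(r0**2 - 2.0 * (ndim - 1) * t, 0))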
| 8,577 | 31.740458 | 112 | py |
pySDC | pySDC-master/pySDC/projects/AllenCahn_Bayreuth/run_temp_forcing_reference.py | from argparse import ArgumentParser
import numpy as np
from mpi4py import MPI
from pySDC.helpers.stats_helper import get_sorted
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.implementations.sweeper_classes.imex_1st_order import imex_1st_order
from pySDC.implementations.problem_classes.AllenCahn_Temp_MPIFFT import allencahn_temp_imex
from pySDC.projects.AllenCahn_Bayreuth.AllenCahn_dump import dump
def run_simulation(name='', spectral=None, nprocs_space=None):
"""
A test program to create reference data for the AC equation with temporal forcing
Args:
name (str): name of the run, will be used to distinguish different setups
spectral (bool): run in real or spectral space
nprocs_space (int): number of processors in space (None if serial)
"""
# set MPI communicator
comm = MPI.COMM_WORLD
world_rank = comm.Get_rank()
world_size = comm.Get_size()
# split world communicator to create space-communicators
if nprocs_space is not None:
color = int(world_rank / nprocs_space)
else:
color = int(world_rank / 1)
space_comm = comm.Split(color=color)
space_rank = space_comm.Get_rank()
space_size = space_comm.Get_size()
assert world_size == space_size, 'This script cannot run parallel-in-time with MPI, only spatial parallelism'
# initialize level parameters
level_params = dict()
level_params['restol'] = 1e-12
level_params['dt'] = 1e-06
level_params['nsweeps'] = [1]
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['num_nodes'] = [7]
sweeper_params['QI'] = ['LU'] # For the IMEX sweeper, the LU-trick can be activated for the implicit part
sweeper_params['initial_guess'] = 'spread'
# initialize problem parameters
problem_params = dict()
problem_params['L'] = 1.0
problem_params['nvars'] = [(128, 128)]
problem_params['eps'] = [0.04]
problem_params['radius'] = 0.25
problem_params['TM'] = 1.0
problem_params['D'] = 0.1
problem_params['dw'] = [21.0]
problem_params['comm'] = space_comm
problem_params['init_type'] = 'circle'
problem_params['spectral'] = spectral
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 50
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 20 if space_rank == 0 else 99 # set level depending on rank
controller_params['hook_class'] = dump
# fill description dictionary for easy step instantiation
description = dict()
description['problem_params'] = problem_params # pass problem parameters
description['sweeper_class'] = imex_1st_order
description['sweeper_params'] = sweeper_params # pass sweeper parameters
description['level_params'] = level_params # pass level parameters
description['step_params'] = step_params # pass step parameters
description['problem_class'] = allencahn_temp_imex
# set time parameters
t0 = 0.0
Tend = 1 * 0.001
if space_rank == 0:
out = f'---------> Running {name} with spectral={spectral} and {space_size} process(es) in space...'
print(out)
# instantiate controller
controller = controller_nonMPI(num_procs=1, controller_params=controller_params, description=description)
# get initial values on finest level
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(t0)
# call main function to get things done...
uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
if space_rank == 0:
print()
# convert filtered statistics to a list of iteration counts, sorted by time
iter_counts = get_sorted(stats, type='niter', sortby='time')
niters = np.mean(np.array([item[1] for item in iter_counts]))
out = f'Mean number of iterations: {niters:.4f}'
print(out)
# get setup time
timing = get_sorted(stats, type='timing_setup', sortby='time')
out = f'Setup time: {timing[0][1]:.4f} sec.'
print(out)
# get running time
timing = get_sorted(stats, type='timing_run', sortby='time')
out = f'Time to solution: {timing[0][1]:.4f} sec.'
print(out)
out = '...Done <---------\n'
print(out)
def main(nprocs_space=None):
"""
Little helper routine to run the whole thing
Args:
nprocs_space (int): number of processors in space (None if serial)
"""
name = 'AC-reference-tempforce'
run_simulation(name=name, spectral=False, nprocs_space=nprocs_space)
# run_simulation(name=name, spectral=True, nprocs_space=nprocs_space)
if __name__ == "__main__":
# Add parser to get number of processors in space (have to do this here to enable automatic testing)
parser = ArgumentParser()
parser.add_argument("-n", "--nprocs_space", help='Specifies the number of processors in space', type=int)
args = parser.parse_args()
main(nprocs_space=args.nprocs_space)
| 5,102 | 33.952055 | 113 | py |
pySDC | pySDC-master/pySDC/projects/AllenCahn_Bayreuth/AllenCahn_dump.py | import numpy as np
import json
from mpi4py import MPI
from mpi4py_fft import newDistArray
from pySDC.core.Hooks import hooks
class dump(hooks):
def __init__(self):
"""
Initialization of Allen-Cahn monitoring
"""
super(dump, self).__init__()
self.comm = None
self.rank = None
self.size = None
self.amode = MPI.MODE_WRONLY | MPI.MODE_CREATE
self.time_step = None
def pre_run(self, step, level_number):
"""
Overwrite standard pre run hook
Args:
step (pySDC.Step.step): the current step
level_number (int): the current level number
"""
super(dump, self).pre_run(step, level_number)
L = step.levels[0]
# get space-communicator and data
self.comm = L.prob.params.comm
if self.comm is not None:
self.rank = self.comm.Get_rank()
self.size = self.comm.Get_size()
else:
self.rank = 0
self.size = 1
# get real space values
if L.prob.params.spectral:
if hasattr(L.prob, 'ncomp'):
tmp1 = newDistArray(L.prob.fft, False)
tmp = np.zeros(tmp1.shape + (L.prob.ncomp,))
for i in range(L.prob.ncomp):
tmp[..., i] = L.prob.fft.backward(L.u[0][..., i])
else:
tmp = L.prob.fft.backward(L.u[0])
else:
tmp = L.u[0][:]
# compute local offset for I/O
nbytes_local = tmp.nbytes
if self.comm is not None:
nbytes_global = self.comm.allgather(nbytes_local)
else:
nbytes_global = [nbytes_local]
local_offset = sum(nbytes_global[: self.rank])
# dump initial data
fname = f"./data/{L.prob.params.name}_{0:08d}"
fh = MPI.File.Open(self.comm, fname + ".dat", self.amode)
fh.Write_at_all(local_offset, tmp)
fh.Close()
sizes = list(L.prob.params.nvars)
if hasattr(L.prob, 'ncomp'):
sizes.append(L.prob.ncomp)
# write json description
if self.rank == 0 and step.status.slot == 0:
json_obj = dict()
json_obj['type'] = 'dataset'
json_obj['datatype'] = str(tmp.dtype)
json_obj['endian'] = str(tmp.dtype.byteorder)
json_obj['time'] = L.time
json_obj['space_comm_size'] = self.size
json_obj['time_comm_size'] = step.status.time_size
json_obj['shape'] = sizes
json_obj['elementsize'] = tmp.dtype.itemsize
with open(fname + '.json', 'w') as fp:
json.dump(json_obj, fp)
# set step count
self.time_step = 1
def post_step(self, step, level_number):
"""
Overwrite standard post step hook
Args:
step (pySDC.Step.step): the current step
level_number (int): the current level number
"""
super(dump, self).post_step(step, level_number)
# some abbreviations
L = step.levels[0]
# get real space values
if L.prob.params.spectral:
if hasattr(L.prob, 'ncomp'):
tmp1 = newDistArray(L.prob.fft, False)
tmp = np.zeros(tmp1.shape + (L.prob.ncomp,))
for i in range(L.prob.ncomp):
tmp[..., i] = L.prob.fft.backward(L.uend[..., i])
else:
tmp = L.prob.fft.backward(L.uend)
else:
tmp = L.uend[:]
# compute local offset for I/O
nbytes_local = tmp.nbytes
if self.comm is not None:
nbytes_global = self.comm.allgather(nbytes_local)
else:
nbytes_global = [nbytes_local]
local_offset = sum(nbytes_global[: self.rank])
# dump data
fname = f"./data/{L.prob.params.name}_{self.time_step + step.status.slot:08d}"
fh = MPI.File.Open(self.comm, fname + ".dat", self.amode)
fh.Write_at_all(local_offset, tmp)
fh.Close()
sizes = list(L.prob.params.nvars)
if hasattr(L.prob, 'ncomp'):
sizes.append(L.prob.ncomp)
# write json description
if self.rank == 0:
json_obj = dict()
json_obj['type'] = 'dataset'
json_obj['datatype'] = str(tmp.dtype)
json_obj['endian'] = str(tmp.dtype.byteorder)
json_obj['time'] = L.time + L.dt
json_obj['space_comm_size'] = self.size
json_obj['time_comm_size'] = step.status.time_size
json_obj['shape'] = sizes
json_obj['elementsize'] = tmp.dtype.itemsize
with open(fname + '.json', 'w') as fp:
json.dump(json_obj, fp)
# update step count
self.time_step += step.status.time_size
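# Read-back sketch (illustration only, helper name is hypothetical): a field written
# by this hook can be restored from the .dat/.json pair with numpy.fromfile and the
# metadata of the JSON sidecar, as the verification script in this project does.
def _load_dump(fname):
    with open(fname + '.json', 'r') as fp:
        obj = json.load(fp)
    array = np.fromfile(fname + '.dat', dtype=obj['datatype'])
    return array.reshape(obj['shape'], order='C'), obj['time']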
| 4,852 | 31.57047 | 86 | py |
pySDC | pySDC-master/pySDC/projects/AllenCahn_Bayreuth/run_temp_forcing_verification.py | from argparse import ArgumentParser
import json
import numpy as np
from mpi4py import MPI
from mpi4py_fft import newDistArray
from pySDC.helpers.stats_helper import get_sorted
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.implementations.sweeper_classes.imex_1st_order import imex_1st_order
from pySDC.implementations.problem_classes.AllenCahn_Temp_MPIFFT import allencahn_temp_imex
from pySDC.implementations.transfer_classes.TransferMesh_MPIFFT import fft_to_fft
def run_simulation(name='', spectral=None, nprocs_time=None, nprocs_space=None, dt=None, cwd='.'):
"""
A test program to do PFASST runs for the AC equation with temperature-based forcing
(slightly inefficient, but will run for a few seconds only)
Args:
name (str): name of the run, will be used to distinguish different setups
spectral (bool): run in real or spectral space
nprocs_time (int): number of processors in time
nprocs_space (int): number of processors in space (None if serial)
dt (float): time-step size
cwd (str): current working directory
"""
# set MPI communicator
comm = MPI.COMM_WORLD
world_rank = comm.Get_rank()
world_size = comm.Get_size()
# split world communicator to create space-communicators
if nprocs_space is not None:
color = int(world_rank / nprocs_space)
else:
color = int(world_rank / 1)
space_comm = comm.Split(color=color)
space_rank = space_comm.Get_rank()
space_size = space_comm.Get_size()
assert world_size == space_size, 'This script cannot run parallel-in-time with MPI, only spatial parallelism'
# initialize level parameters
level_params = dict()
level_params['restol'] = 1e-12
level_params['dt'] = dt
level_params['nsweeps'] = [1]
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['num_nodes'] = [3]
sweeper_params['QI'] = ['LU'] # For the IMEX sweeper, the LU-trick can be activated for the implicit part
sweeper_params['initial_guess'] = 'spread'
# initialize problem parameters
problem_params = dict()
problem_params['L'] = 1.0
problem_params['nvars'] = [(128, 128), (32, 32)]
problem_params['eps'] = [0.04]
problem_params['radius'] = 0.25
problem_params['TM'] = 1.0
problem_params['D'] = 0.1
problem_params['dw'] = [21.0]
problem_params['comm'] = space_comm
problem_params['init_type'] = 'circle'
problem_params['spectral'] = spectral
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 50
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 30 if space_rank == 0 else 99 # set level depending on rank
controller_params['predict_type'] = 'pfasst_burnin'
# fill description dictionary for easy step instantiation
description = dict()
description['problem_params'] = problem_params # pass problem parameters
description['sweeper_class'] = imex_1st_order
description['sweeper_params'] = sweeper_params # pass sweeper parameters
description['level_params'] = level_params # pass level parameters
description['step_params'] = step_params # pass step parameters
description['space_transfer_class'] = fft_to_fft
description['problem_class'] = allencahn_temp_imex
# set time parameters
t0 = 0.0
Tend = 1 * 0.001
if space_rank == 0:
out = f'---------> Running {name} with spectral={spectral} and {space_size} process(es) in space...'
print(out)
# instantiate controller
controller = controller_nonMPI(num_procs=nprocs_time, controller_params=controller_params, description=description)
# get initial values on finest level
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(t0)
# call main function to get things done...
uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
if space_rank == 0:
# convert filtered statistics to a list of iteration counts, sorted by time
iter_counts = get_sorted(stats, type='niter', sortby='time')
niters = np.mean(np.array([item[1] for item in iter_counts]))
out = f'Mean number of iterations: {niters:.4f}'
print(out)
# get setup time
timing = get_sorted(stats, type='timing_setup', sortby='time')
out = f'Setup time: {timing[0][1]:.4f} sec.'
print(out)
# get running time
timing = get_sorted(stats, type='timing_run', sortby='time')
out = f'Time to solution: {timing[0][1]:.4f} sec.'
print(out)
refname = f'{cwd}/data/AC-reference-tempforce_00001000'
with open(f'{refname}.json', 'r') as fp:
obj = json.load(fp)
array = np.fromfile(f'{refname}.dat', dtype=obj['datatype'])
array = array.reshape(obj['shape'], order='C')
if spectral:
ureal = newDistArray(P.fft, False)
ureal = P.fft.backward(uend[..., 0], ureal)
Treal = newDistArray(P.fft, False)
Treal = P.fft.backward(uend[..., 1], Treal)
err = max(np.amax(abs(ureal - array[..., 0])), np.amax(abs(Treal - array[..., 1])))
else:
err = abs(array - uend)
out = '...Done <---------\n'
print(out)
return err
def main(nprocs_space=None, cwd='.'):
"""
Little helper routine to run the whole thing
Args:
nprocs_space (int): number of processors in space (None if serial)
cwd (str): current working directory
"""
name = 'AC-test-tempforce'
nsteps = [2**i for i in range(4)]
errors = [1]
orders = []
for n in nsteps:
err = run_simulation(name=name, spectral=False, nprocs_time=n, nprocs_space=nprocs_space, dt=1e-03 / n, cwd=cwd)
errors.append(err)
orders.append(np.log(errors[-1] / errors[-2]) / np.log(0.5))
print(f'Error: {errors[-1]:6.4e}')
print(f'Order of accuracy: {orders[-1]:4.2f}\n')
assert errors[2 + 1] < 8e-10, f'Errors are too high, got {errors[2 + 1]}'
assert np.isclose(orders[3], 5.3, rtol=2e-02), f'Order of accuracy is not within tolerance, got {orders[3]}'
print()
errors = [1]
orders = []
for n in nsteps:
err = run_simulation(name=name, spectral=True, nprocs_time=n, nprocs_space=nprocs_space, dt=1e-03 / n, cwd=cwd)
errors.append(err)
orders.append(np.log(errors[-1] / errors[-2]) / np.log(0.5))
print(f'Error: {errors[-1]:6.4e}')
print(f'Order of accuracy: {orders[-1]:4.2f}\n')
assert errors[2 + 1] < 8e-10, f'Errors are too high, got {errors[2 + 1]}'
assert np.isclose(orders[1], 4.6, rtol=7e-02), f'Order of accuracy is not within tolerance, got {orders[1]}'
if __name__ == "__main__":
# Add parser to get number of processors in space (have to do this here to enable automatic testing)
parser = ArgumentParser()
parser.add_argument("-n", "--nprocs_space", help='Specifies the number of processors in space', type=int)
args = parser.parse_args()
main(nprocs_space=args.nprocs_space)
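# Illustrative helper (hypothetical, mirrors the arithmetic in main): the observed
# order of accuracy is estimated from errors obtained by successively halving dt,
# p = log(e_coarse / e_fine) / log(2).
def _observed_orders(errors):
    return [np.log(errors[i] / errors[i + 1]) / np.log(2.0) for i in range(len(errors) - 1)]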
| 7,215 | 36.005128 | 120 | py |
pySDC | pySDC-master/pySDC/projects/AllenCahn_Bayreuth/run_temp_forcing_realistic.py | from argparse import ArgumentParser
import numpy as np
from mpi4py import MPI
from pySDC.helpers.stats_helper import get_sorted
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.implementations.sweeper_classes.imex_1st_order import imex_1st_order
from pySDC.implementations.problem_classes.AllenCahn_Temp_MPIFFT import allencahn_temp_imex
from pySDC.projects.AllenCahn_Bayreuth.AllenCahn_dump import dump
def run_simulation(name='', spectral=None, nprocs_space=None):
"""
A test program to create reference data for the AC equation with temporal forcing
Args:
name (str): name of the run, will be used to distinguish different setups
spectral (bool): run in real or spectral space
nprocs_space (int): number of processors in space (None if serial)
"""
# set MPI communicator
comm = MPI.COMM_WORLD
world_rank = comm.Get_rank()
world_size = comm.Get_size()
# split world communicator to create space-communicators
if nprocs_space is not None:
color = int(world_rank / nprocs_space)
else:
color = int(world_rank / 1)
space_comm = comm.Split(color=color)
space_rank = space_comm.Get_rank()
space_size = space_comm.Get_size()
assert world_size == space_size, 'This script cannot run parallel-in-time with MPI, only spatial parallelism'
# initialize level parameters
level_params = dict()
level_params['restol'] = 1e-12
level_params['dt'] = 1e-03
level_params['nsweeps'] = [1]
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['num_nodes'] = [7]
sweeper_params['QI'] = ['LU'] # For the IMEX sweeper, the LU-trick can be activated for the implicit part
sweeper_params['initial_guess'] = 'spread'
# initialize problem parameters
problem_params = dict()
problem_params['L'] = 1.0
problem_params['nvars'] = [(128, 128)]
problem_params['eps'] = [0.03]
problem_params['radius'] = 0.35682
problem_params['TM'] = 1.0
problem_params['D'] = 10.0
problem_params['dw'] = [1.0]
problem_params['comm'] = space_comm
problem_params['init_type'] = 'circle'
problem_params['spectral'] = spectral
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 50
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 20 if space_rank == 0 else 99 # set level depending on rank
controller_params['hook_class'] = dump
# fill description dictionary for easy step instantiation
description = dict()
description['problem_params'] = problem_params # pass problem parameters
description['sweeper_class'] = imex_1st_order
description['sweeper_params'] = sweeper_params # pass sweeper parameters
description['level_params'] = level_params # pass level parameters
description['step_params'] = step_params # pass step parameters
description['problem_class'] = allencahn_temp_imex
# set time parameters
t0 = 0.0
Tend = 100 * 0.001
if space_rank == 0:
out = f'---------> Running {name} with spectral={spectral} and {space_size} process(es) in space...'
print(out)
# instantiate controller
controller = controller_nonMPI(num_procs=1, controller_params=controller_params, description=description)
# get initial values on finest level
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(t0)
# call main function to get things done...
uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
if space_rank == 0:
print()
# convert filtered statistics to a list of iteration counts, sorted by time
iter_counts = get_sorted(stats, type='niter', sortby='time')
niters = np.mean(np.array([item[1] for item in iter_counts]))
out = f'Mean number of iterations: {niters:.4f}'
print(out)
# get setup time
timing = get_sorted(stats, type='timing_setup', sortby='time')
out = f'Setup time: {timing[0][1]:.4f} sec.'
print(out)
# get running time
timing = get_sorted(stats, type='timing_run', sortby='time')
out = f'Time to solution: {timing[0][1]:.4f} sec.'
print(out)
out = '...Done <---------\n'
print(out)
def main(nprocs_space=None):
"""
Little helper routine to run the whole thing
Args:
nprocs_space (int): number of processors in space (None if serial)
"""
name = 'AC-realistic-tempforce'
run_simulation(name=name, spectral=False, nprocs_space=nprocs_space)
# run_simulation(name=name, spectral=True, nprocs_space=nprocs_space)
if __name__ == "__main__":
# Add parser to get number of processors in space (have to do this here to enable automatic testing)
parser = ArgumentParser()
parser.add_argument("-n", "--nprocs_space", help='Specifies the number of processors in space', type=int)
args = parser.parse_args()
main(nprocs_space=args.nprocs_space)
| 5,104 | 34.699301 | 113 | py |
pySDC | pySDC-master/pySDC/projects/AllenCahn_Bayreuth/AllenCahn_monitor.py | import numpy as np
from mpi4py import MPI
from pySDC.core.Hooks import hooks
class monitor(hooks):
def __init__(self):
"""
Initialization of Allen-Cahn monitoring
"""
super(monitor, self).__init__()
self.init_radius = None
self.init_vol = None
self.ndim = None
self.corr_rad = None
self.corr_vol = None
self.comm = None
def pre_run(self, step, level_number):
"""
Overwrite standard pre run hook
Args:
step (pySDC.Step.step): the current step
level_number (int): the current level number
"""
super(monitor, self).pre_run(step, level_number)
L = step.levels[0]
# get space-communicator and data
self.comm = L.prob.comm
# get real space values
if L.prob.spectral:
tmp = L.prob.fft.backward(L.u[0])
else:
tmp = L.u[0][:]
self.ndim = len(tmp.shape)
# compute numerical radius and volume
# c_local = np.count_nonzero(tmp >= 0.5)
c_local = float(tmp[:].sum())
if self.comm is not None:
c_global = self.comm.allreduce(sendobj=c_local, op=MPI.SUM)
else:
c_global = c_local
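        # Summing the phase field and scaling by dx**d approximates the volume of
        # the circle/sphere; inverting vol = pi * r**2 (2D) or vol = 4/3 * pi * r**3
        # (3D) then recovers the radius.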
if self.ndim == 3:
vol = c_global * L.prob.dx**3
radius = (vol / (np.pi * 4.0 / 3.0)) ** (1.0 / 3.0)
self.init_vol = np.pi * 4.0 / 3.0 * L.prob.radius**3
elif self.ndim == 2:
vol = c_global * L.prob.dx**2
radius = np.sqrt(vol / np.pi)
self.init_vol = np.pi * L.prob.radius**2
else:
raise NotImplementedError('Can use this only for 2 or 3D problems')
self.init_radius = L.prob.radius
self.corr_rad = self.init_radius / radius
self.corr_vol = self.init_vol / vol
radius *= self.corr_rad
vol *= self.corr_vol
# write to stats
if L.time == 0.0:
self.add_to_stats(
process=step.status.slot,
time=L.time,
level=-1,
iter=step.status.iter,
sweep=L.status.sweep,
type='computed_radius',
value=radius,
)
self.add_to_stats(
process=step.status.slot,
time=L.time,
level=-1,
iter=step.status.iter,
sweep=L.status.sweep,
type='exact_radius',
value=self.init_radius,
)
self.add_to_stats(
process=step.status.slot,
time=L.time,
level=-1,
iter=step.status.iter,
sweep=L.status.sweep,
type='computed_volume',
value=vol,
)
self.add_to_stats(
process=step.status.slot,
time=L.time,
level=-1,
iter=step.status.iter,
sweep=L.status.sweep,
type='exact_volume',
value=self.init_vol,
)
def post_step(self, step, level_number):
"""
Overwrite standard post step hook
Args:
step (pySDC.Step.step): the current step
level_number (int): the current level number
"""
super(monitor, self).post_step(step, level_number)
# some abbreviations
L = step.levels[0]
# get real space values
if L.prob.spectral:
tmp = L.prob.fft.backward(L.uend)
else:
tmp = L.uend[:]
# compute numerical radius and volume
# c_local = np.count_nonzero(tmp >= 0.5)
# c_local = float(tmp[tmp > 2 * L.prob.eps].sum())
c_local = float(tmp[:].sum())
if self.comm is not None:
c_global = self.comm.allreduce(sendobj=c_local, op=MPI.SUM)
else:
c_global = c_local
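        # Sharp-interface limit of Allen-Cahn: the radius obeys dr/dt = -(d - 1) / r,
        # hence r(t)**2 = r0**2 - 2 * (d - 1) * t, which yields the factors 2 (2D)
        # and 4 (3D) in the exact volumes below.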
if self.ndim == 3:
vol = c_global * L.prob.dx**3
radius = (vol / (np.pi * 4.0 / 3.0)) ** (1.0 / 3.0)
exact_vol = np.pi * 4.0 / 3.0 * (max(self.init_radius**2 - 4.0 * (L.time + L.dt), 0)) ** (3.0 / 2.0)
exact_radius = (exact_vol / (np.pi * 4.0 / 3.0)) ** (1.0 / 3.0)
elif self.ndim == 2:
vol = c_global * L.prob.dx**2
radius = np.sqrt(vol / np.pi)
exact_vol = np.pi * max(self.init_radius**2 - 2.0 * (L.time + L.dt), 0)
exact_radius = np.sqrt(exact_vol / np.pi)
else:
raise NotImplementedError('Can use this only for 2 or 3D problems')
radius *= self.corr_rad
vol *= self.corr_vol
# write to stats
self.add_to_stats(
process=step.status.slot,
time=L.time + L.dt,
level=-1,
iter=step.status.iter,
sweep=L.status.sweep,
type='computed_radius',
value=radius,
)
self.add_to_stats(
process=step.status.slot,
time=L.time + L.dt,
level=-1,
iter=step.status.iter,
sweep=L.status.sweep,
type='exact_radius',
value=exact_radius,
)
self.add_to_stats(
process=step.status.slot,
time=L.time + L.dt,
level=-1,
iter=step.status.iter,
sweep=L.status.sweep,
type='computed_volume',
value=vol,
)
self.add_to_stats(
process=step.status.slot,
time=L.time + L.dt,
level=-1,
iter=step.status.iter,
sweep=L.status.sweep,
type='exact_volume',
value=exact_vol,
)
| 5,782 | 30.091398 | 112 | py |
pySDC | pySDC-master/pySDC/projects/AllenCahn_Bayreuth/run_simple_forcing_verification.py | from argparse import ArgumentParser
import json
import glob
import numpy as np
from mpi4py import MPI
import pySDC.helpers.plot_helper as plt_helper
import matplotlib.ticker as ticker
from pySDC.helpers.stats_helper import get_sorted
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.implementations.sweeper_classes.imex_1st_order import imex_1st_order
from pySDC.implementations.problem_classes.AllenCahn_MPIFFT import allencahn_imex, allencahn_imex_timeforcing
from pySDC.implementations.transfer_classes.TransferMesh_MPIFFT import fft_to_fft
from pySDC.projects.AllenCahn_Bayreuth.AllenCahn_monitor import monitor
def run_simulation(name='', spectral=None, nprocs_space=None):
"""
A test program to do PFASST runs for the AC equation with different forcing
Args:
name (str): name of the run, will be used to distinguish different setups
spectral (bool): run in real or spectral space
nprocs_space (int): number of processors in space (None if serial)
"""
# set MPI communicator
comm = MPI.COMM_WORLD
world_rank = comm.Get_rank()
world_size = comm.Get_size()
# split world communicator to create space-communicators
if nprocs_space is not None:
color = int(world_rank / nprocs_space)
else:
color = int(world_rank / 1)
space_comm = comm.Split(color=color)
space_rank = space_comm.Get_rank()
space_size = space_comm.Get_size()
assert world_size == space_size, 'This script cannot run parallel-in-time with MPI, only spatial parallelism'
# initialize level parameters
level_params = dict()
level_params['restol'] = 1e-08
level_params['dt'] = 1e-03
level_params['nsweeps'] = [1]
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['num_nodes'] = [3]
sweeper_params['QI'] = ['LU'] # For the IMEX sweeper, the LU-trick can be activated for the implicit part
sweeper_params['initial_guess'] = 'spread'
# initialize problem parameters
problem_params = dict()
problem_params['L'] = 1.0
problem_params['nvars'] = [(128, 128), (32, 32)]
problem_params['eps'] = [0.04]
problem_params['radius'] = 0.25
problem_params['comm'] = space_comm
problem_params['init_type'] = 'circle'
problem_params['spectral'] = spectral
if name == 'AC-test-constforce':
problem_params['dw'] = [-23.59]
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 50
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 30 if space_rank == 0 else 99 # set level depending on rank
controller_params['hook_class'] = monitor
controller_params['predict_type'] = 'pfasst_burnin'
# fill description dictionary for easy step instantiation
description = dict()
description['problem_params'] = problem_params # pass problem parameters
description['sweeper_class'] = imex_1st_order
description['sweeper_params'] = sweeper_params # pass sweeper parameters
description['level_params'] = level_params # pass level parameters
description['step_params'] = step_params # pass step parameters
description['space_transfer_class'] = fft_to_fft
if name == 'AC-test-noforce' or name == 'AC-test-constforce':
description['problem_class'] = allencahn_imex
elif name == 'AC-test-timeforce':
description['problem_class'] = allencahn_imex_timeforcing
else:
raise NotImplementedError(f'{name} is not implemented')
# set time parameters
t0 = 0.0
Tend = 32 * 0.001
if space_rank == 0:
out = f'---------> Running {name} with spectral={spectral} and {space_size} process(es) in space...'
print(out)
# instantiate controller
controller = controller_nonMPI(num_procs=8, controller_params=controller_params, description=description)
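    # num_procs=8 emulates eight parallel-in-time steps (PFASST) within the
    # serial nonMPI controller.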
# get initial values on finest level
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(t0)
# call main function to get things done...
uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
if space_rank == 0:
# convert filtered statistics to list of computed radii, sorted by time
computed_radii = get_sorted(stats, type='computed_radius', sortby='time')
exact_radii = get_sorted(stats, type='exact_radius', sortby='time')
computed_vol = get_sorted(stats, type='computed_volume', sortby='time')
exact_vol = get_sorted(stats, type='exact_volume', sortby='time')
# print and store radii and error over time
err_test = 0.0
results = dict()
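        # results maps each time point to the tuple (computed radius, exact radius,
        # relative radius error, computed volume, exact volume, relative volume error)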
for cr, er, cv, ev in zip(computed_radii, exact_radii, computed_vol, exact_vol):
if name == 'AC-test-noforce':
exrad = er[1]
exvol = ev[1]
else:
exrad = computed_radii[0][1]
exvol = computed_vol[0][1]
if exrad > 0:
errr = abs(cr[1] - exrad) / exrad
errv = abs(cv[1] - exvol) / exvol
else:
errr = 1.0
errv = 1.0
            if np.isclose(cr[0], 0.025):  # robust float comparison of accumulated output times
err_test = errr
out = f'Computed/exact/error radius for time {cr[0]:6.4f}: ' f'{cr[1]:8.6f} / {exrad:8.6f} / {errr:6.4e}'
print(out)
results[cr[0]] = (cr[1], exrad, errr, cv[1], exvol, errv)
fname = f'./data/{name}_results.json'
with open(fname, 'w') as fp:
json.dump(results, fp, sort_keys=True, indent=4)
print()
# convert filtered statistics of iterations count, sorted by time
iter_counts = get_sorted(stats, type='niter', sortby='time')
niters = np.mean(np.array([item[1] for item in iter_counts]))
out = f'Mean number of iterations: {niters:.4f}'
print(out)
# get setup time
timing = get_sorted(stats, type='timing_setup', sortby='time')
out = f'Setup time: {timing[0][1]:.4f} sec.'
print(out)
# get running time
timing = get_sorted(stats, type='timing_run', sortby='time')
out = f'Time to solution: {timing[0][1]:.4f} sec.'
print(out)
out = '...Done <---------\n'
print(out)
# Testing the output
if name == 'AC-test-noforce':
if spectral:
exp_iters = 6.59375
exp_err = 7.821e-02
else:
exp_iters = 7.8125
exp_err = 7.85e-02
elif name == 'AC-test-constforce':
if spectral:
exp_iters = 2.875
exp_err = 4.678e-04
else:
exp_iters = 4.3125
exp_err = 6.2384e-04
elif name == 'AC-test-timeforce':
if spectral:
exp_iters = 1.65625
exp_err = 6.2345e-04
else:
exp_iters = 2.40625
exp_err = 6.2345e-04
else:
raise NotImplementedError(f'{name} is not implemented')
assert niters == exp_iters, f'Got deviating iteration counts of {niters} instead of {exp_iters}'
assert err_test < exp_err, f'Got deviating errors of {err_test} instead of {exp_err}'
def visualize_radii():
"""
Routine to plot the radii of the runs vs. the exact radii
"""
plt_helper.setup_mpl()
filelist = glob.glob('./data/*_results.json')
for file in filelist:
# read in file with data
with open(file, 'r') as fp:
results = json.load(fp)
print(f'Working on {file}...')
# get times and radii
xcoords = list(results)
computed_radii = [v[0] for k, v in results.items()]
exact_radii = [v[1] for k, v in results.items()]
computed_vol = [v[3] for k, v in results.items()]
exact_vol = [v[4] for k, v in results.items()]
# compute bound for y-axis
max_rad = max(max(computed_radii), max(exact_radii))
max_vol = max(max(computed_vol), max(exact_vol))
# set up plot for radii
fig, ax = plt_helper.newfig(textwidth=238.96, scale=1.0)
# and plot
ax.plot(xcoords, computed_radii, label='Computed radius')
ax.plot(xcoords, exact_radii, color='k', linestyle='--', linewidth=1, label='Exact radius')
# beautify and save plot
ax.set_ylim([-0.01, max_rad * 1.1])
ax.yaxis.set_major_formatter(ticker.FormatStrFormatter('%1.2f'))
# ax.xaxis.set_major_formatter(ticker.FormatStrFormatter('%1.2f'))
ax.xaxis.set_major_locator(ticker.MultipleLocator(4))
ax.set_ylabel('radius')
ax.set_xlabel('time')
ax.grid()
ax.legend(loc=3)
# ax.set_title(file.split('/')[-1].replace('_results.json', ''))
f = file.replace('_results.json', '_radii')
plt_helper.savefig(f)
# test if all went well
assert glob.glob(f'{f}.pdf'), 'ERROR: plotting did not create PDF file'
# assert glob.glob(f'{f}.pgf'), 'ERROR: plotting did not create PGF file'
assert glob.glob(f'{f}.png'), 'ERROR: plotting did not create PNG file'
# set up plot for volumes
fig, ax = plt_helper.newfig(textwidth=238.96, scale=1.0)
# and plot
ax.plot(xcoords, computed_vol, label='Computed volume')
ax.plot(xcoords, exact_vol, color='k', linestyle='--', linewidth=1, label='Exact volume')
# beautify and save plot
ax.set_ylim([-0.01, max_vol * 1.1])
ax.yaxis.set_major_formatter(ticker.FormatStrFormatter('%1.2f'))
# ax.xaxis.set_major_formatter(ticker.FormatStrFormatter('%1.2f'))
ax.xaxis.set_major_locator(ticker.MultipleLocator(4))
        ax.set_ylabel('volume')
ax.set_xlabel('time')
ax.grid()
ax.legend(loc=3)
# ax.set_title(file.split('/')[-1].replace('_results.json', ''))
f = file.replace('_results.json', '_volume')
plt_helper.savefig(f)
# test if all went well
assert glob.glob(f'{f}.pdf'), 'ERROR: plotting did not create PDF file'
# assert glob.glob(f'{f}.pgf'), 'ERROR: plotting did not create PGF file'
assert glob.glob(f'{f}.png'), 'ERROR: plotting did not create PNG file'
def main(nprocs_space=None):
"""
Little helper routine to run the whole thing
Args:
nprocs_space (int): number of processors in space (None if serial)
"""
name_list = ['AC-test-noforce', 'AC-test-constforce', 'AC-test-timeforce']
for name in name_list:
run_simulation(name=name, spectral=False, nprocs_space=nprocs_space)
run_simulation(name=name, spectral=True, nprocs_space=nprocs_space)
if __name__ == "__main__":
# Add parser to get number of processors in space (have to do this here to enable automatic testing)
parser = ArgumentParser()
parser.add_argument("-n", "--nprocs_space", help='Specifies the number of processors in space', type=int)
args = parser.parse_args()
main(nprocs_space=args.nprocs_space)
visualize_radii()
| 11,173 | 36.246667 | 117 | py |
pySDC | pySDC-master/pySDC/projects/AllenCahn_Bayreuth/__init__.py | 0 | 0 | 0 | py |
|
pySDC | pySDC-master/pySDC/projects/AllenCahn_Bayreuth/run_simple_forcing_benchmark.py | from argparse import ArgumentParser
import warnings
import numpy as np
from mpi4py import MPI
warnings.filterwarnings("ignore")
from pySDC.helpers.stats_helper import get_sorted
from pySDC.implementations.controller_classes.controller_MPI import controller_MPI
from pySDC.implementations.sweeper_classes.imex_1st_order import imex_1st_order
from pySDC.implementations.problem_classes.AllenCahn_MPIFFT import allencahn_imex, allencahn_imex_timeforcing
from pySDC.implementations.transfer_classes.TransferMesh_MPIFFT import fft_to_fft
def run_simulation(name=None, nprocs_space=None):
"""
A simple test program to do PFASST runs for the AC equation
"""
# set MPI communicator
comm = MPI.COMM_WORLD
world_rank = comm.Get_rank()
world_size = comm.Get_size()
# split world communicator to create space-communicators
if nprocs_space is not None:
color = int(world_rank / nprocs_space)
else:
color = int(world_rank / 1)
space_comm = comm.Split(color=color)
space_size = space_comm.Get_size()
space_rank = space_comm.Get_rank()
# split world communicator to create time-communicators
if nprocs_space is not None:
color = int(world_rank % nprocs_space)
else:
color = int(world_rank / world_size)
time_comm = comm.Split(color=color)
time_size = time_comm.Get_size()
time_rank = time_comm.Get_rank()
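    # Illustrative example (added, not in the original): with 8 world ranks and
    # nprocs_space=2, the space split yields four communicators {0,1}, {2,3}, {4,5},
    # {6,7} and the time split yields two communicators {0,2,4,6}, {1,3,5,7},
    # i.e. a 2 (space) x 4 (time) process grid.
    # Sanity guard (added): the space-time decomposition must tile COMM_WORLD.
    assert space_size * time_size == world_size, 'space-time split does not match COMM_WORLD'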
# print(time_size, space_size, world_size)
# initialize level parameters
level_params = dict()
level_params['restol'] = 1e-08
level_params['dt'] = 1e-03
level_params['nsweeps'] = [3, 1]
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['num_nodes'] = [3]
sweeper_params['QI'] = ['LU'] # For the IMEX sweeper, the LU-trick can be activated for the implicit part
sweeper_params['initial_guess'] = 'zero'
# initialize problem parameters
problem_params = dict()
problem_params['L'] = 4.0
# problem_params['L'] = 16.0
problem_params['nvars'] = [(48 * 12, 48 * 12), (8 * 12, 8 * 12)]
# problem_params['nvars'] = [(48 * 48, 48 * 48), (8 * 48, 8 * 48)]
problem_params['eps'] = [0.04]
problem_params['radius'] = 0.25
problem_params['comm'] = space_comm
problem_params['init_type'] = 'circle_rand'
problem_params['spectral'] = False
if name == 'AC-bench-constforce':
problem_params['dw'] = [-23.59]
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 50
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 20 if space_rank == 0 else 99 # set level depending on rank
controller_params['predict_type'] = 'fine_only'
# fill description dictionary for easy step instantiation
description = dict()
description['problem_params'] = problem_params # pass problem parameters
description['sweeper_class'] = imex_1st_order
description['sweeper_params'] = sweeper_params # pass sweeper parameters
description['level_params'] = level_params # pass level parameters
description['step_params'] = step_params # pass step parameters
description['space_transfer_class'] = fft_to_fft
if name == 'AC-bench-noforce' or name == 'AC-bench-constforce':
description['problem_class'] = allencahn_imex
elif name == 'AC-bench-timeforce':
description['problem_class'] = allencahn_imex_timeforcing
else:
raise NotImplementedError(f'{name} is not implemented')
# set time parameters
t0 = 0.0
Tend = 12 * 0.001
if space_rank == 0 and time_rank == 0:
out = f'---------> Running {name} with {time_size} process(es) in time and {space_size} process(es) in space...'
print(out)
# instantiate controller
controller = controller_MPI(controller_params=controller_params, description=description, comm=time_comm)
# get initial values on finest level
P = controller.S.levels[0].prob
uinit = P.u_exact(t0)
# call main function to get things done...
uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
if space_rank == 0:
print()
# convert filtered statistics to list of iterations count, sorted by time
iter_counts = get_sorted(stats, type='niter', sortby='time')
niters = np.array([item[1] for item in iter_counts])
out = f'Mean number of iterations on rank {time_rank}: {np.mean(niters):.4f}'
print(out)
timing = get_sorted(stats, type='timing_setup', sortby='time')
out = f'Setup time on rank {time_rank}: {timing[0][1]:.4f} sec.'
print(out)
timing = get_sorted(stats, type='timing_run', sortby='time')
out = f'Time to solution on rank {time_rank}: {timing[0][1]:.4f} sec.'
print(out)
if __name__ == "__main__":
# Add parser to get number of processors in space and setup (have to do this here to enable automatic testing)
parser = ArgumentParser()
parser.add_argument(
"-s",
"--setup",
help='Specifies the setup',
type=str,
default='AC-bench-noforce',
choices=['AC-bench-noforce', 'AC-bench-constforce', 'AC-bench-timeforce'],
)
parser.add_argument("-n", "--nprocs_space", help='Specifies the number of processors in space', type=int)
args = parser.parse_args()
run_simulation(name=args.setup, nprocs_space=args.nprocs_space)
| 5,486 | 35.337748 | 120 | py |
pySDC | pySDC-master/pySDC/projects/AllenCahn_Bayreuth/visualize_temp.py | import json
import glob
import numpy as np
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import AxesGrid
from argparse import ArgumentParser
import imageio
def plot_data(path='./data', name='', output='.'):
"""
Visualization using numpy arrays (written via MPI I/O) and json description
Produces one png file per time-step, combine as movie via e.g.
> ffmpeg -i data/name_%08d.png name.mp4
Args:
path (str): path to data files
name (str): name of the simulation (expects data to be in data path)
output (str): path to output
"""
json_files = sorted(glob.glob(f'{path}/{name}_*.json'))
data_files = sorted(glob.glob(f'{path}/{name}_*.dat'))
for json_file, data_file in zip(json_files, data_files):
with open(json_file, 'r') as fp:
obj = json.load(fp)
index = json_file.split('_')[-1].split('.')[0]
print(f'Working on step {index}...')
array = np.fromfile(data_file, dtype=obj['datatype'])
array = array.reshape(obj['shape'], order='C')
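        # The flat binary written via MPI I/O carries no shape information, so dtype
        # and shape are taken from the accompanying json; order='C' matches the
        # row-major layout used when dumping.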
fig = plt.figure()
grid = AxesGrid(
fig, 111, nrows_ncols=(1, 2), axes_pad=0.15, cbar_mode='single', cbar_location='right', cbar_pad=0.15
)
im = grid[0].imshow(array[..., 0], vmin=0, vmax=1)
im = grid[1].imshow(array[..., 1], vmin=0, vmax=1)
grid[0].set_title(f"Field - Time: {obj['time']:6.4f}")
grid[1].set_title(f"Temperature - Time: {obj['time']:6.4f}")
grid[1].yaxis.set_visible(False)
grid.cbar_axes[0].colorbar(im)
plt.savefig(f'{output}/{name}_{index}.png', bbox_inches='tight')
plt.close()
def make_movie(path='./data', name='', output='.'):
"""
Visualization using numpy arrays (written via MPI I/O) and json description
    Renders one frame per time step in memory and combines the frames directly
    into an mp4 movie via imageio (no intermediate png files are written).
Args:
path (str): path to data files
name (str): name of the simulation (expects data to be in data path)
output (str): path to output
"""
json_files = sorted(glob.glob(f'{path}/{name}_*.json'))
data_files = sorted(glob.glob(f'{path}/{name}_*.dat'))
img_list = []
for json_file, data_file in zip(json_files, data_files):
with open(json_file, 'r') as fp:
obj = json.load(fp)
        index = json_file.split('_')[-1].split('.')[0]
print(f'Working on step {index}...')
array = np.fromfile(data_file, dtype=obj['datatype'])
array = array.reshape(obj['shape'], order='C')
fig, ax = plt.subplots(1, 2)
ax[0].imshow(array[..., 0], vmin=0, vmax=1)
ax[1].imshow(array[..., 1], vmin=0, vmax=1)
# ax.set_colorbar()
ax[0].set_title(f"Field - Time: {obj['time']:6.4f}")
ax[1].set_title(f"Temperature - Time: {obj['time']:6.4f}")
fig.tight_layout()
# draw the canvas, cache the renderer
fig.canvas.draw()
image = np.frombuffer(fig.canvas.tostring_rgb(), dtype='uint8')
img_list.append(image.reshape(fig.canvas.get_width_height()[::-1] + (3,)))
plt.close()
# c += 1
# if c == 3:
# break
fname = f'{output}/{name}.mp4'
imageio.mimsave(fname, img_list, fps=8)
def make_movie_from_files(path='./data', name='', output='.'):
"""
    Combines previously rendered png files into a movie.
    Reads the png files produced by plot_data and stitches them into an mp4 via
    imageio.
Args:
path (str): path to data files
name (str): name of the simulation (expects data to be in data path)
output (str): path to output
"""
img_files = sorted(glob.glob(f'{path}/{name}_*.png'))
    print(f'Found {len(img_files)} png files for {name} in {path}')
images = []
for fimg in img_files:
img = imageio.imread(fimg)
print(fimg, img.shape)
images.append(imageio.imread(fimg))
fname = f'{output}/{name}.mp4'
imageio.mimsave(fname, images, fps=8)
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("-p", "--path", help='Path to data files', type=str, default='./data')
parser.add_argument("-n", "--name", help='Name of the simulation', type=str)
parser.add_argument("-o", "--output", help='Path for output file', type=str, default='.')
args = parser.parse_args()
# name = 'AC-test-tempforce'
    # name = 'AC-bench-tempforce'  # superseded by the --name argument
plot_data(path=args.path, name=args.name, output=args.output)
# make_movie(path=args.path, name=args.name, output=args.output)
make_movie_from_files(path=args.path, name=args.name, output=args.output)
| 4,752 | 30.686667 | 113 | py |
pySDC | pySDC-master/pySDC/projects/AllenCahn_Bayreuth/visualize.py | import json
import glob
import numpy as np
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
def plot_data(name=''):
"""
Visualization using numpy arrays (written via MPI I/O) and json description
Produces one png file per time-step, combine as movie via e.g.
> ffmpeg -i data/name_%08d.png name.mp4
Args:
name (str): name of the simulation (expects data to be in data path)
"""
json_files = sorted(glob.glob(f'./data/{name}_*.json'))
data_files = sorted(glob.glob(f'./data/{name}_*.dat'))
for json_file, data_file in zip(json_files, data_files):
with open(json_file, 'r') as fp:
obj = json.load(fp)
        index = json_file.split('_')[-1].split('.')[0]
print(f'Working on step {index}...')
array = np.fromfile(data_file, dtype=obj['datatype'])
array = array.reshape(obj['shape'], order='C')
plt.figure()
plt.imshow(array, vmin=0, vmax=1)
plt.colorbar()
plt.title(f"Time: {obj['time']:6.4f}")
plt.savefig(f'data/{name}_{index}.png', bbox_inches='tight')
plt.close()
if __name__ == "__main__":
# name = 'AC-test'
name = 'AC-test-noforce'
# name = 'AC-2D-application'
# name = 'AC-app-timeforce'
plot_data(name=name)
| 1,313 | 24.269231 | 79 | py |
pySDC | pySDC-master/pySDC/projects/TOMS/pySDC_with_PETSc.py | import sys
import numpy as np
from mpi4py import MPI
from pySDC.helpers.stats_helper import get_sorted
from pySDC.implementations.controller_classes.controller_MPI import controller_MPI
from pySDC.implementations.problem_classes.HeatEquation_2D_PETSc_forced import heat2d_petsc_forced
from pySDC.implementations.sweeper_classes.imex_1st_order import imex_1st_order
from pySDC.implementations.transfer_classes.TransferPETScDMDA import mesh_to_mesh_petsc_dmda
def main():
"""
Program to demonstrate usage of PETSc data structures and spatial parallelization,
combined with parallelization in time.
"""
# set MPI communicator
comm = MPI.COMM_WORLD
world_rank = comm.Get_rank()
world_size = comm.Get_size()
# split world communicator to create space-communicators
if len(sys.argv) >= 2:
color = int(world_rank / int(sys.argv[1]))
else:
color = int(world_rank / 1)
space_comm = comm.Split(color=color)
space_size = space_comm.Get_size()
space_rank = space_comm.Get_rank()
# split world communicator to create time-communicators
if len(sys.argv) >= 2:
color = int(world_rank % int(sys.argv[1]))
else:
color = int(world_rank / world_size)
time_comm = comm.Split(color=color)
time_size = time_comm.Get_size()
time_rank = time_comm.Get_rank()
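    # Illustrative launch (added, not in the original): "mpirun -np 24 python
    # pySDC_with_PETSc.py 12" gives two space communicators of 12 ranks each and
    # 2 ranks in time; without the argument, every rank forms its own space
    # communicator and all ranks parallelize in time.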
print(
"IDs (world, space, time): %i / %i -- %i / %i -- %i / %i"
% (world_rank, world_size, space_rank, space_size, time_rank, time_size)
)
# initialize level parameters
level_params = dict()
level_params['restol'] = 1e-08
level_params['dt'] = 0.125
level_params['nsweeps'] = [3, 1]
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['num_nodes'] = [5]
sweeper_params['QI'] = ['LU'] # For the IMEX sweeper, the LU-trick can be activated for the implicit part
sweeper_params['initial_guess'] = 'zero'
# initialize problem parameters
problem_params = dict()
problem_params['nu'] = 1.0 # diffusion coefficient
problem_params['freq'] = 2 # frequency for the test value
problem_params['cnvars'] = [(129, 129)] # number of degrees of freedom on coarse level
problem_params['refine'] = [1, 0] # number of refinements
problem_params['comm'] = space_comm # pass space-communicator to problem class
problem_params['sol_tol'] = 1e-10 # set tolerance to PETSc' linear solver
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 50
# initialize space transfer parameters
space_transfer_params = dict()
space_transfer_params['rorder'] = 2
space_transfer_params['iorder'] = 2
space_transfer_params['periodic'] = False
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 20 if space_rank == 0 else 99 # set level depending on rank
controller_params['dump_setup'] = False
# fill description dictionary for easy step instantiation
description = dict()
description['problem_class'] = heat2d_petsc_forced # pass problem class
description['problem_params'] = problem_params # pass problem parameters
    description['sweeper_class'] = imex_1st_order  # pass sweeper class
description['sweeper_params'] = sweeper_params # pass sweeper parameters
description['level_params'] = level_params # pass level parameters
description['step_params'] = step_params # pass step parameters
description['space_transfer_class'] = mesh_to_mesh_petsc_dmda # pass spatial transfer class
    description['space_transfer_params'] = space_transfer_params  # pass parameters for spatial transfer
# set time parameters
t0 = 0.0
Tend = 3.0
# instantiate controller
controller = controller_MPI(controller_params=controller_params, description=description, comm=time_comm)
# get initial values on finest level
P = controller.S.levels[0].prob
uinit = P.u_exact(t0)
# call main function to get things done...
uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
# compute exact solution and compare
uex = P.u_exact(Tend)
err = abs(uex - uend)
# filter statistics by type (number of iterations)
iter_counts = get_sorted(stats, type='niter', sortby='time')
niters = np.array([item[1] for item in iter_counts])
# limit output to space-rank 0 (as before when setting the logger level)
if space_rank == 0:
out = 'This is time-rank %i...' % time_rank
print(out)
# compute and print statistics
for item in iter_counts:
out = 'Number of iterations for time %4.2f: %2i' % item
print(out)
out = ' Mean number of iterations: %4.2f' % np.mean(niters)
print(out)
out = ' Range of values for number of iterations: %2i ' % np.ptp(niters)
print(out)
out = ' Position of max/min number of iterations: %2i -- %2i' % (
int(np.argmax(niters)),
int(np.argmin(niters)),
)
print(out)
out = ' Std and var for number of iterations: %4.2f -- %4.2f' % (float(np.std(niters)), float(np.var(niters)))
print(out)
print(' Iteration count linear solver: %i' % P.ksp_itercount)
print(' Mean Iteration count per call: %4.2f' % (P.ksp_itercount / max(P.ksp_ncalls, 1)))
timing = get_sorted(stats, type='timing_run', sortby='time')
out = 'Time to solution: %6.4f sec.' % timing[0][1]
print(out)
out = 'Error vs. PDE solution: %6.4e' % err
print(out)
if __name__ == "__main__":
main()
| 5,708 | 36.071429 | 120 | py |
pySDC | pySDC-master/pySDC/projects/TOMS/AllenCahn_monitor.py | import numpy as np
from pySDC.core.Hooks import hooks
class monitor(hooks):
def __init__(self):
"""
Initialization of Allen-Cahn monitoring
"""
super(monitor, self).__init__()
self.init_radius = None
def pre_run(self, step, level_number):
"""
Overwrite standard pre run hook
Args:
step (pySDC.Step.step): the current step
level_number (int): the current level number
"""
super(monitor, self).pre_run(step, level_number)
L = step.levels[0]
c = np.count_nonzero(L.u[0] > 0.0)
radius = np.sqrt(c / np.pi) * L.prob.dx
radius1 = 0
rows, cols = np.where(L.u[0] > 0.0)
for r in rows:
radius1 = max(radius1, abs(L.prob.xvalues[r]))
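        # radius1 is an alternative radius estimate via the maximal x-extent of the
        # positive phase and is kept for diagnostic purposes only.
        # The interface of the Allen-Cahn profile is tanh-shaped; its width is
        # estimated below along the middle row as the distance between the first
        # point above -0.99 and the last point below 0.99, normalized by eps.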
rows1 = np.where(L.u[0][int((L.prob.init[0][0]) / 2), : int((L.prob.init[0][0]) / 2)] > -0.99)
rows2 = np.where(L.u[0][int((L.prob.init[0][0]) / 2), : int((L.prob.init[0][0]) / 2)] < 0.99)
interface_width = (rows2[0][-1] - rows1[0][0]) * L.prob.dx / L.prob.eps
self.init_radius = L.prob.radius
if L.time == 0.0:
self.add_to_stats(
process=step.status.slot,
time=L.time,
level=-1,
iter=step.status.iter,
sweep=L.status.sweep,
type='computed_radius',
value=radius,
)
self.add_to_stats(
process=step.status.slot,
time=L.time,
level=-1,
iter=step.status.iter,
sweep=L.status.sweep,
type='exact_radius',
value=self.init_radius,
)
self.add_to_stats(
process=step.status.slot,
time=L.time,
level=-1,
iter=step.status.iter,
sweep=L.status.sweep,
type='interface_width',
value=interface_width,
)
def post_step(self, step, level_number):
"""
Overwrite standard post step hook
Args:
step (pySDC.Step.step): the current step
level_number (int): the current level number
"""
super(monitor, self).post_step(step, level_number)
# some abbreviations
L = step.levels[0]
c = np.count_nonzero(L.uend >= 0.0)
radius = np.sqrt(c / np.pi) * L.prob.dx
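        # Sharp-interface limit in 2D: the circle shrinks by curvature, dr/dt = -1/r,
        # hence r(t)**2 = r0**2 - 2t.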
exact_radius = np.sqrt(max(self.init_radius**2 - 2.0 * (L.time + L.dt), 0))
rows1 = np.where(L.uend[int((L.prob.init[0][0]) / 2), : int((L.prob.init[0][0]) / 2)] > -0.99)
rows2 = np.where(L.uend[int((L.prob.init[0][0]) / 2), : int((L.prob.init[0][0]) / 2)] < 0.99)
interface_width = (rows2[0][-1] - rows1[0][0]) * L.prob.dx / L.prob.eps
self.add_to_stats(
process=step.status.slot,
time=L.time + L.dt,
level=-1,
iter=step.status.iter,
sweep=L.status.sweep,
type='computed_radius',
value=radius,
)
self.add_to_stats(
process=step.status.slot,
time=L.time + L.dt,
level=-1,
iter=step.status.iter,
sweep=L.status.sweep,
type='exact_radius',
value=exact_radius,
)
self.add_to_stats(
process=step.status.slot,
time=L.time + L.dt,
level=-1,
iter=step.status.iter,
sweep=L.status.sweep,
type='interface_width',
value=interface_width,
)
| 3,651 | 30.213675 | 102 | py |
pySDC | pySDC-master/pySDC/projects/TOMS/__init__.py | 0 | 0 | 0 | py |
|
pySDC | pySDC-master/pySDC/projects/TOMS/visualize_pySDC_with_PETSc.py | import os
import matplotlib.colors as colors
import numpy as np
import pySDC.helpers.plot_helper as plt_helper
def is_number(s):
"""
Helper function to detect numbers
Args:
s: a string
Returns:
bool: True if s is a number
"""
try:
float(s)
return True
except ValueError:
pass
try:
import unicodedata
unicodedata.numeric(s)
return True
except (TypeError, ValueError):
pass
return False
def join_timings(file=None, result=None, cwd=''):
"""
Helper function to read in JUBE result tables and convert/join them into a single dictionary
Args:
        file: current file containing a JUBE result table
result: dictionary (empty or not)
cwd (str): current working directory
Returns:
dict: result dictionary for further usage
"""
with open(cwd + file) as f:
lines = f.readlines()
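    # Only table rows whose first column is a number are parsed; the first three
    # columns are combined to recover the number of cores in time, the fourth
    # column holds the measured runtime.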
for line in lines:
line_split = line.replace('\n', '').replace(' ', '').split('|')
if is_number(line_split[0]):
ntime = int(int(line_split[0]) * int(line_split[1]) / int(line_split[2]))
nspace = int(line_split[2])
timing = float(line_split[3])
result[(nspace, ntime)] = timing
return result
def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100):
"""
Helper function to crop a colormap
Args:
cmap: colormap
minval: minimum value
maxval: maximum value
n: stepsize
Returns:
cropped colormap
"""
new_cmap = colors.LinearSegmentedColormap.from_list(
'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval), cmap(np.linspace(minval, maxval, n))
)
return new_cmap
def visualize_matrix(result=None):
"""
Visualizes runtimes in a matrix (cores in space vs. cores in time)
Args:
result: dictionary containing the runtimes
"""
process_list = [1, 2, 4, 6, 12, 24]
dim = len(process_list)
mat = np.zeros((dim, dim))
tmin = 1e03
tmax = 0
for key, item in result.items():
mat[process_list.index(key[0]), process_list.index(key[1])] = item
tmin = min(tmin, item)
tmax = max(tmax, item)
plt_helper.setup_mpl()
plt_helper.newfig(textwidth=120, scale=1.5)
cmap = plt_helper.plt.get_cmap('RdYlGn_r')
new_cmap = truncate_colormap(cmap, 0.1, 0.9)
plt_helper.plt.imshow(
mat.T, origin='lower', norm=colors.LogNorm(vmin=tmin, vmax=tmax), cmap=new_cmap, aspect='auto'
)
for key, item in result.items():
timing = "{:3.1f}".format(item)
plt_helper.plt.annotate(
timing,
xy=(process_list.index(key[0]), process_list.index(key[1])),
size='x-small',
ha='center',
va='center',
)
plt_helper.plt.xticks(range(dim), process_list)
plt_helper.plt.yticks(range(dim), process_list)
plt_helper.plt.xlabel('Cores in space')
plt_helper.plt.ylabel('Cores in time')
fname = 'data/runtimes_matrix_heat'
plt_helper.savefig(fname)
assert os.path.isfile(fname + '.pdf'), 'ERROR: plotting did not create PDF file'
# assert os.path.isfile(fname + '.pgf'), 'ERROR: plotting did not create PGF file'
assert os.path.isfile(fname + '.png'), 'ERROR: plotting did not create PNG file'
def visualize_speedup(result=None):
"""
Visualizes runtimes of two different runs (MLSDC vs. PFASST)
Args:
result: dictionary containing the runtimes
"""
process_list_MLSDC = [1, 2, 4, 6, 12, 24]
process_list_PFASST = [24, 48, 96, 144, 288, 576]
timing_MLSDC = np.zeros(len(process_list_MLSDC))
timing_PFASST = np.zeros((len(process_list_PFASST)))
for key, item in result.items():
if key[0] * key[1] in process_list_MLSDC:
timing_MLSDC[process_list_MLSDC.index(key[0] * key[1])] = item
if key[0] * key[1] in process_list_PFASST:
timing_PFASST[process_list_PFASST.index(key[0] * key[1])] = item
plt_helper.setup_mpl()
plt_helper.newfig(textwidth=120, scale=1.5)
process_list_all = process_list_MLSDC + process_list_PFASST
ideal = [timing_MLSDC[0] / nproc for nproc in process_list_all]
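    # Ideal strong-scaling reference: the single-core MLSDC runtime divided by the
    # total number of cores.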
plt_helper.plt.loglog(process_list_all, ideal, 'k--', label='ideal')
plt_helper.plt.loglog(process_list_MLSDC, timing_MLSDC, 'bo-', label='MLSDC')
plt_helper.plt.loglog(process_list_PFASST, timing_PFASST, 'rs-', label='PFASST')
plt_helper.plt.xlim(process_list_all[0] / 2, process_list_all[-1] * 2)
plt_helper.plt.ylim(ideal[-1] / 2, ideal[0] * 2)
plt_helper.plt.xlabel('Number of cores')
plt_helper.plt.ylabel('Runtime (sec.)')
plt_helper.plt.legend()
plt_helper.plt.grid()
fname = 'data/speedup_heat'
plt_helper.savefig(fname)
assert os.path.isfile(fname + '.pdf'), 'ERROR: plotting did not create PDF file'
# assert os.path.isfile(fname + '.pgf'), 'ERROR: plotting did not create PGF file'
assert os.path.isfile(fname + '.png'), 'ERROR: plotting did not create PNG file'
def main(cwd=''):
"""
Main routine to call them all
Args:
cwd (str): current working directory
"""
result = {}
files = [
'data/result_PFASST_1_NEW.dat',
'data/result_PFASST_2_NEW.dat',
'data/result_PFASST_4_NEW.dat',
'data/result_PFASST_6_NEW.dat',
'data/result_PFASST_12_NEW.dat',
'data/result_PFASST_24_NEW.dat',
]
for file in files:
result = join_timings(file=file, result=result, cwd=cwd)
visualize_matrix(result=result)
result = {}
files = ['data/result_MLSDC_NEW.dat', 'data/result_PFASST_multinode_24_NEW.dat']
for file in files:
result = join_timings(file=file, result=result, cwd=cwd)
# result.pop((24, 24))
visualize_speedup(result=result)
if __name__ == "__main__":
main()
| 5,942 | 28.420792 | 114 | py |
pySDC | pySDC-master/pySDC/projects/TOMS/AllenCahn_contracting_circle.py | import os
import dill
import matplotlib.ticker as ticker
import numpy as np
import pySDC.helpers.plot_helper as plt_helper
from pySDC.helpers.stats_helper import get_sorted
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.implementations.problem_classes.AllenCahn_2D_FD import (
allencahn_fullyimplicit,
allencahn_semiimplicit,
allencahn_semiimplicit_v2,
allencahn_multiimplicit,
allencahn_multiimplicit_v2,
)
from pySDC.implementations.sweeper_classes.generic_implicit import generic_implicit
from pySDC.implementations.sweeper_classes.imex_1st_order import imex_1st_order
from pySDC.implementations.sweeper_classes.multi_implicit import multi_implicit
from pySDC.projects.TOMS.AllenCahn_monitor import monitor
# http://www.personal.psu.edu/qud2/Res/Pre/dz09sisc.pdf
def setup_parameters():
"""
Helper routine to fill in all relevant parameters
Note that this file will be used for all versions of SDC, containing more than necessary for each individual run
Returns:
description (dict)
controller_params (dict)
"""
# initialize level parameters
level_params = dict()
level_params['restol'] = 1e-08
level_params['dt'] = 1e-03
level_params['nsweeps'] = [1]
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['num_nodes'] = [3]
sweeper_params['Q1'] = ['LU']
sweeper_params['Q2'] = ['LU']
sweeper_params['QI'] = ['LU']
sweeper_params['QE'] = ['EE']
sweeper_params['initial_guess'] = 'zero'
    # initialize problem parameters (read in by the problem class)
problem_params = dict()
problem_params['nu'] = 2
problem_params['nvars'] = [(128, 128)]
problem_params['eps'] = [0.04]
problem_params['newton_maxiter'] = 100
problem_params['newton_tol'] = 1e-09
problem_params['lin_tol'] = 1e-10
problem_params['lin_maxiter'] = 100
problem_params['radius'] = 0.25
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 50
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 30
controller_params['hook_class'] = monitor
# fill description dictionary for easy step instantiation
description = dict()
    description['problem_class'] = None  # problem class is set per variant in run_SDC_variant
    description['problem_params'] = problem_params  # pass problem parameters
    description['sweeper_class'] = None  # sweeper class is set per variant in run_SDC_variant
description['sweeper_params'] = sweeper_params # pass sweeper parameters
description['level_params'] = level_params # pass level parameters
description['step_params'] = step_params # pass step parameters
return description, controller_params
def run_SDC_variant(variant=None, inexact=False):
"""
Routine to run particular SDC variant
Args:
variant (str): string describing the variant
inexact (bool): flag to use inexact nonlinear solve (or nor)
Returns:
        stats (dict): iteration and timing statistics of the run
"""
# load (incomplete) default parameters
description, controller_params = setup_parameters()
# add stuff based on variant
if variant == 'fully-implicit':
description['problem_class'] = allencahn_fullyimplicit
description['sweeper_class'] = generic_implicit
if inexact:
description['problem_params']['newton_maxiter'] = 1
elif variant == 'semi-implicit':
description['problem_class'] = allencahn_semiimplicit
description['sweeper_class'] = imex_1st_order
if inexact:
description['problem_params']['lin_maxiter'] = 10
elif variant == 'semi-implicit_v2':
description['problem_class'] = allencahn_semiimplicit_v2
description['sweeper_class'] = imex_1st_order
if inexact:
description['problem_params']['newton_maxiter'] = 1
elif variant == 'multi-implicit':
description['problem_class'] = allencahn_multiimplicit
description['sweeper_class'] = multi_implicit
if inexact:
description['problem_params']['newton_maxiter'] = 1
description['problem_params']['lin_maxiter'] = 10
elif variant == 'multi-implicit_v2':
description['problem_class'] = allencahn_multiimplicit_v2
description['sweeper_class'] = multi_implicit
if inexact:
description['problem_params']['newton_maxiter'] = 1
else:
raise NotImplementedError('Wrong variant specified, got %s' % variant)
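    # "Inexact" caps the inner solves (e.g. a single Newton iteration and/or at
    # most 10 linear iterations per node solve) instead of iterating them to
    # tolerance.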
if inexact:
out = 'Working on inexact %s variant...' % variant
else:
out = 'Working on exact %s variant...' % variant
print(out)
# setup parameters "in time"
t0 = 0
Tend = 0.032
# instantiate controller
controller = controller_nonMPI(num_procs=1, controller_params=controller_params, description=description)
# get initial values on finest level
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(t0)
# call main function to get things done...
uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
# filter statistics by variant (number of iterations)
iter_counts = get_sorted(stats, type='niter', sortby='time')
# compute and print statistics
niters = np.array([item[1] for item in iter_counts])
out = ' Mean number of iterations: %4.2f' % np.mean(niters)
print(out)
out = ' Range of values for number of iterations: %2i ' % np.ptp(niters)
print(out)
out = ' Position of max/min number of iterations: %2i -- %2i' % (int(np.argmax(niters)), int(np.argmin(niters)))
print(out)
out = ' Std and var for number of iterations: %4.2f -- %4.2f' % (float(np.std(niters)), float(np.var(niters)))
print(out)
print(' Iteration count (nonlinear/linear): %i / %i' % (P.newton_itercount, P.lin_itercount))
print(
' Mean Iteration count per call: %4.2f / %4.2f'
% (P.newton_itercount / max(P.newton_ncalls, 1), P.lin_itercount / max(P.lin_ncalls, 1))
)
timing = get_sorted(stats, type='timing_run', sortby='time')
print('Time to solution: %6.4f sec.' % timing[0][1])
print()
return stats
def show_results(fname, cwd=''):
"""
Plotting routine
Args:
fname (str): file name to read in and name plots
cwd (str): current working directory
"""
file = open(cwd + fname + '.pkl', 'rb')
results = dill.load(file)
file.close()
# plt_helper.mpl.style.use('classic')
plt_helper.setup_mpl()
# set up plot for timings
fig, ax1 = plt_helper.newfig(textwidth=238.96, scale=1.5, ratio=0.4)
timings = {}
niters = {}
for key, item in results.items():
timings[key] = get_sorted(item, type='timing_run', sortby='time')[0][1]
iter_counts = get_sorted(item, type='niter', sortby='time')
niters[key] = np.mean(np.array([item[1] for item in iter_counts]))
xcoords = list(range(len(timings)))
sorted_timings = sorted([(key, timings[key]) for key in timings], reverse=True, key=lambda tup: tup[1])
sorted_niters = [(k, niters[k]) for k in [key[0] for key in sorted_timings]]
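    # Reorder the mean iteration counts to follow the descending-runtime order so
    # that both bar series share the same x positions.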
heights_timings = [item[1] for item in sorted_timings]
heights_niters = [item[1] for item in sorted_niters]
keys = [(item[0][1] + ' ' + item[0][0]).replace('-', '\n').replace('_v2', ' mod.') for item in sorted_timings]
ax1.bar(xcoords, heights_timings, align='edge', width=-0.3, label='timings (left axis)')
ax1.set_ylabel('time (sec)')
ax2 = ax1.twinx()
ax2.bar(xcoords, heights_niters, color='lightcoral', align='edge', width=0.3, label='iterations (right axis)')
ax2.set_ylabel('mean number of iterations')
ax1.set_xticks(xcoords)
ax1.set_xticklabels(keys, rotation=90, ha='center')
# ask matplotlib for the plotted objects and their labels
lines, labels = ax1.get_legend_handles_labels()
lines2, labels2 = ax2.get_legend_handles_labels()
ax2.legend(lines + lines2, labels + labels2, loc=0)
# save plot, beautify
f = fname + '_timings'
plt_helper.savefig(f)
assert os.path.isfile(f + '.pdf'), 'ERROR: plotting did not create PDF file'
# assert os.path.isfile(f + '.pgf'), 'ERROR: plotting did not create PGF file'
assert os.path.isfile(f + '.png'), 'ERROR: plotting did not create PNG file'
# set up plot for radii
fig, ax = plt_helper.newfig(textwidth=238.96, scale=1.0)
exact_radii = []
for key, item in results.items():
computed_radii = get_sorted(item, type='computed_radius', sortby='time')
xcoords = [item0[0] for item0 in computed_radii]
radii = [item0[1] for item0 in computed_radii]
if key[0] + ' ' + key[1] == 'fully-implicit exact':
ax.plot(xcoords, radii, label=(key[0] + ' ' + key[1]).replace('_v2', ' mod.'))
exact_radii = get_sorted(item, type='exact_radius', sortby='time')
diff = np.array([abs(item0[1] - item1[1]) for item0, item1 in zip(exact_radii, computed_radii)])
max_pos = int(np.argmax(diff))
assert max(diff) < 0.07, 'ERROR: computed radius is too far away from exact radius, got %s' % max(diff)
assert 0.028 < computed_radii[max_pos][0] < 0.03, (
'ERROR: largest difference is at wrong time, got %s' % computed_radii[max_pos][0]
)
xcoords = [item[0] for item in exact_radii]
radii = [item[1] for item in exact_radii]
ax.plot(xcoords, radii, color='k', linestyle='--', linewidth=1, label='exact')
ax.yaxis.set_major_formatter(ticker.FormatStrFormatter('%1.2f'))
ax.set_ylabel('radius')
ax.set_xlabel('time')
ax.grid()
ax.legend(loc=3)
# save plot, beautify
f = fname + '_radii'
plt_helper.savefig(f)
assert os.path.isfile(f + '.pdf'), 'ERROR: plotting did not create PDF file'
# assert os.path.isfile(f + '.pgf'), 'ERROR: plotting did not create PGF file'
assert os.path.isfile(f + '.png'), 'ERROR: plotting did not create PNG file'
# set up plot for interface width
fig, ax = plt_helper.newfig(textwidth=238.96, scale=1.0)
interface_width = []
for key, item in results.items():
interface_width = get_sorted(item, type='interface_width', sortby='time')
xcoords = [item[0] for item in interface_width]
width = [item[1] for item in interface_width]
if key[0] + ' ' + key[1] == 'fully-implicit exact':
ax.plot(xcoords, width, label=key[0] + ' ' + key[1])
xcoords = [item[0] for item in interface_width]
init_width = [interface_width[0][1]] * len(xcoords)
ax.plot(xcoords, init_width, color='k', linestyle='--', linewidth=1, label='exact')
ax.yaxis.set_major_formatter(ticker.FormatStrFormatter('%1.2f'))
ax.set_ylabel(r'interface width ($\epsilon$)')
ax.set_xlabel('time')
ax.grid()
ax.legend(loc=3)
# save plot, beautify
f = fname + '_interface'
plt_helper.savefig(f)
assert os.path.isfile(f + '.pdf'), 'ERROR: plotting did not create PDF file'
# assert os.path.isfile(f + '.pgf'), 'ERROR: plotting did not create PGF file'
assert os.path.isfile(f + '.png'), 'ERROR: plotting did not create PNG file'
return None
def main(cwd=''):
"""
Main driver
Args:
cwd (str): current working directory (need this for testing)
"""
# Loop over variants, exact and inexact solves
results = {}
for variant in ['multi-implicit', 'semi-implicit', 'fully-implicit', 'semi-implicit_v2', 'multi-implicit_v2']:
results[(variant, 'exact')] = run_SDC_variant(variant=variant, inexact=False)
results[(variant, 'inexact')] = run_SDC_variant(variant=variant, inexact=True)
# dump result
fname = 'data/results_SDC_variants_AllenCahn_1E-03'
file = open(cwd + fname + '.pkl', 'wb')
dill.dump(results, file)
file.close()
assert os.path.isfile(cwd + fname + '.pkl'), 'ERROR: dill did not create file'
# visualize
show_results(fname, cwd=cwd)
if __name__ == "__main__":
main()
| 12,110 | 35.260479 | 118 | py |