repo (string, 2–99 chars) | file (string, 13–225 chars) | code (string, up to 18.3M chars) | file_length (int64, up to 18.3M) | avg_line_length (float64, up to 1.36M) | max_line_length (int64, up to 4.26M) | extension_type (string, 1 value) |
---|---|---|---|---|---|---|
pySDC | pySDC-master/pySDC/projects/SDC_showdown/SDC_timing_GrayScott.py | import os
import pickle
import numpy as np
from petsc4py import PETSc
import pySDC.helpers.plot_helper as plt_helper
from pySDC.helpers.stats_helper import get_sorted
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.implementations.problem_classes.GrayScott_2D_PETSc_periodic import (
petsc_grayscott_multiimplicit,
petsc_grayscott_fullyimplicit,
petsc_grayscott_semiimplicit,
)
from pySDC.implementations.sweeper_classes.generic_implicit import generic_implicit
from pySDC.implementations.sweeper_classes.imex_1st_order import imex_1st_order
from pySDC.implementations.sweeper_classes.multi_implicit import multi_implicit
def setup_parameters():
"""
Helper routine to fill in all relevant parameters
Note that this setup is used for all SDC variants, so it contains more parameters than any individual run needs
Returns:
description (dict)
controller_params (dict)
"""
# initialize level parameters
level_params = dict()
level_params['restol'] = 1e-08
level_params['dt'] = 1.0
level_params['nsweeps'] = [1]
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['num_nodes'] = [3]
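# 'LU' activates the LU-trick; Q1/Q2 are the preconditioners of the multi-implicit sweeper, QI the one of the fully-/semi-implicit sweepers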
sweeper_params['Q1'] = ['LU']
sweeper_params['Q2'] = ['LU']
sweeper_params['QI'] = ['LU']
sweeper_params['initial_guess'] = 'zero'
# initialize problem parameters
problem_params = dict()
problem_params['Du'] = 1.0
problem_params['Dv'] = 0.01
problem_params['A'] = 0.09
problem_params['B'] = 0.086
problem_params['nvars'] = [(128, 128)]
problem_params['nlsol_tol'] = 1e-10
problem_params['nlsol_maxiter'] = 100
problem_params['lsol_tol'] = 1e-10
problem_params['lsol_maxiter'] = 100
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 50
# initialize space transfer parameters
# space_transfer_params = dict()
# space_transfer_params['finter'] = True
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 30
# fill description dictionary for easy step instantiation
description = dict()
description['problem_class'] = None # pass problem class
description['problem_params'] = problem_params # pass problem parameters
description['sweeper_class'] = None # pass sweeper (see part B)
description['sweeper_params'] = sweeper_params # pass sweeper parameters
description['level_params'] = level_params # pass level parameters
description['step_params'] = step_params # pass step parameters
# description['space_transfer_class'] = mesh_to_mesh_petsc_dmda # pass spatial transfer class
# description['space_transfer_params'] = space_transfer_params # pass parameters for spatial transfer
return description, controller_params
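# Minimal sketch of how the placeholders above get filled (mirrors run_SDC_variant below;
# the semi-implicit pair is just one of the three variants):
#
#     description, controller_params = setup_parameters()
#     description['problem_class'] = petsc_grayscott_semiimplicit
#     description['sweeper_class'] = imex_1st_order
#     controller = controller_nonMPI(num_procs=1, controller_params=controller_params, description=description)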
def run_SDC_variant(variant=None, inexact=False, cwd=''):
"""
Routine to run particular SDC variant
Args:
variant (str): string describing the variant
inexact (bool): flag to use inexact nonlinear solve (or not)
cwd (str): current working directory
Returns:
timing (float)
niter (float)
"""
# load (incomplete) default parameters
description, controller_params = setup_parameters()
# add stuff based on variant
if variant == 'fully-implicit':
description['problem_class'] = petsc_grayscott_fullyimplicit
description['sweeper_class'] = generic_implicit
elif variant == 'semi-implicit':
description['problem_class'] = petsc_grayscott_semiimplicit
description['sweeper_class'] = imex_1st_order
elif variant == 'multi-implicit':
description['problem_class'] = petsc_grayscott_multiimplicit
description['sweeper_class'] = multi_implicit
else:
raise NotImplementedError('Wrong variant specified, got %s' % variant)
if inexact:
description['problem_params']['lsol_maxiter'] = 2
description['problem_params']['nlsol_maxiter'] = 1
out = 'Working on inexact %s variant...' % variant
else:
out = 'Working on exact %s variant...' % variant
print(out)
# set time parameters
t0 = 0.0
Tend = 1.0
# instantiate controller
controller = controller_nonMPI(num_procs=1, controller_params=controller_params, description=description)
# get initial values on finest level
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(t0)
# call main function to get things done...
uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
# load reference solution to compare with
fname = cwd + 'data/GS_reference.dat'
viewer = PETSc.Viewer().createBinary(fname, 'r')
uex = P.u_exact(t0)
uex[:] = PETSc.Vec().load(viewer)
err = abs(uex - uend)
# filter statistics by variant (number of iterations)
iter_counts = get_sorted(stats, type='niter', sortby='time')
# compute and print statistics
niters = np.array([item[1] for item in iter_counts])
out = ' Mean number of iterations: %4.2f' % np.mean(niters)
print(out)
out = ' Range of values for number of iterations: %2i ' % np.ptp(niters)
print(out)
out = ' Position of max/min number of iterations: %2i -- %2i' % (int(np.argmax(niters)), int(np.argmin(niters)))
print(out)
out = ' Std and var for number of iterations: %4.2f -- %4.2f' % (float(np.std(niters)), float(np.var(niters)))
print(out)
print('Iteration count (nonlinear/linear): %i / %i' % (P.snes_itercount, P.ksp_itercount))
print(
'Mean Iteration count per call: %4.2f / %4.2f'
% (P.snes_itercount / max(P.snes_ncalls, 1), P.ksp_itercount / max(P.ksp_ncalls, 1))
)
timing = get_sorted(stats, type='timing_run', sortby='time')
print('Time to solution: %6.4f sec.' % timing[0][1])
print('Error vs. reference solution: %6.4e' % err)
print()
assert err < 3e-06, 'ERROR: variant %s did not match error tolerance, got %s' % (variant, err)
assert np.mean(niters) <= 10, 'ERROR: number of iterations is too high, got %s' % np.mean(niters)
return timing[0][1], np.mean(niters)
def show_results(fname):
"""
Plotting routine
Args:
fname: file name to read in and name plots
"""
file = open(fname + '.pkl', 'rb')
results = pickle.load(file)
file.close()
plt_helper.mpl.style.use('classic')
plt_helper.setup_mpl()
plt_helper.newfig(textwidth=238.96, scale=1.0)
xcoords = list(range(len(results)))
sorted_data = sorted([(key, results[key][0]) for key in results], reverse=True, key=lambda tup: tup[1])
heights = [item[1] for item in sorted_data]
keys = [(item[0][1] + ' ' + item[0][0]).replace('-', '\n') for item in sorted_data]
plt_helper.plt.bar(xcoords, heights, align='center')
plt_helper.plt.xticks(xcoords, keys, rotation=90)
plt_helper.plt.ylabel('time (sec)')
# save plot, beautify
plt_helper.savefig(fname)
assert os.path.isfile(fname + '.pdf'), 'ERROR: plotting did not create PDF file'
# assert os.path.isfile(fname + '.pgf'), 'ERROR: plotting did not create PGF file'
assert os.path.isfile(fname + '.png'), 'ERROR: plotting did not create PNG file'
return None
def run_reference():
"""
Helper routine to create a reference solution using very high order SDC and small time-steps
"""
description, controller_params = setup_parameters()
description['problem_class'] = petsc_grayscott_semiimplicit
description['sweeper_class'] = imex_1st_order
description['sweeper_params']['num_nodes'] = 9
description['level_params']['dt'] = 0.01
# set time parameters
t0 = 0.0
Tend = 1.0
# instantiate controller
controller = controller_nonMPI(num_procs=1, controller_params=controller_params, description=description)
# get initial values on finest level
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(t0)
# call main function to get things done...
uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
# filter statistics by variant (number of iterations)
iter_counts = get_sorted(stats, type='niter', sortby='time')
# compute and print statistics
niters = np.array([item[1] for item in iter_counts])
out = ' Mean number of iterations: %4.2f' % np.mean(niters)
print(out)
out = ' Range of values for number of iterations: %2i ' % np.ptp(niters)
print(out)
out = ' Position of max/min number of iterations: %2i -- %2i' % (int(np.argmax(niters)), int(np.argmin(niters)))
print(out)
out = ' Std and var for number of iterations: %4.2f -- %4.2f' % (float(np.std(niters)), float(np.var(niters)))
print(out)
print('Iteration count (nonlinear/linear): %i / %i' % (P.snes_itercount, P.ksp_itercount))
print(
'Mean Iteration count per call: %4.2f / %4.2f'
% (P.snes_itercount / max(P.snes_ncalls, 1), P.ksp_itercount / max(P.ksp_ncalls, 1))
)
timing = get_sorted(stats, type='timing_run', sortby='time')
print('Time to solution: %6.4f sec.' % timing[0][1])
fname = 'data/GS_reference.dat'
viewer = PETSc.Viewer().createBinary(fname, 'w')
viewer.view(uend)
assert os.path.isfile(fname), 'ERROR: PETSc did not create file'
return None
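# Note: run_reference() has to be executed once (see the commented-out call under __main__ below)
# to create data/GS_reference.dat before run_SDC_variant can compare against it.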
def main(cwd=''):
"""
Main driver
Args:
cwd (str): current working directory (need this for testing)
"""
# Loop over variants, exact and inexact solves
results = {}
for variant in ['fully-implicit', 'multi-implicit', 'semi-implicit']:
results[(variant, 'exact')] = run_SDC_variant(variant=variant, inexact=False, cwd=cwd)
results[(variant, 'inexact')] = run_SDC_variant(variant=variant, inexact=True, cwd=cwd)
# dump result
fname = 'data/timings_SDC_variants_GrayScott'
file = open(fname + '.pkl', 'wb')
pickle.dump(results, file)
file.close()
assert os.path.isfile(fname + '.pkl'), 'ERROR: pickle did not create file'
# visualize
show_results(fname)
if __name__ == "__main__":
# run_reference()
main()
| 10,220 | 33.07 | 118 | py |
pySDC | pySDC-master/pySDC/projects/SDC_showdown/SDC_timing_Fisher.py | import os
import pickle
import numpy as np
import pySDC.helpers.plot_helper as plt_helper
from pySDC.helpers.stats_helper import get_sorted
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.implementations.problem_classes.GeneralizedFisher_1D_PETSc import (
petsc_fisher_multiimplicit,
petsc_fisher_fullyimplicit,
petsc_fisher_semiimplicit,
)
from pySDC.implementations.sweeper_classes.generic_implicit import generic_implicit
from pySDC.implementations.sweeper_classes.imex_1st_order import imex_1st_order
from pySDC.implementations.sweeper_classes.multi_implicit import multi_implicit
def setup_parameters():
"""
Helper routine to fill in all relevant parameters
Note that this setup is used for all SDC variants, so it contains more parameters than any individual run needs
Returns:
description (dict)
controller_params (dict)
"""
# initialize level parameters
level_params = dict()
level_params['restol'] = 1e-06
level_params['dt'] = 0.25
level_params['nsweeps'] = [1]
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['num_nodes'] = [3]
sweeper_params['Q1'] = ['LU']
sweeper_params['Q2'] = ['LU']
sweeper_params['QI'] = ['LU']
sweeper_params['initial_guess'] = 'zero'
# initialize problem parameters
problem_params = dict()
problem_params['nu'] = 1
problem_params['nvars'] = 2049
problem_params['lambda0'] = 2.0
problem_params['interval'] = (-50, 50)
problem_params['nlsol_tol'] = 1e-10
problem_params['nlsol_maxiter'] = 100
problem_params['lsol_tol'] = 1e-10
problem_params['lsol_maxiter'] = 100
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 50
# initialize space transfer parameters
# space_transfer_params = dict()
# space_transfer_params['finter'] = True
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 30
# fill description dictionary for easy step instantiation
description = dict()
description['problem_class'] = None # pass problem class
description['problem_params'] = problem_params # pass problem parameters
description['sweeper_class'] = None # pass sweeper (see part B)
description['sweeper_params'] = sweeper_params # pass sweeper parameters
description['level_params'] = level_params # pass level parameters
description['step_params'] = step_params # pass step parameters
# description['space_transfer_class'] = mesh_to_mesh_petsc_dmda # pass spatial transfer class
# description['space_transfer_params'] = space_transfer_params # pass parameters for spatial transfer
return description, controller_params
def run_SDC_variant(variant=None, inexact=False):
"""
Routine to run particular SDC variant
Args:
variant (str): string describing the variant
inexact (bool): flag to use inexact nonlinear solve (or not)
Returns:
timing (float)
niter (float)
"""
# load (incomplete) default parameters
description, controller_params = setup_parameters()
# add stuff based on variant
if variant == 'fully-implicit':
description['problem_class'] = petsc_fisher_fullyimplicit
description['sweeper_class'] = generic_implicit
elif variant == 'semi-implicit':
description['problem_class'] = petsc_fisher_semiimplicit
description['sweeper_class'] = imex_1st_order
elif variant == 'multi-implicit':
description['problem_class'] = petsc_fisher_multiimplicit
description['sweeper_class'] = multi_implicit
else:
raise NotImplementedError('Wrong variant specified, got %s' % variant)
if inexact:
description['problem_params']['nlsol_maxiter'] = 1
out = 'Working on inexact %s variant...' % variant
else:
out = 'Working on exact %s variant...' % variant
print(out)
# set time parameters
t0 = 0.0
Tend = 1.0
# instantiate controller
controller = controller_nonMPI(num_procs=1, controller_params=controller_params, description=description)
# get initial values on finest level
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(t0)
# call main function to get things done...
uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
# compute exact solution and compare
uex = P.u_exact(Tend)
err = abs(uex - uend)
# filter statistics by variant (number of iterations)
iter_counts = get_sorted(stats, type='niter', sortby='time')
# compute and print statistics
niters = np.array([item[1] for item in iter_counts])
out = ' Mean number of iterations: %4.2f' % np.mean(niters)
print(out)
out = ' Range of values for number of iterations: %2i ' % np.ptp(niters)
print(out)
out = ' Position of max/min number of iterations: %2i -- %2i' % (int(np.argmax(niters)), int(np.argmin(niters)))
print(out)
out = ' Std and var for number of iterations: %4.2f -- %4.2f' % (float(np.std(niters)), float(np.var(niters)))
print(out)
print('Iteration count (nonlinear/linear): %i / %i' % (P.snes_itercount, P.ksp_itercount))
print(
'Mean Iteration count per call: %4.2f / %4.2f'
% (P.snes_itercount / max(P.snes_ncalls, 1), P.ksp_itercount / max(P.ksp_ncalls, 1))
)
timing = get_sorted(stats, type='timing_run', sortby='time')
print('Time to solution: %6.4f sec.' % timing[0][1])
print('Error vs. PDE solution: %6.4e' % err)
print()
assert err < 9.2e-05, 'ERROR: variant %s did not match error tolerance, got %s' % (variant, err)
assert np.mean(niters) <= 10, 'ERROR: number of iterations is too high, got %s' % np.mean(niters)
return timing[0][1], np.mean(niters)
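# Unlike the Gray-Scott script above, this run compares against the problem's own u_exact(Tend),
# so no stored reference file is needed.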
def show_results(fname):
"""
Plotting routine
Args:
fname: file name to read in and name plots
"""
file = open(fname + '.pkl', 'rb')
results = pickle.load(file)
file.close()
plt_helper.mpl.style.use('classic')
plt_helper.setup_mpl()
plt_helper.newfig(textwidth=238.96, scale=1.0)
xcoords = list(range(len(results)))
sorted_data = sorted([(key, results[key][0]) for key in results], reverse=True, key=lambda tup: tup[1])
heights = [item[1] for item in sorted_data]
keys = [(item[0][1] + ' ' + item[0][0]).replace('-', '\n') for item in sorted_data]
plt_helper.plt.bar(xcoords, heights, align='center')
plt_helper.plt.xticks(xcoords, keys, rotation=90)
plt_helper.plt.ylabel('time (sec)')
# save plot, beautify
plt_helper.savefig(fname)
assert os.path.isfile(fname + '.pdf'), 'ERROR: plotting did not create PDF file'
# assert os.path.isfile(fname + '.pgf'), 'ERROR: plotting did not create PGF file'
assert os.path.isfile(fname + '.png'), 'ERROR: plotting did not create PNG file'
return None
def main(cwd=''):
"""
Main driver
Args:
cwd (str): current working directory (need this for testing)
"""
# Loop over variants, exact and inexact solves
results = {}
for variant in ['fully-implicit', 'multi-implicit', 'semi-implicit']:
results[(variant, 'exact')] = run_SDC_variant(variant=variant, inexact=False)
results[(variant, 'inexact')] = run_SDC_variant(variant=variant, inexact=True)
# dump result
fname = cwd + 'data/timings_SDC_variants_Fisher'
file = open(fname + '.pkl', 'wb')
pickle.dump(results, file)
file.close()
assert os.path.isfile(fname + '.pkl'), 'ERROR: pickle did not create file'
# visualize
show_results(fname)
if __name__ == "__main__":
main()
| 7,803 | 32.493562 | 118 | py |
pySDC | pySDC-master/pySDC/projects/SDC_showdown/__init__.py |  | 0 | 0 | 0 | py |
pySDC | pySDC-master/pySDC/projects/GPU/ac_fft.py | from pySDC.implementations.problem_classes.AllenCahn_2D_FFT import allencahn2d_imex as ac_fft_cpu
from pySDC.implementations.problem_classes.AllenCahn_2D_FFT_gpu import allencahn2d_imex as ac_fft_gpu
from pySDC.core.Collocation import CollBase as Collocation
from pySDC.implementations.sweeper_classes.imex_1st_order import imex_1st_order
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.helpers.stats_helper import filter_stats, sort_stats
def set_parameter():
# initialize problem parameters
problem_params = dict()
problem_params['nu'] = 2
problem_params['eps'] = 0.04
problem_params['radius'] = 0.25
problem_params['nvars'] = (512, 512)
# initialize level parameters
level_params = dict()
level_params['restol'] = 1e-07
level_params['dt'] = 1e-07
level_params['nsweeps'] = 1
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['collocation_class'] = Collocation
sweeper_params['node_type'] = 'LEGENDRE'
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['QI'] = ['LU']
sweeper_params['QE'] = ['PIC']
sweeper_params['num_nodes'] = 3
sweeper_params['initial_guess'] = 'spread'
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 50
# setup parameters "in time"
t0 = 0
schritte = 8  # 'schritte' is German for 'steps': number of time steps
Tend = schritte * level_params['dt']
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 30
# fill description dictionary for easy step instantiation
description = dict()
description['problem_params'] = problem_params # pass problem parameters
description['sweeper_class'] = imex_1st_order # pass sweeper
description['sweeper_params'] = sweeper_params # pass sweeper parameters
description['level_params'] = level_params # pass level parameters
description['step_params'] = step_params # pass step parameters
return controller_params, description, t0, Tend
def main():
controller_params, description, t0, Tend = set_parameter()
# fill description dictionary with CPU problem
description['problem_class'] = ac_fft_cpu
# instantiate controller cpu
controller = controller_nonMPI(num_procs=1, controller_params=controller_params, description=description)
# get initial values on finest level cpu
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(t0)
# call main function to get things done on cpu...
uend_cpu, stats_cpu = controller.run(u0=uinit, t0=t0, Tend=Tend)
timing_cpu = sort_stats(filter_stats(stats_cpu, type='timing_run'), sortby='time')
print('Runtime CPU:', timing_cpu[0][1])
# change description dictionary with GPU problem
description['problem_class'] = ac_fft_gpu
# instantiate controller cpu
controller = controller_nonMPI(num_procs=1, controller_params=controller_params, description=description)
# get initial values on finest level cpu
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(t0)
# call main function to get things done on cpu...
uend_gpu, stats_gpu = controller.run(u0=uinit, t0=t0, Tend=Tend)
timing_gpu = sort_stats(filter_stats(stats_gpu, type='timing_run'), sortby='time')
print('Runtime GPU:', timing_gpu[0][1])
assert abs(uend_gpu.get() - uend_cpu) < 1e-13, abs(uend_gpu.get() - uend_cpu)
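# uend_gpu lives on the device; CuPy's .get() copies it to host memory for the comparison with the NumPy-based CPU result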
if __name__ == '__main__':
main()
| 3,473 | 35.568421 | 109 | py |
pySDC | pySDC-master/pySDC/projects/GPU/heat.py | from pySDC.implementations.problem_classes.HeatEquation_ND_FD import heatNd_forced as heat_cpu
from pySDC.implementations.problem_classes.HeatEquation_ND_FD_CuPy import heatNd_forced as heat_gpu
from pySDC.core.Collocation import CollBase as Collocation
from pySDC.implementations.sweeper_classes.imex_1st_order import imex_1st_order
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.helpers.stats_helper import filter_stats, sort_stats
def set_parameter(): # pragma: no cover
# initialize problem parameters
problem_params = dict()
problem_params['nu'] = 1
problem_params['freq'] = (4, 4, 4)
problem_params['order'] = 2
problem_params['lintol'] = 1e-10
problem_params['liniter'] = 99
problem_params['solver_type'] = 'CG'
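# CG is applicable here since the implicit systems of the FD heat operator are symmetric positive definite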
problem_params['nvars'] = (32, 32, 32)
problem_params['bc'] = 'periodic'
# initialize level parameters
level_params = dict()
level_params['restol'] = 1e-07
level_params['dt'] = 1e-07
level_params['nsweeps'] = 1
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['collocation_class'] = Collocation
sweeper_params['node_type'] = 'LEGENDRE'
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['QI'] = ['LU']
sweeper_params['QE'] = ['PIC']
sweeper_params['num_nodes'] = 3
sweeper_params['initial_guess'] = 'spread'
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 50
# setup parameters "in time"
t0 = 0
schritte = 8  # 'schritte' is German for 'steps': number of time steps
Tend = schritte * level_params['dt']
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 30
# fill description dictionary for easy step instantiation cpu
description = dict()
description['problem_params'] = problem_params # pass problem parameters
description['sweeper_class'] = imex_1st_order # pass sweeper
description['sweeper_params'] = sweeper_params # pass sweeper parameters
description['level_params'] = level_params # pass level parameters
description['step_params'] = step_params # pass step parameters
return controller_params, description, t0, Tend
def main(): # pragma: no cover
controller_params, description, t0, Tend = set_parameter()
# fill description dictionary with CPU problem
description['problem_class'] = heat_cpu
# instantiate controller cpu
controller = controller_nonMPI(num_procs=1, controller_params=controller_params, description=description)
# get initial values on finest level cpu
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(t0)
# call main function to get things done on cpu...
uend_cpu, stats_cpu = controller.run(u0=uinit, t0=t0, Tend=Tend)
timing_cpu = sort_stats(filter_stats(stats_cpu, type='timing_run'), sortby='time')
print('Runtime CPU:', timing_cpu[0][1])
# change description dictionary for GPU problem
description['problem_class'] = heat_gpu
# instantiate controller cpu
controller = controller_nonMPI(num_procs=1, controller_params=controller_params, description=description)
# get initial values on finest level cpu
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(t0)
# call main function to get things done on cpu...
uend_gpu, stats_gpu = controller.run(u0=uinit, t0=t0, Tend=Tend)
timing_gpu = sort_stats(filter_stats(stats_gpu, type='timing_run'), sortby='time')
print('Runtime GPU:', timing_gpu[0][1])
assert abs(uend_gpu.get() - uend_cpu) < 1e-13, abs(uend_gpu.get() - uend_cpu)
if __name__ == '__main__':
main()
| 3,662 | 36 | 109 | py |
pySDC | pySDC-master/pySDC/projects/GPU/__init__.py |  | 0 | 0 | 0 | py |
pySDC | pySDC-master/pySDC/projects/PinTSimE/switch_estimator.py | import numpy as np
import scipy as sp
from pySDC.core.Collocation import CollBase
from pySDC.core.ConvergenceController import ConvergenceController, Status
class SwitchEstimator(ConvergenceController):
"""
Class to predict the time point of the switch and to set a new step size accordingly.
For now, only a non-MPI version exists, because an MPI version has not been developed yet.
def setup(self, controller, params, description):
"""
Function sets default variables to handle with the switch at the beginning.
Args:
controller (pySDC.Controller): The controller
params (dict): The params passed for this specific convergence controller
description (dict): The description object used to instantiate the controller
Returns:
(dict): The updated params dictionary
"""
# for Runge-Kutta sweepers, sweep.coll.nodes contains the Butcher tableau nodes,
# so the collocation nodes are (re)generated here
coll = CollBase(
num_nodes=description['sweeper_params']['num_nodes'],
quad_type=description['sweeper_params']['quad_type'],
)
defaults = {
'control_order': 100,
'tol': description['level_params']['dt'],
'nodes': coll.nodes,
}
return {**defaults, **params}
def setup_status_variables(self, controller, **kwargs):
"""
Adds switching specific variables to status variables.
Args:
controller (pySDC.Controller): The controller
"""
self.status = Status(['switch_detected', 't_switch'])
def reset_status_variables(self, controller, **kwargs):
"""
Resets status variables.
Args:
controller (pySDC.Controller): The controller
"""
self.setup_status_variables(controller, **kwargs)
def get_new_step_size(self, controller, S, **kwargs):
"""
Determine a new step size when a switch is found such that the switch happens at the time step.
Args:
controller (pySDC.Controller): The controller
S (pySDC.Step): The current step
Returns:
None
"""
L = S.levels[0]
if S.status.iter == S.params.maxiter:
self.status.switch_detected, m_guess, vC_switch = L.prob.get_switching_info(L.u, L.time)
if self.status.switch_detected:
t_interp = [L.time + L.dt * self.params.nodes[m] for m in range(len(self.params.nodes))]
# only find root if vC_switch[0] and vC_switch[-1] have opposite signs (intermediate value theorem)
if vC_switch[0] * vC_switch[-1] < 0:
self.status.t_switch = self.get_switch(t_interp, vC_switch, m_guess)
if L.time <= self.status.t_switch <= L.time + L.dt:
dt_switch = self.status.t_switch - L.time
if not np.isclose(self.status.t_switch - L.time, L.dt, atol=self.params.tol):
self.log(
f"Located Switch at time {self.status.t_switch:.6f} is outside the range of tol={self.params.tol:.4e}",
S,
)
else:
self.log(
f"Switch located at time {self.status.t_switch:.6f} inside tol={self.params.tol:.4e}", S
)
L.prob.t_switch = self.status.t_switch
controller.hooks[0].add_to_stats(
process=S.status.slot,
time=L.time,
level=L.level_index,
iter=0,
sweep=L.status.sweep,
type='switch',
value=self.status.t_switch,
)
L.prob.count_switches()
dt_planned = L.status.dt_new if L.status.dt_new is not None else L.params.dt
# when a switch is found, time step to match with switch should be preferred
if self.status.switch_detected:
L.status.dt_new = dt_switch
else:
L.status.dt_new = min([dt_planned, dt_switch])
else:
self.status.switch_detected = False
else:
self.status.switch_detected = False
def determine_restart(self, controller, S, **kwargs):
"""
Check if the step needs to be restarted due to a predicting switch.
Args:
controller (pySDC.Controller): The controller
S (pySDC.Step): The current step
Returns:
None
"""
if self.status.switch_detected:
S.status.restart = True
S.status.force_done = True
super().determine_restart(controller, S, **kwargs)
def post_step_processing(self, controller, S, **kwargs):
"""
After a step is done, some variables will be prepared for predicting a possibly new switch.
If no Adaptivity is used, the next time step will be set as the default one from the front end.
Args:
controller (pySDC.Controller): The controller
S (pySDC.Step): The current step
Returns:
None
"""
L = S.levels[0]
if self.status.t_switch is None:
L.status.dt_new = L.status.dt_new if L.status.dt_new is not None else L.params.dt_initial
super().post_step_processing(controller, S, **kwargs)
@staticmethod
def get_switch(t_interp, vC_switch, m_guess):
"""
Routine to do the interpolation and root finding stuff.
Args:
t_interp (list): collocation nodes in a step
vC_switch (list): differences vC - V_ref at these collocation nodes
m_guess (int): index at which the difference drops below zero
Returns:
t_switch (np.float): time point of the switch
"""
p = sp.interpolate.interp1d(t_interp, vC_switch, 'cubic', bounds_error=False)
SwitchResults = sp.optimize.root_scalar(
p,
method='brentq',
bracket=[t_interp[0], t_interp[m_guess]],
x0=t_interp[m_guess],
xtol=1e-10,
)
t_switch = SwitchResults.root
return t_switch
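# Self-contained sketch of the interpolation + root-finding step in get_switch,
# with made-up node values (illustrative only, not pySDC data):
#
#     t_interp = [0.0, 0.2, 0.6, 1.0]      # collocation times within one step
#     vC_switch = [0.4, 0.2, -0.1, -0.3]   # vC - V_ref changes sign, so a switch lies inside the step
#     p = sp.interpolate.interp1d(t_interp, vC_switch, 'cubic', bounds_error=False)
#     t_switch = sp.optimize.root_scalar(p, method='brentq', bracket=[t_interp[0], t_interp[-1]], xtol=1e-10).root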
| 6,687 | 34.015707 | 135 | py |
pySDC | pySDC-master/pySDC/projects/PinTSimE/battery_model.py | import numpy as np
import dill
from pathlib import Path
from pySDC.helpers.stats_helper import sort_stats, filter_stats, get_sorted
from pySDC.implementations.problem_classes.Battery import battery, battery_implicit
from pySDC.implementations.sweeper_classes.imex_1st_order import imex_1st_order
from pySDC.implementations.sweeper_classes.generic_implicit import generic_implicit
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.implementations.convergence_controller_classes.basic_restarting import BasicRestartingNonMPI
from pySDC.projects.PinTSimE.piline_model import setup_mpl
import pySDC.helpers.plot_helper as plt_helper
from pySDC.core.Hooks import hooks
from pySDC.projects.PinTSimE.switch_estimator import SwitchEstimator
from pySDC.implementations.convergence_controller_classes.adaptivity import Adaptivity
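# Hook that stores the full solution, the step size and the embedded error estimate in the statistics after every step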
class log_data(hooks):
def post_step(self, step, level_number):
super(log_data, self).post_step(step, level_number)
# some abbreviations
L = step.levels[level_number]
L.sweep.compute_end_point()
self.add_to_stats(
process=step.status.slot,
time=L.time + L.dt,
level=L.level_index,
iter=0,
sweep=L.status.sweep,
type='u',
value=L.uend,
)
self.add_to_stats(
process=step.status.slot,
time=L.time + L.dt,
level=L.level_index,
iter=0,
sweep=L.status.sweep,
type='dt',
value=L.dt,
)
self.add_to_stats(
process=step.status.slot,
time=L.time + L.dt,
level=L.level_index,
iter=0,
sweep=L.status.sweep,
type='e_embedded',
value=L.status.get('error_embedded_estimate'),
)
def generate_description(
dt,
problem,
sweeper,
hook_class,
use_adaptivity,
use_switch_estimator,
ncapacitors,
alpha,
V_ref,
C,
max_restarts=None,
):
"""
Generate a description for the battery models for a controller run.
Args:
dt (float): time step for computation
problem (pySDC.core.Problem.ptype): problem class that wants to be simulated
sweeper (pySDC.core.Sweeper.sweeper): sweeper class for solving the problem class numerically
hook_class (pySDC.core.Hooks): logged data for a problem
use_adaptivity (bool): flag if the adaptivity wants to be used or not
use_switch_estimator (bool): flag if the switch estimator wants to be used or not
ncapacitors (np.int): number of capacitors used for the battery_model
alpha (np.float): Multiple used for the initial conditions (problem parameter)
V_ref (np.ndarray): Reference values for the capacitors (problem parameter)
C (np.ndarray): Capacitances (problem parameter)
max_restarts (int): Maximum number of restarts per step, passed to the restarting convergence controller
Returns:
description (dict): contains all information for a controller run
controller_params (dict): Parameters needed for a controller run
"""
# initialize level parameters
level_params = dict()
level_params['restol'] = -1
level_params['dt'] = dt
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['quad_type'] = 'LOBATTO'
sweeper_params['num_nodes'] = 5
sweeper_params['QI'] = 'IE'
sweeper_params['initial_guess'] = 'spread'
# initialize problem parameters
problem_params = dict()
problem_params['ncapacitors'] = ncapacitors # number of capacitors
problem_params['C'] = C
problem_params['alpha'] = alpha
problem_params['V_ref'] = V_ref
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 4
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 30
controller_params['hook_class'] = hook_class
controller_params['mssdc_jac'] = False
# convergence controllers
convergence_controllers = dict()
if use_switch_estimator:
switch_estimator_params = {}
convergence_controllers.update({SwitchEstimator: switch_estimator_params})
if use_adaptivity:
adaptivity_params = dict()
adaptivity_params['e_tol'] = 1e-7
convergence_controllers.update({Adaptivity: adaptivity_params})
if max_restarts is not None:
convergence_controllers[BasicRestartingNonMPI] = {
'max_restarts': max_restarts,
'crash_after_max_restarts': False,
}
# fill description dictionary for easy step instantiation
description = dict()
description['problem_class'] = problem # pass problem class
description['problem_params'] = problem_params # pass problem parameters
description['sweeper_class'] = sweeper # pass sweeper
description['sweeper_params'] = sweeper_params # pass sweeper parameters
description['level_params'] = level_params # pass level parameters
description['step_params'] = step_params
description['convergence_controllers'] = convergence_controllers
return description, controller_params
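# Example call with the parameters used in run() below:
#
#     description, controller_params = generate_description(
#         1e-2, battery, imex_1st_order, log_data, True, True, 1, 1.2, np.array([1.0]), np.array([1.0]), max_restarts=1
#     )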
def controller_run(description, controller_params, use_adaptivity, use_switch_estimator, t0, Tend):
"""
Executes a controller run for a problem defined in the description
Args:
description (dict): contains all information for a controller run
controller_params (dict): Parameters needed for a controller run
use_adaptivity (bool): flag if the adaptivity wants to be used or not
use_switch_estimator (bool): flag if the switch estimator wants to be used or not
t0 (float): starting time
Tend (float): end time
Returns:
stats (dict): Raw statistics from a controller run
"""
# instantiate controller
controller = controller_nonMPI(num_procs=1, controller_params=controller_params, description=description)
# get initial values on finest level
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(t0)
# call main function to get things done...
uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
problem = description['problem_class']
sweeper = description['sweeper_class']
Path("data").mkdir(parents=True, exist_ok=True)
fname = 'data/{}_{}_USE{}_USA{}.dat'.format(
problem.__name__, sweeper.__name__, use_switch_estimator, use_adaptivity
)
f = open(fname, 'wb')
dill.dump(stats, f)
f.close()
return stats
def run():
"""
Executes the simulation for the battery model using two different sweepers and plots the results
as <problem_class>_model_solution_<sweeper_class>.png
"""
dt = 1e-2
t0 = 0.0
Tend = 0.3
problem_classes = [battery, battery_implicit]
sweeper_classes = [imex_1st_order, generic_implicit]
ncapacitors = 1
alpha = 1.2
V_ref = np.array([1.0])
C = np.array([1.0])
max_restarts = 1
recomputed = False
use_switch_estimator = [True]
use_adaptivity = [True]
for problem, sweeper in zip(problem_classes, sweeper_classes):
for use_SE in use_switch_estimator:
for use_A in use_adaptivity:
description, controller_params = generate_description(
dt, problem, sweeper, log_data, use_A, use_SE, ncapacitors, alpha, V_ref, C, max_restarts
)
# Assertions
proof_assertions_description(description, use_A, use_SE)
proof_assertions_time(dt, Tend, V_ref, alpha)
stats = controller_run(description, controller_params, use_A, use_SE, t0, Tend)
check_solution(stats, dt, problem.__name__, use_A, use_SE)
plot_voltages(description, problem.__name__, sweeper.__name__, recomputed, use_SE, use_A)
def plot_voltages(description, problem, sweeper, recomputed, use_switch_estimator, use_adaptivity, cwd='./'):
"""
Routine to plot the numerical solution of the model
Args:
description(dict): contains all information for a controller run
problem (pySDC.core.Problem.ptype): problem class that wants to be simulated
sweeper (pySDC.core.Sweeper.sweeper): sweeper class for solving the problem class numerically
recomputed (bool): flag if the values after a restart are used or before
use_switch_estimator (bool): flag if the switch estimator wants to be used or not
use_adaptivity (bool): flag if adaptivity wants to be used or not
cwd (str): current working directory
"""
f = open(cwd + 'data/{}_{}_USE{}_USA{}.dat'.format(problem, sweeper, use_switch_estimator, use_adaptivity), 'rb')
stats = dill.load(f)
f.close()
# extract the recorded solution components, sorted by time
cL = np.array([me[1][0] for me in get_sorted(stats, type='u', recomputed=recomputed)])
vC = np.array([me[1][1] for me in get_sorted(stats, type='u', recomputed=recomputed)])
t = np.array([me[0] for me in get_sorted(stats, type='u', recomputed=recomputed)])
setup_mpl()
fig, ax = plt_helper.plt.subplots(1, 1, figsize=(3, 3))
ax.set_title('Simulation of {} using {}'.format(problem, sweeper), fontsize=10)
ax.plot(t, cL, label=r'$i_L$')
ax.plot(t, vC, label=r'$v_C$')
if use_switch_estimator:
switches = get_recomputed(stats, type='switch', sortby='time')
assert len(switches) >= 1, 'No switches found!'
t_switch = [v[1] for v in switches]
ax.axvline(x=t_switch[-1], linestyle='--', linewidth=0.8, color='r', label='Switch')
if use_adaptivity:
dt = np.array(get_sorted(stats, type='dt', recomputed=False))
dt_ax = ax.twinx()
dt_ax.plot(dt[:, 0], dt[:, 1], linestyle='-', linewidth=0.8, color='k', label=r'$\Delta t$')
dt_ax.set_ylabel(r'$\Delta t$', fontsize=8)
dt_ax.legend(frameon=False, fontsize=8, loc='center right')
ax.axhline(y=1.0, linestyle='--', linewidth=0.8, color='g', label='$V_{ref}$')
ax.legend(frameon=False, fontsize=8, loc='upper right')
ax.set_xlabel('Time', fontsize=8)
ax.set_ylabel('Energy', fontsize=8)
fig.savefig('data/{}_model_solution_{}.png'.format(problem, sweeper), dpi=300, bbox_inches='tight')
plt_helper.plt.close(fig)
def check_solution(stats, dt, problem, use_adaptivity, use_switch_estimator):
"""
Function that checks the solution based on a hardcoded reference solution. Based on check_solution function from @brownbaerchen.
Args:
stats (dict): Raw statistics from a controller run
dt (float): initial time step
problem (problem_class.__name__): the problem_class that is numerically solved
use_adaptivity (bool): flag if adaptivity was used in the run
use_switch_estimator (bool): flag if the switch estimator was used in the run
"""
data = get_data_dict(stats, use_adaptivity, use_switch_estimator)
if problem == 'battery':
if use_switch_estimator and use_adaptivity:
msg = f'Error when using switch estimator and adaptivity for battery for dt={dt:.1e}:'
if dt == 1e-2:
expected = {
'cL': 0.5474500710994862,
'vC': 1.0019332967173764,
'dt': 0.011761752270047832,
'e_em': 8.001793672107738e-10,
'switches': 0.18232155791181945,
'restarts': 3.0,
'sum_niters': 44.0,
}
elif dt == 4e-2:
expected = {
'cL': 0.5525783945667581,
'vC': 1.00001743462299,
'dt': 0.03550610373897258,
'e_em': 6.21240694442804e-08,
'switches': 0.18231603298272345,
'restarts': 4.0,
'sum_niters': 56.0,
}
elif dt == 4e-3:
expected = {
'cL': 0.5395601429161445,
'vC': 1.0000413761942089,
'dt': 0.028281271825675414,
'e_em': 2.5628611677319668e-08,
'switches': 0.18230920573953438,
'restarts': 3.0,
'sum_niters': 48.0,
}
got = {
'cL': data['cL'][-1],
'vC': data['vC'][-1],
'dt': data['dt'][-1],
'e_em': data['e_em'][-1],
'switches': data['switches'][-1],
'restarts': data['restarts'],
'sum_niters': data['sum_niters'],
}
elif use_switch_estimator and not use_adaptivity:
msg = f'Error when using switch estimator for battery for dt={dt:.1e}:'
if dt == 1e-2:
expected = {
'cL': 0.5495834172613568,
'vC': 1.000118710428906,
'switches': 0.1823188001399631,
'restarts': 1.0,
'sum_niters': 128.0,
}
elif dt == 4e-2:
expected = {
'cL': 0.553775247309617,
'vC': 1.0010140038721593,
'switches': 0.1824302065533169,
'restarts': 1.0,
'sum_niters': 36.0,
}
elif dt == 4e-3:
expected = {
'cL': 0.5495840499078819,
'vC': 1.0001158309787614,
'switches': 0.18232183080236553,
'restarts': 1.0,
'sum_niters': 308.0,
}
got = {
'cL': data['cL'][-1],
'vC': data['vC'][-1],
'switches': data['switches'][-1],
'restarts': data['restarts'],
'sum_niters': data['sum_niters'],
}
elif not use_switch_estimator and use_adaptivity:
msg = f'Error when using adaptivity for battery for dt={dt:.1e}:'
if dt == 1e-2:
expected = {
'cL': 0.5401449976237487,
'vC': 0.9944656165121677,
'dt': 0.013143356036619536,
'e_em': 1.2462494369813726e-09,
'restarts': 3.0,
'sum_niters': 52.0,
}
elif dt == 4e-2:
expected = {
'cL': 0.5966289599915113,
'vC': 0.9923148791604984,
'dt': 0.03564958366355817,
'e_em': 6.210964231812e-08,
'restarts': 1.0,
'sum_niters': 36.0,
}
elif dt == 4e-3:
expected = {
'cL': 0.5431613774808756,
'vC': 0.9934307674636834,
'dt': 0.022880524075396924,
'e_em': 1.1130212751453428e-08,
'restarts': 3.0,
'sum_niters': 52.0,
}
got = {
'cL': data['cL'][-1],
'vC': data['vC'][-1],
'dt': data['dt'][-1],
'e_em': data['e_em'][-1],
'restarts': data['restarts'],
'sum_niters': data['sum_niters'],
}
elif problem == 'battery_implicit':
if use_switch_estimator and use_adaptivity:
msg = f'Error when using switch estimator and adaptivity for battery_implicit for dt={dt:.1e}:'
if dt == 1e-2:
expected = {
'cL': 0.5395401085152521,
'vC': 1.00003663985255,
'dt': 0.011465727118881608,
'e_em': 2.220446049250313e-16,
'switches': 0.18231044486762837,
'restarts': 4.0,
'sum_niters': 44.0,
}
elif dt == 4e-2:
expected = {
'cL': 0.6717104472882885,
'vC': 1.0071670698947914,
'dt': 0.035896059229296486,
'e_em': 6.208836400567463e-08,
'switches': 0.18232158833761175,
'restarts': 3.0,
'sum_niters': 36.0,
}
elif dt == 4e-3:
expected = {
'cL': 0.5396216192241711,
'vC': 1.0000561014463172,
'dt': 0.009904645972832471,
'e_em': 2.220446049250313e-16,
'switches': 0.18230549652342606,
'restarts': 4.0,
'sum_niters': 44.0,
}
got = {
'cL': data['cL'][-1],
'vC': data['vC'][-1],
'dt': data['dt'][-1],
'e_em': data['e_em'][-1],
'switches': data['switches'][-1],
'restarts': data['restarts'],
'sum_niters': data['sum_niters'],
}
elif use_switch_estimator and not use_adaptivity:
msg = f'Error when using switch estimator for battery_implicit for dt={dt:.1e}:'
if dt == 1e-2:
expected = {
'cL': 0.5495834122430945,
'vC': 1.000118715162845,
'switches': 0.18231880065636324,
'restarts': 1.0,
'sum_niters': 128.0,
}
elif dt == 4e-2:
expected = {
'cL': 0.5537752525450169,
'vC': 1.0010140112484431,
'switches': 0.18243023230469263,
'restarts': 1.0,
'sum_niters': 36.0,
}
elif dt == 4e-3:
expected = {
'cL': 0.5495840604357269,
'vC': 1.0001158454740509,
'switches': 0.1823218812753008,
'restarts': 1.0,
'sum_niters': 308.0,
}
got = {
'cL': data['cL'][-1],
'vC': data['vC'][-1],
'switches': data['switches'][-1],
'restarts': data['restarts'],
'sum_niters': data['sum_niters'],
}
elif not use_switch_estimator and use_adaptivity:
msg = f'Error when using adaptivity for battery_implicit for dt={dt:.1e}:'
if dt == 1e-2:
expected = {
'cL': 0.5569818284195267,
'vC': 0.9846733115433628,
'dt': 0.01,
'e_em': 2.220446049250313e-16,
'restarts': 9.0,
'sum_niters': 88.0,
}
elif dt == 4e-2:
expected = {
'cL': 0.5556563012729733,
'vC': 0.9930947318467772,
'dt': 0.035507110551631804,
'e_em': 6.2098696185231e-08,
'restarts': 6.0,
'sum_niters': 64.0,
}
elif dt == 4e-3:
expected = {
'cL': 0.5401117929618637,
'vC': 0.9933888475391347,
'dt': 0.03176025170463925,
'e_em': 4.0386798239033794e-08,
'restarts': 8.0,
'sum_niters': 80.0,
}
got = {
'cL': data['cL'][-1],
'vC': data['vC'][-1],
'dt': data['dt'][-1],
'e_em': data['e_em'][-1],
'restarts': data['restarts'],
'sum_niters': data['sum_niters'],
}
for key in expected.keys():
assert np.isclose(
expected[key], got[key], rtol=1e-4
), f'{msg} Expected {key}={expected[key]:.4e}, got {key}={got[key]:.4e}'
def get_data_dict(stats, use_adaptivity, use_switch_estimator, recomputed=False):
"""
Converts the statistics into a data dictionary that can be easily checked in the check_solution function.
Based on @brownbaerchen's get_data function.
Args:
stats (dict): Raw statistics from a controller run
use_adaptivity (bool): flag if adaptivity wants to be used or not
use_switch_estimator (bool): flag if the switch estimator wants to be used or not
recomputed (bool): flag if the values after a restart are used or before
Return:
data (dict): contains all information as the statistics dict
"""
data = dict()
data['cL'] = np.array([me[1][0] for me in get_sorted(stats, type='u', recomputed=recomputed, sortby='time')])
data['vC'] = np.array([me[1][1] for me in get_sorted(stats, type='u', recomputed=recomputed, sortby='time')])
if use_adaptivity:
data['dt'] = np.array(get_sorted(stats, type='dt', recomputed=recomputed, sortby='time'))[:, 1]
data['e_em'] = np.array(
get_sorted(stats, type='error_embedded_estimate', recomputed=recomputed, sortby='time')
)[:, 1]
if use_switch_estimator:
data['switches'] = np.array(get_recomputed(stats, type='switch', sortby='time'))[:, 1]
if use_adaptivity or use_switch_estimator:
data['restarts'] = np.sum(np.array(get_sorted(stats, type='restart', recomputed=None, sortby='time'))[:, 1])
data['sum_niters'] = np.sum(np.array(get_sorted(stats, type='niter', recomputed=None, sortby='time'))[:, 1])
return data
def get_recomputed(stats, type, sortby):
"""
Function that filters statistics after restarts. For each unique time point it keeps only the entries
of the given type from the last attempt (highest restart count), i.e., the values that were finally accepted.
Args:
stats (dict): Raw statistics from a controller run
type (str): the type the be filtered
sortby (str): string to specify which key to use for sorting
Returns:
sorted_list (list): list of filtered statistics
"""
sorted_nested_list = []
times_unique = np.unique([me[0] for me in get_sorted(stats, type=type)])
filtered_list = [
filter_stats(
stats,
time=t_unique,
num_restarts=max([me.num_restarts for me in filter_stats(stats, type=type, time=t_unique).keys()]),
type=type,
)
for t_unique in times_unique
]
for item in filtered_list:
sorted_nested_list.append(sort_stats(item, sortby=sortby))
sorted_list = [item for sub_item in sorted_nested_list for item in sub_item]
return sorted_list
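# Typical use, e.g. to extract the located switching times of a run (cf. plot_voltages above):
#
#     switches = get_recomputed(stats, type='switch', sortby='time')
#     t_switch = [v[1] for v in switches]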
def proof_assertions_description(description, use_adaptivity, use_switch_estimator):
"""
Function to proof the assertions in the description.
Args:
description(dict): contains all information for a controller run
use_adaptivity (bool): flag if adaptivity wants to be used or not
use_switch_estimator (bool): flag if the switch estimator wants to be used or not
"""
n = description['problem_params']['ncapacitors']
assert all(
description['problem_params']['alpha'] > description['problem_params']['V_ref'][k] for k in range(n)
), 'Please set "alpha" greater than values of "V_ref"'
assert type(description['problem_params']['V_ref']) == np.ndarray, '"V_ref" needs to be an np.ndarray'
assert type(description['problem_params']['C']) == np.ndarray, '"C" needs to be an np.ndarray'
assert (
np.shape(description['problem_params']['V_ref'])[0] == n
), 'Number of reference values needs to be equal to number of capacitors'
assert (
np.shape(description['problem_params']['C'])[0] == n
), 'Number of capacitance values needs to be equal to number of capacitors'
assert all(
description['problem_params']['V_ref'][k] > 0 for k in range(n)
), 'Please set values of "V_ref" greater than 0'
assert 'errtol' not in description['step_params'].keys(), 'No exact solution known to compute error'
assert 'alpha' in description['problem_params'].keys(), 'Please supply "alpha" in the problem parameters'
assert 'V_ref' in description['problem_params'].keys(), 'Please supply "V_ref" in the problem parameters'
if use_switch_estimator or use_adaptivity:
assert description['level_params']['restol'] == -1, "Please set restol to -1 or omit it"
def proof_assertions_time(dt, Tend, V_ref, alpha):
"""
Function to proof the assertions regarding the time domain (in combination with the specific problem):
Args:
dt (float): time step for computation
Tend (float): end time
V_ref (np.ndarray): Reference values (problem parameter)
alpha (np.float): Multiple used for initial conditions (problem_parameter)
"""
assert dt < Tend, "Time step is too large for the time domain!"
assert (
Tend == 0.3 and V_ref[0] == 1.0 and alpha == 1.2
), "Error! Do not use other parameters for V_ref != 1.0, alpha != 1.2, Tend != 0.3 due to hardcoded reference!"
assert dt == 1e-2, "Error! Do not use another time step dt != 1e-2!"
if __name__ == "__main__":
run()
| 25,204 | 37.247344 | 134 | py |
pySDC | pySDC-master/pySDC/projects/PinTSimE/estimation_check_2capacitors.py | import numpy as np
import dill
from pathlib import Path
from pySDC.helpers.stats_helper import get_sorted
from pySDC.core.Collocation import CollBase as Collocation
from pySDC.implementations.problem_classes.Battery import battery_n_capacitors
from pySDC.implementations.sweeper_classes.imex_1st_order import imex_1st_order
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.projects.PinTSimE.battery_model import controller_run, generate_description, get_recomputed, log_data
from pySDC.projects.PinTSimE.piline_model import setup_mpl
from pySDC.projects.PinTSimE.battery_2capacitors_model import (
check_solution,
proof_assertions_description,
proof_assertions_time,
)
import pySDC.helpers.plot_helper as plt_helper
from pySDC.projects.PinTSimE.switch_estimator import SwitchEstimator
def run(cwd='./'):
"""
Routine to check the differences between using a switch estimator and not using one
Args:
cwd (str): current working directory
"""
dt_list = [4e-1, 4e-2, 4e-3]
t0 = 0.0
Tend = 3.5
problem_classes = [battery_n_capacitors]
sweeper_classes = [imex_1st_order]
ncapacitors = 2
alpha = 5.0
V_ref = np.array([1.0, 1.0])
C = np.array([1.0, 1.0])
use_switch_estimator = [True, False]
restarts_all = []
restarts_dict = dict()
for problem, sweeper in zip(problem_classes, sweeper_classes):
for dt_item in dt_list:
for use_SE in use_switch_estimator:
description, controller_params = generate_description(
dt_item,
problem,
sweeper,
log_data,
False,
use_SE,
ncapacitors,
alpha,
V_ref,
C,
)
# Assertions
proof_assertions_description(description, False, use_SE)
proof_assertions_time(dt_item, Tend, V_ref, alpha)
stats = controller_run(description, controller_params, False, use_SE, t0, Tend)
if use_SE:
switches = get_recomputed(stats, type='switch', sortby='time')
assert len(switches) >= 2, f"Expected at least 2 switches for dt: {dt_item}, got {len(switches)}!"
check_solution(stats, dt_item, use_SE)
fname = 'data/{}_dt{}_USE{}.dat'.format(problem.__name__, dt_item, use_SE)
f = open(fname, 'wb')
dill.dump(stats, f)
f.close()
if use_SE:
restarts_dict[dt_item] = np.array(get_sorted(stats, type='restart', recomputed=None))
restarts = restarts_dict[dt_item][:, 1]
restarts_all.append(np.sum(restarts))
print("Restarts for dt: ", dt_item, " -- ", np.sum(restarts))
V_ref = description['problem_params']['V_ref']
val_switch_all = []
diff_true_all1 = []
diff_false_all_before1 = []
diff_false_all_after1 = []
diff_true_all2 = []
diff_false_all_before2 = []
diff_false_all_after2 = []
restarts_dt_switch1 = []
restarts_dt_switch2 = []
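# For each dt: record vC - V_ref exactly at the located switches (SE=True) and directly
# before/after the time step containing each switch (SE=False), plus the restarts spent per switch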
for dt_item in dt_list:
f1 = open(cwd + 'data/{}_dt{}_USETrue.dat'.format(problem.__name__, dt_item), 'rb')
stats_true = dill.load(f1)
f1.close()
f2 = open(cwd + 'data/{}_dt{}_USEFalse.dat'.format(problem.__name__, dt_item), 'rb')
stats_false = dill.load(f2)
f2.close()
switches = get_recomputed(stats_true, type='switch', sortby='time')
t_switch = [v[1] for v in switches]
val_switch_all.append([t_switch[0], t_switch[1]])
vC1_true = [me[1][1] for me in get_sorted(stats_true, type='u', recomputed=False)]
vC2_true = [me[1][2] for me in get_sorted(stats_true, type='u', recomputed=False)]
vC1_false = [me[1][1] for me in get_sorted(stats_false, type='u', recomputed=False)]
vC2_false = [me[1][2] for me in get_sorted(stats_false, type='u', recomputed=False)]
diff_true1 = vC1_true - V_ref[0]
diff_true2 = vC2_true - V_ref[1]
diff_false1 = vC1_false - V_ref[0]
diff_false2 = vC2_false - V_ref[1]
t_true = [me[0] for me in get_sorted(stats_true, type='u', recomputed=False)]
t_false = [me[0] for me in get_sorted(stats_false, type='u', recomputed=False)]
diff_true_all1.append(
[diff_true1[m] for m in range(len(t_true)) if np.isclose(t_true[m], t_switch[0], atol=1e-15)]
)
diff_true_all2.append(
[diff_true2[m] for m in range(len(t_true)) if np.isclose(t_true[m], t_switch[1], atol=1e-15)]
)
diff_false_all_before1.append(
[diff_false1[m - 1] for m in range(1, len(t_false)) if t_false[m - 1] < t_switch[0] < t_false[m]]
)
diff_false_all_after1.append(
[diff_false1[m] for m in range(1, len(t_false)) if t_false[m - 1] < t_switch[0] < t_false[m]]
)
diff_false_all_before2.append(
[diff_false2[m - 1] for m in range(1, len(t_false)) if t_false[m - 1] < t_switch[1] < t_false[m]]
)
diff_false_all_after2.append(
[diff_false2[m] for m in range(1, len(t_false)) if t_false[m - 1] < t_switch[1] < t_false[m]]
)
restarts_dt = restarts_dict[dt_item]
for i in range(len(restarts_dt[:, 0])):
if np.isclose(restarts_dt[i, 0], t_switch[0], atol=1e-15):
restarts_dt_switch1.append(np.sum(restarts_dt[0 : i - 1, 1]))
break
for i in range(len(restarts_dt[:, 0])):
if np.isclose(restarts_dt[i, 0], t_switch[1], atol=1e-15):
restarts_dt_switch2.append(np.sum(restarts_dt[i - 2 :, 1]))
break
setup_mpl()
fig1, ax1 = plt_helper.plt.subplots(1, 1, figsize=(4.5, 3))
ax1.set_title('Time evolution of $v_{C_{1}}-V_{ref1}$')
ax1.plot(t_true, diff_true1, label='SE=True', color='#ff7f0e')
ax1.plot(t_false, diff_false1, label='SE=False', color='#1f77b4')
ax1.axvline(x=t_switch[0], linestyle='--', color='k', label='Switch1')
ax1.legend(frameon=False, fontsize=10, loc='lower left')
ax1.set_yscale('symlog', linthresh=1e-5)
ax1.set_xlabel('Time')
fig1.savefig('data/difference_estimation_vC1_dt{}.png'.format(dt_item), dpi=300, bbox_inches='tight')
plt_helper.plt.close(fig1)
setup_mpl()
fig2, ax2 = plt_helper.plt.subplots(1, 1, figsize=(4.5, 3))
ax2.set_title('Time evolution of $v_{C_{2}}-V_{ref2}$')
ax2.plot(t_true, diff_true2, label='SE=True', color='#ff7f0e')
ax2.plot(t_false, diff_false2, label='SE=False', color='#1f77b4')
ax2.axvline(x=t_switch[1], linestyle='--', color='k', label='Switch2')
ax2.legend(frameon=False, fontsize=10, loc='lower left')
ax2.set_yscale('symlog', linthresh=1e-5)
ax2.set_xlabel('Time')
fig2.savefig('data/difference_estimation_vC2_dt{}.png'.format(dt_item), dpi=300, bbox_inches='tight')
plt_helper.plt.close(fig2)
setup_mpl()
fig1, ax1 = plt_helper.plt.subplots(1, 1, figsize=(3, 3))
ax1.set_title("Difference $v_{C_{1}}-V_{ref1}$")
pos1 = ax1.plot(dt_list, diff_false_all_before1, 'rs-', label='SE=False - before switch1')
pos2 = ax1.plot(dt_list, diff_false_all_after1, 'bd-', label='SE=False - after switch1')
pos3 = ax1.plot(dt_list, diff_true_all1, 'kd-', label='SE=True')
ax1.set_xticks(dt_list)
ax1.set_xticklabels(dt_list)
ax1.set_xscale('log', base=10)
ax1.set_yscale('symlog', linthresh=1e-10)
ax1.set_ylim(-2, 2)
ax1.set_xlabel(r'$\Delta t$')
restart_ax = ax1.twinx()
restarts = restart_ax.plot(dt_list, restarts_dt_switch1, 'cs--', label='Restarts')
restart_ax.set_ylabel('Restarts')
lines = pos1 + pos2 + pos3 + restarts
labels = [l.get_label() for l in lines]
ax1.legend(lines, labels, frameon=False, fontsize=8, loc='center right')
fig1.savefig('data/diffs_estimation_vC1.png', dpi=300, bbox_inches='tight')
plt_helper.plt.close(fig1)
setup_mpl()
fig2, ax2 = plt_helper.plt.subplots(1, 1, figsize=(3, 3))
ax2.set_title("Difference $v_{C_{2}}-V_{ref2}$")
pos1 = ax2.plot(dt_list, diff_false_all_before2, 'rs-', label='SE=False - before switch2')
pos2 = ax2.plot(dt_list, diff_false_all_after2, 'bd-', label='SE=False - after switch2')
pos3 = ax2.plot(dt_list, diff_true_all2, 'kd-', label='SE=True')
ax2.set_xticks(dt_list)
ax2.set_xticklabels(dt_list)
ax2.set_xscale('log', base=10)
ax2.set_yscale('symlog', linthresh=1e-10)
ax2.set_ylim(-2, 2)
ax2.set_xlabel(r'$\Delta t$')
restart_ax = ax2.twinx()
restarts = restart_ax.plot(dt_list, restarts_dt_switch2, 'cs--', label='Restarts')
restart_ax.set_ylabel('Restarts')
lines = pos1 + pos2 + pos3 + restarts
labels = [l.get_label() for l in lines]
ax2.legend(lines, labels, frameon=False, fontsize=8, loc='center right')
fig2.savefig('data/diffs_estimation_vC2.png', dpi=300, bbox_inches='tight')
plt_helper.plt.close(fig2)
if __name__ == "__main__":
run()
| 9,305 | 39.112069 | 118 | py |
pySDC | pySDC-master/pySDC/projects/PinTSimE/piline_model.py | import matplotlib as mpl
import numpy as np
import dill
from pathlib import Path
mpl.use('Agg')
from pySDC.helpers.stats_helper import get_sorted
from pySDC.core.Collocation import CollBase as Collocation
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.implementations.problem_classes.Piline import piline
from pySDC.implementations.sweeper_classes.imex_1st_order import imex_1st_order
import pySDC.helpers.plot_helper as plt_helper
from pySDC.core.Hooks import hooks
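# Hook that records the two capacitor voltages (v1, v2) and the inductor current (p3) after every step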
class log_data(hooks):
def post_step(self, step, level_number):
super(log_data, self).post_step(step, level_number)
# some abbreviations
L = step.levels[level_number]
L.sweep.compute_end_point()
self.add_to_stats(
process=step.status.slot,
time=L.time,
level=L.level_index,
iter=0,
sweep=L.status.sweep,
type='v1',
value=L.uend[0],
)
self.add_to_stats(
process=step.status.slot,
time=L.time,
level=L.level_index,
iter=0,
sweep=L.status.sweep,
type='v2',
value=L.uend[1],
)
self.add_to_stats(
process=step.status.slot,
time=L.time,
level=L.level_index,
iter=0,
sweep=L.status.sweep,
type='p3',
value=L.uend[2],
)
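        # The quantities logged above can be read back from the statistics after a
        # run, e.g. (sketch, assuming `stats` was returned by controller.run):
        #     v1 = get_sorted(stats, type='v1', sortby='time')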
def main():
"""
A simple test program to do SDC/PFASST runs for the Piline model
"""
# initialize level parameters
level_params = dict()
level_params['restol'] = 1e-10
level_params['dt'] = 0.25
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['collocation_class'] = Collocation
sweeper_params['quad_type'] = 'LOBATTO'
sweeper_params['num_nodes'] = 5
# sweeper_params['QI'] = 'LU' # For the IMEX sweeper, the LU-trick can be activated for the implicit part
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 20
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 30
controller_params['hook_class'] = log_data
# fill description dictionary for easy step instantiation
# keep in mind: default params are used for the problem, but can be changed
description = dict()
description['problem_class'] = piline # pass problem class
description['sweeper_class'] = imex_1st_order # pass sweeper
description['sweeper_params'] = sweeper_params # pass sweeper parameters
description['level_params'] = level_params # pass level parameters
description['step_params'] = step_params # pass step parameters
assert 'errtol' not in description['step_params'].keys(), "No exact or reference solution known to compute error"
# set time parameters
t0 = 0.0
Tend = 15
num_procs = 1
# instantiate controller
controller = controller_nonMPI(num_procs=num_procs, controller_params=controller_params, description=description)
# get initial values on finest level
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(t0)
# call main function to get things done...
uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
Path("data").mkdir(parents=True, exist_ok=True)
fname = 'data/piline.dat'
f = open(fname, 'wb')
dill.dump(stats, f)
f.close()
# filter statistics by number of iterations
iter_counts = get_sorted(stats, type='niter', sortby='time')
# compute and print statistics
min_iter = 20
max_iter = 0
f = open('data/piline_out.txt', 'w')
niters = np.array([item[1] for item in iter_counts])
out = ' Mean number of iterations: %4.2f' % np.mean(niters)
f.write(out + '\n')
print(out)
for item in iter_counts:
out = 'Number of iterations for time %4.2f: %1i' % item
f.write(out + '\n')
print(out)
min_iter = min(min_iter, item[1])
max_iter = max(max_iter, item[1])
assert np.mean(niters) <= 10, "Mean number of iterations is too high, got %s" % np.mean(niters)
f.close()
plot_voltages()
def plot_voltages(cwd='./'):
f = open(cwd + 'data/piline.dat', 'rb')
stats = dill.load(f)
f.close()
# convert filtered statistics to list of iterations count, sorted by process
v1 = get_sorted(stats, type='v1', sortby='time')
v2 = get_sorted(stats, type='v2', sortby='time')
p3 = get_sorted(stats, type='p3', sortby='time')
times = [v[0] for v in v1]
setup_mpl()
fig, ax = plt_helper.plt.subplots(1, 1, figsize=(4.5, 3))
ax.plot(times, [v[1] for v in v1], linewidth=1, label=r'$v_{C_1}$')
ax.plot(times, [v[1] for v in v2], linewidth=1, label=r'$v_{C_2}$')
ax.plot(times, [v[1] for v in p3], linewidth=1, label=r'$i_{L_\pi}$')
ax.legend(frameon=False, fontsize=12, loc='center right')
ax.set_xlabel('Time')
    ax.set_ylabel('Voltage / Current')
fig.savefig('data/piline_model_solution.png', dpi=300, bbox_inches='tight')
plt_helper.plt.close(fig)
def setup_mpl(fontsize=8):
plt_helper.setup_mpl(reset=True)
style_options = {
"font.family": "sans-serif",
"font.serif": "Computer Modern Sans Serif",
"font.sans-serif": "Computer Modern Sans Serif",
"font.monospace": "Computer Modern Sans Serif",
"axes.labelsize": 12, # LaTeX default is 10pt font.
"legend.fontsize": 13, # Make the legend/label fonts a little smaller
"axes.xmargin": 0.03,
"axes.ymargin": 0.03,
"lines.linewidth": 1, # Make the plot lines a little smaller
}
mpl.rcParams.update(style_options)
if __name__ == "__main__":
main()
| 5,791 | 30.308108 | 117 | py |
pySDC | pySDC-master/pySDC/projects/PinTSimE/__init__.py | 0 | 0 | 0 | py |
|
pySDC | pySDC-master/pySDC/projects/PinTSimE/battery_2capacitors_model.py | import numpy as np
import dill
from pathlib import Path
from pySDC.helpers.stats_helper import get_sorted
from pySDC.core.Collocation import CollBase as Collocation
from pySDC.implementations.problem_classes.Battery import battery_n_capacitors
from pySDC.implementations.sweeper_classes.imex_1st_order import imex_1st_order
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.projects.PinTSimE.battery_model import (
controller_run,
generate_description,
get_recomputed,
log_data,
proof_assertions_description,
)
from pySDC.projects.PinTSimE.piline_model import setup_mpl
import pySDC.helpers.plot_helper as plt_helper
from pySDC.core.Hooks import hooks
from pySDC.projects.PinTSimE.switch_estimator import SwitchEstimator
def run():
"""
    Executes the simulation for the battery model using the IMEX sweeper and plots the results
    as battery_2capacitors_model_solution.png
"""
dt = 1e-2
t0 = 0.0
Tend = 3.5
problem_classes = [battery_n_capacitors]
sweeper_classes = [imex_1st_order]
ncapacitors = 2
alpha = 5.0
V_ref = np.array([1.0, 1.0])
C = np.array([1.0, 1.0])
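    # Expected behaviour (sketch): the capacitors discharge one after the other,
    # and each time a capacitor voltage v_Ci drops to V_ref[i] the circuit
    # switches, so two switch events occur within [t0, Tend].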
recomputed = False
use_switch_estimator = [True]
for problem, sweeper in zip(problem_classes, sweeper_classes):
for use_SE in use_switch_estimator:
description, controller_params = generate_description(
dt, problem, sweeper, log_data, False, use_SE, ncapacitors, alpha, V_ref, C
)
# Assertions
proof_assertions_description(description, False, use_SE)
proof_assertions_time(dt, Tend, V_ref, alpha)
stats = controller_run(description, controller_params, False, use_SE, t0, Tend)
check_solution(stats, dt, use_SE)
plot_voltages(description, problem.__name__, sweeper.__name__, recomputed, use_SE, False)
def plot_voltages(description, problem, sweeper, recomputed, use_switch_estimator, use_adaptivity, cwd='./'):
"""
Routine to plot the numerical solution of the model
Args:
description(dict): contains all information for a controller run
        problem (pySDC.core.Problem.ptype): problem class that is simulated
        sweeper (pySDC.core.Sweeper.sweeper): sweeper class for solving the problem class numerically
        recomputed (bool): flag that indicates whether values after restarts are used
        use_switch_estimator (bool): flag that indicates whether the switch estimator is used
        use_adaptivity (bool): flag that indicates whether adaptivity is used
cwd (str): current working directory
"""
f = open(cwd + 'data/{}_{}_USE{}_USA{}.dat'.format(problem, sweeper, use_switch_estimator, use_adaptivity), 'rb')
stats = dill.load(f)
f.close()
# convert filtered statistics to list of iterations count, sorted by process
cL = np.array([me[1][0] for me in get_sorted(stats, type='u', recomputed=recomputed)])
vC1 = np.array([me[1][1] for me in get_sorted(stats, type='u', recomputed=recomputed)])
vC2 = np.array([me[1][2] for me in get_sorted(stats, type='u', recomputed=recomputed)])
t = np.array([me[0] for me in get_sorted(stats, type='u', recomputed=recomputed)])
setup_mpl()
fig, ax = plt_helper.plt.subplots(1, 1, figsize=(4.5, 3))
ax.plot(t, cL, label='$i_L$')
ax.plot(t, vC1, label='$v_{C_1}$')
ax.plot(t, vC2, label='$v_{C_2}$')
if use_switch_estimator:
switches = get_recomputed(stats, type='switch', sortby='time')
if recomputed is not None:
assert len(switches) >= 2, f"Expected at least 2 switches, got {len(switches)}!"
t_switches = [v[1] for v in switches]
for i in range(len(t_switches)):
ax.axvline(x=t_switches[i], linestyle='--', color='k', label='Switch {}'.format(i + 1))
ax.legend(frameon=False, fontsize=12, loc='upper right')
ax.set_xlabel('Time')
    ax.set_ylabel('Voltage / Current')
fig.savefig('data/battery_2capacitors_model_solution.png', dpi=300, bbox_inches='tight')
plt_helper.plt.close(fig)
def check_solution(stats, dt, use_switch_estimator):
"""
Function that checks the solution based on a hardcoded reference solution. Based on check_solution function from @brownbaerchen.
Args:
stats (dict): Raw statistics from a controller run
dt (float): initial time step
        use_switch_estimator (bool): flag that indicates whether the switch estimator is used
"""
data = get_data_dict(stats, use_switch_estimator)
if use_switch_estimator:
msg = f'Error when using the switch estimator for battery_2capacitors for dt={dt:.1e}:'
if dt == 1e-2:
expected = {
'cL': 1.207906161238752,
'vC1': 1.0094825899806945,
'vC2': 1.00000000000412,
'switch1': 1.6094379124373626,
'switch2': 3.209437912437337,
'restarts': 1.0,
'sum_niters': 1412.0,
}
elif dt == 4e-1:
expected = {
'cL': 1.5090409300896785,
'vC1': 1.0094891393319418,
'vC2': 1.0018593331860708,
'switch1': 1.6075867934844466,
'switch2': 3.2094445842818007,
'restarts': 2.0,
'sum_niters': 52.0,
}
elif dt == 4e-2:
expected = {
'cL': 1.2708164018400792,
'vC1': 1.0094825917376264,
'vC2': 1.000030506091851,
'switch1': 1.6094074085553605,
'switch2': 3.209437914186951,
'restarts': 2.0,
'sum_niters': 368.0,
}
elif dt == 4e-3:
expected = {
'cL': 1.1564912472685411,
'vC1': 1.001438946726028,
'vC2': 1.0000650435224532,
'switch1': 1.6093728710270467,
'switch2': 3.217437912434931,
'restarts': 2.0,
'sum_niters': 3516.0,
}
got = {
'cL': data['cL'][-1],
'vC1': data['vC1'][-1],
'vC2': data['vC2'][-1],
'switch1': data['switch1'],
'switch2': data['switch2'],
'restarts': data['restarts'],
'sum_niters': data['sum_niters'],
}
for key in expected.keys():
assert np.isclose(
expected[key], got[key], rtol=1e-4
), f'{msg} Expected {key}={expected[key]:.4e}, got {key}={got[key]:.4e}'
def get_data_dict(stats, use_switch_estimator, recomputed=False):
"""
Converts the statistics in a useful data dictionary so that it can be easily checked in the check_solution function.
Based on @brownbaerchen's get_data function.
Args:
stats (dict): Raw statistics from a controller run
        use_switch_estimator (bool): flag that indicates whether the switch estimator is used
        recomputed (bool): flag that indicates whether values after restarts are used
Return:
data (dict): contains all information as the statistics dict
"""
data = dict()
data['cL'] = np.array([me[1][0] for me in get_sorted(stats, type='u', recomputed=False, sortby='time')])
data['vC1'] = np.array([me[1][1] for me in get_sorted(stats, type='u', recomputed=False, sortby='time')])
data['vC2'] = np.array([me[1][2] for me in get_sorted(stats, type='u', recomputed=False, sortby='time')])
data['switch1'] = np.array(get_recomputed(stats, type='switch', sortby='time'))[0, 1]
data['switch2'] = np.array(get_recomputed(stats, type='switch', sortby='time'))[-1, 1]
data['restarts'] = np.sum(np.array(get_sorted(stats, type='restart', recomputed=None, sortby='time'))[:, 1])
data['sum_niters'] = np.sum(np.array(get_sorted(stats, type='niter', recomputed=None, sortby='time'))[:, 1])
return data
def proof_assertions_time(dt, Tend, V_ref, alpha):
"""
    Function to prove the assertions regarding the time domain (in combination with the specific problem)
Args:
dt (float): time step for computation
Tend (float): end time
V_ref (np.ndarray): Reference values (problem parameter)
alpha (np.float): Multiple used for initial conditions (problem_parameter)
"""
assert (
Tend == 3.5 and V_ref[0] == 1.0 and V_ref[1] == 1.0 and alpha == 5.0
), "Error! Do not use other parameters for V_ref[:] != 1.0, alpha != 1.2, Tend != 0.3 due to hardcoded reference!"
assert (
dt == 1e-2 or dt == 4e-1 or dt == 4e-2 or dt == 4e-3
), "Error! Do not use other time steps dt != 4e-1 or dt != 4e-2 or dt != 4e-3 due to hardcoded references!"
if __name__ == "__main__":
run()
| 8,786 | 37.038961 | 132 | py |
pySDC | pySDC-master/pySDC/projects/PinTSimE/estimation_check.py | import numpy as np
import dill
from pathlib import Path
from pySDC.helpers.stats_helper import get_sorted
from pySDC.core.Collocation import CollBase as Collocation
from pySDC.implementations.problem_classes.Battery import battery, battery_implicit
from pySDC.implementations.sweeper_classes.imex_1st_order import imex_1st_order
from pySDC.implementations.sweeper_classes.generic_implicit import generic_implicit
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.projects.PinTSimE.piline_model import setup_mpl
from pySDC.projects.PinTSimE.battery_model import (
controller_run,
check_solution,
generate_description,
get_recomputed,
log_data,
proof_assertions_description,
)
import pySDC.helpers.plot_helper as plt_helper
from pySDC.core.Hooks import hooks
from pySDC.projects.PinTSimE.switch_estimator import SwitchEstimator
from pySDC.implementations.convergence_controller_classes.adaptivity import Adaptivity
def run(cwd='./'):
"""
    Routine to check the differences between runs with and without the switch estimator (and adaptivity)
Args:
cwd (str): current working directory
"""
dt_list = [4e-2, 4e-3]
t0 = 0.0
Tend = 0.3
problem_classes = [battery, battery_implicit]
sweeper_classes = [imex_1st_order, generic_implicit]
ncapacitors = 1
alpha = 1.2
V_ref = np.array([1.0])
C = np.array([1.0])
max_restarts = 1
use_switch_estimator = [True, False]
use_adaptivity = [True, False]
restarts_SE = []
restarts_adapt = []
restarts_SE_adapt = []
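    # Four configurations are run per step size: switch estimator (SE) and
    # adaptivity (A), each switched on or off; restarts are collected separately
    # for the three configurations that can trigger them.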
for problem, sweeper in zip(problem_classes, sweeper_classes):
for dt_item in dt_list:
for use_SE in use_switch_estimator:
for use_A in use_adaptivity:
description, controller_params = generate_description(
dt_item,
problem,
sweeper,
log_data,
use_A,
use_SE,
ncapacitors,
alpha,
V_ref,
C,
max_restarts,
)
# Assertions
proof_assertions_description(description, use_A, use_SE)
stats = controller_run(description, controller_params, use_A, use_SE, t0, Tend)
if use_A or use_SE:
check_solution(stats, dt_item, problem.__name__, use_A, use_SE)
if use_SE:
assert (
len(get_recomputed(stats, type='switch', sortby='time')) >= 1
), 'No switches found for dt={}!'.format(dt_item)
fname = 'data/battery_dt{}_USE{}_USA{}_{}.dat'.format(dt_item, use_SE, use_A, sweeper.__name__)
f = open(fname, 'wb')
dill.dump(stats, f)
f.close()
if use_SE or use_A:
restarts_sorted = np.array(get_sorted(stats, type='restart', recomputed=None))[:, 1]
if use_SE and not use_A:
restarts_SE.append(np.sum(restarts_sorted))
elif not use_SE and use_A:
restarts_adapt.append(np.sum(restarts_sorted))
elif use_SE and use_A:
restarts_SE_adapt.append(np.sum(restarts_sorted))
accuracy_check(dt_list, problem.__name__, sweeper.__name__, V_ref)
differences_around_switch(
dt_list,
problem.__name__,
restarts_SE,
restarts_adapt,
restarts_SE_adapt,
sweeper.__name__,
V_ref,
)
differences_over_time(dt_list, problem.__name__, sweeper.__name__, V_ref)
iterations_over_time(dt_list, description['step_params']['maxiter'], problem.__name__, sweeper.__name__)
restarts_SE = []
restarts_adapt = []
restarts_SE_adapt = []
def accuracy_check(dt_list, problem, sweeper, V_ref, cwd='./'):
"""
Routine to check accuracy for different step sizes in case of using adaptivity
Args:
dt_list (list): list of considered (initial) step sizes
        problem (pySDC.core.Problem.ptype): problem class that is considered (the class name)
        sweeper (pySDC.core.Sweeper.sweeper): sweeper class used to solve the problem (the class name)
V_ref (np.float): reference value for the switch
cwd (str): current working directory
"""
if len(dt_list) > 1:
setup_mpl()
fig_acc, ax_acc = plt_helper.plt.subplots(
1, len(dt_list), figsize=(3 * len(dt_list), 3), sharex='col', sharey='row'
)
else:
setup_mpl()
fig_acc, ax_acc = plt_helper.plt.subplots(1, 1, figsize=(3, 3), sharex='col', sharey='row')
count_ax = 0
for dt_item in dt_list:
f3 = open(cwd + 'data/battery_dt{}_USETrue_USATrue_{}.dat'.format(dt_item, sweeper), 'rb')
stats_SE_adapt = dill.load(f3)
f3.close()
f4 = open(cwd + 'data/battery_dt{}_USEFalse_USATrue_{}.dat'.format(dt_item, sweeper), 'rb')
stats_adapt = dill.load(f4)
f4.close()
switches_SE_adapt = get_recomputed(stats_SE_adapt, type='switch', sortby='time')
t_switch_SE_adapt = [v[1] for v in switches_SE_adapt]
t_switch_SE_adapt = t_switch_SE_adapt[-1]
dt_SE_adapt_val = get_sorted(stats_SE_adapt, type='dt', recomputed=False)
dt_adapt_val = get_sorted(stats_adapt, type='dt', recomputed=False)
e_emb_SE_adapt_val = get_sorted(stats_SE_adapt, type='e_embedded', recomputed=False)
e_emb_adapt_val = get_sorted(stats_adapt, type='e_embedded', recomputed=False)
times_SE_adapt = [v[0] for v in e_emb_SE_adapt_val]
times_adapt = [v[0] for v in e_emb_adapt_val]
e_emb_SE_adapt = [v[1] for v in e_emb_SE_adapt_val]
e_emb_adapt = [v[1] for v in e_emb_adapt_val]
if len(dt_list) > 1:
ax_acc[count_ax].set_title(r'$\Delta t_\mathrm{initial}$=%s' % dt_item)
dt1 = ax_acc[count_ax].plot(
[v[0] for v in dt_SE_adapt_val],
[v[1] for v in dt_SE_adapt_val],
'ko-',
label=r'SE+A - $\Delta t_\mathrm{adapt}$',
)
dt2 = ax_acc[count_ax].plot(
[v[0] for v in dt_adapt_val], [v[1] for v in dt_adapt_val], 'g-', label=r'A - $\Delta t_\mathrm{adapt}$'
)
ax_acc[count_ax].axvline(x=t_switch_SE_adapt, linestyle='--', linewidth=0.5, color='r', label='Switch')
ax_acc[count_ax].tick_params(axis='both', which='major', labelsize=6)
ax_acc[count_ax].set_xlabel('Time', fontsize=6)
if count_ax == 0:
ax_acc[count_ax].set_ylabel(r'$\Delta t_\mathrm{adapt}$', fontsize=6)
e_ax = ax_acc[count_ax].twinx()
e_plt1 = e_ax.plot(times_SE_adapt, e_emb_SE_adapt, 'k--', label=r'SE+A - $\epsilon_{emb}$')
e_plt2 = e_ax.plot(times_adapt, e_emb_adapt, 'g--', label=r'A - $\epsilon_{emb}$')
e_ax.set_yscale('log', base=10)
e_ax.set_ylim(1e-16, 1e-7)
e_ax.tick_params(labelsize=6)
lines = dt1 + e_plt1 + dt2 + e_plt2
labels = [l.get_label() for l in lines]
ax_acc[count_ax].legend(lines, labels, frameon=False, fontsize=6, loc='upper right')
else:
ax_acc.set_title(r'$\Delta t_\mathrm{initial}$=%s' % dt_item)
dt1 = ax_acc.plot(
[v[0] for v in dt_SE_adapt_val],
[v[1] for v in dt_SE_adapt_val],
'ko-',
label=r'SE+A - $\Delta t_\mathrm{adapt}$',
)
dt2 = ax_acc.plot(
[v[0] for v in dt_adapt_val],
[v[1] for v in dt_adapt_val],
'go-',
label=r'A - $\Delta t_\mathrm{adapt}$',
)
ax_acc.axvline(x=t_switch_SE_adapt, linestyle='--', linewidth=0.5, color='r', label='Switch')
ax_acc.tick_params(axis='both', which='major', labelsize=6)
ax_acc.set_xlabel('Time', fontsize=6)
            ax_acc.set_ylabel(r'$\Delta t_\mathrm{adapt}$', fontsize=6)
e_ax = ax_acc.twinx()
e_plt1 = e_ax.plot(times_SE_adapt, e_emb_SE_adapt, 'k--', label=r'SE+A - $\epsilon_{emb}$')
e_plt2 = e_ax.plot(times_adapt, e_emb_adapt, 'g--', label=r'A - $\epsilon_{emb}$')
e_ax.set_yscale('log', base=10)
e_ax.tick_params(labelsize=6)
lines = dt1 + e_plt1 + dt2 + e_plt2
labels = [l.get_label() for l in lines]
ax_acc.legend(lines, labels, frameon=False, fontsize=6, loc='upper right')
count_ax += 1
fig_acc.savefig('data/embedded_error_adaptivity_{}.png'.format(sweeper), dpi=300, bbox_inches='tight')
plt_helper.plt.close(fig_acc)
def differences_around_switch(
dt_list, problem, restarts_SE, restarts_adapt, restarts_SE_adapt, sweeper, V_ref, cwd='./'
):
"""
    Routine to plot the differences before, at, and after the switch. Produces the diffs_around_switch_<sweeper_class>.png file
Args:
dt_list (list): list of considered (initial) step sizes
        problem (pySDC.core.Problem.ptype): problem class that is considered (the class name)
restarts_SE (list): Restarts for the solve only using the switch estimator
restarts_adapt (list): Restarts for the solve of only using adaptivity
restarts_SE_adapt (list): Restarts for the solve of using both, switch estimator and adaptivity
        sweeper (pySDC.core.Sweeper.sweeper): sweeper class used to solve the problem (the class name)
V_ref (np.float): reference value for the switch
cwd (str): current working directory
"""
diffs_true_at = []
diffs_false_before = []
diffs_false_after = []
diffs_true_at_adapt = []
diffs_true_before_adapt = []
diffs_true_after_adapt = []
diffs_false_before_adapt = []
diffs_false_after_adapt = []
for dt_item in dt_list:
f1 = open(cwd + 'data/battery_dt{}_USETrue_USAFalse_{}.dat'.format(dt_item, sweeper), 'rb')
stats_SE = dill.load(f1)
f1.close()
f2 = open(cwd + 'data/battery_dt{}_USEFalse_USAFalse_{}.dat'.format(dt_item, sweeper), 'rb')
stats = dill.load(f2)
f2.close()
f3 = open(cwd + 'data/battery_dt{}_USETrue_USATrue_{}.dat'.format(dt_item, sweeper), 'rb')
stats_SE_adapt = dill.load(f3)
f3.close()
f4 = open(cwd + 'data/battery_dt{}_USEFalse_USATrue_{}.dat'.format(dt_item, sweeper), 'rb')
stats_adapt = dill.load(f4)
f4.close()
switches_SE = get_recomputed(stats_SE, type='switch', sortby='time')
t_switch = [v[1] for v in switches_SE]
        t_switch = t_switch[-1]  # the battery model has only a single switch
switches_SE_adapt = get_recomputed(stats_SE_adapt, type='switch', sortby='time')
t_switch_SE_adapt = [v[1] for v in switches_SE_adapt]
t_switch_SE_adapt = t_switch_SE_adapt[-1]
vC_SE = [me[1][1] for me in get_sorted(stats_SE, type='u', recomputed=False)]
vC_adapt = [me[1][1] for me in get_sorted(stats_adapt, type='u', recomputed=False)]
vC_SE_adapt = [me[1][1] for me in get_sorted(stats_SE_adapt, type='u', recomputed=False)]
vC = [me[1][1] for me in get_sorted(stats, type='u', recomputed=False)]
diff_SE, diff = vC_SE - V_ref[0], vC - V_ref[0]
times_SE = [me[0] for me in get_sorted(stats_SE, type='u', recomputed=False)]
times = [me[0] for me in get_sorted(stats, type='u', recomputed=False)]
diff_adapt, diff_SE_adapt = vC_adapt - V_ref[0], vC_SE_adapt - V_ref[0]
times_adapt = [me[0] for me in get_sorted(stats_adapt, type='u', recomputed=False)]
times_SE_adapt = [me[0] for me in get_sorted(stats_SE_adapt, type='u', recomputed=False)]
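        # Sample the difference v_C - V_ref exactly at the switch (run with SE)
        # and at the two time steps bracketing the switch (run without SE).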
diffs_true_at.append(
[diff_SE[m] for m in range(len(times_SE)) if np.isclose(times_SE[m], t_switch, atol=1e-15)][0]
)
diffs_false_before.append(
[diff[m - 1] for m in range(1, len(times)) if times[m - 1] <= t_switch <= times[m]][0]
)
diffs_false_after.append([diff[m] for m in range(1, len(times)) if times[m - 1] <= t_switch <= times[m]][0])
for m in range(len(times_SE_adapt)):
if np.isclose(times_SE_adapt[m], t_switch_SE_adapt, atol=1e-10):
diffs_true_at_adapt.append(diff_SE_adapt[m])
diffs_true_before_adapt.append(diff_SE_adapt[m - 1])
diffs_true_after_adapt.append(diff_SE_adapt[m + 1])
diffs_false_before_adapt.append(
            [diff_adapt[m - 1] for m in range(1, len(times_adapt)) if times_adapt[m - 1] <= t_switch <= times_adapt[m]][0]
)
diffs_false_after_adapt.append(
            [diff_adapt[m] for m in range(1, len(times_adapt)) if times_adapt[m - 1] <= t_switch <= times_adapt[m]][0]
)
setup_mpl()
fig_around, ax_around = plt_helper.plt.subplots(1, 3, figsize=(9, 3), sharex='col', sharey='row')
ax_around[0].set_title("Using SE")
pos11 = ax_around[0].plot(dt_list, diffs_false_before, 'rs-', label='before switch')
pos12 = ax_around[0].plot(dt_list, diffs_false_after, 'bd--', label='after switch')
pos13 = ax_around[0].plot(dt_list, diffs_true_at, 'ko--', label='at switch')
ax_around[0].set_xticks(dt_list)
ax_around[0].set_xticklabels(dt_list)
ax_around[0].tick_params(axis='both', which='major', labelsize=6)
ax_around[0].set_xscale('log', base=10)
ax_around[0].set_yscale('symlog', linthresh=1e-8)
ax_around[0].set_ylim(-1, 1)
ax_around[0].set_xlabel(r'$\Delta t_\mathrm{initial}$', fontsize=6)
ax_around[0].set_ylabel(r'$v_{C}-V_{ref}$', fontsize=6)
restart_ax0 = ax_around[0].twinx()
restarts_plt0 = restart_ax0.plot(dt_list, restarts_SE, 'cs--', label='Restarts')
restart_ax0.tick_params(labelsize=6)
lines = pos11 + pos12 + pos13 + restarts_plt0
labels = [l.get_label() for l in lines]
ax_around[0].legend(lines, labels, frameon=False, fontsize=6, loc='lower right')
ax_around[1].set_title("Using Adaptivity")
pos21 = ax_around[1].plot(dt_list, diffs_false_before_adapt, 'rs-', label='before switch')
pos22 = ax_around[1].plot(dt_list, diffs_false_after_adapt, 'bd--', label='after switch')
ax_around[1].set_xticks(dt_list)
ax_around[1].set_xticklabels(dt_list)
ax_around[1].tick_params(axis='both', which='major', labelsize=6)
ax_around[1].set_xscale('log', base=10)
ax_around[1].set_yscale('symlog', linthresh=1e-8)
ax_around[1].set_ylim(-1, 1)
ax_around[1].set_xlabel(r'$\Delta t_\mathrm{initial}$', fontsize=6)
restart_ax1 = ax_around[1].twinx()
restarts_plt1 = restart_ax1.plot(dt_list, restarts_adapt, 'cs--', label='Restarts')
restart_ax1.tick_params(labelsize=6)
lines = pos21 + pos22 + restarts_plt1
labels = [l.get_label() for l in lines]
ax_around[1].legend(lines, labels, frameon=False, fontsize=6, loc='lower right')
ax_around[2].set_title("Using SE + Adaptivity")
pos31 = ax_around[2].plot(dt_list, diffs_true_before_adapt, 'rs-', label='before switch')
pos32 = ax_around[2].plot(dt_list, diffs_true_after_adapt, 'bd--', label='after switch')
pos33 = ax_around[2].plot(dt_list, diffs_true_at_adapt, 'ko--', label='at switch')
ax_around[2].set_xticks(dt_list)
ax_around[2].set_xticklabels(dt_list)
ax_around[2].tick_params(axis='both', which='major', labelsize=6)
ax_around[2].set_xscale('log', base=10)
ax_around[2].set_yscale('symlog', linthresh=1e-8)
ax_around[2].set_ylim(-1, 1)
ax_around[2].set_xlabel(r'$\Delta t_\mathrm{initial}$', fontsize=6)
restart_ax2 = ax_around[2].twinx()
restarts_plt2 = restart_ax2.plot(dt_list, restarts_SE_adapt, 'cs--', label='Restarts')
restart_ax2.tick_params(labelsize=6)
lines = pos31 + pos32 + pos33 + restarts_plt2
labels = [l.get_label() for l in lines]
    ax_around[2].legend(lines, labels, frameon=False, fontsize=6, loc='lower right')
fig_around.savefig('data/diffs_around_switch_{}.png'.format(sweeper), dpi=300, bbox_inches='tight')
plt_helper.plt.close(fig_around)
def differences_over_time(dt_list, problem, sweeper, V_ref, cwd='./'):
"""
    Routine to plot the differences over time with and without the switch estimator. Produces the diffs_over_time_<sweeper_class>.png file
Args:
dt_list (list): list of considered (initial) step sizes
        problem (pySDC.core.Problem.ptype): problem class that is considered (the class name)
        sweeper (pySDC.core.Sweeper.sweeper): sweeper class used to solve the problem (the class name)
V_ref (np.float): reference value for the switch
cwd (str): current working directory
"""
if len(dt_list) > 1:
setup_mpl()
fig_diffs, ax_diffs = plt_helper.plt.subplots(
2, len(dt_list), figsize=(4 * len(dt_list), 6), sharex='col', sharey='row'
)
else:
setup_mpl()
fig_diffs, ax_diffs = plt_helper.plt.subplots(2, 1, figsize=(4, 6))
count_ax = 0
for dt_item in dt_list:
f1 = open(cwd + 'data/battery_dt{}_USETrue_USAFalse_{}.dat'.format(dt_item, sweeper), 'rb')
stats_SE = dill.load(f1)
f1.close()
f2 = open(cwd + 'data/battery_dt{}_USEFalse_USAFalse_{}.dat'.format(dt_item, sweeper), 'rb')
stats = dill.load(f2)
f2.close()
f3 = open(cwd + 'data/battery_dt{}_USETrue_USATrue_{}.dat'.format(dt_item, sweeper), 'rb')
stats_SE_adapt = dill.load(f3)
f3.close()
f4 = open(cwd + 'data/battery_dt{}_USEFalse_USATrue_{}.dat'.format(dt_item, sweeper), 'rb')
stats_adapt = dill.load(f4)
f4.close()
switches_SE = get_recomputed(stats_SE, type='switch', sortby='time')
t_switch_SE = [v[1] for v in switches_SE]
        t_switch_SE = t_switch_SE[-1]  # the battery model has only a single switch
switches_SE_adapt = get_recomputed(stats_SE_adapt, type='switch', sortby='time')
t_switch_SE_adapt = [v[1] for v in switches_SE_adapt]
t_switch_SE_adapt = t_switch_SE_adapt[-1]
dt_adapt = np.array(get_sorted(stats_adapt, type='dt', recomputed=False))
dt_SE_adapt = np.array(get_sorted(stats_SE_adapt, type='dt', recomputed=False))
restart_adapt = np.array(get_sorted(stats_adapt, type='restart', recomputed=None))
restart_SE_adapt = np.array(get_sorted(stats_SE_adapt, type='restart', recomputed=None))
vC_SE = [me[1][1] for me in get_sorted(stats_SE, type='u', recomputed=False)]
vC_adapt = [me[1][1] for me in get_sorted(stats_adapt, type='u', recomputed=False)]
vC_SE_adapt = [me[1][1] for me in get_sorted(stats_SE_adapt, type='u', recomputed=False)]
vC = [me[1][1] for me in get_sorted(stats, type='u', recomputed=False)]
diff_SE, diff = vC_SE - V_ref[0], vC - V_ref[0]
times_SE = [me[0] for me in get_sorted(stats_SE, type='u', recomputed=False)]
times = [me[0] for me in get_sorted(stats, type='u', recomputed=False)]
diff_adapt, diff_SE_adapt = vC_adapt - V_ref[0], vC_SE_adapt - V_ref[0]
times_adapt = [me[0] for me in get_sorted(stats_adapt, type='u', recomputed=False)]
times_SE_adapt = [me[0] for me in get_sorted(stats_SE_adapt, type='u', recomputed=False)]
if len(dt_list) > 1:
ax_diffs[0, count_ax].set_title(r'$\Delta t$=%s' % dt_item)
ax_diffs[0, count_ax].plot(times_SE, diff_SE, label='SE=True, A=False', color='#ff7f0e')
ax_diffs[0, count_ax].plot(times, diff, label='SE=False, A=False', color='#1f77b4')
ax_diffs[0, count_ax].plot(times_adapt, diff_adapt, label='SE=False, A=True', color='red', linestyle='--')
ax_diffs[0, count_ax].plot(
times_SE_adapt, diff_SE_adapt, label='SE=True, A=True', color='limegreen', linestyle='-.'
)
ax_diffs[0, count_ax].axvline(x=t_switch_SE, linestyle='--', linewidth=0.5, color='k', label='Switch')
ax_diffs[0, count_ax].set_yscale('symlog', linthresh=1e-5)
ax_diffs[0, count_ax].tick_params(axis='both', which='major', labelsize=6)
if count_ax == 0:
ax_diffs[0, count_ax].set_ylabel('Difference $v_{C}-V_{ref}$', fontsize=6)
            ax_diffs[0, count_ax].legend(frameon=False, fontsize=6, loc='upper right')
ax_diffs[1, count_ax].plot(
dt_adapt[:, 0], dt_adapt[:, 1], label=r'$\Delta t$ - SE=F, A=T', color='red', linestyle='--'
)
ax_diffs[1, count_ax].plot([None], [None], label='Restart - SE=F, A=T', color='grey', linestyle='-.')
for i in range(len(restart_adapt)):
if restart_adapt[i, 1] > 0:
ax_diffs[1, count_ax].axvline(restart_adapt[i, 0], color='grey', linestyle='-.')
ax_diffs[1, count_ax].plot(
dt_SE_adapt[:, 0],
dt_SE_adapt[:, 1],
label=r'$ \Delta t$ - SE=T, A=T',
color='limegreen',
linestyle='-.',
)
ax_diffs[1, count_ax].plot([None], [None], label='Restart - SE=T, A=T', color='black', linestyle='-.')
for i in range(len(restart_SE_adapt)):
if restart_SE_adapt[i, 1] > 0:
ax_diffs[1, count_ax].axvline(restart_SE_adapt[i, 0], color='black', linestyle='-.')
ax_diffs[1, count_ax].set_xlabel('Time', fontsize=6)
ax_diffs[1, count_ax].tick_params(axis='both', which='major', labelsize=6)
if count_ax == 0:
ax_diffs[1, count_ax].set_ylabel(r'$\Delta t_\mathrm{adapted}$', fontsize=6)
ax_diffs[1, count_ax].set_yscale('log', base=10)
ax_diffs[1, count_ax].legend(frameon=True, fontsize=6, loc='lower left')
else:
ax_diffs[0].set_title(r'$\Delta t$=%s' % dt_item)
            ax_diffs[0].plot(times_SE, diff_SE, label='SE=True, A=False', color='#ff7f0e')
            ax_diffs[0].plot(times, diff, label='SE=False, A=False', color='#1f77b4')
ax_diffs[0].plot(times_adapt, diff_adapt, label='SE=False, A=True', color='red', linestyle='--')
ax_diffs[0].plot(times_SE_adapt, diff_SE_adapt, label='SE=True, A=True', color='limegreen', linestyle='-.')
ax_diffs[0].axvline(x=t_switch_SE, linestyle='--', linewidth=0.5, color='k', label='Switch')
ax_diffs[0].tick_params(axis='both', which='major', labelsize=6)
ax_diffs[0].set_yscale('symlog', linthresh=1e-5)
ax_diffs[0].set_ylabel('Difference $v_{C}-V_{ref}$', fontsize=6)
ax_diffs[0].legend(frameon=False, fontsize=6, loc='center right')
ax_diffs[1].plot(dt_adapt[:, 0], dt_adapt[:, 1], label='SE=False, A=True', color='red', linestyle='--')
ax_diffs[1].plot(
dt_SE_adapt[:, 0], dt_SE_adapt[:, 1], label='SE=True, A=True', color='limegreen', linestyle='-.'
)
ax_diffs[1].tick_params(axis='both', which='major', labelsize=6)
ax_diffs[1].set_xlabel('Time', fontsize=6)
ax_diffs[1].set_ylabel(r'$\Delta t_\mathrm{adapted}$', fontsize=6)
ax_diffs[1].set_yscale('log', base=10)
ax_diffs[1].legend(frameon=False, fontsize=6, loc='upper right')
count_ax += 1
plt_helper.plt.tight_layout()
fig_diffs.savefig('data/diffs_over_time_{}.png'.format(sweeper), dpi=300, bbox_inches='tight')
plt_helper.plt.close(fig_diffs)
def iterations_over_time(dt_list, maxiter, problem, sweeper, cwd='./'):
"""
    Routine to plot the number of iterations over time with and without the switch estimator. Produces the iters_<sweeper_class>.png file
Args:
dt_list (list): list of considered (initial) step sizes
maxiter (np.int): maximum number of iterations
        problem (pySDC.core.Problem.ptype): problem class that is considered (the class name)
        sweeper (pySDC.core.Sweeper.sweeper): sweeper class used to solve the problem (the class name)
cwd (str): current working directory
"""
iters_time_SE = []
iters_time = []
iters_time_SE_adapt = []
iters_time_adapt = []
times_SE = []
times = []
times_SE_adapt = []
times_adapt = []
t_switches_SE = []
t_switches_SE_adapt = []
for dt_item in dt_list:
f1 = open(cwd + 'data/battery_dt{}_USETrue_USAFalse_{}.dat'.format(dt_item, sweeper), 'rb')
stats_SE = dill.load(f1)
f1.close()
f2 = open(cwd + 'data/battery_dt{}_USEFalse_USAFalse_{}.dat'.format(dt_item, sweeper), 'rb')
stats = dill.load(f2)
f2.close()
f3 = open(cwd + 'data/battery_dt{}_USETrue_USATrue_{}.dat'.format(dt_item, sweeper), 'rb')
stats_SE_adapt = dill.load(f3)
f3.close()
f4 = open(cwd + 'data/battery_dt{}_USEFalse_USATrue_{}.dat'.format(dt_item, sweeper), 'rb')
stats_adapt = dill.load(f4)
f4.close()
# consider iterations before restarts to see what happens
iter_counts_SE_val = get_sorted(stats_SE, type='niter')
iter_counts_SE_adapt_val = get_sorted(stats_SE_adapt, type='niter')
iter_counts_adapt_val = get_sorted(stats_adapt, type='niter')
iter_counts_val = get_sorted(stats, type='niter')
iters_time_SE.append([v[1] for v in iter_counts_SE_val])
iters_time_SE_adapt.append([v[1] for v in iter_counts_SE_adapt_val])
iters_time_adapt.append([v[1] for v in iter_counts_adapt_val])
iters_time.append([v[1] for v in iter_counts_val])
times_SE.append([v[0] for v in iter_counts_SE_val])
times_SE_adapt.append([v[0] for v in iter_counts_SE_adapt_val])
times_adapt.append([v[0] for v in iter_counts_adapt_val])
times.append([v[0] for v in iter_counts_val])
switches_SE = get_recomputed(stats_SE, type='switch', sortby='time')
t_switch_SE = [v[1] for v in switches_SE]
t_switches_SE.append(t_switch_SE[-1])
switches_SE_adapt = get_recomputed(stats_SE_adapt, type='switch', sortby='time')
t_switch_SE_adapt = [v[1] for v in switches_SE_adapt]
t_switches_SE_adapt.append(t_switch_SE_adapt[-1])
if len(dt_list) > 1:
setup_mpl()
fig_iter_all, ax_iter_all = plt_helper.plt.subplots(
nrows=1, ncols=len(dt_list), figsize=(2 * len(dt_list) - 1, 3), sharex='col', sharey='row'
)
for col in range(len(dt_list)):
ax_iter_all[col].plot(times[col], iters_time[col], label='SE=F, A=F')
ax_iter_all[col].plot(times_SE[col], iters_time_SE[col], label='SE=T, A=F')
ax_iter_all[col].plot(times_SE_adapt[col], iters_time_SE_adapt[col], '--', label='SE=T, A=T')
ax_iter_all[col].plot(times_adapt[col], iters_time_adapt[col], '--', label='SE=F, A=T')
ax_iter_all[col].axvline(x=t_switches_SE[col], linestyle='--', linewidth=0.5, color='k', label='Switch')
ax_iter_all[col].set_title(r'$\Delta t_\mathrm{initial}$=%s' % dt_list[col])
ax_iter_all[col].set_ylim(0, maxiter + 2)
ax_iter_all[col].set_xlabel('Time', fontsize=6)
ax_iter_all[col].tick_params(axis='both', which='major', labelsize=6)
if col == 0:
ax_iter_all[col].set_ylabel('Number iterations', fontsize=6)
ax_iter_all[col].legend(frameon=False, fontsize=6, loc='upper right')
else:
setup_mpl()
fig_iter_all, ax_iter_all = plt_helper.plt.subplots(nrows=1, ncols=1, figsize=(3, 3))
        ax_iter_all.plot(times[0], iters_time[0], label='SE=F, A=F')
        ax_iter_all.plot(times_SE[0], iters_time_SE[0], label='SE=T, A=F')
ax_iter_all.plot(times_SE_adapt[0], iters_time_SE_adapt[0], '--', label='SE=T, A=T')
ax_iter_all.plot(times_adapt[0], iters_time_adapt[0], '--', label='SE=F, A=T')
ax_iter_all.axvline(x=t_switches_SE[0], linestyle='--', linewidth=0.5, color='k', label='Switch')
ax_iter_all.set_title(r'$\Delta t_\mathrm{initial}$=%s' % dt_list[0])
ax_iter_all.set_ylim(0, maxiter + 2)
ax_iter_all.set_xlabel('Time', fontsize=6)
ax_iter_all.tick_params(axis='both', which='major', labelsize=6)
ax_iter_all.set_ylabel('Number iterations', fontsize=6)
ax_iter_all.legend(frameon=False, fontsize=6, loc='upper right')
plt_helper.plt.tight_layout()
fig_iter_all.savefig('data/iters_{}.png'.format(sweeper), dpi=300, bbox_inches='tight')
plt_helper.plt.close(fig_iter_all)
if __name__ == "__main__":
run()
| 29,105 | 44.195652 | 138 | py |
pySDC | pySDC-master/pySDC/projects/PinTSimE/buck_model.py | import numpy as np
import dill
from pathlib import Path
from pySDC.helpers.stats_helper import get_sorted
from pySDC.core.Collocation import CollBase as Collocation
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.implementations.problem_classes.BuckConverter import buck_converter
from pySDC.implementations.sweeper_classes.imex_1st_order import imex_1st_order
from pySDC.projects.PinTSimE.piline_model import log_data, setup_mpl
import pySDC.helpers.plot_helper as plt_helper
def main():
"""
A simple test program to do SDC/PFASST runs for the buck converter model
"""
# initialize level parameters
level_params = dict()
level_params['restol'] = 1e-12
level_params['dt'] = 1e-5
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['collocation_class'] = Collocation
sweeper_params['quad_type'] = 'LOBATTO'
sweeper_params['num_nodes'] = 5
sweeper_params['QI'] = 'LU' # For the IMEX sweeper, the LU-trick can be activated for the implicit part
# initialize problem parameters
problem_params = dict()
problem_params['duty'] = 0.5 # duty cycle
    problem_params['fsw'] = 1e3  # switching frequency
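    # Parameter meaning (assumption based on the names): the converter switch
    # toggles with frequency fsw and stays "on" for the fraction `duty` of each
    # switching period.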
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 20
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 30
controller_params['hook_class'] = log_data
# fill description dictionary for easy step instantiation
description = dict()
description['problem_class'] = buck_converter # pass problem class
description['problem_params'] = problem_params # pass problem parameters
description['sweeper_class'] = imex_1st_order # pass sweeper
description['sweeper_params'] = sweeper_params # pass sweeper parameters
description['level_params'] = level_params # pass level parameters
description['step_params'] = step_params
assert 'errtol' not in description['step_params'].keys(), 'No exact solution known to compute error'
assert 'duty' in description['problem_params'].keys(), 'Please supply "duty" in the problem parameters'
assert 'fsw' in description['problem_params'].keys(), 'Please supply "fsw" in the problem parameters'
assert 0 <= problem_params['duty'] <= 1, 'Please set "duty" greater than or equal to 0 and less than or equal to 1'
# set time parameters
t0 = 0.0
Tend = 2e-2
# instantiate controller
controller = controller_nonMPI(num_procs=1, controller_params=controller_params, description=description)
# get initial values on finest level
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(t0)
# call main function to get things done...
uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
Path("data").mkdir(parents=True, exist_ok=True)
fname = 'data/buck.dat'
f = open(fname, 'wb')
dill.dump(stats, f)
f.close()
# filter statistics by number of iterations
iter_counts = get_sorted(stats, type='niter', sortby='time')
# compute and print statistics
min_iter = 20
max_iter = 0
f = open('data/buck_out.txt', 'w')
niters = np.array([item[1] for item in iter_counts])
out = ' Mean number of iterations: %4.2f' % np.mean(niters)
f.write(out + '\n')
print(out)
for item in iter_counts:
out = 'Number of iterations for time %4.2f: %1i' % item
f.write(out + '\n')
print(out)
min_iter = min(min_iter, item[1])
max_iter = max(max_iter, item[1])
assert np.mean(niters) <= 8, "Mean number of iterations is too high, got %s" % np.mean(niters)
f.close()
plot_voltages()
def plot_voltages(cwd='./'):
f = open(cwd + 'data/buck.dat', 'rb')
stats = dill.load(f)
f.close()
# convert filtered statistics to list of iterations count, sorted by process
v1 = get_sorted(stats, type='v1', sortby='time')
v2 = get_sorted(stats, type='v2', sortby='time')
p3 = get_sorted(stats, type='p3', sortby='time')
times = [v[0] for v in v1]
setup_mpl()
fig, ax = plt_helper.plt.subplots(1, 1, figsize=(4.5, 3))
ax.plot(times, [v[1] for v in v1], linewidth=1, label=r'$v_{C_1}$')
ax.plot(times, [v[1] for v in v2], linewidth=1, label=r'$v_{C_2}$')
ax.plot(times, [v[1] for v in p3], linewidth=1, label=r'$i_{L_\pi}$')
ax.legend(frameon=False, fontsize=12, loc='upper right')
ax.set_xlabel('Time')
ax.set_ylabel('Energy')
fig.savefig('data/buck_model_solution.png', dpi=300, bbox_inches='tight')
plt_helper.plt.close(fig)
if __name__ == "__main__":
main()
| 4,680 | 34.195489 | 119 | py |
pySDC | pySDC-master/pySDC/projects/FastWaveSlowWave/plot_stability.py | import matplotlib
matplotlib.use('Agg')
import numpy as np
from matplotlib import pyplot as plt
from pylab import rcParams
from matplotlib.patches import Polygon
from pySDC.implementations.problem_classes.FastWaveSlowWave_0D import swfw_scalar
from pySDC.implementations.sweeper_classes.imex_1st_order import imex_1st_order
from pySDC.core.Step import step
# noinspection PyShadowingNames
def compute_stability():
"""
Routine to compute the stability domains of different configurations of fwsw-SDC
Returns:
numpy.ndarray: lambda_slow
numpy.ndarray: lambda_fast
int: number of collocation nodes
int: number of iterations
numpy.ndarray: stability numbers
"""
N_s = 100
N_f = 400
lam_s_max = 5.0
lam_f_max = 12.0
lambda_s = 1j * np.linspace(0.0, lam_s_max, N_s)
lambda_f = 1j * np.linspace(0.0, lam_f_max, N_f)
problem_params = dict()
# SET VALUE FOR lambda_slow AND VALUES FOR lambda_fast ###
problem_params['lambda_s'] = np.array([0.0])
problem_params['lambda_f'] = np.array([0.0])
problem_params['u0'] = 1.0
# initialize sweeper parameters
sweeper_params = dict()
# SET TYPE AND NUMBER OF QUADRATURE NODES ###
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['num_nodes'] = 3
sweeper_params['do_coll_update'] = True
# initialize level parameters
level_params = dict()
level_params['dt'] = 1.0
# fill description dictionary for easy step instantiation
description = dict()
description['problem_class'] = swfw_scalar # pass problem class
description['problem_params'] = problem_params # pass problem parameters
description['sweeper_class'] = imex_1st_order # pass sweeper (see part B)
description['sweeper_params'] = sweeper_params # pass sweeper parameters
description['level_params'] = level_params # pass level parameters
description['step_params'] = dict() # pass step parameters
# SET NUMBER OF ITERATIONS - SET K=0 FOR COLLOCATION SOLUTION ###
K = 3
# now the description contains more or less everything we need to create a step
S = step(description=description)
L = S.levels[0]
Q = L.sweep.coll.Qmat[1:, 1:]
nnodes = L.sweep.coll.num_nodes
dt = L.params.dt
stab = np.zeros((N_f, N_s), dtype='complex')
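    # Scan the purely imaginary (lambda_slow, lambda_fast) plane: for each pair,
    # apply K sweeps of fwsw-SDC to the scalar test problem u' = (lambda_f + lambda_s)*u
    # (or take the collocation solution for K=0) and store the value of the
    # stability function; |stab| <= 1 marks the stable region.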
for i in range(0, N_s):
for j in range(0, N_f):
lambda_fast = lambda_f[j]
lambda_slow = lambda_s[i]
if K != 0:
lambdas = [lambda_fast, lambda_slow]
# LHS, RHS = L.sweep.get_scalar_problems_sweeper_mats(lambdas=lambdas)
Mat_sweep = L.sweep.get_scalar_problems_manysweep_mat(nsweeps=K, lambdas=lambdas)
else:
# Compute stability function of collocation solution
Mat_sweep = np.linalg.inv(np.eye(nnodes) - dt * (lambda_fast + lambda_slow) * Q)
if L.sweep.params.do_coll_update:
stab_fh = 1.0 + (lambda_fast + lambda_slow) * L.sweep.coll.weights.dot(Mat_sweep.dot(np.ones(nnodes)))
else:
q = np.zeros(nnodes)
q[nnodes - 1] = 1.0
stab_fh = q.dot(Mat_sweep.dot(np.ones(nnodes)))
stab[j, i] = stab_fh
return lambda_s, lambda_f, sweeper_params['num_nodes'], K, stab
# noinspection PyShadowingNames
def plot_stability(lambda_s, lambda_f, num_nodes, K, stab):
"""
Plotting routine of the stability domains
Args:
lambda_s (numpy.ndarray): lambda_slow
lambda_f (numpy.ndarray): lambda_fast
num_nodes (int): number of collocation nodes
K (int): number of iterations
stab (numpy.ndarray): stability numbers
"""
lam_s_max = np.amax(lambda_s.imag)
lam_f_max = np.amax(lambda_f.imag)
rcParams['figure.figsize'] = 1.5, 1.5
fs = 8
fig = plt.figure()
levels = np.array([0.25, 0.5, 0.75, 0.9, 1.1])
CS1 = plt.contour(lambda_s.imag, lambda_f.imag, np.absolute(stab), levels, colors='k', linestyles='dashed')
CS2 = plt.contour(lambda_s.imag, lambda_f.imag, np.absolute(stab), [1.0], colors='k')
# Set markers at points used in plot_stab_vs_k
plt.plot(4, 10, 'x', color='k', markersize=fs - 4)
plt.plot(1, 10, 'x', color='k', markersize=fs - 4)
plt.clabel(CS1, inline=True, fmt='%3.2f', fontsize=fs - 2)
manual_locations = [(1.5, 2.5)]
    if K > 0:  # for K=0 and no 1.0 isoline, this crashes Matplotlib for some reason
plt.clabel(CS2, inline=True, fmt='%3.2f', fontsize=fs - 2, manual=manual_locations)
plt.gca().add_patch(
Polygon(
[[0, 0], [lam_s_max, 0], [lam_s_max, lam_s_max]],
visible=True,
fill=True,
facecolor='.75',
edgecolor='k',
linewidth=1.0,
zorder=11,
)
)
plt.gca().set_xticks(np.arange(0, int(lam_s_max) + 1))
plt.gca().set_yticks(np.arange(0, int(lam_f_max) + 2, 2))
plt.gca().tick_params(axis='both', which='both', labelsize=fs)
plt.xlim([0.0, lam_s_max])
plt.ylim([0.0, lam_f_max])
plt.xlabel(r'$\Delta t \lambda_{slow}$', fontsize=fs, labelpad=0.0)
plt.ylabel(r'$\Delta t \lambda_{fast}$', fontsize=fs, labelpad=0.0)
plt.title(r'$M=%1i$, $K=%1i$' % (num_nodes, K), fontsize=fs)
filename = 'data/stability-K' + str(K) + '-M' + str(num_nodes) + '.png'
fig.savefig(filename, bbox_inches='tight')
if __name__ == "__main__":
lambda_s, lambda_f, num_nodes, K, stab = compute_stability()
plot_stability(lambda_s, lambda_f, num_nodes, K, stab)
| 5,606 | 35.409091 | 118 | py |
pySDC | pySDC-master/pySDC/projects/FastWaveSlowWave/HookClass_boussinesq.py | from pySDC.core.Hooks import hooks
class gmres_tolerance(hooks):
def pre_iteration(self, step, level_number):
"""
        Routine called before the iteration starts; sets a new GMRES tolerance depending on the initial SDC residual
Args:
step (pySDC.Step.step): the current step
level_number (int): the current level number
"""
super(gmres_tolerance, self).pre_iteration(step, level_number)
L = step.levels[level_number]
L.sweep.compute_residual()
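        # Inexact SDC: tie the GMRES tolerance to the current SDC residual
        # (scaled by gmres_tol_factor), but never go below the configured limit,
        # so that early sweeps can use cheaper, looser linear solves.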
L.prob.gmres_tol_limit = max(L.status.residual * L.prob.gmres_tol_factor, L.prob.gmres_tol_limit)
def post_sweep(self, step, level_number):
"""
        Routine called after each sweep; sets a new GMRES tolerance depending on the previous SDC residual
Args:
step (pySDC.Step.step): the current step
level_number (int): the current level number
"""
super(gmres_tolerance, self).post_sweep(step, level_number)
L = step.levels[level_number]
L.prob.gmres_tol_limit = max(L.status.residual * L.prob.gmres_tol_factor, L.prob.gmres_tol_limit)
| 1,141 | 33.606061 | 109 | py |
pySDC | pySDC-master/pySDC/projects/FastWaveSlowWave/rungmrescounter_boussinesq.py | import numpy as np
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.implementations.problem_classes.Boussinesq_2D_FD_imex import boussinesq_2d_imex
from pySDC.implementations.problem_classes.boussinesq_helpers.standard_integrators import SplitExplicit, dirk, rk_imex
from pySDC.implementations.problem_classes.boussinesq_helpers.unflatten import unflatten
from pySDC.implementations.sweeper_classes.imex_1st_order import imex_1st_order
from pySDC.projects.FastWaveSlowWave.HookClass_boussinesq import gmres_tolerance
def main(cwd=''):
"""
Example running/comparing SDC and different standard integrators for the 2D Boussinesq equation
Args:
cwd (string): current working directory
"""
num_procs = 1
# setup parameters "in time"
t0 = 0
Tend = 3000
Nsteps = 100
dt = Tend / float(Nsteps)
# initialize level parameters
level_params = dict()
level_params['restol'] = 1e-15
level_params['dt'] = dt
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 4
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['quad_type'] = 'GAUSS'
sweeper_params['num_nodes'] = 3
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 20
controller_params['hook_class'] = gmres_tolerance
# initialize problem parameters
problem_params = dict()
problem_params['nvars'] = [(4, 300, 30)]
problem_params['u_adv'] = 0.02
problem_params['c_s'] = 0.3
problem_params['Nfreq'] = 0.01
problem_params['x_bounds'] = [(-150.0, 150.0)]
problem_params['z_bounds'] = [(0.0, 10.0)]
problem_params['order'] = [4]
problem_params['order_upw'] = [5]
problem_params['gmres_maxiter'] = [500]
problem_params['gmres_restart'] = [10]
problem_params['gmres_tol_limit'] = [1e-05]
problem_params['gmres_tol_factor'] = [0.1]
# fill description dictionary for easy step instantiation
description = dict()
description['problem_class'] = boussinesq_2d_imex # pass problem class
description['problem_params'] = problem_params # pass problem parameters
description['sweeper_class'] = imex_1st_order # pass sweeper (see part B)
description['sweeper_params'] = sweeper_params # pass sweeper parameters
description['level_params'] = level_params # pass level parameters
description['step_params'] = step_params # pass step parameters
# ORDER OF DIRK/IMEX EQUAL TO NUMBER OF SDC ITERATIONS AND THUS SDC ORDER
dirk_order = step_params['maxiter']
controller = controller_nonMPI(num_procs=num_procs, controller_params=controller_params, description=description)
# get initial values on finest level
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(t0)
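    # Informational CFL numbers: (transport speed) * dt / (mesh width) for the
    # advective and acoustic waves; they are only printed, nothing is enforced.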
cfl_advection = P.params.u_adv * dt / P.h[0]
cfl_acoustic_hor = P.params.c_s * dt / P.h[0]
cfl_acoustic_ver = P.params.c_s * dt / P.h[1]
print("Horizontal resolution: %4.2f" % P.h[0])
print("Vertical resolution: %4.2f" % P.h[1])
print("CFL number of advection: %4.2f" % cfl_advection)
print("CFL number of acoustics (horizontal): %4.2f" % cfl_acoustic_hor)
print("CFL number of acoustics (vertical): %4.2f" % cfl_acoustic_ver)
print("Running SplitExplicit ....")
method_split = 'MIS4_4'
# method_split = 'RK3'
splitp = SplitExplicit(P, method_split, problem_params)
u0 = uinit.flatten()
usplit = np.copy(u0)
print(np.linalg.norm(usplit))
for _ in range(0, 2 * Nsteps):
usplit = splitp.timestep(usplit, dt / 2)
print(np.linalg.norm(usplit))
print("Running DIRK ....")
dirkp = dirk(P, dirk_order)
udirk = np.copy(u0)
print(np.linalg.norm(udirk))
for _ in range(0, Nsteps):
udirk = dirkp.timestep(udirk, dt)
print(np.linalg.norm(udirk))
print("Running RK-IMEX ....")
rkimex = rk_imex(P, dirk_order)
uimex = np.copy(u0)
dt_imex = dt
for _ in range(0, Nsteps):
uimex = rkimex.timestep(uimex, dt_imex)
print(np.linalg.norm(uimex))
print("Running SDC...")
uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
# For reference solution, increase GMRES tolerance
P.gmres_tol_limit = 1e-10
rkimexref = rk_imex(P, 5)
uref = np.copy(u0)
dt_ref = dt / 10.0
print("Running RK-IMEX reference....")
for _ in range(0, 10 * Nsteps):
uref = rkimexref.timestep(uref, dt_ref)
usplit = unflatten(usplit, 4, P.N[0], P.N[1])
udirk = unflatten(udirk, 4, P.N[0], P.N[1])
uimex = unflatten(uimex, 4, P.N[0], P.N[1])
uref = unflatten(uref, 4, P.N[0], P.N[1])
np.save(cwd + 'data/xaxis', P.xx)
np.save(cwd + 'data/sdc', uend)
np.save(cwd + 'data/dirk', udirk)
np.save(cwd + 'data/rkimex', uimex)
np.save(cwd + 'data/split', usplit)
np.save(cwd + 'data/uref', uref)
print("diff split ", np.linalg.norm(uref - usplit))
print("diff dirk ", np.linalg.norm(uref - udirk))
print("diff rkimex ", np.linalg.norm(uref - uimex))
print("diff sdc ", np.linalg.norm(uref - uend))
print(" #### Logging report for Split #### ")
print("Total number of matrix multiplications: %5i" % splitp.logger.nsmall)
print(" #### Logging report for DIRK-%1i #### " % dirkp.order)
print("Number of calls to implicit solver: %5i" % dirkp.logger.solver_calls)
print("Total number of GMRES iterations: %5i" % dirkp.logger.iterations)
print(
"Average number of iterations per call: %6.3f"
% (float(dirkp.logger.iterations) / float(dirkp.logger.solver_calls))
)
print(" ")
print(" #### Logging report for RK-IMEX-%1i #### " % rkimex.order)
print("Number of calls to implicit solver: %5i" % rkimex.logger.solver_calls)
print("Total number of GMRES iterations: %5i" % rkimex.logger.iterations)
print(
"Average number of iterations per call: %6.3f"
% (float(rkimex.logger.iterations) / float(rkimex.logger.solver_calls))
)
print(" ")
print(" #### Logging report for SDC-(%1i,%1i) #### " % (sweeper_params['num_nodes'], step_params['maxiter']))
print("Number of calls to implicit solver: %5i" % P.gmres_logger.solver_calls)
print("Total number of GMRES iterations: %5i" % P.gmres_logger.iterations)
print(
"Average number of iterations per call: %6.3f"
% (float(P.gmres_logger.iterations) / float(P.gmres_logger.solver_calls))
)
if __name__ == "__main__":
main()
| 6,562 | 36.502857 | 118 | py |
pySDC | pySDC-master/pySDC/projects/FastWaveSlowWave/plot_stab_vs_k.py | import matplotlib
matplotlib.use('Agg')
import numpy as np
from matplotlib import pyplot as plt
from pylab import rcParams
from matplotlib.ticker import ScalarFormatter
from pySDC.implementations.problem_classes.FastWaveSlowWave_0D import swfw_scalar
from pySDC.implementations.sweeper_classes.imex_1st_order import imex_1st_order
from pySDC.core.Step import step
# noinspection PyShadowingNames
def compute_stab_vs_k(slow_resolved):
"""
Routine to compute modulus of the stability function
Args:
slow_resolved (bool): switch to compute lambda_slow = 1 or lambda_slow = 4
Returns:
numpy.ndarray: number of nodes
numpy.ndarray: number of iterations
numpy.ndarray: moduli
"""
mvals = [2, 3, 4]
kvals = np.arange(1, 10)
lambda_fast = 10j
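    # The fast eigenvalue is fixed at dt*lambda_fast = 10i; the slow one below is
    # either 1i ("resolved") or 4i ("unresolved"), matching the markers placed in
    # plot_stability.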
# PLOT EITHER FOR lambda_slow = 1 (resolved) OR lambda_slow = 4 (unresolved)
if slow_resolved:
lambda_slow = 1j
else:
lambda_slow = 4j
stabval = np.zeros((np.size(mvals), np.size(kvals)))
problem_params = dict()
# SET VALUE FOR lambda_slow AND VALUES FOR lambda_fast ###
problem_params['lambda_s'] = np.array([0.0])
problem_params['lambda_f'] = np.array([0.0])
problem_params['u0'] = 1.0
# initialize sweeper parameters
sweeper_params = dict()
# SET TYPE AND NUMBER OF QUADRATURE NODES ###
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['do_coll_update'] = True
# initialize level parameters
level_params = dict()
level_params['dt'] = 1.0
# fill description dictionary for easy step instantiation
description = dict()
description['problem_class'] = swfw_scalar # pass problem class
description['problem_params'] = problem_params # pass problem parameters
description['sweeper_class'] = imex_1st_order # pass sweeper (see part B)
description['level_params'] = level_params # pass level parameters
description['step_params'] = dict() # pass step parameters
for i in range(0, np.size(mvals)):
sweeper_params['num_nodes'] = mvals[i]
description['sweeper_params'] = sweeper_params # pass sweeper parameters
# now the description contains more or less everything we need to create a step
S = step(description=description)
L = S.levels[0]
nnodes = L.sweep.coll.num_nodes
for k in range(0, np.size(kvals)):
Kmax = kvals[k]
Mat_sweep = L.sweep.get_scalar_problems_manysweep_mat(nsweeps=Kmax, lambdas=[lambda_fast, lambda_slow])
if L.sweep.params.do_coll_update:
stab_fh = 1.0 + (lambda_fast + lambda_slow) * L.sweep.coll.weights.dot(Mat_sweep.dot(np.ones(nnodes)))
else:
q = np.zeros(nnodes)
q[nnodes - 1] = 1.0
stab_fh = q.dot(Mat_sweep.dot(np.ones(nnodes)))
stabval[i, k] = np.absolute(stab_fh)
return mvals, kvals, stabval
# noinspection PyShadowingNames
def plot_stab_vs_k(slow_resolved, mvals, kvals, stabval):
"""
Plotting routine for moduli
Args:
slow_resolved (bool): switch for lambda_slow
mvals (numpy.ndarray): number of nodes
kvals (numpy.ndarray): number of iterations
stabval (numpy.ndarray): moduli
"""
rcParams['figure.figsize'] = 2.5, 2.5
fig = plt.figure()
fs = 8
plt.plot(kvals, stabval[0, :], 'o-', color='b', label=("M=%2i" % mvals[0]), markersize=fs - 2)
plt.plot(kvals, stabval[1, :], 's-', color='r', label=("M=%2i" % mvals[1]), markersize=fs - 2)
plt.plot(kvals, stabval[2, :], 'd-', color='g', label=("M=%2i" % mvals[2]), markersize=fs - 2)
plt.plot(kvals, 1.0 + 0.0 * kvals, '--', color='k')
plt.xlabel('Number of iterations K', fontsize=fs)
plt.ylabel(r'Modulus of stability function $\left| R \right|$', fontsize=fs)
plt.ylim([0.0, 1.2])
if slow_resolved:
plt.legend(loc='upper right', fontsize=fs, prop={'size': fs})
else:
plt.legend(loc='lower left', fontsize=fs, prop={'size': fs})
plt.gca().get_xaxis().get_major_formatter().labelOnlyBase = False
plt.gca().get_xaxis().set_major_formatter(ScalarFormatter())
# plt.show()
if slow_resolved:
filename = 'data/stab_vs_k_resolved.png'
else:
filename = 'data/stab_vs_k_unresolved.png'
fig.savefig(filename, bbox_inches='tight')
if __name__ == "__main__":
mvals, kvals, stabval = compute_stab_vs_k(slow_resolved=True)
print(np.amax(stabval))
plot_stab_vs_k(True, mvals, kvals, stabval)
mvals, kvals, stabval = compute_stab_vs_k(slow_resolved=False)
print(np.amax(stabval))
plot_stab_vs_k(False, mvals, kvals, stabval)
| 4,707 | 33.617647 | 118 | py |
pySDC | pySDC-master/pySDC/projects/FastWaveSlowWave/plot_dispersion.py | import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import sympy
from pylab import rcParams
from pySDC.implementations.problem_classes.FastWaveSlowWave_0D import swfw_scalar
from pySDC.implementations.sweeper_classes.imex_1st_order import imex_1st_order
from pySDC.implementations.problem_classes.acoustic_helpers.standard_integrators import dirk, rk_imex
from pySDC.core.Step import step
def findomega(stab_fh):
    """
    Given a 2x2 stability matrix R, solve det(exp(-1j*omega)*I - R) = 0 for the
    discrete frequency omega and return a root with non-negative real part.
    """
    assert np.array_equal(np.shape(stab_fh), [2, 2]), 'Stability matrix must be 2x2'
omega = sympy.Symbol('omega')
func = (sympy.exp(-1j * omega) - stab_fh[0, 0]) * (sympy.exp(-1j * omega) - stab_fh[1, 1]) - stab_fh[
0, 1
] * stab_fh[1, 0]
solsym = sympy.solve(func, omega)
sol0 = complex(solsym[0])
sol1 = complex(solsym[1])
if sol0.real >= 0:
sol = sol0
elif sol1.real >= 0:
sol = sol1
else:
        print("Both roots have negative real part, falling back to the first one...")
sol = sol0
return sol
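# Illustrative sketch (added example, not part of the original module): for a
# diagonal stability matrix with exp(-1j*0.5) and exp(-1j*0.8) on the diagonal,
# findomega should recover one of the two frequencies; the values 0.5 and 0.8
# are arbitrary test choices.
def _findomega_demo():  # pragma: no cover
    stab = np.diag(np.exp(-1j * np.array([0.5, 0.8])))
    return findomega(stab)  # expected to be close to 0.5 or 0.8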
def compute_and_plot_dispersion(Nsamples=15, K=3):
"""
Function to compute and plot the dispersion relation
Args:
Nsamples: number of samples for testing
K: number of iterations as well as order
"""
problem_params = dict()
# SET VALUE FOR lambda_slow AND VALUES FOR lambda_fast ###
problem_params['lambda_s'] = np.array([0.0])
problem_params['lambda_f'] = np.array([0.0])
problem_params['u0'] = 1.0
# initialize sweeper parameters
sweeper_params = dict()
# SET TYPE AND NUMBER OF QUADRATURE NODES ###
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['do_coll_update'] = True
sweeper_params['num_nodes'] = 3
# initialize level parameters
level_params = dict()
level_params['dt'] = 1.0
# fill description dictionary for easy step instantiation
description = dict()
description['problem_class'] = swfw_scalar # pass problem class
description['problem_params'] = problem_params # pass problem parameters
description['sweeper_class'] = imex_1st_order # pass sweeper
description['sweeper_params'] = sweeper_params # pass sweeper parameters
description['level_params'] = level_params # pass level parameters
description['step_params'] = dict() # pass step parameters
# ORDER OF DIRK/IMEX IS EQUAL TO NUMBER OF ITERATIONS AND THUS ORDER OF SDC ###
dirk_order = K
c_speed = 1.0
U_speed = 0.05
# now the description contains more or less everything we need to create a step
S = step(description=description)
L = S.levels[0]
# u0 = S.levels[0].prob.u_exact(t0)
# S.init_step(u0)
QE = L.sweep.QE[1:, 1:]
QI = L.sweep.QI[1:, 1:]
Q = L.sweep.coll.Qmat[1:, 1:]
nnodes = L.sweep.coll.num_nodes
dt = L.params.dt
k_vec = np.linspace(0, np.pi, Nsamples + 1, endpoint=False)
k_vec = k_vec[1:]
phase = np.zeros((3, Nsamples))
amp_factor = np.zeros((3, Nsamples))
for i in range(0, np.size(k_vec)):
Cs = -1j * k_vec[i] * np.array([[0.0, c_speed], [c_speed, 0.0]], dtype='complex')
Uadv = -1j * k_vec[i] * np.array([[U_speed, 0.0], [0.0, U_speed]], dtype='complex')
LHS = np.eye(2 * nnodes) - dt * (np.kron(QI, Cs) + np.kron(QE, Uadv))
RHS = dt * (np.kron(Q, Uadv + Cs) - np.kron(QI, Cs) - np.kron(QE, Uadv))
LHSinv = np.linalg.inv(LHS)
Mat_sweep = np.linalg.matrix_power(LHSinv.dot(RHS), K)
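        # accumulate the K-sweep iteration matrix: to P^K add P^k * LHSinv for
        # k = 0, ..., K-1, with P = LHSinv * RHS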
for k in range(0, K):
Mat_sweep = Mat_sweep + np.linalg.matrix_power(LHSinv.dot(RHS), k).dot(LHSinv)
        # ---> The update formula for this case needs verification!!
update = dt * np.kron(L.sweep.coll.weights, Uadv + Cs)
y1 = np.array([1, 0], dtype='complex')
y2 = np.array([0, 1], dtype='complex')
e1 = np.kron(np.ones(nnodes), y1)
stab_fh_1 = y1 + update.dot(Mat_sweep.dot(e1))
e2 = np.kron(np.ones(nnodes), y2)
stab_fh_2 = y2 + update.dot(Mat_sweep.dot(e2))
stab_sdc = np.column_stack((stab_fh_1, stab_fh_2))
# Stability function of backward Euler is 1/(1-z); system is y' = (Cs+Uadv)*y
# stab_ie = np.linalg.inv( np.eye(2) - step.status.dt*(Cs+Uadv) )
        # For testing, insert exact stability function exp(-dt*i*k*(Cs+Uadv))
# stab_fh = la.expm(Cs+Uadv)
dirkts = dirk(Cs + Uadv, dirk_order)
stab_fh1 = dirkts.timestep(y1, 1.0)
stab_fh2 = dirkts.timestep(y2, 1.0)
stab_dirk = np.column_stack((stab_fh1, stab_fh2))
rkimex = rk_imex(M_fast=Cs, M_slow=Uadv, order=K)
stab_fh1 = rkimex.timestep(y1, 1.0)
stab_fh2 = rkimex.timestep(y2, 1.0)
stab_rk_imex = np.column_stack((stab_fh1, stab_fh2))
sol_sdc = findomega(stab_sdc)
sol_dirk = findomega(stab_dirk)
sol_rk_imex = findomega(stab_rk_imex)
# Now solve for discrete phase
phase[0, i] = sol_sdc.real / k_vec[i]
amp_factor[0, i] = np.exp(sol_sdc.imag)
phase[1, i] = sol_dirk.real / k_vec[i]
amp_factor[1, i] = np.exp(sol_dirk.imag)
phase[2, i] = sol_rk_imex.real / k_vec[i]
amp_factor[2, i] = np.exp(sol_rk_imex.imag)
rcParams['figure.figsize'] = 1.5, 1.5
fs = 8
fig = plt.figure()
plt.plot(k_vec, (U_speed + c_speed) + np.zeros(np.size(k_vec)), '--', color='k', linewidth=1.5, label='Exact')
plt.plot(k_vec, phase[1, :], '-', color='g', linewidth=1.5, label='DIRK(' + str(dirkts.order) + ')')
plt.plot(
k_vec,
phase[2, :],
'-+',
color='r',
linewidth=1.5,
label='IMEX(' + str(rkimex.order) + ')',
markevery=(2, 3),
mew=1.0,
)
plt.plot(
k_vec,
phase[0, :],
'-o',
color='b',
linewidth=1.5,
label='SDC(' + str(K) + ')',
markevery=(1, 3),
markersize=fs / 2,
)
plt.xlabel('Wave number', fontsize=fs, labelpad=0.25)
plt.ylabel('Phase speed', fontsize=fs, labelpad=0.5)
    plt.xlim([k_vec[0], k_vec[-1]])
plt.ylim([0.0, 1.1 * (U_speed + c_speed)])
fig.gca().tick_params(axis='both', labelsize=fs)
plt.legend(loc='lower left', fontsize=fs, prop={'size': fs - 2})
plt.xticks([0, 1, 2, 3], fontsize=fs)
filename = 'data/phase-K' + str(K) + '-M' + str(sweeper_params['num_nodes']) + '.png'
plt.gcf().savefig(filename, bbox_inches='tight')
fig = plt.figure()
plt.plot(k_vec, 1.0 + np.zeros(np.size(k_vec)), '--', color='k', linewidth=1.5, label='Exact')
plt.plot(k_vec, amp_factor[1, :], '-', color='g', linewidth=1.5, label='DIRK(' + str(dirkts.order) + ')')
plt.plot(
k_vec,
amp_factor[2, :],
'-+',
color='r',
linewidth=1.5,
label='IMEX(' + str(rkimex.order) + ')',
markevery=(2, 3),
mew=1.0,
)
plt.plot(
k_vec,
amp_factor[0, :],
'-o',
color='b',
linewidth=1.5,
label='SDC(' + str(K) + ')',
markevery=(1, 3),
markersize=fs / 2,
)
plt.xlabel('Wave number', fontsize=fs, labelpad=0.25)
plt.ylabel('Amplification factor', fontsize=fs, labelpad=0.5)
fig.gca().tick_params(axis='both', labelsize=fs)
    plt.xlim([k_vec[0], k_vec[-1]])
plt.legend(loc='lower left', fontsize=fs, prop={'size': fs - 2})
plt.gca().set_ylim([0.0, 1.1])
plt.xticks([0, 1, 2, 3], fontsize=fs)
filename = 'data/ampfactor-K' + str(K) + '-M' + str(sweeper_params['num_nodes']) + '.png'
plt.gcf().savefig(filename, bbox_inches='tight')
if __name__ == "__main__":
compute_and_plot_dispersion()
| 7,647 | 34.082569 | 114 | py |
pySDC | pySDC-master/pySDC/projects/FastWaveSlowWave/runconvergence_acoustic.py | import matplotlib
matplotlib.use('Agg')
import numpy as np
from matplotlib import pyplot as plt
from pylab import rcParams
from matplotlib.ticker import ScalarFormatter
from pySDC.projects.FastWaveSlowWave.HookClass_acoustic import dump_energy
from pySDC.implementations.problem_classes.AcousticAdvection_1D_FD_imex import acoustic_1d_imex
from pySDC.implementations.sweeper_classes.imex_1st_order import imex_1st_order
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
def compute_convergence_data(cwd=''):
"""
Routine to run the 1d acoustic-advection example with different orders
Args:
cwd (string): current working directory
"""
num_procs = 1
# initialize level parameters
level_params = dict()
level_params['restol'] = 1e-14
# This comes as read-in for the step class
step_params = dict()
# This comes as read-in for the problem class
problem_params = dict()
problem_params['cadv'] = 0.1
problem_params['cs'] = 1.00
problem_params['order_adv'] = 5
problem_params['waveno'] = 5
# This comes as read-in for the sweeper class
sweeper_params = dict()
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['num_nodes'] = 3
sweeper_params['do_coll_update'] = True
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 30
controller_params['hook_class'] = dump_energy
# Fill description dictionary for easy hierarchy creation
description = dict()
description['problem_class'] = acoustic_1d_imex
description['sweeper_class'] = imex_1st_order
description['sweeper_params'] = sweeper_params
nsteps = np.zeros((3, 9))
nsteps[0, :] = [20, 30, 40, 50, 60, 70, 80, 90, 100]
nsteps[1, :] = nsteps[0, :]
nsteps[2, :] = nsteps[0, :]
for order in [3, 4, 5]:
error = np.zeros(np.shape(nsteps)[1])
# setup parameters "in time"
t0 = 0
Tend = 1.0
if order == 3:
file = open(cwd + 'data/conv-data.txt', 'w')
else:
file = open(cwd + 'data/conv-data.txt', 'a')
step_params['maxiter'] = order
description['step_params'] = step_params
for ii in range(0, np.shape(nsteps)[1]):
ns = nsteps[order - 3, ii]
            problem_params['nvars'] = [(2, int(2 * ns))]
description['problem_params'] = problem_params
dt = Tend / float(ns)
level_params['dt'] = dt
description['level_params'] = level_params
# instantiate the controller
controller = controller_nonMPI(
num_procs=num_procs, controller_params=controller_params, description=description
)
# get initial values on finest level
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(t0)
if ii == 0:
print("Time step: %4.2f" % dt)
print("Fast CFL number: %4.2f" % (problem_params['cs'] * dt / P.dx))
print("Slow CFL number: %4.2f" % (problem_params['cadv'] * dt / P.dx))
# call main function to get things done...
uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
# compute exact solution and compare
uex = P.u_exact(Tend)
error[ii] = np.linalg.norm(uex - uend, np.inf) / np.linalg.norm(uex, np.inf)
file.write(str(order) + " " + str(ns) + " " + str(error[ii]) + "\n")
file.close()
for ii in range(0, np.shape(nsteps)[1]):
print('error for nsteps= %s: %s' % (nsteps[order - 3, ii], error[ii]))
def plot_convergence(cwd=''):
"""
Plotting routine for the convergence data
Args:
        cwd (string): current working directory
"""
fs = 8
order = np.array([])
nsteps = np.array([])
error = np.array([])
file = open(cwd + 'data/conv-data.txt', 'r')
while True:
line = file.readline()
if not line:
break
        items = line.split(" ", 3)
order = np.append(order, int(items[0]))
nsteps = np.append(nsteps, int(float(items[1])))
error = np.append(error, float(items[2]))
assert np.size(order) == np.size(nsteps), 'Found different number of entries in order and nsteps'
assert np.size(nsteps) == np.size(error), 'Found different number of entries in nsteps and error'
assert np.size(nsteps) % 3 == 0, 'Number of entries not a multiple of three, got %s' % np.size(nsteps)
N = int(np.size(nsteps) / 3)
error_plot = np.zeros((3, N))
nsteps_plot = np.zeros((3, N))
convline = np.zeros((3, N))
order_plot = np.zeros(3)
for ii in range(0, 3):
order_plot[ii] = order[N * ii]
for jj in range(0, N):
error_plot[ii, jj] = error[N * ii + jj]
nsteps_plot[ii, jj] = nsteps[N * ii + jj]
convline[ii, jj] = (
error_plot[ii, 0] * (float(nsteps_plot[ii, 0]) / float(nsteps_plot[ii, jj])) ** order_plot[ii]
)
color = ['r', 'b', 'g']
shape = ['o', 'd', 's']
rcParams['figure.figsize'] = 2.5, 2.5
rcParams['pgf.rcfonts'] = False
fig = plt.figure()
for ii in range(0, 3):
plt.loglog(nsteps_plot[ii, :], convline[ii, :], '-', color=color[ii])
plt.loglog(
nsteps_plot[ii, :],
error_plot[ii, :],
shape[ii],
markersize=fs,
color=color[ii],
label='p=' + str(int(order_plot[ii])),
)
plt.legend(loc='lower left', fontsize=fs, prop={'size': fs})
plt.xlabel('Number of time steps', fontsize=fs)
plt.ylabel('Relative error', fontsize=fs, labelpad=2)
plt.xlim([0.9 * np.min(nsteps_plot), 1.1 * np.max(nsteps_plot)])
plt.ylim([1e-5, 1e0])
plt.yticks([1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1e0], fontsize=fs)
plt.xticks([20, 30, 40, 60, 80, 100], fontsize=fs)
plt.gca().get_xaxis().get_major_formatter().labelOnlyBase = False
plt.gca().get_xaxis().set_major_formatter(ScalarFormatter())
filename = 'data/convergence.png'
fig.savefig(filename, bbox_inches='tight')
if __name__ == "__main__":
compute_convergence_data()
plot_convergence()
| 6,436 | 32.010256 | 110 | py |
pySDC | pySDC-master/pySDC/projects/FastWaveSlowWave/plotgmrescounter_boussinesq.py | import numpy as np
from matplotlib import pyplot as plt
from pylab import rcParams
def plot_buoyancy(cwd=''):
"""
Plotting routine for the cross section of the buoyancy
Args:
cwd (string): current working directory
"""
xx = np.load(cwd + 'data/xaxis.npy')
uend = np.load(cwd + 'data/sdc.npy')
udirk = np.load(cwd + 'data/dirk.npy')
uimex = np.load(cwd + 'data/rkimex.npy')
uref = np.load(cwd + 'data/uref.npy')
usplit = np.load(cwd + 'data/split.npy')
err_split = np.linalg.norm(usplit.flatten() - uref.flatten(), np.inf) / np.linalg.norm(uref.flatten(), np.inf)
err_dirk = np.linalg.norm(udirk.flatten() - uref.flatten(), np.inf) / np.linalg.norm(uref.flatten(), np.inf)
err_imex = np.linalg.norm(uimex.flatten() - uref.flatten(), np.inf) / np.linalg.norm(uref.flatten(), np.inf)
err_sdc = np.linalg.norm(uend.flatten() - uref.flatten(), np.inf) / np.linalg.norm(uref.flatten(), np.inf)
assert err_split < 4.821e-02, 'ERROR: split error is too high, got %s' % err_split
assert err_dirk < 1.495e-01, 'ERROR: dirk error is too high, got %s' % err_dirk
assert err_imex < 1.305e-01, 'ERROR: imex error is too high, got %s' % err_imex
assert err_sdc < 9.548e-02, 'ERROR: sdc error is too high, got %s' % err_sdc
print("Estimated discretisation error split explicit: %5.3e" % err_split)
print("Estimated discretisation error of DIRK: %5.3e" % err_dirk)
print("Estimated discretisation error of RK-IMEX: %5.3e" % err_imex)
print("Estimated discretisation error of SDC: %5.3e" % err_sdc)
fs = 8
rcParams['figure.figsize'] = 5.0, 2.5
plt.figure()
plt.plot(xx[:, 5], udirk[2, :, 5], '--', color='g', markersize=fs - 2, label='DIRK(4)', dashes=(3, 3))
plt.plot(xx[:, 5], uend[2, :, 5], '-', color='b', label='SDC(4)')
plt.plot(xx[:, 5], uimex[2, :, 5], '--', color='r', markersize=fs - 2, label='IMEX(4)', dashes=(3, 3))
plt.legend(loc='lower left', fontsize=fs, prop={'size': fs})
plt.yticks(fontsize=fs)
plt.xticks(fontsize=fs)
plt.xlabel('x [km]', fontsize=fs, labelpad=0)
    plt.ylabel('Buoyancy', fontsize=fs, labelpad=1)
filename = 'data/boussinesq.png'
plt.savefig(filename, bbox_inches='tight')
if __name__ == "__main__":
plot_buoyancy()
| 2,298 | 42.377358 | 114 | py |
pySDC | pySDC-master/pySDC/projects/FastWaveSlowWave/runitererror_acoustic.py | import matplotlib
matplotlib.use('Agg')
import numpy as np
from matplotlib import pyplot as plt
from pylab import rcParams
from pySDC.projects.FastWaveSlowWave.HookClass_acoustic import dump_energy
from pySDC.implementations.problem_classes.AcousticAdvection_1D_FD_imex import acoustic_1d_imex
from pySDC.implementations.sweeper_classes.imex_1st_order import imex_1st_order
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.helpers.stats_helper import filter_stats
def compute_and_plot_itererror():
"""
    Routine to compute and plot the error over the iterations for different values of cs
"""
num_procs = 1
t0 = 0.0
Tend = 0.025
# initialize level parameters
level_params = dict()
level_params['restol'] = 1e-10
level_params['dt'] = Tend
# This comes as read-in for the step class
step_params = dict()
step_params['maxiter'] = 15
# This comes as read-in for the problem class
problem_params = dict()
problem_params['cadv'] = 0.1
problem_params['nvars'] = [(2, 300)]
problem_params['order_adv'] = 5
problem_params['waveno'] = 5
# This comes as read-in for the sweeper class
sweeper_params = dict()
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['do_coll_update'] = True
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 30
controller_params['hook_class'] = dump_energy
# Fill description dictionary for easy hierarchy creation
description = dict()
description['problem_class'] = acoustic_1d_imex
description['sweeper_class'] = imex_1st_order
description['step_params'] = step_params
description['level_params'] = level_params
cs_v = [0.5, 1.0, 1.5, 5.0]
nodes_v = [3]
residual = np.zeros((np.size(cs_v), np.size(nodes_v), step_params['maxiter']))
convrate = np.zeros((np.size(cs_v), np.size(nodes_v), step_params['maxiter'] - 1))
lastiter = np.zeros((np.size(cs_v), np.size(nodes_v))) + step_params['maxiter']
avg_convrate = np.zeros((np.size(cs_v), np.size(nodes_v)))
P = None
for cs_ind in range(0, np.size(cs_v)):
problem_params['cs'] = cs_v[cs_ind]
description['problem_params'] = problem_params
for nodes_ind in np.arange(np.size(nodes_v)):
sweeper_params['num_nodes'] = nodes_v[nodes_ind]
description['sweeper_params'] = sweeper_params
# instantiate the controller
controller = controller_nonMPI(
num_procs=num_procs, controller_params=controller_params, description=description
)
# get initial values on finest level
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(t0)
print("Fast CFL number: %4.2f" % (problem_params['cs'] * level_params['dt'] / P.dx))
print("Slow CFL number: %4.2f" % (problem_params['cadv'] * level_params['dt'] / P.dx))
# call main function to get things done...
uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
extract_stats = filter_stats(stats, type='residual_post_iteration')
for k, v in extract_stats.items():
if k.iter != -1:
residual[cs_ind, nodes_ind, k.iter - 2] = v
# Compute convergence rates
for iter in range(0, step_params['maxiter'] - 1):
if residual[cs_ind, nodes_ind, iter] < level_params['restol']:
lastiter[cs_ind, nodes_ind] = iter
else:
convrate[cs_ind, nodes_ind, iter] = (
residual[cs_ind, nodes_ind, iter + 1] / residual[cs_ind, nodes_ind, iter]
)
print(lastiter[cs_ind, nodes_ind])
avg_convrate[cs_ind, nodes_ind] = np.sum(convrate[cs_ind, nodes_ind, :]) / float(
lastiter[cs_ind, nodes_ind]
)
# Plot the results
fs = 8
color = ['r', 'b', 'g', 'c']
shape = ['o', 'd', 's', 'v']
rcParams['figure.figsize'] = 2.5, 2.5
rcParams['pgf.rcfonts'] = False
fig = plt.figure()
for ii in range(0, np.size(cs_v)):
x = np.arange(1, lastiter[ii, 0] - 1)
y = convrate[ii, 0, 0 : int(lastiter[ii, 0]) - 2]
plt.plot(
x,
y,
linestyle='-',
marker=shape[ii],
markersize=fs - 2,
color=color[ii],
label=r'$C_{fast}$=%4.2f' % (cs_v[ii] * level_params['dt'] / P.dx),
)
plt.legend(loc='upper right', fontsize=fs, prop={'size': fs - 2})
plt.xlabel('Iteration', fontsize=fs)
plt.ylabel(r'$|| r^{k+1} ||_{\infty}/|| r^k ||_{\infty}$', fontsize=fs, labelpad=2)
plt.xlim([0, step_params['maxiter']])
plt.ylim([0, 1.0])
plt.yticks(fontsize=fs)
plt.xticks(fontsize=fs)
filename = 'data/iteration.png'
fig.savefig(filename, bbox_inches='tight')
if __name__ == "__main__":
compute_and_plot_itererror()
| 5,074 | 34.243056 | 98 | py |
pySDC | pySDC-master/pySDC/projects/FastWaveSlowWave/runmultiscale_acoustic.py | import matplotlib
matplotlib.use('Agg')
import numpy as np
from matplotlib import pyplot as plt
from pylab import rcParams
from pySDC.projects.FastWaveSlowWave.HookClass_acoustic import dump_energy
from pySDC.implementations.sweeper_classes.imex_1st_order import imex_1st_order
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.implementations.problem_classes.acoustic_helpers.standard_integrators import bdf2, dirk, trapezoidal, rk_imex
from pySDC.projects.FastWaveSlowWave.AcousticAdvection_1D_FD_imex_multiscale import acoustic_1d_imex_multiscale
def compute_and_plot_solutions():
"""
Routine to compute and plot the solutions of SDC(2), IMEX, BDF-2 and RK for a multiscale problem
"""
num_procs = 1
t0 = 0.0
Tend = 3.0
nsteps = 154 # 154 is value in Vater et al.
dt = Tend / float(nsteps)
# initialize level parameters
level_params = dict()
level_params['restol'] = 1e-10
level_params['dt'] = dt
# This comes as read-in for the step class
step_params = dict()
step_params['maxiter'] = 2
# This comes as read-in for the problem class
problem_params = dict()
problem_params['cadv'] = 0.05
problem_params['cs'] = 1.0
problem_params['nvars'] = [(2, 512)]
problem_params['order_adv'] = 5
problem_params['waveno'] = 5
# This comes as read-in for the sweeper class
sweeper_params = dict()
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['num_nodes'] = 2
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 30
controller_params['hook_class'] = dump_energy
# Fill description dictionary for easy hierarchy creation
description = dict()
description['problem_class'] = acoustic_1d_imex_multiscale
description['problem_params'] = problem_params
description['sweeper_class'] = imex_1st_order
description['sweeper_params'] = sweeper_params
description['step_params'] = step_params
description['level_params'] = level_params
# instantiate the controller
controller = controller_nonMPI(num_procs=num_procs, controller_params=controller_params, description=description)
# get initial values on finest level
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(t0)
# call main function to get things done...
uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
# instantiate standard integrators to be run for comparison
trap = trapezoidal((P.A + P.Dx).astype('complex'), 0.5)
bdf2_m = bdf2(P.A + P.Dx)
dirk_m = dirk((P.A + P.Dx).astype('complex'), step_params['maxiter'])
rkimex = rk_imex(P.A.astype('complex'), P.Dx.astype('complex'), step_params['maxiter'])
y0_tp = np.concatenate((uinit[0, :], uinit[1, :]))
y0_bdf = y0_tp
y0_dirk = y0_tp.astype('complex')
y0_imex = y0_tp.astype('complex')
# Perform time steps with standard integrators
for i in range(0, nsteps):
# trapezoidal rule step
ynew_tp = trap.timestep(y0_tp, dt)
# BDF-2 scheme
if i == 0:
ynew_bdf = bdf2_m.firsttimestep(y0_bdf, dt)
ym1_bdf = y0_bdf
else:
ynew_bdf = bdf2_m.timestep(y0_bdf, ym1_bdf, dt)
# DIRK scheme
ynew_dirk = dirk_m.timestep(y0_dirk, dt)
# IMEX scheme
ynew_imex = rkimex.timestep(y0_imex, dt)
y0_tp = ynew_tp
ym1_bdf = y0_bdf
y0_bdf = ynew_bdf
y0_dirk = ynew_dirk
y0_imex = ynew_imex
# Finished running standard integrators
unew_tp, pnew_tp = np.split(ynew_tp, 2)
unew_bdf, pnew_bdf = np.split(ynew_bdf, 2)
unew_dirk, pnew_dirk = np.split(ynew_dirk, 2)
unew_imex, pnew_imex = np.split(ynew_imex, 2)
fs = 8
rcParams['figure.figsize'] = 2.5, 2.5
# rcParams['pgf.rcfonts'] = False
fig = plt.figure()
sigma_0 = 0.1
# k = 7.0 * 2.0 * np.pi
x_0 = 0.75
# x_1 = 0.25
assert np.isclose(np.linalg.norm(uend[1, :], np.inf), 8.489e-01, 1e-03)
assert np.isclose(np.linalg.norm(pnew_dirk, np.inf), 1.003e00, 1e-03)
assert np.isclose(np.linalg.norm(pnew_imex, np.inf), 2.762e21, 1e-03)
print('Maximum pressure in SDC: %5.3e' % np.linalg.norm(uend[1, :], np.inf))
print('Maximum pressure in DIRK: %5.3e' % np.linalg.norm(pnew_dirk, np.inf))
print('Maximum pressure in RK-IMEX: %5.3e' % np.linalg.norm(pnew_imex, np.inf))
if dirk_m.order == 2:
plt.plot(P.mesh, pnew_bdf, 'd-', color='c', label='BDF-2', markevery=(50, 75))
p_slow = np.exp(-np.square(np.mod(P.mesh - problem_params['cadv'] * Tend, 1.0) - x_0) / (sigma_0 * sigma_0))
plt.plot(P.mesh, p_slow, '--', color='k', markersize=fs - 2, label='Slow mode', dashes=(10, 2))
if np.linalg.norm(pnew_imex, np.inf) <= 2:
plt.plot(
P.mesh, pnew_imex, '+-', color='r', label='IMEX(' + str(rkimex.order) + ')', markevery=(1, 75), mew=1.0
)
plt.plot(P.mesh, uend[1, :], 'o-', color='b', label='SDC(' + str(step_params['maxiter']) + ')', markevery=(25, 75))
plt.plot(P.mesh, np.real(pnew_dirk), '-', color='g', label='DIRK(' + str(dirk_m.order) + ')')
plt.xlabel('x', fontsize=fs, labelpad=0)
plt.ylabel('Pressure', fontsize=fs, labelpad=0)
fig.gca().set_xlim([0, 1.0])
fig.gca().set_ylim([-0.5, 1.1])
fig.gca().tick_params(axis='both', labelsize=fs)
plt.legend(loc='upper left', fontsize=fs, prop={'size': fs}, handlelength=3)
fig.gca().grid()
filename = 'data/multiscale-K' + str(step_params['maxiter']) + '-M' + str(sweeper_params['num_nodes']) + '.png'
plt.gcf().savefig(filename, bbox_inches='tight')
if __name__ == "__main__":
compute_and_plot_solutions()
| 5,778 | 35.11875 | 120 | py |
pySDC | pySDC-master/pySDC/projects/FastWaveSlowWave/__init__.py | 0 | 0 | 0 | py |
|
pySDC | pySDC-master/pySDC/projects/FastWaveSlowWave/plot_stifflimit_specrad.py | import matplotlib
matplotlib.use('Agg')
import numpy as np
from matplotlib import pyplot as plt
from pylab import rcParams
from pySDC.implementations.problem_classes.FastWaveSlowWave_0D import swfw_scalar
from pySDC.implementations.sweeper_classes.imex_1st_order import imex_1st_order
from pySDC.core.Step import step
# noinspection PyShadowingNames
def compute_specrad():
"""
Routine to compute spectral radius and norm of the error propagation matrix E
Returns:
        numpy.ndarray: list of number of nodes
        numpy.ndarray: list of fast lambdas
        numpy.ndarray: list of spectral radii
        numpy.ndarray: list of norms
"""
problem_params = dict()
# SET VALUE FOR lambda_slow AND VALUES FOR lambda_fast ###
problem_params['lambda_s'] = np.array([1.0 * 1j], dtype='complex')
problem_params['lambda_f'] = np.array([50.0 * 1j, 100.0 * 1j], dtype='complex')
problem_params['u0'] = 1.0
# initialize sweeper parameters
sweeper_params = dict()
# SET TYPE OF QUADRATURE NODES ###
sweeper_params['quad_type'] = 'RADAU-RIGHT'
# initialize level parameters
level_params = dict()
level_params['dt'] = 1.0
t0 = 0.0
# fill description dictionary for easy step instantiation
description = dict()
description['problem_class'] = swfw_scalar # pass problem class
description['problem_params'] = problem_params # pass problem parameters
description['sweeper_class'] = imex_1st_order # pass sweeper (see part B)
description['level_params'] = level_params # pass level parameters
description['step_params'] = dict() # pass step parameters
nodes_v = np.arange(2, 10)
specrad = np.zeros((3, np.size(nodes_v)))
norm = np.zeros((3, np.size(nodes_v)))
for i in range(0, np.size(nodes_v)):
sweeper_params['num_nodes'] = nodes_v[i]
description['sweeper_params'] = sweeper_params # pass sweeper parameters
# now the description contains more or less everything we need to create a step
S = step(description=description)
L = S.levels[0]
P = L.prob
u0 = S.levels[0].prob.u_exact(t0)
S.init_step(u0)
QE = L.sweep.QE[1:, 1:]
QI = L.sweep.QI[1:, 1:]
Q = L.sweep.coll.Qmat[1:, 1:]
nnodes = L.sweep.coll.num_nodes
dt = L.params.dt
assert nnodes == nodes_v[i], 'Something went wrong during instantiation, nnodes is not correct, got %s' % nnodes
for j in range(0, 2):
LHS = np.eye(nnodes) - dt * (P.lambda_f[j] * QI + P.lambda_s[0] * QE)
RHS = dt * ((P.lambda_f[j] + P.lambda_s[0]) * Q - (P.lambda_f[j] * QI + P.lambda_s[0] * QE))
evals, evecs = np.linalg.eig(np.linalg.inv(LHS).dot(RHS))
specrad[j + 1, i] = np.linalg.norm(evals, np.inf)
norm[j + 1, i] = np.linalg.norm(np.linalg.inv(LHS).dot(RHS), np.inf)
if L.sweep.coll.left_is_node:
# For Lobatto nodes, first column and row are all zeros, since q_1 = q_0; hence remove them
QI = QI[1:, 1:]
Q = Q[1:, 1:]
# Eigenvalue of error propagation matrix in stiff limit: E = I - inv(QI)*Q
evals, evecs = np.linalg.eig(np.eye(nnodes - 1) - np.linalg.inv(QI).dot(Q))
norm[0, i] = np.linalg.norm(np.eye(nnodes - 1) - np.linalg.inv(QI).dot(Q), np.inf)
else:
evals, evecs = np.linalg.eig(np.eye(nnodes) - np.linalg.inv(QI).dot(Q))
norm[0, i] = np.linalg.norm(np.eye(nnodes) - np.linalg.inv(QI).dot(Q), np.inf)
specrad[0, i] = np.linalg.norm(evals, np.inf)
print("Spectral radius of infinitely fast wave case > 1.0 for M=%2i" % nodes_v[np.argmax(specrad[0, :] > 1.0)])
    print("Spectral radius of finite fast wave case > 1.0 for M=%2i" % nodes_v[np.argmax(specrad[1, :] > 1.0)])
return nodes_v, problem_params['lambda_f'], specrad, norm
# noinspection PyShadowingNames
def plot_specrad(nodes_v, lambda_f, specrad, norm):
"""
Plotting function for spectral radii and norms
Args:
        nodes_v (numpy.ndarray): list of number of nodes
        lambda_f (numpy.ndarray): list of fast lambdas
        specrad (numpy.ndarray): list of spectral radii
        norm (numpy.ndarray): list of norms
"""
fs = 8
rcParams['figure.figsize'] = 2.5, 2.5
rcParams['pgf.rcfonts'] = False
fig = plt.figure()
plt.plot(nodes_v, specrad[0, :], 'rd-', markersize=fs - 2, label=r'$\lambda_{fast} = \infty$')
plt.plot(nodes_v, specrad[1, :], 'bo-', markersize=fs - 2, label=r'$\lambda_{fast} = %2.0f $' % lambda_f[0].imag)
plt.plot(nodes_v, specrad[2, :], 'gs-', markersize=fs - 2, label=r'$\lambda_{fast} = %2.0f $' % lambda_f[1].imag)
plt.xlabel(r'Number of nodes $M$', fontsize=fs)
plt.ylabel(r'Spectral radius $\sigma\left( \mathbf{E} \right)$', fontsize=fs, labelpad=2)
plt.legend(loc='lower right', fontsize=fs, prop={'size': fs})
plt.xlim([np.min(nodes_v), np.max(nodes_v)])
plt.ylim([0, 1.0])
plt.yticks(fontsize=fs)
plt.xticks(fontsize=fs)
filename = 'data/stifflimit-specrad.png'
fig.savefig(filename, bbox_inches='tight')
fig = plt.figure()
plt.plot(nodes_v, norm[0, :], 'rd-', markersize=fs - 2, label=r'$\lambda_{fast} = \infty$')
plt.plot(nodes_v, norm[1, :], 'bo-', markersize=fs - 2, label=r'$\lambda_{fast} = %2.0f $' % lambda_f[0].imag)
plt.plot(nodes_v, norm[2, :], 'gs-', markersize=fs - 2, label=r'$\lambda_{fast} = %2.0f $' % lambda_f[1].imag)
plt.xlabel(r'Number of nodes $M$', fontsize=fs)
    plt.ylabel(r'Norm $\left\| \mathbf{E} \right\|_{\infty}$', fontsize=fs, labelpad=2)
plt.legend(loc='lower right', fontsize=fs, prop={'size': fs})
plt.xlim([np.min(nodes_v), np.max(nodes_v)])
plt.ylim([0, 2.4])
plt.yticks(fontsize=fs)
plt.xticks(fontsize=fs)
filename = 'data/stifflimit-norm.png'
fig.savefig(filename, bbox_inches='tight')
if __name__ == "__main__":
nodes_v, lambda_f, specrad, norm = compute_specrad()
plot_specrad(nodes_v, lambda_f, specrad, norm)
| 6,061 | 40.238095 | 120 | py |
pySDC | pySDC-master/pySDC/projects/FastWaveSlowWave/HookClass_acoustic.py | import numpy as np
from pySDC.core.Hooks import hooks
class dump_energy(hooks):
def __init__(self):
"""
Initialization of output
"""
super(dump_energy, self).__init__()
self.file = open('data/energy-sdc.txt', 'w')
def post_step(self, step, level_number):
"""
        Default routine called after each step, computes and dumps the energy
Args:
step: the current step
level_number: the current level number
"""
super(dump_energy, self).post_step(step, level_number)
# some abbreviations
L = step.levels[level_number]
xx = L.uend
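        # discrete energy: sum of the squares of both solution components over the mesh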
E = np.sum(np.square(xx[0, :]) + np.square(xx[1, :]))
self.file.write('%30.20f\n' % E)
| 766 | 22.96875 | 76 | py |
pySDC | pySDC-master/pySDC/projects/FastWaveSlowWave/AcousticAdvection_1D_FD_imex_multiscale.py | import numpy as np
from pySDC.implementations.problem_classes.AcousticAdvection_1D_FD_imex import acoustic_1d_imex
# noinspection PyUnusedLocal
class acoustic_1d_imex_multiscale(acoustic_1d_imex):
"""
Example implementing the one-dimensional IMEX acoustic-advection with multiscale initial values
"""
def u_exact(self, t):
"""
Routine to compute the exact solution at time t
Args:
t (float): current time
Returns:
dtype_u: exact solution
"""
sigma_0 = 0.1
k = 7.0 * 2.0 * np.pi
x_0 = 0.75
x_1 = 0.25
ms = 1.0
me = self.dtype_u(self.init)
me[0, :] = np.exp(-np.square(self.mesh - x_0 - self.cs * t) / (sigma_0 * sigma_0)) + ms * np.exp(
-np.square(self.mesh - x_1 - self.cs * t) / (sigma_0 * sigma_0)
) * np.cos(k * (self.mesh - self.cs * t) / sigma_0)
me[1, :] = me[0, :]
return me
| 964 | 25.081081 | 105 | py |
pySDC | pySDC-master/pySDC/helpers/stats_helper.py | import numpy as np
def filter_stats(
stats, process=None, time=None, level=None, iter=None, type=None, recomputed=None, num_restarts=None, comm=None
):
"""
    Helper function to extract data from the dictionary of statistics
Args:
stats (dict): raw statistics from a controller run
process (int): process number
time (float): the requested simulation time
level (int): the requested level index
iter (int): the requested iteration count
type (str): string to describe the requested type of value
        recomputed (bool): filter recomputed values from stats if set to anything other than None
        num_restarts (int): filter by the number of restarts
        comm (mpi4py.MPI.Intracomm): Communicator (or None if not applicable)
Returns:
dict: dictionary containing only the entries corresponding to the filter
"""
result = {}
for k, v in stats.items() if recomputed is None else filter_recomputed(stats.copy()).items():
# get data if key matches the filter (if specified)
if (
(k.time == time or time is None)
and (k.process == process or process is None)
and (k.level == level or level is None)
and (k.iter == iter or iter is None)
and (k.type == type or type is None)
and (k.num_restarts == num_restarts or num_restarts is None)
):
result[k] = v
if comm is not None:
        # gather the results across all ranks and then flatten the list
result = {key: value for sub_result in comm.allgather(result) for key, value in sub_result.items()}
return result
def sort_stats(stats, sortby):
"""
Helper function to transform stats dictionary to sorted list of tuples
Args:
stats (dict): dictionary of statistics
sortby (str): string to specify which key to use for sorting
Returns:
list: list of tuples containing the sortby item and the value
"""
result = []
for k, v in stats.items():
# convert string to attribute and append key + value to result as tuple
item = getattr(k, sortby)
result.append((item, v))
# sort by first element of the tuple (which is the sortby key) and return
sorted_data = sorted(result, key=lambda tup: tup[0])
return sorted_data
def filter_recomputed(stats):
"""
Filter recomputed values from the stats and remove them.
Args:
stats (dict): Raw statistics from a controller run
Returns:
dict: The filtered stats dict
"""
# delete values that have been recorded and superseded by similar, but not identical keys
times_restarted = np.unique([me.time for me in stats.keys() if me.num_restarts > 0])
for t in times_restarted:
restarts = max([me.num_restarts for me in filter_stats(stats, type='_recomputed', time=t).keys()])
for i in range(restarts):
[stats.pop(me) for me in filter_stats(stats, time=t, num_restarts=i).keys()]
# delete values that were recorded at times that shouldn't be recorded because we performed a different step after the restart
other_restarted_steps = [me for me in filter_stats(stats, type='_recomputed') if stats[me]]
for step in other_restarted_steps:
[stats.pop(me) for me in filter_stats(stats, time=step.time).keys()]
return stats
def get_list_of_types(stats):
"""
Helper function to get list of types registered in stats
Args:
stats (dict): dictionary with statistics
Returns:
list: list of types registered
"""
type_list = []
for k, _ in stats.items():
if k.type not in type_list:
type_list.append(k.type)
return type_list
def get_sorted(stats, sortby='time', **kwargs):
"""
    Utility for filtering and sorting stats in a single call. Pass a communicator if using MPI.
    Keyword arguments are passed to `filter_stats` for filtering.
    Args:
        stats (dict): raw statistics from a controller run
        sortby (str): string to specify which key to use for sorting
Returns:
list: list of tuples containing the sortby item and the value
"""
return sort_stats(
filter_stats(stats, **kwargs),
sortby=sortby,
)
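# Illustrative usage sketch (added example): extract the iteration counts per
# step from the stats of a controller run; the 'niter' entry is assumed to be
# recorded by the default hooks.
def _get_sorted_demo(stats):  # pragma: no cover
    return get_sorted(stats, type='niter', sortby='time')  # list of (time, niter) tuples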
| 4,236 | 31.844961 | 130 | py |
pySDC | pySDC-master/pySDC/helpers/plot_helper.py | import matplotlib as mpl
import matplotlib.pyplot as plt
from distutils.spawn import find_executable
default_mpl_params = mpl.rcParams.copy()
def figsize(textwidth, scale, ratio):
"""
Get figsize.
Args:
        textwidth (str): Textwidth in your LaTeX file in points
scale (float): The width of the figure relative to the textwidth
ratio (float): The height of the figure relative to its width
Returns:
list: Width and height of the figure to be passed to matplotlib
"""
fig_width_pt = textwidth # Get this from LaTeX using \the\textwidth
inches_per_pt = 1.0 / 72.27 # Convert pt to inch
fig_width = fig_width_pt * inches_per_pt * scale # width in inches
fig_height = fig_width * ratio # height in inches
fig_size = [fig_width, fig_height]
return fig_size
def figsize_by_journal(journal, scale, ratio): # pragma: no cover
"""
    Get figsize for a specific journal. If a text height is stored for the
    journal, the figure is rescaled to fit on the page rather than strictly
    following the supplied parameters.
Args:
journal (str): Name of journal
scale (float): The width of the figure relative to the textwidth
ratio (float): The height of the figure relative to its width
Returns:
list: Width and height of the figure to be passed to matplotlib
"""
# store text width in points here, get this from LaTeX using \the\textwidth
textwidths = {
'JSC_beamer': 426.79135,
'Springer_Numerical_Algorithms': 338.58778,
}
# store text height in points here, get this from LaTeX using \the\textheight
textheights = {
'JSC_beamer': 214.43411,
}
assert (
journal in textwidths.keys()
), f"Textwidth only available for {list(textwidths.keys())}. Please implement one for \"{journal}\"! Get the textwidth using \"\\the\\textwidth\" in your tex file."
# see if the figure fits on the page or if we need to apply the scaling to the height instead
if scale * ratio * textwidths[journal] > textheights.get(journal, 1e9):
if textheights[journal] / scale / ratio > textwidths[journal]:
raise ValueError(
f"We cannot fit figure with scale {scale:.2f} and ratio {ratio:.2f} on the page for journal {journal}!"
)
return figsize(textheights[journal] / (scale * ratio), 1, ratio)
return figsize(textwidths[journal], scale, ratio)
def setup_mpl(font_size=8, reset=False):
if reset:
mpl.rcParams.update(default_mpl_params)
# Set up plotting parameters
style_options = { # setup matplotlib to use latex for output
"font.family": "serif",
"font.serif": [], # blank entries should cause plots to inherit fonts from the document
"font.sans-serif": [],
"font.monospace": [],
# "axes.labelsize": 8, # LaTeX default is 10pt font.
"axes.linewidth": 0.5,
"font.size": font_size,
# "legend.fontsize": 6, # Make the legend/label fonts a little smaller
"legend.numpoints": 1,
# "xtick.labelsize": 6,
"xtick.major.width": 0.5, # major tick width in points
"xtick.minor.width": 0.25,
# "ytick.labelsize": 6,
"ytick.major.width": 0.5, # major tick width in points
"ytick.minor.width": 0.25,
"lines.markersize": 4,
"lines.markeredgewidth": 0.5,
"grid.linewidth": 0.5,
"grid.linestyle": '-',
"grid.alpha": 0.25,
"figure.subplot.hspace": 0.0,
"savefig.pad_inches": 0.01,
}
mpl.rcParams.update(style_options)
if find_executable('latex'):
latex_support = {
"pgf.texsystem": "pdflatex", # change this if using xetex or lautex
"text.usetex": True, # use LaTeX to write all text
"pgf.preamble": r"\usepackage[utf8x]{inputenc}"
r"\usepackage[T1]{fontenc}"
r"\usepackage{underscore}"
r"\usepackage{amsmath,amssymb,marvosym}",
}
else:
latex_support = {
"text.usetex": False, # use LaTeX to write all text
}
mpl.rcParams.update(latex_support)
def newfig(textwidth, scale, ratio=0.6180339887):
plt.clf()
fig, ax = plt.subplots(figsize=figsize(textwidth, scale, ratio))
return fig, ax
def savefig(filename, save_pdf=True, save_pgf=True, save_png=True):
if save_pgf and find_executable('latex'):
plt.savefig('{}.pgf'.format(filename), bbox_inches='tight')
if save_pdf:
plt.savefig('{}.pdf'.format(filename), bbox_inches='tight')
if save_png:
plt.savefig('{}.png'.format(filename), bbox_inches='tight')
plt.close()
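# Illustrative usage sketch (added example): a typical workflow with this
# module; the textwidth of 400 pt and the filename 'demo_plot' are arbitrary
# placeholders for values from an actual LaTeX document and project.
def _plot_helper_demo():  # pragma: no cover
    setup_mpl(font_size=8)
    fig, ax = newfig(textwidth=400, scale=0.9)
    ax.plot([0, 1], [0, 1])
    savefig('demo_plot', save_pdf=False, save_pgf=False, save_png=True)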
| 4,693 | 35.671875 | 168 | py |
pySDC | pySDC-master/pySDC/helpers/pysdc_helper.py | class FrozenClass(object):
"""
Helper class to freeze a class, i.e. to avoid adding more attributes
Attributes:
__isfrozen: Flag to freeze a class
"""
__isfrozen = False
def __setattr__(self, key, value):
"""
        Function called when setting attributes
Args:
key: the attribute
value: the value
"""
# check if attribute exists and if class is frozen
if self.__isfrozen and not hasattr(self, key):
raise TypeError("%r is a frozen class" % self)
object.__setattr__(self, key, value)
def _freeze(self):
"""
Function to freeze the class
"""
self.__isfrozen = True
def get(self, key, default=None):
"""
Wrapper for `__dict__.get` to use when reading variables that might not exist, depending on the configuration
Args:
key (str): Name of the variable you wish to read
default: Value to be returned if the variable does not exist
Returns:
__dict__.get(key, default)
"""
return self.__dict__.get(key, default)
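# Illustrative usage sketch (added example): a frozen parameter container
# rejects new attributes once _freeze() has been called.
class _DemoParams(FrozenClass):  # pragma: no cover
    def __init__(self):
        self.dt = 0.1  # attributes may be added freely before freezing
        self._freeze()
        # any further `self.new_attr = ...` would now raise TypeError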
| 1,157 | 25.930233 | 117 | py |
pySDC | pySDC-master/pySDC/helpers/visualization_tools.py | import matplotlib
matplotlib.use('Agg')
from pySDC.helpers.stats_helper import filter_stats
import numpy as np
from matplotlib import rc
import matplotlib.pyplot as plt
# noinspection PyShadowingBuiltins
def show_residual_across_simulation(stats, fname='residuals.png'):
"""
Helper routine to visualize the residuals across the simulation (one block of PFASST)
Args:
stats (dict): statistics object from a PFASST run
fname (str): filename
"""
# get residuals of the run
extract_stats = filter_stats(stats, type='residual_post_iteration')
# find boundaries for x-,y- and c-axis as well as arrays
maxprocs = 0
maxiter = 0
minres = 0
maxres = -99
for k, v in extract_stats.items():
maxprocs = max(maxprocs, k.process)
maxiter = max(maxiter, k.iter)
minres = min(minres, np.log10(v))
maxres = max(maxres, np.log10(v))
# grep residuals and put into array
residual = np.zeros((maxiter, maxprocs + 1))
residual[:] = -99
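    # -99 acts as a sentinel for (iteration, process) entries without a recorded residual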
for k, v in extract_stats.items():
step = k.process
iter = k.iter
if iter != -1:
residual[iter - 1, step] = np.log10(v)
# Set up plotting stuff and fonts
rc('font', **{"sans-serif": ["Arial"], "size": 30})
rc('legend', fontsize='small')
rc('xtick', labelsize='small')
rc('ytick', labelsize='small')
# create plot and save
fig, ax = plt.subplots(figsize=(15, 10))
cmap = plt.get_cmap('Reds')
plt.pcolor(residual.T, cmap=cmap, vmin=minres, vmax=maxres)
cax = plt.colorbar()
cax.set_label('log10(residual)')
ax.set_xlabel('iteration')
ax.set_ylabel('process')
ax.set_xticks(np.arange(maxiter) + 0.5, minor=False)
ax.set_yticks(np.arange(maxprocs + 1) + 0.5, minor=False)
ax.set_xticklabels(np.arange(maxiter) + 1, minor=False)
ax.set_yticklabels(np.arange(maxprocs + 1), minor=False)
plt.savefig(fname, transparent=True, bbox_inches='tight')
| 1,989 | 27.428571 | 89 | py |
pySDC | pySDC-master/pySDC/helpers/transfer_helper.py | # coding=utf-8
import numpy as np
import scipy.sparse as sprs
from scipy.interpolate import BarycentricInterpolator
def next_neighbors_periodic(p, ps, k):
"""
Function to find the next neighbors for a periodic setup
    This function returns, for a value p, the indices of the k points in the
    vector ps that are closest to it, taking periodicity into account.
Args:
p: the current point
ps (np.ndarray): the grid with the potential neighbors
k (int): number of neighbors to find
Returns:
list: the k next neighbors
"""
p_bar = p - np.floor(p / 1.0) * 1.0
ps = ps - ps[0]
distance_to_p = np.asarray(
list(map(lambda tk: min([np.abs(tk + 1 - p_bar), np.abs(tk - p_bar), np.abs(tk - 1 - p_bar)]), ps))
)
# zip it
value_index = []
for d, i in zip(distance_to_p, range(distance_to_p.size)):
value_index.append((d, i))
# sort by distance
value_index_sorted = sorted(value_index, key=lambda s: s[0])
# take first k indices with least distance and sort them
return sorted(map(lambda s: s[1], value_index_sorted[0:k]))
def next_neighbors(p, ps, k):
"""
Function to find the next neighbors for a non-periodic setup
    This function returns, for a value p, the indices of the k points in the
    vector ps that are closest to it.
Args:
p: the current point
ps (np.ndarray): the grid with the potential neighbors
k (int): number of neighbors to find
Returns:
list: the k next neighbors
"""
distance_to_p = np.abs(ps - p)
# zip it
value_index = []
for d, i in zip(distance_to_p, range(distance_to_p.size)):
value_index.append((d, i))
# sort by distance
value_index_sorted = sorted(value_index, key=lambda s: s[0])
# take first k indices with least distance and sort them
return sorted(map(lambda s: s[1], value_index_sorted[0:k]))
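# Illustrative sketch (added example): on the grid [0, 0.25, 0.5, 0.75] the two
# neighbors of p = 0.9 are the indices [0, 3] in the periodic case (wrapping
# around to the first node) and [2, 3] in the non-periodic case.
def _next_neighbors_demo():  # pragma: no cover
    ps = np.asarray([0.0, 0.25, 0.5, 0.75])
    return next_neighbors_periodic(0.9, ps, 2), next_neighbors(0.9, ps, 2)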
def continue_periodic_array(arr, nn):
"""
Function to append an array for nn neighbors for periodicity
Args:
arr (np.ndarray): the input array
nn (list): the neighbors
Returns:
np.ndarray: the continued array
"""
nn = np.asarray(nn)
d_nn = nn[1:] - nn[:-1]
if np.all(d_nn == np.ones(nn.shape[0] - 1)):
return arr[nn]
else:
cont_arr = [arr[nn[0]]]
shift = 0.0
for n, d in zip(nn[1:], d_nn):
if d != 1:
shift = -1
cont_arr.append(arr[n] + shift)
return np.asarray(cont_arr)
def restriction_matrix_1d(fine_grid, coarse_grid, k=2, periodic=False, pad=1):
"""
    Function to construct the restriction matrix in 1d using barycentric interpolation
Args:
fine_grid (np.ndarray): a one dimensional 1d array containing the nodes of the fine grid
coarse_grid (np.ndarray): a one dimensional 1d array containing the nodes of the coarse grid
k (int): order of the restriction
periodic (bool): flag to indicate periodicity
pad (int): padding parameter for boundaries
Returns:
sprs.csc_matrix: restriction matrix
"""
n_g = coarse_grid.size
if periodic:
M = np.zeros((coarse_grid.size, fine_grid.size))
for i, p in zip(range(n_g), coarse_grid):
nn = next_neighbors_periodic(p, fine_grid, k)
circulating_one = np.asarray([1.0] + [0.0] * (k - 1))
cont_arr = continue_periodic_array(fine_grid, nn)
if p > np.mean(coarse_grid) and not (cont_arr[0] <= p <= cont_arr[-1]):
cont_arr += 1
bary_pol = []
for l in range(k):
bary_pol.append(BarycentricInterpolator(cont_arr, np.roll(circulating_one, l)))
with np.errstate(divide='ignore'):
M[i, nn] = np.asarray(list(map(lambda x: x(p), bary_pol)))
else:
M = np.zeros((coarse_grid.size, fine_grid.size + 2 * pad))
for i, p in zip(range(n_g), coarse_grid):
padded_f_grid = border_padding(fine_grid, pad, pad)
nn = next_neighbors(p, padded_f_grid, k)
# construct the lagrange polynomials for the k neighbors
circulating_one = np.asarray([1.0] + [0.0] * (k - 1))
bary_pol = []
for l in range(k):
bary_pol.append(BarycentricInterpolator(padded_f_grid[nn], np.roll(circulating_one, l)))
with np.errstate(divide='ignore'):
M[i, nn] = np.asarray(list(map(lambda x: x(p), bary_pol)))
if pad > 0:
M = M[:, pad:-pad]
return sprs.csc_matrix(M)
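# Illustrative usage sketch (added example): second-order restriction from a
# periodic fine grid with 8 points to a nested coarse grid with 4 points.
def _restriction_demo():  # pragma: no cover
    fine = np.linspace(0, 1, 8, endpoint=False)
    coarse = np.linspace(0, 1, 4, endpoint=False)
    return restriction_matrix_1d(fine, coarse, k=2, periodic=True)  # sparse (4, 8) matrix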
def interpolation_matrix_1d(fine_grid, coarse_grid, k=2, periodic=False, pad=1, equidist_nested=True):
"""
    Function to construct the interpolation matrix in 1d using barycentric interpolation
Args:
fine_grid (np.ndarray): a one dimensional 1d array containing the nodes of the fine grid
coarse_grid (np.ndarray): a one dimensional 1d array containing the nodes of the coarse grid
        k (int): order of the interpolation
periodic (bool): flag to indicate periodicity
pad (int): padding parameter for boundaries
equidist_nested (bool): shortcut possible, if nodes are equidistant and nested
Returns:
sprs.csc_matrix: interpolation matrix
"""
n_f = fine_grid.size
if periodic:
M = np.zeros((fine_grid.size, coarse_grid.size))
if equidist_nested:
for i, p in zip(range(n_f), fine_grid):
if i % 2 == 0:
M[i, int(i / 2)] = 1.0
else:
nn = []
cpos = int(i / 2)
offset = int(k / 2)
for j in range(k):
nn.append(cpos - offset + 1 + j)
if nn[-1] < 0:
nn[-1] += coarse_grid.size
elif nn[-1] > coarse_grid.size - 1:
nn[-1] -= coarse_grid.size
nn = sorted(nn)
circulating_one = np.asarray([1.0] + [0.0] * (k - 1))
if len(nn) > 0:
cont_arr = continue_periodic_array(coarse_grid, nn)
else:
cont_arr = coarse_grid
if p > np.mean(fine_grid) and not (cont_arr[0] <= p <= cont_arr[-1]):
cont_arr += 1
bary_pol = []
for l in range(k):
bary_pol.append(BarycentricInterpolator(cont_arr, np.roll(circulating_one, l)))
with np.errstate(divide='ignore'):
M[i, nn] = np.asarray(list(map(lambda x: x(p), bary_pol)))
else:
for i, p in zip(range(n_f), fine_grid):
nn = next_neighbors_periodic(p, coarse_grid, k)
circulating_one = np.asarray([1.0] + [0.0] * (k - 1))
cont_arr = continue_periodic_array(coarse_grid, nn)
if p > np.mean(fine_grid) and not (cont_arr[0] <= p <= cont_arr[-1]):
cont_arr += 1
bary_pol = []
for l in range(k):
bary_pol.append(BarycentricInterpolator(cont_arr, np.roll(circulating_one, l)))
with np.errstate(divide='ignore'):
M[i, nn] = np.asarray(list(map(lambda x: x(p), bary_pol)))
else:
M = np.zeros((fine_grid.size, coarse_grid.size + 2 * pad))
padded_c_grid = border_padding(coarse_grid, pad, pad)
if equidist_nested:
for i, p in zip(range(n_f), fine_grid):
if i % 2 != 0:
M[i, int((i - 1) / 2) + 1] = 1.0
else:
nn = []
cpos = int(i / 2)
offset = int(k / 2)
for j in range(k):
nn.append(cpos - offset + 1 + j)
if nn[-1] < 0:
nn[-1] += k
elif nn[-1] > coarse_grid.size + 1:
nn[-1] -= k
nn = sorted(nn)
# construct the lagrange polynomials for the k neighbors
circulating_one = np.asarray([1.0] + [0.0] * (k - 1))
bary_pol = []
for l in range(k):
bary_pol.append(BarycentricInterpolator(padded_c_grid[nn], np.roll(circulating_one, l)))
with np.errstate(divide='ignore'):
M[i, nn] = np.asarray(list(map(lambda x: x(p), bary_pol)))
else:
for i, p in zip(range(n_f), fine_grid):
nn = next_neighbors(p, padded_c_grid, k)
# construct the lagrange polynomials for the k neighbors
circulating_one = np.asarray([1.0] + [0.0] * (k - 1))
bary_pol = []
for l in range(k):
bary_pol.append(BarycentricInterpolator(padded_c_grid[nn], np.roll(circulating_one, l)))
with np.errstate(divide='ignore'):
M[i, nn] = np.asarray(list(map(lambda x: x(p), bary_pol)))
if pad > 0:
M = M[:, pad:-pad]
return sprs.csc_matrix(M)
def border_padding(grid, l, r, pad_type='mirror'):
"""
Function to pad/embed an array at the boundaries
Args:
        grid (np.ndarray): the input array
l: left boundary
r: right boundary
pad_type: type of padding
Returns:
        np.ndarray: the padded array
"""
assert l < grid.size and r < grid.size
padded_arr = np.zeros(grid.size + l + r)
if pad_type == 'mirror':
for i in range(l):
padded_arr[i] = 2 * grid[0] - grid[l - i]
for j in range(r):
padded_arr[-j - 1] = 2 * grid[-1] - grid[-r + j - 1]
padded_arr[l : l + grid.size] = grid
return padded_arr
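# Illustrative sketch (added example): mirror-padding the grid [0, 1, 2, 3] by
# one point on each side yields [-1, 0, 1, 2, 3, 4].
def _border_padding_demo():  # pragma: no cover
    return border_padding(np.asarray([0.0, 1.0, 2.0, 3.0]), 1, 1)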
| 9,984 | 35.441606 | 112 | py |
pySDC | pySDC-master/pySDC/helpers/problem_helper.py | import numpy as np
from scipy.special import factorial
def get_steps(derivative, order, stencil_type):
"""
Get the offsets for the FD stencil.
Args:
derivative (int): Order of the derivative
order (int): Order of accuracy
stencil_type (str): Type of the stencil
Returns:
int: The number of elements in the stencil
numpy.ndarray: The offsets for the stencil
"""
if stencil_type == 'center':
        n = order + derivative - (derivative + 1) % 2
steps = np.arange(n) - n // 2
elif stencil_type == 'forward':
n = order + derivative
steps = np.arange(n)
elif stencil_type == 'backward':
n = order + derivative
steps = -np.arange(n)
elif stencil_type == 'upwind':
n = order + derivative
if n <= 3:
n, steps = get_steps(derivative, order, 'backward')
else:
steps = np.append(-np.arange(n - 1)[::-1], [1])
else:
raise ValueError(
f'Stencil must be of type "center", "forward", "backward" or "upwind", not {stencil_type}. If you want something else you can also give specific steps.'
)
return n, steps
def get_finite_difference_stencil(derivative, order, stencil_type=None, steps=None):
"""
Derive general finite difference stencils from Taylor expansions
Args:
derivative (int): Order of the derivative
order (int): Order of accuracy
stencil_type (str): Type of the stencil
steps (list): Provide specific steps, overrides `stencil_type`
Returns:
numpy.ndarray: The weights of the stencil
numpy.ndarray: The offsets for the stencil
"""
if steps is not None:
n = len(steps)
else:
n, steps = get_steps(derivative, order, stencil_type)
# make a matrix that contains the Taylor coefficients
A = np.zeros((n, n))
idx = np.arange(n)
inv_facs = 1.0 / factorial(idx)
for i in range(0, n):
A[i, :] = steps ** idx[i] * inv_facs[i]
# make a right hand side vector that is zero everywhere except at the position of the desired derivative
sol = np.zeros(n)
sol[derivative] = 1.0
# solve the linear system for the finite difference coefficients
coeff = np.linalg.solve(A, sol)
return coeff, steps
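# Illustrative sketch (added example): the classic second-order centered stencil
# for the second derivative is recovered as weights [1, -2, 1] on the offsets
# [-1, 0, 1].
def _stencil_demo():  # pragma: no cover
    coeff, steps = get_finite_difference_stencil(derivative=2, order=2, stencil_type='center')
    return coeff, steps  # approximately ([1., -2., 1.], array([-1, 0, 1]))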
def get_finite_difference_matrix(
derivative, order, stencil_type=None, steps=None, dx=None, size=None, dim=None, bc=None, cupy=False
):
"""
Build FD matrix from stencils, with boundary conditions
"""
if cupy:
import cupyx.scipy.sparse as sp
else:
import scipy.sparse as sp
if order > 2 and bc != 'periodic':
raise NotImplementedError('Higher order allowed only for periodic boundary conditions')
# get stencil
coeff, steps = get_finite_difference_stencil(
derivative=derivative, order=order, stencil_type=stencil_type, steps=steps
)
if bc == 'dirichlet-zero':
A_1d = sp.diags(coeff, steps, shape=(size, size), format='csc')
elif bc == 'neumann-zero':
A_1d = sp.diags(coeff, steps, shape=(size, size), format='csc')
A_1d[0, 0] = -(dx ** (derivative - 1))
A_1d[0, 1] = +(dx ** (derivative - 1))
A_1d[-1, -1] = -(dx ** (derivative - 1))
A_1d[-1, -2] = +(dx ** (derivative - 1))
    elif bc == 'periodic':
        A_1d = 0 * sp.eye(size, format='csc')
        # loop over the stencil entries; wrap positive and negative offsets around periodically
        for i in range(len(steps)):
            A_1d += coeff[i] * sp.eye(size, k=steps[i])
            if steps[i] > 0:
                A_1d += coeff[i] * sp.eye(size, k=-size + steps[i])
            if steps[i] < 0:
                A_1d += coeff[i] * sp.eye(size, k=size + steps[i])
else:
raise NotImplementedError(f'Boundary conditions {bc} not implemented.')
if dim == 1:
A = A_1d
elif dim == 2:
A = sp.kron(A_1d, sp.eye(size)) + sp.kron(sp.eye(size), A_1d)
elif dim == 3:
A = (
sp.kron(A_1d, sp.eye(size**2))
+ sp.kron(sp.eye(size**2), A_1d)
+ sp.kron(sp.kron(sp.eye(size), A_1d), sp.eye(size))
)
else:
raise NotImplementedError(f'Dimension {dim} not implemented.')
A /= dx**derivative
return A
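# Illustrative usage sketch (added example): assemble the 1d periodic
# second-derivative matrix on 4 points with dx = 1; every row holds the
# wrapped [1, -2, 1] stencil.
def _fd_matrix_demo():  # pragma: no cover
    A = get_finite_difference_matrix(
        derivative=2, order=2, stencil_type='center', dx=1.0, size=4, dim=1, bc='periodic'
    )
    return A.toarray()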
| 4,294 | 31.293233 | 164 | py |
pySDC | pySDC-master/pySDC/helpers/__init__.py | __author__ = 'robert'
| 22 | 10.5 | 21 | py |
pySDC | pySDC-master/pySDC/tests/test_lagrange.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 20 14:52:18 2023
@author: telu
"""
import pytest
import numpy as np
from pySDC.core.Lagrange import LagrangeApproximation
# Pre-compute reference integration matrix
nNodes = 5
approx = LagrangeApproximation(np.linspace(0, 1, nNodes))
nIntegPoints = 13
tEndVals = np.linspace(0, 1, nIntegPoints)
integMatRef = approx.getIntegrationMatrix([(0, t) for t in tEndVals])
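# the parametrized test below checks that both Legendre quadrature backends
# reproduce the reference matrix computed with the default backend above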
@pytest.mark.base
@pytest.mark.parametrize("numQuad", ["LEGENDRE_NUMPY", "LEGENDRE_SCIPY"])
def test_numericalQuadrature(numQuad):
integMat = approx.getIntegrationMatrix([(0, t) for t in tEndVals], numQuad=numQuad)
assert np.allclose(integMat, integMatRef)
| 705 | 26.153846 | 87 | py |
pySDC | pySDC-master/pySDC/tests/test_collocation.py | import pytest
import numpy as np
from pySDC.core.Collocation import CollBase
t_start = np.random.rand(1) * 0.2
t_end = 0.8 + np.random.rand(1) * 0.2
tolQuad = 1e-13
node_types = ['EQUID', 'LEGENDRE']
quad_types = ['GAUSS', 'LOBATTO', 'RADAU-RIGHT', 'RADAU-LEFT']
@pytest.mark.base
@pytest.mark.parametrize("node_type", node_types)
@pytest.mark.parametrize("quad_type", quad_types)
def test_canintegratepolynomials(node_type, quad_type):
for M in range(2, 13):
coll = CollBase(M, t_start, t_end, node_type=node_type, quad_type=quad_type)
# some basic consistency tests
assert np.size(coll.nodes) == np.size(coll.weights), (
"For node type " + coll.__class__.__name__ + ", number of entries in nodes and weights is different"
)
assert np.size(coll.nodes) == M, (
"For node type "
+ coll.__class__.__name__
+ ", requesting M nodes did not produce M entries in nodes and weights"
)
# generate random set of polynomial coefficients
poly_coeff = np.random.rand(coll.order - 1)
# evaluate polynomial at collocation nodes
poly_vals = np.polyval(poly_coeff, coll.nodes)
# use python's polyint function to compute anti-derivative of polynomial
poly_int_coeff = np.polyint(poly_coeff)
# Compute integral from 0.0 to 1.0
int_ex = np.polyval(poly_int_coeff, t_end) - np.polyval(poly_int_coeff, t_start)
# use quadrature rule to compute integral
int_coll = coll.evaluate(coll.weights, poly_vals)
        # For large values of M, substantial differences due to round-off errors have to be considered
assert abs(int_ex - int_coll) < tolQuad, (
"For node type "
+ coll.__class__.__name__
+ ", failed to integrate polynomial of degree "
+ str(coll.order - 1)
+ " exactly. Error: %5.3e" % abs(int_ex - int_coll)
)
@pytest.mark.base
@pytest.mark.parametrize("node_type", node_types)
@pytest.mark.parametrize("quad_type", quad_types)
def test_relateQandSmat(node_type, quad_type):
for M in range(2, 13):
coll = CollBase(M, t_start, t_end, node_type=node_type, quad_type=quad_type)
Q = coll.Qmat[1:, 1:]
S = coll.Smat[1:, 1:]
assert np.shape(Q) == np.shape(S), (
"For node type " + coll.__class__.__name__ + ", Qmat and Smat have different shape"
)
shape = np.shape(Q)
        assert shape[0] == shape[1], "For node type " + coll.__class__.__name__ + ", Qmat / Smat are not square"
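        # rows of S hold node-to-node integrals, so their cumulative sums must
        # reproduce the rows of Q (integrals from the interval start to each node)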
SSum = np.cumsum(S[:, :], axis=0)
for i in range(0, M):
assert np.linalg.norm(Q[i, :] - SSum[i, :]) < 1e-15, (
"For node type "
+ coll.__class__.__name__
+ ", Qmat and Smat did not satisfy the expected summation property."
)
@pytest.mark.base
@pytest.mark.parametrize("node_type", node_types)
@pytest.mark.parametrize("quad_type", quad_types)
def test_partialquadraturewithQ(node_type, quad_type):
for M in range(2, 13):
coll = CollBase(M, t_start, t_end, node_type=node_type, quad_type=quad_type)
Q = coll.Qmat[1:, 1:]
# as in TEST 1, create and integrate a polynomial with random coefficients, but now of degree M-1
degree = min(coll.order, M - 1)
poly_coeff = np.random.rand(degree)
poly_vals = np.polyval(poly_coeff, coll.nodes)
poly_int_coeff = np.polyint(poly_coeff)
for i in range(0, M):
int_ex = np.polyval(poly_int_coeff, coll.nodes[i]) - np.polyval(poly_int_coeff, t_start)
int_coll = np.dot(poly_vals, Q[i, :])
assert abs(int_ex - int_coll) < tolQuad, (
"For node type "
+ coll.__class__.__name__
+ ", partial quadrature from Qmat rule failed to integrate polynomial of degree M-1 exactly for M = "
+ str(M)
)
@pytest.mark.base
@pytest.mark.parametrize("node_type", node_types)
@pytest.mark.parametrize("quad_type", quad_types)
def test_partialquadraturewithS(node_type, quad_type):
for M in range(2, 13):
coll = CollBase(M, t_start, t_end, node_type=node_type, quad_type=quad_type)
S = coll.Smat[1:, 1:]
# as in TEST 1, create and integrate a polynomial with random coefficients, but now of degree M-1
degree = min(coll.order, M - 1)
poly_coeff = np.random.rand(degree)
poly_vals = np.polyval(poly_coeff, coll.nodes)
poly_int_coeff = np.polyint(poly_coeff)
for i in range(1, M):
int_ex = np.polyval(poly_int_coeff, coll.nodes[i]) - np.polyval(poly_int_coeff, coll.nodes[i - 1])
int_coll = np.dot(poly_vals, S[i, :])
assert abs(int_ex - int_coll) < tolQuad, (
"For node type "
+ coll.__class__.__name__
+ ", partial quadrature rule from Smat failed to integrate polynomial of degree M-1 exactly for M = "
+ str(M)
)
| 5,078 | 42.042373 | 117 | py |
pySDC | pySDC-master/pySDC/tests/test_Runge_Kutta_sweeper.py | import pytest
SWEEPER_NAMES = [
'ForwardEuler',
'ExplicitMidpointMethod',
'CrankNicholson',
'BackwardEuler',
'ImplicitMidpointMethod',
'RK4',
'Cash_Karp',
'ESDIRK53',
'DIRK43',
'Heun_Euler',
]
def get_sweeper(sweeper_name):
"""
Retrieve a sweeper from a name
Args:
sweeper_name (str):
Returns:
pySDC.Sweeper.RungeKutta: The sweeper
"""
import pySDC.implementations.sweeper_classes.Runge_Kutta as RK
return eval(f'RK.{sweeper_name}')
def single_run(sweeper_name, dt, Tend, lambdas):
"""
Do a single run of the test equation.
Args:
        sweeper_name (str): Name of the Runge-Kutta method
dt (float): Step size to use
Tend (float): Time to simulate to
lambdas (2d complex numpy.ndarray): Lambdas for test equation
Returns:
dict: Stats
pySDC.datatypes.mesh: Initial conditions
pySDC.Controller.controller: Controller
"""
from pySDC.implementations.problem_classes.TestEquation_0D import testequation0d
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.implementations.hooks.log_work import LogWork
from pySDC.implementations.hooks.log_errors import LogGlobalErrorPostRun
from pySDC.implementations.hooks.log_solution import LogSolution
from pySDC.implementations.hooks.log_embedded_error_estimate import LogEmbeddedErrorEstimate
from pySDC.implementations.convergence_controller_classes.estimate_embedded_error import EstimateEmbeddedError
level_params = {'dt': dt}
step_params = {'maxiter': 1}
problem_params = {
'lambdas': lambdas,
'u0': 1.0 + 0.0j,
}
sweeper_params = {
'num_nodes': 1,
'quad_type': 'RADAU-RIGHT',
}
description = {
'level_params': level_params,
'step_params': step_params,
'sweeper_class': get_sweeper(sweeper_name),
'problem_class': testequation0d,
'sweeper_params': sweeper_params,
'problem_params': problem_params,
'convergence_controllers': {EstimateEmbeddedError: {}},
}
controller_params = {
'logger_level': 30,
'hook_class': [LogWork, LogGlobalErrorPostRun, LogSolution, LogEmbeddedErrorEstimate],
}
controller = controller_nonMPI(num_procs=1, controller_params=controller_params, description=description)
prob = controller.MS[0].levels[0].prob
ic = prob.u_exact(0)
u_end, stats = controller.run(ic, 0.0, Tend)
return stats, ic, controller
@pytest.mark.base
@pytest.mark.parametrize("sweeper_name", SWEEPER_NAMES)
def test_order(sweeper_name):
"""
Test the order in time of the method
Args:
sweeper_name (str): Name of the RK method
"""
import numpy as np
from pySDC.helpers.stats_helper import get_sorted
expected_order = {
'ForwardEuler': 1,
'BackwardEuler': 1,
'ExplicitMidpointMethod': 2,
'ImplicitMidpointMethod': 2,
'RK4': 4,
'CrankNicholson': 2,
'Cash_Karp': 5,
'ESDIRK53': 5,
'DIRK43': 4,
'Heun_Euler': 2,
}
dt_max = {
'Cash_Karp': 1e0,
'ESDIRK53': 1e0,
}
lambdas = [[-1.0e-1 + 0j]]
e = {}
e_embedded = {}
dts = [dt_max.get(sweeper_name, 1e-1) / 2**i for i in range(5)]
for dt in dts:
stats, _, controller = single_run(sweeper_name, dt, 2 * max(dts), lambdas)
e[dt] = get_sorted(stats, type='e_global_post_run')[-1][1]
e_em = get_sorted(stats, type='error_embedded_estimate')
if len(e_em):
e_embedded[dt] = e_em[-1][1]
else:
e_embedded[dt] = 0.0
order = [
np.log(e[dts[i]] / e[dts[i + 1]]) / np.log(dts[i] / dts[i + 1])
for i in range(len(dts) - 1)
if e[dts[i + 1]] > 1e-14
]
order_embedded = [
np.log(e_embedded[dts[i]] / e_embedded[dts[i + 1]]) / np.log(dts[i] / dts[i + 1])
for i in range(len(dts) - 1)
if e_embedded[dts[i + 1]] > 1e-14
]
assert np.isclose(
np.mean(order), expected_order[sweeper_name], atol=0.2
), f"Got unexpected order {np.mean(order):.2f} for {sweeper_name} method! ({order})"
try:
update_order = controller.MS[0].levels[0].sweep.get_update_order()
except NotImplementedError:
update_order = None
if update_order:
assert np.isclose(
np.mean(order_embedded), update_order, atol=0.2
), f"Got unexpected order of embedded error estimate {np.mean(order_embedded):.2f} for {sweeper_name} method! ({order_embedded})"
@pytest.mark.base
@pytest.mark.parametrize("sweeper_name", SWEEPER_NAMES)
def test_stability(sweeper_name):
"""
Test the stability of the method
Args:
sweeper_name (str): Name of the RK method
"""
import numpy as np
from pySDC.helpers.stats_helper import get_sorted
expected_A_stability = {
'ForwardEuler': False,
'BackwardEuler': True,
'ExplicitMidpointMethod': False,
'ImplicitMidpointMethod': True,
'RK4': False,
'CrankNicholson': True,
'Cash_Karp': False,
'ESDIRK53': True,
'DIRK43': True,
'Heun_Euler': False,
}
re = -np.logspace(-3, 6, 50)
im = -np.logspace(-3, 6, 50)
lambdas = np.array([[complex(re[i], im[j]) for i in range(len(re))] for j in range(len(im))]).reshape(
(len(re) * len(im))
)
stats, ic, _ = single_run(sweeper_name, 1.0, 1.0, lambdas)
u = get_sorted(stats, type='u')[-1][1]
unstable = np.abs(u[np.abs(ic) > 0]) / np.abs(ic[np.abs(ic) > 0]) > 1.0
Astable = not any(lambdas[unstable].real < 0)
assert Astable == expected_A_stability[sweeper_name], f"Unexpected stability properties for {sweeper_name} method!"
assert any(~unstable), f"{sweeper_name} method is stable nowhere!"
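# Background sketch (illustrative): A-stability means the stability function
# satisfies |R(z)| <= 1 for all Re(z) <= 0. For backward Euler, R(z) = 1/(1-z),
# which is easy to verify on a grid like the one used above.
def _demo_backward_euler_A_stability():
    import numpy as np
    re = -np.logspace(-3, 6, 20)
    im = np.linspace(-50.0, 50.0, 21)
    z = (re[:, None] + 1j * im[None, :]).ravel()
    assert np.all(np.abs(1.0 / (1.0 - z)) <= 1.0)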
@pytest.mark.base
@pytest.mark.parametrize("sweeper_name", SWEEPER_NAMES)
def test_rhs_evals(sweeper_name):
"""
Test the number of right hand side evaluations.
Args:
sweeper_name (str): Name of the RK method
"""
from pySDC.helpers.stats_helper import get_sorted
lambdas = [[-1.0e-1 + 0j]]
stats, _, controller = single_run(sweeper_name, 1.0, 10.0, lambdas)
sweep = controller.MS[0].levels[0].sweep
num_stages = sweep.coll.num_nodes - sweep.coll.num_solution_stages
rhs_evaluations = [me[1] for me in get_sorted(stats, type='work_rhs')]
assert all(
me == num_stages for me in rhs_evaluations
), f'Did not perform one RHS evaluation per step and stage in {sweeper_name} method! Expected {num_stages}, but got {rhs_evaluations}.'
@pytest.mark.base
def test_embedded_method():
"""
Here, we test if Cash Karp's method gives a hard-coded result and number of restarts when running with adaptivity.
Returns:
None
"""
import numpy as np
from pySDC.projects.Resilience.vdp import run_vdp
from pySDC.implementations.convergence_controller_classes.adaptivity import AdaptivityRK
from pySDC.helpers.stats_helper import get_sorted
sweeper_name = 'Cash_Karp'
# change only the things in the description that we need for adaptivity
adaptivity_params = {}
adaptivity_params['e_tol'] = 1e-7
convergence_controllers = {}
convergence_controllers[AdaptivityRK] = adaptivity_params
description = {}
description['convergence_controllers'] = convergence_controllers
description['sweeper_class'] = get_sweeper(sweeper_name)
description['step_params'] = {'maxiter': 1}
custom_controller_params = {'logger_level': 40}
stats, _, _ = run_vdp(description, 1, custom_controller_params=custom_controller_params)
dt_last = get_sorted(stats, type='dt')[-2][1]
restarts = sum([me[1] for me in get_sorted(stats, type='restart')])
assert np.isclose(dt_last, 0.14175080252629996), "Cash-Karp has computed a different last step size than before!"
assert restarts == 17, "Cash-Karp has restarted a different number of times than before"
if __name__ == '__main__':
test_order('ESDIRK53')
| 8,167 | 29.364312 | 139 | py |
pySDC | pySDC-master/pySDC/tests/test_problem.py | # Test some functionality of the core problem module
import pytest
import numpy as np
@pytest.mark.base
@pytest.mark.parametrize("init", [[(2, 3, 4)], [(2, 3)], [(1,)]])
def test_scipy_reference(init):
"""
Test the generation of reference solutions with scipy.
    A Dahlquist problem is solved both with scipy and exactly. Depending on the shape that is passed in `init`, this
    can emulate a PDE. What is really tested in terms of PDEs is that changes in the shape of the solution object are
    handled correctly.
Args:
init (list): Object similar to the `init` that you use for the problem class
Returns:
None
"""
from pySDC.core.Problem import ptype
# instantiate a dummy problem
problem = ptype(init)
# setup random initial conditions
u0 = np.random.rand(*init[0])
lamdt = np.random.rand(*u0.shape)
# define function to evaluate the right hand side
def eval_rhs(t, u):
return (u.reshape(init[0]) * -lamdt).flatten()
# compute two solutions: One with scipy and one analytic exact solution
u_ref = problem.generate_scipy_reference_solution(eval_rhs, 1.0, u_init=u0.copy(), t_init=0)
u_exact = u0 * np.exp(-lamdt)
# check that the two solutions are the same to high degree
assert (
u_ref.shape == u_exact.shape
), "The shape of the scipy reference solution does not match the shape of the actual solution"
assert np.allclose(u_ref, u_exact, atol=1e-12), "The scipy solution deviates significantly from the exact solution"
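# For orientation (illustrative sketch, not the pySDC API): the same Dahlquist
# reference solve can be reproduced with plain scipy; tight tolerances mimic
# what a reference integrator is expected to deliver.
def _demo_plain_scipy_reference():
    from scipy.integrate import solve_ivp
    lam = -0.5
    sol = solve_ivp(lambda t, u: lam * u, (0, 1), [1.0], rtol=1e-12, atol=1e-12)
    assert np.isclose(sol.y[0, -1], np.exp(lam))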
@pytest.mark.base
class TestBasics:
@staticmethod
def importClass(className):
if className == 'logistics_equation':
from pySDC.implementations.problem_classes.LogisticEquation import logistics_equation
return logistics_equation
else:
raise ValueError(f'cannot import {className} problem class')
PROBLEMS = {
'logistics_equation': {
'probParams': dict(u0=2.0, newton_maxiter=100, newton_tol=1e-6, direct=True, lam=0.5, stop_at_nan=True),
'testParams': {'tBeg': 0, 'tEnd': 1.0, 'nSteps': 1000, 'tol': 1e-3},
}
}
@pytest.mark.base
@pytest.mark.parametrize('className', PROBLEMS.keys())
def test_uExact_accuracy(self, className):
params = self.PROBLEMS[className]['probParams']
prob = self.importClass(className)(**params)
testParams = self.PROBLEMS[className]['testParams']
tBeg = testParams['tBeg']
tEnd = testParams['tEnd']
nSteps = testParams['nSteps']
dt = (tEnd - tBeg) / nSteps
uNum = prob.u_exact(tBeg)
for n in range(nSteps):
uNum = uNum + dt * prob.eval_f(uNum, tBeg + n * dt)
assert np.linalg.norm(prob.u_exact(tEnd) - uNum, ord=np.inf) < testParams['tol']
if __name__ == '__main__':
test_scipy_reference([(2, 3)])
prob = TestBasics()
prob.test_uExact_accuracy('logistics_equation')
| 2,967 | 33.511628 | 120 | py |
pySDC | pySDC-master/pySDC/tests/test_spatial_transfer.py | from collections import namedtuple
import pytest
import numpy as np
# setup id for gathering the results (will sort by nvars)
ID = namedtuple('ID', ('nvars_fine', 'iorder'))
@pytest.mark.base
def get_accuracy_orders(results):
"""
Routine to compute the order of accuracy in space
Args:
results: the dictionary containing the errors
Returns:
the list of orders
"""
# retrieve the list of nvars from results
assert 'nvars_fine_list' in results, 'ERROR: expecting the list of nvars in the results dictionary'
assert 'iorder_list' in results, 'ERROR: expecting the list of iorders in the results dictionary'
nvars_fine_list = sorted(results['nvars_fine_list'])
iorder_list = sorted(results['iorder_list'])
order = []
# loop over list of interpolation orders
for iorder in iorder_list:
# loop over two consecutive errors/nvars pairs
for i in range(1, len(nvars_fine_list)):
# get ids
id = ID(nvars_fine=nvars_fine_list[i], iorder=iorder)
id_prev = ID(nvars_fine=nvars_fine_list[i - 1], iorder=iorder)
# compute order as log(prev_error/this_error)/log(this_nvars/old_nvars)
if type(nvars_fine_list[i]) is tuple:
nvars = nvars_fine_list[i][0]
nvars_prev = nvars_fine_list[i - 1][0]
else:
nvars = nvars_fine_list[i]
nvars_prev = nvars_fine_list[i - 1]
computed_order = np.log(results[id_prev] / results[id]) / np.log(nvars / nvars_prev)
order.append((nvars_fine_list[i], iorder, computed_order))
return order
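# Quick sanity sketch for the formula above (illustrative): halving the mesh
# width (doubling nvars) divides the error of a second-order method by four,
# so the log-ratio estimate recovers the order 2.
def _demo_order_formula():
    e_prev, e = 1e-2, 2.5e-3
    n_prev, n = 32, 64
    assert np.isclose(np.log(e_prev / e) / np.log(n / n_prev), 2.0)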
@pytest.mark.base
def test_mesh_to_mesh_1d_dirichlet():
"""
    A simple test program to test Dirichlet interpolation order in space
"""
from pySDC.implementations.problem_classes.HeatEquation_ND_FD import heatNd_unforced
from pySDC.implementations.transfer_classes.TransferMesh import mesh_to_mesh
# initialize problem parameters
problem_params = {}
problem_params['nu'] = 0.1 # diffusion coefficient
problem_params['freq'] = 3 # frequency for the test value
problem_params['bc'] = 'dirichlet-zero' # BCs
# initialize transfer parameters
space_transfer_params = {}
space_transfer_params['rorder'] = 2
iorder_list = [2, 4, 6, 8]
nvars_fine_list = [2**p - 1 for p in range(5, 9)]
# set up dictionary to store results (plus lists)
results = {}
results['nvars_fine_list'] = nvars_fine_list
results['iorder_list'] = iorder_list
# loop over interpolation orders and number of DOFs
for iorder in iorder_list:
space_transfer_params['iorder'] = iorder
for nvars_fine in nvars_fine_list:
# instantiate fine problem
problem_params['nvars'] = nvars_fine # number of degrees of freedom
Pfine = heatNd_unforced(**problem_params)
# instantiate coarse problem
problem_params['nvars'] = int((nvars_fine + 1) / 2.0 - 1)
Pcoarse = heatNd_unforced(**problem_params)
# instantiate spatial interpolation
T = mesh_to_mesh(fine_prob=Pfine, coarse_prob=Pcoarse, params=space_transfer_params)
# set exact fine solution to compare with
uexact_fine = Pfine.u_exact(t=0)
# set exact coarse solution as source
uexact_coarse = Pcoarse.u_exact(t=0)
# do the interpolation/prolongation
uinter = T.prolong(uexact_coarse)
# compute error and store
err = abs(uinter - uexact_fine)
id = ID(nvars_fine=nvars_fine, iorder=iorder)
results[id] = err
orders = get_accuracy_orders(results)
for p in range(len(orders)):
# print(abs(orders[p][1] - orders[p][2]) / orders[p][1])
assert (
abs(orders[p][1] - orders[p][2]) / orders[p][1] < 0.151
), 'ERROR: did not get expected orders for interpolation, got %s' % str(orders[p])
@pytest.mark.base
def test_mesh_to_mesh_1d_periodic():
"""
A simple test program to test periodic interpolation order in space
"""
from pySDC.implementations.problem_classes.AdvectionEquation_ND_FD import advectionNd
from pySDC.implementations.transfer_classes.TransferMesh import mesh_to_mesh
# initialize problem parameters
problem_params = {}
problem_params['c'] = 0.1 # advection coefficient
problem_params['freq'] = 4 # frequency for the test value
problem_params['stencil_type'] = 'center'
problem_params['bc'] = 'periodic' # boundary conditions
# initialize transfer parameters
space_transfer_params = {}
space_transfer_params['rorder'] = 2
space_transfer_params['periodic'] = True
iorder_list = [2, 4, 6, 8]
nvars_fine_list = [2**p for p in range(5, 9)]
# set up dictionary to store results (plus lists)
results = {}
results['nvars_fine_list'] = nvars_fine_list
results['iorder_list'] = iorder_list
# loop over interpolation orders and number of DOFs
for iorder in iorder_list:
space_transfer_params['iorder'] = iorder
for nvars_fine in nvars_fine_list:
# instantiate fine problem
problem_params['nvars'] = nvars_fine # number of degrees of freedom
Pfine = advectionNd(**problem_params)
# instantiate coarse problem
problem_params['nvars'] = int(nvars_fine / 2)
Pcoarse = advectionNd(**problem_params)
# instantiate spatial interpolation
T = mesh_to_mesh(fine_prob=Pfine, coarse_prob=Pcoarse, params=space_transfer_params)
# set exact fine solution to compare with
uexact_fine = Pfine.u_exact(t=0)
# set exact coarse solution as source
uexact_coarse = Pcoarse.u_exact(t=0)
# do the interpolation/prolongation
uinter = T.prolong(uexact_coarse)
# compute error and store
err = abs(uinter - uexact_fine)
id = ID(nvars_fine=nvars_fine, iorder=iorder)
results[id] = err
orders = get_accuracy_orders(results)
print(orders)
for p in range(len(orders)):
# print(abs(orders[p][1]-orders[p][2])/orders[p][1])
assert (
abs(orders[p][1] - orders[p][2]) / orders[p][1] < 0.051
), 'ERROR: did not get expected orders for interpolation, got %s' % str(orders[p])
@pytest.mark.base
def test_mesh_to_mesh_2d_periodic():
"""
A simple test program to test periodic interpolation order in 2d
"""
from pySDC.implementations.problem_classes.HeatEquation_ND_FD import heatNd_unforced
from pySDC.implementations.transfer_classes.TransferMesh import mesh_to_mesh
# initialize problem parameters
problem_params = {}
problem_params['freq'] = (2, 2)
problem_params['nu'] = 1.0
problem_params['bc'] = 'periodic'
# initialize transfer parameters
space_transfer_params = {}
space_transfer_params['rorder'] = 2
space_transfer_params['periodic'] = True
iorder_list = [2, 4, 6, 8]
nvars_fine_list = [(2**p, 2**p) for p in range(5, 9)]
# set up dictionary to store results (plus lists)
results = {}
results['nvars_fine_list'] = nvars_fine_list
results['iorder_list'] = iorder_list
# loop over interpolation orders and number of DOFs
for iorder in iorder_list:
space_transfer_params['iorder'] = iorder
for nvars_fine in nvars_fine_list:
# instantiate fine problem
problem_params['nvars'] = nvars_fine # number of degrees of freedom
Pfine = heatNd_unforced(**problem_params)
# instantiate coarse problem
problem_params['nvars'] = (int(nvars_fine[0] / 2), int(nvars_fine[1] / 2))
Pcoarse = heatNd_unforced(**problem_params)
# instantiate spatial interpolation
T = mesh_to_mesh(fine_prob=Pfine, coarse_prob=Pcoarse, params=space_transfer_params)
# set exact fine solution to compare with
uexact_fine = Pfine.u_exact(t=0)
# set exact coarse solution as source
uexact_coarse = Pcoarse.u_exact(t=0)
# do the interpolation/prolongation
uinter = T.prolong(uexact_coarse)
# compute error and store
err = abs(uinter - uexact_fine)
id = ID(nvars_fine=nvars_fine, iorder=iorder)
results[id] = err
orders = get_accuracy_orders(results)
print(orders)
for p in range(len(orders)):
# print(abs(orders[p][1] - orders[p][2]) / orders[p][1])
assert (
abs(orders[p][1] - orders[p][2]) / orders[p][1] < 0.115
), 'ERROR: did not get expected orders for interpolation, got %s' % str(orders[p])
if __name__ == "__main__":
test_mesh_to_mesh_1d_dirichlet()
pass
| 8,912 | 34.090551 | 103 | py |
pySDC | pySDC-master/pySDC/tests/test_2d_fd_accuracy.py | from collections import namedtuple
import pytest
import numpy as np
# setup id for gathering the results (will sort by nvars)
ID = namedtuple('ID', 'nvars')
@pytest.mark.base
def test_spatial_accuracy():
"""
A simple test program to check order of accuracy in space for a simple 2d test problem
"""
# initialize problem parameters
problem_params = {}
problem_params['freq'] = (2, 2)
problem_params['nu'] = 1.0
problem_params['bc'] = 'periodic'
# create list of nvars to do the accuracy test with
nvars_list = [(2**p, 2**p) for p in range(4, 12)]
# run accuracy test for all nvars
for order_stencil in [2, 4, 8]:
results = run_accuracy_check(nvars_list=nvars_list, problem_params=problem_params, order_stencil=order_stencil)
# compute order of accuracy
order = get_accuracy_order(results)
print(order_stencil, order)
assert all(
np.isclose(order, order_stencil, atol=5e-2)
), f"ERROR: expected spatial order to be {order_stencil} but got {np.mean(order):.2f}"
@pytest.mark.base
def run_accuracy_check(nvars_list, problem_params, order_stencil):
"""
Routine to check the error of the Laplacian vs. its FD discretization
Args:
nvars_list: list of nvars to do the testing with
problem_params: dictionary containing the problem-dependent parameters
Returns:
a dictionary containing the errors and a header (with nvars_list)
"""
from pySDC.implementations.problem_classes.HeatEquation_ND_FD import heatNd_unforced
results = {}
# loop over all nvars
for nvars in nvars_list:
# setup problem
problem_params['nvars'] = nvars
problem_params['order'] = order_stencil
prob = heatNd_unforced(**problem_params)
# create x values, use only inner points
xvalues = np.array([i * prob.dx for i in range(prob.nvars[0])])
# create a mesh instance and fill it with a sine wave
u = prob.u_exact(t=0)
# create a mesh instance and fill it with the Laplacian of the sine wave
u_lap = prob.dtype_u(init=prob.init)
u_lap[:] = (
-2
* (np.pi**2 * prob.freq[0] * prob.freq[1])
* prob.nu
* np.kron(np.sin(np.pi * prob.freq[0] * xvalues), np.sin(np.pi * prob.freq[1] * xvalues)).reshape(nvars)
)
# compare analytic and computed solution using the eval_f routine of the problem class
err = abs(prob.eval_f(u, 0) - u_lap)
# get id for this nvars and put error into dictionary
id = ID(nvars=nvars)
results[id] = err
# add nvars_list to dictionary for easier access later on
results['nvars_list'] = nvars_list
return results
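# Side note (illustrative): for u(x, y) = sin(pi f x) * sin(pi f y) the exact
# Laplacian is -2 * (pi * f)**2 * u, which is the closed form assembled above
# for freq = (f, f). A quick 5-point finite-difference spot check:
def _demo_laplacian_identity():
    f, h = 2, 1e-4
    u = lambda x, y: np.sin(np.pi * f * x) * np.sin(np.pi * f * y)
    x0, y0 = 0.3, 0.4
    lap_fd = (u(x0 + h, y0) + u(x0 - h, y0) + u(x0, y0 + h) + u(x0, y0 - h) - 4 * u(x0, y0)) / h**2
    assert np.isclose(lap_fd, -2 * (np.pi * f) ** 2 * u(x0, y0), rtol=1e-4)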
def get_accuracy_order(results):
"""
Routine to compute the order of accuracy in space
Args:
results: the dictionary containing the errors
Returns:
the list of orders
"""
# retrieve the list of nvars from results
assert 'nvars_list' in results, 'ERROR: expecting the list of nvars in the results dictionary'
nvars_list = sorted(results['nvars_list'])
order = []
# loop over two consecutive errors/nvars pairs
for i in range(1, len(nvars_list)):
# get ids
id = ID(nvars=nvars_list[i])
id_prev = ID(nvars=nvars_list[i - 1])
# compute order as log(prev_error/this_error)/log(this_nvars/old_nvars) <-- depends on the sorting of the list!
if results[id] > 1e-8 and results[id_prev] > 1e-8:
order.append(np.log(results[id_prev] / results[id]) / np.log(nvars_list[i][0] / nvars_list[i - 1][0]))
return order
| 3,698 | 31.447368 | 119 | py |
pySDC | pySDC-master/pySDC/tests/tests_core.py | import numpy as np
import pytest
@pytest.mark.base
@pytest.mark.parametrize("subclass", ['DataError'])
def test_errors(subclass):
    check_error(subclass)
@pytest.mark.base
def check_error(subclass):
import pySDC.core.Errors
err = getattr(pySDC.core.Errors, subclass)
try:
raise err('bla')
except err:
assert True
@pytest.mark.base
@pytest.mark.parametrize("init", [10, (10, 10), (10, 10, 10)])
def test_datatypes_mesh(init):
    check_datatypes_mesh(init)
@pytest.mark.base
def check_datatypes_mesh(init):
import pySDC.implementations.datatype_classes.mesh as m
m1 = m.mesh((init, None, np.dtype('float64')))
m2 = m.mesh(m1)
m1[:] = 1.0
m2[:] = 2.0
m3 = m1 + m2
m4 = m1 - m2
m5 = 0.1 * m1
m6 = m1
m7 = abs(m1)
m8 = m.mesh(m1)
assert isinstance(m3, type(m1))
assert isinstance(m4, type(m1))
assert isinstance(m5, type(m1))
assert isinstance(m6, type(m1))
assert isinstance(m7, float)
assert m2 is not m1
assert m3 is not m1
assert m4 is not m1
assert m5 is not m1
assert m6 is m1
assert np.shape(m3) == np.shape(m1)
assert np.shape(m4) == np.shape(m1)
assert np.shape(m5) == np.shape(m1)
assert np.all(m1 == 1.0)
assert np.all(m2 == 2.0)
assert np.all(m3 == 3.0)
assert np.all(m4 == -1.0)
assert np.all(m5 == 0.1)
assert np.all(m8 == 1.0)
assert m7 >= 0
@pytest.mark.base
@pytest.mark.parametrize("init", [1, 10])
def test_datatypes_particles(init):
    check_datatypes_particles(init)
@pytest.mark.base
def check_datatypes_particles(init):
from pySDC.implementations.datatype_classes.particles import particles
from pySDC.implementations.datatype_classes.particles import acceleration
p1 = particles((init, None, np.dtype('float64')))
p2 = particles(p1)
p5 = particles((init, None, np.dtype('float64')))
p1.pos[:] = 1.0
p2.pos[:] = 2.0
p1.vel[:] = 10.0
p2.vel[:] = 20.0
p3 = p1 + p2
p4 = p1 - p2
p5.pos[:] = 0.1 * p1.vel
p6 = p1
p7 = abs(p1)
a1 = acceleration((init, None, np.dtype('float64')))
a2 = acceleration(a1)
p8 = particles(p1)
a1[:] = 100.0
a2[:] = 200.0
a3 = a1 + a2
p8.vel[:] = 0.1 * a1
p8.pos[:] = 0.1 * (0.1 * a1)
assert isinstance(p3, type(p1))
assert isinstance(p4, type(p1))
assert isinstance(p5.pos, type(p1.pos))
assert isinstance(p6, type(p1))
assert isinstance(p7, float)
assert isinstance(a2, type(a1))
assert isinstance(p8.pos, type(p1.pos))
assert isinstance(p8.vel, type(p1.vel))
assert p2 is not p1
assert p3 is not p1
assert p4 is not p1
assert p5 is not p1
assert p6 is p1
assert a2 is not a1
assert a3 is not a1
assert np.shape(p3.pos) == np.shape(p1.pos)
assert np.shape(p4.pos) == np.shape(p1.pos)
assert np.shape(p3.vel) == np.shape(p1.vel)
assert np.shape(p4.vel) == np.shape(p1.vel)
assert np.shape(a2) == np.shape(a1)
assert np.all(p3.pos == 3.0)
assert np.all(p4.pos == -1.0)
assert np.all(p3.vel == 30.0)
assert np.all(p4.vel == -10.0)
assert np.all(p5.pos == 1.0)
assert p7 >= 0
assert np.all(p8.pos == 1.0)
assert np.all(p8.vel == 10.0)
assert np.all(a3 == 300.0)
| 3,305 | 21.337838 | 77 | py |
pySDC | pySDC-master/pySDC/tests/__init__.py | import os
def setUp(self):
if not os.path.exists('data'):
os.makedirs('data')
| 92 | 12.285714 | 34 | py |
pySDC | pySDC-master/pySDC/tests/test_imexsweeper.py | import unittest
import pytest
import numpy as np
node_types = ['EQUID', 'LEGENDRE']
quad_types = ['GAUSS', 'LOBATTO', 'RADAU-RIGHT', 'RADAU-LEFT']
@pytest.mark.base
class TestImexSweeper(unittest.TestCase):
#
# Some auxiliary functions which are not tests themselves
#
def setupLevelStepProblem(self):
from pySDC.core import Step as stepclass
self.description['sweeper_params'] = self.swparams
step = stepclass.step(description=self.description)
level = step.levels[0]
level.status.time = 0.0
u0 = step.levels[0].prob.u_exact(step.time)
step.init_step(u0)
nnodes = step.levels[0].sweep.coll.num_nodes
problem = level.prob
return step, level, problem, nnodes
#
# General setUp function used by all tests
#
def setUp(self):
from pySDC.implementations.problem_classes.FastWaveSlowWave_0D import swfw_scalar
from pySDC.implementations.sweeper_classes.imex_1st_order import imex_1st_order as imex
self.pparams = {}
self.pparams['lambda_s'] = np.array([-0.1 * 1j], dtype='complex')
self.pparams['lambda_f'] = np.array([-1.0 * 1j], dtype='complex')
self.pparams['u0'] = np.random.rand()
self.swparams = {}
self.swparams['num_nodes'] = 2 + np.random.randint(5)
lparams = {}
lparams['dt'] = 1.0
self.description = {}
self.description['problem_class'] = swfw_scalar
self.description['problem_params'] = self.pparams
self.description['sweeper_class'] = imex
self.description['level_params'] = lparams
# ***************
# **** TESTS ****
# ***************
#
# Check that a level object can be instantiated
#
def test_caninstantiate(self):
from pySDC.core import Step as stepclass
from pySDC.implementations.sweeper_classes.imex_1st_order import imex_1st_order as imex
for node_type, quad_type in zip(node_types, quad_types):
self.swparams['node_type'] = node_type
self.swparams['quad_type'] = quad_type
self.description['sweeper_params'] = self.swparams
S = stepclass.step(description=self.description)
assert isinstance(S.levels[0].sweep, imex), "sweeper in generated level is not an object of type imex"
#
    # Check that a level object can be registered in a step object (needed as prerequisite to execute update_nodes)
#
def test_canregisterlevel(self):
from pySDC.core import Step as stepclass
for node_type, quad_type in zip(node_types, quad_types):
self.swparams['node_type'] = node_type
self.swparams['quad_type'] = quad_type
self.description['sweeper_params'] = self.swparams
step = stepclass.step(description=self.description)
L = step.levels[0]
with self.assertRaises(Exception):
L.sweep.predict()
with self.assertRaises(Exception):
L.update_nodes()
with self.assertRaises(Exception):
L.compute_end_point()
#
# Check that the sweeper functions update_nodes and compute_end_point can be executed
#
def test_canrunsweep(self):
for node_type, quad_type in zip(node_types, quad_types):
self.swparams['node_type'] = node_type
self.swparams['quad_type'] = quad_type
self.description['sweeper_params'] = self.swparams
# After running setupLevelStepProblem, the functions predict, update_nodes and compute_end_point should run
step, level, problem, nnodes = self.setupLevelStepProblem()
assert level.u[0] is not None, "After init_step, level.u[0] should no longer be of type None"
assert level.u[1] is None, "Before predict, level.u[1] and following should be of type None"
level.sweep.predict()
# Should now be able to run update nodes
level.sweep.update_nodes()
            assert level.uend is None, "uend should be None prior to running compute_end_point"
level.sweep.compute_end_point()
assert level.uend is not None, "uend still None after running compute_end_point"
#
# Make sure a sweep in matrix form is equal to a sweep in node-to-node form
#
def test_sweepequalmatrix(self):
for node_type, quad_type in zip(node_types, quad_types):
self.swparams['node_type'] = node_type
self.swparams['quad_type'] = quad_type
step, level, problem, nnodes = self.setupLevelStepProblem()
step.levels[0].sweep.predict()
u0full = np.array([level.u[l].flatten() for l in range(1, nnodes + 1)])
# Perform node-to-node SDC sweep
level.sweep.update_nodes()
lambdas = [problem.lambda_f[0], problem.lambda_s[0]]
LHS, RHS = level.sweep.get_scalar_problems_sweeper_mats(lambdas=lambdas)
unew = np.linalg.inv(LHS).dot(u0full + RHS.dot(u0full))
usweep = np.array([level.u[l].flatten() for l in range(1, nnodes + 1)])
assert (
                np.linalg.norm(unew - usweep, np.inf) < 1e-14
), "Single SDC sweeps in matrix and node-to-node formulation yield different results"
#
# Make sure the implemented update formula matches the matrix update formula
#
def test_updateformula(self):
for node_type, quad_type in zip(node_types, quad_types):
self.swparams['node_type'] = node_type
self.swparams['quad_type'] = quad_type
step, level, problem, nnodes = self.setupLevelStepProblem()
level.sweep.predict()
u0full = np.array([level.u[l].flatten() for l in range(1, nnodes + 1)])
# Perform update step in sweeper
level.sweep.update_nodes()
ustages = np.array([level.u[l].flatten() for l in range(1, nnodes + 1)])
# Compute end value through provided function
level.sweep.compute_end_point()
uend_sweep = level.uend
# Compute end value from matrix formulation
if level.sweep.params.do_coll_update:
uend_mat = self.pparams['u0'] + step.dt * level.sweep.coll.weights.dot(
ustages * (problem.lambda_s[0] + problem.lambda_f[0])
)
else:
uend_mat = ustages[-1]
assert (
                np.linalg.norm(uend_sweep - uend_mat, np.inf) < 1e-14
), "Update formula in sweeper gives different result than matrix update formula"
#
# Compute the exact collocation solution by matrix inversion and make sure it is a fixed point
#
def test_collocationinvariant(self):
for node_type, quad_type in zip(node_types, quad_types):
self.swparams['node_type'] = node_type
self.swparams['quad_type'] = quad_type
step, level, problem, nnodes = self.setupLevelStepProblem()
level.sweep.predict()
u0full = np.array([level.u[l].flatten() for l in range(1, nnodes + 1)])
QE, QI, Q = level.sweep.get_sweeper_mats()
# Build collocation matrix
Mcoll = np.eye(nnodes) - step.dt * Q * (problem.lambda_s[0] + problem.lambda_f[0])
# Solve collocation problem directly
ucoll = np.linalg.inv(Mcoll).dot(u0full)
# Put stages of collocation solution into level
for l in range(0, nnodes):
level.u[l + 1][:] = ucoll[l]
level.f[l + 1].impl[:] = problem.lambda_f[0] * ucoll[l]
level.f[l + 1].expl[:] = problem.lambda_s[0] * ucoll[l]
# Perform node-to-node SDC sweep
level.sweep.update_nodes()
lambdas = [problem.lambda_f[0], problem.lambda_s[0]]
LHS, RHS = level.sweep.get_scalar_problems_sweeper_mats(lambdas=lambdas)
# Make sure both matrix and node-to-node sweep leave collocation unaltered
unew = np.linalg.inv(LHS).dot(u0full + RHS.dot(ucoll))
assert (
                np.linalg.norm(unew - ucoll, np.inf) < 1e-14
), "Collocation solution not invariant under matrix SDC sweep"
unew_sweep = np.array([level.u[l].flatten() for l in range(1, nnodes + 1)])
            print(np.linalg.norm(unew_sweep - ucoll, np.inf))
assert (
                np.linalg.norm(unew_sweep - ucoll, np.inf) < 1e-14
), "Collocation solution not invariant under node-to-node sweep"
#
# Make sure that K node-to-node sweeps give the same result as K sweeps in matrix form and the single matrix formulation for K sweeps
#
def test_manysweepsequalmatrix(self):
for node_type, quad_type in zip(node_types, quad_types):
self.swparams['node_type'] = node_type
self.swparams['quad_type'] = quad_type
step, level, problem, nnodes = self.setupLevelStepProblem()
step.levels[0].sweep.predict()
u0full = np.array([level.u[l].flatten() for l in range(1, nnodes + 1)])
# Perform K node-to-node SDC sweep
K = 1 + np.random.randint(6)
for i in range(0, K):
level.sweep.update_nodes()
usweep = np.array([level.u[l].flatten() for l in range(1, nnodes + 1)])
lambdas = [problem.lambda_f[0], problem.lambda_s[0]]
LHS, RHS = level.sweep.get_scalar_problems_sweeper_mats(lambdas=lambdas)
unew = u0full
for i in range(0, K):
unew = np.linalg.inv(LHS).dot(u0full + RHS.dot(unew))
assert (
                np.linalg.norm(unew - usweep, np.inf) < 1e-14
), "Doing multiple node-to-node sweeps yields different result than same number of matrix-form sweeps"
Mat_sweep = level.sweep.get_scalar_problems_manysweep_mat(nsweeps=K, lambdas=lambdas)
usweep_onematrix = Mat_sweep.dot(u0full)
assert (
                np.linalg.norm(usweep_onematrix - usweep, np.inf) < 1e-14
            ), "Single-matrix multiple sweep formulation yields different result than multiple sweeps in node-to-node or matrix form"
#
# Make sure that update function for K sweeps computed from K-sweep matrix gives same result as K sweeps in node-to-node form plus compute_end_point
#
def test_manysweepupdate(self):
for node_type, quad_type in zip(node_types, quad_types):
self.swparams['node_type'] = node_type
self.swparams['quad_type'] = quad_type
step, level, problem, nnodes = self.setupLevelStepProblem()
step.levels[0].sweep.predict()
u0full = np.array([level.u[l].flatten() for l in range(1, nnodes + 1)])
# Perform K node-to-node SDC sweep
K = 1 + np.random.randint(6)
for i in range(0, K):
level.sweep.update_nodes()
# Fetch final value
level.sweep.compute_end_point()
uend_sweep = level.uend
lambdas = [problem.lambda_f[0], problem.lambda_s[0]]
# Build single matrix representing K sweeps
Mat_sweep = level.sweep.get_scalar_problems_manysweep_mat(nsweeps=K, lambdas=lambdas)
# Now build update function
if level.sweep.params.do_coll_update:
update = 1.0 + (problem.lambda_s[0] + problem.lambda_f[0]) * level.sweep.coll.weights.dot(
Mat_sweep.dot(np.ones(nnodes))
)
# Multiply u0 by value of update function to get end value directly
uend_matrix = update * self.pparams['u0']
else:
update = Mat_sweep.dot(np.ones(nnodes))
uend_matrix = (update * self.pparams['u0'])[-1]
print(abs(uend_matrix - uend_sweep))
assert (
abs(uend_matrix - uend_sweep) < 1e-14
), "Node-to-node sweep plus update yields different result than update function computed through K-sweep matrix"
#
# Make sure the update with do_coll_update=False reproduces last stage
#
def test_update_nocollupdate_laststage(self):
for node_type, quad_type in zip(node_types, quad_types):
self.swparams['node_type'] = node_type
self.swparams['quad_type'] = quad_type
self.swparams['do_coll_update'] = False
step, level, problem, nnodes = self.setupLevelStepProblem()
# if type of nodes does not have right endpoint as quadrature nodes, cannot set do_coll_update to False and perform this test
if not level.sweep.coll.right_is_node:
                continue  # this combination cannot be tested, move on to the next one
level.sweep.predict()
ulaststage = np.random.rand()
level.u[nnodes][:] = ulaststage
level.sweep.compute_end_point()
uend = level.uend
assert (
abs(uend - ulaststage) < 1e-14
), "compute_end_point with do_coll_update=False did not reproduce last stage value"
#
# Make sure that update with do_coll_update=False is identical to update formula with q=(0,...,0,1)
#
def test_updateformula_no_coll_update(self):
for node_type, quad_type in zip(node_types, quad_types):
self.swparams['node_type'] = node_type
self.swparams['quad_type'] = quad_type
self.swparams['do_coll_update'] = False
step, level, problem, nnodes = self.setupLevelStepProblem()
# if type of nodes does not have right endpoint as quadrature nodes, cannot set do_coll_update to False and perform this test
if not level.sweep.coll.right_is_node:
                continue  # this combination cannot be tested, move on to the next one
level.sweep.predict()
u0full = np.array([level.u[l].flatten() for l in range(1, nnodes + 1)])
# Perform update step in sweeper
level.sweep.update_nodes()
ustages = np.array([level.u[l].flatten() for l in range(1, nnodes + 1)])
# Compute end value through provided function
level.sweep.compute_end_point()
uend_sweep = level.uend
# Compute end value from matrix formulation
q = np.zeros(nnodes)
q[nnodes - 1] = 1.0
uend_mat = q.dot(ustages)
assert (
                np.linalg.norm(uend_sweep - uend_mat, np.inf) < 1e-14
), "For do_coll_update=False, update formula in sweeper gives different result than matrix update formula with q=(0,..,0,1)"
| 14,641 | 44.47205 | 152 | py |
pySDC | pySDC-master/pySDC/tests/test_Q_transfer.py | import pytest
import numpy as np
from numpy.polynomial.polynomial import polyval
from pySDC.core.Collocation import CollBase
import pySDC.helpers.transfer_helper as th
t_start = np.random.rand(1) * 0.2
t_end = 0.8 + np.random.rand(1) * 0.2
node_types = ['EQUID', 'LEGENDRE']
quad_types = ['GAUSS', 'LOBATTO', 'RADAU-RIGHT', 'RADAU-LEFT']
@pytest.mark.base
@pytest.mark.parametrize("node_type", node_types)
@pytest.mark.parametrize("quad_type", quad_types)
def test_Q_transfer(node_type, quad_type):
"""
A simple test program to check the order of the Q interpolation/restriction
"""
for M in range(3, 9):
Mfine = M
Mcoarse = int((Mfine + 1) / 2.0)
coll_fine = CollBase(Mfine, 0, 1, node_type=node_type, quad_type=quad_type)
coll_coarse = CollBase(Mcoarse, 0, 1, node_type=node_type, quad_type=quad_type)
assert (
coll_fine.left_is_node == coll_coarse.left_is_node
), 'ERROR: should be using the same class for coarse and fine Q'
fine_grid = coll_fine.nodes
coarse_grid = coll_coarse.nodes
for order in range(2, coll_coarse.num_nodes + 1):
Pcoll = th.interpolation_matrix_1d(fine_grid, coarse_grid, k=order, pad=0, equidist_nested=False)
Rcoll = th.restriction_matrix_1d(fine_grid, coarse_grid, k=order, pad=0)
for polyorder in range(1, order + 2):
coeff = np.random.rand(polyorder)
ufine = polyval(fine_grid, coeff)
ucoarse = polyval(coarse_grid, coeff)
uinter = Pcoll.dot(ucoarse)
urestr = Rcoll.dot(ufine)
err_inter = np.linalg.norm(uinter - ufine, np.inf)
err_restr = np.linalg.norm(urestr - ucoarse, np.inf)
if polyorder <= order:
assert err_inter < 5e-15, "ERROR: Q-interpolation order is not reached, got %s" % err_inter
assert err_restr < 3e-15, "ERROR: Q-restriction order is not reached, got %s" % err_restr
else:
assert err_inter > 2e-15, "ERROR: Q-interpolation order is higher than expected, got %s" % polyorder
@pytest.mark.base
@pytest.mark.parametrize("node_type", node_types)
@pytest.mark.parametrize("quad_type", quad_types)
def test_Q_transfer_minimal(node_type, quad_type):
"""
A simple test program to check the order of the Q interpolation/restriction for only 2 coarse nodes
"""
Mcoarse = 2
coll_coarse = CollBase(Mcoarse, 0, 1, node_type=node_type, quad_type=quad_type)
for M in range(3, 9):
Mfine = M
coll_fine = CollBase(Mfine, 0, 1, node_type=node_type, quad_type=quad_type)
assert (
coll_fine.left_is_node == coll_coarse.left_is_node
), 'ERROR: should be using the same class for coarse and fine Q'
fine_grid = coll_fine.nodes
coarse_grid = coll_coarse.nodes
Pcoll = th.interpolation_matrix_1d(fine_grid, coarse_grid, k=2, pad=0, equidist_nested=False)
Rcoll = th.restriction_matrix_1d(fine_grid, coarse_grid, k=2, pad=0)
for polyorder in range(1, 3):
coeff = np.random.rand(polyorder)
ufine = polyval(fine_grid, coeff)
ucoarse = polyval(coarse_grid, coeff)
uinter = Pcoll.dot(ucoarse)
urestr = Rcoll.dot(ufine)
err_inter = np.linalg.norm(uinter - ufine, np.inf)
err_restr = np.linalg.norm(urestr - ucoarse, np.inf)
if polyorder <= 2:
assert err_inter < 2e-15, "ERROR: Q-interpolation order is not reached, got %s" % err_inter
assert err_restr < 2e-15, "ERROR: Q-restriction order is not reached, got %s" % err_restr
else:
assert err_inter > 2e-15, "ERROR: Q-interpolation order is higher than expected, got %s" % polyorder
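# Minimal usage sketch (illustrative): with order parameter k=2, the
# interpolation matrix must reproduce linear functions exactly on any pair
# of grids, which serves as a handy smoke test for the helper routines above.
def _demo_interpolation_matrix():
    fine = np.linspace(0, 1, 5)
    coarse = np.linspace(0, 1, 3)
    Pcoll = th.interpolation_matrix_1d(fine, coarse, k=2, pad=0, equidist_nested=False)
    assert np.allclose(Pcoll.dot(2 * coarse + 1), 2 * fine + 1)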
| 3,881 | 37.435644 | 120 | py |
pySDC | pySDC-master/pySDC/tests/test_nodes.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 4 11:58:05 2023
@author: cpf5546
"""
import pytest
import numpy as np
from pySDC.core.Nodes import NodesGenerator
def chebyNodes(kind, n):
i = np.arange(n, dtype=float) + 1
i = i[-1::-1]
if kind == 1:
nodes = np.cos((i - 0.5) / n * np.pi)
elif kind == 2:
nodes = np.cos(i / (n + 1) * np.pi)
elif kind == 3:
nodes = np.cos((i - 0.5) / (n + 0.5) * np.pi)
elif kind == 4:
nodes = np.cos(i / (n + 0.5) * np.pi)
return tuple(nodes)
REF_NODES = {
'LEGENDRE': {
2: (-1 / 3**0.5, 1 / 3**0.5),
3: (-((3 / 5) ** 0.5), 0, (3 / 5) ** 0.5),
4: (
-((3 / 7 + 2 / 7 * (6 / 5) ** 0.5) ** 0.5),
-((3 / 7 - 2 / 7 * (6 / 5) ** 0.5) ** 0.5),
(3 / 7 - 2 / 7 * (6 / 5) ** 0.5) ** 0.5,
(3 / 7 + 2 / 7 * (6 / 5) ** 0.5) ** 0.5,
),
5: (
-1 / 3 * (5 + 2 * (10 / 7) ** 0.5) ** 0.5,
-1 / 3 * (5 - 2 * (10 / 7) ** 0.5) ** 0.5,
0,
1 / 3 * (5 - 2 * (10 / 7) ** 0.5) ** 0.5,
1 / 3 * (5 + 2 * (10 / 7) ** 0.5) ** 0.5,
),
}
}
nTests = list(REF_NODES['LEGENDRE'].keys())
for kind in [1, 2, 3, 4]:
REF_NODES[f'CHEBY-{kind}'] = {n: chebyNodes(kind, n) for n in nTests}
@pytest.mark.base
@pytest.mark.parametrize("node_type", REF_NODES.keys())
def test_nodesGeneration(node_type):
gen = NodesGenerator(node_type=node_type, quad_type='GAUSS')
ref = REF_NODES[node_type]
for n, nodes in ref.items():
assert np.allclose(nodes, gen.getNodes(n))
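# Cross-check sketch (illustrative): the kind-1 Chebyshev nodes generated by
# chebyNodes coincide with numpy's Chebyshev points of the first kind.
def _demo_cheby_crosscheck():
    n = 5
    mine = np.sort(chebyNodes(1, n))
    ref = np.sort(np.polynomial.chebyshev.chebpts1(n))
    assert np.allclose(mine, ref)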
| 1,630 | 26.183333 | 73 | py |
pySDC | pySDC-master/pySDC/tests/test_helpers.py | # coding=utf-8
"""
.. moduleauthor:: Torbjörn Klatt <[email protected]>
"""
import importlib
import inspect
import os
from pySDC.helpers.problem_helper import get_finite_difference_stencil
import pytest
import numpy as np
def get_modules_in_path(base_package):
"""
Finds all modules in given base package and its subpackages
Args:
base_package (str):
base package to walk through
Returns:
list of str: list of `package.module` strings ready to be used by `import`
"""
assert os.path.isdir(base_package), "Base package not found: %s" % base_package
modules = []
for root, dirs, files in os.walk(base_package):
package = root.replace('/', '.')
for f in files:
if f.endswith('.py'):
if f == '__init__.py':
continue
modules.append(package + '.' + f.replace('.py', ''))
return modules
def load_modules_from_base(base_package):
"""
Loads all modules of given package and its subpackages
The list of modules and subpackages is generated by :meth:`get_modules_in_path`.
Args:
base_package (str):
base package to walk through
Returns:
dict of modules: dict of loaded modules mapped to the `package.module` string
"""
modules = get_modules_in_path(base_package)
imported = {}
for m in modules:
print("Loading module: %s" % m)
imported.update({m: importlib.import_module(m)})
return imported
def get_derived_from_in_package(base_class, base_package):
"""
Finds all derived classes of given base class in given package
Uses :meth:`get_modules_in_path` to find all modules in given package and its subpackages,
then loads them with :meth:`load_modules_from_base` and tests all contained classes, whether
they are derived from `base_class`.
Args:
base_class (class):
base class as class object
base_package (str):
as used by :meth:`get_modules_in_path`
Returns:
list of class objects:
all classes in `base_package` with `base_class` in their `__mro__`
"""
imported = load_modules_from_base(base_package)
derived = []
for module, loaded in imported.items():
print("checking module '%s': %s -> %s" % (module, loaded, loaded.__dict__.keys()))
for obj in dir(loaded):
cls = getattr(loaded, obj)
if not inspect.isclass(cls):
continue
if base_class in cls.__mro__ and cls is not base_class:
derived.append(cls)
return derived
def fd_stencil_single(derivative, order, stencil_type):
"""
    Make a single test where we generate a finite difference stencil using the generic framework above and compare to
    hardcoded stencils that were implemented in a previous version of the code.
Args:
derivative (int): Order of the derivative
order (int): Order of accuracy
stencil_type (str): Type of the stencil
Returns:
None
"""
if derivative == 1:
if stencil_type == 'center':
if order == 2:
stencil = [-1.0, 0.0, 1.0]
zero_pos = 2
coeff = 1.0 / 2.0
elif order == 4:
stencil = [1.0, -8.0, 0.0, 8.0, -1.0]
zero_pos = 3
coeff = 1.0 / 12.0
elif order == 6:
stencil = [-1.0, 9.0, -45.0, 0.0, 45.0, -9.0, 1.0]
zero_pos = 4
coeff = 1.0 / 60.0
else:
raise NotImplementedError("Order " + str(order) + " not implemented.")
elif stencil_type == 'upwind':
if order == 1:
stencil = [-1.0, 1.0]
coeff = 1.0
zero_pos = 2
elif order == 2:
stencil = [1.0, -4.0, 3.0]
coeff = 1.0 / 2.0
zero_pos = 3
elif order == 3:
stencil = [1.0, -6.0, 3.0, 2.0]
coeff = 1.0 / 6.0
zero_pos = 3
elif order == 4:
stencil = [-5.0, 30.0, -90.0, 50.0, 15.0]
coeff = 1.0 / 60.0
zero_pos = 4
elif order == 5:
stencil = [3.0, -20.0, 60.0, -120.0, 65.0, 12.0]
coeff = 1.0 / 60.0
zero_pos = 5
else:
raise NotImplementedError("Order " + str(order) + " not implemented.")
else:
raise NotImplementedError(
f"No reference values for stencil_type \"{stencil_type}\" implemented for 1st derivative"
)
elif derivative == 2:
if stencil_type == 'center':
coeff = 1.0
if order == 2:
stencil = [1, -2, 1]
zero_pos = 2
elif order == 4:
stencil = [-1 / 12, 4 / 3, -5 / 2, 4 / 3, -1 / 12]
zero_pos = 3
elif order == 6:
stencil = [1 / 90, -3 / 20, 3 / 2, -49 / 18, 3 / 2, -3 / 20, 1 / 90]
zero_pos = 4
elif order == 8:
stencil = [-1 / 560, 8 / 315, -1 / 5, 8 / 5, -205 / 72, 8 / 5, -1 / 5, 8 / 315, -1 / 560]
zero_pos = 5
else:
raise NotImplementedError(
f"No reference values for stencil_type \"{stencil_type}\" implemented for 2nd derivative"
)
else:
raise NotImplementedError(f"No reference values for derivative {derivative} implemented")
# convert the reference values to a common way of writing with what we generate here
coeff_reference = np.array(stencil) * coeff
steps_reference = np.append(np.arange(-zero_pos + 1, 1), np.arange(1, zero_pos))[: len(coeff_reference)]
sorted_idx_reference = np.argsort(steps_reference)
coeff, steps = get_finite_difference_stencil(derivative=derivative, order=order, stencil_type=stencil_type)
sorted_idx = np.argsort(steps)
assert np.allclose(
coeff_reference[sorted_idx_reference], coeff[sorted_idx]
), f"Got different FD coefficients for derivative {derivative} with order {order} and stencil_type {stencil_type}! Expected {coeff_reference[sorted_idx_reference]}, got {coeff[sorted_idx]}."
assert np.allclose(
steps_reference[sorted_idx_reference], steps[sorted_idx]
), f"Got different FD offsets for derivative {derivative} with order {order} and stencil_type {stencil_type}! Expected {steps_reference[sorted_idx_reference]}, got {steps[sorted_idx]}."
@pytest.mark.base
def test_fd_stencils():
"""
Perform multiple tests for the generic FD stencil generating framework.
Returns:
None
"""
    # Compare against stencils that were previously hardcoded in the code
for order in [1, 2, 3, 4, 5]:
fd_stencil_single(1, order, 'upwind')
for order in [2, 4, 6]:
fd_stencil_single(1, order, 'center')
for order in [2, 4, 6, 8]:
fd_stencil_single(2, order, 'center')
# Make some tests comparing to Wikipedia at https://en.wikipedia.org/wiki/Finite_difference_coefficient
coeff, steps = get_finite_difference_stencil(derivative=1, order=3, stencil_type='forward')
expect_coeff = [-11.0 / 6.0, 3.0, -3.0 / 2.0, 1.0 / 3.0]
assert np.allclose(
coeff, expect_coeff
), f"Error in thrid order forward stencil for 1st derivative! Expected {expect_coeff}, got {coeff}."
coeff, steps = get_finite_difference_stencil(derivative=2, order=2, stencil_type='backward')
expect_coeff = [-1, 4, -5, 2][::-1]
assert np.allclose(
coeff, expect_coeff
), f"Error in second order backward stencil for 2nd derivative! Expected {expect_coeff}, got {coeff}."
# test if we get the correct result when we put in steps rather than a stencil_type
new_coeff, _ = get_finite_difference_stencil(derivative=2, order=2, steps=steps)
assert np.allclose(coeff, new_coeff), f"Error when setting steps yourself! Expected {expect_coeff}, got {coeff}."
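# Background sketch (illustrative; not necessarily how pySDC implements it):
# FD coefficients for given offsets follow from a small Taylor/Vandermonde
# system, e.g. the classic [1, -2, 1] stencil for u'' on steps {-1, 0, 1}.
def _demo_fd_from_taylor():
    from scipy.special import factorial
    steps = np.array([-1, 0, 1])
    derivative = 2
    powers = np.arange(len(steps))
    A = steps[None, :] ** powers[:, None] / factorial(powers)[:, None]
    rhs = np.zeros(len(steps))
    rhs[derivative] = 1.0
    assert np.allclose(np.linalg.solve(A, rhs), [1.0, -2.0, 1.0])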
| 8,112 | 35.877273 | 194 | py |
pySDC | pySDC-master/pySDC/tests/test_benchmarks/test_collocation.py | import pytest
import numpy as np
from pySDC.core.Collocation import CollBase
t_start = np.random.rand(1) * 0.2
t_end = 0.8 + np.random.rand(1) * 0.2
tolQuad = 1e-13
node_types = ['EQUID', 'LEGENDRE']
quad_types = ['GAUSS', 'LOBATTO', 'RADAU-RIGHT', 'RADAU-LEFT']
@pytest.mark.benchmark
def test_benchmark_collocation(benchmark):
def wrapper():
for node_type in node_types:
for quad_type in quad_types:
test_canintegratepolynomials(node_type, quad_type)
test_relateQandSmat(node_type, quad_type)
test_partialquadraturewithQ(node_type, quad_type)
test_partialquadraturewithS(node_type, quad_type)
benchmark(wrapper)
@pytest.mark.parametrize("node_type", node_types)
@pytest.mark.parametrize("quad_type", quad_types)
def test_canintegratepolynomials(node_type, quad_type):
for M in range(2, 13):
coll = CollBase(M, t_start, t_end, node_type=node_type, quad_type=quad_type)
# some basic consistency tests
assert np.size(coll.nodes) == np.size(coll.weights), (
"For node type " + coll.__class__.__name__ + ", number of entries in nodes and weights is different"
)
assert np.size(coll.nodes) == M, (
"For node type "
+ coll.__class__.__name__
+ ", requesting M nodes did not produce M entries in nodes and weights"
)
# generate random set of polynomial coefficients
poly_coeff = np.random.rand(coll.order - 1)
# evaluate polynomial at collocation nodes
poly_vals = np.polyval(poly_coeff, coll.nodes)
# use python's polyint function to compute anti-derivative of polynomial
poly_int_coeff = np.polyint(poly_coeff)
# Compute integral from 0.0 to 1.0
int_ex = np.polyval(poly_int_coeff, t_end) - np.polyval(poly_int_coeff, t_start)
# use quadrature rule to compute integral
int_coll = coll.evaluate(coll.weights, poly_vals)
        # For large values of M, substantial differences due to round-off error have to be expected
assert abs(int_ex - int_coll) < tolQuad, (
"For node type "
+ coll.__class__.__name__
+ ", failed to integrate polynomial of degree "
+ str(coll.order - 1)
+ " exactly. Error: %5.3e" % abs(int_ex - int_coll)
)
@pytest.mark.parametrize("node_type", node_types)
@pytest.mark.parametrize("quad_type", quad_types)
def test_relateQandSmat(node_type, quad_type):
for M in range(2, 13):
coll = CollBase(M, t_start, t_end, node_type=node_type, quad_type=quad_type)
Q = coll.Qmat[1:, 1:]
S = coll.Smat[1:, 1:]
assert np.shape(Q) == np.shape(S), (
"For node type " + coll.__class__.__name__ + ", Qmat and Smat have different shape"
)
shape = np.shape(Q)
assert shape[0] == shape[1], "For node type " + coll.__class__.__name__ + ", Qmat / Smat are not quadratic"
SSum = np.cumsum(S[:, :], axis=0)
for i in range(0, M):
assert np.linalg.norm(Q[i, :] - SSum[i, :]) < 1e-15, (
"For node type "
+ coll.__class__.__name__
+ ", Qmat and Smat did not satisfy the expected summation property."
)
@pytest.mark.parametrize("node_type", node_types)
@pytest.mark.parametrize("quad_type", quad_types)
def test_partialquadraturewithQ(node_type, quad_type):
for M in range(2, 13):
coll = CollBase(M, t_start, t_end, node_type=node_type, quad_type=quad_type)
Q = coll.Qmat[1:, 1:]
# as in TEST 1, create and integrate a polynomial with random coefficients, but now of degree M-1
degree = min(coll.order, M - 1)
poly_coeff = np.random.rand(degree)
poly_vals = np.polyval(poly_coeff, coll.nodes)
poly_int_coeff = np.polyint(poly_coeff)
for i in range(0, M):
int_ex = np.polyval(poly_int_coeff, coll.nodes[i]) - np.polyval(poly_int_coeff, t_start)
int_coll = np.dot(poly_vals, Q[i, :])
assert abs(int_ex - int_coll) < tolQuad, (
"For node type "
+ coll.__class__.__name__
+ ", partial quadrature from Qmat rule failed to integrate polynomial of degree M-1 exactly for M = "
+ str(M)
)
@pytest.mark.parametrize("node_type", node_types)
@pytest.mark.parametrize("quad_type", quad_types)
def test_partialquadraturewithS(node_type, quad_type):
for M in range(2, 13):
coll = CollBase(M, t_start, t_end, node_type=node_type, quad_type=quad_type)
S = coll.Smat[1:, 1:]
# as in TEST 1, create and integrate a polynomial with random coefficients, but now of degree M-1
degree = min(coll.order, M - 1)
poly_coeff = np.random.rand(degree)
poly_vals = np.polyval(poly_coeff, coll.nodes)
poly_int_coeff = np.polyint(poly_coeff)
for i in range(1, M):
int_ex = np.polyval(poly_int_coeff, coll.nodes[i]) - np.polyval(poly_int_coeff, coll.nodes[i - 1])
int_coll = np.dot(poly_vals, S[i, :])
assert abs(int_ex - int_coll) < tolQuad, (
"For node type "
+ coll.__class__.__name__
+ ", partial quadrature rule from Smat failed to integrate polynomial of degree M-1 exactly for M = "
+ str(M)
)
| 5,452 | 41.937008 | 117 | py |
pySDC | pySDC-master/pySDC/tests/test_benchmarks/test_PFASST_NumPy.py | import pytest
@pytest.mark.benchmark
def test_B(benchmark):
from pySDC.tutorial.step_5.B_my_first_PFASST_run import main as main_B
benchmark(main_B)
| 160 | 16.888889 | 74 | py |
pySDC | pySDC-master/pySDC/tests/test_benchmarks/__init__.py | 0 | 0 | 0 | py |
|
pySDC | pySDC-master/pySDC/tests/test_convergence_controllers/test_error_convergence_controllers.py | import pytest
def run_problem(maxiter=1, num_procs=1, n_steps=1, error_estimator=None, params=None, restol=-1):
import numpy as np
from pySDC.implementations.problem_classes.TestEquation_0D import testequation0d
from pySDC.implementations.sweeper_classes.generic_implicit import generic_implicit
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.implementations.hooks.log_errors import (
LogLocalErrorPostIter,
LogGlobalErrorPostIter,
LogLocalErrorPostStep,
)
# initialize level parameters
level_params = {}
level_params['dt'] = 6e-3
level_params['restol'] = restol
# initialize sweeper parameters
sweeper_params = {}
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['num_nodes'] = 3
sweeper_params['QI'] = 'IE'
# sweeper_params['initial_guess'] = 'random'
# build lambdas
re = np.linspace(-30, -1, 10)
im = np.linspace(-50, 50, 11)
lambdas = np.array([[complex(re[i], im[j]) for i in range(len(re))] for j in range(len(im))]).reshape(
(len(re) * len(im))
)
problem_params = {
'lambdas': lambdas,
'u0': 1.0 + 0.0j,
}
# initialize step parameters
step_params = dict()
step_params['maxiter'] = maxiter
# convergence controllers
convergence_controllers = {error_estimator: params}
# initialize controller parameters
controller_params = {}
controller_params['logger_level'] = 15
controller_params['hook_class'] = [LogLocalErrorPostIter, LogGlobalErrorPostIter, LogLocalErrorPostStep]
controller_params['mssdc_jac'] = False
# fill description dictionary for easy step instantiation
description = {}
description['problem_class'] = testequation0d
description['problem_params'] = problem_params
description['sweeper_class'] = generic_implicit
description['sweeper_params'] = sweeper_params
description['level_params'] = level_params
description['step_params'] = step_params
description['convergence_controllers'] = convergence_controllers
# set time parameters
t0 = 0.0
# instantiate controller
controller = controller_nonMPI(num_procs=num_procs, controller_params=controller_params, description=description)
# get initial values on finest level
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(t0)
# call main function to get things done...
uend, stats = controller.run(u0=uinit, t0=t0, Tend=n_steps * level_params['dt'])
return stats
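# Concept sketch for the embedded error estimates tested below (illustrative):
# the difference between a low- and a high-order result for the same step
# estimates the local error of the low-order method. Dahlquist example with
# explicit Euler (order 1) embedded in Heun's method (order 2):
def _demo_embedded_estimate():
    import numpy as np
    lam, dt, u0 = -1.0, 1e-2, 1.0
    u_euler = u0 + dt * lam * u0
    u_heun = u0 + dt / 2 * (lam * u0 + lam * u_euler)
    estimate = abs(u_heun - u_euler)
    true_error = abs(u0 * np.exp(lam * dt) - u_euler)
    assert abs(estimate - true_error) / true_error < 0.05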
@pytest.mark.base
def test_EstimateExtrapolationErrorNonMPI_serial(order_time_marching=2, n_steps=3, thresh=0.15):
from pySDC.implementations.convergence_controller_classes.estimate_extrapolation_error import (
EstimateExtrapolationErrorNonMPI,
)
from pySDC.helpers.stats_helper import get_sorted, filter_stats, sort_stats
params = {
'no_storage': False,
}
preperatory_steps = (order_time_marching + 3) // 2
stats = run_problem(
maxiter=order_time_marching,
n_steps=n_steps + preperatory_steps,
error_estimator=EstimateExtrapolationErrorNonMPI,
params=params,
num_procs=1,
)
e_local = sort_stats(filter_stats(stats, type='e_local_post_iteration', iter=order_time_marching), sortby='time')
e_estimated = get_sorted(stats, type='error_extrapolation_estimate')
rel_diff = [
abs(e_local[i][1] - e_estimated[i][1]) / e_estimated[i][1]
for i in range(len(e_estimated))
if e_estimated[i][1] is not None
]
assert all(
me < thresh for me in rel_diff
), f'Extrapolated error estimate failed! Relative difference to true error: {rel_diff}'
@pytest.mark.base
@pytest.mark.parametrize('no_storage', [True, False])
def test_EstimateExtrapolationErrorNonMPI_parallel(
no_storage, order_time_marching=4, n_steps=3, num_procs=3, thresh=0.50
):
from pySDC.implementations.convergence_controller_classes.estimate_extrapolation_error import (
EstimateExtrapolationErrorNonMPI,
)
from pySDC.helpers.stats_helper import get_sorted, filter_stats, sort_stats
params = {
'no_storage': no_storage,
}
    preparatory_steps = (order_time_marching + 3) // 2
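    # without storage, the history is only available within one parallel block,
    # so we need at least one process more than there are preparatory steps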
if no_storage:
        num_procs = max(num_procs, preparatory_steps + 1)
stats = run_problem(
maxiter=order_time_marching,
        n_steps=n_steps + preparatory_steps,
error_estimator=EstimateExtrapolationErrorNonMPI,
params=params,
num_procs=num_procs,
)
e_local = sort_stats(filter_stats(stats, type='e_local_post_iteration', iter=order_time_marching), sortby='time')
e_estimated = get_sorted(stats, type='error_extrapolation_estimate')
rel_diff = [
abs(e_local[i][1] - e_estimated[i][1]) / e_local[i][1]
for i in range(len(e_estimated))
if e_estimated[i][1] is not None
]
assert all(
me < thresh for me in rel_diff
), f'Extrapolated error estimate failed! Relative difference to true error: {rel_diff}'
@pytest.mark.base
def test_EstimateEmbeddedErrorSerial(order_time_marching=3, n_steps=6, thresh=0.05):
from pySDC.implementations.convergence_controller_classes.estimate_embedded_error import EstimateEmbeddedError
from pySDC.helpers.stats_helper import get_sorted, filter_stats, sort_stats
params = {}
stats = run_problem(
maxiter=order_time_marching, n_steps=n_steps, error_estimator=EstimateEmbeddedError, params=params, num_procs=1
)
e_local = sort_stats(
filter_stats(stats, type='e_local_post_iteration', iter=order_time_marching - 1), sortby='time'
)
e_estimated = get_sorted(stats, type='error_embedded_estimate')
rel_diff = [abs(e_local[i][1] - e_estimated[i][1]) / e_local[i][1] for i in range(len(e_estimated))]
assert all(
me < thresh for me in rel_diff
), f'Embedded error estimate failed! Relative difference to true error: {rel_diff}'
@pytest.mark.base
def test_EstimateEmbeddedErrorParallel(order_time_marching=3, num_procs=3, thresh=0.10):
from pySDC.implementations.convergence_controller_classes.estimate_embedded_error import EstimateEmbeddedError
from pySDC.helpers.stats_helper import get_sorted, filter_stats, sort_stats
params = {}
stats = run_problem(
maxiter=order_time_marching,
n_steps=num_procs,
error_estimator=EstimateEmbeddedError,
params=params,
num_procs=num_procs,
)
e_global = sort_stats(
filter_stats(stats, type='e_global_post_iteration', iter=order_time_marching - 1), sortby='time'
)
e_estimated = get_sorted(stats, type='error_embedded_estimate')
rel_diff = [abs(e_global[i][1] - e_estimated[i][1]) / e_global[i][1] for i in range(len(e_estimated))]
assert all(
me < thresh for me in rel_diff
), f'Embedded error estimate failed! Relative difference to true error: {rel_diff}'
@pytest.mark.base
def test_EstimateEmbeddedErrorCollocation(n_steps=6, thresh=0.01):
from pySDC.implementations.convergence_controller_classes.estimate_embedded_error import (
EstimateEmbeddedErrorCollocation,
)
from pySDC.helpers.stats_helper import get_sorted, filter_stats, sort_stats
adaptive_coll_params = {
'num_nodes': [3, 2],
}
params = {'adaptive_coll_params': adaptive_coll_params}
stats = run_problem(
maxiter=99,
n_steps=n_steps,
error_estimator=EstimateEmbeddedErrorCollocation,
params=params,
num_procs=1,
restol=1e-13,
)
e_estimated = get_sorted(stats, type='error_embedded_estimate_collocation')
e_local = sort_stats(filter_stats(stats, type='e_local_post_step'), sortby='time')
rel_diff = [abs(e_local[i][1] - e_estimated[i][1]) / e_local[i][1] for i in range(len(e_estimated))]
assert all(
me < thresh for me in rel_diff
), f'Embedded error estimate failed! Relative difference to true error: {rel_diff}'
if __name__ == '__main__':
test_EstimateEmbeddedErrorCollocation()
| 8,121 | 34.008621 | 119 | py |
pySDC | pySDC-master/pySDC/tests/test_convergence_controllers/test_InterpolateBetweenRestarts.py | import pytest
from pySDC.core.Hooks import hooks
from pySDC.core.Lagrange import LagrangeApproximation
from pySDC.core.Collocation import CollBase
import numpy as np
class LogInterpolation(hooks):
"""
    Log the solution when a step is about to be restarted, the solution interpolated to the
    new nodes, and the solution that ends up at the nodes after the restart.
"""
def __init__(self):
super().__init__()
self.log_u_now = False
def pre_iteration(self, step, level_number):
if self.log_u_now:
L = step.levels[level_number]
self.add_to_stats(
process=step.status.slot,
time=L.time,
level=L.level_index,
iter=step.status.iter,
sweep=L.status.sweep,
type='u_inter',
value=L.u.copy(),
)
self.add_to_stats(
process=step.status.slot,
time=L.time,
level=L.level_index,
iter=step.status.iter,
sweep=L.status.sweep,
type='nodes_inter',
value=L.sweep.coll.nodes * L.params.dt,
)
self.log_u_now = False
    def post_step(self, step, level_number):
        super().post_step(step, level_number)
L = step.levels[level_number]
if step.status.restart:
self.log_u_now = True
self.add_to_stats(
process=step.status.slot,
time=L.time,
level=L.level_index,
iter=step.status.iter,
sweep=L.status.sweep,
type='u_before_interpolation',
value=L.u.copy(),
)
self.add_to_stats(
process=step.status.slot,
time=L.time,
level=L.level_index,
iter=step.status.iter,
sweep=L.status.sweep,
type='nodes',
value=L.sweep.coll.nodes * L.params.dt,
)
# double check
nodes_old = L.sweep.coll.nodes.copy()
nodes_new = L.sweep.coll.nodes.copy() * L.status.dt_new / L.params.dt
interpolator = LagrangeApproximation(points=np.append(0, nodes_old))
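            # the interpolation matrix maps the values at the old nodes (prepended with the left
            # interval boundary) to the new, shrunken nodes, reproducing what the
            # InterpolateBetweenRestarts convergence controller is supposed to do internally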
self.add_to_stats(
process=step.status.slot,
time=L.time,
level=L.level_index,
iter=step.status.iter,
sweep=L.status.sweep,
type='u_inter_double_check',
value=(interpolator.getInterpolationMatrix(np.append(0, nodes_new)) @ L.u[:])[:],
)
class CheckInterpolationOrder(hooks):
    def __init__(self):
        super().__init__()
        self.mess_with_solution = True
self.messed_with_solution = False
self.p = None
self.uStart = None
def post_iteration(self, step, level_number):
"""
Replace the solution by a random polynomial
"""
level = step.levels[level_number]
nodes = np.append(0, level.sweep.coll.nodes) * level.dt
nNodes = len(nodes)
if self.mess_with_solution:
self.p = np.polynomial.Polynomial(np.random.rand(nNodes))
self.uStart = self.p(nodes.copy())
for i in range(nNodes):
level.u[i][:] = self.uStart[i]
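            # force a restart with a smaller, random step size so the controller
            # has to interpolate the polynomial to the new nodes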
step.status.force_done = True
step.status.restart = True
self.mess_with_solution = False
self.messed_with_solution = True
level.status.dt_new = np.random.rand(1)[0] * level.params.dt
def pre_iteration(self, step, level_number):
"""
Check that the polynomial has been interpolated exactly
"""
level = step.levels[level_number]
nodes = np.append(0, level.sweep.coll.nodes) * level.dt
if self.messed_with_solution:
u_inter = [me[0] for me in level.u]
u_analytic = self.p(nodes)
self.messed_with_solution = False
assert np.allclose(
u_inter, u_analytic
), f"Interpolation of polynomial was not exact, got {u_inter} and {u_analytic}"
# stop the simulation by setting the time parameters
level.status.time = 1e1
level.params.dt = 1e1
step.status.force_done = True
def run_vdp(hook, adaptivity=True):
from pySDC.implementations.convergence_controller_classes.adaptivity import Adaptivity
from pySDC.implementations.convergence_controller_classes.interpolate_between_restarts import (
InterpolateBetweenRestarts,
)
import numpy as np
from pySDC.implementations.problem_classes.Van_der_Pol_implicit import vanderpol
from pySDC.implementations.sweeper_classes.generic_implicit import generic_implicit
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
# initialize level parameters
level_params = dict()
level_params['dt'] = 1e-2
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['num_nodes'] = 3
sweeper_params['QI'] = 'IE'
sweeper_params['initial_guess'] = 'spread'
problem_params = {
'mu': 5.0,
'newton_tol': 1e-9,
'newton_maxiter': 99,
'u0': np.array([2.0, 0.0]),
'crash_at_maxiter': False,
}
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 4
# convergence controllers
convergence_controllers = {
InterpolateBetweenRestarts: {},
}
if adaptivity:
convergence_controllers[Adaptivity] = {'e_tol': 1e-7}
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 30
controller_params['hook_class'] = hook
controller_params['mssdc_jac'] = False
# fill description dictionary for easy step instantiation
description = dict()
description['problem_class'] = vanderpol
description['problem_params'] = problem_params
description['sweeper_class'] = generic_implicit
description['sweeper_params'] = sweeper_params
description['level_params'] = level_params
description['step_params'] = step_params
description['convergence_controllers'] = convergence_controllers
# set time parameters
t0 = 0.0
# instantiate controller
controller = controller_nonMPI(num_procs=1, controller_params=controller_params, description=description)
# get initial values on finest level
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(t0)
uend, stats = controller.run(u0=uinit, t0=t0, Tend=1e-2)
return stats
@pytest.mark.base
def test_InterpolateBetweenRestarts(plotting=False):
"""
    Check that the solution is interpolated to the new nodes correctly and ends up in the next step the way we want it to.
We also check that the residual at the end of the step after the restart is smaller than before.
"""
from pySDC.helpers.stats_helper import get_sorted, filter_stats
stats = run_vdp(LogInterpolation)
u = {
'before': get_sorted(stats, type='u_before_interpolation'),
'after': get_sorted(stats, type='u_inter'),
'double_check': get_sorted(stats, type='u_inter_double_check'),
}
nodes = {
'before': get_sorted(stats, type='nodes'),
'after': get_sorted(stats, type='nodes_inter'),
'double_check': get_sorted(stats, type='nodes_inter'),
}
residual = get_sorted(stats, type='residual_post_step')
for t in np.unique([me[0] for me in residual]):
_res = np.array([me[1] for me in residual if me[0] == t])
if len(_res) > 1:
contraction = _res[1:] / _res[:-1]
assert all(
contraction < 6e-3
), f"Residual was not decreased as much as expected! Got {max(contraction):.2e}. Without interpolation we expect about 0.15, but with interpolation we want about 6e-3!"
for i in range(len(u['before'])):
# check the nodes
assert nodes['after'][i][1][-1] < nodes['before'][i][1][-1], "Step size was not reduced!"
# check the solution
for j in range(len(u['before'][i][1])):
assert (
abs(u['double_check'][i][1][j] - u['after'][i][1][j]) < 1e-12
), f"The interpolated solution from the convergence controller is not right! Expected {u['double_check'][i][1][j]}, got {u['after'][i][1][j]}"
if plotting:
import matplotlib.pyplot as plt
fig, axs = plt.subplots(2, 1, sharex=True)
colors = {
'before': 'teal',
'after': 'violet',
'double_check': 'black',
}
ls = {'before': '-', 'after': '--', 'double_check': '-.'}
for i in [0, 1]:
for key in nodes.keys():
axs[0].plot(
np.append([0], nodes[key][i][1]), [me[1] for me in u[key][i][1]], color=colors[key], ls=ls[key]
)
axs[1].plot(
np.append([0], nodes[key][i][1]), [me[0] for me in u[key][i][1]], color=colors[key], ls=ls[key]
)
axs[1].set_xlabel('$t$')
axs[0].set_ylabel('$u_t$')
axs[1].set_ylabel('$u$')
plt.show()
@pytest.mark.base
def test_interpolation_order():
"""
Replace the solution with a polynomial and check that it is interpolated exactly
"""
from pySDC.helpers.stats_helper import get_sorted, filter_stats
run_vdp(CheckInterpolationOrder, False)
if __name__ == "__main__":
test_interpolation_order()
test_InterpolateBetweenRestarts(plotting=True)
| 9,733 | 33.51773 | 180 | py |
pySDC | pySDC-master/pySDC/tests/test_convergence_controllers/test_Newton_inexactness.py | import pytest
@pytest.mark.base
def test_Newton_inexactness(ratio=1e-2, min_tol=1e-11, max_tol=1e-6):
import numpy as np
from pySDC.implementations.convergence_controller_classes.inexactness import NewtonInexactness
from pySDC.implementations.problem_classes.Van_der_Pol_implicit import vanderpol
from pySDC.implementations.sweeper_classes.generic_implicit import generic_implicit
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.helpers.stats_helper import get_sorted, filter_stats
from pySDC.core.Hooks import hooks
class log_newton_tol(hooks):
def pre_iteration(self, step, level_number):
lvl = step.levels[level_number]
self.add_to_stats(
process=step.status.slot,
time=step.time,
level=level_number,
iter=step.status.iter,
sweep=lvl.status.sweep,
type='newton_tol_post_spread',
value=lvl.prob.newton_tol,
)
def post_iteration(self, step, level_number):
lvl = step.levels[level_number]
self.add_to_stats(
process=step.status.slot,
time=step.time,
level=level_number,
iter=step.status.iter,
sweep=lvl.status.sweep,
type='newton_tol',
value=lvl.prob.newton_tol,
)
# initialize level parameters
level_params = {}
level_params['dt'] = 1e-2
level_params['restol'] = 1e-10
# initialize sweeper parameters
sweeper_params = {}
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['num_nodes'] = 3
sweeper_params['QI'] = 'LU'
problem_params = {
'mu': 5.0,
'newton_tol': 1e-9,
'newton_maxiter': 99,
'u0': np.array([2.0, 0.0]),
}
# initialize step parameters
step_params = {}
step_params['maxiter'] = 99
# initialize controller parameters
controller_params = {}
controller_params['logger_level'] = 30
controller_params['hook_class'] = log_newton_tol
controller_params['mssdc_jac'] = False
convergence_controllers = {}
convergence_controllers[NewtonInexactness] = {'ratio': ratio, 'min_tol': min_tol, 'max_tol': max_tol}
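    # the controller ties the Newton tolerance to the SDC residual (tol ~ ratio * residual),
    # bounded from below by min_tol and from above by max_tol, which the asserts below verify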
# fill description dictionary for easy step instantiation
description = {}
description['problem_class'] = vanderpol
description['problem_params'] = problem_params
description['sweeper_class'] = generic_implicit
description['sweeper_params'] = sweeper_params
description['level_params'] = level_params
description['step_params'] = step_params
description['convergence_controllers'] = convergence_controllers
# set time parameters
t0 = 0.0
# instantiate controller
controller = controller_nonMPI(num_procs=1, controller_params=controller_params, description=description)
# get initial values on finest level
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(t0)
# call main function to get things done...
uend, stats = controller.run(u0=uinit, t0=0, Tend=2 * level_params['dt'])
for me in get_sorted(stats, type='newton_tol'):
stats_now = filter_stats(stats, time=me[0])
tols = get_sorted(stats_now, type='newton_tol', sortby='iter')
res = get_sorted(stats_now, type='residual_post_iteration', sortby='iter')
for i in range(len(tols) - 1):
expect = res[i][1] * ratio
assert (
tols[i + 1][1] <= expect or expect < min_tol
), f'Expected Newton tolerance smaller {expect:.2e}, but got {tols[i+1][1]:.2e} in iteration {i+1}!'
assert (
tols[i + 1][1] <= max_tol
), f'Exceeded maximal allowed Newton tolerance {max_tol:.2e} in iteration {i+1} with {tols[i+1][1]:.2e}!'
if __name__ == "__main__":
test_Newton_inexactness()
| 3,971 | 35.109091 | 117 | py |
pySDC | pySDC-master/pySDC/tests/test_convergence_controllers/test_check_convergence.py | import pytest
def run_heat(maxiter=99, restol=-1, e_tol=-1):
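    """
    Run a single step of the 1D heat equation. Iterations stop at whichever criterion is
    active: the iteration budget `maxiter`, the residual tolerance `restol`, or the
    embedded error (increment) tolerance `e_tol`.
    """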
from pySDC.implementations.problem_classes.HeatEquation_ND_FD import heatNd_unforced
from pySDC.implementations.sweeper_classes.generic_implicit import generic_implicit
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.implementations.hooks.log_embedded_error_estimate import LogEmbeddedErrorEstimatePostIter
# initialize level parameters
level_params = dict()
level_params['dt'] = 0.05
level_params['restol'] = restol
level_params['e_tol'] = e_tol
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['num_nodes'] = 3
sweeper_params['QI'] = 'IE'
problem_params = {
'freq': 2,
'nvars': 2**9,
'nu': 1.0,
'stencil_type': 'center',
'order': 6,
'bc': 'periodic',
'solver_type': 'direct',
'lintol': None,
'liniter': None,
}
# initialize step parameters
step_params = dict()
step_params['maxiter'] = maxiter
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 30
controller_params['hook_class'] = LogEmbeddedErrorEstimatePostIter
# fill description dictionary for easy step instantiation
description = dict()
description['problem_class'] = heatNd_unforced
description['problem_params'] = problem_params
description['sweeper_class'] = generic_implicit
description['sweeper_params'] = sweeper_params
description['level_params'] = level_params
description['step_params'] = step_params
# instantiate controller
controller = controller_nonMPI(num_procs=1, controller_params=controller_params, description=description)
# get initial values on finest level
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(0.0)
# call main function to get things done...
uend, stats = controller.run(u0=uinit, t0=0.0, Tend=level_params['dt'])
    # residual = np.max([me[1] for me in get_sorted(stats, type='residual_post_step')])
return stats, controller
@pytest.mark.base
@pytest.mark.parametrize("maxiter", [1, 5, 50])
def test_convergence_by_iter(maxiter):
import numpy as np
from pySDC.helpers.stats_helper import get_sorted
stats, _ = run_heat(maxiter=maxiter)
niter = np.mean([me[1] for me in get_sorted(stats, type='niter')])
assert niter == maxiter, f"Wrong number of iterations! Expected {maxiter}, but got {niter}!"
@pytest.mark.base
@pytest.mark.parametrize("e_tol", [1e-3, 1e-5, 1e-10])
def test_convergence_by_increment(e_tol):
import numpy as np
from pySDC.helpers.stats_helper import get_sorted
stats, _ = run_heat(e_tol=e_tol)
e_em = [me[1] for me in get_sorted(stats, type='error_embedded_estimate_post_iteration', sortby='iter')]
e_em_before_convergence = np.min(e_em[:-1])
e_em_at_convergence = e_em[-1]
assert e_em_before_convergence > e_tol, "Embedded error estimate was below threshold before convergence!"
assert e_em_at_convergence <= e_tol, "Step terminated before convergence by increment was achieved!"
@pytest.mark.base
@pytest.mark.parametrize("restol", [1e-3, 1e-5, 1e-10])
def test_convergence_by_residual(restol):
import numpy as np
from pySDC.helpers.stats_helper import get_sorted
stats, _ = run_heat(restol=restol)
res = [me[1] for me in get_sorted(stats, type='residual_post_iteration', sortby='iter')]
res_before_convergence = np.min(res[:-1])
res_at_convergence = res[-1]
assert res_before_convergence > restol, "Residual was below threshold before convergence!"
assert res_at_convergence <= restol, "Step terminated before convergence by residual was achieved!"
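if __name__ == '__main__':
    # convenience entry point outside of pytest, mirroring the other test modules in this repo
    test_convergence_by_iter(5)
    test_convergence_by_increment(1e-5)
    test_convergence_by_residual(1e-5)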
| 3,895 | 34.099099 | 109 | py |
pySDC | pySDC-master/pySDC/tests/test_hooks/test_log_work.py | import pytest
def run_Lorenz(useMPI, maxiter=4, newton_maxiter=5, num_procs=1):
from pySDC.implementations.hooks.log_work import LogWork
from pySDC.implementations.sweeper_classes.generic_implicit import generic_implicit
from pySDC.implementations.problem_classes.Lorenz import LorenzAttractor
from pySDC.helpers.stats_helper import get_sorted
num_steps = 2
# initialize level parameters
level_params = {}
level_params['dt'] = 1e-2
level_params['restol'] = -1
# initialize sweeper parameters
sweeper_params = {}
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['num_nodes'] = 1
sweeper_params['QI'] = 'IE'
problem_params = {
'newton_tol': -1, # force to iterate to `newton_maxiter`
'newton_maxiter': newton_maxiter,
}
# initialize step parameters
step_params = {}
step_params['maxiter'] = maxiter
# initialize controller parameters
controller_params = {}
controller_params['logger_level'] = 30
controller_params['hook_class'] = LogWork
controller_params['mssdc_jac'] = False
# fill description dictionary for easy step instantiation
description = {}
description['problem_class'] = LorenzAttractor
description['problem_params'] = problem_params
description['sweeper_class'] = generic_implicit
description['sweeper_params'] = sweeper_params
description['level_params'] = level_params
description['step_params'] = step_params
# set time parameters
t0 = 0.0
# instantiate controller
if useMPI:
from mpi4py import MPI
from pySDC.implementations.controller_classes.controller_MPI import controller_MPI
comm = MPI.COMM_WORLD
num_procs = comm.size
controller = controller_MPI(controller_params=controller_params, description=description, comm=comm)
P = controller.S.levels[0].prob
else:
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
comm = None
controller = controller_nonMPI(
num_procs=num_procs, controller_params=controller_params, description=description
)
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(t0)
uend, stats = controller.run(u0=uinit, t0=t0, Tend=num_steps * num_procs * level_params['dt'])
for i in range(num_procs):
res = {
key: [me[1] for me in get_sorted(stats, type=key, comm=comm, process=i)]
for key in ['work_newton', 'work_rhs']
}
expected = {}
if i == 0:
# we evaluate all nodes when beginning the step and then every node except the initial conditions in every iteration
expected['work_rhs'] = maxiter * sweeper_params['num_nodes'] + sweeper_params['num_nodes'] + 1
else:
            # Additionally, we re-evaluate what we received: once before we start iterating, then whenever we start a new iteration, and again in `it_check`
expected['work_rhs'] = maxiter * (sweeper_params['num_nodes'] + 2) + sweeper_params['num_nodes'] + 2
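        # with newton_tol = -1 every node solve runs the full newton_maxiter iterations,
        # once per collocation node and SDC iteration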
expected['work_newton'] = newton_maxiter * sweeper_params['num_nodes'] * maxiter
for key, val in res.items():
assert all(
me == expected[key] for me in val
), f'Error in LogWork hook when recording \"{key}\" for process {i}! Got {val}, expected {expected[key]}!'
return None
@pytest.mark.mpi4py
@pytest.mark.parametrize("num_procs", [1, 3])
@pytest.mark.parametrize("maxiter", [0, 3])
@pytest.mark.parametrize("newton_maxiter", [1, 3])
def test_LogWork_MPI(num_procs, newton_maxiter, maxiter):
import os
import subprocess
kwargs = {}
kwargs['useMPI'] = 1
kwargs['num_procs'] = num_procs
kwargs['newton_maxiter'] = newton_maxiter
kwargs['maxiter'] = maxiter
# Set python path once
my_env = os.environ.copy()
my_env['PYTHONPATH'] = '../../..:.'
my_env['COVERAGE_PROCESS_START'] = 'pyproject.toml'
# run code with different number of MPI processes
kwargs_str = "".join([f"{key}:{item} " for key, item in kwargs.items()])
cmd = f"mpirun -np {num_procs} python {__file__} {kwargs_str}".split()
p = subprocess.Popen(cmd, env=my_env, cwd=".")
p.wait()
assert p.returncode == 0, 'ERROR: did not get return code 0, got %s with %2i processes' % (
p.returncode,
num_procs,
)
@pytest.mark.base
@pytest.mark.parametrize("num_procs", [1, 3])
@pytest.mark.parametrize("maxiter", [0, 3])
@pytest.mark.parametrize("newton_maxiter", [1, 3])
def test_LogWork_nonMPI(num_procs, newton_maxiter, maxiter):
kwargs = {}
kwargs['useMPI'] = 0
kwargs['num_procs'] = num_procs
kwargs['newton_maxiter'] = newton_maxiter
kwargs['maxiter'] = maxiter
run_Lorenz(**kwargs)
if __name__ == "__main__":
import sys
kwargs = {me.split(':')[0]: int(me.split(':')[1]) for me in sys.argv[1:]}
run_Lorenz(**kwargs)
| 4,970 | 33.047945 | 151 | py |
pySDC | pySDC-master/pySDC/tests/test_tutorials/test_step_5.py | import pytest
@pytest.mark.base
def test_A():
from pySDC.tutorial.step_5.A_multistep_multilevel_hierarchy import main as main_A
main_A()
@pytest.mark.base
def test_B():
from pySDC.tutorial.step_5.B_my_first_PFASST_run import main as main_B
main_B()
@pytest.mark.base
def test_C():
from pySDC.tutorial.step_5.C_advection_and_PFASST import main as main_C
main_C()
| 395 | 16.217391 | 85 | py |
pySDC | pySDC-master/pySDC/tests/test_tutorials/test_step_6.py | import pkg_resources
import pytest
@pytest.mark.mpi4py
def test_A():
from pySDC.tutorial.step_6.A_run_non_MPI_controller import main as main_A
main_A(num_proc_list=[1], fname='step_6_A_sl_out.txt', multi_level=False)
main_A(num_proc_list=[1, 2, 4, 8], fname='step_6_A_ml_out.txt', multi_level=True)
@pytest.mark.mpi4py
def test_B():
from pySDC.tutorial.step_6.B_odd_temporal_distribution import main as main_B
main_B()
@pytest.mark.mpi4py
def test_C():
from pySDC.tutorial.step_6.C_MPI_parallelization import main as main_C
installed_packages = [d for d in pkg_resources.working_set]
flat_installed_packages = [package.project_name for package in installed_packages]
if "mpi4py" in flat_installed_packages:
cwd = 'pySDC/tutorial/step_6'
main_C(cwd)
with open('data/step_6_C1_out.txt', 'r') as file1:
with open('data/step_6_A_ml_out.txt', 'r') as file2:
diff = set(file1).difference(file2)
diff.discard('\n')
for line in diff:
assert 'iterations' not in line, (
'ERROR: iteration counts differ between MPI and nonMPI for even ' 'distribution of time-steps'
)
with open('data/step_6_C2_out.txt', 'r') as file1:
with open('data/step_6_B_out.txt', 'r') as file2:
diff = set(file1).difference(file2)
diff.discard('\n')
for line in diff:
assert 'iterations' not in line, (
'ERROR: iteration counts differ between MPI and nonMPI for odd distribution ' 'of time-steps'
)
diff_MPI = []
with open("data/step_6_C1_out.txt") as f:
for line in f:
if "Diff" in line:
diff_MPI.append(float(line.split()[1]))
diff_nonMPI = []
with open("data/step_6_A_ml_out.txt") as f:
for line in f:
if "Diff" in line:
diff_nonMPI.append(float(line.split()[1]))
    assert len(diff_MPI) == len(diff_nonMPI), (
        'ERROR: got different number of results from MPI and nonMPI for even ' 'distribution of time-steps'
    )
for i, j in zip(diff_MPI, diff_nonMPI):
assert abs(i - j) < 6e-11, (
'ERROR: difference between MPI and nonMPI results is too large for even '
'distributions of time-steps, got %s' % abs(i - j)
)
diff_MPI = []
with open("data/step_6_C2_out.txt") as f:
for line in f:
if "Diff" in line:
diff_MPI.append(float(line.split()[1]))
diff_nonMPI = []
with open("data/step_6_B_out.txt") as f:
for line in f:
if "Diff" in line:
diff_nonMPI.append(float(line.split()[1]))
    assert len(diff_MPI) == len(diff_nonMPI), (
        'ERROR: got different number of results from MPI and nonMPI for odd ' 'distribution of time-steps'
    )
for i, j in zip(diff_MPI, diff_nonMPI):
assert abs(i - j) < 6e-11, (
'ERROR: difference between MPI and nonMPI results is too large for odd '
'distributions of time-steps, got %s' % abs(i - j)
)
| 3,274 | 34.597826 | 111 | py |
pySDC | pySDC-master/pySDC/tests/test_tutorials/test_step_1.py | import pytest
@pytest.mark.base
def test_A():
from pySDC.tutorial.step_1.A_spatial_problem_setup import main as main_A
main_A()
@pytest.mark.base
def test_B():
from pySDC.tutorial.step_1.B_spatial_accuracy_check import main as main_B
main_B()
@pytest.mark.base
def test_C():
from pySDC.tutorial.step_1.C_collocation_problem_setup import main as main_C
main_C()
@pytest.mark.base
def test_D():
from pySDC.tutorial.step_1.D_collocation_accuracy_check import main as main_D
main_D()
| 524 | 16.5 | 81 | py |
pySDC | pySDC-master/pySDC/tests/test_tutorials/test_step_8.py | import pytest
@pytest.mark.base
def test_A():
from pySDC.tutorial.step_8.A_visualize_residuals import main as main_A
main_A()
@pytest.mark.base
def test_B():
from pySDC.tutorial.step_8.B_multistep_SDC import main as main_B
main_B()
@pytest.mark.base
def test_C():
from pySDC.tutorial.step_8.C_iteration_estimator import main as main_C
main_C()
| 377 | 15.434783 | 74 | py |
pySDC | pySDC-master/pySDC/tests/test_tutorials/test_step_4.py | import pytest
@pytest.mark.base
def test_A():
from pySDC.tutorial.step_4.A_spatial_transfer_operators import main as main_A
main_A()
@pytest.mark.base
def test_B():
from pySDC.tutorial.step_4.B_multilevel_hierarchy import main as main_B
main_B()
@pytest.mark.base
def test_C():
from pySDC.tutorial.step_4.C_SDC_vs_MLSDC import main as main_C
main_C()
@pytest.mark.base
def test_D():
from pySDC.tutorial.step_4.D_MLSDC_with_particles import main as main_D
main_D()
| 508 | 15.966667 | 81 | py |
pySDC | pySDC-master/pySDC/tests/test_tutorials/test_step_7.py | import os
import subprocess
import pytest
@pytest.mark.fenics
def test_A():
from pySDC.tutorial.step_7.A_pySDC_with_FEniCS import main as main_A
main_A()
@pytest.mark.mpi4py
def test_B():
from pySDC.tutorial.step_7.B_pySDC_with_mpi4pyfft import main as main_B
main_B()
@pytest.mark.petsc
def test_C_1x1():
# try to import MPI here, will fail if things go wrong (and not in the subprocess part)
try:
import mpi4py
del mpi4py
except ImportError:
raise ImportError('petsc tests need mpi4py')
# Set python path once
my_env = os.environ.copy()
my_env['PYTHONPATH'] = '../../..:.'
# my_env['COVERAGE_PROCESS_START'] = 'pyproject.toml'
fname = 'step_7_C_out_1x1.txt'
cwd = '.'
num_procs = 1
num_procs_space = 1
cmd = (
'mpirun -np '
+ str(num_procs)
+ ' python pySDC/tutorial/step_7/C_pySDC_with_PETSc.py '
+ str(num_procs_space)
+ ' '
+ fname
).split()
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=my_env, cwd=cwd)
p.wait()
for line in p.stdout:
print(line)
for line in p.stderr:
print(line)
assert p.returncode == 0, 'ERROR: did not get return code 0, got %s with %2i processes' % (p.returncode, num_procs)
@pytest.mark.petsc
def test_C_1x2():
# try to import MPI here, will fail if things go wrong (and not in the subprocess part)
    try:
        import mpi4py
        del mpi4py
    except ImportError:
        raise ImportError('petsc tests need mpi4py')
# Set python path once
my_env = os.environ.copy()
my_env['PYTHONPATH'] = '../../..:.'
my_env['COVERAGE_PROCESS_START'] = 'pyproject.toml'
cwd = '.'
fname = 'step_7_C_out_1x2.txt'
num_procs = 2
num_procs_space = 2
cmd = (
'mpirun -np '
+ str(num_procs)
+ ' python pySDC/tutorial/step_7/C_pySDC_with_PETSc.py '
+ str(num_procs_space)
+ ' '
+ fname
).split()
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=my_env, cwd=cwd)
p.wait()
for line in p.stdout:
print(line)
for line in p.stderr:
print(line)
assert p.returncode == 0, 'ERROR: did not get return code 0, got %s with %2i processes' % (p.returncode, num_procs)
@pytest.mark.petsc
def test_C_2x2():
# try to import MPI here, will fail if things go wrong (and not in the subprocess part)
try:
import mpi4py
del mpi4py
except ImportError:
raise ImportError('petsc tests need mpi4py')
# Set python path once
my_env = os.environ.copy()
my_env['PYTHONPATH'] = '../../..:.'
my_env['COVERAGE_PROCESS_START'] = 'pyproject.toml'
cwd = '.'
fname = 'step_7_C_out_2x2.txt'
num_procs = 4
num_procs_space = 2
cmd = (
'mpirun -np '
+ str(num_procs)
+ ' python pySDC/tutorial/step_7/C_pySDC_with_PETSc.py '
+ str(num_procs_space)
+ ' '
+ fname
).split()
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=my_env, cwd=cwd)
p.wait()
for line in p.stdout:
print(line)
for line in p.stderr:
print(line)
assert p.returncode == 0, 'ERROR: did not get return code 0, got %s with %2i processes' % (p.returncode, num_procs)
| 3,389 | 26.560976 | 119 | py |
pySDC | pySDC-master/pySDC/tests/test_tutorials/__init__.py | 0 | 0 | 0 | py |
|
pySDC | pySDC-master/pySDC/tests/test_tutorials/test_step_3.py | import pytest
@pytest.mark.base
def test_A():
from pySDC.tutorial.step_3.A_getting_statistics import main as main_A
main_A()
@pytest.mark.base
def test_B():
from pySDC.tutorial.step_3.B_adding_statistics import main as main_B
main_B()
@pytest.mark.base
def test_C():
from pySDC.tutorial.step_3.C_study_collocations import main as main_C
main_C()
| 379 | 15.521739 | 73 | py |
pySDC | pySDC-master/pySDC/tests/test_tutorials/test_step_2.py | import pytest
@pytest.mark.base
def test_A():
from pySDC.tutorial.step_2.A_step_data_structure import main as main_A
main_A()
@pytest.mark.base
def test_B():
from pySDC.tutorial.step_2.B_my_first_sweeper import main as main_B
main_B()
@pytest.mark.base
def test_C():
from pySDC.tutorial.step_2.C_using_pySDCs_frontend import main as main_C
main_C()
| 382 | 15.652174 | 76 | py |
pySDC | pySDC-master/pySDC/tests/test_projects/__init__.py | 0 | 0 | 0 | py |
|
pySDC | pySDC-master/pySDC/tests/test_projects/test_asympconv/test_PFASST.py | import pytest
@pytest.mark.base
def test_main():
from pySDC.projects.AsympConv.PFASST_conv_tests import main
main()
@pytest.mark.base
def test_Linf():
from pySDC.projects.AsympConv.PFASST_conv_Linf import run_advection, run_diffusion
QI = 'LU'
run_diffusion(QI=QI, max_proc_exp=4)
run_advection(QI=QI, max_proc_exp=4)
QI = 'LU2'
run_diffusion(QI=QI, max_proc_exp=4)
run_advection(QI=QI, max_proc_exp=4)
@pytest.mark.base
def test_plot_results():
from pySDC.projects.AsympConv.PFASST_conv_Linf import plot_results
plot_results(cwd='pySDC/projects/AsympConv/')
| 613 | 20.172414 | 86 | py |
pySDC | pySDC-master/pySDC/tests/test_projects/test_asympconv/__init__.py | 0 | 0 | 0 | py |
|
pySDC | pySDC-master/pySDC/tests/test_projects/test_matrixPFASST/test_controllers.py | import pytest
@pytest.mark.base
def test_matrixbased():
from pySDC.projects.matrixPFASST.compare_to_matrixbased import main as A
A()
@pytest.mark.base
def test_propagator():
from pySDC.projects.matrixPFASST.compare_to_propagator import main as B
B()
| 272 | 16.0625 | 76 | py |
pySDC | pySDC-master/pySDC/tests/test_projects/test_matrixPFASST/__init__.py | 0 | 0 | 0 | py |
|
pySDC | pySDC-master/pySDC/tests/test_projects/test_DAE/test_playground.py | import pytest
import warnings
@pytest.mark.base
def test_problematic_main():
from pySDC.projects.DAE.run.fully_implicit_dae_playground import main
main()
@pytest.mark.base
def test_synch_gen_playground_main():
from pySDC.projects.DAE.run.synchronous_machine_playground import main
warnings.filterwarnings('ignore')
main()
warnings.resetwarnings()
| 377 | 18.894737 | 74 | py |
pySDC | pySDC-master/pySDC/tests/test_projects/test_DAE/test_convergence.py | import pytest
import numpy as np
@pytest.mark.base
def test_main():
from pySDC.projects.DAE.run.run_convergence_test import setup, run
# get setup data
description, controller_params, run_params = setup()
# update run_params
num_samples = 2
run_params = dict()
run_params['t0'] = 0.0
run_params['tend'] = 1e-1
run_params['dt_list'] = np.logspace(-2, -3, num=num_samples)
run_params['qd_list'] = ['IE', 'LU']
run_params['num_nodes_list'] = [3]
conv_data = run(description, controller_params, run_params)
# validate results
for qd_type in run_params['qd_list']:
for num_nodes in run_params['num_nodes_list']:
for i, dt in enumerate(run_params['dt_list']):
assert np.isclose(
conv_data[qd_type][num_nodes]['error'][i], test_dict[qd_type][num_nodes][dt], atol=1e-5
), f"ERROR: error bound not fulfilled.\n Got {conv_data[qd_type][num_nodes]['error'][i]}\n Expecting less than {test_dict[qd_type][num_nodes][dt]}"
# Dictionary of test values for use with:
# num_samples = 2
# qd_list = ['IE', 'LU']
# num_nodes_list = [3]
test_dict = {'IE': {3: {1e-2: 1.4e-12, 1e-3: 2.0e-14}}, 'LU': {3: {1e-2: 1.4e-12, 1e-3: 2.2e-14}}}
| 1,257 | 33.944444 | 163 | py |
pySDC | pySDC-master/pySDC/tests/test_projects/test_DAE/test_problems.py | import pytest
import warnings
import numpy as np
@pytest.mark.base
def test_pendulum_u_exact_main():
from pySDC.projects.DAE.problems.simple_DAE import pendulum_2d
# initialize problem parameters
problem_params = dict()
    problem_params['newton_tol'] = 1e-3  # tolerance for implicit solver
problem_params['nvars'] = 5
# instantiate problem
prob = pendulum_2d(**problem_params)
    # ignore the warning about the missing reference solution while checking the dummy values
warnings.filterwarnings('ignore')
u_test = prob.u_exact(5.0)
assert np.array_equal(u_test, np.zeros(5))
# change warning status to error
warnings.filterwarnings('error')
try:
u_test = prob.u_exact(5.0)
except UserWarning:
pass
else:
raise Exception("User warning not raised correctly")
# reset warning status to normal
warnings.resetwarnings()
@pytest.mark.base
def test_one_transistor_amplifier_u_exact_main():
from pySDC.projects.DAE.problems.transistor_amplifier import one_transistor_amplifier
# initialize problem parameters
problem_params = dict()
    problem_params['newton_tol'] = 1e-12  # tolerance for implicit solver
problem_params['nvars'] = 5
# instantiate problem
prob = one_transistor_amplifier(**problem_params)
    # ignore the warning about the missing reference solution while checking the dummy values
warnings.filterwarnings('ignore')
u_test = prob.u_exact(5.0)
assert np.array_equal(u_test, np.zeros(5))
# change warning status to error
warnings.filterwarnings('error')
try:
u_test = prob.u_exact(5.0)
except UserWarning:
pass
else:
raise Exception("User warning not raised correctly")
# reset warning status to normal
warnings.resetwarnings()
@pytest.mark.base
def test_two_transistor_amplifier_u_exact_main():
from pySDC.projects.DAE.problems.transistor_amplifier import two_transistor_amplifier
# initialize problem parameters
problem_params = dict()
    problem_params['newton_tol'] = 1e-3  # tolerance for implicit solver
problem_params['nvars'] = 8
# instantiate problem
prob = two_transistor_amplifier(**problem_params)
    # ignore the warning about the missing reference solution while checking the dummy values
warnings.filterwarnings('ignore')
u_test = prob.u_exact(5.0)
assert np.array_equal(u_test, np.zeros(8))
# change warning status to error
warnings.filterwarnings('error')
try:
u_test = prob.u_exact(5.0)
except UserWarning:
pass
else:
raise Exception("User warning not raised correctly")
# reset warning status to normal
warnings.resetwarnings()
#
# Explicit test for the pendulum example
#
@pytest.mark.base
def test_pendulum_main():
from pySDC.projects.DAE.problems.simple_DAE import pendulum_2d
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.projects.DAE.sweepers.fully_implicit_DAE import fully_implicit_DAE
from pySDC.projects.DAE.misc.HookClass_DAE import error_hook
# initialize level parameters
level_params = dict()
level_params['restol'] = 1e-6
level_params['dt'] = 5e-2
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['num_nodes'] = 3
# initialize problem parameters
problem_params = dict()
    problem_params['newton_tol'] = 1e-3  # tolerance for implicit solver
problem_params['nvars'] = 5
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 200
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 30
controller_params['hook_class'] = error_hook
# Fill description dictionary for easy hierarchy creation
description = dict()
description['problem_class'] = pendulum_2d
description['problem_params'] = problem_params
description['sweeper_class'] = fully_implicit_DAE
description['sweeper_params'] = sweeper_params
description['level_params'] = level_params
description['step_params'] = step_params
# instantiate the controller
controller = controller_nonMPI(num_procs=1, controller_params=controller_params, description=description)
# set time parameters
t0 = 0.0
Tend = 1.0
# get initial values on finest level
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(t0)
warnings.resetwarnings()
# call main function to get things done...
# ignore warning from non-existent reference solution
warnings.filterwarnings('ignore')
uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
uend_ref = [0.98613917, -0.16592027, 0.29956023, 1.77825875, 4.82500525]
# check error
err = np.linalg.norm(uend - uend_ref, np.inf)
assert np.isclose(err, 0.0, atol=1e-4), "Error too large."
@pytest.mark.base
def test_one_transistor_amplifier_main():
from pySDC.projects.DAE.problems.transistor_amplifier import one_transistor_amplifier
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.projects.DAE.sweepers.fully_implicit_DAE import fully_implicit_DAE
from pySDC.projects.DAE.misc.HookClass_DAE import error_hook
# initialize level parameters
level_params = dict()
level_params['restol'] = 1e-6
level_params['dt'] = 1e-4
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['num_nodes'] = 3
# initialize problem parameters
problem_params = dict()
    problem_params['newton_tol'] = 1e-3  # tolerance for implicit solver
problem_params['nvars'] = 5
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 50
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 30
controller_params['hook_class'] = error_hook
# Fill description dictionary for easy hierarchy creation
description = dict()
description['problem_class'] = one_transistor_amplifier
description['problem_params'] = problem_params
description['sweeper_class'] = fully_implicit_DAE
description['sweeper_params'] = sweeper_params
description['level_params'] = level_params
description['step_params'] = step_params
# instantiate the controller
controller = controller_nonMPI(num_procs=1, controller_params=controller_params, description=description)
# set time parameters
t0 = 0.0
Tend = 2e-2
# get initial values on finest level
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(t0)
# call main function to get things done...
# ignore warning from non-existent reference solution
warnings.filterwarnings('ignore')
uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
warnings.resetwarnings()
uend_ref = [-0.02182035, 3.06674603, 2.89634691, 2.45212382, -2.69727238]
# check error
err = np.linalg.norm(uend - uend_ref, np.inf)
assert np.isclose(err, 0.0, atol=1e-4), "Error too large."
@pytest.mark.base
def test_two_transistor_amplifier_main():
from pySDC.projects.DAE.problems.transistor_amplifier import two_transistor_amplifier
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.projects.DAE.sweepers.fully_implicit_DAE import fully_implicit_DAE
from pySDC.projects.DAE.misc.HookClass_DAE import error_hook
# initialize level parameters
level_params = dict()
level_params['restol'] = 1e-6
level_params['dt'] = 1e-4
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['num_nodes'] = 3
# initialize problem parameters
problem_params = dict()
    problem_params['newton_tol'] = 1e-3  # tolerance for implicit solver
problem_params['nvars'] = 8
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 50
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 30
controller_params['hook_class'] = error_hook
# Fill description dictionary for easy hierarchy creation
description = dict()
description['problem_class'] = two_transistor_amplifier
description['problem_params'] = problem_params
description['sweeper_class'] = fully_implicit_DAE
description['sweeper_params'] = sweeper_params
description['level_params'] = level_params
description['step_params'] = step_params
# instantiate the controller
controller = controller_nonMPI(num_procs=1, controller_params=controller_params, description=description)
# set time parameters
t0 = 0.0
Tend = 2e-2
# get initial values on finest level
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(t0)
# call main function to get things done...
# ignore warning from non-existent reference solution
warnings.filterwarnings('ignore')
uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
warnings.resetwarnings()
uend_ref = [
-5.52721527e-03,
3.00630407e00,
2.84974338e00,
4.07588343e00,
2.12960582e00,
2.19430889e00,
5.89240699e00,
9.99531182e-02,
]
# check error
err = np.linalg.norm(uend - uend_ref, np.inf)
assert np.isclose(err, 0.0, atol=1e-4), "Error too large."
@pytest.mark.base
def test_synchgen_infinite_bus_main():
from pySDC.projects.DAE.problems.synchronous_machine import synchronous_machine_infinite_bus
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.projects.DAE.sweepers.fully_implicit_DAE import fully_implicit_DAE
from pySDC.projects.DAE.misc.HookClass_DAE import error_hook
# initialize level parameters
level_params = dict()
level_params['restol'] = 1e-6
level_params['dt'] = 1e-1
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['num_nodes'] = 3
# initialize problem parameters
problem_params = dict()
    problem_params['newton_tol'] = 1e-3  # tolerance for implicit solver
problem_params['nvars'] = 14
# initialize step parameters
step_params = dict()
step_params['maxiter'] = 50
# initialize controller parameters
controller_params = dict()
controller_params['logger_level'] = 30
controller_params['hook_class'] = error_hook
# Fill description dictionary for easy hierarchy creation
description = dict()
description['problem_class'] = synchronous_machine_infinite_bus
description['problem_params'] = problem_params
description['sweeper_class'] = fully_implicit_DAE
description['sweeper_params'] = sweeper_params
description['level_params'] = level_params
description['step_params'] = step_params
# instantiate the controller
controller = controller_nonMPI(num_procs=1, controller_params=controller_params, description=description)
# set time parameters
t0 = 0.0
Tend = 1
# get initial values on finest level
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(t0)
# call main function to get things done...
# ignore warning from non-existent reference solution
warnings.filterwarnings('ignore')
uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)
warnings.resetwarnings()
uend_ref = [
8.30823565e-01,
-4.02584174e-01,
1.16966755e00,
9.47592808e-01,
-3.68076863e-01,
-3.87492326e-01,
-7.77837831e-01,
-1.67347611e-01,
1.34810867e00,
5.46223705e-04,
1.29690691e-02,
-8.00823474e-02,
3.10281509e-01,
9.94039645e-01,
]
# check error
err = np.linalg.norm(uend - uend_ref, np.inf)
assert np.isclose(err, 0.0, atol=1e-4), "Error too large."
| 11,988 | 31.142091 | 109 | py |
pySDC | pySDC-master/pySDC/tests/test_projects/test_DAE/test_misc.py | import pytest
#
# Tests that problem class enforces parameter requirements
@pytest.mark.base
def test_problem_class_main():
from pySDC.projects.DAE.problems.simple_DAE import simple_dae_1
# initialize problem parameters
problem_params = dict()
# instantiate problem
try:
simple_dae_1(**problem_params)
# ensure error thrown is correct
except Exception as error:
assert type(error) == TypeError, "Parameter error was not thrown correctly"
else:
raise Exception("Parameter error was not thrown correctly")
| 564 | 25.904762 | 83 | py |
pySDC | pySDC-master/pySDC/tests/test_projects/test_DAE/test_sweeper.py | import pytest
import numpy as np
@pytest.mark.base
def test_predict_main():
from pySDC.projects.DAE.problems.simple_DAE import simple_dae_1
from pySDC.projects.DAE.sweepers.fully_implicit_DAE import fully_implicit_DAE
from pySDC.core.Step import step
# initialize level parameters
level_params = dict()
level_params['dt'] = 5e-2
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['num_nodes'] = 3
sweeper_params['initial_guess'] = 'zero'
# initialize problem parameters
problem_params = dict()
    problem_params['newton_tol'] = 1e-12  # tolerance for implicit solver
problem_params['nvars'] = 3
# Fill description dictionary for easy hierarchy creation
description = dict()
description['problem_class'] = simple_dae_1
description['problem_params'] = problem_params
description['sweeper_class'] = fully_implicit_DAE
description['sweeper_params'] = sweeper_params
description['level_params'] = level_params
S = step(description=description)
L = S.levels[0]
P = L.prob
# set initial time in the status of the level
L.status.time = 0.1
# compute initial value (using the exact function here)
L.u[0] = P.u_exact(L.time)
# call prediction function to initialise nodes
L.sweep.predict()
# check correct initialisation
assert np.array_equal(L.f[0], np.zeros(3))
for i in range(sweeper_params['num_nodes']):
assert np.array_equal(L.u[i + 1], np.zeros(3))
assert np.array_equal(L.f[i + 1], np.zeros(3))
# rerun check for random initialisation
# expecting that random initialisation does not initialise to zero
sweeper_params['initial_guess'] = 'random'
description['sweeper_params'] = sweeper_params
S = step(description=description)
L = S.levels[0]
P = L.prob
# set initial time in the status of the level
L.status.time = 0.1
# compute initial value (using the exact function here)
L.u[0] = P.u_exact(L.time)
L.sweep.predict()
assert np.array_equal(L.f[0], np.zeros(3))
for i in range(sweeper_params['num_nodes']):
assert np.not_equal(L.u[i + 1], np.zeros(3)).any()
assert np.not_equal(L.f[i + 1], np.zeros(3)).any()
@pytest.mark.base
def test_residual_main():
from pySDC.projects.DAE.problems.simple_DAE import simple_dae_1
from pySDC.projects.DAE.sweepers.fully_implicit_DAE import fully_implicit_DAE
from pySDC.core.Step import step
# initialize level parameters
level_params = dict()
level_params['dt'] = 1e-1
level_params['residual_type'] = 'last_abs'
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['num_nodes'] = 3
# initialize problem parameters
problem_params = dict()
    problem_params['newton_tol'] = 1e-12  # tolerance for implicit solver
problem_params['nvars'] = 3
# Fill description dictionary for easy hierarchy creation
description = dict()
description['problem_class'] = simple_dae_1
description['problem_params'] = problem_params
description['sweeper_class'] = fully_implicit_DAE
description['sweeper_params'] = sweeper_params
description['level_params'] = level_params
# description['step_params'] = step_params
# last_abs residual test
S = step(description=description)
L = S.levels[0]
P = L.prob
# set reference values
u = P.dtype_u(P.init)
du = P.dtype_u(P.init)
u[:] = (5, 5, 5)
du[:] = (0, 0, 0)
# set initial time in the status of the level
L.status.time = 0.0
L.u[0] = u
# call prediction function to initialise nodes
L.sweep.predict()
L.sweep.compute_residual()
# generate reference norm
ref_norm = []
for m in range(3):
ref_norm.append(abs(P.eval_f(u, du, L.time + L.dt * L.sweep.coll.nodes[m])))
# check correct residual computation
assert L.status.residual == ref_norm[-1], "ERROR: incorrect norm used"
# full_rel residual test
level_params['residual_type'] = 'full_rel'
description['level_params'] = level_params
S = step(description=description)
L = S.levels[0]
P = L.prob
# set initial time in the status of the level
L.status.time = 0.0
# compute initial value (using the exact function here)
L.u[0] = u
# call prediction function to initialise nodes
L.sweep.predict()
L.sweep.compute_residual()
assert L.status.residual == max(ref_norm) / abs(L.u[0]), "ERROR: incorrect norm used"
# last_rel residual test
level_params['residual_type'] = 'last_rel'
description['level_params'] = level_params
S = step(description=description)
L = S.levels[0]
P = L.prob
# set initial time in the status of the level
L.status.time = 0.0
# compute initial value (using the exact function here)
L.u[0] = u
# call prediction function to initialise nodes
L.sweep.predict()
L.sweep.compute_residual()
assert L.status.residual == ref_norm[-1] / abs(L.u[0]), "ERROR: incorrect norm used"
@pytest.mark.base
def test_compute_end_point_main():
from pySDC.projects.DAE.problems.simple_DAE import simple_dae_1
from pySDC.projects.DAE.sweepers.fully_implicit_DAE import fully_implicit_DAE
from pySDC.core.Step import step
# initialize level parameters
level_params = dict()
level_params['dt'] = 1e-1
# initialize sweeper parameters
sweeper_params = dict()
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['num_nodes'] = 3
sweeper_params['initial_guess'] = 'zero'
# initialize problem parameters
problem_params = dict()
    problem_params['newton_tol'] = 1e-12  # tolerance for implicit solver
problem_params['nvars'] = 3
# Fill description dictionary for easy hierarchy creation
description = dict()
description['problem_class'] = simple_dae_1
description['problem_params'] = problem_params
description['sweeper_class'] = fully_implicit_DAE
description['sweeper_params'] = sweeper_params
description['level_params'] = level_params
# last_abs residual test
S = step(description=description)
L = S.levels[0]
P = L.prob
# set initial time in the status of the level
L.status.time = 0.0
# compute initial value (using the exact function here)
L.u[0] = P.u_exact(L.time)
# call prediction function to initialise nodes
L.sweep.predict()
    # compute the end point
L.sweep.compute_end_point()
for m in range(1, L.sweep.coll.num_nodes):
assert np.array_equal(L.u[m], L.uend), "ERROR: end point not computed correctly"
| 6,725 | 33.316327 | 89 | py |
pySDC | pySDC-master/pySDC/tests/test_projects/test_DAE/test_iteration.py | import pytest
import numpy as np
@pytest.mark.base
def test_main():
from pySDC.projects.DAE.run.run_iteration_test import setup, run
# get setup data
description, controller_params, run_params = setup()
# update run_params
run_params['t0'] = 0.0
run_params['tend'] = 0.1
run_params['max_iter_list'] = [4, 5]
run_params['qd_list'] = ['IE', 'LU']
run_params['num_nodes_list'] = [3]
conv_data = run(description, controller_params, run_params)
# validate results
for qd_type in run_params['qd_list']:
for num_nodes in run_params['num_nodes_list']:
for i, max_iter in enumerate(run_params['max_iter_list']):
assert np.isclose(
conv_data[qd_type][num_nodes]['error'][i], test_dict[qd_type][num_nodes][max_iter], atol=1e-5
), f"ERROR: error bound not fulfilled.\n Got {conv_data[qd_type][num_nodes]['error'][i]}\n Expecting less than {test_dict[qd_type][num_nodes][max_iter]}"
# Dictionary of test values for use with:
# max_iter_low = 4
# max_iter_high = 6
# qd_list = ['IE', 'LU']
# num_nodes_list = [3]
test_dict = {'IE': {3: {4: 1.6e-7, 5: 6e-8}}, 'LU': {3: {4: 4.1e-10, 5: 3.8e-13}}}
| 1,217 | 33.8 | 169 | py |
pySDC | pySDC-master/pySDC/tests/test_projects/test_gpu/test_ac.py | import pytest
@pytest.mark.cupy
def test_main():
from pySDC.projects.GPU.ac_fft import main
main()
| 110 | 11.333333 | 46 | py |
pySDC | pySDC-master/pySDC/tests/test_projects/test_gpu/test_heat.py | import pytest
@pytest.mark.cupy
def test_main():
from pySDC.projects.GPU.heat import main
main()
| 108 | 11.111111 | 44 | py |
pySDC | pySDC-master/pySDC/tests/test_projects/test_gpu/__init__.py | 0 | 0 | 0 | py |
|
pySDC | pySDC-master/pySDC/tests/test_projects/test_hamiltonian/test_fput.py | import pytest
@pytest.mark.base
def test_main():
from pySDC.projects.Hamiltonian.fput import main
main()
| 116 | 12 | 52 | py |
pySDC | pySDC-master/pySDC/tests/test_projects/test_hamiltonian/test_simple.py | import pytest
@pytest.mark.base
@pytest.mark.slow
def test_main():
from pySDC.projects.Hamiltonian.simple_problems import main
main()
| 145 | 13.6 | 63 | py |
pySDC | pySDC-master/pySDC/tests/test_projects/test_hamiltonian/test_solar.py | import pytest
@pytest.mark.base
def test_main():
from pySDC.projects.Hamiltonian.solar_system import main
main()
| 124 | 12.888889 | 60 | py |
pySDC | pySDC-master/pySDC/tests/test_projects/test_hamiltonian/__init__.py | 0 | 0 | 0 | py |
|
pySDC | pySDC-master/pySDC/tests/test_projects/test_resilience/test_schroedinger.py | import pytest
@pytest.mark.mpi4py
def test_schroedinger_solution():
from mpi4py import MPI
import numpy as np
from pySDC.projects.Resilience.Schroedinger import run_Schroedinger
from pySDC.helpers.stats_helper import get_sorted
stats, _, _ = run_Schroedinger(space_comm=MPI.COMM_WORLD)
k_mean = np.mean([me[1] for me in get_sorted(stats, type='k')])
assert (
k_mean < 17
), f"Got too many iterations in Schroedinger test! Expected less then 17 on average, but got {k_mean:.2f}!"
@pytest.mark.mpi4py
def test_schroedinger_fault_insertion():
from mpi4py import MPI
import numpy as np
from pySDC.projects.Resilience.Schroedinger import run_Schroedinger
from pySDC.projects.Resilience.fault_injection import FaultInjector
from pySDC.helpers.stats_helper import get_sorted
fault_stuff = FaultInjector.generate_fault_stuff_single_fault(
bit=0, iteration=5, problem_pos=[20, 30], level_number=0, node=3, time=0.1
)
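    # insert a single bit flip into the solution at one spatial position during iteration 5;
    # the extra iterations needed to recover from it are checked below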
stats, _, _ = run_Schroedinger(space_comm=MPI.COMM_WORLD, fault_stuff=fault_stuff)
k_mean = np.mean([me[1] for me in get_sorted(stats, type='k')])
assert (
k_mean > 17
), f"Got too few iterations in Schroedinger test! Expected more then 17 on average because we need to fix the fault, but got {k_mean:.2f}!"
| 1,323 | 36.828571 | 143 | py |
pySDC | pySDC-master/pySDC/tests/test_projects/test_resilience/test_adaptive_collocation.py | import pytest
@pytest.mark.base
def test_adaptivity_collocation():
from pySDC.projects.Resilience.collocation_adaptivity import adaptivity_collocation
adaptivity_collocation(plotting=False)
@pytest.mark.base
def test_error_estimate_order():
from pySDC.projects.Resilience.collocation_adaptivity import order_stuff, run_advection
order_stuff(run_advection)
@pytest.mark.base
def test_adaptive_collocation():
from pySDC.projects.Resilience.collocation_adaptivity import compare_adaptive_collocation, run_vdp
compare_adaptive_collocation(run_vdp)
| 577 | 24.130435 | 102 | py |
pySDC | pySDC-master/pySDC/tests/test_projects/test_resilience/test_piline.py | import pytest
@pytest.mark.base
def test_main():
from pySDC.projects.Resilience.piline import main
main()
@pytest.mark.base
def test_residual_adaptivity():
from pySDC.projects.Resilience.piline import residual_adaptivity
residual_adaptivity()
| 265 | 15.625 | 68 | py |
pySDC | pySDC-master/pySDC/tests/test_projects/test_resilience/test_quench.py | import pytest
@pytest.mark.base
@pytest.mark.parametrize('leak_type', ['linear', 'exponential'])
def test_imex_vs_fully_implicit_quench(leak_type):
"""
    Test that the IMEX and fully implicit schemes produce the same solution and that the runaway process has started.
"""
from pySDC.projects.Resilience.quench import compare_imex_full
compare_imex_full(plotting=False, leak_type=leak_type)
@pytest.mark.base
def test_crossing_time_computation():
from pySDC.projects.Resilience.quench import run_quench, get_crossing_time
controller_params = {'logger_level': 30}
description = {'level_params': {'dt': 2.5e1}, 'step_params': {'maxiter': 5}}
stats, controller, _ = run_quench(
custom_controller_params=controller_params,
custom_description=description,
Tend=400,
)
_ = get_crossing_time(stats, controller, num_points=5, inter_points=155)
| 901 | 32.407407 | 111 | py |
pySDC | pySDC-master/pySDC/tests/test_projects/test_resilience/test_efficient_sweepers.py | import pytest
def run_Lorenz(efficient, skip_residual_computation, num_procs=1):
from pySDC.implementations.problem_classes.Lorenz import LorenzAttractor
from pySDC.implementations.hooks.log_errors import LogGlobalErrorPostRun
from pySDC.implementations.hooks.log_solution import LogSolution
from pySDC.implementations.hooks.log_work import LogWork
from pySDC.projects.Resilience.sweepers import generic_implicit_efficient, generic_implicit
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
# initialize level parameters
level_params = {}
level_params['dt'] = 1e-2
# initialize sweeper parameters
sweeper_params = {}
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['num_nodes'] = 3
sweeper_params['QI'] = 'IE'
sweeper_params['skip_residual_computation'] = (
('IT_CHECK', 'IT_FINE', 'IT_COARSE', 'IT_DOWN', 'IT_UP') if skip_residual_computation else ()
)
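    # When skipping is enabled, the residual is not computed at any of these stages.
    # That is safe here because this run terminates on a fixed maxiter (set below)
    # rather than on a residual tolerance.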
problem_params = {
'newton_tol': 1e-9,
'newton_maxiter': 99,
}
# initialize step parameters
step_params = {}
step_params['maxiter'] = 4
# initialize controller parameters
controller_params = {}
controller_params['logger_level'] = 30
controller_params['hook_class'] = [LogSolution, LogWork, LogGlobalErrorPostRun]
controller_params['mssdc_jac'] = False
# fill description dictionary for easy step instantiation
description = {}
description['problem_class'] = LorenzAttractor
description['problem_params'] = problem_params
description['sweeper_class'] = generic_implicit_efficient if efficient else generic_implicit
description['sweeper_params'] = sweeper_params
description['level_params'] = level_params
description['step_params'] = step_params
# set time parameters
t0 = 0.0
# instantiate controller
controller = controller_nonMPI(num_procs=num_procs, controller_params=controller_params, description=description)
P = controller.MS[0].levels[0].prob
uinit = P.u_exact(t0)
uend, stats = controller.run(u0=uinit, t0=t0, Tend=1.0)
return stats
def run_Schroedinger(efficient=False, num_procs=1, skip_residual_computation=False):
from pySDC.implementations.problem_classes.NonlinearSchroedinger_MPIFFT import nonlinearschroedinger_imex
from pySDC.implementations.sweeper_classes.imex_1st_order import imex_1st_order
from pySDC.projects.Resilience.sweepers import imex_1st_order_efficient
from pySDC.implementations.hooks.log_errors import LogGlobalErrorPostRunMPI
from pySDC.implementations.hooks.log_solution import LogSolution
from pySDC.implementations.hooks.log_work import LogWork
from pySDC.implementations.controller_classes.controller_MPI import controller_MPI
from mpi4py import MPI
space_comm = MPI.COMM_SELF
rank = space_comm.Get_rank()
# initialize level parameters
level_params = {}
level_params['restol'] = 1e-8
level_params['dt'] = 1e-01 / 2
level_params['nsweeps'] = 1
# initialize sweeper parameters
sweeper_params = {}
sweeper_params['quad_type'] = 'RADAU-RIGHT'
sweeper_params['num_nodes'] = 3
sweeper_params['QI'] = 'IE'
sweeper_params['initial_guess'] = 'spread'
sweeper_params['skip_residual_computation'] = (
('IT_FINE', 'IT_COARSE', 'IT_DOWN', 'IT_UP') if skip_residual_computation else ()
)
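    # Unlike the Lorenz run above, 'IT_CHECK' is not in this tuple: this run
    # converges on restol, so the residual must still be computed at the
    # convergence check (our reading of the setup).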
# initialize problem parameters
problem_params = {}
problem_params['nvars'] = (128, 128)
problem_params['spectral'] = False
problem_params['c'] = 1.0
problem_params['comm'] = space_comm
# initialize step parameters
step_params = {}
step_params['maxiter'] = 50
# initialize controller parameters
controller_params = {}
controller_params['logger_level'] = 30 if rank == 0 else 99
controller_params['hook_class'] = [LogSolution, LogWork, LogGlobalErrorPostRunMPI]
controller_params['mssdc_jac'] = False
# fill description dictionary for easy step instantiation
description = {}
description['problem_params'] = problem_params
description['problem_class'] = nonlinearschroedinger_imex
description['sweeper_class'] = imex_1st_order_efficient if efficient else imex_1st_order
description['sweeper_params'] = sweeper_params
description['level_params'] = level_params
description['step_params'] = step_params
# set time parameters
t0 = 0.0
# instantiate controller
controller_args = {
'controller_params': controller_params,
'description': description,
}
comm = MPI.COMM_SELF
controller = controller_MPI(**controller_args, comm=comm)
P = controller.S.levels[0].prob
uinit = P.u_exact(t0)
uend, stats = controller.run(u0=uinit, t0=t0, Tend=1.0)
return stats
@pytest.mark.base
def test_generic_implicit_efficient(skip_residual_computation=True):
stats_normal = run_Lorenz(efficient=False, skip_residual_computation=skip_residual_computation)
stats_efficient = run_Lorenz(efficient=True, skip_residual_computation=skip_residual_computation)
assert_sameness(stats_normal, stats_efficient, 'generic_implicit')
@pytest.mark.base
def test_residual_skipping():
stats_normal = run_Lorenz(efficient=True, skip_residual_computation=False)
stats_efficient = run_Lorenz(efficient=True, skip_residual_computation=True)
assert_sameness(stats_normal, stats_efficient, 'generic_implicit', check_residual=False)
@pytest.mark.mpi4py
def test_residual_skipping_with_residual_tolerance():
stats_normal = run_Schroedinger(efficient=True, skip_residual_computation=False)
stats_efficient = run_Schroedinger(efficient=True, skip_residual_computation=True)
assert_sameness(stats_normal, stats_efficient, 'imex_first_order', check_residual=False)
@pytest.mark.mpi4py
def test_imex_first_order_efficient():
stats_normal = run_Schroedinger(efficient=False)
stats_efficient = run_Schroedinger(efficient=True)
assert_sameness(stats_normal, stats_efficient, 'imex_first_order')
def assert_sameness(stats_normal, stats_efficient, sweeper_name, check_residual=True):
from pySDC.helpers.stats_helper import get_sorted, get_list_of_types
import numpy as np
for me in get_list_of_types(stats_normal):
normal = [you[1] for you in get_sorted(stats_normal, type=me)]
if 'timing' in me or all(you is None for you in normal) or (not check_residual and 'residual' in me):
continue
assert np.allclose(
normal, [you[1] for you in get_sorted(stats_efficient, type=me)]
), f'Stats don\'t match in type \"{me}\" for efficient and regular implementations of {sweeper_name} sweeper!'
| 6,735 | 37.712644 | 118 | py |
pySDC | pySDC-master/pySDC/tests/test_projects/test_resilience/test_FD_eigenvalues.py | import pytest
@pytest.mark.base
@pytest.mark.parametrize("equation", ['heat', 'advection'])
def test_FD_eigenvalues(equation):
"""
    Test two special cases of computing eigenvalues of a finite difference discretization.
"""
import numpy as np
from pySDC.projects.Resilience.FDeigenvalues import get_finite_difference_eigenvalues
order = 2
stencil_type = 'center'
dx = 0.1
L = 1.0
N = int(L // dx)
n = np.arange(N, dtype=complex)
if equation == 'heat':
derivative = 2
expect = -2.0 / (dx**2.0) * (1.0 - np.cos(2 * np.pi * n / N))
elif equation == 'advection':
derivative = 1
expect = 1.0j / (dx) * (np.sin(2 * np.pi * n / N))
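    # Both reference formulas are the eigenvalues of the periodic (circulant)
    # second-order central stencils, obtained by diagonalizing with the DFT:
    #   lambda_n = sum_k c_k * exp(1j * k * theta),  theta = 2 * pi * n / N
    #   heat, [1, -2, 1] / dx**2:       lambda_n = -2 * (1 - cos(theta)) / dx**2
    #   advection, [-1, 0, 1] / (2*dx): lambda_n = 1j * sin(theta) / dx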
assert np.allclose(
expect, get_finite_difference_eigenvalues(derivative, order, stencil_type, dx=dx, L=L)
), f"Error when doing {equation}"
if __name__ == '__main__':
for equation in ['heat', 'advection']:
test_FD_eigenvalues(equation)
| 990 | 27.314286 | 101 | py |
pySDC | pySDC-master/pySDC/tests/test_projects/test_resilience/test_Lorenz.py | import pytest
@pytest.mark.base
def test_main():
from pySDC.projects.Resilience.Lorenz import main
main(plotting=False)
| 131 | 13.666667 | 53 | py |
pySDC | pySDC-master/pySDC/tests/test_projects/test_resilience/test_fault_injection.py | import pytest
import os
import sys
import subprocess
import numpy as np
def get_random_float():
"""
    Get a random float64 number spanning almost the full representable range.
Returns:
float: Random float
"""
return np.random.uniform(low=-1.797693134862315e307, high=1.797693134862315e307, size=1)[0]
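    # Note: the bounds stop roughly one decade short of the float64 maximum
    # (~1.8e308), presumably to keep intermediate arithmetic in the tests from
    # overflowing (our guess; the source does not say why).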
@pytest.mark.base
def test_float_conversion():
'''
    Test the float conversion by converting to binary and back, and by flipping bits whose impact on the
    value is known. We try 1000 random numbers, so the number of NaNs produced is not known beforehand.
'''
from pySDC.projects.Resilience.fault_injection import FaultInjector
# Try the conversion between floats and bytes
injector = FaultInjector()
exp = [-1, 2, 256]
bit = [0, 11, 8]
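    # Expected ratios when flipping these bits (assuming FaultInjector's binary
    # representation puts the float64 sign bit at position 0 and the exponent at
    # positions 1-11, which is what the checks below verify):
    #   bit 0  -> sign flip, ratio -1
    #   bit 11 -> least significant exponent bit, ratio 2 (or 1/2)
    #   bit 8  -> exponent bit of weight 2**3, ratio 2**8 = 256 (or 1/256)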
nan_counter = 0
num_tests = int(1e3)
    for _ in range(num_tests):
        # generate a random number spanning almost the full range of Python floats
rand = get_random_float()
# convert to bytes and back
res = injector.to_float(injector.to_binary(rand))
assert np.isclose(res, rand), f"Conversion between bytes and float failed for {rand}: result: {res}"
# flip some exponent bits
for i in range(len(exp)):
res = injector.flip_bit(rand, bit[i]) / rand
if np.isfinite(res):
assert exp[i] in [
res,
1.0 / res,
            ], f'Bitflip failed: expected ratio: {exp[i]}, got: {res:.2e} or {1./res:.2e}'
else:
nan_counter += 1
if nan_counter > 0:
print(f'When flipping bits, we got nan {nan_counter} times out of {num_tests} tests')
@pytest.mark.base
def test_complex_conversion():
"""
Test conversion of complex numbers to and from binary
"""
from pySDC.projects.Resilience.fault_injection import FaultInjector
injector = FaultInjector()
num_tests = int(1e3)
for _i in range(num_tests):
rand_complex = get_random_float() + get_random_float() * 1j
# convert to bytes and back
res = injector.to_float(injector.to_binary(rand_complex))
assert np.isclose(
res, rand_complex
), f"Conversion between bytes and float failed for {rand_complex}: result: {res}"
@pytest.mark.base
def test_fault_injection():
from pySDC.projects.Resilience.fault_injection import FaultInjector
# setup arguments for fault generation for van der Pol problem
rnd_args = {'iteration': 3}
args = {'time': 1.0, 'target': 0}
injector = FaultInjector()
injector.rnd_params = {
'level_number': 1,
'node': 3,
'iteration': 3,
'problem_pos': (2,),
'bit': 64,
}
reference = {
0: {
'time': 1.0,
'timestep': None,
'level_number': 0,
'iteration': 3,
'node': 0,
'problem_pos': [1],
'bit': 48,
'target': 0,
'when': 'after',
},
1: {
'time': 1.0,
'timestep': None,
'level_number': 0,
'iteration': 3,
'node': 3,
'problem_pos': [0],
'bit': 26,
'target': 0,
'when': 'after',
},
2: {
'time': 1.0,
'timestep': None,
'level_number': 0,
'iteration': 1,
'node': 0,
'problem_pos': [0],
'bit': 0,
'target': 0,
'when': 'after',
},
3: {
'time': 1.0,
'timestep': None,
'level_number': 0,
'iteration': 1,
'node': 1,
'problem_pos': [0],
'bit': 0,
'target': 0,
'when': 'after',
},
}
# inject the faults
for i in range(4):
injector.add_fault(args=args, rnd_args=rnd_args)
if i >= 1: # switch to combination based adding
injector.random_generator = i - 1
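            # Assigning an integer here apparently switches the injector from random
            # sampling to enumerating fault combinations by index (our reading; the
            # reference dict above encodes the expected outcome either way).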
assert (
injector.faults[i].__dict__ == reference[i]
), f'Expected fault with parameters {reference[i]}, got {injector.faults[i].__dict__}!'
@pytest.mark.mpi4py
@pytest.mark.slow
@pytest.mark.parametrize("numprocs", [5])
def test_fault_stats(numprocs):
"""
Test generation of fault statistics and their recovery rates
"""
import numpy as np
# Set python path once
my_env = os.environ.copy()
my_env['PYTHONPATH'] = '../../..:.'
my_env['COVERAGE_PROCESS_START'] = 'pyproject.toml'
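    # COVERAGE_PROCESS_START points coverage.py at its configuration so that the
    # spawned MPI ranks are measured as well (relies on coverage's subprocess hooks).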
cmd = f"mpirun -np {numprocs} python {__file__} --test-fault-stats".split()
p = subprocess.Popen(cmd, env=my_env, cwd=".")
p.wait()
assert p.returncode == 0, 'ERROR: did not get return code 0, got %s with %2i processes' % (
p.returncode,
numprocs,
)
stats = generate_stats(True)
# test number of possible combinations for faults
expected_max_combinations = 3840
assert (
stats.get_max_combinations() == expected_max_combinations
), f"Expected {expected_max_combinations} possible combinations for faults in van der Pol problem, but got {stats.get_max_combinations()}!"
recovered_reference = {
'base': 1,
'adaptivity': 2,
'iterate': 1,
'Hot Rod': 2,
'adaptivity_coll': 0,
'double_adaptivity': 0,
}
stats.get_recovered()
for strategy in stats.strategies:
dat = stats.load(strategy=strategy, faults=True)
fixable_mask = stats.get_fixable_faults_only(strategy)
recovered_mask = stats.get_mask(strategy=strategy, key='recovered', op='eq', val=True)
index = stats.get_index(mask=fixable_mask)
assert all(fixable_mask[:-1] == [False, True, False]), "Error in generating mask of fixable faults"
assert all(index == [1, 3]), "Error when converting to index"
combinations = np.array(stats.get_combination_counts(dat, keys=['bit'], mask=fixable_mask))
assert all(combinations == [1.0, 1.0]), "Error when counting combinations"
recovered = len(dat['recovered'][recovered_mask])
crashed = len(dat['error'][dat['error'] == np.inf]) # on some systems the last run crashes...
assert (
recovered >= recovered_reference[strategy.name] - crashed
), f'Expected {recovered_reference[strategy.name]} recovered faults, but got {recovered} recovered faults in {strategy.name} strategy!'
def generate_stats(load=False):
"""
Generate stats to check the recovery rate
Args:
load: Load the stats or generate them from scratch
Returns:
Object containing the stats
"""
from pySDC.projects.Resilience.strategies import (
BaseStrategy,
AdaptivityStrategy,
IterateStrategy,
HotRodStrategy,
)
from pySDC.projects.Resilience.fault_stats import (
FaultStats,
)
from pySDC.projects.Resilience.Lorenz import run_Lorenz
np.seterr(all='warn') # get consistent behaviour across platforms
stats = FaultStats(
prob=run_Lorenz,
faults=[False, True],
reload=load,
recovery_thresh=1.1,
num_procs=1,
mode='random',
strategies=[
BaseStrategy(),
AdaptivityStrategy(),
IterateStrategy(),
HotRodStrategy(),
],
stats_path='data',
)
stats.run_stats_generation(runs=4, step=2)
return stats
if __name__ == "__main__":
if '--test-fault-stats' in sys.argv:
generate_stats()
else:
test_complex_conversion()
test_fault_stats(5)
test_fault_injection()
test_float_conversion()
| 7,750 | 29.159533 | 143 | py |
pySDC | pySDC-master/pySDC/tests/test_projects/test_resilience/__init__.py | 0 | 0 | 0 | py |
|
pySDC | pySDC-master/pySDC/tests/test_projects/test_resilience/test_order.py | import pytest
@pytest.mark.base
@pytest.mark.parametrize("ks", [[2], [3], [4]])
@pytest.mark.parametrize("serial", [True, False])
def test_order_fixed_step_size(ks, serial):
from pySDC.projects.Resilience.accuracy_check import plot_all_errors, plt
fig, ax = plt.subplots()
plot_all_errors(ax, ks, serial, Tend_fixed=1.0)
@pytest.mark.base
@pytest.mark.parametrize("ks", [[2], [3]])
@pytest.mark.parametrize("serial", [True, False])
def test_order_adaptive_step_size(ks, serial):
from pySDC.projects.Resilience.accuracy_check import plot_all_errors, plt
fig, ax = plt.subplots()
plot_all_errors(ax, ks, serial, Tend_fixed=5e-1, var='e_tol', dt_list=[1e-5, 5e-6], avoid_restarts=False)
| 734 | 30.956522 | 109 | py |
pySDC | pySDC-master/pySDC/tests/test_projects/test_resilience/test_vdp.py | import pytest
@pytest.mark.mpi4py
@pytest.mark.parametrize('num_procs', [1, 2, 5, 8])
@pytest.mark.parametrize('test_name', ['mpi_vs_nonMPI', 'check_step_size_limiter'])
def test_stuff(num_procs, test_name):
import pySDC.projects.Resilience.vdp as vdp
import os
import subprocess
# Set python path once
my_env = os.environ.copy()
my_env['PYTHONPATH'] = '../../..:.'
my_env['COVERAGE_PROCESS_START'] = 'pyproject.toml'
# run code with different number of MPI processes
cmd = f"mpirun -np {num_procs} python {vdp.__file__} {test_name}".split()
p = subprocess.Popen(cmd, env=my_env, cwd=".")
p.wait()
assert p.returncode == 0, 'ERROR: did not get return code 0, got %s with %2i processes' % (
p.returncode,
num_procs,
)
@pytest.mark.mpi4py
def test_adaptivity_with_avoid_restarts():
test_stuff(1, 'adaptivity_with_avoid_restarts')
if __name__ == "__main__":
test_stuff(8, '')
| 959 | 25.666667 | 95 | py |
pySDC | pySDC-master/pySDC/tests/test_projects/test_resilience/test_extrapolated_error_within_Q.py | import pytest
@pytest.mark.base
@pytest.mark.parametrize("prob_name", ['advection', 'piline'])
@pytest.mark.parametrize('num_nodes', [2, 3])
@pytest.mark.parametrize('quad_type', ['RADAU-RIGHT', 'GAUSS'])
def test_order_extrapolation_estimate_within_Q(prob_name, num_nodes, quad_type):
from pySDC.projects.Resilience.extrapolation_within_Q import check_order
if prob_name == 'advection':
from pySDC.projects.Resilience.advection import run_advection
prob = run_advection
elif prob_name == 'piline':
from pySDC.projects.Resilience.piline import run_piline
prob = run_piline
else:
raise NotImplementedError(f'Problem \"{prob_name}\" not implemented in this test!')
check_order(None, prob=prob, dts=[5e-1, 1e-1, 5e-2, 1e-2], num_nodes=num_nodes, quad_type=quad_type)
| 831 | 33.666667 | 104 | py |
pySDC | pySDC-master/pySDC/tests/test_projects/test_resilience/test_strategies.py | import pytest
STRATEGY_NAMES = [
'adaptivity',
'DIRK',
'iterate',
'explicitRK',
'doubleAdaptivity',
'collocationType',
'collocationRefinement',
'collocationDerefinement',
'adaptivityAvoidRestarts',
# 'adaptivityInterpolation',
'adaptivityQExtrapolation',
'base',
]
STRATEGY_NAMES_NONMPIONLY = ['adaptiveHR', 'HotRod']
LOGGER_LEVEL = 30
def single_test_vdp(strategy_name, useMPI=False, num_procs=1):
import numpy as np
from pySDC.helpers.stats_helper import get_sorted
from pySDC.projects.Resilience.vdp import run_vdp
import pySDC.projects.Resilience.strategies as strategies
from pySDC.implementations.hooks.log_work import LogWork
if useMPI:
from pySDC.implementations.hooks.log_errors import LogGlobalErrorPostRunMPI as errorhook
else:
from pySDC.implementations.hooks.log_errors import LogGlobalErrorPostRun as errorhook
# load the strategy
avail_strategies = {
'adaptivity': strategies.AdaptivityStrategy(useMPI=useMPI),
'DIRK': strategies.DIRKStrategy(useMPI=useMPI),
'adaptiveHR': strategies.AdaptiveHotRodStrategy(useMPI=useMPI),
'iterate': strategies.IterateStrategy(useMPI=useMPI),
'HotRod': strategies.HotRodStrategy(useMPI=useMPI),
'explicitRK': strategies.ERKStrategy(useMPI=useMPI),
'doubleAdaptivity': strategies.DoubleAdaptivityStrategy(useMPI=useMPI),
'collocationRefinement': strategies.AdaptivityCollocationRefinementStrategy(useMPI=useMPI),
'collocationDerefinement': strategies.AdaptivityCollocationDerefinementStrategy(useMPI=useMPI),
'collocationType': strategies.AdaptivityCollocationTypeStrategy(useMPI=useMPI),
'adaptivityAvoidRestarts': strategies.AdaptivityAvoidRestartsStrategy(useMPI=useMPI),
'adaptivityInterpolation': strategies.AdaptivityInterpolationStrategy(useMPI=useMPI),
'adaptivityQExtrapolation': strategies.AdaptivityExtrapolationWithinQStrategy(useMPI=useMPI),
'base': strategies.BaseStrategy(useMPI=useMPI),
}
    if strategy_name in avail_strategies.keys():
        strategy = avail_strategies[strategy_name]
    else:
        raise NotImplementedError(f'Strategy \"{strategy_name}\" not implemented for this test!')
prob = run_vdp
controller_params = {'logger_level': LOGGER_LEVEL}
stats, _, Tend = prob(
custom_description=strategy.get_custom_description(problem=prob, num_procs=num_procs),
hook_class=[errorhook, LogWork],
use_MPI=useMPI,
custom_controller_params=controller_params,
)
# things we want to test
tests = {
'e': ('e_global_post_run', max),
'k_newton': ('work_newton', sum),
}
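    # Each entry maps a label to (stats type, reduction): the global error is
    # reduced with max over the run, while Newton iterations are summed.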
for key, val in tests.items():
act = val[1]([me[1] for me in get_sorted(stats, type=val[0])])
ref = strategy.get_reference_value(prob, val[0], val[1], num_procs)
assert np.isclose(
ref, act, rtol=1e-2
), f'Error in \"{strategy.name}\" strategy ({strategy_name})! Expected {key}={ref} but got {act}!'
@pytest.mark.mpi4py
@pytest.mark.parametrize('strategy_name', STRATEGY_NAMES)
def test_strategy_with_vdp_MPI(strategy_name, num_procs=1):
single_test_vdp(strategy_name=strategy_name, useMPI=True, num_procs=num_procs)
@pytest.mark.base
@pytest.mark.parametrize('strategy_name', STRATEGY_NAMES + STRATEGY_NAMES_NONMPIONLY)
def test_strategy_with_vdp_nonMPI(strategy_name, num_procs=1):
single_test_vdp(strategy_name=strategy_name, useMPI=False, num_procs=num_procs)
if __name__ == '__main__':
for name in STRATEGY_NAMES + STRATEGY_NAMES_NONMPIONLY:
test_strategy_with_vdp_nonMPI(name)
for name in STRATEGY_NAMES:
test_strategy_with_vdp_MPI(name)
| 3,804 | 37.826531 | 106 | py |
pySDC | pySDC-master/pySDC/tests/test_projects/test_RDC/test_MLRDC_PFASST.py | import pytest
@pytest.mark.base
def test_RDC_flavors():
from pySDC.projects.RDC.vanderpol_MLSDC_PFASST_test import run_RDC
results = run_RDC(cwd='pySDC/projects/RDC/')
for item in results:
assert item[0] < 9e-06, 'Error too high, got %s' % item[0]
assert item[1] < 6.5, 'Iterations too high, got %s' % item[1]
| 342 | 25.384615 | 70 | py |
pySDC | pySDC-master/pySDC/tests/test_projects/test_RDC/__init__.py | 0 | 0 | 0 | py |
|
pySDC | pySDC-master/pySDC/tests/test_projects/test_TOMS/test_AllenCahn_contracting_circle.py | import pytest
@pytest.mark.base
@pytest.mark.slow
def test_AllenCahn_contracting_circle():
from pySDC.projects.TOMS.AllenCahn_contracting_circle import main
main(cwd='pySDC/projects/TOMS/')
| 201 | 19.2 | 69 | py |
pySDC | pySDC-master/pySDC/tests/test_projects/test_TOMS/test_visualize_pySDC_with_PETSc.py | import pytest
@pytest.mark.petsc
def test_visualize_pySDC_with_PETSc():
from pySDC.projects.TOMS.visualize_pySDC_with_PETSc import main
main(cwd='pySDC/projects/TOMS/')
| 180 | 19.111111 | 67 | py |
pySDC | pySDC-master/pySDC/tests/test_projects/test_TOMS/__init__.py | 0 | 0 | 0 | py |
|
pySDC | pySDC-master/pySDC/tests/test_projects/test_soft_failure/__init__.py | 0 | 0 | 0 | py |