Dataset schema (one row per source file; ranges are the observed minimum and maximum):

    repo             stringlengths   2 .. 99
    file             stringlengths   13 .. 225
    code             stringlengths   0 .. 18.3M
    file_length      int64           0 .. 18.3M
    avg_line_length  float64         0 .. 1.36M
    max_line_length  int64           0 .. 4.26M
    extension_type   stringclasses   1 value
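The records below follow this schema. As an aside for working with the dump, here is a minimal sketch of how such rows could be loaded and filtered. The file name finmag_files.jsonl and the JSON-lines storage format are assumptions (the source of this dump is not stated); the column names come from the schema above.

    import json

    def load_rows(path="finmag_files.jsonl"):
        # One JSON object per line, with the columns listed in the schema
        # above (the file name and on-disk format are assumptions).
        with open(path) as fh:
            return [json.loads(line) for line in fh]

    rows = load_rows()
    # Keep non-empty Python files whose longest line is reasonably short.
    selected = [r for r in rows
                if r["extension_type"] == "py"
                and r["file_length"] > 0
                and r["max_line_length"] <= 200]
    for r in selected[:5]:
        print(r["file"], r["file_length"], r["max_line_length"])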
repo: finmag
file: finmag-master/examples/nmag_example_2/__init__.py
code: (empty file)
file_length: 0
avg_line_length: 0
max_line_length: 0
extension_type: py
repo: finmag
file: finmag-master/examples/nmag_example_2/callgraph.py
code:
#!/usr/bin/env python

try:
    from pycallgraph import PyCallGraph
    from pycallgraph import Config
    from pycallgraph import GlobbingFilter
    from pycallgraph import Grouper
    from pycallgraph.output import GraphvizOutput
except ImportError:
    print "You need to install pycallgraph (for instance with `pip install pycallgraph`)."
    raise

from run_finmag import run_simulation

config = Config()
config.trace_grouper = Grouper(groups=[
    "finmag.util.*",
    "finmag.integrators.*",
    "finmag.energies.*",
    "finmag.sim.*",
])
# `max_depth`=15 is the level that would account for all calls to
# Exchange.__compute_field_petsc; 14 would miss those from TableWriter.save
config.trace_filter = GlobbingFilter(include=[
    'finmag.*',
    'run_finmag.*',
])

graphviz = GraphvizOutput(output_file='finmag_callgraph.png')

with PyCallGraph(output=graphviz, config=config):
    run_simulation()

file_length: 911
avg_line_length: 26.636364
max_line_length: 90
extension_type: py
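The numeric fields appear to be derived from the code column: for the record above, 33 lines totalling 911 characters give (911 - 32) / 33 ≈ 26.64, which matches avg_line_length if line lengths are measured without newline characters while file_length counts them. A small sketch that recomputes the statistics, assuming exactly that convention:

    def line_stats(code):
        # file_length counts every character, including newlines; the
        # per-line statistics measure lines without their trailing newline.
        lines = code.split("\n")
        avg = sum(len(l) for l in lines) / float(len(lines))
        return len(code), avg, max(len(l) for l in lines)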
repo: finmag
file: finmag-master/examples/nmag_example_2/test_nmag_example_2.py
code:
import os
import pytest
import subprocess as sp
import numpy as np
import run_finmag
from finmag.util.fileio import Tablereader

MODULE_DIR = os.path.dirname(os.path.abspath(__file__))


@pytest.mark.slow
def test_against_nmag():
    cwd_backup = os.getcwd()
    os.chdir(MODULE_DIR)
    try:
        cmd = ['make', 'clean']
        # check_call raises CalledProcessError on failure (plain call would
        # not, which would make the except clause below unreachable).
        sp.check_call(cmd)
        # the nmag file should be in version control. However, it is
        # convenient that the test can recompute it if needed.
        filename = 'averages_ref.txt'
        cmd = ['make', filename]
        sp.check_call(cmd)
        m_nmag = np.genfromtxt(os.path.join(MODULE_DIR, filename))

        filename = 'finmag_bar.ndt'
        if not os.path.exists(filename):
            run_finmag.run_simulation()
        f = Tablereader(os.path.join(MODULE_DIR, filename))
        m_finmag = np.array(f['time', 'm_x', 'm_y', 'm_z']).T
    except sp.CalledProcessError as ex:
        print("Running command '{}' was unsuccessful. The error "
              "message was: {}".format(cmd, ex.output))
        raise
    finally:
        os.chdir(cwd_backup)

    assert max(map(np.linalg.norm, m_nmag - m_finmag)) < 1.1e-4
    # atol is 0 by default when using assert_allclose

file_length: 1,218
avg_line_length: 27.348837
max_line_length: 68
extension_type: py
repo: finmag
file: finmag-master/examples/spatially-varying-anisotropy/run.py
code:
"""
Demonstrating spatially varying anisotropy.

Example with anisotropy vectors as follows:

-----------------------------------
--> --> --> --> --> --> --> --> -->
--> --> --> --> --> --> --> --> -->
--> --> --> --> --> --> --> --> -->
-----------------------------------
^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^
| | | | | | | | | | | |
| | | | | | | | | | | |
-----------------------------------

"""
import os
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import pylab
import dolfin as df
import matplotlib.pyplot as plt
from finmag import Simulation
from finmag.field import Field
from finmag.energies import UniaxialAnisotropy, Exchange

MODULE_DIR = os.path.dirname(os.path.abspath(__file__))


def run_simulation(plot=False):
    mu0 = 4.0 * np.pi * 10**-7  # vacuum permeability N/A^2
    Ms = 1.0e6  # saturation magnetisation A/m
    A = 13.0e-12  # exchange coupling strength J/m
    Km = 0.5 * mu0 * Ms**2  # magnetostatic energy density scale kg/ms^2
    lexch = (A / Km)**0.5  # exchange length m
    unit_length = 1e-9
    K1 = Km

    L = lexch / unit_length
    nx = 10
    Lx = nx * L
    ny = 1
    Ly = ny * L
    nz = 30
    Lz = nz * L
    mesh = df.BoxMesh(df.Point(0, 0, 0), df.Point(Lx, Ly, Lz), nx, ny, nz)

    # Anisotropy easy axis is (0, 0, 1) in the lower half of the film and
    # (1, 0, 0) in the upper half. This is a toy model of the exchange spring
    # systems that Bob Stamps is working on.
    boundary = Lz / 2.0
    expr_a = df.Expression(("x[2] <= b ? 0 : 1", "0", "x[2] <= b ? 1 : 0"),
                           b=boundary, degree=1)
    V = df.VectorFunctionSpace(mesh, "DG", 0, dim=3)
    a = Field(V, expr_a)

    sim = Simulation(mesh, Ms, unit_length)
    sim.set_m((1, 0, 1))
    sim.add(UniaxialAnisotropy(K1, a))
    sim.add(Exchange(A))
    sim.relax()

    if plot:
        points = 200
        zs = np.linspace(0, Lz, points)
        axis_zs = np.zeros((points, 3))  # easy axis probed along z-axis
        m_zs = np.zeros((points, 3))  # magnetisation probed along z-axis

        for i, z in enumerate(zs):
            axis_zs[i] = a((Lx / 2.0, Ly / 2.0, z))
            m_zs[i] = sim.m_field((Lx / 2.0, Ly / 2.0, z))

        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.plot(zs, axis_zs[:, 0], "-o", label="a_x")
        ax.plot(zs, axis_zs[:, 2], "-x", label="a_z")
        ax.plot(zs, m_zs[:, 0], "-", label="m_x")
        ax.plot(zs, m_zs[:, 2], "-", label="m_z")
        ax.set_xlabel("z (nm)")
        ax.legend(loc="upper left")
        plt.savefig(os.path.join(MODULE_DIR, "profile.png"))

    sim.m_field.save_pvd(os.path.join(MODULE_DIR, 'exchangespring.pvd'))


if __name__ == "__main__":
    run_simulation(plot=True)

file_length: 2,709
avg_line_length: 29.111111
max_line_length: 97
extension_type: py
repo: finmag
file: finmag-master/examples/precession/run.py
code:
import os
import numpy as np
import dolfin as df
import matplotlib.pyplot as plt
from finmag import Simulation
from finmag.energies import Demag, Exchange

MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
IMAGE = os.path.join(MODULE_DIR, 'precession.png')
ts = np.linspace(0, 3e-10)


def run_simulation(do_precession):
    Ms = 0.86e6
    mesh = df.BoxMesh(df.Point(0, 0, 0), df.Point(30e-9, 30e-9, 100e-9), 6, 6, 20)
    sim = Simulation(mesh, Ms)
    sim.set_m((1, 0, 1))
    sim.do_precession = do_precession
    sim.add(Demag())
    sim.add(Exchange(13.0e-12))

    averages = []
    for t in ts:
        sim.run_until(t)
        averages.append(sim.m_average)
    return np.array(averages)


subfigures = ("without precession", "with precession")
figure, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 4))
for i, subfigure_name in enumerate(subfigures):
    m = zip(*run_simulation(bool(i)))
    for dim in xrange(3):
        axes[i].plot(ts, m[dim], label="m{}".format(chr(120 + dim)))
    axes[i].legend()
    axes[i].set_title(subfigure_name)
    axes[i].set_xlabel("time (s)")
    axes[i].set_ylabel("unit magnetisation")
    axes[i].set_ylim([-0.1, 1.0])
figure.savefig(IMAGE)

file_length: 1,201
avg_line_length: 28.317073
max_line_length: 82
extension_type: py
repo: finmag
file: finmag-master/examples/exchange_1D/1d_plot.py
code:
import numpy
import finmag.util.helpers as helpers
from mayavi import mlab

"""
Visualise the final configuration of the magnetisation.
"""

x = numpy.genfromtxt("1d_coord.txt")
y = numpy.zeros(len(x))
z = numpy.zeros(len(x))

Ms = numpy.genfromtxt("1d_M.txt")
Mx, My, Mz = helpers.components(Ms[-1])

figure = mlab.figure(bgcolor=(0, 0, 0), fgcolor=(1, 1, 1))
q = mlab.quiver3d(x, y, z, Mx, My, Mz, figure=figure)
q.scene.z_plus_view()
mlab.axes(figure=figure)
mlab.show()

file_length: 474
avg_line_length: 18
max_line_length: 55
extension_type: py
repo: finmag
file: finmag-master/examples/exchange_1D/1d_anim.py
code:
import numpy
import finmag.util.helpers as helpers
from mayavi import mlab

"""
Visualise the evolution of the vector field M over time.
"""

x = numpy.genfromtxt("1d_coord.txt")
y = numpy.zeros(len(x))
z = numpy.zeros(len(x))

Ms = numpy.genfromtxt("1d_M.txt")
u, v, w = helpers.components(Ms[0])

figure = mlab.figure(bgcolor=(0, 0, 0), fgcolor=(1, 1, 1))
q = mlab.quiver3d(x, y, z, u, v, w, figure=figure)
q.scene.z_plus_view()
mlab.axes(figure=figure)

it = 0

@mlab.animate(delay=1000)
def animation():
    global it
    while True:
        u, v, w = helpers.components(Ms[it])
        q.mlab_source.set(u=u, v=v, w=w, scalars=w)
        it += 1
        if it == len(Ms):
            print "End of data."
            break
        yield

anim = animation()
mlab.show()

file_length: 769
avg_line_length: 20.388889
max_line_length: 56
extension_type: py
repo: finmag
file: finmag-master/examples/exchange_1D/run_visual.py
code:
"""
Attempt to visualise some dynamics using python's visual module.

Visualisation probably okay, but some convergence problem in the time
integration means this does not look smooth at all, and is very slow.
"""

import dolfin
import numpy
from scipy.integrate import odeint
from finmag.physics.llg import LLG

"""
Compute the behaviour of a one-dimensional strip of magnetic material,
with exchange interaction.
"""

length = 40e-9  # in meters
simplexes = 10
mesh = dolfin.Interval(simplexes, 0, length)

llg = LLG(mesh)
llg.H_app = (0, 0, llg.Ms)
llg.set_alpha(0.1)
llg.set_m(('1', '0', '0'))
llg.setup()
#llg.pins = [0, 10]

print "Solving problem..."

import visual

y = llg.m[:]
y.shape = (3, len(llg.m) / 3)

arrows = []
coordinates = (mesh.coordinates() / length - 0.5) * len(mesh.coordinates()) * 0.4
for i in range(y.shape[1]):
    pos = list(coordinates[i])
    thisM = y[:, i]
    while len(pos) < 3:  # visual python needs 3d vector
        pos.append(0.0)
    arrows.append(visual.arrow(pos=pos, axis=tuple(thisM)))

ts = numpy.linspace(0, 1e-10, 200)
for i in range(len(ts) - 1):
    ys, infodict = odeint(llg.solve_for, llg.m, [ts[i], ts[i + 1]],
                          full_output=True, printmessg=True)
    y = ys[-1, :]
    y.shape = (3, len(llg.M) / 3)
    for j in range(y.shape[1]):
        arrows[j].axis = tuple(y[:, j])
    print("i=%d/%d, t=%s" % (i, len(ts), ts[i])),
    print "nfe=%d, nje=%d" % (infodict['nfe'], infodict['nje'])

print "Done"

file_length: 1,412
avg_line_length: 24.232143
max_line_length: 97
extension_type: py
repo: finmag
file: finmag-master/examples/exchange_1D/1d_angles.py
code:
import numpy
import pylab
from finmag.util.helpers import vectors, norm, angle

# Load the data which dolfin has created and odeint has integrated.
Ms = numpy.genfromtxt("1d_M.txt")
# Each entry in ys is M for a particular moment in time.
# Each M is all the x-values of M on the mesh, followed by the y and z-values.

"""
Norm of M at each node, and the angles between M at the nodes
for the first and last moment of the simulation.
"""

M0 = vectors(Ms[0])
M1 = vectors(Ms[-1])

norms0 = [norm(M) for M in M0]
norms1 = [norm(M) for M in M1]

angles0 = [angle(M0[i], M0[i + 1]) for i in xrange(len(M0) - 1)]
angles1 = [angle(M1[i], M1[i + 1]) for i in xrange(len(M1) - 1)]

print "Initial configuration."
print M0, "\n", norms0
print "Final configuration."
print M1, "\n", norms1

print "Angles in the initial configuration."
print angles0
print "Angles at the end of the simulation."
print angles1

pylab.plot(angles0, label="beginning")
pylab.plot(angles1, label="end")
pylab.legend()
pylab.show()

file_length: 994
avg_line_length: 25.184211
max_line_length: 78
extension_type: py
repo: finmag
file: finmag-master/examples/exchange_1D/1d_dynamics.py
code:
import numpy
import pylab
from finmag.util.helpers import norm, angle, components, \
    vectors, rows_to_columns

# Load the data which dolfin has created and odeint has integrated.
Ms = numpy.genfromtxt("1d_M.txt")
# Each entry in ys is M for a particular moment in time.
# Each M is all the x-values of M on the mesh, followed by the y and z-values.

"""
The time series of the average value of M across the mesh.
"""

ts = numpy.genfromtxt("1d_times.txt")
averages = rows_to_columns(numpy.array([components(M).mean(1) for M in Ms]))

pylab.plot(ts, averages[0], ":", label="Mx")
pylab.plot(ts, averages[1], label="My")
pylab.plot(ts, averages[2], "-.", label="Mz")
pylab.legend()
pylab.title("dolfin - average magnetisation over time, without pinning")
pylab.xlabel("time [s]")
pylab.ylabel("magnetisation [A/m]")
pylab.show()

file_length: 838
avg_line_length: 30.074074
max_line_length: 78
extension_type: py
repo: finmag
file: finmag-master/examples/exchange_1D/1d_run.py
code:
import dolfin
import numpy
from scipy.integrate import odeint
from finmag.physics.llg import LLG
from finmag.energies import Exchange

"""
Compute the behaviour of a one-dimensional strip of magnetic material,
with exchange interaction.
"""

A = 1.3e-11
Ms = 8.6e5

length = 20e-9  # in meters
simplexes = 10
mesh = dolfin.Interval(simplexes, 0, length)
S1 = dolfin.FunctionSpace(mesh, "Lagrange", 1)
S3 = dolfin.VectorFunctionSpace(mesh, "Lagrange", 1, dim=3)

llg = LLG(S1, S3)
llg.set_m((
    '2*x[0]/L - 1',
    'sqrt(1 - (2*x[0]/L - 1)*(2*x[0]/L - 1))',
    '0'), L=length)
llg.pins = [0, 10]
exchange = Exchange(A)
llg.effective_field.add(exchange)

print "Solving problem..."
ts = numpy.linspace(0, 1e-9, 10)
ys, infodict = odeint(llg.solve_for, llg.m, ts, full_output=True)
print "Used", infodict["nfe"][-1], "function evaluations."

print "Saving data..."
numpy.savetxt("1d_times.txt", ts)
numpy.savetxt("1d_M.txt", ys)
numpy.savetxt("1d_coord.txt", mesh.coordinates().flatten())
print "Done."

file_length: 1,018
avg_line_length: 22.159091
max_line_length: 70
extension_type: py
repo: finmag
file: finmag-master/examples/macrospin/test_macrospin.py
code:
import dolfin
import os
import py
import numpy
import pytest
from finmag import Simulation
from finmag.energies import Zeeman
from finmag.util.macrospin import make_analytic_solution

MODULE_DIR = os.path.dirname(os.path.abspath(__file__))

"""
The analytical solution of the LLG equation for a constant applied field,
based on Appendix B of Matteo's PhD thesis, pages 127-128, equations
B.16-B.18.
"""


def compare_with_analytic_solution(alpha=0.5, max_t=1e-9):
    """
    Compares the C/dolfin/odeint solution to the analytical one.
    """
    print "Running comparison with alpha={0}.".format(alpha)

    # define 3d mesh
    x0 = y0 = z0 = 0
    x1 = y1 = z1 = 10e-9
    nx = ny = nz = 1
    mesh = dolfin.BoxMesh(dolfin.Point(x0, x1, y0), dolfin.Point(y1, z0, z1),
                          nx, ny, nz)

    sim = Simulation(mesh, Ms=1)
    sim.alpha = alpha
    sim.set_m((1, 0, 0))
    sim.add(Zeeman((0, 0, 1e6)))

    # plug in an integrator with lower tolerances
    sim.set_tol(abstol=1e-12, reltol=1e-12)

    ts = numpy.linspace(0, max_t, num=100)
    ys = numpy.array([(sim.advance_time(t), sim.m.copy())[1] for t in ts])
    tsfine = numpy.linspace(0, max_t, num=1000)
    m_analytical = make_analytic_solution(1e6, alpha, sim.gamma)
    save_plot(ts, ys, tsfine, m_analytical, alpha)

    TOLERANCE = 1e-6  # tolerance on Ubuntu 11.10, VM Hans, 25/02/2012

    rel_diff_maxs = list()
    for i in range(len(ts)):
        m = numpy.mean(ys[i].reshape((3, -1)), axis=1)
        m_ref = m_analytical(ts[i])
        diff = numpy.abs(m - m_ref)
        diff_max = numpy.max(diff)
        rel_diff_max = numpy.max(diff / numpy.max(m_ref))
        rel_diff_maxs.append(rel_diff_max)

        print "t= {0:.3g}, diff_max= {1:.3g}.".format(ts[i], diff_max)

        msg = "Diff at t= {0:.3g} too large.\nAllowed {1:.3g}. Got {2:.3g}."
        assert diff_max < TOLERANCE, msg.format(ts[i], TOLERANCE, diff_max)
    print "Maximal relative difference: "
    print numpy.max(numpy.array(rel_diff_maxs))


def save_plot(ts, ys, ts_ref, m_ref, alpha):
    ys3d = ys.reshape((len(ys), 3, 8)).mean(axis=-1)
    mx = ys3d[:, 0]
    my = ys3d[:, 1]
    mz = ys3d[:, 2]
    print "mx.shape", mx.shape
    print "m_analytical.shape", m_ref(ts).shape
    m_exact = m_ref(ts_ref)
    mx_exact = m_exact[0, :]
    my_exact = m_exact[1, :]
    mz_exact = m_exact[2, :]

    import matplotlib.pyplot as plt
    plt.plot(ts, mx, 'o', label='mx')
    plt.plot(ts, my, 'x', label='my')
    plt.plot(ts, mz, '^', label='mz')
    plt.plot(ts_ref, mx_exact, '-', label='mx (exact)')
    plt.plot(ts_ref, my_exact, '-', label='my (exact)')
    plt.plot(ts_ref, mz_exact, '-', label='mz (exact)')
    plt.xlabel('t [s]')
    plt.ylabel('m=M/Ms')
    plt.title(r'Macrospin dynamics: $\alpha$={}'.format(alpha))
    plt.grid()
    plt.legend()
    filename = ('alpha-%04.2f' % alpha)
    # latex does not like multiple '.' in image filenames
    filename = filename.replace('.', '-')
    plt.savefig(os.path.join(MODULE_DIR, filename + '.pdf'))
    plt.savefig(os.path.join(MODULE_DIR, filename + '.png'))
    plt.close()


@pytest.mark.requires_X_display
def test_macrospin_alpha_0_00001():
    compare_with_analytic_solution(alpha=0.00001, max_t=1e-11)


@pytest.mark.requires_X_display
def test_macrospin_alpha_0_001():
    compare_with_analytic_solution(alpha=0.001, max_t=1e-11)


@pytest.mark.requires_X_display
def test_macrospin_very_low_damping():
    compare_with_analytic_solution(alpha=0.02, max_t=0.5e-9)


@pytest.mark.requires_X_display
def test_macrospin_low_damping():
    compare_with_analytic_solution(alpha=0.1, max_t=4e-10)


@pytest.mark.requires_X_display
def test_macrospin_standard_damping():
    compare_with_analytic_solution(alpha=0.5, max_t=1e-10)


@pytest.mark.requires_X_display
def test_macrospin_higher_damping():
    compare_with_analytic_solution(alpha=1, max_t=1e-10)


if __name__ == "__main__":
    test_macrospin_very_low_damping()
    test_macrospin_low_damping()
    test_macrospin_standard_damping()
    test_macrospin_higher_damping()

file_length: 4,033
avg_line_length: 29.330827
max_line_length: 89
extension_type: py
repo: finmag
file: finmag-master/examples/macrospin/test_macrospin_alpha_rtol.py
code:
import py
import os
import dolfin
import numpy
import logging
import matplotlib.pyplot as plt
from finmag import Simulation
from finmag.energies import Zeeman
from test_macrospin import make_analytic_solution
from scipy.integrate import odeint

log = logging.getLogger(name='finmag')
log.setLevel(logging.WARNING)

"""
We gather the deviation between the analytical solution of the macrospin
problem and the computed one for some values of the tolerance of the time
integrator and an alpha ranging from 0.01 to 0.99.
"""

MODULE_DIR = os.path.dirname(os.path.abspath(__file__))

TOLERANCE = 2e-6

rtols_powers_of_ten = [-7, -8, -9, -10, -11]  # easier LaTeX formatting

mesh = dolfin.BoxMesh(dolfin.Point(0, 1, 0), dolfin.Point(1, 0, 1), 1, 1, 1)


def test_deviations_over_alpha_and_tol(number_of_alphas=5, do_plot=False):
    alphas = numpy.linspace(0.01, 1.00, number_of_alphas)

    max_deviationss = []
    for rtol_power_of_ten in rtols_powers_of_ten:
        rtol = pow(10, rtol_power_of_ten)
        print "#### New series for rtol={0}. ####".format(rtol)

        # One entry in this array corresponds to the maximum deviation between
        # the analytical solution and the computed solution for one value of
        # alpha.
        max_deviations = []
        for alpha in alphas:
            print "Solving for alpha={0}.".format(alpha)

            sim = Simulation(mesh, 1, unit_length=1e-9)
            sim.alpha = alpha
            sim.set_m((1, 0, 0))
            sim.add(Zeeman((0, 0, 1e5)))

            ts = numpy.linspace(0, 1e-9, num=50)
            ys = odeint(sim.llg.solve_for,
                        sim.llg._m_field.get_numpy_array_debug(), ts,
                        rtol=rtol, atol=rtol)

            # One entry in this array corresponds to the deviation between
            # the two solutions for one particular moment during the
            # simulation.
            deviations = []
            M_analytical = make_analytic_solution(1e5, alpha, sim.gamma)
            for i in range(len(ts)):
                M_computed = numpy.mean(ys[i].reshape((3, -1)), 1)
                M_ref = M_analytical(ts[i])
                # The difference of the two vectors has 3 components. The
                # deviation is the average over these components.
                deviation = numpy.mean(numpy.abs(M_computed - M_ref))
                assert deviation < TOLERANCE
                deviations.append(deviation)

            # This represents the addition of one point to the graph.
            max_deviations.append(numpy.max(deviations))

        # This represents one additional series in the graph.
        max_deviationss.append(max_deviations)

    if do_plot:
        for i in range(len(rtols_powers_of_ten)):
            label = r"$rtol=1\cdot 10^{" + str(rtols_powers_of_ten[i]) + r"}$"
            plt.plot(alphas, max_deviationss[i], ".", label=label)
        plt.legend()
        plt.title(r"Influence of $\alpha$ and rtol on the Deviation")
        plt.ylabel("deviation")
        plt.xlabel(r"$\alpha$")
        plt.ylim((0, 1e-6))
        plt.savefig(os.path.join(MODULE_DIR, "deviation_over_alpha_rtols.pdf"))


if __name__ == '__main__':
    test_deviations_over_alpha_and_tol(50, do_plot=True)

file_length: 3,160
avg_line_length: 37.54878
max_line_length: 110
extension_type: py
repo: finmag
file: finmag-master/examples/demag/run_nmag.py
code:
import os
import nmag, sys
from nmag import SI

try:
    meshfile = sys.argv[1]
    datafile = sys.argv[2]
except IndexError:
    print 'Usage: nmsim %s meshfile outputdatafile' % sys.argv[0]
    sys.exit(1)

# create simulation object
sim = nmag.Simulation()

# define magnetic material
Py = nmag.MagMaterial(name='Py',
                      Ms=SI(1.0, 'A/m'),
                      exchange_coupling=SI(13.0e-12, 'J/m'))

# load mesh
sim.load_mesh(meshfile, [('sphere', Py)], unit_length=SI(1e-9, 'm'))

# set initial magnetisation
sim.set_m([1, 0, 0])

# set external field
sim.set_H_ext([0, 0, 0], SI('A/m'))

# Save and display data in a variety of ways
sim.save_data(fields='all')  # save all fields spatially resolved
                             # together with average data

import numpy as np
Hd = sim.get_subfield('H_demag')
Hdx = Hd[:, 0]
N = len(Hdx)
exct = -1. / 3 * np.ones(N)
stddev = np.sqrt(1. / N * sum((Hdx - exct)**2))

f = open(datafile, 'a')
print "Saving to {}.".format(datafile)
print "Current directory: {}.".format(os.getcwd())
f.write('%s %s %s\n' % (str(np.average(Hdx)), str(max(Hdx)), str(stddev)))
f.close()

file_length: 1,149
avg_line_length: 22.469388
max_line_length: 74
extension_type: py
repo: finmag
file: finmag-master/examples/demag/test_field.py
code:
import os
import dolfin as df
from numpy import average
from finmag.energies import Demag
from finmag.field import Field
from finmag.util.meshes import from_geofile

TOL = 1e-3
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
Ms = 1e6


def test_field():
    """
    Test the demag field.

    H_demag should be equal to -1/3 M, and with m = (1, 0, 0) and Ms = 1,
    this should give H_demag = (-1/3, 0, 0).
    """
    # Using mesh with radius 10 nm (nmag ex. 1)
    mesh = from_geofile(os.path.join(MODULE_DIR, "sphere1.geo"))
    S3 = df.VectorFunctionSpace(mesh, "Lagrange", 1)
    m_function = df.interpolate(df.Constant((1, 0, 0)), S3)
    m = Field(S3, m_function)

    demag = Demag()
    demag.setup(m, Field(df.FunctionSpace(mesh, 'DG', 0), Ms), unit_length=1e-9)

    # Compute demag field
    H_demag = demag.compute_field()
    H_demag.shape = (3, -1)
    x, y, z = H_demag[0], H_demag[1], H_demag[2]

    print "Max values in direction:"
    print "x: %g, y: %g, z: %g" % (max(x), max(y), max(z))
    print "Min values in direction:"
    print "x: %g, y: %g, z: %g" % (min(x), min(y), min(z))

    x, y, z = average(x), average(y), average(z)
    print "Average values in direction"
    print "x: %g, y: %g, z: %g" % (x, y, z)

    # Compute relative errors
    x = abs((x + 1. / 3 * Ms) / Ms)
    y = abs(y / Ms)
    z = abs(z / Ms)

    print "Relative error:"
    print "x: %g, y: %g, z: %g" % (x, y, z)

    assert x < TOL, "x-average is %g, should be -1/3." % x
    assert y < TOL, "y-average is %g, should be zero." % y
    assert z < TOL, "z-average is %g, should be zero." % z


if __name__ == '__main__':
    test_field()

file_length: 1,648
avg_line_length: 27.929825
max_line_length: 80
extension_type: py
repo: finmag
file: finmag-master/examples/demag/short_nmag_comp_tests2.py
code:
##import io
import os
import sys
import time
import subprocess
import numpy as np
import dolfin as df
from finmag.field import Field
from finmag.util.meshes import from_geofile
from finmag.energies import Demag
from finmag.energies.demag.solver_gcr import FemBemGCRSolver
import pylab as p
import finmag.energies.demag.solver_base as sb
from finmag.physics.llg import LLG
import copy
import finmag

is_dolfin_1_1 = (finmag.util.versions.get_version_dolfin() == "1.1.0")


class FemBemGCRboxSolver(FemBemGCRSolver):
    "GCR Solver but with point evaluation of the q vector as the default"

    def __init__(self, parameters=sb.default_parameters, degree=1,
                 element="CG", project_method='magpar', bench=False,
                 qvector_method='box'):
        FemBemGCRSolver.__init__(self, parameters, degree, element,
                                 project_method, bench, qvector_method)


MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
nmagoutput = os.path.join(MODULE_DIR, 'nmag_data.dat')

# Need a clean file
if os.path.isfile(nmagoutput):
    os.remove(nmagoutput)

finmagsolvers = {"GCR": FemBemGCRSolver, "GCRbox": FemBemGCRboxSolver}

# Define data arrays to be filled with data for later plotting
vertices = []
initialdata = {k: [] for k in finmagsolvers.keys() + ["nmag"]}

[xavg, xmax, xmin, ymax, zmax, stddev, errorH, maxerror, errnorm] = \
    [copy.deepcopy(initialdata) for i in range(9)]

runtimes = {"bem": copy.deepcopy(initialdata),
            "solve": copy.deepcopy(initialdata)}

iterdict = {"poisson": [], "laplace": []}
krylov_iter = {k: copy.deepcopy(iterdict) for k in finmagsolvers.keys()}


def printsolverparams(mesh, m, Ms):
    output = open(os.path.join(MODULE_DIR, "linsolveparams.rst"), "w")
    for name, DemagClass in finmagsolvers.items():
        # create a solver to read out its default linear solver data
        demag = DemagClass()
        demag.setup(m, Ms, unit_length=1e-9)
        output.write("\nFinmag %s solver parameters:\n" % name)
        output.write("%s \n" % repr(demag.parameters.to_dict()))
        output.write("\nFinmag %s solver tolerances:" % name)
        print dir(demag)
        output.write("\nFirst linear solve :%s" % (demag.poisson_solver.parameters.to_dict()["relative_tolerance"]))
        output.write("\nSecond linear solve: %s \n \n" % (demag.laplace_solver.parameters.to_dict()["relative_tolerance"]))
    output.close()


def get_nmag_bemtime():
    """Read the nmag log to get the BEM assembly time"""
    inputfile = open("run_nmag_log.log", "r")
    nmaglog = inputfile.read()

    # The time should be between the two keywords
    keyword1 = "Populating BEM took"
    keyword2 = "seconds"
    begin = nmaglog.find(keyword1)
    end = nmaglog.find(keyword2, begin)
    time = nmaglog[begin + len(keyword1):end]
    return float(time)


#for maxh in (2, 1, 0.8, 0.7):
meshsizes = (5, 3, 2, 1.5, 1.0, 0.8)
#meshsizes = (5, 3, 2)
for i, maxh in enumerate(meshsizes):
    # Create geofile
    geo = """
algebraic3d

solid main = sphere (0, 0, 0; 10)-maxh=%s ;

tlo main;""" % str(maxh)
    absname = "sphere_maxh_%s" % str(maxh)
    geofilename = os.path.join(MODULE_DIR, absname)
    geofile = geofilename + '.geo'
    f = open(geofile, "w")
    f.write(geo)
    f.close()

    # Finmag data
    mesh = from_geofile(geofile)
    #mesh.coordinates()[:] = mesh.coordinates()[:]*1e-9  # this makes the results worse!!! HF
    print "Using mesh with %g vertices" % mesh.num_vertices()
    V = df.VectorFunctionSpace(mesh, "CG", 1, dim=3)
    DG0 = df.FunctionSpace(mesh, "DG", 0)

    # Old code
    """
    M = ("1.0", "0.0", "0.0")
    solver = FemBemGCRSolver(mesh,M)
    phi = solver.solve()
    H_demag = df.project(-df.grad(phi), V)
    """

    # Weiwei code
    """
    m = df.interpolate(df.Constant((1,0,0)), V)
    Ms = 1
    solver = SimpleFKSolver(V, m, Ms)
    H_demag = df.Function(V)
    demag = solver.compute_field()
    H_demag.vector()[:] = demag

    x, y, z = H_demag.split(True)
    x, y, z = x.vector().array(), y.vector().array(), z.vector().array()
    """

    m = Field(V, [1, 0, 0])
    Ms = Field(DG0, 1.0)

    # print solver parameters to file on the first run
    if i == 0:
        printsolverparams(mesh, m, Ms)

    # Get the number of mesh vertices for the x axis in the plots.
    vertices.append(mesh.num_vertices())

    # Get separate data for gcr and fk solvers
    for demagtype in finmagsolvers.keys():
        # Assemble the bem and get the time.
        starttime = time.time()
        solver = finmagsolvers[demagtype]()
        solver.setup(m, Ms, unit_length=1e-9)
        demag = solver.compute_field()
        endtime = time.time()

        # Store the times
        runtimes["bem"][demagtype].append(sb.demag_timer.time("build BEM", finmagsolvers[demagtype].__name__))
        runtimes["solve"][demagtype].append(endtime - starttime)

        # store the number of krylov iterations
        krylov_iter[demagtype]["poisson"].append(solver.poisson_iter)
        krylov_iter[demagtype]["laplace"].append(solver.laplace_iter)

        H_demag = df.Function(V)
        H_demag.vector()[:] = demag
        demag.shape = (3, -1)
        x, y, z = demag[0], demag[1], demag[2]

        # Find x max and x avg
        xavg[demagtype].append(np.average(x))
        xmax[demagtype].append(max(x))
        xmin[demagtype].append(min(x))
        ymax[demagtype].append(max(abs(y)))
        zmax[demagtype].append(max(abs(z)))

        # Find standard deviation
        func = H_demag.vector().array()
        N = len(func)
        exct = np.zeros(N)
        exct[:len(x)] = -1. / 3 * np.ones(len(x))
        sdev = np.sqrt(1. / N * sum((func - exct)**2))
        stddev[demagtype].append(sdev)

        # Find errornorm
        exact = df.interpolate(df.Constant((-1. / 3, 0, 0)), V)
        sphere_volume = 4 / 3. * np.pi * (10)**3
        errnorm[demagtype].append(df.errornorm(H_demag, exact, mesh=mesh) / sphere_volume)

        # actual error:
        tmperror = func - exct
        tmpmaxerror = max(abs(tmperror))
        errorH[demagtype].append(tmperror)
        maxerror[demagtype].append(tmpmaxerror)

    ####################
    # Generate Nmag Data
    ####################
    nmag_meshfile = geofilename + ".nmesh.h5"
    try:
        subprocess.call('netgen -geofile={} -meshfiletype="Neutral Format" -meshfile={}.neutral -batchmode'.format(geofile, geofilename), shell=True)
        subprocess.call('nmeshimport --netgen {}.neutral {}'.format(geofilename, nmag_meshfile), shell=True)
    except subprocess.CalledProcessError as e:
        print "Failed with returncode {}, output:\n{}".format(e.returncode, e.output)
        raise

    starttime = time.time()
    try:
        subprocess.call('nsim run_nmag.py --clean {} {}'.format(nmag_meshfile, nmagoutput), shell=True)
    except subprocess.CalledProcessError as e:
        print "Failed with returncode {}, output:\n{}".format(e.returncode, e.output)
        with open("run_nmag_log.log", "r") as f:
            print "Contents of nmag logfile:\n{}.".format(f.read())
        raise
    endtime = time.time()

    runtime = endtime - starttime
    bemtime = get_nmag_bemtime()

    runtimes["bem"]["nmag"].append(bemtime)
    runtimes["solve"]["nmag"].append(runtime - bemtime)

############################################
# Useful Plot xvalues
############################################

# Extract nmag data
f = open('nmag_data.dat', 'r')
lines = f.readlines()
f.close()
for line in lines:
    line = line.split()
    if len(line) == 3:
        xavg["nmag"].append(float(line[0]))
        xmax["nmag"].append(float(line[1]))
        stddev["nmag"].append(float(line[2]))

#p.plot(vertices, xavg["FK"], 'x--', label='Finmag FK x-avg')
#p.plot(vertices, xmax["FK"], 'o-', label='Finmag FK x-max')
#p.plot(vertices, xmin["FK"], '^:', label='Finmag FK x-min')
p.plot(vertices, xavg["nmag"], label='Nmag x-avg')
p.plot(vertices, xmax["nmag"], label='Nmag x-max')
p.title('Nmag - Finmag FK comparison')
p.xlabel('vertices')
p.grid()
p.legend(loc=0)
p.savefig(os.path.join(MODULE_DIR, 'xvalues.png'))

############################################
# Useful Plot xvalues GCR
############################################
if not is_dolfin_1_1:
    p.figure()
    # Plot
    p.plot(vertices, xavg["GCR"], 'x--', label='Finmag GCR x-avg')
    p.plot(vertices, xmax["GCR"], 'o-', label='Finmag GCR x-max')
    p.plot(vertices, xmin["GCR"], '^:', label='Finmag GCR x-min')
    p.plot(vertices, xavg["nmag"], label='Nmag x-avg')
    p.plot(vertices, xmax["nmag"], label='Nmag x-max')
    p.title('Nmag - Finmag GCR comparison')
    p.xlabel('vertices')
    p.grid()
    p.legend(loc=0)
    p.savefig(os.path.join(MODULE_DIR, 'xvaluesgcr.png'))

# Standard deviation plot
p.figure()
#p.plot(vertices, stddev["FK"], label='Finmag FK standard deviation')
if not is_dolfin_1_1:
    p.plot(vertices, stddev["GCR"], label='Finmag GCR standard deviation')
p.plot(vertices, stddev["nmag"], label='Nmag standard deviation')
p.xlabel('vertices')
p.title('Standard deviation')
p.grid()
p.legend(loc=0)
p.savefig(os.path.join(MODULE_DIR, 'stddev.png'))

# Error Norm convergence plot
p.figure()
#p.plot(vertices, errnorm["FK"], label='Finmag errornorm')
p.xlabel('vertices')
p.title('Error norm')
p.grid()
p.legend(loc=0)
p.savefig(os.path.join(MODULE_DIR, 'errnorm.png'))

# Max error plot
p.figure()
#p.plot(vertices, maxerror["FK"], 'o-', label='Finmag maxerror H_demag-x')
#p.plot(vertices, ymax["FK"], 'x-', label='Finmag maxerror H_demag-y')
#p.plot(vertices, zmax["FK"], '^-', label='Finmag maxerror H_demag-z')
p.xlabel('vertices')
p.title('Max Error per component')
p.grid()
p.legend(loc=0)
p.savefig(os.path.join(MODULE_DIR, 'maxerror.png'))

############################################
# Useful Plot Standard deviation
############################################
p.figure()
#p.loglog(vertices, stddev["FK"], label='Finmag FK standard deviation')
if not is_dolfin_1_1:
    p.loglog(vertices, stddev["GCR"], label='Finmag GCR standard deviation')
p.loglog(vertices, stddev["nmag"], label='Nmag standard deviation')
p.xlabel('vertices')
p.title('Standard deviation (log-log)')
p.grid()
p.legend(loc=0)
p.savefig(os.path.join(MODULE_DIR, 'stddev_loglog.png'))

############################################
# Useful Plot Error Norm log-log
############################################
p.figure()
#p.loglog(vertices, errnorm["FK"], label='Finmag FK errornorm')
if not is_dolfin_1_1:
    p.loglog(vertices, errnorm["GCR"], label='Finmag GCR errornorm')
p.loglog(vertices, errnorm["GCRbox"], label='Finmag GCR box method errornorm')
p.xlabel('vertices')
p.title('Error norm (log-log)')
p.grid()
p.legend(loc=0)
p.savefig(os.path.join(MODULE_DIR, 'errnorm_loglog.png'))

############################################
# Useful Plot bem and solve timings
############################################
titles = ["Runtime without Bem assembly", "Bem assembly times"]
for title, k in zip(titles, runtimes.keys()):
    p.figure()
    #p.loglog(vertices, runtimes[k]["FK"], 'o-', label='Finmag FK timings')
    if not is_dolfin_1_1:
        p.loglog(vertices, runtimes[k]["GCR"], 'x-', label='Finmag GCR timings')
    if title == "Runtime without Bem assembly":
        p.loglog(vertices, runtimes[k]["GCRbox"], 'x-', label='Finmag GCR box method timings')
    p.loglog(vertices, runtimes[k]["nmag"], label='Nmag timings')
    p.xlabel('vertices')
    p.ylabel('seconds')
    p.title(title)
    p.grid()
    p.legend(loc=0)
    p.savefig(os.path.join(MODULE_DIR, '%stimings.png' % k))

############################################
# Useful Plot krylov iterations
############################################
p.figure()
#p.plot(vertices, krylov_iter["FK"]["laplace"], 'o-', label='Finmag FK laplace')
#p.plot(vertices, krylov_iter["FK"]["poisson"], 'x-', label='Finmag FK poisson')
if not is_dolfin_1_1:
    p.plot(vertices, krylov_iter["GCR"]["laplace"], label='Finmag GCR laplace')
    p.plot(vertices, krylov_iter["GCR"]["poisson"], label='Finmag GCR poisson')
p.xlabel('vertices')
p.ylabel('iterations')
p.title('Krylov solver iterations')
p.grid()
p.legend(loc=0)
p.savefig(os.path.join(MODULE_DIR, 'krylovitr.png'))

print "Useful plots: errnorm_loglog.png, stddev.png, xvalues.png, xvaluesgcr.png, solvetimings.png, bemtimings.png, krylovitr.png"

file_length: 12,237
avg_line_length: 32.165312
max_line_length: 149
extension_type: py
repo: finmag
file: finmag-master/examples/demag/test_energy.py
code:
import os
import py
import pytest
import logging
import dolfin as df
from numpy import pi, sqrt
from finmag.energies import Demag
from finmag.field import Field
from finmag.util.meshes import from_geofile
from finmag.util.consts import mu0

log = logging.getLogger("finmag")
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
energy_file = os.path.join(MODULE_DIR, "demagenergies.txt")

Ms = 1e5
volume = 4 * pi / 3
E_analytical = mu0 * Ms**2 * volume / 6
TOL = 1.9e-2


@pytest.mark.slow
def test_demag_energy_fk():
    E, error = demag_energy()
    assert error < TOL


def demag_energy():
    mesh = from_geofile(os.path.join(MODULE_DIR, "sphere_fine.geo"))
    S3 = df.VectorFunctionSpace(mesh, "Lagrange", 1)
    m_function = df.interpolate(df.Constant((1, 0, 0)), S3)
    m = Field(S3, m_function)

    demag = Demag('FK')
    demag.setup(m, Field(df.FunctionSpace(mesh, 'DG', 0), Ms), unit_length=1)
    E = demag.compute_energy()

    rel_error = abs(E - E_analytical) / abs(E_analytical)
    print "Energy with FK method: {}.".format(E)
    return E, rel_error


if __name__ == '__main__':
    with open(energy_file, "w") as f:
        try:
            E, error = demag_energy()
        except Exception as e:
            print e
        else:
            f.write("FK Method: E = {}, relative error = {}.\n".format(E, error))

file_length: 1,339
avg_line_length: 25.8
max_line_length: 81
extension_type: py
repo: finmag
file: finmag-master/examples/varying_alpha/run.py
code:
import dolfin as df
from finmag.physics.llg import LLG

x0 = 0; x1 = 100e-9; xn = 50;
y0 = 0; y1 = 10e-9; yn = 5;
nanowire = df.RectangleMesh(df.Point(x0, y0), df.Point(x1, y1), xn, yn, "left/right")
S1 = df.FunctionSpace(nanowire, "Lagrange", 1)
S3 = df.VectorFunctionSpace(nanowire, "Lagrange", 1, dim=3)

llg = LLG(S1, S3)

"""
We want to increase the damping at the boundary of the object.
It is convenient to channel the power of dolfin expressions for this task.
"""
alpha_expression = df.Expression("(x[0] > x_limit) ? 1.0 : 0.5", x_limit=80e-9, degree=1)
llg.set_alpha(alpha_expression)

print "alpha vector:\n", llg.alpha.vector().array()
df.plot(llg.alpha, interactive=True)

file_length: 687
avg_line_length: 28.913043
max_line_length: 89
extension_type: py
repo: finmag
file: finmag-master/examples/cubic_anisotropy/sim.py
code:
import math
import numpy as np
from finmag import Simulation
from finmag.energies import CubicAnisotropy, Demag, Exchange, Zeeman
from finmag.util.consts import flux_density_to_field_strength
from finmag.util.meshes import cylinder

ps = 1e-12

# Mesh
mesh = cylinder(r=10, h=2.5, maxh=3.0, filename='disk')
unit_length = 1e-9

# Material Definition
Ms = 9.0e5  # saturation magnetisation in A/m
A = 2.0e-11  # exchange constant in J/m
alpha = 0.01  # damping constant, no unit
gamma = 2.3245e5  # m / (As)
u1 = (1, 0, 0)  # cubic anisotropy axes
u2 = (0, 1, 0)
K1 = -1e4  # anisotropy constant in J/m^3

# External Field
# the field will be zero, is this intended?
H_app_dir = np.array((0, 0, 0))
# converts Tesla to A/m (divides by mu0)
H_app_strength = flux_density_to_field_strength(1e-3)

# Spin-Polarised Current
current_density = 100e10  # in A/m^2
polarisation = 0.76
thickness = 2.5e-9  # in m
theta = math.pi
phi = math.pi / 2
direction = (math.sin(theta) * math.cos(phi),
             math.sin(theta) * math.sin(phi),
             math.cos(theta))

# Create Simulation
sim = Simulation(mesh, Ms, unit_length, name='disksim')
sim.alpha = alpha
sim.gamma = gamma
sim.set_m((0.01, 0.01, 1.0))
sim.set_stt(current_density, polarisation, thickness, direction)
sim.add(Demag())
sim.add(Zeeman(H_app_strength * H_app_dir))
sim.add(Exchange(A))
sim.add(CubicAnisotropy(u1, u2, K1))
sim.set_tol(reltol=1e-8, abstol=1e-8)  # timestepper tolerances

sim.schedule('save_m', every=10 * ps)
sim.schedule('save_averages', every=100 * ps)
sim.run_until(2000 * ps)

file_length: 1,560
avg_line_length: 25.016667
max_line_length: 68
extension_type: py
repo: finmag
file: finmag-master/examples/cubic_anisotropy/nmag_original.py
code:
import os, sys, math
from nsim.netgen import netgen_mesh_from_string
from nmag.common import SI, degrees_per_ns, Tesla, mu0, at, every
from nmag.nmag5 import Simulation, MagMaterial, cubic_anisotropy
from nsim.model import Value

ps = SI(1e-12, "s"); nm = SI(1e-9, "m")  # Useful definitions

theta_rad = 3.141592654
phi_rad = 1.570796327

#length, width, thick = (2*nm, 16*nm, 64*nm)  # System geometry

current_density = SI(100e10, "A/m^2")  # Applied current
Happ_dir = [0, 0, 0]  # Applied field (mT)

# Material definition
anis = cubic_anisotropy(axis1=[1, 0, 0], axis2=[0, 1, 0], K1=SI(-1e4, "J/m^3"))
mat = MagMaterial("Co",
                  Ms=SI(9.0e5, "A/m"),
                  exchange_coupling=SI(2.0e-11, "J/m"),
                  llg_gamma_G=SI(2.3245e5, "m/s A"),
                  llg_damping=SI(0.01),
                  anisotropy=anis)
mat.sl_P = 0.76  # Polarisation
mat.sl_lambda = 2.0  # lambda parameter
mat.sl_d = SI(2.5e-9, "m")  # Free layer thickness

sim = Simulation(do_sl_stt=True, do_demag=False)
sim.load_mesh("disc.nmesh.h5", [("region1", mat)], unit_length=nm)
sim.set_m([0.01, 0.01, 1])
sim.set_H_ext(Happ_dir, 0.001 * Tesla / mu0)

# Direction of the polarization
P_direction = [math.sin(theta_rad) * math.cos(phi_rad),
               math.sin(theta_rad) * math.sin(phi_rad),
               math.cos(theta_rad)]

# Set the polarization direction and current density
sim.model.quantities["sl_fix"].set_value(Value(P_direction))
sim.model.quantities["sl_current_density"].set_value(Value(current_density))

# Define the tolerances for the simulation
sim.set_params(stopping_dm_dt=0 * degrees_per_ns,
               ts_rel_tol=1e-8, ts_abs_tol=1e-8,
               ts_pc_rel_tol=1e-3, ts_pc_abs_tol=1e-8,
               demag_dbc_rel_tol=1e-6, demag_dbc_abs_tol=1e-6)

sim.relax(save=[('fields', at('convergence') | every("time", 10 * ps)),
                ('averages', every('time', SI(0.1e-9, "s")) | at('stage_end'))],
          do=[("exit", at("time", 2000 * ps))])

file_length: 2,157
avg_line_length: 13.882759
max_line_length: 133
extension_type: py
repo: finmag
file: finmag-master/examples/cubic_anisotropy/hysteresis.py
code:
import numpy as np
import dolfin as df
from finmag import Simulation
from finmag.energies import Exchange, CubicAnisotropy
from finmag.util.consts import mu0

mesh = df.BoxMesh(df.Point(0, 0, 0), df.Point(1, 1, 40), 1, 1, 40)

Ms = 876626  # A/m
A = 1.46e-11  # J/m
K1 = -8608726
K2 = -13744132
K3 = 1100269
u1 = (0, -0.7071, 0.7071)
u2 = (0, 0.7071, 0.7071)
u3 = (-1, 0, 0)  # perpendicular to u1 and u2

# specification of fields close to oommf reference cubicEight_100pc.mif
# on http://www.southampton.ac.uk/~fangohr/software/oxs_cubic8.html
fields = np.zeros((250, 3))
fields[:, 0] = 5
fields[:, 1] = 5
fields[:, 2] = np.linspace(20000, -20000, 250)
fields = fields * 0.001 / mu0  # mT to A/m

sim = Simulation(mesh, Ms, unit_length=1e-9)
sim.set_m((0, 0, 1))
sim.add(Exchange(A))
sim.add(CubicAnisotropy(K1, u1, K2, u2, K3, u3))

# this is not a hysteresis loop, but just a one-way sweep
mzs = sim.hysteresis(fields, lambda sim: sim.m_average[2])

result = np.zeros((250, 2))
result[:, 0] = fields[:, 2]
result[:, 1] = mzs
np.savetxt("hysteresis.txt", result,
           header="field in A/m and corresponding unit magnetisation in z-direction")

file_length: 1,144
avg_line_length: 28.358974
max_line_length: 111
extension_type: py
repo: finmag
file: finmag-master/examples/boost_python/demo2_classdef/demo2.py
code:
import demo2_module

c = demo2_module.console()
c.print_line("Hello World - demo2")
c.print_line()

file_length: 102
avg_line_length: 10.444444
max_line_length: 35
extension_type: py
repo: finmag
file: finmag-master/examples/boost_python/demo3_numpy/demo3_exceptions.py
code:
import numpy as np
import sys
import traceback
import demo3_module

a = np.array([[1, 2], [3, 4]], dtype=float)

print "Providing a vector instead of a matrix as an argument\n"
try:
    demo3_module.trace(a[0])
    raise Exception("An exception has not been raised")
except:
    traceback.print_exception(*sys.exc_info())

print "------------------\nProviding a non-contiguous array\n"
try:
    demo3_module.trace(a.T)
    raise Exception("An exception has not been raised")
except:
    traceback.print_exception(*sys.exc_info())

file_length: 531
avg_line_length: 23.181818
max_line_length: 63
extension_type: py
repo: finmag
file: finmag-master/examples/boost_python/demo3_numpy/demo3.py
code:
import numpy as np
import demo3_module

a = np.array([[1, 2], [3, 4]], dtype=float)
print "Trace of a:", demo3_module.trace(a)

file_length: 128
avg_line_length: 17.428571
max_line_length: 43
extension_type: py
repo: finmag
file: finmag-master/examples/boost_python/demo4_dolfin/demo4.py
code:
import dolfin as df
import demo4_module

mesh = df.UnitCubeMesh(5, 5, 5)
print "Number of vertices:", demo4_module.get_num_vertices(mesh)

file_length: 138
avg_line_length: 22.166667
max_line_length: 64
extension_type: py
repo: finmag
file: finmag-master/examples/boost_python/demo4_dolfin/demo4_swig_objects.py
code:
import dolfin as df

mesh = df.UnitCubeMesh(5, 5, 5)
print "mesh = ", mesh, ", type(mesh) =", type(mesh)
print "mesh.this = ", mesh.this, ", type(mesh.this) =", type(mesh.this)

file_length: 176
avg_line_length: 24.285714
max_line_length: 71
extension_type: py
repo: finmag
file: finmag-master/examples/boost_python/petsc/demo5.py
code:
import dolfin as df
import demo5_module

mesh = df.UnitCubeMesh(5, 5, 5)
print "Number of vertices:", demo5_module.get_num_vertices(mesh)

V = df.FunctionSpace(mesh, 'Lagrange', 1)
expr = df.Expression('sin(x[0])', degree=1)
M = df.interpolate(expr, V)
print 'vector length', demo5_module.get_vector_local_size(M.vector())

file_length: 324
avg_line_length: 24
max_line_length: 68
extension_type: py
repo: finmag
file: finmag-master/examples/boost_python/demo1_helloworld/demo1.py
code:
import demo1_module

demo1_module.print_line("Hello World!")

file_length: 61
avg_line_length: 14.5
max_line_length: 39
extension_type: py
repo: finmag
file: finmag-master/src/setup-native.py
code:
#!/usr/bin/env python

# Run native_compiler.make_modules() as part of the distribution
# process.

# See
# http://stackoverflow.com/questions/279237/python-import-a-module-from-a-folder
# for info on this addition before the import line, which works for modules in
# arbitrary directories (here, "finmag/util")
import os, sys, inspect
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile(inspect.currentframe()))[0], "finmag/util")))
if cmd_subfolder not in sys.path:
    sys.path.insert(0, cmd_subfolder)

import native_compiler
native_compiler.make_modules()

file_length: 606
avg_line_length: 29.35
max_line_length: 138
extension_type: py
repo: finmag
file: finmag-master/src/print_tarball_name.py
code:
import os
import sys
from finmag.util.helpers import binary_tarball_name

try:
    revision = sys.argv[1]
    suffix = sys.argv[2]
    destdir = sys.argv[3]
except IndexError:
    print "Usage: print_tarball_name REVISION SUFFIX DESTDIR"
    sys.exit(0)

name = binary_tarball_name(repo=os.curdir, revision=revision, suffix=suffix)
print(os.path.join(destdir, name))

file_length: 368
avg_line_length: 22.0625
max_line_length: 76
extension_type: py
repo: finmag
file: finmag-master/src/setup-cython.py
code:
# build script for compiled version of finmag

# change this as needed
libincludedir = "."

import sys
import os
from distutils.core import setup
from distutils.extension import Extension

# we'd better have Cython installed, or it's a no-go
try:
    from Cython.Distutils import build_ext
except:
    print "You don't seem to have Cython installed. Please get a"
    print "copy from www.cython.org and install it"
    sys.exit(1)

# scan the directory for extension files, converting
# them to extension names in dotted notation
files_to_ignore = ['llg.py',
                   'bem_computation_tests.py',
                   'test_hello.py',
                   'native_compiler.py',
                   # 'solver_base.py',  # abstract method
                   # 'energy_base.py',  # abstract method
                   'oommf_calculator.py',  # oommf/test_mesh.py fails
                   'magpar.py',  # +3 failures
                   'consts.py',  # +37 failures when compiled.
                   'solid_angle_magpar.py',  # +1 failure
                   'material.py',  # +1 failure
                   'solver_gcr.py',  # +2 failures
                   'helpers.py',  # +1 failure
                   '__init__.py',  # +2 failures (but only
                                   # finmag.sim.integrator.__init__),
                                   # not other __init__ files.
                   'test_mesh.py',  # py.test will not read a .so, no point
                                    # compiling this test-file.
                   ]

directories_to_ignore = ['tests']


def scandir(dir, files=[]):
    for file in os.listdir(dir):
        path = os.path.join(dir, file)
        if os.path.isfile(path) and path.endswith(".py") and \
                file not in files_to_ignore:
            files.append(path.replace(os.path.sep, ".")[:-3])
        elif os.path.isdir(path):
            thisdirectoryname = os.path.split(path)[1]
            if thisdirectoryname not in directories_to_ignore:
                scandir(path, files)
            else:
                print("skipping directory dir =%20s, path=%s" % (dir, path))
    return files


# generate an Extension object from its dotted name
def makeExtension(extName):
    extPath = extName.replace(".", os.path.sep) + ".py"
    return Extension(
        extName,
        [extPath],
        include_dirs=[libincludedir, "."],  # adding the '.' to include_dirs
                                            # is CRUCIAL!!
        #extra_compile_args = ["-O3", "-Wall"],
        #libraries = ["dv",],
    )


# get the list of extensions
extNames = scandir("finmag")

# and build up the set of Extension objects
extensions = [makeExtension(name) for name in extNames]
print "extNames are\n", extNames
#print "extensions are\n", extensions

with open('extension_names.txt', 'w') as ext_name_f:
    for ext_name in extNames:
        ext_name_f.write(ext_name + '\n')

# finally, we can pass all this to distutils
setup(
    name="finmag",
    packages=["finmag", "finmag.energies", "finmag.sim", 'finmag.util'],
    ext_modules=extensions,
    cmdclass={'build_ext': build_ext},
)

file_length: 3,236
avg_line_length: 33.43617
max_line_length: 109
extension_type: py
repo: finmag
file: finmag-master/src/distcp.py
code:
import os
import shutil
import argparse

directories_to_ignore = ['build', '__pycache__']


def cp_file(sourcedir, filename, targetdir):
    # The only relevant case is if we have a .so file for a given .py,
    # then don't copy the .py. If the .so file is __init__.py, we need
    # to copy an empty __init__.py.

    # Create directory if it does not exist (it might be empty in the end)
    if not os.path.exists(targetdir):
        print "Creating directory %s" % targetdir
        os.makedirs(targetdir)

    path = os.path.join(sourcedir, filename)
    if filename.endswith('.py'):
        if os.path.exists(path[:-3] + ".so"):
            if filename != "__init__.py":
                # Create empty init.py file
                #f = open(os.path.join(targetdir, "__init__.py"), "w")
                #f.close()
                print "skipping py-file %s as a so exists" % filename
                return  # don't copy that Python file because we have a .so
        if os.path.exists(path[:-3] + ".pyc"):
            if (('test' not in filename) and
                    (filename != "__init__.py") and
                    (filename != "run_nmag_Eexch.py")  # for a test that
                                                       # passes this
                                                       # filename to nsim
                    ):
                print 'skipping py-file %s as suitable pyc exists' % filename
                return  # don't copy any .pyc test-files
    elif filename.endswith('pyc'):
        if (('test' in filename) or filename.startswith('__init__') or
                (os.path.exists(path[:-4] + ".so"))):
            print("Skipping pyc file ({}) as it is a test or init, "
                  "or a .so exists".format(filename))
            return
    elif filename.endswith('c'):
        print "Skipping .c file (%s)" % filename
        return

    print("Copying %s" % path)
    shutil.copyfile(path, os.path.join(targetdir, filename))


def scandir(srcdir, files=[]):
    for file_ in os.listdir(srcdir):
        path = os.path.join(srcdir, file_)
        #print "working %s / %s" % (srcdir, file_)
        if os.path.isfile(path):
            cp_file(srcdir, file_, os.path.join(targetdir, srcdir))
        elif (os.path.isdir(path) and
              os.path.split(path) not in directories_to_ignore):
            scandir(path, files)
    return files


def distcp(targetdir):
    print scandir('finmag', targetdir)


def get_linux_issue():
    """Same code as in finmag.util.version - it is difficult (at the moment)
    to re-use the existing code as we need to use this function to create the
    finmag/util/binary.py file. If we import finmag to access this function,
    it will try to check for the binary.py file which we are just trying to
    create. This should be possible to detangle once we have a working
    setup..."""
    try:
        f = open("/etc/issue")
    except IOError:
        print "Can't read /etc/issue -- this is odd?"
        raise RuntimeError("Cannot establish linux version")
    issue = f.readline()  # only return first line
    issue = issue.replace('\\l', '')
    issue = issue.replace('\\n', '')
    #logger.debug("Linux OS = '%s'" % issue)
    return issue.strip()  # get rid of white space left and right


def storeversions(targetfile):
    """Target file should be something like 'finmag/util/binary.py'

    The data in the file is used to store which version of software
    we had when the binary distribution was created."""
    #if os.path.exists(targetfile):
    #    print("This is odd: the file '%s' exists already, but is only" % targetfile)
    #    print("meant to be created now (in function storeversions() in distcp.py)")
    #    raise RuntimeError("odd error when running %s" % __file__)
    f = open(targetfile, 'w')
    f.write("buildlinux = '%s'\n" % get_linux_issue())
    f.close()


if __name__ == '__main__':
    descr = 'Copy FinMag files to alternative location'
    parser = argparse.ArgumentParser(description=descr)
    parser.add_argument('destination-dir', type=str,
                        help='The directory to copy FinMag files to')
    args = parser.parse_args()
    targetdir = vars(args)['destination-dir']
    if not os.path.exists(targetdir):
        os.makedirs(targetdir)
    distcp(targetdir)
    storeversions(os.path.join(targetdir, "finmag/util/binary.py"))

file_length: 4,398
avg_line_length: 34.192
max_line_length: 85
extension_type: py
finmag
finmag-master/src/finmag/field.py
""" Representation of scalar and vector fields, as well as operations on them backed by a dolfin function. This module exists because things like per-node operations or exporting of field values to convenient formats are awkward to do in dolfin currently. Additionally, some things are non-trivial to get right, especially in parallel. This class therefore acts as a "single point of contant", so that we don't duplicate code all over the FinMag code base. """ import logging import dolfin as df import numpy as np import numbers import os import dolfinh5tools from finmag.util import helpers from finmag.util.helpers import expression_from_python_function from finmag.util.visualization import plot_dolfin_function log = logging.getLogger(name="finmag") def associated_scalar_space(functionspace): """ Given any dolfin function space (which may be a scalar or vector space), return a scalar function space on the same mesh defined by the same finite element family and degree. """ fs_family = functionspace.ufl_element().family() fs_degree = functionspace.ufl_element().degree() return df.FunctionSpace(functionspace.mesh(), fs_family, fs_degree) class Field(object): """ Representation of scalar and vector fields using a dolfin function. You can set the field values using a wide range of object types: - tuples, lists, ints, floats, basestrings, numpy arrrays - dolfin constants, expressions and functions - callables - files in hdf5 The Field class provides raw access to the field at some particular point or all nodes. It also computes derived entities of the field, such as spatially averaged energy. It outputs data suited for visualisation or storage. """ def __init__(self, functionspace, value=None, normalised=False, name=None, unit=None): self.functionspace = functionspace self.f = df.Function(self.functionspace) self.name = name if value is not None: self.value = value self.set(value, normalised=normalised) if name is not None: self.f.rename(name, name) # set function's name and label self.unit = unit functionspace_family = self.f.ufl_element().family() if functionspace_family == 'Lagrange': dim = self.value_dim() self.v2d_xyz, self.v2d_xxx, self.d2v_xyz, self.d2v_xxx = helpers.build_maps(self.functionspace, dim) def __call__(self, x): """ Shorthand so user can do field(x) instead of field.f(x) to interpolate. """ return self.f(x) def assert_is_scalar_field(self): if self.value_dim() != 1: raise ValueError( "This operation is only defined for scalar fields.") def from_array(self, arr): assert isinstance(arr, np.ndarray) if arr.shape == (3,) and (isinstance(self.functionspace, df.FunctionSpace) and self.functionspace.num_sub_spaces() == self.value_dim()): self.from_constant(df.Constant(arr)) else: if arr.shape[0] == self.f.vector().local_size(): self.f.vector().set_local(arr) else: # in serial, local_size == size, so this will only warn in parallel log.warning("Global setting of field values by overwriting with np.array.") self.f.vector()[:] = arr def from_callable(self, func): assert hasattr(func, "__call__") and not isinstance(func, df.Function) expr = expression_from_python_function(func, self.functionspace) self.from_expression(expr) def from_constant(self, constant): assert isinstance(constant, df.Constant) self.f.assign(constant) def from_expression(self, expr, **kwargs): """ Set field values using dolfin expression or the ingredients for one, in which case it will build the dolfin expression for you. 
""" if not isinstance(expr, df.Expression): if isinstance(self.functionspace, df.FunctionSpace) and self.functionspace.num_sub_spaces() == 0: assert (isinstance(expr, basestring) or isinstance(expr, (tuple, list)) and len(expr) == 1) expr = str(expr) # dolfin does not like unicode in the expression if isinstance(self.functionspace, df.FunctionSpace) and \ self.functionspace.num_sub_spaces() == 3: assert isinstance(expr, (tuple, list)) and len(expr) == 3 assert all(isinstance(item, basestring) for item in expr) map(str, expr) # dolfin does not like unicode in the expression expr = df.Expression(expr, degree=1, **kwargs) temp_function = df.interpolate(expr, self.functionspace) self.f.vector().set_local(temp_function.vector().get_local()) def from_field(self, field): assert isinstance(field, Field) if self.functionspace == field.functionspace: self.f.vector().set_local(field.f.vector().get_local()) else: temp_function = df.interpolate(field.f, self.functionspace) self.f.vector().set_local(temp_function.vector().get_local()) def from_function(self, function): assert isinstance(function, df.Function) self.f.vector().set_local(function.vector().get_local()) def from_generic_vector(self, vector): assert isinstance(vector, df.GenericVector) self.f.vector().set_local(vector.get_local()) def from_sequence(self, seq): assert isinstance(seq, (tuple, list)) self._check_can_set_vector_value(seq) self.from_constant(df.Constant(seq)) def _check_can_set_scalar_value(self): if not self.functionspace.num_sub_spaces() == 0: raise ValueError("Cannot set vector field with scalar value.") def _check_can_set_vector_value(self, seq): if not (isinstance(self.functionspace, df.FunctionSpace) and self.functionspace.num_sub_spaces() == self.value_dim()): raise ValueError("Cannot set scalar field with vector value.") if len(seq) != self.functionspace.num_sub_spaces(): raise ValueError( "Cannot set vector field with value of non-matching dimension " "({} != {})", len(seq), self.functionspace.num_sub_spaces()) def set(self, value, normalised=False, **kwargs): """ Set field values using `value` and normalise if `normalised` is True. The parameter `value` can be one of many different types, as described in the class docstring. This method avoids the user having to find the correct `from_*` method to call. 
""" if isinstance(value, df.Constant): self.from_constant(value) elif isinstance(value, df.Expression): self.from_expression(value) elif isinstance(value, df.Function): self.from_function(value) elif isinstance(value, Field): self.from_field(value) elif isinstance(value, df.GenericVector): self.from_generic_vector(value) elif isinstance(value, (int, float)): self._check_can_set_scalar_value() self.from_constant(df.Constant(value)) elif isinstance(value, basestring): self._check_can_set_scalar_value() self.from_expression(value, **kwargs) elif (isinstance(value, (tuple, list)) and all(isinstance(item, basestring) for item in value)): self._check_can_set_vector_value(value) self.from_expression(value, **kwargs) elif isinstance(value, (tuple, list)): self.from_sequence(value) elif isinstance(value, np.ndarray): self.from_array(value) elif hasattr(value, '__call__'): # this matches df.Function as well, so this clause needs to # be after the one checking for df.Function self.from_callable(value) else: raise TypeError("Can't set field values using {}.".format(type(value))) if normalised: self.normalise() def set_with_numpy_array_debug(self, value, normalised=False): """ONLY for debugging""" self.f.vector().set_local(value) if normalised: self.normalise() def get_ordered_numpy_array(self): """ For a scalar field, return the dolfin function as an ordered numpy array, such that the field values are in the same order as the vertices of the underlying mesh (as returned by `mesh.coordinates()`). Note: This function is only defined for scalar fields and raises an error if it is applied to a vector field. For the latter, use either get_ordered_numpy_array_xxx or get_ordered_numpy_array_xyz depending on the order in which you want the values to be returned. """ self.assert_is_scalar_field() return self.get_ordered_numpy_array_xxx() def get_ordered_numpy_array_xyz(self): """ Returns the dolfin function as an ordered numpy array, so that all components at the same node are grouped together. For example, for a 3d vector field the values are returned in the following order: [f_1x, f_1y, f_1z, f_2x, f_2y, f_2z, f_3x, f_3y, f_3z, ...] Note: In the case of a scalar field this function is equivalent to `get_ordered_numpy_array_xxx` (but for vector fields they yield different results). """ return self.get_numpy_array_debug()[self.v2d_xyz] def get_ordered_numpy_array_xxx(self): """ Returns the dolfin function as an ordered numpy array, so that all x-components at different nodes are grouped together, and similarly for the other components. For example, for a 3d vector field the values are returned in the following order: [f_1x, f_2x, f_3x, ..., f_1y, f_2y, f_3y, ..., f_1z, f_2z, f_3z, ...] Note: In the case of a scalar field this function is equivalent to `get_ordered_numpy_array_xyz` (but for vector fields they yield different results). 
""" return self.get_numpy_array_debug()[self.v2d_xxx] # def order2_to_order1(self, order2): # """Returns the dolfin function as an ordered numpy array, so that # in the case of vector fields all components of different nodes # are grouped together.""" # n = len(order2) # return ((order2.reshape(3, n/3)).transpose()).reshape(n) # # def order1_to_order2(self, order1): # """Returns the dolfin function as an ordered numpy array, so that # in the case of vector fields all components of different nodes # are grouped together.""" # n = len(order1) # return ((order1.reshape(n/3, 3)).transpose()).reshape(n) def set_with_ordered_numpy_array(self, ordered_array): """ Set the scalar field using an ordered numpy array (where the field values have the same ordering as the vertices in the underlying mesh). This function raises an error if the field is not a scalar field. """ self.assert_is_scalar_field() self.set_with_ordered_numpy_array_xxx(ordered_array) def set_with_ordered_numpy_array_xyz(self, ordered_array): """ Set the field using an ordered numpy array in "xyz" order. For example, for a 3d vector field the values should be arranged as follows: [f_1x, f_1y, f_1z, f_2x, f_2y, f_2z, f_3x, f_3y, f_3z, ...] For a scalar field this function is equivalent to `set_with_ordered_numpy_array_xxx`. """ self.set(ordered_array[self.d2v_xyz]) def set_with_ordered_numpy_array_xxx(self, ordered_array): """ Set the field using an ordered numpy array in "xxx" order. For example, for a 3d vector field the values should be arranged as follows: [f_1x, f_2x, f_3x, ..., f_1y, f_2y, f_3y, ..., f_1z, f_2z, f_3z, ...] For a scalar field this function is equivalent to `set_with_ordered_numpy_array_xyz`. """ self.set(ordered_array[self.d2v_xxx]) def set_random_values(self, vrange=[-1, 1]): """ This is a helper function useful for debugging. It fills the array with random values where each coordinate is uniformly distributed from the half-open interval `vrange` (default: vrange=[-1, 1)). """ shape = self.f.vector().array().shape a, b = vrange vals = np.random.random_sample(shape) * float(b - a) + a self.set(vals) def as_array(self): return self.f.vector().array() def as_vector(self): return self.f.vector() def get_numpy_array_debug(self): """ONLY for debugging""" return self.f.vector().array() def is_scalar_field(self): """ Return `True` if the Field is a scalar field and `False` otherwise. """ if self.functionspace.num_sub_spaces() == 0: return True def is_constant(self, eps=1e-14): """ Return `True` if the Field has a unique constant value across the mesh and `False` otherwise. """ # Scalar field if self.is_scalar_field(): maxval = self.f.vector().max() # global (!) maximum value minval = self.f.vector().min() # global (!) minimum value return (maxval - minval) < eps # Vector field else: raise NotImplementedError() def as_constant(self, eps=1e-14): """ If the Field has a unique constant value across the mesh, return this value. Otherwise a RuntimeError is raised. """ if self.is_scalar_field(): maxval = self.f.vector().max() # global (!) maximum value minval = self.f.vector().min() # global (!) minimum value if (maxval - minval) < eps: return maxval else: raise RuntimeError("Field does not have a unique constant value.") else: raise NotImplementedError() def average(self, dx=df.dx): """ Return the spatial field average. Returns: f_average (float for scalar and np.ndarray for vector field) """ # Compute the mesh "volume". For 1D mesh "volume" is the length and # for 2D mesh is the area of the mesh. 
volume = df.assemble(df.Constant(1) * dx(self.mesh())) # Scalar field. if self.is_scalar_field(): return df.assemble(self.f * dx) / volume # Vector field. else: f_average = [] # Compute the average for every vector component independently. for i in xrange(self.value_dim()): f_average.append(df.assemble(self.f[i] * dx)) return np.array(f_average) / volume def coords_and_values(self, t=None): """ If the field is defined on a function space with degrees of freedom at mesh vertices only, return a list of mesh coordinates and associated field values (in the same order). """ # The function values are defined at mesh nodes only for # specific function space families. In finmag, the only families # of interest are Lagrange (CG) and Discontinuous Lagrange (DG). # Therefore, if the function space is not CG-family-type, # values cannot be associated to mesh nodes. functionspace_family = self.f.ufl_element().family() if functionspace_family == 'Discontinuous Lagrange': # Function values are not defined at nodes. raise TypeError('The function space is Discontinuous Lagrange ' '(DG) family type, for which the function values ' 'are not defined at mesh nodes.') elif functionspace_family == 'Lagrange': # Function values are defined at nodes. coords = self.functionspace.mesh().coordinates() num_nodes = self.functionspace.mesh().num_vertices() f_array = self.f.vector().array() # numpy array vtd_map = df.vertex_to_dof_map(self.functionspace) value_dim = self.value_dim() values = np.empty((num_nodes, value_dim)) for i in xrange(num_nodes): try: values[i, :] = f_array[vtd_map[value_dim * i: value_dim * (i + 1)]] except IndexError: # This only occurs in parallel and is probably related # to ghost nodes. I thought we could ignore those, but # this doesn't seem to be true since the resulting # array of function values has the wrong size. Need to # investigate. (Max, 15/05/2014) raise NotImplementedError("TODO") if value_dim == 1: values.shape = (num_nodes,) # convert to scalar field return coords, values else: raise NotImplementedError('This method is not implemented ' 'for {} family type function ' 'spaces.'.format(functionspace_family)) def __add__(self, other): result = Field(self.functionspace) result.set(self.f.vector() + other.f.vector()) return result def coerce_scalar_field(self, value): """ Coerce `value` into a scalar field defined over the same mesh (and using the same finite element family) as the current field. """ if not isinstance(value, Field): S1 = associated_scalar_space(self.functionspace) try: # Try to coerce 'value' into a scalar function space # on the same mesh. res = Field(S1, value) except: print("Error: cannot coerce into scalar field: {}".format(value)) raise else: value.assert_is_scalar_field() res = value return res def __mul__(self, other): # We use Claas Abert's 'point measure hack' to multiply the dolfin # function self.f with the scalar function a.f at each vertex. # Note that if 'other' is just a number, it should be possible to # say: result.set(self.f.vector() * other), but this currently throws # a PETSc error. -- Max, 20.3.2015 a = self.coerce_scalar_field(other) w = df.TestFunction(self.functionspace) v_res = df.assemble(df.dot(self.f * a.f, w) * df.dP) return Field(self.functionspace, value=v_res) def __rmul__(self, other): return self.__mul__(other) def __div__(self, other): # We use Claas Abert's 'point measure hack' for the vertex-wise operation. 
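        # Assembling an integrand against a test function with the point
        # measure dP evaluates it at the mesh vertices instead of
        # integrating over cells, so v_res below holds the nodal values
        # of self.f / a.f directly, without any projection error.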
        a = self.coerce_scalar_field(other)
        w = df.TestFunction(self.functionspace)
        v_res = df.assemble(df.dot(self.f / a.f, w) * df.dP)
        return Field(self.functionspace, value=v_res)

    def cross(self, other):
        """
        Return a vector field representing the cross product of this field
        with `other`.

        """
        if not isinstance(other, Field):
            raise TypeError("Argument must be a Field. Got: {} ({})".format(other, type(other)))
        if not (self.value_dim() == 3 and other.value_dim() == 3):
            raise ValueError("The cross product is only defined for 3d vector fields.")

        # We use Claas Abert's 'point measure hack' for the vertex-wise cross product.
        w = df.TestFunction(self.functionspace)
        v_res = df.assemble(df.dot(df.cross(self.f, other.f), w) * df.dP)
        return Field(self.functionspace, value=v_res)

    def dot(self, other):
        """
        Return a scalar field representing the dot product of this field
        with `other`.

        """
        if not isinstance(other, Field):
            raise TypeError("Argument must be a Field. Got: {} ({})".format(other, type(other)))
        if not (self.value_dim() == other.value_dim()):
            raise ValueError("The dot product is only defined for vector fields of the same dimension.")

        # We use Claas Abert's 'point measure hack' for the vertex-wise dot product.
        w = df.TestFunction(associated_scalar_space(self.functionspace))
        v_res = df.assemble(df.dot(df.dot(self.f, other.f), w) * df.dP)
        return self.coerce_scalar_field(v_res)

    def allclose(self, other, rtol=1e-7, atol=0):
        """
        Return `True` if the two fields are element-wise equal up to the
        given tolerance. This compares the difference between `self` and
        `other` to `atol + rtol * abs(self)`.

        This calls `np.allclose()` underneath, but with different default
        tolerances (in particular, we use atol=0 so that the comparison
        also returns sensible results if the field values are very small
        numbers).

        The argument `other` must be of type `Field`. Passing a numpy
        array raises an error because it is unclear in which order the
        values should be compared if the degrees of freedom of the
        underlying dolfin vector are re-ordered.

        """
        if not isinstance(other, Field):
            raise TypeError("Argument `other` must be of type 'Field'. "
                            "Got: {} (type {}).".format(other, type(other)))
        a = other.f.vector().array()
        b = self.f.vector().array()
        return np.allclose(a, b, rtol=rtol, atol=atol)

    @property
    def np(self):
        if self.value_dim() == 1:
            # TODO: We should also rearrange these vector entries according to the dofmap.
            return self.get_ordered_numpy_array_xxx()
        elif self.value_dim() == 3:
            return self.get_ordered_numpy_array_xxx().reshape(3, -1)
        else:
            raise NotImplementedError("Numpy representation is only implemented for scalar and 3d vector fields.")

    def probe(self, coord):
        return self.f(coord)

    def mesh(self):
        return self.functionspace.mesh()

    def mesh_dim(self):
        return self.functionspace.mesh().topology().dim()

    def mesh_dofmap(self):
        return self.functionspace.dofmap()

    def value_dim(self):
        if self.is_scalar_field():
            # Scalar field.
            return 1
        else:
            # num_sub_spaces() already returns the required int
            # (ufl_element().value_shape() would return a tuple (N,)).
            return self.functionspace.num_sub_spaces()

    def vector(self):
        return self.f.vector()

    def petsc_vector(self):
        return df.as_backend_type(self.f.vector()).vec()

    def save_pvd(self, filename):
        """Save to pvd file using dolfin code"""
        if filename[-4:] != '.pvd':
            filename += '.pvd'
        pvd_file = df.File(filename)
        pvd_file << self.f

    def save_hdf5(self, filename, t):
        """
        Save the field to a .h5 file together with the corresponding
        metadata (the times at which the field is saved), which is
        written to a .json file.
        Note that the mesh is automatically saved into this file as well,
        since it is required by load_hdf5.

        Arguments:

            filename - filename of the data to be saved (without extension)

            t - time at which the field is being saved; it is recommended
                that this is taken from sim.t

        This function creates two files, named filename.h5 and
        filename.json.

        When simulation/field saving is finished, it is recommended that
        close_hdf5() is called.

        To load a file, do so as:

        ```
        from dolfinh5tools import openh5
        h5file = openh5(filename, field_name='fieldname', mode='r')
        h5file.read(t=t)
        ```

        See the explanatory notebook tutorial-saving-field-in-hdf5-file.ipynb
        for more details.

        """
        # Ask if the file has already been created. If not, create it.
        if not hasattr(self, 'h5fileWrite'):
            self.h5fileWrite = dolfinh5tools.Create(filename, self.functionspace)
            self.h5fileWrite.save_mesh()
        self.h5fileWrite.write(self.f, self.name, t)

    def close_hdf5(self):
        """Close the hdf5 file and delete the saving object variable."""
        if hasattr(self, 'h5fileWrite'):
            self.h5fileWrite.close()
            del self.h5fileWrite

    def plot_with_dolfin(self, interactive=True):
        df.plot(self.f, interactive=interactive)

    def plot_with_paraview(self, **kwargs):
        """
        Render the field using Paraview and return an `IPython.display.Image`
        object with the resulting plot (which is displayed as a regular image
        in an IPython notebook). All keyword arguments are passed on to the
        function `finmag.util.visualization.render_paraview_scene`, which is
        used internally.

        This currently only works for 3D vector fields.

        """
        return plot_dolfin_function(self.f, **kwargs)

    def normalise_dofmap(self):
        """
        Overwrite own field values with normalised ones.

        """
        dofmap = df.vertex_to_dof_map(self.functionspace)
        # The dofs reordered via the vertex_to_dof_map are interleaved
        # per vertex: [x1, y1, z1, ..., xn, yn, zn].
        reordered = self.f.vector().array()[dofmap]
        vectors = reordered.reshape((-1, 3))  # [[x1, y1, z1], ..., [xn, yn, zn]]
        lengths = np.sqrt(np.add.reduce(vectors * vectors, axis=1))
        normalised = np.dot(vectors.T, np.diag(1 / lengths)).T.ravel()
        vertexmap = df.dof_to_vertex_map(self.functionspace)
        normalised_original_order = normalised[vertexmap]
        self.from_array(normalised_original_order)

    def normalise(self):
        """
        Normalise the Field, so that the norm at every mesh node is 1.

        """
        S1 = df.FunctionSpace(self.functionspace.mesh(), 'CG', 1)
        norm_squared = 0
        for i in range(self.value_dim()):
            norm_squared += self.f[i] * self.f[i]
        norm = df.Function(S1)
        norm_vector = df.assemble(df.dot(df.sqrt(norm_squared),
                                         df.TestFunction(S1)) * df.dP)
        norm.vector().set_local(norm_vector.get_local())
        # self.f = df.project(self.f / norm, self.functionspace)
        self.f = (self / norm).f

    def get_spherical(self):
        """
        Transform the magnetisation coordinates to spherical coordinates

          theta = arctan(m_r / m_z)  ;  m_r = sqrt(m_x ** 2 + m_y ** 2)
          phi = arctan(m_y / m_x)

        The theta and phi generalised coordinates are stored in
        self.theta and self.phi respectively.
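
        Here theta is the polar angle measured from the positive z-axis
        and phi is the azimuthal angle in the x-y plane.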
        Calling this function returns the two dolfin functions
        (theta, phi).

        """
        # Create a scalar FunctionSpace to compute the cylindrical radius
        # sqrt(m_x^2 + m_y^2) and the angles phi and theta.
        S1 = df.FunctionSpace(self.functionspace.mesh(), 'CG', 1)

        # Create a dolfin function from the FS
        m_r = df.Function(S1)
        # Compute the radius using the assemble method with the dolfin dP
        # point measure (like a Dirac delta, to get values on every node
        # of the mesh). This returns a dolfin vector.
        cyl_vector = df.assemble(df.dot(df.sqrt(self.f[0] * self.f[0]
                                                + self.f[1] * self.f[1]),
                                        df.TestFunction(S1)) * df.dP,
                                 )
        # Set the vector values to the dolfin function
        m_r.vector().set_local(cyl_vector.get_local())

        # Now we compute the theta and phi angles to describe the
        # magnetisation and save them to the corresponding variables.
        self.theta = df.Function(S1)
        self.phi = df.Function(S1)

        # We reuse the same vector variable as the one used to compute
        # m_r, in order to save memory.

        # Theta = arctan(m_r / m_z)
        cyl_vector = df.assemble(df.dot(df.atan_2(m_r, self.f[2]),
                                        df.TestFunction(S1)) * df.dP,
                                 tensor=cyl_vector
                                 )

        # Instead of:
        #   self.theta.vector().set_local(cyl_vector.get_local())
        # we use:
        self.theta.vector().axpy(1, cyl_vector)
        # which adds 1 * cyl_vector to self.theta.vector() and is much
        # faster (we assume self.theta.vector() is empty, i.e. only made
        # of zeros). See: FEniCS Book, page 44.

        # Phi = arctan(m_y / m_x)
        cyl_vector = df.assemble(df.dot(df.atan_2(self.f[1], self.f[0]),
                                        df.TestFunction(S1)) * df.dP,
                                 tensor=cyl_vector
                                 )

        # The equivalent (slower) alternative would be:
        #   self.phi.vector().set_local(cyl_vector.get_local())
        self.phi.vector().axpy(1, cyl_vector)

        return self.theta, self.phi
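

# A minimal usage sketch (illustrative only, not part of the library API;
# the names `demo_mesh` and `demo_V` are hypothetical): build a 3d vector
# field on a unit cube, normalise it on creation and convert it to
# spherical coordinates using the methods defined above.
if __name__ == '__main__':
    demo_mesh = df.UnitCubeMesh(5, 5, 5)
    demo_V = df.VectorFunctionSpace(demo_mesh, 'CG', 1, dim=3)
    m = Field(demo_V, value=(1.0, 1.0, 0.0), normalised=True)
    print(m.average())  # approximately (0.707, 0.707, 0)
    theta, phi = m.get_spherical()
    # For m = (1, 1, 0)/sqrt(2) we expect theta = pi/2 and phi = pi/4.
    print(theta((0.5, 0.5, 0.5)))
    print(phi((0.5, 0.5, 0.5)))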
29,357
38.406711
126
py
finmag
finmag-master/src/finmag/field_test.py
import dolfin as df
import numpy as np
import functools
import pytest
import os
from field import Field, associated_scalar_space


class TestField(object):

    def setup(self):
        self.create_meshes()
        self.define_tolerances()

        # All created function spaces are CG (Lagrange)
        # with degree=1 unless named explicitly.
        self.create_PBCs()
        self.create_scalar_function_spaces()
        self.create_vector2d_function_spaces()
        self.create_vector3d_function_spaces()
        self.create_vector4d_function_spaces()

        self.all_fspaces = self.scalar_fspaces + self.vector2d_fspaces + \
            self.vector3d_fspaces + self.vector4d_fspaces

        # x, y, or z coordinate value for probing the field.
        self.probing_coord = 0.4351  # Not at any mesh node.

    def create_meshes(self):
        """
        Create meshes of several dimensions.

        """
        self.mesh1d = df.UnitIntervalMesh(10)
        self.mesh2d = df.UnitSquareMesh(11, 10)
        self.mesh3d = df.UnitCubeMesh(9, 11, 10)

        self.meshes = [self.mesh1d, self.mesh2d, self.mesh3d]

    def create_PBCs(self):
        """
        Create periodic boundary conditions which identify the
        nodes at the left/right edges of the mesh with each other
        (i.e. the nodes with minimum/maximum x-coordinates).

        """
        class PeriodicBoundary(df.SubDomain):

            def inside(self, x, on_boundary):
                # Pick the nodes which have x-coordinate 0 (to within
                # DOLFIN_EPS) and lie on the boundary of the mesh.
                return bool(x[0] < df.DOLFIN_EPS and
                            x[0] > -df.DOLFIN_EPS and on_boundary)

            def map(self, pt1, pt2):
                # Define a mapping from the nodes on the right edge of the
                # mesh to the ones on the left edge (by subtracting 1.0
                # from the x-coordinate).
                pt2[0] = pt1[0] - 1.0
                pt2[1:] = pt1[1:]

        # Create periodic boundary condition
        self.pbc = PeriodicBoundary()

    def create_scalar_function_spaces(self):
        """
        Create scalar function spaces (both with and without PBCs).

        """
        self.fs1d_scalar = df.FunctionSpace(self.mesh1d, "CG", 1)
        self.fs2d_scalar = df.FunctionSpace(self.mesh2d, "CG", 1)
        self.fs3d_scalar = df.FunctionSpace(self.mesh3d, "CG", 1)
        self.fs1d_scalar_pbc = df.FunctionSpace(
            self.mesh1d, "CG", 1, constrained_domain=self.pbc)
        self.fs2d_scalar_pbc = df.FunctionSpace(
            self.mesh2d, "CG", 1, constrained_domain=self.pbc)
        self.fs3d_scalar_pbc = df.FunctionSpace(
            self.mesh3d, "CG", 1, constrained_domain=self.pbc)

        self.scalar_fspaces = [
            self.fs1d_scalar, self.fs2d_scalar, self.fs3d_scalar,
            self.fs1d_scalar_pbc, self.fs2d_scalar_pbc, self.fs3d_scalar_pbc]

    def create_vector2d_function_spaces(self):
        """
        Create 2D vector function spaces (both with and without PBCs).

        """
        self.fs1d_vector2d = df.VectorFunctionSpace(self.mesh1d, "CG", 1, dim=2)
        self.fs2d_vector2d = df.VectorFunctionSpace(self.mesh2d, "CG", 1, dim=2)
        self.fs3d_vector2d = df.VectorFunctionSpace(self.mesh3d, "CG", 1, dim=2)
        self.fs1d_vector2d_pbc = df.VectorFunctionSpace(
            self.mesh1d, "CG", 1, dim=2, constrained_domain=self.pbc)
        self.fs2d_vector2d_pbc = df.VectorFunctionSpace(
            self.mesh2d, "CG", 1, dim=2, constrained_domain=self.pbc)
        self.fs3d_vector2d_pbc = df.VectorFunctionSpace(
            self.mesh3d, "CG", 1, dim=2, constrained_domain=self.pbc)

        self.vector2d_fspaces = [
            self.fs1d_vector2d, self.fs2d_vector2d, self.fs3d_vector2d,
            self.fs1d_vector2d_pbc, self.fs2d_vector2d_pbc,
            self.fs3d_vector2d_pbc]

    def create_vector3d_function_spaces(self):
        """
        Create 3D vector function spaces (both with and without PBCs).
""" self.fs1d_vector3d = df.VectorFunctionSpace(self.mesh1d, "CG", 1, dim=3) self.fs2d_vector3d = df.VectorFunctionSpace(self.mesh2d, "CG", 1, dim=3) self.fs3d_vector3d = df.VectorFunctionSpace(self.mesh3d, "CG", 1, dim=3) self.fs1d_vector3d_pbc = \ df.VectorFunctionSpace(self.mesh1d, "CG", 1, dim=3, constrained_domain=self.pbc) self.fs2d_vector3d_pbc = \ df.VectorFunctionSpace(self.mesh2d, "CG", 1, dim=3, constrained_domain=self.pbc) self.fs3d_vector3d_pbc = \ df.VectorFunctionSpace(self.mesh3d, "CG", 1, dim=3, constrained_domain=self.pbc) self.vector3d_fspaces = [ # self.fs1d_vector3d, self.fs2d_vector3d, # self.fs3d_vector3d, self.fs1d_vector3d_pbc, self.fs2d_vector3d_pbc, self.fs3d_vector3d_pbc] def create_vector4d_function_spaces(self): """ Create 4D vector function spaces (both with and without PBCs). """ self.fs1d_vector4d = df.VectorFunctionSpace(self.mesh1d, "CG", 1, dim=4) self.fs2d_vector4d = df.VectorFunctionSpace(self.mesh2d, "CG", 1, dim=4) self.fs3d_vector4d = df.VectorFunctionSpace(self.mesh3d, "CG", 1, dim=4) self.fs1d_vector4d_pbc = \ df.VectorFunctionSpace(self.mesh1d, "CG", 1, dim=4, constrained_domain=self.pbc) self.fs2d_vector4d_pbc = \ df.VectorFunctionSpace(self.mesh2d, "CG", 1, dim=4, constrained_domain=self.pbc) self.fs3d_vector4d_pbc = \ df.VectorFunctionSpace(self.mesh3d, "CG", 1, dim=4, constrained_domain=self.pbc) self.vector4d_fspaces = [ self.fs1d_vector4d, self.fs2d_vector4d, self.fs3d_vector4d, self.fs1d_vector4d_pbc, self.fs2d_vector4d_pbc, self.fs3d_vector4d_pbc] def define_tolerances(self): """ Set the tolerances used throughout all tests to account for interpolation errors. """ # Tolerance value at the mesh node and # outside the mesh node for linear functions. self.tol1 = 5e-13 # Tolerance value outside the mesh node for non-linear functions. self.tol2 = 1e-2 # outside the mesh node # Tolerance value for computing average and norm. self.tol3 = 5e-6 def test_init(self): """Test the initialisation of field parameters.""" for functionspace in self.all_fspaces: # Initialisation arguments. value = None # Not specified, a zero-function is expected. normalised = True name = 'name_test' unit = 'unit_test' field = Field(functionspace, value, normalised, name, unit) assert field.functionspace == functionspace assert field.name == name assert field.unit == unit # Check that both function's name and label are changed. assert field.f.name() == name assert field.f.label() == name # Check that the created function is a dolfin zero function. assert isinstance(field.f, df.Function) assert np.all(field.coords_and_values()[1] == 0) def test_set_scalar_field_with_constant(self): """Test setting the scalar field with a constant.""" # Different expressions of constant value 42 for scalar field setting. constants = [df.Constant(42), df.Constant(42.0), df.Constant("42"), df.Constant("42.0"), 42, 42.0, "42", "42.0", u"42", u"42.0"] expected_value = 42 # Setting the scalar field for different # scalar function spaces and constants. for functionspace in self.scalar_fspaces: for constant in constants: field = Field(functionspace, constant) # Check vector (numpy array) values (should be exact). assert np.all(field.f.vector().array() == expected_value) # Check the result of coords_and_values (should be exact). field_values = field.coords_and_values()[1] # coords ignored assert np.all(field_values == expected_value) # Check the interpolated value outside the mesh node. # The expected field is constant and, because of that, # smaller tolerance value (tol1) is used. 
probing_point = field.mesh_dim() * (self.probing_coord,) probed_value = field.probe(probing_point) assert abs(probed_value - expected_value) < self.tol1 # TODO: Add tests to set scalar/vector field using a string. def test_set_scalar_field_with_expression(self): """Test setting the scalar field with an expression.""" # Different expressions for setting the scalar field, # depending on the mesh dimension (1D, 2D, or 3D). expressions = [df.Expression("11.2*x[0]", degree=1), df.Expression("11.2*x[0] - 3.01*x[1]", degree=1), df.Expression("11.2*x[0] - 3.01*x[1] + 2.7*x[2]", degree=1)] # Setting the scalar field for different # scalar function spaces and appropriate expressions. for functionspace in self.scalar_fspaces: field = Field(functionspace) # Set the field and compute expected values # depending on the mesh dimension. coords = field.coords_and_values()[0] # Values ignored. if field.mesh_dim() == 1: field.set(expressions[0]) expected_values = 11.2 * coords[:, 0] expected_probed_value = 11.2 * self.probing_coord elif field.mesh_dim() == 2: field.set(expressions[1]) expected_values = 11.2 * coords[:, 0] - 3.01 * coords[:, 1] expected_probed_value = (11.2 - 3.01) * self.probing_coord elif field.mesh_dim() == 3: field.set(expressions[2]) expected_values = 11.2 * coords[:, 0] - 3.01 * coords[:, 1] + \ 2.7 * coords[:, 2] expected_probed_value = ( 11.2 - 3.01 + 2.7) * self.probing_coord # Check the result of coords_and_values (should be exact). field_values = field.coords_and_values()[1] # ignore coordinates assert np.all(field_values == expected_values) # Check the interpolated value outside the mesh node. # The expected field is linear and, because of that, # smaller tolerance value (tol1) is used. probing_point = field.mesh_dim() * (self.probing_coord,) probed_value = field.probe(probing_point) assert abs(probed_value - expected_probed_value) < self.tol1 def test_set_scalar_field_with_dolfin_function(self): """Test setting the scalar field with a dolfin function.""" # Different expressions for defining the dolfin function, # depending on the mesh dimension (1D, 2D, or 3D). expressions = [df.Expression("11.2*x[0]", degree=1), df.Expression("11.2*x[0] - 3.01*x[1]", degree=1), df.Expression("11.2*x[0] - 3.01*x[1] + 2.7*x[2]", degree=1)] # Setting the scalar field for different # scalar function spaces and appropriate expressions. for functionspace in self.scalar_fspaces: field = Field(functionspace) dolfin_function = df.Function(functionspace) # Set the field and compute expected values # depending on the mesh dimension. coords = field.coords_and_values()[0] # Values ignored. if field.mesh_dim() == 1: dolfin_function = df.interpolate(expressions[0], functionspace) field.set(dolfin_function) expected_values = 11.2 * coords[:, 0] expected_probed_value = 11.2 * self.probing_coord elif field.mesh_dim() == 2: dolfin_function = df.interpolate(expressions[1], functionspace) field.set(dolfin_function) expected_values = 11.2 * coords[:, 0] - 3.01 * coords[:, 1] expected_probed_value = (11.2 - 3.01) * self.probing_coord elif field.mesh_dim() == 3: dolfin_function = df.interpolate(expressions[2], functionspace) field.set(dolfin_function) expected_values = 11.2 * coords[:, 0] - 3.01 * coords[:, 1] + \ 2.7 * coords[:, 2] expected_probed_value = ( 11.2 - 3.01 + 2.7) * self.probing_coord # Check the result of coords_and_values (should be exact). field_values = field.coords_and_values()[1] # ignore coordinates assert np.all(field_values == expected_values) # Check the interpolated value outside the mesh node. 
# The expected field is linear and, because of that, # smaller tolerance value (tol1) is used. probing_point = field.mesh_dim() * (self.probing_coord,) probed_value = field.probe(probing_point) assert abs(probed_value - expected_probed_value) < self.tol1 def test_set_scalar_field_with_generic_vector(self): """Test setting the scalar field with a generic vector.""" # Different expressions for defining the dolfin function, # depending on the mesh dimension (1D, 2D, or 3D). expressions = [df.Expression("11.2*x[0]", degree=1), df.Expression("11.2*x[0] - 3.01*x[1]", degree=1), df.Expression("11.2*x[0] - 3.01*x[1] + 2.7*x[2]", degree=1)] # Setting the scalar field for different # scalar function spaces and appropriate expressions. for functionspace in self.scalar_fspaces: field = Field(functionspace) dolfin_function = df.Function(functionspace) # Set the field and compute expected values # depending on the mesh dimension. coords = field.coords_and_values()[0] # Values ignored. if field.mesh_dim() == 1: dolfin_function = df.interpolate(expressions[0], functionspace) field.set(dolfin_function.vector()) expected_values = 11.2 * coords[:, 0] expected_probed_value = 11.2 * self.probing_coord elif field.mesh_dim() == 2: dolfin_function = df.interpolate(expressions[1], functionspace) field.set(dolfin_function.vector()) expected_values = 11.2 * coords[:, 0] - 3.01 * coords[:, 1] expected_probed_value = (11.2 - 3.01) * self.probing_coord elif field.mesh_dim() == 3: dolfin_function = df.interpolate(expressions[2], functionspace) field.set(dolfin_function.vector()) expected_values = 11.2 * coords[:, 0] - 3.01 * coords[:, 1] + \ 2.7 * coords[:, 2] expected_probed_value = ( 11.2 - 3.01 + 2.7) * self.probing_coord # Check the result of coords_and_values (should be exact). field_values = field.coords_and_values()[1] # ignore coordinates assert np.all(field_values == expected_values) # Check the interpolated value outside the mesh node. # The expected field is linear and, because of that, # smaller tolerance value (tol1) is used. probing_point = field.mesh_dim() * (self.probing_coord,) probed_value = field.probe(probing_point) assert abs(probed_value - expected_probed_value) < self.tol1 def test_set_scalar_field_with_python_function(self): """Test setting the scalar field with a python function.""" # Python functions array for setting the scalar field. python_functions = [lambda x:1.21 * x[0], lambda x:1.21 * x[0] - 3.21 * x[1], lambda x:1.21 * x[0] - 3.21 * x[1] + 2.47 * x[2]] # Setting the scalar field for different # scalar function spaces and appropriate python functions. for functionspace in self.scalar_fspaces: field = Field(functionspace) # Set the field and compute expected values # depending on the mesh dimension. coords = field.coords_and_values()[0] # Values ignored. if field.mesh_dim() == 1: field.set(python_functions[0]) expected_values = 1.21 * coords[:, 0] expected_probed_value = 1.21 * self.probing_coord elif field.mesh_dim() == 2: field.set(python_functions[1]) expected_values = 1.21 * coords[:, 0] - 3.21 * coords[:, 1] expected_probed_value = (1.21 - 3.21) * self.probing_coord elif field.mesh_dim() == 3: field.set(python_functions[2]) expected_values = 1.21 * coords[:, 0] - 3.21 * coords[:, 1] + \ 2.47 * coords[:, 2] expected_probed_value = ( 1.21 - 3.21 + 2.47) * self.probing_coord # Check the result of coords_and_values (should be exact). 
field_values = field.coords_and_values()[1] # ignore coordinates assert np.all(field_values == expected_values) # Check the interpolated value outside the mesh node. # The expected field is linear and, because of that, # smaller tolerance value (tol1) is used. probing_point = field.mesh_dim() * (self.probing_coord,) probed_value = field.probe(probing_point) assert abs(probed_value - expected_probed_value) < self.tol1 def test_set_vector_field_with_constant(self): """Test setting the 3D vector field with a constant.""" # Different expressions of constant for vector field setting. constants = [df.Constant((0.15, -2.3, -6.41)), df.Constant([0.15, -2.3, -6.41]), df.Constant(np.array([0.15, -2.3, -6.41])), (0.15, -2.3, -6.41), [0.15, -2.3, -6.41], np.array([0.15, -2.3, -6.41])] expected_value = (0.15, -2.3, -6.41) # Test setting the vector field for different # vector function spaces and constants. for functionspace in self.vector3d_fspaces: for constant in constants: field = Field(functionspace, constant) # Check vector (numpy array) values (should be exact). f_array = field.get_ordered_numpy_array_xxx() f_array_split = np.split(f_array, field.value_dim()) assert np.all(f_array_split[0] == expected_value[0]) assert np.all(f_array_split[1] == expected_value[1]) assert np.all(f_array_split[2] == expected_value[2]) # Check the result of coords_and_values (should be exact). coords, field_values = field.coords_and_values() assert np.all(field_values[:, 0] == expected_value[0]) assert np.all(field_values[:, 1] == expected_value[1]) assert np.all(field_values[:, 2] == expected_value[2]) # Check the interpolated value outside the mesh node. # The expected field is constant and, because of that, # smaller tolerance value (tol1) is used. probing_point = field.mesh_dim() * (self.probing_coord,) probed_value = field.probe(probing_point) assert abs(probed_value[0] - expected_value[0]) < self.tol1 assert abs(probed_value[1] - expected_value[1]) < self.tol1 assert abs(probed_value[2] - expected_value[2]) < self.tol1 def test_setting_field_with_argument_of_incorrect_dimension_raises_ValueError(self): # Check that we get a decent error (rather than the generic # RuntimError thrown by dolfin) if we try to set a field with # a value whose dimension doesn't match the function space. # Try to set scalar field with a vector value field = Field(self.fs3d_scalar) with pytest.raises(ValueError): field.set([1, 0, 0]) # Try to set vector field with a scalar value field = Field(self.fs2d_vector3d) with pytest.raises(ValueError): field.set(42.0) # Try to set 2D vector field with a 3D vector field = Field(self.fs3d_vector2d) with pytest.raises(ValueError): field.set([1, 0, 0]) # Try to set 2D vector field with a 3D vector field = Field(self.fs3d_vector2d) with pytest.raises(ValueError): field.set(["x[0]", "1", "0"]) def test_set_vector_field_with_expression(self): """Test setting the 3D vector field with an expression.""" # Different expressions for 3D vector fields. expressions = [df.Expression(['1.1*x[0]', '-2.4*x[0]', '3*x[0]'], degree=1), df.Expression(['1.1*x[0]', '-2.4*x[1]', '3*x[1]'], degree=1), df.Expression(['1.1*x[0]', '-2.4*x[1]', '3*x[2]'], degree=1)] # Test setting the vector field for different # vector function spaces and appropriate expressions. for functionspace in self.vector3d_fspaces: field = Field(functionspace) # Set the vector field and compute expected values. coords = field.coords_and_values()[0] # Values ignored. 
if field.mesh_dim() == 1: field.set(expressions[0]) expected_values = (1.1 * coords[:, 0], -2.4 * coords[:, 0], 3 * coords[:, 0]) elif field.mesh_dim() == 2: field.set(expressions[1]) expected_values = (1.1 * coords[:, 0], -2.4 * coords[:, 1], 3 * coords[:, 1]) elif field.mesh_dim() == 3: field.set(expressions[2]) expected_values = (1.1 * coords[:, 0], -2.4 * coords[:, 1], 3 * coords[:, 2]) # Compute expected probed value. expected_probed_value = (1.1 * self.probing_coord, -2.4 * self.probing_coord, 3 * self.probing_coord) # Check vector (numpy array) values (should be exact). f_array = field.get_ordered_numpy_array_xxx() f_array_split = np.split(f_array, field.value_dim()) assert np.all(f_array_split[0] == expected_values[0]) assert np.all(f_array_split[1] == expected_values[1]) assert np.all(f_array_split[2] == expected_values[2]) # Check the result of coords_and_values (should be exact). coords, field_values = field.coords_and_values() assert np.all(field_values[:, 0] == expected_values[0]) assert np.all(field_values[:, 1] == expected_values[1]) assert np.all(field_values[:, 2] == expected_values[2]) # Check the interpolated value outside the mesh node. # The expected field is constant and, because of that, # smaller tolerance value (tol1) is used. probing_point = field.mesh_dim() * (self.probing_coord,) probed_value = field.probe(probing_point) assert abs(probed_value[0] - expected_probed_value[0]) < self.tol1 assert abs(probed_value[1] - expected_probed_value[1]) < self.tol1 assert abs(probed_value[2] - expected_probed_value[2]) < self.tol1 def test_set_vector_field_with_dolfin_function(self): """Test setting the 3D vector field with a dolfin function.""" # Different expressions for 3D vector fields. expressions = [df.Expression(['1.1*x[0]', '-2.4*x[0]', '3*x[0]'], degree=1), df.Expression(['1.1*x[0]', '-2.4*x[1]', '3*x[1]'], degree=1), df.Expression(['1.1*x[0]', '-2.4*x[1]', '3*x[2]'], degree=1)] # Test setting the vector field for different # vector function spaces and appropriate expressions. for functionspace in self.vector3d_fspaces: field = Field(functionspace) dolfin_function = df.Function(functionspace) # Set the vector field and compute expected values. coords = field.coords_and_values()[0] # Values ignored. if field.mesh_dim() == 1: dolfin_function = df.interpolate(expressions[0], functionspace) field.set(dolfin_function) expected_values = (1.1 * coords[:, 0], -2.4 * coords[:, 0], 3 * coords[:, 0]) elif field.mesh_dim() == 2: dolfin_function = df.interpolate(expressions[1], functionspace) field.set(dolfin_function) expected_values = (1.1 * coords[:, 0], -2.4 * coords[:, 1], 3 * coords[:, 1]) elif field.mesh_dim() == 3: dolfin_function = df.interpolate(expressions[2], functionspace) field.set(dolfin_function) expected_values = (1.1 * coords[:, 0], -2.4 * coords[:, 1], 3 * coords[:, 2]) # Compute expected probed value. expected_probed_value = (1.1 * self.probing_coord, -2.4 * self.probing_coord, 3 * self.probing_coord) # Check vector (numpy array) values (should be exact). f_array = field.get_ordered_numpy_array_xxx() f_array_split = np.split(f_array, field.value_dim()) assert np.all(f_array_split[0] == expected_values[0]) assert np.all(f_array_split[1] == expected_values[1]) assert np.all(f_array_split[2] == expected_values[2]) # Check the result of coords_and_values (should be exact). 
coords, field_values = field.coords_and_values() assert np.all(field_values[:, 0] == expected_values[0]) assert np.all(field_values[:, 1] == expected_values[1]) assert np.all(field_values[:, 2] == expected_values[2]) # Check the interpolated value outside the mesh node. # The expected field is constant and, because of that, # smaller tolerance value (tol1) is used. probing_point = field.mesh_dim() * (self.probing_coord,) probed_value = field.probe(probing_point) assert abs(probed_value[0] - expected_probed_value[0]) < self.tol1 assert abs(probed_value[1] - expected_probed_value[1]) < self.tol1 assert abs(probed_value[2] - expected_probed_value[2]) < self.tol1 def test_set_vector_field_with_generic_vector(self): """Test setting the 3D vector field with a generic_vector.""" # Different expressions for 3D vector fields. expressions = [df.Expression(['1.1*x[0]', '-2.4*x[0]', '3*x[0]'], degree=1), df.Expression(['1.1*x[0]', '-2.4*x[1]', '3*x[1]'], degree=1), df.Expression(['1.1*x[0]', '-2.4*x[1]', '3*x[2]'], degree=1)] # Test setting the vector field for different # vector function spaces and appropriate expressions. for functionspace in self.vector3d_fspaces: field = Field(functionspace) dolfin_function = df.Function(functionspace) # Set the vector field and compute expected values. coords = field.coords_and_values()[0] # Values ignored. if field.mesh_dim() == 1: dolfin_function = df.interpolate(expressions[0], functionspace) field.set(dolfin_function.vector()) expected_values = (1.1 * coords[:, 0], -2.4 * coords[:, 0], 3 * coords[:, 0]) elif field.mesh_dim() == 2: dolfin_function = df.interpolate(expressions[1], functionspace) field.set(dolfin_function.vector()) expected_values = (1.1 * coords[:, 0], -2.4 * coords[:, 1], 3 * coords[:, 1]) elif field.mesh_dim() == 3: dolfin_function = df.interpolate(expressions[2], functionspace) field.set(dolfin_function.vector()) expected_values = (1.1 * coords[:, 0], -2.4 * coords[:, 1], 3 * coords[:, 2]) # Compute expected probed value. expected_probed_value = (1.1 * self.probing_coord, -2.4 * self.probing_coord, 3 * self.probing_coord) # Check vector (numpy array) values (should be exact). f_array = field.get_ordered_numpy_array_xxx() f_array_split = np.split(f_array, field.value_dim()) assert np.all(f_array_split[0] == expected_values[0]) assert np.all(f_array_split[1] == expected_values[1]) assert np.all(f_array_split[2] == expected_values[2]) # Check the result of coords_and_values (should be exact). coords, field_values = field.coords_and_values() assert np.all(field_values[:, 0] == expected_values[0]) assert np.all(field_values[:, 1] == expected_values[1]) assert np.all(field_values[:, 2] == expected_values[2]) # Check the interpolated value outside the mesh node. # The expected field is constant and, because of that, # smaller tolerance value (tol1) is used. probing_point = field.mesh_dim() * (self.probing_coord,) probed_value = field.probe(probing_point) assert abs(probed_value[0] - expected_probed_value[0]) < self.tol1 assert abs(probed_value[1] - expected_probed_value[1]) < self.tol1 assert abs(probed_value[2] - expected_probed_value[2]) < self.tol1 def test_set_vector_field_with_python_function(self): """Test setting the 3D vector field with a python function.""" # Different python functions for setting the vector field. 
python_functions = [lambda x:(1.21 * x[0], -2.47 * x[0], 3 * x[0]), lambda x:(1.21 * x[0], -2.47 * x[1], 3 * x[1]), lambda x:(1.21 * x[0], -2.47 * x[1], 3 * x[2])] # Test setting the vector field for different # vector function spaces and appropriate expressions. for functionspace in self.vector3d_fspaces: field = Field(functionspace) # Set the vector field and compute expected values. coords = field.coords_and_values()[0] # Values ignored. if field.mesh_dim() == 1: field.set(python_functions[0]) expected_values = (1.21 * coords[:, 0], -2.47 * coords[:, 0], 3 * coords[:, 0]) elif field.mesh_dim() == 2: field.set(python_functions[1]) expected_values = (1.21 * coords[:, 0], -2.47 * coords[:, 1], 3 * coords[:, 1]) elif field.mesh_dim() == 3: field.set(python_functions[2]) expected_values = (1.21 * coords[:, 0], -2.47 * coords[:, 1], 3 * coords[:, 2]) # Compute expected probed value. expected_probed_value = (1.21 * self.probing_coord, -2.47 * self.probing_coord, 3 * self.probing_coord) # Check vector (numpy array) values (should be exact). f_array = field.get_ordered_numpy_array_xxx() f_array_split = np.split(f_array, field.value_dim()) assert np.all(f_array_split[0] == expected_values[0]) assert np.all(f_array_split[1] == expected_values[1]) assert np.all(f_array_split[2] == expected_values[2]) # Check the result of coords_and_values (should be exact). coords, field_values = field.coords_and_values() assert np.all(field_values[:, 0] == expected_values[0]) assert np.all(field_values[:, 1] == expected_values[1]) assert np.all(field_values[:, 2] == expected_values[2]) # Check the interpolated value outside the mesh node. # The expected field is constant and, because of that, # smaller tolerance value (tol1) is used. probing_point = field.mesh_dim() * (self.probing_coord,) probed_value = field.probe(probing_point) assert abs(probed_value[0] - expected_probed_value[0]) < self.tol1 assert abs(probed_value[1] - expected_probed_value[1]) < self.tol1 assert abs(probed_value[2] - expected_probed_value[2]) < self.tol1 def test_set_vector2d_field(self): """Test setting the 2D vector field.""" # Different values for setting the 2D vector field. expressions = [df.Constant((1.1, -2.4)), (1.1, -2.4), [1.1, -2.4], df.Expression(('1.1', '-2.4'), degree=1), lambda x:(1.1, -2.4)] expected_value = (1.1, -2.4) # Test setting the 2D vector field for different # vector function spaces and constants. for functionspace in self.vector2d_fspaces: for expression in expressions: field = Field(functionspace, expression) # Check vector (numpy array) values (should be exact). f_array = field.get_ordered_numpy_array_xxx() f_array_split = np.split(f_array, field.value_dim()) assert np.all(f_array_split[0] == expected_value[0]) assert np.all(f_array_split[1] == expected_value[1]) # Check the result of coords_and_values (should be exact). coords, field_values = field.coords_and_values() assert np.all(field_values[:, 0] == expected_value[0]) assert np.all(field_values[:, 1] == expected_value[1]) # Check the interpolated value outside the mesh node. # The expected field is constant and, because of that, # smaller tolerance value (tol1) is used. probing_point = field.mesh_dim() * (self.probing_coord,) probed_value = field.probe(probing_point) assert abs(probed_value[0] - expected_value[0]) < self.tol1 assert abs(probed_value[1] - expected_value[1]) < self.tol1 def test_set_vector4d_field(self): """Test setting the 4D vector field.""" # Different values for setting the 4D vector field. 
expressions = [df.Constant((1.1, -2.4, 0, 0.9)), (1.1, -2.4, 0, 0.9), [1.1, -2.4, 0, 0.9], df.Expression(('1.1', '-2.4', '0', '0.9'), degree=1), lambda x:(1.1, -2.4, 0, 0.9)] expected_value = (1.1, -2.4, 0, 0.9) # Test setting the 4D vector field for different # vector function spaces and constants. for functionspace in self.vector4d_fspaces: for expression in expressions: field = Field(functionspace, expression) # Check vector (numpy array) values (should be exact). f_array = field.get_ordered_numpy_array_xxx() f_array_split = np.split(f_array, field.value_dim()) assert np.all(f_array_split[0] == expected_value[0]) assert np.all(f_array_split[1] == expected_value[1]) assert np.all(f_array_split[2] == expected_value[2]) assert np.all(f_array_split[3] == expected_value[3]) # Check the result of coords_and_values (should be exact). coords, field_values = field.coords_and_values() assert np.all(field_values[:, 0] == expected_value[0]) assert np.all(field_values[:, 1] == expected_value[1]) assert np.all(field_values[:, 2] == expected_value[2]) assert np.all(field_values[:, 3] == expected_value[3]) # Check the interpolated value outside the mesh node. # The expected field is constant and, because of that, # smaller tolerance value (tol1) is used. probing_point = field.mesh_dim() * (self.probing_coord,) probed_value = field.probe(probing_point) assert abs(probed_value[0] - expected_value[0]) < self.tol1 assert abs(probed_value[1] - expected_value[1]) < self.tol1 assert abs(probed_value[2] - expected_value[2]) < self.tol1 assert abs(probed_value[3] - expected_value[3]) < self.tol1 def test_normalise(self): mesh = df.UnitIntervalMesh(50) V = df.VectorFunctionSpace(mesh, "CG", 1, dim=3) expr = df.Expression(("10 * x[0] + 0.1", "10 * x[0] + 0.2", "10 * x[0] + 0.3"), degree=1) field = Field(V, value=expr) field2 = Field(V, value=expr) field.normalise() field2.normalise() coords = mesh.coordinates() xcoords = coords[:, 0] m = np.array([10 * xcoords + 0.1, 10 * xcoords + 0.2, 10 * xcoords + 0.3]) m_norm = np.linalg.norm(m, axis=0) m_normalised = (1. / m_norm) * m assert np.allclose(m_normalised, field.get_ordered_numpy_array_xxx().reshape(3, -1)) assert np.allclose(field.f.vector().array(), field2.vector().array()) def test_whether_field_is_scalar_field(self): for functionspace in self.scalar_fspaces: field = Field(functionspace, 42) assert field.is_scalar_field() for functionspace in self.vector2d_fspaces: field = Field(functionspace, [42, 23]) assert not field.is_scalar_field() for functionspace in self.vector3d_fspaces: field = Field(functionspace, [42, 23, 12]) assert not field.is_scalar_field() for functionspace in self.vector4d_fspaces: field = Field(functionspace, [42, 23, 12, 5]) assert not field.is_scalar_field() def test_convert_scalar_field_to_constant_value(self): """ Check that calling 'as_constant()' on a constant scalar field returns the unique field value. Also check that calling 'as_constant()' on a non-constant scalar field raises an exception. """ for functionspace in self.scalar_fspaces: field = Field(functionspace, 42.0) assert field.is_constant() assert field.as_constant() == 42.0 for functionspace in self.scalar_fspaces: field = Field(functionspace, 'x[0]') assert not field.is_constant() with pytest.raises(RuntimeError): field.as_constant() def test_average_scalar_field(self): """Test computing the scalar field average.""" # Different expressions for setting the scalar field. # All expressions set the field with same average value. 
# TODO: Add test for computing average on different mesh regions. expressions = [df.Constant(5), df.Expression('10*x[0]', degree=1), lambda x:10 * x[0]] f_av_expected = 5 for functionspace in self.scalar_fspaces: for expression in expressions: field = Field(functionspace, expression) f_av = field.average() # Check the average value. assert abs(f_av - f_av_expected) < self.tol1 # Check the type of average result. assert isinstance(f_av, float) def test_average_vector_field(self): """Test computing the vector field average.""" # Different expressions for setting the 2D vector field. # All expressions set the field with same average value. expressions = [df.Constant((1, 5.1)), df.Expression(['2*x[0]', '10.2*x[0]'], degree=1), lambda x:(2 * x[0], 10.2 * x[0])] f_av_expected = (1, 5.1) for functionspace in self.vector2d_fspaces: for expression in expressions: field = Field(functionspace, expression) f_av = field.average() # Check the average values for all components. assert abs(f_av[0] - f_av_expected[0]) < self.tol1 assert abs(f_av[1] - f_av_expected[1]) < self.tol1 # Check the type and shape of average result. assert isinstance(f_av, np.ndarray) assert f_av.shape == (field.value_dim(),) # Different expressions for setting the 3D vector field. # All expressions set the field with same average value. expressions = [df.Constant((1, 5.1, -3.6)), df.Expression(['2*x[0]', '10.2*x[0]', '-7.2*x[0]'], degree=1), lambda x:(2 * x[0], 10.2 * x[0], -7.2 * x[0])] f_av_expected = (1, 5.1, -3.6) for functionspace in self.vector3d_fspaces: for expression in expressions: field = Field(functionspace, expression) f_av = field.average() # Check the average values for all components. assert abs(f_av[0] - f_av_expected[0]) < self.tol1 assert abs(f_av[1] - f_av_expected[1]) < self.tol1 assert abs(f_av[2] - f_av_expected[2]) < self.tol1 # Check the type and shape of average result. assert isinstance(f_av, np.ndarray) assert f_av.shape == (field.value_dim(),) # Different expressions for setting the 4D vector field. # All expressions set the field with same average value. expressions = [df.Constant((1, 5.1, -3.6, 0)), df.Expression(['2*x[0]', '10.2*x[0]', '-7.2*x[0]', '0'], degree=1), lambda x:(2 * x[0], 10.2 * x[0], -7.2 * x[0], 0)] f_av_expected = (1, 5.1, -3.6, 0) for functionspace in self.vector4d_fspaces: for expression in expressions: field = Field(functionspace, expression) f_av = field.average() # Check the average values for all components. assert abs(f_av[0] - f_av_expected[0]) < self.tol1 assert abs(f_av[1] - f_av_expected[1]) < self.tol1 assert abs(f_av[2] - f_av_expected[2]) < self.tol1 assert abs(f_av[3] - f_av_expected[3]) < self.tol1 # Check the type and shape of average result. assert isinstance(f_av, np.ndarray) assert f_av.shape == (field.value_dim(),) def test_coords_and_values_scalar_field(self): """Test coordinates and values for scalar field.""" # Test for scalar fields on 1D, 2D, and 3D meshes, # initialised with a dolfin expression. expression = df.Expression('1.3*x[0]', degree=1) for functionspace in self.scalar_fspaces: expected_coords = functionspace.mesh().coordinates() num_nodes = functionspace.mesh().num_vertices() expected_values = 1.3 * expected_coords[:, 0] field = Field(functionspace, expression) coords, values = field.coords_and_values() # Type of results must be numpy array. assert isinstance(coords, np.ndarray) assert isinstance(values, np.ndarray) # Check the shape of results. 
assert values.shape == (num_nodes,) assert coords.shape == (num_nodes, field.mesh_dim()) # Check values of results. assert np.all(coords == expected_coords) assert np.all(values == expected_values) def test_coords_and_values_vector_field(self): """Test coordinates and values for vector field.""" # Different expressions for 3D vector fields. expression = df.Expression(['1.03*x[0]', '2.31*x[0]', '-1*x[0]'], degree=1) for functionspace in self.vector3d_fspaces: # Initialise the field with an appropriate expression for # the function space and compute expected results. expected_coords = functionspace.mesh().coordinates() num_nodes = functionspace.mesh().num_vertices() expected_values = (1.03 * expected_coords[:, 0], 2.31 * expected_coords[:, 0], -1 * expected_coords[:, 0]) field = Field(functionspace, expression) coords, values = field.coords_and_values() # Type of results must be numpy array. assert isinstance(coords, np.ndarray) assert isinstance(values, np.ndarray) # Check the shape of results. assert values.shape == (num_nodes, field.value_dim()) assert coords.shape == (num_nodes, field.mesh_dim()) # Check values of results. assert np.all(coords == expected_coords) assert np.all(values[:, 0] == expected_values[0]) assert np.all(values[:, 1] == expected_values[1]) assert np.all(values[:, 2] == expected_values[2]) def test_probe_scalar_field(self): """Test probing the scalar field.""" # Test probing field at and outside the mesh node for scalar field and # an appropriate expression for setting the value. for functionspace in self.scalar_fspaces: field = Field(functionspace) mesh_dim = field.mesh_dim() if mesh_dim == 1: field.set(df.Expression('1.3*x[0]', degree=1)) exact_result_at_node = 1.3 * 0.5 exact_result_out_node = 1.3 * self.probing_coord elif mesh_dim == 2: field.set(df.Expression('1.3*x[0] - 2.3*x[1]', degree=1)) exact_result_at_node = (1.3 - 2.3) * 0.5 exact_result_out_node = (1.3 - 2.3) * self.probing_coord elif mesh_dim == 3: field.set(df.Expression('1.3*x[0] - 2.3*x[1] + 6.1*x[2]', degree=1)) exact_result_at_node = (1.3 - 2.3 + 6.1) * 0.5 exact_result_out_node = (1.3 - 2.3 + 6.1) * self.probing_coord # Probe and check the result at the mesh node. probe_point = mesh_dim * (0.5,) probed_value = field.probe(probe_point) assert isinstance(probed_value, float) assert abs(probed_value - exact_result_at_node) < self.tol1 # Probe and check the result outside the mesh node. probe_point = mesh_dim * (self.probing_coord,) probed_value = field.probe(probe_point) assert isinstance(probed_value, float) assert abs(probed_value - exact_result_out_node) < self.tol1 def test_probe_vector_field(self): """Test probing the vector field.""" # Test probing field at and outside the mesh node for vector field and # an appropriate expression for setting the value. for functionspace in self.vector3d_fspaces: field = Field(functionspace, df.Expression(['1.3*x[0]', '0.3*x[0]', '-6.2*x[0]'], degree=1)) mesh_dim = field.mesh_dim() exact_result_at_node = (1.3 * 0.5, 0.3 * 0.5, -6.2 * 0.5) exact_result_out_node = (1.3 * self.probing_coord, 0.3 * self.probing_coord, -6.2 * self.probing_coord) # Probe and check the result at the mesh node. 
probe_point = mesh_dim * (0.5,) probed_value = field.probe(probe_point) assert isinstance(probed_value, np.ndarray) assert len(probed_value) == 3 assert abs(probed_value[0] - exact_result_at_node[0]) < self.tol1 assert abs(probed_value[1] - exact_result_at_node[1]) < self.tol1 assert abs(probed_value[2] - exact_result_at_node[2]) < self.tol1 # Probe and check the result outside the mesh node. probe_point = mesh_dim * (self.probing_coord,) probed_value = field.probe(probe_point) assert isinstance(probed_value, np.ndarray) assert len(probed_value) == 3 assert abs(probed_value[0] - exact_result_out_node[0]) < self.tol1 assert abs(probed_value[1] - exact_result_out_node[1]) < self.tol1 assert abs(probed_value[2] - exact_result_out_node[2]) < self.tol1 def test_mesh_dim(self): """Test mesh_dim method.""" for functionspace in self.all_fspaces: field = Field(functionspace) mesh_dim_expected = functionspace.mesh().topology().dim() assert isinstance(field.mesh_dim(), int) assert field.mesh_dim() == mesh_dim_expected def test_value_dim(self): """Test value_dim method.""" for functionspace in self.all_fspaces: field = Field(functionspace) value_dim_expected = functionspace.ufl_element().value_shape() assert isinstance(field.value_dim(), int) if functionspace.num_sub_spaces() == 0: assert field.value_dim() == 1 elif functionspace.num_sub_spaces() > 0: assert field.value_dim() == value_dim_expected[0] def test_mesh(self): """Test mesh method.""" for functionspace in self.all_fspaces: field = Field(functionspace) assert isinstance(field.mesh(), df.Mesh) def test_set_nonlinear_scalar_field(self): """Test setting nonlinear scalar field.""" # Python functions array for setting the scalar field. python_functions = [lambda x:1.21 * x[0] * x[0], lambda x:1.21 * x[0] * x[0] - 3.21 * x[1], lambda x:1.21 * x[0] * x[0] - 3.21 * x[1] + 2.47 * x[2]] # Setting the scalar field for different # scalar function spaces and appropriate python functions. for functionspace in self.scalar_fspaces: field = Field(functionspace) # Set the field and compute expected values # depending on the mesh dimension. coords = field.coords_and_values()[0] # Values ignored. if field.mesh_dim() == 1: field.set(python_functions[0]) expected_values = 1.21 * coords[:, 0] * coords[:, 0] expected_probed_value = 1.21 * self.probing_coord * \ self.probing_coord elif field.mesh_dim() == 2: field.set(python_functions[1]) expected_values = 1.21 * coords[:, 0] * coords[:, 0] - \ 3.21 * coords[:, 1] expected_probed_value = (1.21 * self.probing_coord - 3.21) * \ self.probing_coord elif field.mesh_dim() == 3: field.set(python_functions[2]) expected_values = 1.21 * coords[:, 0] * coords[:, 0] - \ 3.21 * coords[:, 1] + 2.47 * coords[:, 2] expected_probed_value = (1.21 * self.probing_coord - 3.21 + 2.47) * self.probing_coord # Check the result of coords_and_values (should be exact). field_values = field.coords_and_values()[1] # ignore coordinates assert np.all(field_values == expected_values) # Check the interpolated value outside the mesh node. # The expected field is nonlinear and, because of that, # greater tolerance value (tol1) is used. probing_point = field.mesh_dim() * (self.probing_coord,) probed_value = field.probe(probing_point) assert abs(probed_value - expected_probed_value) < self.tol2 def test_set_nonlinear_vector_field(self): """Test setting the vector field with a nonlinear expression.""" # Different nonlinear expressions for 2D vector fields. 
expressions = [df.Expression(['1.1*x[0]*x[0]', '-2.4*x[0]'], degree=1), df.Expression(['1.1*x[0]*x[0]', '-2.4*x[0]'], degree=1), df.Expression(['1.1*x[0]*x[0]', '-2.4*x[1]'], degree=1)] # Test setting the vector field for different # vector function spaces and appropriate expressions. for functionspace in self.vector2d_fspaces: field = Field(functionspace) # Set the vector field and compute expected values. coords = field.coords_and_values()[0] # Values ignored. if field.mesh_dim() == 1: field.set(expressions[0]) expected_values = (1.1 * coords[:, 0] * coords[:, 0], -2.4 * coords[:, 0]) elif field.mesh_dim() == 2: field.set(expressions[1]) expected_values = (1.1 * coords[:, 0] * coords[:, 0], -2.4 * coords[:, 0]) elif field.mesh_dim() == 3: field.set(expressions[2]) expected_values = (1.1 * coords[:, 0] * coords[:, 0], -2.4 * coords[:, 1]) # Compute expected probed value. expected_probed_value = (1.1 * self.probing_coord * self.probing_coord, -2.4 * self.probing_coord) # Check vector (numpy array) values (should be exact). f_array = field.get_ordered_numpy_array_xxx() f_array_split = np.split(f_array, field.value_dim()) assert np.all(f_array_split[0] == expected_values[0]) assert np.all(f_array_split[1] == expected_values[1]) # Check the result of coords_and_values (should be exact). coords, field_values = field.coords_and_values() assert np.all(field_values[:, 0] == expected_values[0]) assert np.all(field_values[:, 1] == expected_values[1]) # Check the interpolated value outside the mesh node. # The expected field is nonlinear and, because of that, # greater tolerance value (tol2) is used. probing_point = field.mesh_dim() * (self.probing_coord,) probed_value = field.probe(probing_point) assert abs(probed_value[0] - expected_probed_value[0]) < self.tol2 assert abs(probed_value[1] - expected_probed_value[1]) < self.tol2 # Different nonlinear expressions for 3D vector fields. expressions = [df.Expression(['1.1*x[0]*x[0]', '-2.4*x[0]', '3*x[0]'], degree=1), df.Expression(['1.1*x[0]*x[0]', '-2.4*x[1]', '3*x[1]'], degree=1), df.Expression(['1.1*x[0]*x[0]', '-2.4*x[1]', '3*x[2]'], degree=1)] # Test setting the vector field for different # vector function spaces and appropriate expressions. for functionspace in self.vector3d_fspaces: field = Field(functionspace) # Set the vector field and compute expected values. coords = field.coords_and_values()[0] # Values ignored. if field.mesh_dim() == 1: field.set(expressions[0]) expected_values = (1.1 * coords[:, 0] * coords[:, 0], -2.4 * coords[:, 0], 3 * coords[:, 0]) elif field.mesh_dim() == 2: field.set(expressions[1]) expected_values = (1.1 * coords[:, 0] * coords[:, 0], -2.4 * coords[:, 1], 3 * coords[:, 1]) elif field.mesh_dim() == 3: field.set(expressions[2]) expected_values = (1.1 * coords[:, 0] * coords[:, 0], -2.4 * coords[:, 1], 3 * coords[:, 2]) # Compute expected probed value. expected_probed_value = (1.1 * self.probing_coord * self.probing_coord, -2.4 * self.probing_coord, 3 * self.probing_coord) # Check vector (numpy array) values (should be exact). f_array = field.get_ordered_numpy_array_xxx() f_array_split = np.split(f_array, field.value_dim()) assert np.all(f_array_split[0] == expected_values[0]) assert np.all(f_array_split[1] == expected_values[1]) assert np.all(f_array_split[2] == expected_values[2]) # Check the result of coords_and_values (should be exact). 
coords, field_values = field.coords_and_values() assert np.all(field_values[:, 0] == expected_values[0]) assert np.all(field_values[:, 1] == expected_values[1]) assert np.all(field_values[:, 2] == expected_values[2]) # Check the interpolated value outside the mesh node. # The expected field is nonlinear and, because of that, # greater tolerance value (tol2) is used. probing_point = field.mesh_dim() * (self.probing_coord,) probed_value = field.probe(probing_point) assert abs(probed_value[0] - expected_probed_value[0]) < self.tol2 assert abs(probed_value[1] - expected_probed_value[1]) < self.tol2 assert abs(probed_value[2] - expected_probed_value[2]) < self.tol2 # Different nonlinear expressions for 4D vector fields. expressions = [df.Expression(['1.1*x[0]*x[0]', '-2.4*x[0]', '3*x[0]', 'x[0]'], degree=1), df.Expression(['1.1*x[0]*x[0]', '-2.4*x[1]', '3*x[1]', 'x[0]'], degree=1), df.Expression(['1.1*x[0]*x[0]', '-2.4*x[1]', '3*x[2]', 'x[0]'], degree=1)] # Test setting the vector field for different # vector function spaces and appropriate expressions. for functionspace in self.vector4d_fspaces: field = Field(functionspace) # Set the vector field and compute expected values. coords = field.coords_and_values()[0] # Values ignored. if field.mesh_dim() == 1: field.set(expressions[0]) expected_values = (1.1 * coords[:, 0] * coords[:, 0], -2.4 * coords[:, 0], 3 * coords[:, 0], coords[:, 0]) elif field.mesh_dim() == 2: field.set(expressions[1]) expected_values = (1.1 * coords[:, 0] * coords[:, 0], -2.4 * coords[:, 1], 3 * coords[:, 1], coords[:, 0]) elif field.mesh_dim() == 3: field.set(expressions[2]) expected_values = (1.1 * coords[:, 0] * coords[:, 0], -2.4 * coords[:, 1], 3 * coords[:, 2], coords[:, 0]) # Compute expected probed value. expected_probed_value = (1.1 * self.probing_coord * self.probing_coord, -2.4 * self.probing_coord, 3 * self.probing_coord, self.probing_coord) # Check vector (numpy array) values (should be exact). f_array = field.get_ordered_numpy_array_xxx() f_array_split = np.split(f_array, field.value_dim()) assert np.all(f_array_split[0] == expected_values[0]) assert np.all(f_array_split[1] == expected_values[1]) assert np.all(f_array_split[2] == expected_values[2]) assert np.all(f_array_split[3] == expected_values[3]) # Check the result of coords_and_values (should be exact). coords, field_values = field.coords_and_values() assert np.all(field_values[:, 0] == expected_values[0]) assert np.all(field_values[:, 1] == expected_values[1]) assert np.all(field_values[:, 2] == expected_values[2]) assert np.all(field_values[:, 3] == expected_values[3]) # Check the interpolated value outside the mesh node. # The expected field is nonlinear and, because of that, # greater tolerance value (tol2) is used. probing_point = field.mesh_dim() * (self.probing_coord,) probed_value = field.probe(probing_point) assert abs(probed_value[0] - expected_probed_value[0]) < self.tol2 assert abs(probed_value[1] - expected_probed_value[1]) < self.tol2 assert abs(probed_value[2] - expected_probed_value[2]) < self.tol2 assert abs(probed_value[3] - expected_probed_value[3]) < self.tol2 def test_plot_with_dolfin(self): """Test that we can call the plotting function of a Field object.""" # Set environment variable DOLFIN_NOPLOT to a non-zero value in # order to suppress the actual plotting (because we have no way # to close the window non-interactively from within the test). 
os.environ['DOLFIN_NOPLOT'] = 'TRUE' field = Field(self.fs3d_vector3d, value=[1, 0, 0]) field.plot_with_dolfin(interactive=False) def test_add_scalar_fields(self): for functionspace in self.scalar_fspaces: field1 = Field(functionspace, value=3.1) field2 = Field(functionspace, value=3.35) field3 = field1 + field2 assert np.allclose(field3.f.vector().array(), 6.45) def test_add_vector_fields(self): for functionspace in self.vector3d_fspaces: field1 = Field(functionspace, value=(1, 2, 3)) field2 = Field(functionspace, value=(5, 2.1, 6)) field3 = field1 + field2 coords = field3.coords_and_values()[0] for coord in coords: assert abs(field3.probe(coord)[0] - 6) < self.tol1 assert abs(field3.probe(coord)[1] - 4.1) < self.tol1 assert abs(field3.probe(coord)[2] - 9) < self.tol1 def test_mul_scalar_fields(self): for functionspace in self.scalar_fspaces: # Define linearly varying field field1 = Field(functionspace, value="x[0] + 3.1") # Multiply with scalars field2 = field1 * 42 field3 = -12 * field1 coords2, vals2 = field2.coords_and_values() coords3, vals3 = field3.coords_and_values() np.testing.assert_allclose(vals2, 42 * (coords2[:, 0] + 3.1)) np.testing.assert_allclose(vals3, -12 * (coords3[:, 0] + 3.1)) # assert np.allclose(field2.f.vector().array(), 130.2) # assert np.allclose(field3.f.vector().array(), -37.2) def test_mul_vector_fields(self): for functionspace in self.vector3d_fspaces: # Define linearly varying field field1 = Field(functionspace, value=["x[0] + 1", "x[0] + 2.4", "x[0] + 3.7"]) # Multiply with scalars field2 = field1 * 42 field3 = -3.6 * field1 # Multiply with a scalar field S1 = associated_scalar_space(functionspace) a = Field(S1, lambda pt: pt[0]**2) field4 = field1 * a coords2, vals2 = field2.coords_and_values() coords3, vals3 = field3.coords_and_values() coords4, vals4 = field4.coords_and_values() # We extract the x-coordinates and add a new axis to the # numpy array to allow broadcasting. 
            xcoords2 = coords2[:, 0][:, np.newaxis]
            xcoords3 = coords3[:, 0][:, np.newaxis]
            xcoords4 = coords4[:, 0][:, np.newaxis]

            vals2_expected = 42 * (xcoords2 + [1, 2.4, 3.7])
            vals3_expected = -3.6 * (xcoords3 + [1, 2.4, 3.7])
            vals4_expected = xcoords4**2 * (xcoords4 + [1, 2.4, 3.7])

            np.testing.assert_allclose(vals2, vals2_expected)
            np.testing.assert_allclose(vals3, vals3_expected)
            np.testing.assert_allclose(vals4, vals4_expected)

    def test_div_scalar_fields(self):
        for functionspace in self.scalar_fspaces:
            field1 = Field(functionspace, value=3.1)
            field2 = field1 / 20
            assert np.allclose(field2.f.vector().array(), 0.155)

    def test_div_vector_fields(self):
        for functionspace in self.vector3d_fspaces:
            field1 = Field(functionspace, value=(1, 2.4, 3.7))

            # Divide by scalars
            field2 = field1 / 20

            # Divide by a scalar field
            S1 = associated_scalar_space(functionspace)
            a = Field(S1, lambda pt: (pt[0] + 1.0)**2)
            field3 = field1 / a

            coords = field2.coords_and_values()[0]
            for coord in coords:
                assert abs(field2.probe(coord)[0] - 0.05) < self.tol1
                assert abs(field2.probe(coord)[1] - 0.12) < self.tol1
                assert abs(field2.probe(coord)[2] - 0.185) < self.tol1

            coords = field3.coords_and_values()[0]
            for coord in coords:
                assert abs(field3.probe(coord)[0] - 1.0 / (coord[0] + 1)**2) < self.tol1
                assert abs(field3.probe(coord)[1] - 2.4 / (coord[0] + 1)**2) < self.tol1
                assert abs(field3.probe(coord)[2] - 3.7 / (coord[0] + 1)**2) < self.tol1

    def test_cross(self):
        v = np.array([1, 2, 3])
        w = np.array([4, 5, -2])
        v_cross_w = np.cross(v, w)
        for functionspace in self.vector3d_fspaces:
            field1 = Field(functionspace, value=v)
            field2 = Field(functionspace, value=w)
            field3 = field1.cross(field2)
            coords, vals = field3.coords_and_values()
            np.testing.assert_allclose(vals - v_cross_w, 0)

    def test_dot(self):
        v = np.array([1, 2, 3])
        w = np.array([4, 5, -2])
        v_dot_w = np.dot(v, w)
        for functionspace in self.vector3d_fspaces:
            field1 = Field(functionspace, value=v)
            field2 = Field(functionspace, value=w)
            field3 = field1.dot(field2)
            _, vals = field3.coords_and_values()
            np.testing.assert_allclose(vals, v_dot_w)

    def test_allclose(self):
        for functionspace in self.all_fspaces:
            # Define field on the function space and fill with random values.
            field1 = Field(functionspace)
            # The rtol check below can fail if the perturbed field value
            # happens to be very small, so keep the random values away
            # from zero.
            field1.set_random_values(vrange=[0.1, 1.0])

            # Define second field as copy of the first.
            # Check that they are allclose.
            field2 = Field(functionspace, field1)
            assert field2.allclose(field1)

            # Change one of the coordinates and check that the fields are now
            # not allclose any more with the default tolerances, but that they
            # are allclose with less strict tolerances.
            a = field1.get_ordered_numpy_array_xxx()
            eps = np.zeros_like(a)
            eps[7] = 2.1e-6
            field2.set_with_ordered_numpy_array_xxx(a + eps)
            assert not field2.allclose(field1)
            assert field2.allclose(field1, atol=1e-5)
            assert field2.allclose(field1, rtol=1e-4)

    # TODO: It would be nice to allow to pass scalar values for scalar
    #       fields and 3-vectors for 3-vector fields, etc. (which would
    #       check that the value at every vertex coincides with the
    #       given value). However, for now we disallow any other type
    #       than `Field`.
with pytest.raises(TypeError): assert field2.allclose(42.0) with pytest.raises(TypeError): assert field2.allclose(a) def test_field_get_ordered_numpy_array_xxx_and_xyz(self): """ For each mesh define a scalar field as well as vector fields of dimension 2, 3, 4. The field values are defined by adding 0.01, 0.02, 0.03 and 0.04, respectively, to the x-coordinates of the mesh nodes. Then the field values are retrieved using both get_ordered_numpy_array_xxx and get_ordered_numpy_array_xyz and compared with the expected values. """ def fsetval(value_dim, pos): # Helper function to set field values x = pos[0] return [x + 0.01 * (i+1) for i in range(value_dim)] for functionspace in self.all_fspaces: # Define the field f = Field(functionspace) vdim = f.value_dim() f.set(functools.partial(fsetval, vdim)) # Retrieve the field values in the different orderings vals_xxx = f.get_ordered_numpy_array_xxx() vals_xyz = f.get_ordered_numpy_array_xyz() # Define the expected field values (derived from the # x-coordinates of the mesh nodes by adding 0.01 to all # x-components of the field, 0.02 to all y-components, # 0.03 to all z-components, etc.) xcoords = functionspace.mesh().coordinates()[:, 0] vals_xxx_expected = np.concatenate( [xcoords + 0.01 * (i+1) for i in range(vdim)]) vals_xyz_expected = np.array( [xcoords + 0.01 * (i+1) for i in range(vdim)]).transpose().ravel() # Check that we get the expected orderings np.testing.assert_almost_equal(vals_xxx, vals_xxx_expected) np.testing.assert_almost_equal(vals_xyz, vals_xyz_expected) # Check that we get an error if we try to call # get_ordered_numpy_array() on a non-scalar field. if vdim > 1: with pytest.raises(ValueError): f.get_ordered_numpy_array() def test_save_hdf5(self): """ Test saving of field to hdf5 and corresponding metadata to json file. """ # ----------------------------------------------------------------- # Create test data and files # ----------------------------------------------------------------- # Define base filename to save data to. filename = 'test_save_field' # define name of field fieldname = 'f' expression = df.Expression(['1.1*x[0]', '-2.4*x[1]', '3*x[2]'], degree=1) # Define and set field field = Field(functionspace=self.fs3d_vector3d, name=fieldname) field.set(expression) # save field to h5 file field.save_hdf5(filename, t=1.0) field.save_hdf5(filename, t=2.0) # close hdf5 file field.close_hdf5() # ----------------------------------------------------------------- # Test saving of data has created relevant files # ----------------------------------------------------------------- # check that files have been created assert(os.path.isfile(filename + '.h5')) assert(os.path.isfile(filename + '.json')) # ----------------------------------------------------------------- # Delete files # ----------------------------------------------------------------- os.remove(filename + '.h5') os.remove(filename + '.json')
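# ---------------------------------------------------------------------
# Illustrative sketch (not part of the original test module): it spells
# out, with plain numpy and made-up values on a hypothetical two-node
# mesh, the two degree-of-freedom orderings that
# test_field_get_ordered_numpy_array_xxx_and_xyz above relies on.


def _ordering_sketch():
    values_per_node = np.array([[0.01, 0.02, 0.03],   # field value at node 0
                                [1.01, 1.02, 1.03]])  # field value at node 1

    # 'xxx' ordering: all x-components first, then all y, then all z.
    ordering_xxx = values_per_node.transpose().ravel()
    assert list(ordering_xxx) == [0.01, 1.01, 0.02, 1.02, 0.03, 1.03]

    # 'xyz' ordering: the three components interleaved, node by node.
    ordering_xyz = values_per_node.ravel()
    assert list(ordering_xyz) == [0.01, 0.02, 0.03, 1.01, 1.02, 1.03]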
74,170
46.637123
177
py
finmag
finmag-master/src/finmag/setup2.py
# build script for compiled version of finmag
# change this as needed
libincludedir = "."

import sys
import os
from distutils.core import setup
from distutils.extension import Extension

# we'd better have Cython installed, or it's a no-go
try:
    from Cython.Distutils import build_ext
except ImportError:
    print "You don't seem to have Cython installed. Please get a"
    print "copy from www.cython.org and install it"
    sys.exit(1)

# scan the 'sim' directory for .py files (skipping those listed in
# files_to_ignore), converting them to extension names in dotted notation
files_to_ignore = ['llg.py']


def scandir(dir, files=[]):
    for file in os.listdir(dir):
        path = os.path.join(dir, file)
        if os.path.isfile(path) and path.endswith(".py") and file not in files_to_ignore:
            files.append(path.replace(os.path.sep, ".")[:-3])
        elif os.path.isdir(path):
            scandir(path, files)
    return files


# generate an Extension object from its dotted name
def makeExtension(extName):
    extPath = extName.replace(".", os.path.sep) + ".py"
    return Extension(
        extName,
        [extPath],
        # adding the '.' to include_dirs is CRUCIAL!!
        include_dirs=[libincludedir, "."],
        #extra_compile_args = ["-O3", "-Wall"],
        #extra_link_args = ['-g'],
        #libraries = ["dv",],
    )

# get the list of extensions
extNames = scandir("sim")

# and build up the set of Extension objects
extensions = [makeExtension(name) for name in extNames]

print "extNames are\n", extNames
print "extensions are\n", extensions

# finally, we can pass all this to distutils
setup(
    name="dvedit",
    packages=["sim", "sim.energies"],
    ext_modules=extensions,
    cmdclass={'build_ext': build_ext},
)
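# ---------------------------------------------------------------------
# Hedged usage sketch (not part of the original script). Assuming the
# standard distutils/Cython workflow, the script is run from the
# directory that contains the 'sim' package:
#
#     python setup2.py build_ext --inplace
#
# This cythonizes every .py file found by scandir() (except those in
# files_to_ignore) into a shared object next to its source. The dotted
# extension names are derived from the paths; e.g. a (hypothetical)
# file sim/energies/exchange.py becomes the extension
# 'sim.energies.exchange'.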
1,728
25.6
89
py
finmag
finmag-master/src/finmag/init.py
# FinMag - a thin layer on top of FEniCS to enable micromagnetic multi-physics simulations # Copyright (C) 2012 University of Southampton # Do not distribute # # CONTACT: [email protected] from __future__ import division import logging logger = logging.getLogger("finmag") # no need to propagate up to root handler, since we define our own later logger.propagate = False from finmag.sim.sim import Simulation, sim_with from finmag.energies.demag import MacroGeometry from finmag.sim.normal_mode_sim import NormalModeSimulation, normal_mode_simulation from finmag.util.helpers import set_logging_level from finmag.util import configuration from finmag.field import Field import util.versions from __version__ import __version__ import example import signal # Convenience access to physics object Q def timings_report(n=10): """ Returns the timings report, which is an overview of where finmag's runtime is spent. By default, it will show the 10 functions where the most runtime has been spent. This number can be changed by passing an integer to this function. Usage: import finmag print finmag.timings_report() """ from aeon import timer return timer.report(n) logger.debug("{:15} {:<20}".format("FinMag", __version__)) display_module_versions = configuration.get_config_option( "logging", "display_module_versions_at_startup", "True") if display_module_versions == "True": double_column = "{:<15} {:<20} {:<15} {:20}" logger.debug(double_column.format( "Dolfin", util.versions.get_version_dolfin(), "Matplotlib", util.versions.get_version_matplotlib())) logger.debug(double_column.format( "Numpy", util.versions.get_version_numpy(), "Scipy", util.versions.get_version_scipy())) logger.debug(double_column.format( "IPython", util.versions.get_version_ipython(), "Python", util.versions.get_version_python())) try: sundials_version = util.versions.get_version_sundials() except NotImplementedError: sundials_version = '<unknown>' logger.debug(double_column.format( "Paraview", util.versions.get_version_paraview(), "Sundials", sundials_version)) try: boost_version = util.versions.get_version_boostpython() except NotImplementedError: boost_version = '<unknown>' logger.debug(double_column.format( "Boost-Python", boost_version, "Linux", util.versions.get_linux_issue())) if util.versions.running_binary_distribution(): # check that this is the same as the binary distribution has been compiled for # This matters for sundials: on 12.04 there is one version of sundials # on 12.10 there is a different one. They are not compatible, but we # have no way to tell which one we are using. # # We thus assume that we use the system's sundials, and thus we # should be able to check by comparing the linux distribution. import util.binary # Where is this module? 
logger.debug("%20s: %s" % ("Build Linux", util.binary.buildlinux)) vb = util.binary.buildlinux vr = util.versions.get_linux_issue() if vb == vr: logger.debug("Build Linux and host linux versions agree.") else: if util.versions.loose_compare_ubuntu_version(vb, vr): logger.warn( "Build Linux and host linux versions only agree approximately.") else: logger.warn("Build Linux = %s" % util.binary.buildlinux) logger.warn("Host Linux = %s" % util.versions.get_linux_issue()) # create extreme debugging logging level, which has numerical value 5 logging.EXTREMEDEBUG = 5 logging.addLevelName(logging.EXTREMEDEBUG, 'EXTREMEDEBUG') # and register a function function for this for our logger logger.extremedebug = lambda msg: logger.log(logging.EXTREMEDEBUG, msg) # Register a function which starts the debugger when the program # receives the 'SIGTSTP' signal (keyboard shortcut: "Ctrl-Z"). def receive_quit_signal(signum, frame): print("Starting debugger. Type 'c' to resume execution and 'q' to quit.") try: # Try 'ipdb' first because it's nicer to use import ipdb as pdb except ImportError: # Otherwise fall back to the regular 'pdb'. import pdb pdb.set_trace() # XXX TODO: It would be nice to be able to automatically jump up # to the first frame that's not inside the Finmag (or dolfin) # module any more because this should be the lowest frame that # lives in the user script, which is probably what the user # expects to see if he presses Ctrl-Z. # # The following command should help us to check whether we're # still inside Finmag, but we still need to figure out how to jump # to the correct frame. -- Max, 5.12.2013 # # inspect.getmodule(cur_frame.f_locals['self']).__name__.startswith('finmag') logger.debug("Registering debug signal handler. Press Ctrl-Z any time " "to stop execution and jump into the debugger.") signal.signal(signal.SIGTSTP, receive_quit_signal)
5,083
37.80916
90
py
finmag
finmag-master/src/finmag/__init__.py
# FinMag # Copyright (C) 2012 University of Southampton # Do not distribute # # CONTACT: [email protected] # from init import * from __version__ import __version__ # dolfin 1.1.0 re-orders the degrees of freedom # to speed up some iterative algorithms. However, # this means that the ordering doesn't correspond # to geometrical structures (such as vertices) # anymore. For now, we disable the re-ordering. # In the future, we should enable it as it will # help with parallel execution. import dolfin as df df.parameters.reorder_dofs_serial = False
554
28.210526
49
py
finmag
finmag-master/src/finmag/distcp.py
#files_to_ignore = ['llg.py']
directories_to_ignore = ['build']
directories_for_straight_copy_only = ['native']

import os
import shutil
import argparse


def cp_file(sourcedir, filename, targetdir):
    # only relevant case is if we have a .so file for a given .py, then don't copy .py
    # if that .py file is __init__.py, we need to copy an empty __init__.py

    # if directory does not exist, create (might be empty in the end)
    if not os.path.exists(targetdir):
        print "Creating directory %s" % targetdir
        os.makedirs(targetdir)

    path = os.path.join(sourcedir, filename)
    _, dirname = os.path.split(sourcedir)
    if filename.endswith('.py') and dirname not in directories_for_straight_copy_only:
        if os.path.exists(path[:-3] + ".so"):
            if filename == "__init__.py":
                # create an empty __init__.py file instead
                f = open(os.path.join(targetdir, "__init__.py"), "w")
                f.close()
            return  # don't copy that Python file because we have a .so
    elif filename.endswith('pyc'):
        print "Skipping pyc file (%s)" % filename
        return
    elif filename.endswith('c'):
        print "Skipping .c file (%s)" % filename
        return

    print("Copying %s" % path)
    shutil.copyfile(path, os.path.join(targetdir, filename))


def scandir(dir, files=[]):
    for file_ in os.listdir(dir):
        path = os.path.join(dir, file_)
        # print "working %s / %s" % (dir, file_)
        if os.path.isfile(path):
            cp_file(dir, file_, os.path.join(targetdir, dir))
        # os.path.split() returns a (head, tail) tuple, so compare the
        # tail (the directory's basename) against the ignore list
        elif os.path.isdir(path) and os.path.split(path)[1] not in directories_to_ignore:
            scandir(path, files)
    return files


def distcp():
    print scandir('sim')

if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Copy FinMag files to alternative location')
    parser.add_argument(
        'destination-dir', type=str,
        help='The directory to copy FinMag files to')
    args = parser.parse_args()
    targetdir = vars(args)['destination-dir']
    if not os.path.exists(targetdir):
        os.makedirs(targetdir)
    distcp()
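# ---------------------------------------------------------------------
# Hedged usage sketch (not part of the original script). Assuming the
# script is run from the directory containing the compiled 'sim' tree,
# a typical invocation copies the distributable files (preferring a .so
# over its .py where both exist) into a target directory:
#
#     python distcp.py /tmp/finmag-dist
#
# Note that scandir() above relies on the module-level name `targetdir`,
# which is only bound in the __main__ block, so distcp() is meant to be
# used as a command-line script rather than imported from other code.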
2,146
31.530303
86
py
finmag
finmag-master/src/finmag/field_setters_test.py
""" Ensure that field values can be set using a wide range of types and make sure we don't assign a new dolfin function, but only overwrite the values of the existing function. """ import dolfin as df import numpy as np import pytest from .field import Field EPSILON = 1e-14 @pytest.fixture def setup(): mesh = df.UnitIntervalMesh(1) F = df.FunctionSpace(mesh, "CG", 1) F_DG = df.FunctionSpace(mesh, "DG", 0) V = df.VectorFunctionSpace(mesh, "CG", 1, dim=3) return Field(F), Field(F_DG), Field(V) def test_set_with_dolfin_constant(setup): scalar_field, scalar_field_dg, vector_field = setup v = df.Constant(1) for field in (scalar_field, scalar_field_dg): # keep track of dolfin function object to make sure we don't overwrite it f = field.f assert f.vector().array().all() == 0 field.set(v) assert f.vector().array().all() == 1 v = df.Constant((1, 2, 3)) f = vector_field.f assert f.vector().array().all() == 0 vector_field.set(v) assert np.allclose(f(0), (1, 2, 3)) def test_set_with_dolfin_expression(setup): scalar_field, scalar_field_dg, vector_field = setup v = df.Expression("x[0]", degree=1) for field, expected in ((scalar_field, 1), (scalar_field_dg, 0.5)): f = field.f assert f(1) == 0 field.set(v) assert abs(f(1) - expected) <= EPSILON v = df.Expression(("1", "2", "3 * x[0]"), degree=1) f = vector_field.f assert np.allclose(f(1), (0, 0, 0)) vector_field.set(v) assert np.allclose(f(1), (1, 2, 3)) def test_set_with_dolfin_function(): mesh = df.UnitIntervalMesh(1) F = df.FunctionSpace(mesh, "CG", 1) function = df.Function(F) function.assign(df.Constant(1)) field = Field(F) function_of_field = field.f assert function_of_field(0) == 0 field.set(function) assert abs(function_of_field(0) - 1) <= EPSILON def test_set_with_another_field(): mesh = df.UnitIntervalMesh(1) V = df.VectorFunctionSpace(mesh, "CG", 1, dim=3) field1 = Field(V) field1.set(df.Constant((1, 2, 3))) field2 = Field(V) function_of_field2 = field2.f assert function_of_field2.vector().array().all() == 0 field2.set(field1) assert np.allclose(function_of_field2(1), (1, 2, 3)) def test_set_with_another_field_new_but_same_function_space(): mesh = df.UnitIntervalMesh(1) V = df.VectorFunctionSpace(mesh, "CG", 1, dim=3) fieldV = Field(V) fieldV.set(df.Constant((1, 2, 3))) W = df.VectorFunctionSpace(mesh, "CG", 1, dim=3) fieldW = Field(W) function_of_fieldW = fieldW.f assert function_of_fieldW.vector().array().all() == 0 fieldW.set(fieldV) assert np.allclose(function_of_fieldW(1), (1, 2, 3)) def test_assumption_that_interpolate_better_than_project_same_vectorspace(): mesh = df.UnitCubeMesh(2, 2, 2) V = df.VectorFunctionSpace(mesh, "CG", 1, dim=3) f = df.Function(V) f.vector()[:] = np.random.rand(len(f.vector().array())) W = df.VectorFunctionSpace(mesh, "CG", 1, dim=3) w1 = df.interpolate(f, W) w2 = df.project(f, W) diff_w1 = f.vector() - w1.vector() diff_w2 = f.vector() - w2.vector() diff_w1.abs() diff_w2.abs() assert diff_w1.array().max() <= diff_w2.array().max() def test_assumption_that_interpolate_better_than_project_different_vectorspace(do_plot=False): mesh = df.UnitSquareMesh(5, 5) V = df.FunctionSpace(mesh, "DG", 0) # models use case of material parameter f = df.interpolate(df.Expression("x[0] <= 0.5 ? 0 : 1", degree=1), V) W = df.FunctionSpace(mesh, "CG", 1) w_i = df.interpolate(f, W) w_p = df.project(f, W) w_e = df.interpolate(df.Expression("x[0] <= 0.5 ? 
0 : 1", degree=1), W) if do_plot: # proof by "looking at the picture" /s df.plot(f, title="original") df.plot(w_i, title="interpolate") df.plot(w_p, title="project") df.plot(w_e, title="from same expression") df.interactive() diff_w_i = w_e.vector() - w_i.vector() diff_w_p = w_e.vector() - w_p.vector() diff_w_i.abs() diff_w_p.abs() assert diff_w_i.array().max() <= diff_w_p.array().max() def test_set_with_dolfin_generic_vector(): mesh = df.UnitIntervalMesh(1) F = df.FunctionSpace(mesh, "CG", 1) function = df.Function(F) function.assign(df.Constant(1)) field = Field(F) function_of_field = field.f assert function_of_field(0) == 0 field.set(function.vector()) assert abs(function_of_field(0) - 1) <= EPSILON def test_set_with_dolfin_expression_ingredients(): mesh = df.UnitIntervalMesh(1) field = Field(df.FunctionSpace(mesh, "CG", 1)) field.set("a * x[0]", a=1) assert abs(field.f(1) - 1) <= EPSILON field = Field(df.VectorFunctionSpace(mesh, "CG", 1, dim=3)) field.set(("1", "2", "3")) assert np.allclose(field.f(1), (1, 2, 3))
4,917
29.171779
94
py
finmag
finmag-master/src/finmag/scheduler/timeevent.py
from finmag.scheduler.event import Event EPSILON = 1e-15 # Time precision, used to compare two time values. def same_time(t0, t1): """ This function compares time values and returns True or False to denote comparison. """ return (t0 is not None) and (t1 is not None) and abs(t0 - t1) < EPSILON class TimeEvent(Event): """ An event that triggers at a certain time, or at the end of time integration. Other derived event classes should define: trigger(self, time, is_stop): What happens when this event is triggered. reset(self, time): Changes the state of this event to what it should have been as defined by its time argument. __str__(self): Sensible string representation of this object. This base class is derived from Event. """ def __init__(self, init_time=None, trigger_on_stop=False, callback=None): """ This defines the init_time at which (if any) this event should trigger. """ if init_time is None and trigger_on_stop is False: raise ValueError("{}.init: Needs either a time, or " "trigger_on_stop set to True." .format(self.__class__.__name__)) self.init_time = init_time # Store time passed at initialisation. self.next_time = init_time # Store the next time to execute at. super(TimeEvent, self).__init__(trigger_on_stop, callback) def __str__(self): callback_msg = "" callback_name = "unknown" if self.callback is not None: if hasattr(self.callback, "__name__"): callback_name = self.callback.__name__ if hasattr(self.callback, "func"): callback_name = self.callback.func.__name__ callback_msg = " | callback: {}".format(callback_name) msg = "<{} | last = {} | next = {} | triggering on stop: {}{}>"\ .format(self.__class__.__name__, self.last, self.next_time, self.trigger_on_stop, callback_msg) return msg def check_and_trigger(self, time, is_stop=False): """ This identifies whether or not this event should trigger given a time value, or given the integration has stopped (is_stop == True). """ if not same_time(time, self.last): if (same_time(time, self.next_time) or (is_stop and self.trigger_on_stop)): self.trigger(time, is_stop) def trigger(self, time, is_stop): """ This abstract method should be implemented by derived classes. Calling this function on this level should raise a NotImplementedError. """ raise NotImplementedError("{0}.trigger: Abstract method of base class " "{0} should be called only by child class." .format(self.__class__.__name__))
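# ---------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a minimal
# concrete subclass showing how check_and_trigger() and the
# EPSILON-based same_time() comparison interact. The class name and the
# printed message are made up for illustration; the real subclasses
# live in derivedevents.py.


class _PrintingTimeEvent(TimeEvent):

    def trigger(self, time, is_stop=False):
        self.last = time
        self.next_time = None  # one-shot event: do not fire again
        print("triggered at t = {}".format(time))


def _timeevent_demo():
    event = _PrintingTimeEvent(init_time=1e-9)
    event.check_and_trigger(0.5e-9)       # too early, nothing happens
    event.check_and_trigger(1e-9 + 1e-16)  # within EPSILON, so it fires
    assert same_time(event.last, 1e-9)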
3,027
34.209302
79
py
finmag
finmag-master/src/finmag/scheduler/event.py
# Event states are as follows:
# EV_ACTIVE: Event intends to trigger eventually if integration continues.
# EV_DONE: Event does not intend to trigger again. Should be removed by the
#    scheduler.
# EV_REQUESTS_STOP_INTEGRATION: Event wishes the scheduler to stop the
#    integration. This does not necessarily remove this event.
EV_ACTIVE, EV_DONE, EV_REQUESTS_STOP_INTEGRATION = range(3)


class Event(object):

    """
    This base class defines generic event objects from which all other event
    objects (should) inherit.

    Upon trigger, the callback function will be called with no argument
    passed to it.

    Other event objects will inherit from this, and should define:

        __init__(self): Should check whether a value is set such that this
            event may trigger at all. An event that doesn't trigger is pretty
            useless, but this class is not passed parameters that fully
            define this behaviour so it should be performed at a higher
            level.
        check_trigger(self, args): Checks the current state to see if this
            event should have triggered or not.
        trigger(self, args): What happens when this event is triggered.
        reset(self, args): Changes the state of this event to what it should
            have been, as defined by the arguments.
        __str__(self): Sensible string representation of this object.

    """

    def __init__(self, trigger_on_stop=False, callback=None):
        """
        This function defines whether or not this event will trigger at the
        end of the integration, and can also be passed a function to call
        upon the event triggering.

        """
        self.state = EV_ACTIVE  # The state of this event.
        self.trigger_on_stop = trigger_on_stop
        self.last = None  # The previous time or step value with which this
                          # event last triggered.
        if callback is not None:
            self.attach(callback)
        else:
            self.callback = None

    def attach(self, callback):
        """
        This function stores the function 'callback', which takes no
        arguments, and which will be called when this event is triggered.

        """
        if not hasattr(callback, "__call__"):
            raise ValueError("{}.attach: Argument should be callable."
                             .format(self.__class__.__name__))
        self.callback = callback
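# ---------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the smallest
# possible concrete event, which fires exactly once, the first time
# check_trigger() is called. The names below are made up for
# illustration; real subclasses live in timeevent.py/derivedevents.py.


class _FireOnceEvent(Event):

    def check_trigger(self):
        if self.state == EV_ACTIVE:
            self.trigger()

    def trigger(self):
        self.last = 0
        self.state = EV_DONE
        if self.callback is not None:
            self.callback()


def _event_demo():
    fired = []
    event = _FireOnceEvent(callback=lambda: fired.append(True))
    event.check_trigger()
    event.check_trigger()  # state is EV_DONE now, so nothing happens
    assert fired == [True] and event.state == EV_DONE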
2,435
37.0625
79
py
finmag
finmag-master/src/finmag/scheduler/scheduler.py
import atexit
import logging
import functools
from numbers import Number
from datetime import datetime, timedelta
from finmag.scheduler.derivedevents import SingleTimeEvent, RepeatingTimeEvent
from finmag.scheduler.timeevent import same_time
from finmag.scheduler.event import EV_DONE, EV_REQUESTS_STOP_INTEGRATION

# This module will try to import the package apscheduler when a realtime event
# is added. Install with "pip install apscheduler".
# See http://pypi.python.org/pypi/APScheduler for documentation.

log = logging.getLogger(name="finmag")


class Scheduler(object):

    """
    Manages a list of actions that should be performed at specific times.

    Note that this class *intentionally* contains hardly any error
    checking. The correct behaviour of the Scheduler depends on the
    parent code doing "the right thing". In particular, it is crucial
    that the reached() method be called with the next time step at
    which an event is expected to happen, which can be obtained using
    the next() method. Thus a typical (correct) usage is as follows:

        s = Scheduler()
        s.add(...)    # schedule some item(s)
        t = s.next()  # get next time step at which something should happen
        # [do some stuff based on the time step just obtained]
        s.reached(t)

    """

    def __init__(self):
        """
        Create a Scheduler.

        """
        self.items = []
        self.realtime_items = {}
        self.realtime_jobs = []  # while the scheduler is running, the job
                                 # associated with each realtime_item will be
                                 # stored in this list (otherwise it is empty)
        self.last = None

    def __iter__(self):
        return self

    def add(self, func, args=None, kwargs=None, at=None, at_end=False,
            every=None, after=None, realtime=False):
        """
        Register a function with the scheduler.

        Returns the scheduled item, which can be removed again by
        calling Scheduler._remove(item). Note that this may change in
        the future, so use with care.

        """
        if not hasattr(func, "__call__"):
            raise TypeError("The function must be callable but object '%s' is of type '%s'" % (
                str(func), type(func)))
        assert at or every or at_end or (
            after and realtime), "Use either `at`, `every` or `at_end` if not in real time mode."
        assert not (
            at is not None and every is not None), "Cannot mix `at` with `every`. Please schedule separately."
        assert not (
            at is not None and after is not None), "Delays don't mix with `at`."

        args = args or []
        kwargs = kwargs or {}
        callback = functools.partial(func, *args, **kwargs)

        if realtime:
            # Register the callback as a realtime job; it is actually
            # scheduled by start_realtime_jobs(). A realtime event that
            # should additionally trigger at the end of the time
            # integration is represented by an extra SingleTimeEvent.
            self._add_realtime(callback, at=at, every=every, after=after)
            if at_end:
                at_end_item = SingleTimeEvent(None, True, callback)
                self._add(at_end_item)
                return at_end_item
            return

        if at or (at_end and not every):
            at_item = SingleTimeEvent(at, at_end, callback)
            self._add(at_item)
            return at_item

        if every:
            every_item = RepeatingTimeEvent(every, after, at_end, callback)
            self._add(every_item)
            return every_item

    def _add(self, item):
        self.items.append(item)

    def _remove(self, item):
        self.items.remove(item)

    def _add_realtime(self, func, at=None, every=None, after=None):
        """
        Register a realtime job.

        The job is stored in self.realtime_items and actually handed to
        APScheduler by start_realtime_jobs().

        """
        if not hasattr(self, "apscheduler"):
            try:
                from apscheduler.scheduler import Scheduler as APScheduler
            except ImportError:
                log.error("Need APScheduler package to schedule realtime events.\n"
                          "Please install from http://pypi.python.org/pypi/APScheduler.")
                raise

            self.apscheduler = APScheduler()
            atexit.register(lambda: self.apscheduler.shutdown(wait=False))
            self.apscheduler.start()

        if after and isinstance(after, Number):
            # `after` can be either a delay in seconds, or a date/datetime.
            # Since the APScheduler API expects a date/datetime, convert it.
            after = datetime.now() + timedelta(seconds=after)

        # Register the job so that it can be started/stopped as needed.
        self.realtime_items[func] = (at, every, after)

    def start_realtime_jobs(self):
        for (func, (at, every, after)) in self.realtime_items.items():
            if at:
                job = self.apscheduler.add_date_job(func, at)
            elif every:
                if after:
                    job = self.apscheduler.add_interval_job(
                        func, seconds=every, start_date=after)
                else:
                    job = self.apscheduler.add_interval_job(
                        func, seconds=every)
            elif after:
                job = self.apscheduler.add_date_job(func, after)
            else:
                raise ValueError(
                    "Assertion violated. Use either `at`, `every` or `after`.")

            self.realtime_jobs.append(job)

    def stop_realtime_jobs(self):
        for job in self.realtime_jobs:
            self.apscheduler.unschedule_job(job)
        self.realtime_jobs = []

    def next(self):
        """
        Returns the time for the next action to be performed.

        Automatically called upon iteration of scheduler instance.

        """
        next_step = None
        stop = False  # This flag determines whether or not iteration
                      # should be stopped after all items are checked.

        for item in list(self.items):  # copy, since _remove() mutates the list
            if item.next_time is not None and (next_step is None or
                                               next_step > item.next_time):
                next_step = item.next_time
            if item.state == EV_REQUESTS_STOP_INTEGRATION:
                self._remove(item)
                stop = True

        if next_step is None:
            stop = True

        if stop is True:
            raise StopIteration

        if next_step < self.last:
            log.error("Scheduler computed the next time step should be t = {:.2g} s, but the last one was already t = {:.2g} s.".format(
                next_step, self.last))
            raise ValueError("Scheduler is corrupted. Requested a time step in the past: dt = {:.2g}.".format(
                next_step - self.last))
        return next_step

    def reached(self, time):
        """
        Notify the Scheduler that a certain point in time has been reached.

        It will perform the action(s) that were defined to happen at that time.

        """
        for item in list(self.items):  # copy, since _remove() mutates the list
            if same_time(item.next_time, time):
                item.check_and_trigger(time)
            if item.state == EV_DONE:
                self._remove(item)
        self.last = time

    def finalise(self, time):
        """
        Trigger all events that need to happen at the end of time integration.

        """
        for item in self.items:
            if item.trigger_on_stop:
                item.check_and_trigger(time, is_stop=True)

    def reset(self, time):
        """
        Override schedule so that internal time is now `time` and modify
        scheduled items accordingly.

        """
        self.last = None
        for item in self.items:
            item.reset(time)

    def _print_realtime_item(self, item, func_print=log.info):
        (f, (at, every, after)) = item
        # `f` is normally a functools.partial created in add(), so the
        # name of the original function lives in `f.func`.
        name = f.func.__name__ if isinstance(f, functools.partial) else f.__name__
        func_print("'{}': <at={}, every={}, after={}>".format(
            name, at, every, after))

    def print_scheduled_items(self, func_print=log.info):
        for item in self.items:
            # this will call __str__ on the item, which should be defined for
            # all events
            print item
        for item in self.realtime_items.items():
            self._print_realtime_item(item, func_print)

    def clear(self):
        log.debug("Removing scheduled items:")
        self.print_scheduled_items(func_print=log.debug)
        self.items = []
        self.stop_realtime_jobs()
        self.realtime_items = {}

    def run(self, integrator, callbacks_at_scheduler_events=[]):
        """
        Integrate until an exit condition in the schedule has been met.

        The optional argument `callbacks_at_scheduler_events` should be a
        list of functions which are called whenever the time integration
        reaches a "checkpoint" where some event is scheduled. Each such
        function should expect the timestep t at which the event occurs as
        its single argument. Note that these functions are called just
        *before* the scheduled events are triggered. This is used, for
        example, to keep time-dependent fields up to date with the
        simulation time.
""" self.start_realtime_jobs() for t in self: assert(t >= integrator.cur_t) # sanity check # If new items were scheduled after a previous time # integration finished, we can have t == integrator.cur_t. # However, this confuses the integrators so we don't integrate # in this case. if t != integrator.cur_t: integrator.advance_time(t) for f in callbacks_at_scheduler_events: f(t) self.reached(t) self.finalise(t) self.stop_realtime_jobs()
9,526
34.681648
136
py
finmag
finmag-master/src/finmag/scheduler/scheduler_test.py
import pytest from timeevent import TimeEvent from derivedevents import SingleTimeEvent, RepeatingTimeEvent from scheduler import Scheduler class Counter(object): cnt_every = 0 cnt_at = 0 def inc_every(self): self.cnt_every += 1 def inc_at(self): self.cnt_at += 1 def reset(self): self.cnt_every = 0 self.cnt_at = 0 def test_calling_trigger_on_TimeEvent_raises_exception(): t = TimeEvent(42) with pytest.raises(NotImplementedError): t.trigger(0, False) def test_first_every_at_start(): e = RepeatingTimeEvent(100) assert e.next_time == 0.0 e = RepeatingTimeEvent(100, 5) assert e.next_time == 5 def test_update_next_stop_according_to_interval(): e = RepeatingTimeEvent(100) assert e.next_time == 0 e.check_and_trigger(0) assert e.next_time == 100 t0 = e.next_time e.check_and_trigger(100) t1 = e.next_time assert abs(t1 - t0) == 100 def test_can_attach_callback(): c = Counter() assert c.cnt_every == 0 e = RepeatingTimeEvent(100) e.attach(c.inc_every) e.check_and_trigger(0) assert c.cnt_every == 1 # alternative syntax c.reset() e = RepeatingTimeEvent(100, callback=c.inc_every) assert c.cnt_every == 0 e.check_and_trigger(0) assert c.cnt_every == 1 def test_at_with_single_value(): c = Counter() assert c.cnt_at == 0 a = SingleTimeEvent(100) assert a.next_time == 100 a.attach(c.inc_at) a.check_and_trigger(0) assert c.cnt_at == 0 a.check_and_trigger(100) assert c.cnt_at == 1 def test_returns_None_if_no_actions_or_done(): s = Scheduler() with pytest.raises(StopIteration): s.next() def bogus(): pass s.add(bogus, at=1) assert s.next() == 1 s.reached(1) with pytest.raises(StopIteration): s.next() def test_scheduler(): c = Counter() s = Scheduler() s.add(c.inc_every, every=200) assert c.cnt_every == 0 assert s.next() == 0.0 s.add(c.inc_at, at=100) s.reached(0.0) assert c.cnt_every == 1 assert c.cnt_at == 0 assert s.next() == 100 s.reached(100) assert c.cnt_every == 1 assert c.cnt_at == 1 assert s.next() == 200 s.reached(200) assert c.cnt_every == 2 assert c.cnt_at == 1 # If a later timestep is passed to 's.reached()' then actions in # between *won't* be triggered. s.reached(500) assert c.cnt_every == 2 # still the same as before assert c.cnt_at == 1 # still the same as before def test_reached(): c = Counter() s = Scheduler() s.add(c.inc_every, every=10) s.add(c.inc_at, at=20) assert c.cnt_every == 0 assert c.cnt_at == 0 assert s.next() == 0.0 # Trigger the first couple of events s.reached(0) assert c.cnt_every == 1 assert c.cnt_at == 0 s.reached(10) assert c.cnt_every == 2 assert c.cnt_at == 0 # Call reached() with a time step that skips the next scheduled # one; this should *not* trigger any events! s.reached(30) assert c.cnt_every == 2 assert c.cnt_at == 0 # Now call reached() with the next scheduled time step, assert # that it triggered the event. Then do a couple more steps for # sanity checks. s.reached(20) assert c.cnt_every == 3 assert c.cnt_at == 1 s.reached(25) assert c.cnt_every == 3 assert c.cnt_at == 1 s.reached(30) assert c.cnt_every == 4 assert c.cnt_at == 1 # It is not an error to call reached() with a time step in the # past. However, this won't trigger any events here because the # RepeatingTimeEvent knows about its next time step, and the # SingleTimeEvent was already triggered above (and no event is # triggered twice for the same time step, unless a reset() # happens). 
s.reached(10) assert c.cnt_every == 4 assert c.cnt_at == 1 s.reached(20) assert c.cnt_every == 4 assert c.cnt_at == 1 def test_scheduler_every(): c = Counter() s = Scheduler() s.add(c.inc_every, every=100, after=5) assert c.cnt_every == 0 assert s.next() == 5 s.reached(5) assert c.cnt_every == 1 s.reached(100) # shouldn't trigger any event assert c.cnt_every == 1 # ... thus the counter shouldn't increase s.reached(105) assert c.cnt_every == 2 s.reached(205) assert c.cnt_every == 3 def test_scheduler_clear(): c = Counter() s = Scheduler() s.add(c.inc_every, every=5) assert c.cnt_every == 0 assert s.next() == 0.0 s.reached(0) assert c.cnt_every == 1 s.reached(5) assert c.cnt_every == 2 # Clear the schedule and assert that nothing is supposed to happen any more s.clear() assert(s.items == []) assert(s.realtime_items == {}) s.reached(10) assert c.cnt_every == 2 # still the same as before with pytest.raises(StopIteration): s.next() def test_regression_not_more_than_once_per_time(): x = [0, 0, 0, 0] def my_at_fun(): x[0] += 1 def my_at_fun_accident(): x[1] += 1 def my_every_fun(): x[2] += 1 def my_standalone_at_end_fun(): x[3] += 1 s = Scheduler() s.add(my_at_fun, at=1, at_end=True) # can trigger twice # 2 is also end, should trigger only once s.add(my_at_fun_accident, at=2, at_end=True) s.add(my_every_fun, every=1, after=1, at_end=True) # twice s.add(my_standalone_at_end_fun, at_end=True) # once anyways assert s.next() == 1 s.reached(1) assert x == [1, 0, 1, 0] s.reached(2) assert x == [1, 1, 2, 0] s.finalise(2) assert x == [2, 1, 2, 1] def test_illegal_arguments(): def dummy_func(): pass s = Scheduler() with pytest.raises(AssertionError): s.add(dummy_func, at=0, after=1) # delays don't mix with 'at' with pytest.raises(AssertionError): s.add(dummy_func, at=1, every=2) # can't mix 'at' with 'every' def test_reset_with_every(): c = Counter() s = Scheduler() s.add(c.inc_every, every=10) assert c.cnt_every == 0 # Trigger a few events at their scheduled times s.reached(0) assert c.cnt_every == 1 s.reached(10) assert c.cnt_every == 2 s.reached(20) assert c.cnt_every == 3 s.reached(30) assert c.cnt_every == 4 # Reset time from 30 to 15 (note: in between two scheduled time # steps); check that no additional events were triggered and that # the next time step is as expected s.reset(15) assert c.cnt_every == 4 assert s.next() == 20 # Trigger a few more events s.reached(20) assert c.cnt_every == 5 s.reached(30) assert c.cnt_every == 6 # Reset time again, this time precisely to a scheduled time step # (namely, 20); again, no additional events should have been # triggered and the next scheduled time step should still be 20. s.reset(20) assert c.cnt_every == 6 assert s.next() == 20 # Trigger a few more events s.reached(20) assert c.cnt_every == 7 s.reached(30) assert c.cnt_every == 8 def test_reset_with_at(): c = Counter() s = Scheduler() s.add(c.inc_at, at=10) assert c.cnt_at == 0 s.reached(0) assert c.cnt_at == 0 s.reached(10) assert c.cnt_at == 1 s.reached(20) assert c.cnt_at == 1 s.reached(30) assert c.cnt_at == 1 # Events that already happened are not triggered again ... s.reached(10) assert c.cnt_at == 1 # ... unless we reset the scheduler first s.reset(2) s.reached(10) assert c.cnt_at == 2 # Resetting to a time *after* the scheduled time will result in # the event not being triggered again, even if we tell the # scheduler that the time step was reached. s.reset(30) s.reached(10) assert c.cnt_at == 2
7,883
23.184049
79
py
finmag
finmag-master/src/finmag/scheduler/__init__.py
# This file is needed just so that python recognises
# the contents as a module, but we're not actually
# exposing any names here.
131
32
52
py
finmag
finmag-master/src/finmag/scheduler/derivedevents.py
import logging from finmag.scheduler.timeevent import TimeEvent, same_time, EPSILON # Import the possible states of events. from finmag.scheduler.event import EV_ACTIVE, EV_DONE from finmag.scheduler.event import EV_REQUESTS_STOP_INTEGRATION log = logging.getLogger(name="finmag") class SingleTimeEvent(TimeEvent): """ A time-based event that triggers at a certain time, and/or at the end of time integration. """ def trigger(self, time, is_stop=False): """ This calls the callback function now, and does not check whether it is correct to do so (this is the job of check_trigger). This method updates time values, executes the callback function, and may alter the state of this event. """ self.last = time self.next_time = None if self.callback is None: log.warning("Event triggered with no callback function.") else: returnValue = self.callback() if returnValue is True: self.state = EV_DONE if returnValue is False: self.state = EV_REQUESTS_STOP_INTEGRATION def reset(self, time): """ This changes the state of this event to what it should have been at the specified time. This does not re/set the callback function. """ # Reset to initial if specified time is prior to initial time. if time < self.init_time: self.last = None self.next_time = self.init_time self.state = EV_ACTIVE # Otherwise, we have already triggered. else: self.last = self.init_time self.next_time = None self.state = EV_DONE class RepeatingTimeEvent(SingleTimeEvent): """ A time-based event that triggers regularly, and/or at the end of time integration. If a variant time progression is desired, a callable object can be passed instead of constant interval, which will be evaluated by the trigger. The callable object accepts only "self" as an argument, and so can operate on properties defined in this event. """ def __init__(self, interval, init_time=None, trigger_on_stop=False, callback=None): super(RepeatingTimeEvent, self).__init__(init_time or 0., trigger_on_stop, callback) # Negative intervals make us sad. if interval < 0: raise ValueError("{}.init: Proposed interval is negative; events " "cannot occur in the past without the use of " "reset.".format(self.__class__.__name__)) self.interval = interval def trigger(self, time, is_stop=False): super(RepeatingTimeEvent, self).trigger(time=time, is_stop=is_stop) # Calculate next time to trigger. if not hasattr(self.interval, "__call__"): self.next_time = self.last + self.interval else: self.next_time = self.last + self.interval() def reset(self, time): """ As with base classes, though it is important to note that if this event is reset to a time that is precisely when the event would trigger, then it should trigger again. """ if not hasattr(self.interval, "__call__"): self.last = time - time % self.interval if time % self.interval < EPSILON: self.last -= self.interval self.next_time = self.last + self.interval else: msg = "Resetting in time is not well defined for repeated " +\ "events with non-constant interval." raise NotImplementedError(msg)
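# ---------------------------------------------------------------------
# Illustrative sketch (not part of the original module): it demonstrates
# the callable-interval feature mentioned in the RepeatingTimeEvent
# docstring. Note that trigger() invokes self.interval() without
# arguments, so the callable must take none; here a small stateful
# helper class (made up for illustration) doubles the spacing after
# each firing.


class _DoublingInterval(object):

    def __init__(self, first_interval):
        self.dt = first_interval

    def __call__(self):
        dt, self.dt = self.dt, 2 * self.dt
        return dt


def _repeating_event_demo():
    event = RepeatingTimeEvent(_DoublingInterval(1.0), callback=lambda: None)
    event.check_and_trigger(0.0)  # fires at t=0; next firing at t=1
    assert event.next_time == 1.0
    event.check_and_trigger(1.0)  # the interval doubles to 2
    assert event.next_time == 3.0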
3,767
32.945946
79
py
finmag
finmag-master/src/finmag/physics/effective_field.py
import logging import numpy as np from finmag.field import Field from finmag.util.helpers import vector_valued_function from finmag.energies import TimeZeeman from finmag.physics.errors import UnknownInteraction logger = logging.getLogger(name="finmag") class EffectiveField(object): def __init__(self, m, Ms, unit_length): """ *Arguments* m: Field Ms: number (?) unit_length: float """ assert isinstance(m, Field) assert isinstance(Ms, Field) self.m_field = m self.Ms = Ms self.unit_length = unit_length self.output_size = self.m_field.f.vector().local_size() self.H_eff = np.zeros(self.output_size) self.interactions = {} # TODO: Get rid of self._callables. # At the moment, we keep track of which functions need # to be updated with simulation time. We want to move to a # model where we pass the simulation state (m and t at the moment) # explicitly to compute_field/compute_energy. self.need_time_update = [] def add(self, interaction, with_time_update=None): """ Add an interaction (such as Exchange, Anisotropy, Demag). *Arguments:* interaction The interaction to be added. with_time_update (optional) A function of the form f(t), which accepts a time step `t` as its only single parameter and updates the internal state of the interaction accordingly. """ if interaction.name in self.interactions: raise ValueError("Interaction names must be unique, but an " "interaction with the same name already " "exists: {}.".format(interaction.name)) logger.debug( "Adding interaction {} to simulation.".format(interaction.name)) interaction.setup(self.m_field, self.Ms, self.unit_length) self.interactions[interaction.name] = interaction # automatic connection of TimeZeeman to with_time_update if isinstance(interaction, TimeZeeman) and with_time_update is None: with_time_update = interaction.update if with_time_update: self.need_time_update.append(with_time_update) def update(self, t=None): """ Update the effective field internally so that its value reflects the value at time `t`. The argument `t` can be omitted if no interaction requires a time update. """ if t is None and self.need_time_update: raise ValueError("Some interactions require a time update, " "but no time step was given.") for update in self.need_time_update: update(t) self.H_eff[:] = 0 for interaction in self.interactions.itervalues(): self.H_eff += interaction.compute_field() def compute(self, t=None): """ Compute and return the effective field. The argument `t` is only required if one or more interactions require a time-update. """ self.update(t) return self.H_eff.copy() def compute_jacobian_only(self, t): """ Compute and return the total contribution of all interactions that are included in the Jacobian. """ for update in self.need_time_update: update(t) H_eff = np.zeros(self.output_size) for interaction in self.interactions.itervalues(): if interaction.in_jacobian: H_eff += interaction.compute_field() return H_eff def total_energy(self): """ Compute and return the total energy contribution of all interactions present in the simulation. """ energy = 0. for interaction in self.interactions.itervalues(): energy += interaction.compute_energy() return energy def exists(self, interaction_name): """ Returns true if an interaction by that name is known to the system. """ return interaction_name in self.interactions.keys() def get(self, interaction_name): """ Returns the interaction object with the given name. Raises a ValueError if no (or more than one) matching interaction is found. Use all() to obtain list of names of available interactions. 
""" if not self.exists(interaction_name): raise UnknownInteraction(interaction_name, self.all()) return self.interactions[interaction_name] def all(self): """ Returns list of interactions names (as list of strings). """ return sorted(self.interactions.keys()) def remove(self, interaction_name): """ Removes the interaction object of the given name. Raises a ValueError if no (or more than one) matching interaction is found. """ if not self.exists(interaction_name): raise UnknownInteraction(interaction_name, self.all()) del self.interactions[interaction_name] def get_dolfin_function(self, interaction_name, region=None): interaction = self.get(interaction_name) return vector_valued_function(interaction.compute_field(), self.m_field.functionspace)
5,424
30.911765
76
py
finmag
finmag-master/src/finmag/physics/errors.py
class UnknownInteraction(KeyError): def __init__(self, unknown_interaction, known_interactions): self.unknown = unknown_interaction self.known = known_interactions super(UnknownInteraction, self).__init__() def __str__(self): message = ("Couldn't find interaction with name {}. Do you mean " "one of {}?".format(self.unknown, ", ".join(self.known))) return message
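# ---------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the interaction
# names below are made up; they just show the message this exception
# produces.


def _unknown_interaction_demo():
    try:
        raise UnknownInteraction("Demag", ["Exchange", "Zeeman"])
    except UnknownInteraction as e:
        assert "Demag" in str(e) and "Exchange, Zeeman" in str(e)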
434
35.25
76
py
finmag
finmag-master/src/finmag/physics/neb_cartesian_modified.py
import os import numpy as np import dolfin as df # import inspect from aeon import timer # import finmag.util.consts as consts # from finmag.util import helpers # from finmag.physics.effective_field import EffectiveField from finmag.util.vtk_saver import VTKSaver from finmag import Simulation # from finmag.field import Field # Change sim._m to new field class # in line 184 import finmag.native.cvode_petsc as cvode from finmag.util.fileio import Tablewriter, Tablereader # import matplotlib.pyplot as plt # from mpl_toolkits.mplot3d import Axes3D # from matplotlib.colors import colorConverter # from matplotlib.collections import PolyCollection, LineCollection import logging log = logging.getLogger(name="finmag") from neb import cartesian2spherical from neb import spherical2cartesian def linear_interpolation_two(m0, m1, n): """ Define a linear interpolation between two states of the energy band (m0, m1) to get an initial state. The interpolation is done in the magnetic moments that constitute the magnetic system. """ theta_phi0 = cartesian2spherical(m0) theta_phi1 = cartesian2spherical(m1) # The differences with the number of interps + 1 dtheta = (theta_phi1 - theta_phi0) / (n + 1) coords = [] for i in range(n): theta = theta_phi0 + (i + 1) * dtheta coords.append(spherical2cartesian(theta)) return coords def compute_dm(m0, m1): dm = m0 - m1 length = len(dm) dm = np.sqrt(np.sum(dm ** 2)) / length return dm class NEB_Sundials(object): """ Nudged elastic band method by solving the differential equation using Sundials. """ def __init__(self, sim, initial_images, climbing_image=None, interpolations=None, spring=5e5, name='unnamed'): """ *Arguments* sim: the Simulation class initial_images: a list contain the initial value, which can have any of the forms accepted by the function 'finmag.util.helpers. vector_valued_function', for example, initial_images = [(0,0,1), (0,0,-1)] or with given defined function def init_m(pos): x=pos[0] if x<10: return (0,1,1) return (-1,0,0) initial_images = [(0,0,1), (0,0,-1), init_m ] are accepted forms. climbing_image : An integer with the index (from 1 to the total number of images minus two; it doesn't have any sense to use the extreme images) of the image with the largest energy, which will be updated in the NEB algorithm using the Climbing Image NEB method (no spring force and "with the component along the elastic band inverted" [*]). See: [*] Henkelman et al., The Journal of Chemical Physics 113, 9901 (2000) interpolations : a list only contain integers and the length of this list should equal to the length of the initial_images minus 1, i.e., len(interpolations) = len(initial_images) - 1 ** THIS IS not well defined in CARTESIAN coordinates** spring: the spring constant, a float value disable_tangent: this is an experimental option, by disabling the tangent, we can get a rough feeling about the local energy minima quickly. 
""" self.sim = sim self.name = name self.spring = spring # We set a minus one because the *sundials_rhs* function # only uses an array without counting the extreme images, # whose length is self.image_num (see below) if climbing_image is not None: self.climbing_image = climbing_image - 1 else: self.climbing_image = climbing_image # Dolfin function of the new _m_field (instead of _m) self._m = sim.llg._m_field.f self.effective_field = sim.llg.effective_field if interpolations is None: interpolations = [0 for i in range(len(initial_images) - 1)] self.initial_images = initial_images self.interpolations = interpolations if len(interpolations) != len(initial_images) - 1: raise RuntimeError("""The length of interpolations should be equal to the length of the initial_images array minus 1, i.e., len(interpolations) = len(initial_images) - 1""") if len(initial_images) < 2: raise RuntimeError("""At least two images must be provided to create the energy band""") # the total image number including two ends self.total_image_num = len(initial_images) + sum(interpolations) self.image_num = self.total_image_num - 2 S3 = sim.S3 Vs = [] for i in range(self.image_num): Vs.append(S3) ME = df.MixedFunctionSpace(Vs) self.images_fun = df.Function(ME) #all the degree of freedom, which is a petsc vector self.coords = df.as_backend_type(self.images_fun.vector()).vec() self.t = 0 self.step = 0 self.ode_count = 1 self.integrator = None self.initial_image_coordinates() self.create_tablewriter() def linear_interpolation_two(self, image0, image1, n): """ Define a linear interpolation between two states of the energy band (m0, m1) to get an initial state. The interpolation is done in the magnetic moments that constitute the magnetic system. """ # Get the spherical coordinates dolfin functions # for the m0 and m1 magnetisation vector fields self.sim.set_m(self.initial_images[image0]) theta0, phi0 = self.sim._m_field.get_spherical() self.sim.set_m(self.initial_images[image1]) theta1, phi1 = self.sim._m_field.get_spherical() # To not depend on numpy arrays, we will assemble every # interpolation into dolfin functions and assign their # values to the subdomains of the MixedFunctionSpace of images # Create a scalar Function Space S1 = df.FunctionSpace(self.sim.m_field.functionspace.mesh(), 'CG', 1) # Define a variable to use as vector in all the assemble instances assemble_vector = None # Define the interpolations step for theta assemble_vector = df.assemble(df.dot((theta1 - theta0) / (n + 1)) * df.dP, tensor=assemble_vector) dtheta = df.Function(S1) dtheta.vector().axpy(1, assemble_vector) # The same for Phi assemble_vector = df.assemble(df.dot((theta1 - theta0) / (n + 1)) * df.dP, tensor=assemble_vector) dphi = df.Function(S1) dphi.vector().axpy(1, assemble_vector) # Now loop for every interpolation and assign it to # the MixedFunctionSpace for i in xrange(n): # Create a dolfin function from the FS, for the interpolation interpolation_theta = df.Function(S1) interpolation_phi = df.Function(S1) # Compute the radius using the assemble method with dolfin dP # (like a dirac delta to get values on every node of the mesh) # This returns a dolfin vector # Theta assemble_vector = df.assemble(df.dot(theta0 + (i + 1) * dtheta, # df.TestFunction(S1)) * df.dP, tensor=assemble_vector ) # Set the vector values to the dolfin function interpolation_theta.vector().axpy(1, assemble_vector) # Phi assemble_vector = df.assemble(df.dot(phi0 + (i + 1) * dphi, # df.TestFunction(S1)) * df.dP, tensor=assemble_vector ) # Set the vector values to the 
            interpolation_phi.vector().axpy(1, assemble_vector)

            # Now set this interpolation to the corresponding image.
            # Build the magnetisation vector field on the simulation's
            # vector function space (df.Function; the original code used
            # the non-existent df.VectorFunction and then overwrote the
            # function with the assembled vector)
            interpolation = df.Function(self.sim.S3)
            interpolation_vector = df.assemble(
                df.dot(df.as_vector((df.sin(interpolation_theta) * df.cos(interpolation_phi),
                                     df.sin(interpolation_theta) * df.sin(interpolation_phi),
                                     df.cos(interpolation_theta)
                                     )),
                       df.TestFunction(self.sim.S3)) * df.dP
                )
            interpolation.vector().axpy(1, interpolation_vector)

            # Now assign the interpolation vector function values to the
            # corresponding image in the MixedFunctionSpace
            df.assign(self.images_fun.sub(i), interpolation)

        # coords = []
        # for i in range(n):
        #     theta_phi_interp = theta_phi0 + (i + 1) * d_theta_phi
        #     coords.append(spherical2cartesian(theta_phi_interp))
        # return coords

    def create_tablewriter(self):
        entities_energy = {
            'step': {'unit': '<1>',
                     'get': lambda sim: sim.step,
                     'header': 'steps'},
            'energy': {'unit': '<J>',
                       'get': lambda sim: sim.energy,
                       'header': ['image_%d' % i
                                  for i in range(self.image_num + 2)]}
        }
        self.tablewriter = Tablewriter(
            '%s_energy.ndt' % (self.name), self, override=True,
            entities=entities_energy)

        entities_dm = {
            'step': {'unit': '<1>',
                     'get': lambda sim: sim.step,
                     'header': 'steps'},
            'dms': {'unit': '<1>',
                    'get': lambda sim: sim.distances,
                    'header': ['image_%d_%d' % (i, i + 1)
                               for i in range(self.image_num + 1)]}
        }
        self.tablewriter_dm = Tablewriter(
            '%s_dms.ndt' % (self.name), self, override=True,
            entities=entities_dm)

    def initial_image_coordinates(self):
        """
        Generate the coordinates linearly according to the number of
        interpolations provided.

        Example: Imagine we have 4 images and we want 3 interpolations
        between every neighbouring pair, i.e. interpolations = [3, 3, 3]

        1. Consider the initial states with the interpolation numbers
           and choose the first and second state

                0          1           2          3
                X -------- X --------- X -------- X
                     3           3          3

        2. The counter image_id is set to 0

        3. Set the image 0 magnetisation vector as m0 and append the
           values to self.coords[0]. Update the counter: image_id = 1 now

        4. Set the image 1 magnetisation values as m1 and interpolate
           the values between m0 and m1, generating 3 arrays with the
           magnetisation values of every interpolation image. For every
           array, append the values to self.coords[i], with i = 1, 2
           and 3; updating the counter every time, so image_id = 4 now

        5. Append the value of m1 (image 1) in self.coords[4]
           Update counter (image_id = 5 now)

        6. Move to the next pair of images; now set the 1st image
           magnetisation values as m0 and append to self.coords[5]

        7. Interpolate to get self.coords[i], for i = 6, 7, 8 ...

        8. Repeat as before until we reach the last pair of images: 2 - 3

        9. Finally append the magnetisation of the last image
           (self.initial_images[-1]). In this case, the 3rd image

        Then, for every magnetisation vector values array
        (self.coords[i]), set the values in the simulation and store the
        energy corresponding to every i-th image in the self.energy[i]
        array

        Finally, flatten the self.coords matrix (containing the
        magnetisation values of every image in different rows)

        ** Our generalised coordinates in the NEBM are the magnetisation
        values

        """
        # Initialise the counter
        image_id = 0

        # For every interpolation between images (zero if no
        # interpolations were specified)
        for i in range(len(self.interpolations)):

            # Store the number
            n = self.interpolations[i]

            # Save on the first image of a pair (step 1, 6, ...)
self.sim.set_m(self.initial_images[i]) m0 = self.sim._m_field.get_ordered_numpy_array_xxx() df.assign(self.images_fun.sub(image_id),self.sim._m_field.f) image_id = image_id + 1 # Set the second image in the pair as m1 and interpolate # (step 4 and 7), saving in corresponding self.coords entries self.sim.set_m(self.initial_images[i + 1]) m1 = self.sim._m_field.get_ordered_numpy_array_xxx() # Interpolations (arrays with magnetisation values) coords = linear_interpolation_two(m0, m1, n) for coord in coords: self.sim.set_m(coord) df.assign(self.images_fun.sub(image_id), self.sim._m_field.f) self.coords[image_id][:] = coord[:] image_id = image_id + 1 # Continue to the next pair of images # Append the magnetisation of the last image self.sim.set_m(self.initial_images[-1]) df.assign(self.images_fun.sub(image_id), self.sim._m_field.f) """ # Save the energies for i in range(self.total_image_num): self._m.vector().set_local(self.coords[i]) self.effective_field.update() self.energy[i] = self.effective_field.total_energy() """ def save_vtks(self): """ Save vtk files in different folders, according to the simulation name and step. Files are saved as simname_simstep/image_00000x.vtu """ # Create the directory directory = 'vtks/%s_%d' % (self.name, self.step) if not os.path.exists(directory): os.makedirs(directory) # Save the vtk files with the finmag function # The last separator '_' is to distinguish the image # from its corresponding number, e.g. image_000001.pvd vtk_saver = VTKSaver('%s/image_.pvd' % (directory), overwrite=True) self.coords.shape = (self.total_image_num, -1) for i in range(self.total_image_num): self._m.vector().set_local(self.coords[i, :]) # set t =0, it seems that the parameter time is only # for the interface? vtk_saver.save_field(self._m, 0) self.coords.shape = (-1, ) def create_integrator(self, rtol=1e-6, atol=1e-6): integrator = cvode.CvodeSolver(self.sundials_rhs, 0, self.coords, rtol, atol) self.integrator = integrator def compute_effective_field(self, y): y.shape = (self.total_image_num, -1) """ for i in range(self.image_num): self._m.vector().set_local(y[i + 1]) # self.effective_field.update() # Compute effective field, which is the gradient of # the energy in the NEB method (derivative with respect to # the generalised coordinates) h = self.effective_field.H_eff # self.Heff[i + 1, :] = h[:] # Compute the total energy self.energy[i + 1] = self.effective_field.total_energy() # Compute the 'distance' or difference between neighbouring states # around y[i+1]. This is used to compute the spring force # dm1 = compute_dm(y[i], y[i + 1]) dm2 = compute_dm(y[i + 1], y[i + 2]) self.springs[i] = self.spring * (dm2 - dm1) # Use the native NEB (C++ code) to compute the tangents according # to the improved NEB method, developed by Henkelman and Jonsson # at: Henkelman et al., Journal of Chemical Physics 113, 22 (2000) native_neb.compute_tangents(y, self.energy, self.tangents) # native_neb.compute_springs(y,self.springs,self.spring) """ y.shape = (-1, ) def sundials_rhs(self, time, y, ydot): """ Right hand side of the optimization scheme used to find the minimum energy path. In our case, we use a LLG kind of equation: d Y / dt = Y x Y x D D = -( nabla E + [nabla E * t] t ) + F_spring where Y is an image: Y = (M_0, ... 
, M_N)

        and t is the tangent vector defined according to the energy of
        the neighbouring images (see the Henkelman et al. publication)

        If a climbing_image index is specified, the corresponding image
        will be iterated without the spring force and with an inverted
        component along the tangent
        """
        # Update the ODE solver
        self.ode_count += 1

        timer.start("sundials_rhs", self.__class__.__name__)

        # Compute the effective field H for every image, H = -nabla E
        # (derived with respect to M)
        self.compute_effective_field(y)

        # Reshape y and ydot in a matrix of total_image_num rows
        y.shape = (self.total_image_num, -1)
        ydot.shape = (self.total_image_num, -1)

        # Compute the total force for every image (not the extremes)
        # Remember that self.image_num = self.total_image_num - 2
        # The total force is:
        #   D = - (-nabla E + [nabla E * t] t) + F_spring
        # This value differs if a climbing image is specified:
        #   D_climb = -nabla E + 2 * [nabla E * t] t
        for i in range(self.image_num):
            h = self.Heff[i + 1]
            t = self.tangents[i]
            sf = self.springs[i]

            # The explicit 'is not None' avoids misclassifying a
            # climbing image with (shifted) index 0
            if not (self.climbing_image is not None and i == self.climbing_image):
                h3 = h - np.dot(h, t) * t + sf * t
            else:
                h3 = h - 2 * np.dot(h, t) * t

            h[:] = h3[:]

            #ydot[i+1, :] = h3[:]

        # Update the step with the optimisation algorithm, in this
        # case we use: dY /dt = Y x Y x D
        # (check the C++ code in finmag/native/src/)
        #native_neb.compute_dm_dt(y, self.Heff, ydot)

        ydot[0, :] = 0
        ydot[-1, :] = 0

        y.shape = (-1,)
        ydot.shape = (-1,)

        timer.stop("sundials_rhs", self.__class__.__name__)

        return 0

    def compute_distance(self):
        distance = []

        ys = self.coords
        ys.shape = (self.total_image_num, -1)
        for i in range(self.total_image_num - 1):
            dm = compute_dm(ys[i], ys[i + 1])
            distance.append(dm)
        ys.shape = (-1, )

        self.distances = np.array(distance)

    def run_until(self, t):
        if t <= self.t:
            return

        self.integrator.advance_time(t, self.coords)

        m = self.coords
        y = self.last_m

        m.shape = (self.total_image_num, -1)
        y.shape = (self.total_image_num, -1)

        max_dmdt = 0
        for i in range(1, self.image_num + 1):
            dmdt = compute_dm(y[i], m[i]) / (t - self.t)
            if dmdt > max_dmdt:
                max_dmdt = dmdt

        m.shape = (-1,)
        y.shape = (-1,)

        self.last_m[:] = m[:]
        self.t = t

        return max_dmdt

    def relax(self, dt=1e-8, stopping_dmdt=1e4, max_steps=1000,
              save_npy_steps=100, save_vtk_steps=100):

        if self.integrator is None:
            self.create_integrator()

        log.debug("Relaxation parameters: "
                  "stopping_dmdt={} (degrees per nanosecond), "
                  "time_step={} s, max_steps={}.".format(stopping_dmdt,
                                                         dt, max_steps))

        # Save the initial state (i = 0)
        self.compute_distance()
        self.tablewriter.save()
        self.tablewriter_dm.save()

        for i in range(max_steps):

            if i % save_vtk_steps == 0:
                self.save_vtks()

            if i % save_npy_steps == 0:
                self.save_npys()

            self.step += 1

            cvode_dt = self.integrator.get_current_step()

            increment_dt = dt
            if cvode_dt > dt:
                increment_dt = cvode_dt

            dmdt = self.run_until(self.t + increment_dt)

            self.compute_distance()
            self.tablewriter.save()
            self.tablewriter_dm.save()

            log.debug("step: {:.3g}, step_size: {:.3g}"
                      " and max_dmdt: {:.3g}.".format(self.step,
                                                      increment_dt,
                                                      dmdt))

            if dmdt < stopping_dmdt:
                break

        log.info("Relaxation finished at time step = {:.4g}, "
                 "t = {:.2g}, call rhs = {:.4g} "
                 "and max_dmdt = {:.3g}".format(self.step,
                                                self.t,
                                                self.ode_count,
                                                dmdt))

        self.save_vtks()
        self.save_npys()
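
# ---------------------------------------------------------------------
# A minimal, self-contained sketch (not part of the class above; all
# names are illustrative) of the spherical interpolation idea used in
# `linear_interpolation_two`: the band between two magnetisation states
# is initialised by interpolating the spherical angles (theta, phi)
# linearly, which keeps every interpolated spin on the unit sphere.
import numpy as np


def spherical_interpolation_sketch(m0, m1, n):
    """Return n interpolated unit vectors between unit vectors m0, m1."""
    m0 = np.asarray(m0, dtype=float)
    m1 = np.asarray(m1, dtype=float)
    theta0, phi0 = np.arccos(m0[2]), np.arctan2(m0[1], m0[0])
    theta1, phi1 = np.arccos(m1[2]), np.arctan2(m1[1], m1[0])
    dtheta = (theta1 - theta0) / (n + 1)
    dphi = (phi1 - phi0) / (n + 1)
    images = []
    for i in range(1, n + 1):
        theta, phi = theta0 + i * dtheta, phi0 + i * dphi
        images.append(np.array([np.sin(theta) * np.cos(phi),
                                np.sin(theta) * np.sin(phi),
                                np.cos(theta)]))
    return images

# Example: three images between m = +z and m = +x all stay normalised:
# for m in spherical_interpolation_sketch((0, 0, 1), (1, 0, 0), 3):
#     print(m, np.linalg.norm(m))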
22,197
34.5168
117
py
finmag
finmag-master/src/finmag/physics/physics.py
""" Derives physical quantities from the primary simulation state. """ import logging import numpy as np import dolfin as df import finmag.util.consts as consts from finmag.field import Field from effective_field import EffectiveField from equation import Equation logger = logging.getLogger(name="finmag") class Physics(object): def __init__(self, mesh, unit_length): self.mesh = mesh self.unit_length = unit_length self.S1 = df.FunctionSpace(mesh, "CG", 1) self.S3 = df.VectorFunctionSpace(mesh, "CG", 1, dim=3) self.alpha = Field(self.S1, name="alpha") self.dmdt = Field(self.S3, name="dmdt") self.H = Field(self.S3, name="H") # TODO: connect effective field to H self.m = Field(self.S3, name="m") self.Ms = Field(self.S1, name="Ms") self.pins = [] # TODO: connect pins to instant code self.effective_field = EffectiveField(self.m, self.Ms, self.unit_length) self.eq = Equation(self.m.as_vector(), self.H.as_vector(), self.dmdt.as_vector()) self.eq.set_alpha(self.alpha.as_vector()) self.eq.set_gamma(consts.gamma) self.eq.set_saturation_magnetisation(self.Ms.as_vector()) def hooks_scipy(self): """ Methods that scipy calls during time integration. """ return (self.solve_for, self.m.from_array) def hooks_sundials(self): """ Methods that sundials calls during time integration. """ return (self.sundials_rhs, self.sundials_jtimes, self.sundials_psetup, self.sundials_psolve, self.m.from_array) def hooks_sundials_parallel(self): """ Methods that sundials calls during time integration when it operates in parallel mode. TODO: What does parallel sundials need? """ return () def solve(self, t): self.effective_field.update(t) self.H.set(self.effective_field.H_eff) # FIXME: remove double book-keeping self.eq.solve() return self.dmdt.as_array() def solve_for(self, m, t): self.m.from_array(m) return self.solve(t) def sundials_jtimes(self, mp, J_mp, t, m, fy, tmp): """ Computes the Jacobian-times-vector product, as used by sundials cvode. The time integration problem we need to solve is of type .. math:: \\frac{d y}{d t} = f(y,t) where y is the state vector (such as the magnetisation components for all sites), t is the time, and f(y,t) is the LLG equation. For the implicite integration schemes, sundials' cvode solver needs to know the Jacobian J, which is the derivative of the (vector-valued) function f(y,t) with respect to the (components of the vector) y. The Jacobian is a matrix. For a magnetic system N sites, the state vector y has 3N entries (because every site has 3 components). The Jacobian matrix J would thus have a size of 3N*3N. In general, this is too big to store. Fortunately, cvode only needs the result of the multiplication of some vector y' (provided by cvode) with the Jacobian. We can thus store the Jacobian in our own way (in particular as a sparse matrix if we leave out the demag field), and carry out the multiplication of J with y' when required, and that is what this function does. In more detail: We use the variable name mp to represent m' (i.e. mprime) which is just a notation to distinguish m' from m (and not any derivative). Our equation is: .. math:: \\frac{dm}{dt} = LLG(m, H) And we're interested in computing the Jacobian (J) times vector (m') product .. math:: J m' = [\\frac{dLLG(m, H)}{dm}] m'. However, the H field itself depends on m, so the total derivative J m' will have two terms .. math:: \\frac{d LLG(m, H)}{dm} = \\frac{\\partial LLG(m, H)}{\\partial m} + [\\frac{\\partial LLG(m, H)}{\\partial H}] [\\frac{\\partial H(m)}{\\partial m}]. 
        This is a matrix identity, so to make the derivations easier (and
        since we don't need the full Jacobian matrix) we can write the
        Jacobian-times-vector product as a directional derivative:

        .. math::

            J m' = \\frac{d LLG(m + a m', H(m + a m'))}{d a}|_{a=0}

        The code to compute this derivative is in ``llg.cc`` but you can
        see that the derivative will depend on m, m', H(m), and
        dH(m+a m')/da [which is labelled H' in the code].

        Most of the components of the effective field are linear in m; if
        that's the case, the directional derivative H' is just H(m')

        .. math::

            H' = \\frac{d H(m+a m')}{da} = H(m')

        """
        assert m.shape == self.m.as_array().shape
        assert mp.shape == m.shape
        assert tmp.shape == m.shape

        # First, compute the directional derivative H' = dH_eff(m + a m')/da
        self.m.from_array(mp)
        Hp = tmp.view()
        Hp[:] = self.effective_field.compute_jacobian_only(t)

        if not hasattr(self, 'sundials_reuse_jacobian') or not self.sundials_reuse_jacobian:
            if not np.array_equal(self.m.as_array(), m):
                self.m.from_array(m)
                self.effective_field.update(t)

        self.eq.sundials_jtimes_serial(mp, Hp, J_mp)
        return 0

    def sundials_psetup(self, t, m, fy, jok, gamma, tmp1, tmp2, tmp3):
        # Note that some of the arguments are deliberately ignored, but they
        # need to be present because the function must have the correct
        # signature when it is passed to set_spils_preconditioner() in the
        # cvode class.
        if not jok:
            self.m.from_array(m)
            self.sundials_reuse_jacobian = True
        return 0, not jok

    def sundials_psolve(self, t, y, fy, r, z, gamma, delta, lr, tmp):
        # Note that some of the arguments are deliberately ignored, but they
        # need to be present because the function must have the correct
        # signature when it is passed to set_spils_preconditioner() in the
        # cvode class.
        z[:] = r
        return 0

    def sundials_rhs(self, t, y, ydot):
        """
        Computes the dm/dt right hand side ODE term, as used by sundials
        cvode.
        """
        ydot[:] = self.solve_for(y, t)
        return 0
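
# ---------------------------------------------------------------------
# Illustrative sketch (an assumption, not part of the Physics class):
# the Jacobian-times-vector product described in `sundials_jtimes` can
# be sanity-checked against a finite-difference directional derivative,
#     J m' ~ (f(m + a m') - f(m)) / a   for small a,
# for any right-hand side f. Here f is a stand-in callable.
import numpy as np


def jtimes_fd_check(f, m, mp, a=1e-7):
    """Finite-difference approximation of the directional derivative of
    f at m along mp, i.e. an approximation of J(m) * mp."""
    return (f(m + a * mp) - f(m)) / a

# Example with a toy linear right-hand side f(m) = A m, whose Jacobian
# is A itself, so the check reproduces A.dot(mp) up to O(a):
# A = np.array([[0., 1.], [-1., 0.]])
# f = lambda m: A.dot(m)
# print(jtimes_fd_check(f, np.array([1., 0.]), np.array([0., 1.])))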
6,484
34.054054
195
py
finmag
finmag-master/src/finmag/physics/neb_cartesian.py
import os import dolfin as df import numpy as np # import inspect from aeon import timer # import finmag.util.consts as consts # from finmag.util import helpers # from finmag.physics.effective_field import EffectiveField from finmag.util.vtk_saver import VTKSaver from finmag import Simulation # from finmag.field import Field # Change sim._m to new field class # in line 184 from finmag.native import sundials import finmag.native.neb as native_neb from finmag.util.fileio import Tablewriter, Tablereader # import matplotlib.pyplot as plt # from mpl_toolkits.mplot3d import Axes3D # from matplotlib.colors import colorConverter # from matplotlib.collections import PolyCollection, LineCollection import logging log = logging.getLogger(name="finmag") def linear_interpolation_two(m0, m1, n): """ Define a linear interpolation between two states of the energy band (m0, m1) to get an initial state. The interpolation is done in the magnetic moments that constitute the magnetic system. """ dm = (m1 - m0) / (n + 1) coords = [] for i in range(n): m = m0 + dm * (i + 1) coords.append(m) return coords def normalise_m(a): """ Normalise the magnetisation array. We asume: a = [mx1, mx2, ..., my1, my2, ..., mz1, mz2, ...] to transform this into [ [mx1, mx2, ...], [my1, my2, ...], [mz1, mz2, ...] ] normalise the matrix, and return again a 1 x -- array """ # Transform to matrix a.shape = (3, -1) # Compute the array 'a' length lengths = np.sqrt(a[0] * a[0] + a[1] * a[1] + a[2] * a[2]) # Normalise all the entries a[:] /= lengths # Return to original shape a.shape = (-1, ) def compute_dm(m0, m1): dm = m0 - m1 length = len(dm) dm = np.sqrt(np.sum(dm ** 2)) / length return dm class NEB_Sundials(object): """ Nudged elastic band method by solving the differential equation using Sundials. """ def __init__(self, sim, initial_images, climbing_image=None, interpolations=None, spring=5e5, name='unnamed'): """ *Arguments* sim: the Simulation class initial_images: a list contain the initial value, which can have any of the forms accepted by the function 'finmag.util.helpers. vector_valued_function', for example, initial_images = [(0,0,1), (0,0,-1)] or with given defined function def init_m(pos): x=pos[0] if x<10: return (0,1,1) return (-1,0,0) initial_images = [(0,0,1), (0,0,-1), init_m ] are accepted forms. climbing_image : An integer with the index (from 1 to the total number of images minus two; it doesn't have any sense to use the extreme images) of the image with the largest energy, which will be updated in the NEB algorithm using the Climbing Image NEB method (no spring force and "with the component along the elastic band inverted" [*]). See: [*] Henkelman et al., The Journal of Chemical Physics 113, 9901 (2000) interpolations : a list only contain integers and the length of this list should equal to the length of the initial_images minus 1, i.e., len(interpolations) = len(initial_images) - 1 ** THIS IS not well defined in CARTESIAN coordinates** spring: the spring constant, a float value disable_tangent: this is an experimental option, by disabling the tangent, we can get a rough feeling about the local energy minima quickly. 
""" self.sim = sim self.name = name self.spring = spring # We set a minus one because the *sundials_rhs* function # only uses an array without counting the extreme images, # whose length is self.image_num (see below) if climbing_image is not None: self.climbing_image = climbing_image - 1 else: self.climbing_image = climbing_image # Dolfin function of the new _m_field (instead of _m) # self._m = sim.llg._m_field.f self._m = sim.m_field.f self.effective_field = sim.llg.effective_field if interpolations is None: interpolations = [0 for i in range(len(initial_images) - 1)] self.initial_images = initial_images self.interpolations = interpolations if len(interpolations) != len(initial_images) - 1: raise RuntimeError("""The length of interpolations should be equal to the length of the initial_images array minus 1, i.e., len(interpolations) = len(initial_images) - 1""") if len(initial_images) < 2: raise RuntimeError("""At least two images must be provided to create the energy band""") # the total image number including two ends self.total_image_num = len(initial_images) + sum(interpolations) self.image_num = self.total_image_num - 2 # Number of spins per image. The _m.vector has the form # [mx1, mx2, ..., my1, my2, ..., mz1, mz2] # Thus we divide by 3 to get the total of ms # self.nxyz = len(self._m.vector()) / 3 # Use the full vector from the field class to get the total number # of degrees of freedom when using PBC # (the self._m gave us the reduced number of spins when using PBC) self.nxyz = len(self.sim.m_field.get_ordered_numpy_array_xxx()) / 3 # Total number of degrees of freedom (3 components per spin) self.coords = np.zeros(3 * self.nxyz * self.total_image_num) self.last_m = np.zeros(self.coords.shape) self.Heff = np.zeros(self.coords.shape) self.Heff.shape = (self.total_image_num, -1) self.tangents = np.zeros(3 * self.nxyz * self.image_num) self.tangents.shape = (self.image_num, -1) self.energy = np.zeros(self.total_image_num) self.springs = np.zeros(self.image_num) self.t = 0 self.step = 0 self.ode_count = 1 self.integrator = None self.initial_image_coordinates() self.create_tablewriter() def create_tablewriter(self): entities_energy = { 'step': {'unit': '<1>', 'get': lambda sim: sim.step, 'header': 'steps'}, 'energy': {'unit': '<J>', 'get': lambda sim: sim.energy, 'header': ['image_%d' % i for i in range(self.image_num + 2)]} } self.tablewriter = Tablewriter( '%s_energy.ndt' % (self.name), self, override=True, entities=entities_energy) entities_dm = { 'step': {'unit': '<1>', 'get': lambda sim: sim.step, 'header': 'steps'}, 'dms': {'unit': '<1>', 'get': lambda sim: sim.distances, 'header': ['image_%d_%d' % (i, i + 1) for i in range(self.image_num + 1)]} } self.tablewriter_dm = Tablewriter( '%s_dms.ndt' % (self.name), self, override=True, entities=entities_dm) def initial_image_coordinates(self): """ Generate the coordinates linearly according to the number of interpolations provided. Example: Imagine we have 4 images and we want 3 interpolations between every neighbouring pair, i.e interpolations = [3, 3, 3] 1. Imagine the initial states with the interpolation numbers and choose the first and second state 0 1 2 3 X -------- X --------- X -------- X 3 3 3 2. Counter image_id is set to 0 3. Set the image 0 magnetisation vector as m0 and append the values to self.coords[0]. Update the counter: image_id = 1 now 4. Set the image 1 magnetisation values as m1 and interpolate the values between m0 and m1, generating 3 arrays with the magnetisation values of every interpolation image. 
For every array, append the values to self.coords[i] with i = 1, 2 and 3 ; updating the counter every time, so image_id = 4 now 5. Append the value of m1 (image 1) in self.coords[4] Update counter (image_id = 5 now) 6. Move to the next pair of images, now set the 1-th image magnetisation values as m0 and append to self.coords[5] 7. Interpolate to get self.coords[i], for i = 6, 7, 8 ... 8. Repeat as before until move to the pair of images: 2 - 3 9. Finally append the magnetisation of the last image (self.initial_images[-1]). In this case, the 3rd image Then, for every magnetisation vector values array (self.coords[i]) append the value to the simulation and store the energies corresponding to every i-th image to the self.energy[i] arrays Finally, flatten the self.coords matrix (containing the magnetisation values of every image in different rows) ** Our generalised coordinates in the NEBM are the magnetisation values """ # Initiate the counter image_id = 0 self.coords.shape = (self.total_image_num, -1) # For every interpolation between images (zero if no interpolations # were specified) for i in range(len(self.interpolations)): # Store the number n = self.interpolations[i] # Save on the first image of a pair (step 1, 6, ...) self.sim.set_m(self.initial_images[i]) # m0 = self.sim.m # Use the full array for PBCs m0 = self.sim.m_field.get_ordered_numpy_array_xxx() self.coords[image_id][:] = m0[:] image_id = image_id + 1 # Set the second image in the pair as m1 and interpolate # (step 4 and 7), saving in corresponding self.coords entries self.sim.set_m(self.initial_images[i + 1]) # m1 = self.sim.m # Use the full array for PBCs m1 = self.sim.m_field.get_ordered_numpy_array_xxx() # Interpolations (arrays with magnetisation values) coords = linear_interpolation_two(m0, m1, n) for coord in coords: self.coords[image_id][:] = coord[:] image_id = image_id + 1 # Continue to the next pair of images # Append the magnetisation of the last image self.sim.set_m(self.initial_images[-1]) # m2 = self.sim.m # Use the full array for PBCs m2 = self.sim.m_field.get_ordered_numpy_array_xxx() self.coords[image_id][:] = m2[:] # Save the energies for i in range(self.total_image_num): # To assign the values from the full array using set_local # (with boundaries when using PBCs) to a reduced # sim.m_field.vector() , # we use the ordered dof to vertex map (d2v_xxx), # which has a reduced number of indexes # We take the map from the field class self._m.vector().set_local(self.coords[i][self.sim.m_field.d2v_xxx]) self.effective_field.update() self.energy[i] = self.effective_field.total_energy() # Flatten the array self.coords.shape = (-1,) def save_vtks(self): """ Save vtk files in different folders, according to the simulation name and step. Files are saved as simname_simstep/image_00000x.vtu """ # Create the directory directory = 'vtks/%s_%d' % (self.name, self.step) if not os.path.exists(directory): os.makedirs(directory) # Save the vtk files with the finmag function # The last separator '_' is to distinguish the image # from its corresponding number, e.g. image_000001.pvd vtk_saver = VTKSaver('%s/image_.pvd' % (directory), overwrite=True) self.coords.shape = (self.total_image_num, -1) for i in range(self.total_image_num): # Save the REDUCED length arrray self._m.vector().set_local(self.coords[i, :][self.sim.m_field.d2v_xxx]) # set t =0, it seems that the parameter time is only # for the interface? 
vtk_saver.save_field(self._m, 0) self.coords.shape = (-1, ) def save_npys(self): """ Save npy files in different folders according to the simulation name and step Files are saved as: simname_simstep/image_x.npy """ # Create directory as simname_simstep directory = 'npys/%s_%d' % (self.name, self.step) if not os.path.exists(directory): os.makedirs(directory) # Save the images with the format: 'image_{}.npy' # where {} is the image number, starting from 0 self.coords.shape = (self.total_image_num, -1) for i in range(self.total_image_num): name = os.path.join(directory, 'image_%d.npy' % i) # Save the reduced length array # Since the dolfin vector (for the magnetisation) can have any # ordering, we rely on the fact that # this mapping does not change when we use the same mesh # when loading the system from a different simulation np.save(name, self.coords[i, :][self.sim.m_field.d2v_xxx]) self.coords.shape = (-1, ) def create_integrator(self, reltol=1e-6, abstol=1e-6, nsteps=10000): integrator = sundials.cvode(sundials.CV_BDF, sundials.CV_NEWTON) integrator.init(self.sundials_rhs, 0, self.coords) integrator.set_linear_solver_sp_gmr(sundials.PREC_NONE) integrator.set_scalar_tolerances(reltol, abstol) integrator.set_max_num_steps(nsteps) self.integrator = integrator def compute_effective_field(self, y): y.shape = (self.total_image_num, -1) for i in range(self.image_num): # To update # the magnetisation we only need the reduced vector, thus # we use the d2v map self._m.vector().set_local(y[i + 1][self.sim.m_field.d2v_xxx]) self.effective_field.update() # Compute effective field, which is the gradient of # the energy in the NEB method (derivative with respect to # the generalised coordinates) # We need the whole system effective field # (thus we use the v2d map) h = self.effective_field.H_eff[self.sim.m_field.v2d_xxx] self.Heff[i + 1, :] = h[:] # Compute the total energy self.energy[i + 1] = self.effective_field.total_energy() # Compute the 'distance' or difference between neighbouring states # around y[i+1]. This is used to compute the spring force # dm1 = compute_dm(y[i], y[i + 1]) dm2 = compute_dm(y[i + 1], y[i + 2]) self.springs[i] = self.spring * (dm2 - dm1) # Use the native NEB (C++ code) to compute the tangents according # to the improved NEB method, developed by Henkelman and Jonsson # at: Henkelman et al., Journal of Chemical Physics 113, 22 (2000) native_neb.compute_tangents(y, self.energy, self.tangents) # native_neb.compute_springs(y,self.springs,self.spring) y.shape = (-1, ) def sundials_rhs(self, time, y, ydot): """ Right hand side of the optimization scheme used to find the minimum energy path. In our case, we use a LLG kind of equation: d Y / dt = Y x Y x D D = -( nabla E + [nabla E * t] t ) + F_spring where Y is an image: Y = (M_0, ... 
, M_N)

        and t is the tangent vector defined according to the energy of
        the neighbouring images (see the Henkelman et al. publication)

        If a climbing_image index is specified, the corresponding image
        will be iterated without the spring force and with an inverted
        component along the tangent
        """
        # Update the ODE solver
        self.ode_count += 1

        timer.start("sundials_rhs", self.__class__.__name__)

        # Compute the effective field H for every image, H = -nabla E
        # (derived with respect to M)
        self.compute_effective_field(y)

        # Reshape y and ydot in a matrix of total_image_num rows
        y.shape = (self.total_image_num, -1)
        ydot.shape = (self.total_image_num, -1)

        # Compute the total force for every image (not the extremes)
        # Remember that self.image_num = self.total_image_num - 2
        # The total force is:
        #   D = - (-nabla E + [nabla E * t] t) + F_spring
        # This value differs if a climbing image is specified:
        #   D_climb = -nabla E + 2 * [nabla E * t] t
        for i in range(self.image_num):
            h = self.Heff[i + 1]
            t = self.tangents[i]
            sf = self.springs[i]

            # The explicit 'is not None' avoids misclassifying a
            # climbing image with (shifted) index 0
            if not (self.climbing_image is not None and i == self.climbing_image):
                h3 = h - np.dot(h, t) * t + sf * t
            else:
                h3 = h - 2 * np.dot(h, t) * t

            h[:] = h3[:]

            #ydot[i+1, :] = h3[:]

        # Update the step with the optimisation algorithm, in this
        # case we use: dY /dt = Y x Y x D
        # (check the C++ code in finmag/native/src/)
        native_neb.compute_dm_dt(y, self.Heff, ydot)

        ydot[0, :] = 0
        ydot[-1, :] = 0

        y.shape = (-1,)
        ydot.shape = (-1,)

        timer.stop("sundials_rhs", self.__class__.__name__)

        return 0

    def compute_distance(self):
        distance = []

        ys = self.coords
        ys.shape = (self.total_image_num, -1)
        for i in range(self.total_image_num - 1):
            dm = compute_dm(ys[i], ys[i + 1])
            distance.append(dm)
        ys.shape = (-1, )

        self.distances = np.array(distance)

    def run_until(self, t):
        if t <= self.t:
            return

        self.integrator.advance_time(t, self.coords)

        m = self.coords
        y = self.last_m

        m.shape = (self.total_image_num, -1)
        y.shape = (self.total_image_num, -1)

        max_dmdt = 0
        for i in range(1, self.image_num + 1):
            dmdt = compute_dm(y[i], m[i]) / (t - self.t)
            if dmdt > max_dmdt:
                max_dmdt = dmdt

        m.shape = (-1,)
        y.shape = (-1,)

        self.last_m[:] = m[:]
        self.t = t

        return max_dmdt

    def relax(self, dt=1e-8, stopping_dmdt=1e4, max_steps=1000,
              save_npy_steps=100, save_vtk_steps=100):

        if self.integrator is None:
            self.create_integrator()

        log.debug("Relaxation parameters: "
                  "stopping_dmdt={} (degrees per nanosecond), "
                  "time_step={} s, max_steps={}.".format(stopping_dmdt,
                                                         dt, max_steps))

        # Save the initial state (i = 0)
        self.compute_distance()
        self.tablewriter.save()
        self.tablewriter_dm.save()

        for i in range(max_steps):

            if i % save_vtk_steps == 0:
                self.save_vtks()

            if i % save_npy_steps == 0:
                self.save_npys()

            self.step += 1

            cvode_dt = self.integrator.get_current_step()

            increment_dt = dt
            if cvode_dt > dt:
                increment_dt = cvode_dt

            dmdt = self.run_until(self.t + increment_dt)

            self.compute_distance()
            self.tablewriter.save()
            self.tablewriter_dm.save()

            log.debug("step: {:.3g}, step_size: {:.3g}"
                      " and max_dmdt: {:.3g}.".format(self.step,
                                                      increment_dt,
                                                      dmdt))

            if dmdt < stopping_dmdt:
                break

        log.info("Relaxation finished at time step = {:.4g}, "
                 "t = {:.2g}, call rhs = {:.4g} "
                 "and max_dmdt = {:.3g}".format(self.step,
                                                self.t,
                                                self.ode_count,
                                                dmdt))

        self.save_vtks()
        self.save_npys()
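
# ---------------------------------------------------------------------
# Small NumPy sketch (illustrative only, names are hypothetical) of the
# force projection applied per image in `sundials_rhs` above: the
# component of the effective field along the tangent t is replaced by
# the spring force, and for a climbing image the tangential component
# is inverted instead.


def neb_force_sketch(h, t, spring_force, climbing=False):
    """h: effective field of one image, t: unit tangent (1D arrays)."""
    if not climbing:
        # D = -nabla E - [-nabla E . t] t + F_spring t
        return h - np.dot(h, t) * t + spring_force * t
    # Climbing image: no spring force, inverted tangential component
    return h - 2.0 * np.dot(h, t) * t

# Example: with h parallel to t, an ordinary image feels only the
# spring force, while a climbing image feels -h:
# t = np.array([1.0, 0.0, 0.0])
# print(neb_force_sketch(2.0 * t, t, 0.5))                  # 0.5 t
# print(neb_force_sketch(2.0 * t, t, 0.5, climbing=True))   # -2.0 t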
21,007
33.839138
94
py
finmag
finmag-master/src/finmag/physics/llg.py
import logging import numpy as np import dolfin as df import finmag.util.consts as consts from aeon import timer from finmag.field import Field from finmag.physics.effective_field import EffectiveField from finmag.native import llg as native_llg from finmag.util import helpers from finmag.util.meshes import nodal_volume # default settings for logger 'finmag' set in __init__.py # getting access to logger here logger = logging.getLogger(name='finmag') # used for parallel testing #from finmag.native import cvode_petsc, llg_petsc class LLG(object): """ Solves the Landau-Lifshitz-Gilbert equation. The equation reads .. math:: \\frac{d\\vec{M}}{dt} = -\\gamma_{LL} (\\vec{M} \\times \\vec{H}) - \\alpha \\gamma_{LL} (\\vec{M} \\times [ \\vec{M} \\times \\vec{H}]) where :math:`\\gamma_{LL} = \\frac{\\gamma}{1+\\alpha^2}`. In our code :math:`-\\gamma_{LL}` is referred to as *precession coefficient* and :math:`-\\alpha\\gamma_{LL}` as *damping coefficient*. """ @timer.method def __init__(self, S1, S3, do_precession=True, average=False, unit_length=1): """ S1 and S3 are df.FunctionSpace and df.VectorFunctionSpace objects, and the boolean do_precession controls whether the precession of the magnetisation around the effective field is computed or not. """ logger.debug("Creating LLG object.") self.S1 = S1 self.S3 = S3 self.mesh = S1.mesh() self.DG = df.FunctionSpace(self.mesh, "DG", 0) self.set_default_values() self.do_precession = do_precession self.unit_length = unit_length self.do_slonczewski = False self.do_zhangli = False self.effective_field = EffectiveField(self._m_field, self.Ms, self.unit_length) # will be computed on demand, and carries volume of the mesh self.Volume = None self.v2d_xyz, self.v2d_xxx, self.d2v_xyz, self.d2v_xxx = helpers.build_maps(S3) self.v2d_scale, self.d2v_scale = helpers.build_maps(S1, dim=1, scalar=True) def set_default_values(self): self.alpha = df.Function(self.S1) self.alpha.assign(df.Constant(0.5)) self.alpha.rename('alpha', 'Gilbert damping constant') self.gamma = consts.gamma self.c = 1e11 # 1/s numerical scaling correction \ # 0.1e12 1/s is the value used by default in nmag 0.2 self._Ms_dg = Field(self.DG) self.Ms = 8.6e5 # A/m saturation magnetisation self._m_field = Field(self.S3, name='m') self.pins = [] # nodes where the magnetisation gets pinned self._dmdt = df.Function(self.S3) # used for parallel stuff. #self.field = df.Function(self.S3) #self.h_petsc = df.as_backend_type(self.field.vector()).vec() def set_pins(self, nodes): """ Hold the magnetisation constant for certain nodes in the mesh. Pass the indices of the pinned sites as *nodes*. Any type of sequence is fine, as long as the indices are between 0 (inclusive) and the highest index. This means you CANNOT use python style indexing with negative offsets counting backwards. """ if len(nodes) > 0: nb_nodes_mesh = len(self._m_field.get_ordered_numpy_array_xxx()) / 3 if min(nodes) >= 0 and max(nodes) < nb_nodes_mesh: self._pins = np.array(nodes, dtype="int") else: logger.error("Indices of pinned nodes should be in [0, {}), were [{}, {}].".format( nb_nodes_mesh, min(nodes), max(nodes))) else: self._pins = np.array([], dtype="int") def pins(self): return self._pins pins = property(pins, set_pins) def set_alpha(self, value): """ Set the damping constant :math:`\\alpha`. The parameter `value` can have any of the types accepted by the function :py:func:`finmag.util.helpers.scalar_valued_function` (see its docstring for details). 
""" self.alpha = helpers.scalar_valued_function(value, self.S1) self.alpha.rename('alpha', 'Gilbert damping constant') @property def Ms(self): return self._Ms_dg @Ms.setter def Ms(self, value): # XXX TODO: Rename _Ms_dg to _Ms because it is not a DG0 function!!! # We need a DG function here, so we should use # scalar_valued_dg_function dg_fun = Field(self.DG, value)#helpers.scalar_valued_dg_function(value, self.DG) self._Ms_dg.vector().set_local(dg_fun.vector().get_local()) # FIXME: change back to DG space. #self._Ms_dg=helpers.scalar_valued_function(value, self.S1) self._Ms_dg.name = 'Saturation magnetisation' self.volumes = df.assemble(df.TestFunction(self.S1) * df.dx) Ms = df.assemble( self._Ms_dg.f * df.TestFunction(self.S1) * df.dx).array() / self.volumes.array() self._Ms = Ms.copy() self.Ms_av = np.average(self._Ms_dg.vector().array()) @property def M(self): """The magnetisation, with length Ms.""" # FIXME:error here m = self.m.view().reshape((3, -1)) Ms = self.Ms.vector().array() if isinstance( self.Ms, df.Function) else self.Ms M = Ms * m return M.ravel() @property def M_average(self): """The average magnetisation, computed with m_average().""" volume_Ms = df.assemble(self._Ms_dg * df.dx) volume = df.assemble(self._Ms_dg * df.dx) return self.m_average * volume_Ms / volume @property def m(self): """The unit magnetisation.""" raise RuntimeError("DON'T USE llg.m UNTIL FURTHER NOTICE!!!!") @property def m_field(self): """The unit magnetisation.""" return self._m_field @property def m_numpy(self): """ Return the magnetisation as a numpy.array. This is not recommended and should only be used for debugging! """ return self._m_field.get_ordered_numpy_array_xxx() # @m.setter # def m(self, value): # Not enforcing unit length here, as that is better done # once at the initialisation of m. # self._m.vector().set_local(value) @property def dmdt(self): """ dmdt values for all mesh nodes """ return self._dmdt.vector().array() @property def sundials_m(self): """The unit magnetisation.""" return self._m_field.get_ordered_numpy_array_xxx() @sundials_m.setter def sundials_m(self, value): # used to copy back from sundials cvode self._m_field.set_with_ordered_numpy_array_xxx(value) def m_average_fun(self, dx=df.dx): """ Compute and return the average polarisation according to the formula :math:`\\langle m \\rangle = \\frac{1}{V} \int m \: \mathrm{d}V` """ # mx = df.assemble(self._Ms_dg * df.dot(self._m, df.Constant([1, 0, 0])) * dx) # my = df.assemble(self._Ms_dg * df.dot(self._m, df.Constant([0, 1, 0])) * dx) # mz = df.assemble(self._Ms_dg * df.dot(self._m, df.Constant([0, 0, 1])) * dx) # volume = df.assemble(self._Ms_dg * dx) # # return np.array([mx, my, mz]) / volume return self._m_field.average(dx=dx) m_average = property(m_average_fun) def set_m(self, value, normalise=True, **kwargs): """ Set the magnetisation (if `normalise` is True, it is automatically normalised to unit length). `value` can have any of the forms accepted by the function 'finmag.util.helpers.vector_valued_function' (see its docstring for details). You can call this method anytime during the simulation. However, when providing a numpy array during time integration, the use of the attribute m instead of this method is advised for performance reasons and because the attribute m doesn't normalise the vector. 
""" m0 = helpers.vector_valued_function(value, self.S3, normalise=False, **kwargs).vector().array()[self.v2d_xxx] if np.any(np.isnan(m0)): raise ValueError("Attempting to initialise m with NaN(s)") if normalise: m0 = helpers.fnormalise(m0) self._m_field.set_with_ordered_numpy_array_xxx(m0) def solve_for(self, m, t): self._m_field.set_with_ordered_numpy_array_xxx(m) value = self.solve(t) return value def solve(self, t): # we don't use self.effective_field.compute(t) for performance reasons self.effective_field.update(t) H_eff = self.effective_field.H_eff[self.v2d_xxx] # alias (for readability) H_eff.shape = (3, -1) timer.start("solve", self.__class__.__name__) # Use the same characteristic time as defined by c char_time = 0.1 / self.c # Prepare the arrays in the correct shape m = self._m_field.get_ordered_numpy_array_xxx() m.shape = (3, -1) dmdt = np.zeros(m.shape) alpha__ = self.alpha.vector().array()[self.v2d_scale] # Calculate dm/dt if self.do_slonczewski: if self.fun_slonczewski_time_update != None: J_new = self.fun_slonczewski_time_update(t) self.J[:] = J_new native_llg.calc_llg_slonczewski_dmdt( m, H_eff, t, dmdt, self.pins, self.gamma, alpha__, char_time, self.Lambda, self.epsilonprime, self.J, self.P, self.d, self._Ms, self.p) elif self.do_zhangli: if self.fun_zhangli_time_update != None: J_profile = self.fun_zhangli_time_update(t) self._J = helpers.vector_valued_function(J_profile, self.S3) self.J = self._J.vector().array() self.compute_gradient_matrix() H_gradm = self.compute_gradient_field() H_gradm.shape = (3, -1) native_llg.calc_llg_zhang_li_dmdt( m, H_eff, H_gradm, t, dmdt, self.pins, self.gamma, alpha__, char_time, self.u0, self.beta, self._Ms) H_gradm.shape = (-1,) else: native_llg.calc_llg_dmdt(m, H_eff, t, dmdt, self.pins, self.gamma, alpha__, char_time, self.do_precession) dmdt.shape = (-1,) H_eff.shape = (-1,) timer.stop("solve", self.__class__.__name__) self._dmdt.vector().set_local(dmdt[self.d2v_xxx]) return dmdt # Computes the dm/dt right hand side ODE term, as used by SUNDIALS CVODE def sundials_rhs(self, t, y, ydot): ydot[:] = self.solve_for(y, t) return 0 def sundials_psetup(self, t, m, fy, jok, gamma, tmp1, tmp2, tmp3): # Note that some of the arguments are deliberately ignored, but they # need to be present because the function must have the correct signature # when it is passed to set_spils_preconditioner() in the cvode class. if not jok: self._m_field.set_with_ordered_numpy_array_xxx(m) self._reuse_jacobean = True return 0, not jok def sundials_psolve(self, t, y, fy, r, z, gamma, delta, lr, tmp): # Note that some of the arguments are deliberately ignored, but they # need to be present because the function must have the correct signature # when it is passed to set_spils_preconditioner() in the cvode class. z[:] = r return 0 """ def sundials_rhs_petsc(self, t, y, ydot): #only for the testing of parallel stuff, will delete later. self.effective_field.update(t) self.field.vector().set_local(self.effective_field.H_eff) #this is not ideal, will change it after we make use of Field class for damping. alpha_petsc = df.as_backend_type(self.alpha.vector()).vec() llg_petsc.compute_dm_dt(y, self.h_petsc, ydot, alpha_petsc, self.gamma, self.do_precession, self.c) return 0 """ # Computes the Jacobian-times-vector product, as used by SUNDIALS CVODE @timer.method def sundials_jtimes(self, mp, J_mp, t, m, fy, tmp): """ The time integration problem we need to solve is of type .. 
math:: \\frac{d y}{d t} = f(y,t) where y is the state vector (such as the magnetisation components for all sites), t is the time, and f(y,t) is the LLG equation. For the implicite integration schemes, sundials' cvode solver needs to know the Jacobian J, which is the derivative of the (vector-valued) function f(y,t) with respect to the (components of the vector) y. The Jacobian is a matrix. For a magnetic system N sites, the state vector y has 3N entries (because every site has 3 components). The Jacobian matrix J would thus have a size of 3N*3N. In general, this is too big to store. Fortunately, cvode only needs the result of the multiplication of some vector y' (provided by cvode) with the Jacobian. We can thus store the Jacobian in our own way (in particular as a sparse matrix if we leave out the demag field), and carry out the multiplication of J with y' when required, and that is what this function does. In more detail: We use the variable name mp to represent m' (i.e. mprime) which is just a notation to distinguish m' from m (and not any derivative). Our equation is: .. math:: \\frac{dm}{dt} = LLG(m, H) And we're interested in computing the Jacobian (J) times vector (m') product .. math:: J m' = [\\frac{dLLG(m, H)}{dm}] m'. However, the H field itself depends on m, so the total derivative J m' will have two terms .. math:: \\frac{d LLG(m, H)}{dm} = \\frac{\\partial LLG(m, H)}{\\partial m} + [\\frac{\\partial LLG(m, H)}{\\partial H}] [\\frac{\\partial H(m)}{\\partial m}]. This is a matrix identity, so to make the derivations easier (and since we don't need the full Jacobian matrix) we can write the Jacobian-times-vector product as a directional derivative: .. math:: J m' = \\frac{d LLG(m + a m',H(m + a m'))}{d a}|_{a=0} The code to compute this derivative is in ``llg.cc`` but you can see that the derivative will depend on m, m', H(m), and dH(m+a m')/da [which is labelled H' in the code]. Most of the components of the effective field are linear in m; if that's the case, the directional derivative H' is just H(m') .. math:: H' = \\frac{d H(m+a m')}{da} = H(m') The actual implementation of the jacobian-times-vector product is in src/llg/llg.cc, function calc_llg_jtimes(...), which in turn makes use of CVSpilsJacTimesVecFn in CVODE. """ assert m.shape == self._m_field.get_ordered_numpy_array_xxx().shape assert mp.shape == m.shape assert tmp.shape == m.shape # First, compute the derivative H' = dH_eff/dt self._m_field.set_with_ordered_numpy_array_xxx(mp) Hp = tmp.view() Hp[:] = self.effective_field.compute_jacobian_only(t)[self.v2d_xxx] if not hasattr(self, '_reuse_jacobean') or not self._reuse_jacobean: # If the field m has changed, recompute H_eff as well if not np.array_equal(self.m_numpy, m): self.m_field.set_with_ordered_numpy_array_xxx(m) self.effective_field.update(t) else: pass # print "This actually happened." 
#import sys; sys.exit() m.shape = (3, -1) mp.shape = (3, -1) Hp.shape = (3, -1) J_mp.shape = (3, -1) # Use the same characteristic time as defined by c char_time = 0.1 / self.c Heff2 = self.effective_field.H_eff[self.v2d_xxx] native_llg.calc_llg_jtimes(m, Heff2.reshape((3, -1)), mp, Hp, t, J_mp, self.gamma, self.alpha.vector().array()[self.v2d_scale], char_time, self.do_precession, self.pins) J_mp.shape = (-1, ) m.shape = (-1,) mp.shape = (-1,) tmp.shape = (-1,) # Nonnegative exit code indicates success return 0 def use_slonczewski(self, J, P, d, p, Lambda=2, epsilonprime=0.0, with_time_update=None): """ Activates the computation of the Slonczewski spin-torque term in the LLG. *Arguments* J is the current density in A/m^2 as a number, dolfin function, dolfin expression or Python function. In the last case the current density is assumed to be spatially constant but can vary with time. Thus J=J(t) should be a function expecting a single variable t (the simulation time) and return a number. Note that a time-dependent current density can also be given as a dolfin Expression, but a python function should be much more efficient. P is the polarisation (between 0 and 1). It is defined as P = (x-y)/(x+y), where x and y are the fractions of spin up/down electrons). d is the thickness of the free layer in m. p is the direction of the polarisation as a triple (is automatically normalised to unit length). - Lambda: the Lambda parameter in the Slonczewski/Xiao spin-torque term - epsilonprime: the strength of the secondary spin transfer term - with_time_update: A function of the form J(t), which accepts a time step `t` as its only argument and returns the new current density. N.B.: For efficiency reasons, the return value is currently assumed to be a number, i.e. J is assumed to be spatially constant (and only varying with time). """ self.do_slonczewski = True self.fun_slonczewski_time_update = with_time_update self.Lambda = Lambda self.epsilonprime = epsilonprime if isinstance(J, df.Expression): J = df.interpolate(J, self.S1) if not isinstance(J, df.Function): func = df.Function(self.S1) func.assign(df.Constant(J)) J = func self.J = J.vector().array() assert P >= 0.0 and P <= 1.0 self.P = P self.d = d polarisation = df.Function(self.S3) polarisation.assign(df.Constant((p))) # we use fnormalise to ensure that p has unit length self.p = helpers.fnormalise( polarisation.vector().array()).reshape((3, -1)) def compute_gradient_matrix(self): """ compute (J nabla) m , we hope we can use a matrix M such that M*m = (J nabla)m. """ tau = df.TrialFunction(self.S3) sigma = df.TestFunction(self.S3) self.nodal_volume_S3 = nodal_volume(self.S3) * self.unit_length dim = self.S3.mesh().topology().dim() ty = tz = 0 tx = self._J[0] * df.dot(df.grad(tau)[:, 0], sigma) if dim >= 2: ty = self._J[1] * df.dot(df.grad(tau)[:, 1], sigma) if dim >= 3: tz = self._J[2] * df.dot(df.grad(tau)[:, 2], sigma) self.gradM = df.assemble((tx + ty + tz) * df.dx) #self.gradM = df.assemble(df.dot(df.dot(self._J, df.nabla_grad(tau)),sigma)*df.dx) def compute_gradient_field(self): self.gradM.mult(self._m_field.f.vector(), self.H_gradm) return self.H_gradm.array() / self.nodal_volume_S3 def use_zhangli(self, J_profile=(1e10, 0, 0), P=0.5, beta=0.01, using_u0=False, with_time_update=None): """ if using_u0 = True, the factor of 1/(1+beta^2) will be dropped. With with_time_update should be a function like: def f(t): return (0, 0, J*g(t)) We do not use a position dependent function for performance reasons. 
""" self.do_zhangli = True self.fun_zhangli_time_update = with_time_update self._J = helpers.vector_valued_function(J_profile, self.S3) self.J = self._J.vector().array() self.compute_gradient_matrix() self.H_gradm = df.PETScVector() const_e = 1.602176565e-19 # elementary charge in As mu_B = 9.27400968e-24 # Bohr magneton self.P = P self.beta = beta u0 = P * mu_B / const_e # P g mu_B/(2 e Ms) and g=2 for electrons if using_u0: self.u0 = u0 else: self.u0 = u0 / (1 + beta ** 2)
21,056
36.137566
195
py
finmag
finmag-master/src/finmag/physics/llg_stt.py
import dolfin as df import numpy as np import inspect from aeon import timer import finmag.util.consts as consts from finmag.field import Field from finmag.util import helpers from finmag.physics.effective_field import EffectiveField from finmag.util.meshes import nodal_volume from finmag.native import llg as native_llg import logging log = logging.getLogger(name="finmag") ONE_DEGREE_PER_NS = 17453292.5 # in rad/s class LLG_STT(object): """ Solves the Landau-Lifshitz-Gilbert equation with the nonlocal spin transfer torque. """ def __init__(self, S1, S3, unit_length=1, average=False): self.S1 = S1 self.S3 = S3 self.unit_length = unit_length self.mesh = S1.mesh() self._m_field = Field(self.S3, name='m') self._delta_m = df.Function(self.S3) self.nxyz = len(self.m) self._alpha = np.zeros(self.nxyz / 3) self.delta_m = np.zeros(self.nxyz) self.H_eff = np.zeros(self.nxyz) self.dy_m = np.zeros(2 * self.nxyz) # magnetisation and delta_m self.dm_dt = np.zeros(2 * self.nxyz) # magnetisation and delta_m self.set_default_values() self.effective_field = EffectiveField( self._m_field, self.Ms, self.unit_length) self._t = 0 def set_default_values(self): self.set_alpha(0.5) self.gamma = consts.gamma self.c = 1e11 # 1/s numerical scaling correction \ # 0.1e12 1/s is the value used by default in nmag 0.2 self.Ms = 8.6e5 # A/m saturation magnetisation self.vol = df.assemble(df.dot(df.TestFunction(self.S3), df.Constant([1, 1, 1])) * df.dx).array() self.real_vol = self.vol * self.unit_length ** 3 self.pins = [] self._pre_rhs_callables = [] self._post_rhs_callables = [] self.interactions = [] def set_parameters(self, J_profile=(1e10, 0, 0), P=0.5, D=2.5e-4, lambda_sf=5e-9, lambda_J=1e-9, speedup=1): self._J = helpers.vector_valued_function(J_profile, self.S3) self.J = self._J.vector().array() self.compute_gradient_matrix() self.H_gradm = df.PETScVector() self.P = P self.D = D / speedup self.lambda_sf = lambda_sf self.lambda_J = lambda_J self.tau_sf = lambda_sf ** 2 / D * speedup self.tau_sd = lambda_J ** 2 / D * speedup self.compute_laplace_matrix() self.H_laplace = df.PETScVector() self.nodal_volume_S3 = nodal_volume(self.S3) def set_pins(self, nodes): """ Hold the magnetisation constant for certain nodes in the mesh. Pass the indices of the pinned sites as *nodes*. Any type of sequence is fine, as long as the indices are between 0 (inclusive) and the highest index. This means you CANNOT use python style indexing with negative offsets counting backwards. 
""" if len(nodes) > 0: nb_nodes_mesh = len(self._m_field.get_numpy_array_debug()) / 3 if min(nodes) >= 0 and max(nodes) < nb_nodes_mesh: self._pins = np.array(nodes, dtype="int") else: log.error("Indices of pinned nodes should be in [0, {}), were [{}, {}].".format( nb_nodes_mesh, min(nodes), max(nodes))) else: self._pins = np.array([], dtype="int") def pins(self): return self._pins pins = property(pins, set_pins) @property def Ms(self): return self._Ms_dg @Ms.setter def Ms(self, value): self._Ms_dg = Field(df.FunctionSpace(self.mesh, 'DG', 0), value) self._Ms_dg.name = 'Ms' self.volumes = df.assemble(df.TestFunction(self.S1) * df.dx) Ms = df.assemble( self._Ms_dg.f * df.TestFunction(self.S1) * df.dx).array() / self.volumes self._Ms = Ms.copy() self.Ms_av = np.average(self._Ms_dg.vector().array()) @property def M(self): """The magnetisation, with length Ms.""" # FIXME:error here m = self.m.view().reshape((3, -1)) Ms = self.Ms.vector().array() if isinstance( self.Ms, df.Function) else self.Ms M = Ms * m return M.ravel() @property def M_average(self): """The average magnetisation, computed with m_average().""" volume_Ms = df.assemble(self._Ms_dg * df.dx) volume = df.assemble(self._Ms_dg * df.dx) return self.m_average * volume_Ms / volume @property def m(self): """The unit magnetisation.""" return self._m_field.get_numpy_array_debug() @m.setter def m(self, value): # Not enforcing unit length here, as that is better done # once at the initialisation of m. self._m_field.set_with_numpy_array_debug(value) self.dy_m.shape = (2, -1) self.dy_m[0][:] = value self.dy_m.shape = (-1,) @property def sundials_m(self): """The unit magnetisation.""" return self.dy_m @sundials_m.setter def sundials_m(self, value): # used to copy back from sundials cvode self.dy_m[:] = value[:] self.dy_m.shape = (2, -1) self._m_field.set_with_numpy_array_debug(self.dy_m[0][:]) self.dy_m.shape = (-1,) def m_average_fun(self, dx=df.dx): """ Compute and return the average polarisation according to the formula :math:`\\langle m \\rangle = \\frac{1}{V} \int m \: \mathrm{d}V` """ # mx = df.assemble(self._Ms_dg*df.dot(self._m, df.Constant([1, 0, 0])) * dx) # my = df.assemble(self._Ms_dg*df.dot(self._m, df.Constant([0, 1, 0])) * dx) # mz = df.assemble(self._Ms_dg*df.dot(self._m, df.Constant([0, 0, 1])) * dx) # volume = df.assemble(self._Ms_dg*dx) # # return np.array([mx, my, mz]) / volume return self._m_field.average(dx=dx) m_average = property(m_average_fun) def set_m(self, value, normalise=True, **kwargs): """ Set the magnetisation (if `normalise` is True, it is automatically normalised to unit length). `value` can have any of the forms accepted by the function 'finmag.util.helpers.vector_valued_function' (see its docstring for details). You can call this method anytime during the simulation. However, when providing a numpy array during time integration, the use of the attribute m instead of this method is advised for performance reasons and because the attribute m doesn't normalise the vector. """ self.m = helpers.vector_valued_function( value, self.S3, normalise=normalise, **kwargs).vector().array() def set_alpha(self, value): """ Set the damping constant :math:`\\alpha`. The parameter `value` can have any of the types accepted by the function :py:func:`finmag.util.helpers.scalar_valued_function` (see its docstring for details). 
""" self._alpha[:] = helpers.scalar_valued_function( value, self.S1).vector().array()[:] def compute_gradient_matrix(self): """ compute (J nabla) m , we hope we can use a matrix M such that M*m = (J nabla)m. """ tau = df.TrialFunction(self.S3) sigma = df.TestFunction(self.S3) dim = self.S3.mesh().topology().dim() ty = tz = 0 tx = self._J[0] * df.dot(df.grad(tau)[:, 0], sigma) if dim >= 2: ty = self._J[1] * df.dot(df.grad(tau)[:, 1], sigma) if dim >= 3: tz = self._J[2] * df.dot(df.grad(tau)[:, 2], sigma) self.gradM = df.assemble(1 / self.unit_length * (tx + ty + tz) * df.dx) def compute_gradient_field(self): self.gradM.mult(self._m_field.f.vector(), self.H_gradm) return self.H_gradm.array() / self.nodal_volume_S3 def compute_laplace_matrix(self): u3 = df.TrialFunction(self.S3) v3 = df.TestFunction(self.S3) self.laplace_M = df.assemble( self.D / self.unit_length ** 2 * df.inner(df.grad(u3), df.grad(v3)) * df.dx) def compute_laplace_field(self): self.laplace_M.mult(self._delta_m.vector(), self.H_laplace) return -1.0 * self.H_laplace.array() / self.nodal_volume_S3 def sundials_rhs(self, t, y, ydot): self.t = t y.shape = (2, -1) self._m_field.set_with_numpy_array_debug(y[0]) self._delta_m.vector().set_local(y[1]) y.shape = (-1,) self.effective_field.update(t) H_eff = self.effective_field.H_eff # alias (for readability) H_eff.shape = (3, -1) timer.start("sundials_rhs", self.__class__.__name__) # Use the same characteristic time as defined by c H_gradm = self.compute_gradient_field() H_gradm.shape = (3, -1) H_laplace = self.compute_laplace_field() H_laplace.shape = (3, -1) self.dm_dt.shape = (6, -1) m = self.m m.shape = (3, -1) char_time = 0.1 / self.c delta_m = self._delta_m.vector().array() delta_m.shape = (3, -1) native_llg.calc_llg_nonlocal_stt_dmdt( m, delta_m, H_eff, H_laplace, H_gradm, self.dm_dt, self.pins, self.gamma, self._alpha, char_time, self.P, self.tau_sd, self.tau_sf, self._Ms) timer.stop("sundials_rhs", self.__class__.__name__) self.dm_dt.shape = (-1,) ydot[:] = self.dm_dt[:] H_gradm.shape = (-1,) H_eff.shape = (-1,) m.shape = (-1,) delta_m.shape = (-1,) return 0 if __name__ == '__main__': pass
9,776
30.236422
112
py
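# A minimal standalone numpy sketch (not from the finmag sources),
# illustrating the state layout used by sundials_rhs above: the CVODE
# state vector stacks the magnetisation m and the non-local correction
# delta_m, and the code repeatedly reshapes the flat vector to (2, -1)
# to address the two halves. All names and sizes here are toy values.
import numpy as np

n = 4                              # number of mesh nodes (toy value)
m = np.random.rand(3 * n)          # flat layout [mx..., my..., mz...]
delta_m = np.zeros(3 * n)

y = np.concatenate([m, delta_m])   # what cvode sees as one flat vector
y.shape = (2, -1)                  # row 0 -> m, row 1 -> delta_m
assert np.allclose(y[0], m) and np.allclose(y[1], delta_m)
y.shape = (-1,)                    # restore the flat view for cvode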
finmag
finmag-master/src/finmag/physics/equation.py
""" This module solves the LLG equation or one of its many variants. Using native code tied in by instant, it allows to specify parameters and the terms of the equation that are to be used and then solves for dm/dt. No effective field computation, no saving of magnetisation to file or whatever, just straight up solving of the equation of motion. """ import logging import instant import dolfin as df import os import fnmatch import glob from os import path log = logging.getLogger(name="finmag") def find_slepc(): slepc = None if 'SLEPC_DIR' in os.environ: slepc = os.environ['SLEPC_DIR'] else: # At least on Ubuntu 16.04, the header files are in # /usr/lib/slepcdir/3.7.2/x86_64-linux-gnu-real/include/ # However, tried to be a bit more robust to find it. slepcpath = '/usr/lib/slepcdir' matches = [] if os.path.isdir(slepcpath): for root, dirnames, filenames in os.walk(slepcpath): for filename in fnmatch.filter(filenames, 'slepceps.h'): matches.append(root) # Dont want fortran header files! matches = [match for match in matches if 'finclude' not in match] if matches: slepc = matches[0] if not slepc: raise Exception("Cannot find SLEPc header files - please set environment variable SLEPC_DIR\n" "You can also modify finmag/src/physics/equation.py") else: print("Found SLEPc include files at {}".format(slepc)) return slepc def find_petsc(): petsc = None if 'SLEPC_DIR' in os.environ: petsc = os.environ['PETSC_DIR'] else: # At least on Ubuntu 16.04, the header files are in # /usr/lib/slepcdir/3.7.2/x86_64-linux-gnu-real/include/ # However, tried to be a bit more robust to find it. petscpath = '/usr/lib/petscdir' matches = [] if os.path.isdir(petscpath): for root, dirnames, filenames in os.walk(petscpath): for filename in fnmatch.filter(filenames, 'petscsys.h'): matches.append(root) # Dont want fortran header files! matches = [match for match in matches if 'finclude' not in match] if matches: petsc = matches[0] if not petsc: raise Exception("Cannot find PETSc header files - please set environment variable PETSC_DIR\n" "You can also modify finmag/src/physics/equation.py") else: print("Found PETSc include files at {}".format(petsc)) return petsc # find_slepc() # find_petsc() # TODO: use field class objects instead of dolfin vectors def Equation(m, H, dmdt): """ Returns equation object initialised with dolfin vectors m, H and dmdt. """ equation_module = get_equation_module(True) return equation_module.Equation(m, H, dmdt) def get_equation_module(for_distribution=False): """ Returns extension module that deals with the equation of motion. Will try to return from cache before recompiling. By default, dolfin will chose a cache directory using a digest of our code and some version numbers. This procedure enables dolfin to detect changes to our code and recompile on the fly. However, when we distribute FinMag we don't need or want on the fly recompilation and we'd rather have the resulting files placed in a directory known ahead of time. For this, call this function once with `for_distribution` set to True and ship FinMag including the directory build/equation. During normal use, our known cache directory is always checked before dolfin's temporary ones. Its existence bypasses on the fly recompilation. """ # __file__ will not be available during module init if this module is # compiled with cython. So the following line shouldn't be moved to the # module level. It is perfectly safe inside this function though. 
    MODULE_DIR = path.dirname(path.abspath(__file__))
    SOURCE_DIR = path.join(MODULE_DIR, "native")

    # Define our own cache base directory instead of the default one. This
    # helps in distributing only the compiled code without sources.
    CACHE_DIR = path.join(MODULE_DIR, "build")

    signature = "equation" if for_distribution else ""  # dolfin will choose

    # Try to get the module from the known distribution location before
    # asking instant about its cache. This way a distributed copy of FinMag
    # should never attempt recompilation (which would fail without sources).
    equation_module = instant.import_module("equation", CACHE_DIR)
    if equation_module is not None:
        log.debug("Got equation extension module from distribution location.")
    else:
        with open(path.join(SOURCE_DIR, "equation.h"), "r") as header:
            code = header.read()

        equation_module = df.compile_extension_module(
            code=code,
            sources=["equation.cpp", "terms.cpp", "derivatives.cpp"],
            source_directory=SOURCE_DIR,  # where the sources given above are
            include_dirs=[SOURCE_DIR, find_petsc(), find_slepc()],  # where to look for header files
            # dolfin's compile_extension_module will pass on `module_name` to
            # instant's build_module as `signature`. That's the name of the
            # directory it will be cached in. So don't worry if instant's doc
            # says that passing a module name will disable caching.
            module_name=signature,
            cache_dir=CACHE_DIR,)
    return equation_module
5,606
37.9375
102
py
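# A minimal standalone sketch (not from the finmag sources) of the
# cache-first lookup pattern used by get_equation_module() above: try a
# fixed, shipped cache directory first and only fall back to compiling
# from source if nothing is found there. `compile_from_source` is a
# hypothetical callable that would wrap df.compile_extension_module()
# exactly as in get_equation_module().
import instant

def load_or_compile(cache_dir, compile_from_source):
    # instant returns None when no cached module named "equation" exists
    module = instant.import_module("equation", cache_dir)
    if module is not None:
        return module              # shipped, precompiled copy found
    return compile_from_source()   # dev machine: build via dolfin/instant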
finmag
finmag-master/src/finmag/physics/__init__.py
0
0
0
py
finmag
finmag-master/src/finmag/physics/neb.py
import os
import dolfin as df
import numpy as np
import inspect
from aeon import timer

import finmag.util.consts as consts
from finmag.util import helpers
from finmag.physics.effective_field import EffectiveField
from finmag.util.vtk_saver import VTKSaver
from finmag import Simulation

# from finmag.field import Field  # Change sim._m to new field class
# in line 184

from finmag.native import sundials
import finmag.native.neb as native_neb
from finmag.util.fileio import Tablewriter, Tablereader

from mpl_toolkits.mplot3d import Axes3D
from matplotlib.colors import colorConverter
from matplotlib.collections import PolyCollection, LineCollection

import logging
log = logging.getLogger(name="finmag")


def cartesian2spherical(xyz):
    """
    Transform cartesian coordinates (x, y, z) to spherical coordinates.
    The function only returns the (theta, phi) pair since the magnetisation
    is fixed at zero temperature (the r-component is constant) and is fully
    characterised by two degrees of freedom.
    (We use this specifically to transform the magnetisation coordinates.)

    We assume that xyz is ordered as:

        [x1, x2, .... , y1, y2, ... , z1, z2 ...]

    """
    # Transform to a 3 x -- array
    xyz.shape = (3, -1)
    r_xy = np.sqrt(xyz[0, :] ** 2 + xyz[1, :] ** 2)
    theta = np.arctan2(r_xy, xyz[2, :])
    phi = np.arctan2(xyz[1, :], xyz[0, :])
    xyz.shape = (-1,)

    theta_phi = np.concatenate((theta, phi))
    # Return [theta1, theta2, ... , phi1, phi2, ... ]
    return theta_phi


def spherical2cartesian(theta_phi):
    """
    Returns the (x, y, z) cartesian components from spherical coordinates
    (theta, phi) for an r-component equal to 1 (thus, (x, y, z) is
    normalised).

    We are assuming that theta_phi has the form

        [theta1, theta2, ... , phi1, phi2, ...]

    as in the previous function.
    """
    theta_phi.shape = (2, -1)
    theta = theta_phi[0]
    phi = theta_phi[1]

    mxyz = np.zeros(3 * len(theta))
    mxyz.shape = (3, -1)
    mxyz[0, :] = np.sin(theta) * np.cos(phi)
    mxyz[1, :] = np.sin(theta) * np.sin(phi)
    mxyz[2, :] = np.cos(theta)
    mxyz.shape = (-1, )
    theta_phi.shape = (-1, )
    return mxyz


def check_boundary(theta_phi):
    """
    Rescale the theta, phi angle differences between two vectors when they
    are larger than specific values:

    * theta is undefined when it is smaller than zero or larger than pi.
      Here we redefine it to zero or pi in those cases.

    * If phi is larger than pi: subtract 2*pi to get the shorter arc that
      separates the two vectors. Similarly if phi is smaller than -pi.

    This rescales the phi angle differences into the same range as the
    theta angle:  | Delta-phi | < pi

    """
    theta_phi.shape = (2, -1)
    theta = theta_phi[0]
    phi = theta_phi[1]

    theta[theta > np.pi] = np.pi
    theta[theta < 0] = 0

    phi[phi > np.pi] -= 2 * np.pi
    phi[phi < -np.pi] += 2 * np.pi
    theta_phi.shape = (-1,)


def cartesian2spherical_field(field_c, theta_phi):
    """
    Transform the cartesian (x, y, z) field coordinates to spherical
    coordinates (r, t, p) using the unit vectors transformation matrix:

      | sin t cos p  | sin t sin p | cos t  |   | hx |     | h_r |
      | cos t cos p  | cos t sin p | -sin t |   | hy |  =  | h_t |
      | -sin p       | cos p       |  0     |   | hz |     | h_p |

    The function only returns the (t, p) = (theta, phi) components of h,
    since we assume that the r component is fixed. Note that the phi
    component below carries an extra sin(t) factor: what we actually
    transform is the energy gradient, and dm/dphi = sin(t) * e_p.

    """
    theta_phi.shape = (2, -1)
    theta = theta_phi[0]
    phi = theta_phi[1]

    field_s = np.zeros(theta_phi.shape)

    field_c.shape = (3, -1)
    field_s.shape = (2, -1)

    hx = field_c[0]
    hy = field_c[1]
    hz = field_c[2]

    sin_t = np.sin(theta)
    cos_t = np.cos(theta)
    sin_p = np.sin(phi)
    cos_p = np.cos(phi)

    field_s[0] = (hx * cos_p + hy * sin_p) * cos_t - hz * sin_t
    # extra sin(t) factor from dm/dphi = sin(t) * e_p (see docstring)
    field_s[1] = (-hx * sin_p + hy * cos_p) * sin_t
    field_c.shape = (-1,)
    field_s.shape = (-1,)
    theta_phi.shape = (-1,)

    return field_s


def linear_interpolation_two(m0, m1, n):
    """
    Define a linear interpolation between two states of the energy band
    (m0, m1) to get an initial state. The interpolation is done in the
    magnetic moments that constitute the magnetic system. To achieve this,
    we use spherical coordinates and the angle difference between m0 and m1
    is divided 'n + 1' times. Thus, we obtain n interpolations:

    m0 = [theta0_1, theta0_1, ..., phi0_1, phi0_2, ...]

         [theta1_1 + dt1, theta1_2 + dt2, ...,
          phi1_1 + df1, phi1_2 + df2, ...]

         [theta2_1 + 2*dt1, theta2_2 + 2*dt2, ...,
          phi2_1 + 2*df1, phi2_2 + 2*df2, ...]

         ...

    m1 = [thetan_1 + n*dt1, thetan_2 + n*dt2, ...,
          phin_1 + n*df1, phin_2 + n*df2, ...]

    where [dt1, dt2, ..., df1, df2, ...] = (m0 - m1) / (n + 1)
    and m0, m1 are in spherical coordinates, as specified before.

    The function returns an array of arrays, with the interpolations

        [thetai_1, thetai_2, ..., phii_1, phii_2, ...]

    as the entries of the main array (thetai_1 = thetai + i*dt1, and so on).
    """
    # Convert magnetic moments to spherical coords
    theta_phi0 = cartesian2spherical(m0)
    theta_phi1 = cartesian2spherical(m1)

    # The differences with the number of interps + 1
    dtheta = (theta_phi1 - theta_phi0) / (n + 1)

    coords = []
    for i in range(n):
        theta = theta_phi0 + (i + 1) * dtheta
        coords.append(theta)

    return coords


def normalise_m(a):
    """
    Normalise the magnetisation array. We assume:

        a = [mx1, mx2, ..., my1, my2, ..., mz1, mz2, ...]

    This is transformed into

        [ [mx1, mx2, ...],
          [my1, my2, ...],
          [mz1, mz2, ...] ]

    every column is normalised, and the result is returned as a
    1 x -- array again.
    """
    # Transform to matrix
    a.shape = (3, -1)
    # Compute the length of every column
    lengths = np.sqrt(a[0] * a[0] + a[1] * a[1] + a[2] * a[2])
    # Normalise all the entries
    a[:] /= lengths
    # Return to original shape
    a.shape = (-1, )


def linear_interpolation(theta_phi0, theta_phi1):
    """
    Compute a linear interpolation between two states theta_phi0 and
    theta_phi1, which are given in spherical coordinates. The midpoint is
    computed in cartesian coordinates, normalised, and returned in
    spherical coordinates again.
    """
    m0 = spherical2cartesian(theta_phi0)
    m1 = spherical2cartesian(theta_phi1)

    # suppose m0 and m1 are quite close
    m2 = (m0 + m1) / 2.0
    normalise_m(m2)

    return cartesian2spherical(m2)


def compute_dm(m0, m1):
    # Work in floating point so that the 2*pi wrap-arounds below cannot
    # truncate integer input.
    dm = (m0 - m1).astype(np.float64)
    length = len(dm)

    x = dm > np.pi
    dm[x] = 2 * np.pi - dm[x]
    x = dm < -np.pi
    # Fancy indexing returns a copy, so np.add(dm[x], 2*np.pi, out=dm[x])
    # would write into a temporary array and be lost; assign back instead.
    dm[x] += 2 * np.pi

    dm = np.sqrt(np.sum(dm ** 2)) / length
    return dm


class NEB_Sundials(object):

    """
    Nudged elastic band method by solving the differential equation using
    Sundials.
    """

    def __init__(self, sim, initial_images, interpolations=None,
                 spring=5e5, name='unnamed'):
        """
        *Arguments*

            sim: the Simulation class

            initial_images: a list containing the initial images, each of
                which can have any of the forms accepted by the function
                'finmag.util.helpers.vector_valued_function', for example,

                    initial_images = [(0, 0, 1), (0, 0, -1)]

                or with a user-defined function

                    def init_m(pos):
                        x = pos[0]
                        if x < 10:
                            return (0, 1, 1)
                        return (-1, 0, 0)

                    initial_images = [(0, 0, 1), (0, 0, -1), init_m]

                are accepted forms.

            interpolations: a list containing only integers; its length
                should equal the length of the initial_images list minus 1,
                i.e., len(interpolations) = len(initial_images) - 1

            spring: the spring constant, a float value

            disable_tangent: an experimental option; by disabling the
                tangent, we can get a rough feeling for the local energy
                minima quickly.
""" self.sim = sim self.name = name self.spring = spring # Dolfin function of the new _m_field (instead of _m) # self._m = sim.llg._m_field.f self._m = sim.m_field.f self.effective_field = sim.llg.effective_field if interpolations is None: interpolations = [0 for i in range(len(initial_images) - 1)] self.initial_images = initial_images self.interpolations = interpolations if len(interpolations) != len(initial_images) - 1: raise RuntimeError("""The length of interpolations should be equal to the length of the initial_images array minus 1, i.e., len(interpolations) = len(initial_images) - 1""") if len(initial_images) < 2: raise RuntimeError("""At least two images must be provided to create the energy band""") # the total image number including two ends self.total_image_num = len(initial_images) + sum(interpolations) # The number of images without the extremes self.image_num = self.total_image_num - 2 # Number of spins per image. The _m.vector has the form # [mx1, mx2, ..., my1, my2, ..., mz1, mz2] # Thus we divide by 3 to get the total of ms # self.nxyz = len(self._m.vector()) / 3 # Use the full vector from the field class to get the total number # of degrees of freedom when using PBC # (the self._m gave us the reduced number of spins when using PBC) self.nxyz = len(self.sim.m_field.get_ordered_numpy_array_xxx()) / 3 # Total number of degrees of freedom # (In spherical coords, we have 2 components per spin) self.coords = np.zeros(2 * self.nxyz * self.total_image_num) self.last_m = np.zeros(self.coords.shape) self.Heff = np.zeros(2 * self.nxyz * self.image_num) self.Heff.shape = (self.image_num, -1) self.tangents = np.zeros(self.Heff.shape) self.energy = np.zeros(self.total_image_num) self.springs = np.zeros(self.image_num) self.t = 0 self.step = 0 self.ode_count = 1 self.integrator = None self.initial_image_coordinates() self.create_tablewriter() def create_tablewriter(self): entities_energy = { 'step': {'unit': '<1>', 'get': lambda sim: sim.step, 'header': 'steps'}, 'energy': {'unit': '<J>', 'get': lambda sim: sim.energy, 'header': ['image_%d' % i for i in range(self.image_num + 2)]} } self.tablewriter = Tablewriter( '%s_energy.ndt' % (self.name), self, override=True, entities=entities_energy) entities_dm = { 'step': {'unit': '<1>', 'get': lambda sim: sim.step, 'header': 'steps'}, 'dms': {'unit': '<1>', 'get': lambda sim: sim.distances, 'header': ['image_%d_%d' % (i, i + 1) for i in range(self.image_num + 1)]} } self.tablewriter_dm = Tablewriter( '%s_dms.ndt' % (self.name), self, override=True, entities=entities_dm) def initial_image_coordinates(self): """ Generate the coordinates linearly according to the number of interpolations provided. Example: Imagine we have 4 images and we want 3 interpolations between every neighbouring pair, i.e interpolations = [3, 3, 3] 1. Imagine the initial states with the interpolation numbers and choose the first and second state 0 1 2 3 X -------- X --------- X -------- X 3 3 3 2. Counter image_id is set to 0 3. Set the image 0 magnetisation vector as m0 and append the values to self.coords[0]. Update the counter: image_id = 1 now 4. Set the image 1 magnetisation values as m1 and interpolate the values between m0 and m1, generating 3 arrays with the magnetisation values of every interpolation image. For every array, append the values to self.coords[i] with i = 1, 2 and 3 ; updating the counter every time, so image_id = 4 now 5. Append the value of m1 (image 1) in self.coords[4] Update counter (image_id = 5 now) 6. 
Move to the next pair of
           images, now set the 1st image magnetisation values as m0 and
           append them to self.coords[5]

        7. Interpolate to get self.coords[i], for i = 6, 7, 8 ...

        8. Repeat as before until we reach the last pair of images: 2 - 3

        9. Finally append the magnetisation of the last image
           (self.initial_images[-1]). In this case, the 3rd image

        Then, for every magnetisation vector values array (self.coords[i]),
        pass the values to the simulation and store the energies
        corresponding to every i-th image in the self.energy[i] array.

        Finally, flatten the self.coords matrix (containing the
        magnetisation values of every image in different rows).

        ** Our generalised coordinates in the NEBM are the magnetisation
        values.

        """
        image_id = 0
        self.coords.shape = (self.total_image_num, -1)

        # For every interpolation between images (zero if no interpolations
        # were specified)
        for i in range(len(self.interpolations)):

            n = self.interpolations[i]

            # Save on the first image of a pair (step 1, 6, ...)
            self.sim.set_m(self.initial_images[i])

            # m0 = self.sim.m

            # Use the full array for PBCs
            m0 = self.sim.m_field.get_ordered_numpy_array_xxx()

            # DEBUGGING
            # This shows that, when using PBC, the vector() is reduced
            # (boundary values that are repeated) while the ordered array
            # considers all the spins
            #
            # print len(self.sim.m_field.f.vector())
            # print len(self.sim.m_field.get_ordered_numpy_array_xxx())
            # df.plot(self.sim.m_field.f, interactive=True)

            self.coords[image_id][:] = cartesian2spherical(m0)
            image_id = image_id + 1

            # Set the second image in the pair as m1 and interpolate
            # (step 4 and 7), saving in corresponding self.coords entries
            self.sim.set_m(self.initial_images[i + 1])

            # m1 = self.sim.m
            # Use the full array for PBCs
            m1 = self.sim.m_field.get_ordered_numpy_array_xxx()

            # Interpolations (arrays with magnetisation values)
            coords = linear_interpolation_two(m0, m1, n)

            for coord in coords:
                self.coords[image_id][:] = coord[:]
                image_id = image_id + 1

            # Continue to the next pair of images

        # Append the magnetisation of the last image
        self.sim.set_m(self.initial_images[-1])

        # m2 = self.sim.m
        # Use the full array for PBCs
        m2 = self.sim.m_field.get_ordered_numpy_array_xxx()

        self.coords[image_id][:] = cartesian2spherical(m2)

        # Save the energies
        for i in range(self.total_image_num):
            # To assign the values from the full array using set_local
            # (with boundaries when using PBCs) to a reduced
            # sim.m_field.vector(), we use the ordered dof to vertex map
            # (d2v_xxx), which has a reduced number of indexes
            # (we take the value from the field class)
            self._m.vector().set_local(
                spherical2cartesian(self.coords[i])[self.sim.m_field.d2v_xxx])

            # This is for checking that the interpolations worked
            # df.plot(self._m, interactive=True)

            self.effective_field.update()
            self.energy[i] = self.effective_field.total_energy()

        # Flatten the array
        self.coords.shape = (-1,)

    def save_vtks(self):
        """
        Save vtk files in different folders, according to the simulation
        name and step. Files are saved as simname_simstep/image_00000x.vtu
        """
        # Create the directory
        directory = 'vtks/%s_%d' % (self.name, self.step)
        if not os.path.exists(directory):
            os.makedirs(directory)

        # Save the vtk files with the finmag function
        # The last separator '_' is to distinguish the image
        # from its corresponding number, e.g.
image_000001.pvd vtk_saver = VTKSaver('%s/image_.pvd' % (directory), overwrite=True) self.coords.shape = (self.total_image_num, -1) for i in range(self.total_image_num): # We will save the vectors with the REDUCED length array self._m.vector().set_local(spherical2cartesian(self.coords[i, :])[self.sim.m_field.d2v_xxx]) # set t =0, it seems that the parameter time is only # for the interface? vtk_saver.save_field(self._m, 0) self.coords.shape = (-1, ) def save_npys(self): """ Save npy files in different folders according to the simulation name and step Files are saved as: simname_simstep/image_x.npy """ # Create directory as simname_simstep directory = 'npys/%s_%d' % (self.name, self.step) if not os.path.exists(directory): os.makedirs(directory) # Save the images with the format: 'image_{}.npy' # where {} is the image number, starting from 0 self.coords.shape = (self.total_image_num, -1) for i in range(self.total_image_num): name = os.path.join(directory, 'image_%d.npy' % i) # Save the reduced length array # Since the dolfin vector (for the magnetisation) can have any # ordering, we rely on the fact that # this mapping does not change when we use the same mesh # when loading the system from a different simulation # In the future it can be useful to save the mesh together with # the magnetisation in a single hdf5 file np.save(name, spherical2cartesian(self.coords[i, :])[self.sim.m_field.d2v_xxx]) self.coords.shape = (-1, ) def create_integrator(self, reltol=1e-6, abstol=1e-6, nsteps=10000): integrator = sundials.cvode(sundials.CV_BDF, sundials.CV_NEWTON) integrator.init(self.sundials_rhs, 0, self.coords) integrator.set_linear_solver_sp_gmr(sundials.PREC_NONE) integrator.set_scalar_tolerances(reltol, abstol) integrator.set_max_num_steps(nsteps) self.integrator = integrator def compute_effective_field(self, y): """ Compute the effective field and the tangents using the formalism developed by Henkelman et al and applied to micromagnetics by Dittrich et al The tangents use the native NEB code in finmag/native/src/neb/helper.cc """ y.shape = (self.total_image_num, -1) for i in range(self.image_num): # Redefine the angles if phi is larger than pi # (see the corresponding function) check_boundary(y[i + 1]) # Transform the input 'y' to cartesian to compute the fields # # spherical2cartesian updates the full system vector (y), but to update # the magnetisation we only need the reduced vector, thus # we use the d2v map self._m.vector().set_local(spherical2cartesian(y[i + 1])[self.sim.m_field.d2v_xxx]) # self.effective_field.update() # Compute effective field, which is the gradient of # the energy in the NEB method (derivative with respect to # the generalised coordinates) # # To get the effective field for the whole system we use the v2d map h = self.effective_field.H_eff[self.sim.m_field.v2d_xxx] # Transform to spherical coordinates # DEBUG # print len(h) # print len(y[i + 1]) self.Heff[i, :] = cartesian2spherical_field(h, y[i + 1]) # Compute the total energy self.energy[i + 1] = self.effective_field.total_energy() # Compute the 'distance' or difference between neighbouring states # around y[i+1]. 
This is used to compute the spring force # dm1 = compute_dm(y[i], y[i + 1]) dm2 = compute_dm(y[i + 1], y[i + 2]) self.springs[i] = self.spring * (dm2 - dm1) # Use the native NEB to compute the tangents according to the # improved NEB method, developed by Henkelman and Jonsson # at: Henkelman et al., Journal of Chemical Physics 113, 22 (2000) native_neb.compute_tangents(y, self.energy, self.tangents) # native_neb.compute_springs(y,self.springs,self.spring) y.shape = (-1, ) def sundials_rhs(self, time, y, ydot): self.ode_count += 1 timer.start("sundials_rhs", self.__class__.__name__) self.compute_effective_field(y) ydot.shape = (self.total_image_num, -1) for i in range(self.image_num): h = self.Heff[i] t = self.tangents[i] sf = self.springs[i] h3 = h - np.dot(h, t) * t + sf * t ydot[i + 1, :] = h3[:] ydot[0, :] = 0 ydot[-1, :] = 0 ydot.shape = (-1,) timer.stop("sundials_rhs", self.__class__.__name__) return 0 def compute_distance(self): distance = [] ys = self.coords ys.shape = (self.total_image_num, -1) for i in range(self.total_image_num - 1): dm = compute_dm(ys[i], ys[i + 1]) distance.append(dm) ys.shape = (-1, ) self.distances = np.array(distance) def run_until(self, t): if t <= self.t: return self.integrator.advance_time(t, self.coords) m = self.coords y = self.last_m m.shape = (self.total_image_num, -1) y.shape = (self.total_image_num, -1) max_dmdt = 0 for i in range(1, self.image_num + 1): dmdt = compute_dm(y[i], m[i]) / (t - self.t) if dmdt > max_dmdt: max_dmdt = dmdt m.shape = (-1,) y.shape = (-1,) self.last_m[:] = m[:] self.t = t return max_dmdt def relax(self, dt=1e-8, stopping_dmdt=1e4, max_steps=1000, save_npy_steps=100, save_vtk_steps=100): if self.integrator is None: self.create_integrator() log.debug("Relaxation parameters: " "stopping_dmdt={} (degrees per nanosecond), " "time_step={} s, max_steps={}.".format(stopping_dmdt, dt, max_steps)) # Write the initial step (step=0) self.compute_distance() self.tablewriter.save() self.tablewriter_dm.save() for i in range(max_steps): if i % save_vtk_steps == 0: self.save_vtks() if i % save_npy_steps == 0: self.save_npys() self.step += 1 cvode_dt = self.integrator.get_current_step() increment_dt = dt if cvode_dt > dt: increment_dt = cvode_dt dmdt = self.run_until(self.t + increment_dt) self.compute_distance() self.tablewriter.save() self.tablewriter_dm.save() log.debug("step: {:.3g}, step_size: {:.3g}" " and max_dmdt: {:.3g}.".format(self.step, increment_dt, dmdt)) if dmdt < stopping_dmdt: break log.info("Relaxation finished at time step = {:.4g}, " "t = {:.2g}, call rhs = {:.4g} " "and max_dmdt = {:.3g}".format(self.step, self.t, self.ode_count, dmdt)) self.save_vtks() self.save_npys() def __adjust_coords_once(self): self.compute_effective_field(self.coords) self.compute_distance() average_dm = np.mean(self.distances) # What about a local minimum? 
energy_barrier = np.max(self.energy) - np.min(self.energy) dm_threshold = average_dm / 5.0 energy_threshold = energy_barrier / 5.0 to_be_remove_id = -1 for i in range(self.image_num): e1 = self.energy[i + 1] - self.energy[i] e2 = self.energy[i + 2] - self.energy[i + 1] if self.distances[i] < dm_threshold and \ self.distances[i + 1] < dm_threshold \ and e1 * e2 > 0 \ and abs(e1) < energy_threshold \ and abs(e2) < energy_threshold: to_be_remove_id = i + 1 break if to_be_remove_id < 0: return -1 self.coords.shape = (self.total_image_num, -1) coords_list = [] for i in range(self.total_image_num): coords_list.append(self.coords[i].copy()) energy_diff = [] for i in range(self.total_image_num - 1): de = abs(self.energy[i] - self.energy[i + 1]) energy_diff.append(de) # if there is a saddle point, increase the weight # of the energy difference factor1 = 2.0 for i in range(1, self.total_image_num - 1): de1 = self.energy[i] - self.energy[i - 1] de2 = self.energy[i + 1] - self.energy[i] if de1 * de2 < 0: energy_diff[i - 1] *= factor1 energy_diff[i] *= factor1 factor2 = 2.0 for i in range(2, self.total_image_num - 2): de1 = self.energy[i - 1] - self.energy[i - 2] de2 = self.energy[i] - self.energy[i - 1] de3 = self.energy[i + 1] - self.energy[i] de4 = self.energy[i + 2] - self.energy[i + 1] if de1 * de2 > 0 and de3 * de4 > 0 and de2 * de3 < 0: energy_diff[i - 1] *= factor2 energy_diff[i] *= factor2 max_i = np.argmax(energy_diff) theta_phi = linear_interpolation(coords_list[max_i], coords_list[max_i + 1]) if to_be_remove_id < max_i: coords_list.insert(max_i + 1, theta_phi) coords_list.pop(to_be_remove_id) else: coords_list.pop(to_be_remove_id) coords_list.insert(max_i + 1, theta_phi) for i in range(self.total_image_num): m = coords_list[i] self.coords[i, :] = m[:] # print to_be_remove_id, max_i self.coords.shape = (-1, ) return 0 def adjust_coordinates(self): """ Adjust the coordinates automatically. """ for i in range(self.total_image_num / 2): if self.__adjust_coords_once() < 0: break """ self.compute_effective_field(self.coords) self.compute_distance() self.tablewriter.save() self.tablewriter_dm.save() self.step += 1 self.tablewriter.save() self.tablewriter_dm.save() """ log.info("Adjust the coordinates at step = {:.4g}, t = {:.6g},".format( self.step, self.t)) def plot_energy_2d(name, step=-1): """ Plot the energy path at given step. name is the simulation name. 
""" import matplotlib.pyplot as plt data = np.loadtxt('%s_energy.ndt' % name) dms = np.loadtxt('%s_dms.ndt' % name) if data.ndim == 1: data.shape = (1, -1) dms.shape = (1, -1) if step < 0: step = data[-1, 0] id = -1 else: steps = abs(data[:, 0] - step) id = np.argmin(steps) step = data[id, 0] fig = plt.figure() xs = range(1, len(data[0, :])) for i in range(len(xs)): xs[i] = sum(dms[id, 1:i + 1]) plt.plot(xs, data[id, 1:], '.-') plt.legend() plt.grid() plt.ylabel('Energy (J)') plt.xlabel('Position in path (a.u.)') fig.savefig('energy_%d.pdf' % step) def plot_energy_3d(name, key_steps=50, filename=None): import matplotlib.pyplot as plt data = np.loadtxt('%s_energy.ndt' % name) if data.ndim == 1: data.shape = (1, -1) fig = plt.figure() ax = fig.gca(projection='3d') # image index xs = range(1, data.shape[1]) steps = data[:, 0] each_n_step = int(len(steps) / key_steps) if each_n_step < 1: each_n_step = 1 cc = lambda arg: colorConverter.to_rgba(arg, alpha=0.6) colors = [cc('r'), cc('g'), cc('b'), cc('y')] facecolors = [] line_data = [] energy_min = np.min(data[:, 1:]) zs = [] index = 0 for i in range(0, len(steps), each_n_step): line_data.append(list(zip(xs, data[i, 1:] - energy_min))) facecolors.append(colors[index % 4]) zs.append(data[i, 0]) index += 1 poly = PolyCollection(line_data, facecolors=facecolors, closed=False) poly.set_alpha(0.7) ax.add_collection3d(poly, zs=zs, zdir='x') ax.set_xlabel('Steps') ax.set_ylabel('images') ax.set_zlabel('Energy (J)') ax.set_ylim3d(0, len(xs) + 1) ax.set_xlim3d(0, int(data[-1, 0]) + 1) ax.set_zlim3d(0, np.max(data[:, 1:] - energy_min)) if filename is None: filename = '%s_energy_3d.pdf' % name fig.savefig(filename) if __name__ == '__main__': import finmag sim = finmag.example.barmini() init_images = [(0, 0, -1), (1, 1, 0), (0, 0, 1)] interpolations = [15, 14] neb = NEB_Sundials(sim, init_images, interpolations) neb.relax(stopping_dmdt=1e2) plot_energy_3d('unnamed_energy.ndt')
30,649
31.263158
104
py
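# A minimal standalone numpy sketch (not from the finmag sources),
# re-deriving the (theta, phi) <-> (x, y, z) conventions documented in
# cartesian2spherical/spherical2cartesian above (flat layout
# [x1, x2, ..., y1, y2, ..., z1, z2, ...], unit radius). Unlike the
# originals, this version uses reshape() copies instead of in-place
# shape mutation, so the inputs are left untouched.
import numpy as np

def to_spherical(xyz):
    x, y, z = xyz.reshape(3, -1)
    r_xy = np.sqrt(x ** 2 + y ** 2)
    return np.concatenate([np.arctan2(r_xy, z), np.arctan2(y, x)])

def to_cartesian(theta_phi):
    theta, phi = theta_phi.reshape(2, -1)
    return np.concatenate([np.sin(theta) * np.cos(phi),
                           np.sin(theta) * np.sin(phi),
                           np.cos(theta)])

m = np.array([1.0, 0.0,    # x components of two unit spins
              0.0, 1.0,    # y components
              0.0, 0.0])   # z components
assert np.allclose(to_cartesian(to_spherical(m)), m)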
finmag
finmag-master/src/finmag/physics/tests/test_effective_field.py
import numpy as np import dolfin as df import pytest import os from finmag.example import barmini def test_effective_field_compute_returns_copy(tmpdir): """ Regression test to ensure that the value returned by EffectiveField.compute() does not change as a simulation progresses. This used to happen since EffectiveField.compute() returned a reference to an internal numpy array instead of a copy. Here we check that this is fixed. """ os.chdir(str(tmpdir)) sim = barmini() h0 = sim.effective_field() h0_copy = h0.copy() sim.run_until(1e-12) h1 = sim.effective_field() assert np.allclose(h0, h0_copy, atol=0, rtol=1e-8) assert not np.allclose(h0, h1, atol=0, rtol=1e-8)
733
27.230769
70
py
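# A minimal standalone sketch (not from the finmag sources) of the bug
# this regression test guards against: returning the internal array
# itself means the caller's "snapshot" silently changes when the
# simulation advances. The class below is purely illustrative.
import numpy as np

class FieldHolder(object):
    def __init__(self):
        self._H = np.zeros(3)

    def compute_buggy(self):
        return self._H           # returns a reference to internal state

    def compute_fixed(self):
        return self._H.copy()    # returns a safe snapshot

holder = FieldHolder()
h_bad, h_good = holder.compute_buggy(), holder.compute_fixed()
holder._H[:] = 1.0               # simulation moves on...
assert h_bad[0] == 1.0           # ...and the buggy snapshot changed
assert h_good[0] == 0.0          # the copy is unaffected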
finmag
finmag-master/src/finmag/physics/tests/test_terms.py
import pytest from os import path import dolfin as df import finmag.physics.equation as eqn @pytest.fixture def terms_module(): MODULE_DIR = path.dirname(path.abspath(__file__)) SOURCE_DIR = path.join(MODULE_DIR, "..", "native") with open(path.join(SOURCE_DIR, "terms.h"), "r") as header: code = header.read() extension_module = df.compile_extension_module( code=code, source_directory=SOURCE_DIR, sources=["terms.cpp"], # declare dm_x, dm_y and dm_z as input/output parameters # they will turn up in Python as return values additional_declarations="%apply double& INOUT { double& dm_x, double& dm_y, double& dm_z };", include_dirs=[SOURCE_DIR, eqn.find_petsc(), eqn.find_slepc()],) return extension_module def test_damping(terms_module): alpha, gamma = 1, 1 mx, my, mz = 1, 0, 0 Hx, Hy, Hz = 0, 1, 0 dmx, dmy, dmz = terms_module.damping(alpha, gamma, mx, my, mz, Hx, Hy, Hz, 0, 0, 0) assert (dmx, dmy, dmz) == (0, 0.5, 0) def test_precession(terms_module): alpha, gamma = 1, 1 mx, my, mz = 1, 0, 0 Hx, Hy, Hz = 0, 1, 0 dmx, dmy, dmz = terms_module.precession(alpha, gamma, mx, my, mz, Hx, Hy, Hz, 0, 0, 0) assert (dmx, dmy, dmz) == (0, 0, -0.5) def test_relaxation(terms_module): c = 1.0 mx, my, mz = 2, 0, 0 dmx, dmy, dmz = terms_module.relaxation(c, mx, my, mz, 0, 0, 0) assert (dmx, dmy, dmz) == (-6, 0, 0)
1,460
30.76087
101
py
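# A minimal standalone numpy cross-check (not from the finmag sources)
# of the expected values in the tests above, using the standard LLG
# terms with gamma' = gamma / (1 + alpha^2). The relaxation value is
# consistent with c * (1 - |m|^2) * m, which is assumed here to be how
# the native term is defined.
import numpy as np

alpha, gamma = 1.0, 1.0
gamma_prime = gamma / (1 + alpha ** 2)
m = np.array([1.0, 0.0, 0.0])
H = np.array([0.0, 1.0, 0.0])

precession = -gamma_prime * np.cross(m, H)
damping = -alpha * gamma_prime * np.cross(m, np.cross(m, H))
assert np.allclose(precession, [0, 0, -0.5])
assert np.allclose(damping, [0, 0.5, 0])

c, m2 = 1.0, np.array([2.0, 0.0, 0.0])
relaxation = c * (1 - m2.dot(m2)) * m2
assert np.allclose(relaxation, [-6, 0, 0])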
finmag
finmag-master/src/finmag/physics/tests/test_equation.py
import pytest import numpy as np import dolfin as df from finmag.physics.equation import Equation @pytest.fixture def setup(): mesh = df.UnitIntervalMesh(3) V = df.FunctionSpace(mesh, "CG", 1) alpha = df.Function(V) alpha.assign(df.Constant(1)) W = df.VectorFunctionSpace(mesh, "CG", 1, dim=3) m = df.Function(W) m.assign(df.Constant((0.6, 0.8, 0))) H = df.Function(W) H.assign(df.Constant((1, 2, 3))) dmdt = df.Function(W) return mesh, V, alpha, W, m, H, dmdt def setup_for_debugging(): """ Sets up equation for greater convenience during interactive debugging. """ mesh, V, alpha, W, m, H, dmdt = setup() equation = Equation(m.vector(), H.vector(), dmdt.vector()) equation.set_alpha(alpha.vector()) equation.set_gamma(1.0) equation.solve() return {'mesh': mesh, 'V': V, 'alpha': alpha, 'W': W, 'm': m, 'H': H, 'dmdt': dmdt, 'equation': equation} def same(v, w, TOL=1e-14): """ Returns True if the vectors `v` and `w` have the same entries. """ diff = v - w diff.abs() print "v = {}\nw = {}\ndiff = {}".format(v.array(), w.array(), diff.array()) return diff.sum() < TOL def test_new_equation(setup): mesh, V, alpha, W, m, H, dmdt = setup equation = Equation(m.vector(), H.vector(), dmdt.vector()) def test_new_equation_wrong_size(setup): mesh, V, alpha, W, m, H, dmdt = setup W = df.VectorFunctionSpace(mesh, "CG", 2, dim=3) # W like Wrong H_W = df.Function(W) with pytest.raises(StandardError): equation = Equation(m.vector(), H_W.vector(), dmdt.vector()) def test_regression_vector_wrong_state(setup): mesh, V, alpha, W, m, H, dmdt = setup equation = Equation(m.vector(), H.vector(), dmdt.vector()) equation.set_alpha(alpha.vector()) equation.set_gamma(1.0) equation.solve() # the following operation would fail with PETSc error code 73 # saying the vector is in wrong state. An "apply" call in the C++ # code fixes this. operation = dmdt.vector() - m.vector() def test_alpha_not_set(setup): mesh, V, alpha, W, m, H, dmdt = setup equation = Equation(m.vector(), H.vector(), dmdt.vector()) assert equation.get_alpha() is None # doesn't crash with pytest.raises(RuntimeError): equation.solve() def test_alpha_keeps_track_of_change(setup): mesh, V, alpha, W, m, H, dmdt = setup equation = Equation(m.vector(), H.vector(), dmdt.vector()) equation.set_alpha(alpha.vector()) assert same(alpha.vector(), equation.get_alpha()) # since alpha and Equation::alpha are fundamentally the same object # changing one should change the other, which is what we test next alpha.assign(df.Constant(2)) assert same(alpha.vector(), equation.get_alpha()) def test_solve(setup): mesh, V, alpha, W, m, H, dmdt = setup equation = Equation(m.vector(), H.vector(), dmdt.vector()) equation.set_alpha(alpha.vector()) equation.set_gamma(1.0) equation.solve() dmdt_expected = df.Function(W) #dmdt_expected.assign(df.Constant((0.0, 0.5, -0.5))) dmdt_expected.assign(df.Constant((-1.36, 1.02, 1.3))) assert same(dmdt.vector(), dmdt_expected.vector()) def test_pinning(setup): mesh, V, alpha, W, m, H, dmdt = setup equation = Equation(m.vector(), H.vector(), dmdt.vector()) equation.set_alpha(alpha.vector()) equation.set_gamma(1.0) pins = df.Function(V) pins.vector()[0] = 1 # pin first node, but this could be done using an expression equation.set_pinned_nodes(pins.vector()) equation.solve() dmdt_node0 = dmdt.vector().array().reshape(3, -1)[:, 0] dmdt_node_others = dmdt.vector().array().reshape(3, -1)[:, 1:] # check that first node is pinned, i.e. 
dmdt = 0 there assert np.all(dmdt_node0 == np.array((0, 0, 0))) # check that we don't accidentally set the whole dmdt array to zero assert not np.all(dmdt_node_others == 0) def test_slonczewski(setup): mesh, V, alpha, W, m, H, dmdt = setup equation = Equation(m.vector(), H.vector(), dmdt.vector()) equation.set_alpha(alpha.vector()) equation.set_gamma(1.0) Ms = df.Function(V) Ms.assign(df.Constant(1)) J = df.Function(V) J.assign(df.Constant(1)) equation.slonczewski(5e-9, 0.4, np.array((1.0, 0.0, 0.0)), 1, 0) assert equation.slonczewski_status() is False # missing J, Ms equation.set_saturation_magnetisation(Ms.vector()) equation.set_current_density(J.vector()) assert equation.slonczewski_status() is True equation.solve()
4,598
32.086331
86
py
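# A minimal standalone numpy check (not from the finmag sources) of
# where dmdt_expected = (-1.36, 1.02, 1.3) in test_solve comes from:
# with alpha = gamma = 1 (so gamma' = 0.5) and the LLG right-hand side
# dm/dt = -gamma' m x H - alpha gamma' m x (m x H).
import numpy as np

m = np.array([0.6, 0.8, 0.0])
H = np.array([1.0, 2.0, 3.0])
dmdt = -0.5 * np.cross(m, H) - 0.5 * np.cross(m, np.cross(m, H))
assert np.allclose(dmdt, [-1.36, 1.02, 1.3])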
finmag
finmag-master/src/finmag/physics/tests/neb/microspin.py
import os
import dolfin as df
import numpy as np

import matplotlib as mpl
mpl.use("Agg")
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.colors import colorConverter
from matplotlib.collections import PolyCollection, LineCollection

from finmag import Simulation as Sim
from finmag.energies import Exchange, DMI, UniaxialAnisotropy
from finmag.util.fileio import Tablereader
from finmag.physics.neb import NEB_Sundials, plot_energy_3d

MODULE_DIR = os.path.dirname(os.path.abspath(__file__))


def create_simulation(mesh):

    sim = Sim(mesh, Ms=8.6e5, unit_length=1e-9)
    sim.set_m((1, 0, 0))
    sim.add(UniaxialAnisotropy(-1e5, (0, 0, 1), name='Kp'))
    sim.add(UniaxialAnisotropy(1e4, (1, 0, 0), name='Kx'))

    return sim


def relax_system(sim):
    init_images = [(-1, 0, 0), (0, 1, 1), (1, 0, 0)]
    interpolations = [10, 8]

    neb = NEB_Sundials(
        sim, init_images, interpolations, spring=1e6, name='neb')

    # NEB_Sundials.relax() only accepts save_npy_steps/save_vtk_steps and
    # writes the .ndt files on every step anyway, so no extra keyword is
    # needed for the ndt output.
    neb.relax(max_steps=500, stopping_dmdt=1e2)


def plot_data_2d():

    data = np.loadtxt('neb_energy.ndt')

    fig = plt.figure()
    xs = range(1, len(data[0, :]))
    plt.plot(xs, data[-1, 1:], '.-')

    plt.grid()

    fig.savefig('last_energy.pdf')


if __name__ == "__main__":

    mesh = df.RectangleMesh(df.Point(0, 0), df.Point(10, 10), 1, 1)
    sim = create_simulation(mesh)
    relax_system(sim)
    plot_data_2d()
    plot_energy_3d('neb_energy.ndt')
1,477
22.09375
73
py
finmag
finmag-master/src/finmag/physics/tests/neb/neb_test.py
import os
import dolfin as df
import numpy as np

import matplotlib as mpl
mpl.use("Agg")
import matplotlib.pyplot as plt

from finmag import Simulation as Sim
from finmag.energies import Exchange, UniaxialAnisotropy

MODULE_DIR = os.path.dirname(os.path.abspath(__file__))


def test_compute_dm():
    from finmag.physics.neb import compute_dm

    a1 = np.array([0, 0, 1])
    a2 = np.array([0, 1, 0])
    # compute_dm normalises by the number of entries (here 3), so undo
    # that factor before comparing with the squared distance
    # |a1 - a2|^2 = 2.
    assert abs(compute_dm(a1, a2) ** 2 * len(a1) ** 2 - 2.0) < 1e-15


def test_cartesian2spherical_field():
    from finmag.physics.neb import cartesian2spherical_field

    Kx = 1e5
    Kp = 6e4

    theta = 0.17
    phi = 0.23
    theta_phi = np.array([theta, phi])

    mx = np.sin(theta) * np.cos(phi)
    my = np.sin(theta) * np.sin(phi)
    mz = np.cos(theta)

    E = -Kx * mx ** 2 + Kp * mz ** 2

    hx = -2 * Kx * mx
    hz = 2 * Kp * mz
    H = np.array([hx, 0, hz])

    pE_theta = -2 * Kx * mx * \
        np.cos(phi) * np.cos(theta) + 2 * Kp * mz * (-np.sin(theta))
    pE_phi = 2 * Kx * mx * np.sin(theta) * np.sin(phi)

    res = cartesian2spherical_field(H, theta_phi)
    assert abs(res[0] - pE_theta) < 1e-15
    assert abs(res[1] - pE_phi) < 1e-12


if __name__ == "__main__":
    test_compute_dm()
    test_cartesian2spherical_field()
1,245
20.482759
68
py
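# A minimal standalone sketch (not from the finmag sources) confirming
# that the reference values pE_theta/pE_phi above are the chain-rule
# derivatives of E(theta, phi) = -Kx*mx^2 + Kp*mz^2 with
# mx = sin(t)*cos(p) and mz = cos(t), via central finite differences.
import numpy as np

Kx, Kp, t, p = 1e5, 6e4, 0.17, 0.23
E = lambda t, p: -Kx * (np.sin(t) * np.cos(p)) ** 2 + Kp * np.cos(t) ** 2

eps = 1e-7
dE_dt = (E(t + eps, p) - E(t - eps, p)) / (2 * eps)
dE_dp = (E(t, p + eps) - E(t, p - eps)) / (2 * eps)

mx, mz = np.sin(t) * np.cos(p), np.cos(t)
pE_theta = -2 * Kx * mx * np.cos(p) * np.cos(t) - 2 * Kp * mz * np.sin(t)
pE_phi = 2 * Kx * mx * np.sin(t) * np.sin(p)
assert abs(dE_dt - pE_theta) < 1e-3 and abs(dE_dp - pE_phi) < 1e-3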
finmag
finmag-master/src/finmag/physics/llb/sllg.py
import time import numpy as np import dolfin as df import finmag.util.consts as consts import finmag.native.llb as native_llb from finmag.field import Field from finmag.util import helpers from finmag.physics.effective_field import EffectiveField from finmag.util.meshes import mesh_volume, nodal_volume from finmag.util.fileio import Tablewriter from finmag.energies import Zeeman from finmag.energies import Exchange from finmag.energies import Demag from finmag.util.pbc2d import PeriodicBoundary2D import logging log = logging.getLogger(name="finmag") class SLLG(object): def __init__(self, S1, S3, method='RK2b', checking_length=False, unit_length=1): self.S1 = S1 self.S3 = S3 self.mesh = S1.mesh() self._t = 0 self.time_scale = 1e-9 self._m_field = Field(self.S3, name='m') self.nxyz = self._m_field.f.vector().size() / 3 self._T = np.zeros(self.nxyz) self._alpha = np.zeros(self.nxyz) self.m = np.zeros(3 * self.nxyz) self.field = np.zeros(3 * self.nxyz) self.grad_m = np.zeros(3 * self.nxyz) self.dm_dt = np.zeros(3 * self.nxyz) # Note: nxyz for Ms length is more suitable? self._Ms = np.zeros(3 * self.nxyz) self.pin_fun = None self.method = method self.checking_length = checking_length self.unit_length = unit_length self.DG = df.FunctionSpace(self.mesh, "DG", 0) self._Ms_dg = Field(self.DG) self.effective_field = EffectiveField( self._m_field, self.Ms, self.unit_length) self.zhangli_stt = False self.set_default_values() def set_default_values(self): self.Ms = Field(df.FunctionSpace(self.mesh, 'DG', 0), 8.6e5) # A/m saturation magnetisation self._pins = np.array([], dtype="int") self.volumes = df.assemble( df.dot(df.TestFunction(self.S3), df.Constant([1, 1, 1])) * df.dx).array() self.Volume = mesh_volume(self.mesh) self.real_volumes = self.volumes * self.unit_length ** 3 self.m_pred = np.zeros(self.m.shape) self.integrator = native_llb.StochasticSLLGIntegrator( self.m, self.m_pred, self._Ms, self._T, self.real_volumes, self._alpha, self.stochastic_update_field, self.method) self.alpha = 0.1 self._gamma = consts.gamma self._seed = np.random.random_integers(4294967295) self.dt = 1e-13 self.T = 0 @property def cur_t(self): return self._t * self.time_scale @cur_t.setter def cur_t(self, value): self._t = value / self.time_scale @property def dt(self): return self._dt * self.time_scale @property def gamma(self): return self._gamma @property def seed(self): return self._seed @seed.setter def seed(self, value): self._seed = value self.setup_parameters() @gamma.setter def gamma(self, value): self._gamma = value self.setup_parameters() @dt.setter def dt(self, value): self._dt = value / self.time_scale self.setup_parameters() def setup_parameters(self): # print 'seed:', self.seed self.integrator.set_parameters( self.dt, self.gamma, self.seed, self.checking_length) log.info("seed=%d." % self.seed) log.info("dt=%g." % self.dt) log.info("gamma=%g." 
% self.gamma) #log.info("checking_length: "+str(self.checking_length)) def set_m(self, value, normalise=True): m_tmp = helpers.vector_valued_function( value, self.S3, normalise=normalise).vector().array() self._m_field.set_with_numpy_array_debug(m_tmp) self.m[:] = self._m_field.get_numpy_array_debug() def advance_time(self, t): tp = t / self.time_scale if tp <= self._t: return try: while tp - self._t > 1e-12: if self.zhangli_stt: self.integrator.run_step(self.field, self.grad_m) else: self.integrator.run_step(self.field) self._m_field.set_with_numpy_array_debug(self.m) self._t += self._dt except Exception, error: log.info(error) raise Exception(error) if abs(tp - self._t) < 1e-12: self._t = tp def stochastic_update_field(self, y): self._m_field.set_with_numpy_array_debug(y) self.field[:] = self.effective_field.compute(self.cur_t)[:] if self.zhangli_stt: self.grad_m[:] = self.compute_gradient_field()[:] @property def T(self): return self._T @T.setter def T(self, value): self._T[:] = helpers.scalar_valued_function( value, self.S1).vector().array()[:] log.info('Temperature : %g', self._T[0]) @property def alpha(self): return self._alpha @alpha.setter def alpha(self, value): self._alpha[:] = helpers.scalar_valued_function( value, self.S1).vector().array()[:] def set_alpha(self, value): """ for compability reasons with LLG """ self.alpha = value @property def Ms(self): return self._Ms_dg @Ms.setter def Ms(self, value): dg_fun = Field(self.DG, value) self._Ms_dg.vector().set_local(dg_fun.vector().get_local()) # FIXME: change back to DG space. #self._Ms_dg=helpers.scalar_valued_function(value, self.S1) self._Ms_dg.name = 'Saturation magnetisation' self.volumes = df.assemble(df.TestFunction(self.S1) * df.dx) Ms = df.assemble( self._Ms_dg.f * df.TestFunction(self.S1) * df.dx).array() / self.volumes.array() self._Ms = Ms.copy() self.Ms_av = np.average(self._Ms_dg.vector().array()) #self._Ms_dg = value.f#.vector().set_local( #helpers.scalar_valued_dg_function(value, self.mesh).vector().array()) #tmp = df.assemble( # self._Ms_dg * df.dot(df.TestFunction(self.S3), df.Constant([1, 1, 1])) * df.dx) #tmp = tmp / self.volumes #self._Ms[:] = tmp[:] def m_average_fun(self, dx=df.dx): """ Compute and return the average polarisation according to the formula :math:`\\langle m \\rangle = \\frac{1}{V} \int m \: \mathrm{d}V` """ # mx = df.assemble(self._Ms_dg*df.dot(self._m, df.Constant([1, 0, 0])) * dx) # my = df.assemble(self._Ms_dg*df.dot(self._m, df.Constant([0, 1, 0])) * dx) # mz = df.assemble(self._Ms_dg*df.dot(self._m, df.Constant([0, 0, 1])) * dx) # volume = df.assemble(self._Ms_dg*dx) # # return np.array([mx, my, mz]) / volume return self._m_field.average(dx=dx) m_average = property(m_average_fun) def compute_gradient_matrix(self): """ compute (J nabla) m , we hope we can use a matrix M such that M*m = (J nabla)m. 
""" tau = df.TrialFunction(self.S3) sigma = df.TestFunction(self.S3) self.nodal_volume_S3 = nodal_volume(self.S3) * self.unit_length dim = self.S3.mesh().topology().dim() ty = tz = 0 tx = self._J[0] * df.dot(df.grad(tau)[:, 0], sigma) if dim >= 2: ty = self._J[1] * df.dot(df.grad(tau)[:, 1], sigma) if dim >= 3: tz = self._J[2] * df.dot(df.grad(tau)[:, 2], sigma) self.gradM = df.assemble((tx + ty + tz) * df.dx) #self.gradM = df.assemble(df.dot(df.dot(self._J, df.nabla_grad(tau)),sigma)*df.dx) def compute_gradient_field(self): self.gradM.mult(self._m_field.f.vector(), self.H_gradm) return self.H_gradm.array() / self.nodal_volume_S3 def use_zhangli(self, J_profile=(1e10, 0, 0), P=0.5, beta=0.01, using_u0=False, with_time_update=None): self.zhangli_stt = True self.fun_zhangli_time_update = with_time_update self.P = P self.beta = beta self._J = helpers.vector_valued_function(J_profile, self.S3) self.J = self._J.vector().array() self.compute_gradient_matrix() self.H_gradm = df.PETScVector() self.integrator = native_llb.StochasticLLGIntegratorSTT( self.m, self.m_pred, self._Ms, self._T, self.real_volumes, self._alpha, self.P, self.beta, self.stochastic_update_field, self.method) # seems that in the presence of current, the time step have to very # small self.dt = 1e-14 # TODO: fix the using_u0 here. if __name__ == "__main__": mesh = df.Box(0, 0, 0, 5, 5, 5, 1, 1, 1) sim = SLLG(mesh, 8.6e5, unit_length=1e-9) sim.alpha = 0.1 sim.set_m((1, 0, 0)) ts = np.linspace(0, 1e-9, 11) print sim.Ms sim.T = 2000 sim.dt = 1e-14 H0 = 1e6 sim.add(Zeeman((0, 0, H0))) A = helpers.scalar_valued_dg_function(13.0e-12, mesh) exchange = Exchange(A) sim.add(exchange) demag = Demag(solver='FK') sim.add(demag) print exchange.Ms.vector().array() for t in ts: sim.run_until(t) print sim.m_average
9,355
28.514196
107
py
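# A minimal standalone sketch (not from the finmag sources) of the
# internal time scaling used by SLLG above: times are stored in units
# of time_scale = 1e-9 s, and the cur_t/dt properties convert on the
# way in and out. The class name here is hypothetical.
class ScaledClock(object):
    time_scale = 1e-9  # seconds per internal time unit

    def __init__(self):
        self._t = 0.0  # internal units

    @property
    def t(self):
        return self._t * self.time_scale

    @t.setter
    def t(self, value):
        self._t = value / self.time_scale

clock = ScaledClock()
clock.t = 2.5e-9            # set in seconds...
assert clock._t == 2.5      # ...stored in nanosecond units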
finmag
finmag-master/src/finmag/physics/llb/exchange.py
import logging
import dolfin as df
from aeon import timer
from finmag.energies.energy_base import EnergyBase
from finmag.util.consts import mu0
from finmag.physics.llb.material import Material

logger = logging.getLogger('finmag')


class ExchangeStd(EnergyBase):

    """
    Compute the exchange field for the LLB case.

    Notes: This class only works for a single material, which means that
    even though the magnetisation is not normalised, a constant value m_e
    everywhere is expected.

    .. math::

        E_{\\text{exch}} = \\int_\\Omega A (\\nabla M)^2  dx


    *Arguments*

        C
            the exchange constant

        method
            possible methods are
                * 'box-assemble'
                * 'box-matrix-numpy'
                * 'box-matrix-petsc' [Default]
                * 'project'

            See documentation of EnergyBase class for details.


    *Example of Usage*

        .. code-block:: python

            from dolfin import *
            Ms = 0.8e6
            m = 1e-8
            n = 5
            mesh = BoxMesh(df.Point(0, m, 0), df.Point(m, 0, m), n, n, n)

            S3 = VectorFunctionSpace(mesh, "Lagrange", 1)
            C = 1.3e-11  # J/m exchange constant
            Me = 1.0     # equilibrium magnetisation (1 at T = 0)
            M = project(Constant((Ms, 0, 0)), S3)  # Initial magnetisation

            exchange = ExchangeStd(C)
            exchange.setup(S3, M, Ms, Me)

            # Print energy
            print exchange.compute_energy()

            # Exchange field
            H_exch = exchange.compute_field()

            # Using 'box-matrix-numpy' method (fastest for small matrices)
            exchange_np = ExchangeStd(C, method='box-matrix-numpy')
            exchange_np.setup(S3, M, Ms, Me)
            H_exch_np = exchange_np.compute_field()

    """

    def __init__(self, C, method="box-matrix-petsc"):
        super(ExchangeStd, self).__init__(method, in_jacobian=True)
        self.C = C

    @timer.method
    def setup(self, S3, m, Ms, Me, unit_length=1):
        self.Me = Me

        # expression for the energy
        exchange_factor = df.Constant(
            1 * self.C / (mu0 * Ms * unit_length ** 2))
        self.exchange_factor = exchange_factor  # XXX

        E = exchange_factor * mu0 * Ms \
            * df.inner(df.grad(m), df.grad(m))

        # Needed for energy density
        S1 = df.FunctionSpace(S3.mesh(), "CG", 1)
        w = df.TestFunction(S1)
        nodal_E = Ms * mu0 * df.dot(self.exchange_factor
                                    * df.inner(df.grad(m), df.grad(m)), w) * df.dx

        super(ExchangeStd, self).setup(
            E_integrand=E,
            nodal_E=nodal_E,
            S3=S3,
            m=m,
            Ms=Ms,
            unit_length=unit_length)

    def compute_field(self):
        """
        Compute the field associated with the energy.

         *Returns*
            numpy.ndarray
                The coefficients of the dolfin-function in a numpy array.
""" Hex = super(ExchangeStd, self).compute_field() return Hex / self.Me ** 2 class Exchange(object): def __init__(self, mat, in_jacobian=False): self.C = mat._A_dg self.me = mat._m_e self.in_jacobian = in_jacobian @timer.method def setup(self, S3, m, Ms0, unit_length=1.0): self.S3 = S3 self.m = m self.Ms0 = Ms0 self.unit_length = unit_length self.mu0 = mu0 self.exchange_factor = 2.0 / (self.mu0 * Ms0 * self.unit_length ** 2) u3 = df.TrialFunction(S3) v3 = df.TestFunction(S3) self.K = df.PETScMatrix() df.assemble( self.C * df.inner(df.grad(u3), df.grad(v3)) * df.dx, tensor=self.K) self.H = df.PETScVector() self.vol = df.assemble( df.dot(v3, df.Constant([1, 1, 1])) * df.dx).array() self.coeff = -self.exchange_factor / (self.vol * self.me ** 2) def compute_field(self): self.K.mult(self.m.vector(), self.H) return self.coeff * self.H.array() if __name__ == "__main__": mesh = df.BoxMesh(df.Point(0, 0, 0), df.Point(10, 1, 1), 10, 1, 1) S3 = df.VectorFunctionSpace(mesh, "Lagrange", 1) C = 1.3e-11 # J/m exchange constant expr = df.Expression(('4.0*sin(x[0])', '4*cos(x[0])', '0'), degree=1) m0 = df.interpolate(expr, S3) from finmag.physics.llb.material import Material mat = Material(mesh, name='FePt') mat.set_m(expr) mat.T = 1 mat.alpha = 0.01 exch = Exchange(mat) exch.setup(mat.S3, mat._m, mat.Ms0, unit_length=1e-9) #exch2 = ExchangeStd(mat) #exch2.setup(mat.S3, mat._m, mat.Ms0, unit_length=1e-9) # print max(exch2.compute_field()-exch.compute_field()) print exch.compute_field() # print timings.report()
4,723
25.840909
82
py
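# A minimal standalone finite-difference sketch (not from the finmag
# sources) sanity-checking the exchange-field prefactor 2*A/(mu0*Ms)
# used above: for a spin spiral m = (cos(qx), sin(qx), 0) the continuum
# exchange field is H = (2*A/(mu0*Ms)) * laplace(m)
#                     = -(2*A*q^2/(mu0*Ms)) * m.
# All numbers below are toy values.
import numpy as np

mu0 = 4e-7 * np.pi
A, Ms, q, dx = 1.3e-11, 8.6e5, 1e8, 1e-10

x = np.arange(64) * dx
m = np.array([np.cos(q * x), np.sin(q * x), np.zeros_like(x)])
lap = (np.roll(m, 1, axis=1) - 2 * m + np.roll(m, -1, axis=1)) / dx ** 2
H = 2 * A / (mu0 * Ms) * lap

ref = -(2 * A * q ** 2 / (mu0 * Ms)) * m
# interior nodes match the analytic value (np.roll wraps at the ends)
assert np.allclose(H[:, 2:-2], ref[:, 2:-2],
                   atol=1e-3 * np.abs(ref).max())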
finmag
finmag-master/src/finmag/physics/llb/llb_test.py
import os import pytest import dolfin as df import numpy as np import matplotlib as mpl mpl.use("Agg") import matplotlib.pyplot as plt from finmag.physics.llb.llb import LLB from finmag.physics.llb.exchange import Exchange from finmag.energies import Zeeman from finmag.energies import Demag from finmag.physics.llb.material import Material MODULE_DIR = os.path.dirname(os.path.abspath(__file__)) @pytest.mark.xfail def test_llb_sundials(do_plot=False): mesh = df.BoxMesh(df.Point(0, 0, 0), df.Point(2, 2, 2), 1, 1, 1) mat = Material(mesh, name='FePt', unit_length=1e-9) mat.set_m((1, 0, 0)) mat.T = 10 mat.alpha = 0.1 sim = LLB(mat) sim.set_up_solver() H0 = 1e5 sim.add(Zeeman((0, 0, H0))) dt = 1e-12 ts = np.linspace(0, 1000 * dt, 101) precession_coeff = sim.gamma_LL mz_ref = [] mxyz = [] mz = [] real_ts = [] for t in ts: sim.run_until(t) real_ts.append(sim.t) mz_ref.append(np.tanh(precession_coeff * mat.alpha * H0 * sim.t)) mz.append(sim.m[-1]) # same as m_average for this macrospin problem sim.m.shape = (3, -1) mxyz.append(sim.m[:, -1].copy()) sim.m.shape = (-1,) mxyz = np.array(mxyz) mz = np.array(mz) print np.sum(mxyz ** 2, axis=1) - 1 if do_plot: ts_ns = np.array(real_ts) * 1e9 plt.plot(ts_ns, mz, "b.", label="computed") plt.plot(ts_ns, mz_ref, "r-", label="analytical") plt.xlabel("time (ns)") plt.ylabel("mz") plt.title("integrating a macrospin") plt.legend() plt.savefig(os.path.join(MODULE_DIR, "test_llb.png")) print("Deviation = {}".format(np.max(np.abs(mz - mz_ref)))) #assert np.max(np.abs(mz - mz_ref)) < 1e-7 def sim_llb_100(do_plot=False): mesh = df.BoxMesh(df.Point(0, 0, 0), df.Point(10, 10, 10), 1, 1, 1) mat = Material(mesh, name='FePt', unit_length=1e-9) mat.set_m((1, 0, 0)) mat.T = 100 mat.alpha = 0.1 print mat.Ms0 print mat.volumes print mat.mat.chi_par(100) sim = LLB(mat) sim.set_up_stochastic_solver(using_type_II=True) H0 = 1e5 sim.add(Zeeman((0, 0, H0))) dt = 1e-12 ts = np.linspace(0, 100 * dt, 101) precession_coeff = sim.gamma_LL mz_ref = [] mz = [] real_ts = [] for t in ts: sim.run_until(t) real_ts.append(sim.t) mz_ref.append(np.tanh(precession_coeff * mat.alpha * H0 * sim.t)) # same as m_average for this macrospin problem mz.append(sim.m_average) mz = np.array(mz) print mz if do_plot: ts_ns = np.array(real_ts) * 1e9 plt.plot(ts_ns, mz, "b.", label="computed") plt.plot(ts_ns, mz_ref, "r-", label="analytical") plt.xlabel("time (ns)") plt.ylabel("mz") plt.title("integrating a macrospin") plt.legend() plt.savefig(os.path.join(MODULE_DIR, "test_llb_100K.png")) @pytest.mark.xfail def test_llb_save_data(): mesh = df.BoxMesh(df.Point(0, 0, 0), df.Point(10, 10, 5), 2, 2, 1) def region1(coords): if coords[2] < 0.5: return True else: return False def region2(coords): return not region1(coords) def init_Ms(coords): if region1(coords) > 0: return 8.6e5 else: return 8.0e5 def init_T(pos): return 1 * pos[2] mat = Material(mesh, name='FePt', unit_length=1e-9) mat.Ms = init_Ms mat.set_m((1, 0, 0)) mat.T = init_T mat.alpha = 0.1 assert(mat.T[0] == 0) sim = LLB(mat, name='test_llb') sim.set_up_solver() ts = np.linspace(0, 1e-11, 11) H0 = 1e6 sim.add(Zeeman((0, 0, H0))) sim.add(Exchange(mat)) demag = Demag(solver='FK') sim.add(demag) sim.save_m_in_region(region1, name='bottom') sim.save_m_in_region(region2, name='top') sim.schedule('save_ndt', every=1e-12) for t in ts: print 't===', t sim.run_until(t) def llb_relax(): mesh = df.BoxMesh(df.Point(0, 0, 0), df.Point(10, 10, 5), 2, 2, 1) mat = Material(mesh, name='FePt', unit_length=1e-9) mat.set_m((1, 0, 0)) mat.T = 0 mat.alpha = 0.1 assert(mat.T[0] 
== 0) sim = LLB(mat, name='llb_relax') sim.set_up_solver() H0 = 1e6 sim.add(Zeeman((0, 0, H0))) sim.add(Exchange(mat)) demag = Demag(solver='FK') sim.add(demag) sim.schedule('save_vtk', at_end=True, filename='p0.pvd') sim.schedule('save_ndt', at_end=True) sim.relax() if __name__ == "__main__": test_llb_sundials(do_plot=True) # sim_llb_100(do_plot=True) # test_llb_save_data() # llb_relax()
4,687
22.44
76
py
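# A minimal standalone sketch (not from the finmag sources) of why the
# tests above compare against mz_ref = tanh(gamma' * alpha * H0 * t):
# for a macrospin starting in the x-y plane with a field H0 along z,
# the LLG damping term gives dmz/dt = alpha*gamma'*H0*(1 - mz^2), whose
# closed-form solution is a tanh. Forward-Euler check with toy numbers:
import numpy as np

alpha, gamma_p, H0 = 0.1, 2.21e5, 1e5
dt, steps = 1e-13, 20000

mz = 0.0
for n in range(1, steps + 1):
    mz += dt * alpha * gamma_p * H0 * (1 - mz ** 2)
    if n % 5000 == 0:
        assert abs(mz - np.tanh(alpha * gamma_p * H0 * n * dt)) < 1e-3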
finmag
finmag-master/src/finmag/physics/llb/material.py
import logging
import numpy as np
import dolfin as df

import finmag.util.consts as consts
import finmag.native.llb as native_llb
from finmag.util import helpers
from finmag.field import Field


logger = logging.getLogger(name='finmag')


class Material(object):

    """
    This class collects material properties in one place, in particular
    the common parameters Ms, A and K, since these properties may respond
    differently to the temperature T.

    Another reason for its existence is that in the FEM framework the
    saturation magnetisation Ms naturally lives on cells, while for the
    dynamics it is convenient to use the corresponding nodal definition;
    keeping both in one class avoids confusion. Although the magnetisation
    M(r) is traditionally split into the unit magnetisation m(r), stored
    on nodes, and Ms, stored on cells, we focus on the magnetisation M(r)
    itself and pass it to other classes such as Exchange, Anisotropy and
    Demag. The Ms attribute of this class is therefore mainly intended
    for user input.

    An alternative way to avoid this confusion would be to define
    different classes for different scenarios; for example, a simulation
    of a single material at zero temperature could use a class with a
    constant Ms. Here, we adapt this class to the LLB case.

    """

    def __init__(self, mesh, name='FePt', unit_length=1):
        self.mesh = mesh
        self.name = name
        self.S1 = df.FunctionSpace(mesh, "Lagrange", 1)
        self.S3 = df.VectorFunctionSpace(mesh, "Lagrange", 1, dim=3)
        self.nxyz = mesh.num_vertices()

        self._m = Field(self.S3, name='m')

        self._T = np.zeros(self.nxyz)
        self._Ms = np.zeros(3 * self.nxyz)
        self._m_e = np.zeros(3 * self.nxyz)
        self.inv_chi_par = np.zeros(self.nxyz)
        self.h = np.zeros(3 * self.nxyz)

        self.unit_length = unit_length

        self.alpha = 0.5
        self.gamma_LL = consts.gamma

        if self.name == 'FePt':
            self.Tc = 660
            self.Ms0 = 1047785.4656
            self.A0 = 2.148042e-11
            self.K0 = 8.201968e6
            self.mu_a = 2.99e-23
        elif self.name == 'Nickel':
            self.Tc = 630
            self.Ms0 = 4.9e5
            self.A0 = 9e-12
            self.K0 = 0
            self.mu_a = 0.61e-23
        elif self.name == 'Permalloy':
            self.Tc = 870
            self.Ms0 = 8.6e5
            self.A0 = 13e-12
            self.K0 = 0
            # TODO: find the correct mu_a for permalloy
            self.mu_a = 1e-23
        else:
            raise NotImplementedError(
                "Only FePt, Nickel and Permalloy are available")

        self.volumes = df.assemble(df.dot(df.TestFunction(self.S3),
                                          df.Constant([1, 1, 1])) * df.dx).array()
        self.real_vol = self.volumes * self.unit_length ** 3

        self.mat = native_llb.Materials(
            self.Ms0, self.Tc, self.A0, self.K0, self.mu_a)

        dg = df.FunctionSpace(mesh, "DG", 0)
        self._A_dg = df.Function(dg)
        self._m_e_dg = df.Function(dg)

        self.T = 0
        self.Ms = self.Ms0 * self._m_e_dg.vector().array()

    @property
    def me(self):
        return self._m_e[0]

    def compute_field(self):
        native_llb.compute_relaxation_field(
            self._T, self.m, self.h, self._m_e, self.inv_chi_par, self.Tc)
        return self.h

    @property
    def T(self):
        return self._T

    @T.setter
    def T(self, value):
        self._T[:] = helpers.scalar_valued_function(
            value, self.S1).vector().array()[:]

        self._T_dg = helpers.scalar_valued_dg_function(value, self.mesh)

        As = self._A_dg.vector().array()
        Ts = self._T_dg.vector().array()
        mes = self._m_e_dg.vector().array()
        for i in range(len(Ts)):
            As[i] = self.mat.A(Ts[i])
            mes[i] = self.mat.m_e(Ts[i])
        self._A_dg.vector().set_local(As)
        self._m_e_dg.vector().set_local(mes)

        self._m_e.shape = (3, -1)
        for i in range(len(self._T)):
            self._m_e[:, i] = self.mat.m_e(self._T[i])
            self.inv_chi_par[i] = self.mat.inv_chi_par(self._T[i])
        self._m_e.shape = (-1,)
parameters self.inv_chi_perp = self.mat.inv_chi_perp(self._T[0]) @property def Ms(self): return self._Ms @Ms.setter def Ms(self, value): self._Ms_dg = helpers.scalar_valued_dg_function(value, self.mesh) tmp_Ms = df.assemble( self._Ms_dg * df.dot(df.TestFunction(self.S3), df.Constant([1, 1, 1])) * df.dx) / self.volumes self._Ms[:] = tmp_Ms[:] @property def m(self): """ not too good since this will return a copy try to solve this later """ return self._m.vector().array() def set_m(self, value, **kwargs): """ Set the magnetisation (scaled automatically). There are several ways to use this function. Either you provide a 3-tuple of numbers, which will get cast to a dolfin.Constant, or a dolfin.Constant directly. Then a 3-tuple of strings (with keyword arguments if needed) that will get cast to a dolfin.Expression, or directly a dolfin.Expression. You can provide a numpy.ndarray of nodal values of shape (3*n,), where n is the number of nodes. Finally, you can pass a function (any callable object will do) which accepts the coordinates of the mesh as a numpy.ndarray of shape (3, n) and returns the magnetisation like that as well. You can call this method anytime during the simulation. However, when providing a numpy array during time integration, the use of the attribute m instead of this method is advised for performance reasons and because the attribute m doesn't normalise the vector. """ self._m.set(value) if __name__ == "__main__": mesh = df.UnitCubeMesh(1, 1, 1) mat = Material(mesh, name='Nickel') mat.set_m((1, 0, 0)) mat.T = 3 print mat.T print mat.inv_chi_par print mat.compute_field()
6,255
32.816216
106
py
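The `Material` class above couples Ms, A and the equilibrium magnetisation m_e to the temperature T through its property setters: assigning to `mat.T` re-evaluates A(T), m_e(T) and the susceptibilities. A minimal sketch of driving this, modelled on the file's `__main__` block (the temperature values here are illustrative, not from the source):

import dolfin as df
from finmag.physics.llb.material import Material

mesh = df.UnitCubeMesh(1, 1, 1)
mat = Material(mesh, name='Nickel')
mat.set_m((1, 0, 0))
for T in [0, 100, 300]:
    mat.T = T              # triggers the T.setter above
    print T, mat.me        # equilibrium magnetisation at this temperature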
finmag
finmag-master/src/finmag/physics/llb/__init__.py
# FinMag - a thin layer on top of FEniCS to enable micromagnetic multi-physics simulations
# Copyright (C) 2012 University of Southampton
# Do not distribute
#
# CONTACT: [email protected]
#
# AUTHOR(S) OF THIS FILE: Dmitri Chernyshenko ([email protected])
270
32.875
90
py
finmag
finmag-master/src/finmag/physics/llb/llb.py
import dolfin as df
import numpy as np
import inspect
from aeon import timer
from finmag.native import sundials
import finmag.native.llb as native_llb
from finmag.energies import Zeeman
from finmag.energies import Demag
from finmag.physics.llb.exchange import Exchange
from finmag.physics.llb.material import Material
from finmag.util import helpers
from finmag.util.vtk_saver import VTKSaver
from finmag.util.fileio import Tablewriter
from finmag.scheduler import scheduler, derivedevents
from finmag.util.pbc2d import PeriodicBoundary2D

import logging
log = logging.getLogger(name="finmag")

ONE_DEGREE_PER_NS = 17453292.5  # in rad/s


class LLB(object):

    def __init__(self, mat, method='RK2b', name='unnamed', pbc2d=None):
        self.material = mat
        self._m = mat._m
        self.m = self._m.vector().array()
        self.S1 = mat.S1
        self.S3 = mat.S3
        self.mesh = self.S1.mesh()
        self.dm_dt = np.zeros(self.m.shape)
        self.H_eff = np.zeros(self.m.shape)
        self.time_scale = 1e-9
        self.method = method
        self.pbc2d = pbc2d
        self.set_default_values()
        self.interactions.append(mat)

        if self.pbc2d:
            self.pbc2d = PeriodicBoundary2D(self.S3)

        self.name = name
        self.sanitized_name = helpers.clean_filename(name)
        self.logfilename = self.sanitized_name + '.log'
        self.ndtfilename = self.sanitized_name + '.ndt'
        helpers.start_logging_to_file(
            self.logfilename, mode='w', level=logging.DEBUG)

        self.scheduler = scheduler.Scheduler()

        self.domains = df.CellFunction("uint", self.mesh)
        self.domains.set_all(0)
        self.region_id = 0

        self.tablewriter = Tablewriter(self.ndtfilename, self, override=True)

        self.overwrite_pvd_files = False
        self.vtk_export_filename = self.sanitized_name + '.pvd'
        self.vtk_saver = VTKSaver(self.vtk_export_filename, overwrite=True)

        self.scheduler_shortcuts = {
            'save_ndt': LLB.save_ndt,
            'save_vtk': LLB.save_vtk,
        }

    def set_default_values(self):
        self.alpha = self.material.alpha
        self.gamma_G = 2.21e5  # m/(As)
        self.gamma_LL = self.gamma_G / (1. + self.alpha ** 2)
        self.t = 0.0  # s
        self.do_precession = True
        self.vol = df.assemble(df.dot(df.TestFunction(self.S3),
                                      df.Constant([1, 1, 1])) * df.dx).array()
        self.real_vol = self.vol * self.material.unit_length ** 3
        self.nxyz = self.mesh.num_vertices()
        self._alpha = np.zeros(self.nxyz)
        self.pins = []
        self._pre_rhs_callables = []
        self._post_rhs_callables = []
        self.interactions = []

    def set_up_solver(self, reltol=1e-8, abstol=1e-8, nsteps=10000):
        integrator = sundials.cvode(sundials.CV_BDF, sundials.CV_NEWTON)
        integrator.init(self.sundials_rhs, 0, self.m)
        integrator.set_linear_solver_sp_gmr(sundials.PREC_NONE)
        integrator.set_scalar_tolerances(reltol, abstol)
        integrator.set_max_num_steps(nsteps)
        self.integrator = integrator
        self.method = 'cvode'

    def set_up_stochastic_solver(self, dt=1e-13, using_type_II=True):
        self.using_type_II = using_type_II

        M_pred = np.zeros(self.m.shape)
        integrator = native_llb.StochasticLLBIntegrator(
            self.m,
            M_pred,
            self.material.Ms,
            self.material.T,
            self.material.real_vol,
            self.pins,
            self.stochastic_rhs,
            self.method)
        self.integrator = integrator
        self._seed = np.random.random_integers(4294967295)
        self.dt = dt

    @property
    def t(self):
        return self._t * self.time_scale

    @t.setter
    def t(self, value):
        self._t = value / self.time_scale

    @property
    def dt(self):
        return self._dt * self.time_scale

    @dt.setter
    def dt(self, value):
        self._dt = value / self.time_scale
        log.info("dt=%g." % self.dt)
        self.setup_parameters()

    @property
    def seed(self):
        return self._seed

    @seed.setter
    def seed(self, value):
        self._seed = value
        log.info("seed=%d." % self._seed)
        self.setup_parameters()

    def set_pins(self, nodes):
        pinlist = []
        if hasattr(nodes, '__call__'):
            coords = self.mesh.coordinates()
            for i, c in enumerate(coords):
                if nodes(c):
                    pinlist.append(i)
        else:
            pinlist = nodes

        self._pins = np.array(pinlist, dtype="int")
        if self.pbc2d:
            self._pins = np.concatenate([self.pbc2d.ids_pbc, self._pins])

        if len(self._pins) > 0:
            self.nxyz = self.S1.mesh().num_vertices()
            assert(np.min(self._pins) >= 0)
            assert(np.max(self._pins) < self.nxyz)

    def pins(self):
        return self._pins
    pins = property(pins, set_pins)

    def set_spatial_alpha(self, value):
        self._alpha[:] = helpers.scalar_valued_function(
            value, self.S1).vector().array()[:]

    def setup_parameters(self):
        self.integrator.set_parameters(self.dt,
                                       self.gamma_LL,
                                       self.alpha,
                                       self.material.Tc,
                                       self.seed,
                                       self.do_precession,
                                       self.using_type_II)

    def add(self, interaction):
        interaction.setup(self.material._m,
                          self.material.Ms0,
                          unit_length=self.material.unit_length)
        self.interactions.append(interaction)

        if interaction.__class__.__name__ == 'Zeeman':
            self.zeeman_interaction = interaction
            self.tablewriter.add_entity('zeeman', {
                'unit': '<A/m>',
                'get': lambda sim: sim.zeeman_interaction.average_field(),
                'header': ('h_x', 'h_y', 'h_z')})

    def compute_effective_field(self):
        self.H_eff[:] = 0
        for interaction in self.interactions:
            self.H_eff += interaction.compute_field()

    def total_energy(self):
        # FIXME: change to the real total energy
        return 0

    def stochastic_rhs(self, y):
        self._m.vector().set_local(y)

        for func in self._pre_rhs_callables:
            func(self.t)

        self.compute_effective_field()

        for func in self._post_rhs_callables:
            func(self)

    def sundials_rhs(self, t, y, ydot):
        self.t = t
        self._m.vector().set_local(y)

        for func in self._pre_rhs_callables:
            func(self.t)

        self.compute_effective_field()

        timer.start("sundials_rhs", self.__class__.__name__)
        # Use the same characteristic time as defined by c
        native_llb.calc_llb_dmdt(self._m.vector().array(),
                                 self.H_eff,
                                 self.dm_dt,
                                 self.material.T,
                                 self.pins,
                                 self._alpha,
                                 self.gamma_LL,
                                 self.material.Tc,
                                 self.do_precession)
        timer.stop("sundials_rhs", self.__class__.__name__)

        for func in self._post_rhs_callables:
            func(self)

        ydot[:] = self.dm_dt[:]
        return 0

    def run_with_scheduler(self):
        if self.method == 'cvode':
            run_fun = self.run_until_sundial
        else:
            run_fun = self.run_until_stochastic

        for t in self.scheduler:
            run_fun(t)
            self.scheduler.reached(t)
        self.scheduler.finalise(t)

    def run_until(self, time):
        # Define function that stops integration and add it to scheduler. The
        # at_end parameter is required because t can be zero, which is
        # considered as False for comparison purposes in scheduler.add.
        def StopIntegration():
            return False
        self.scheduler.add(StopIntegration, at=time, at_end=True)
        self.run_with_scheduler()

    def run_until_sundial(self, t):
        if t <= self.t:
            return
        self.integrator.advance_time(t, self.m)
        self._m.vector().set_local(self.m)
        self.t = t

    def run_until_stochastic(self, t):
        tp = t / self.time_scale

        if tp <= self._t:
            return
        try:
            while tp - self._t > 1e-12:
                self.integrator.run_step(self.H_eff)
                self._m.vector().set_local(self.m)
                if self.pbc2d:
                    self.pbc2d.modify_m(self._m.vector())
                self._t += self._dt
        except Exception, error:
            log.info(error)
            raise Exception(error)

        if abs(tp - self._t) < 1e-12:
            self._t = tp
        log.debug("Integrating dynamics up to t = %g" % t)

    def m_average_fun(self, dx=df.dx):
        """
        Compute and return the average polarisation according to the formula
        :math:`\\langle m \\rangle = \\frac{1}{V} \int m \: \mathrm{d}V`
        """
        mx = df.assemble(
            self.material._Ms_dg * df.dot(self._m, df.Constant([1, 0, 0])) * dx)
        my = df.assemble(
            self.material._Ms_dg * df.dot(self._m, df.Constant([0, 1, 0])) * dx)
        mz = df.assemble(
            self.material._Ms_dg * df.dot(self._m, df.Constant([0, 0, 1])) * dx)
        volume = df.assemble(self.material._Ms_dg * dx)

        return np.array([mx, my, mz]) / volume
    m_average = property(m_average_fun)

    def save_m_in_region(self, region, name='unnamed'):
        self.region_id += 1
        helpers.mark_subdomain_by_function(
            region, self.mesh, self.region_id, self.domains)
        self.dx = df.Measure("dx")[self.domains]

        if name == 'unnamed':
            name = 'region_' + str(self.region_id)

        region_id = self.region_id
        self.tablewriter.add_entity(name, {
            'unit': '<>',
            'get': lambda sim: sim.m_average_fun(dx=self.dx(region_id)),
            'header': (name + '_m_x', name + '_m_y', name + '_m_z')})

    def save_ndt(self):
        # log.debug("Saving average field values for simulation '{}'.".format(self.name))
        self.tablewriter.save()

    def schedule(self, func, *args, **kwargs):
        if isinstance(func, str):
            if func in self.scheduler_shortcuts:
                func = self.scheduler_shortcuts[func]
            else:
                msg = "Scheduling keyword '%s' unknown. Known values are %s" \
                    % (func, self.scheduler_shortcuts.keys())
                log.error(msg)
                raise KeyError(msg)

        func_args = inspect.getargspec(func).args
        illegal_argnames = ['at', 'after', 'every', 'at_end', 'realtime']
        for kw in illegal_argnames:
            if kw in func_args:
                raise ValueError(
                    "The scheduled function must not use any of the following "
                    "argument names: {}".format(illegal_argnames))

        at = kwargs.pop('at', None)
        after = kwargs.pop('after', None)
        every = kwargs.pop('every', None)
        at_end = kwargs.pop('at_end', False)
        realtime = kwargs.pop('realtime', False)

        self.scheduler.add(func, [self] + list(args), kwargs,
                           at=at, at_end=at_end, every=every,
                           after=after, realtime=realtime)

    def save_vtk(self, filename=None):
        """
        Save the magnetisation to a VTK file.
        """
        if filename is not None:
            # Explicitly provided filename overwrites the previously used one.
            self.vtk_export_filename = filename

        # Check whether we're still writing to the same file.
        if self.vtk_saver.filename != self.vtk_export_filename:
            self.vtk_saver.open(
                self.vtk_export_filename, self.overwrite_pvd_files)

        self.vtk_saver.save_field(self._m, self.t)

    def relax(self, stopping_dmdt=ONE_DEGREE_PER_NS, dt_limit=1e-10,
              dmdt_increased_counter_limit=10000):
        """
        Run the simulation until the magnetisation has relaxed.

        This means the magnetisation reaches a state where its change over
        time at each node is smaller than the threshold `stopping_dmdt`
        (which should be given in rad/s).
        """
        relax = derivedevents.RelaxationTimeEvent(
            self, stopping_dmdt, dmdt_increased_counter_limit, dt_limit)
        self.scheduler._add(relax)

        self.run_with_scheduler()
        self.integrator.reinit(self.t, self.m)
        self.scheduler._remove(relax)


if __name__ == '__main__':
    x0 = y0 = z0 = 0
    x1 = 500
    y1 = 10
    z1 = 100
    nx = 50
    ny = 1
    nz = 1
    mesh = df.BoxMesh(df.Point(x0, y0, z0), df.Point(x1, y1, z1), nx, ny, nz)

    mat = Material(mesh, name='FePt')
    mat.set_m((1, 0.2, 0))
    mat.T = 100
    mat.alpha = 0.01

    sim = LLB(mat)
    # sim.set_up_solver()
    sim.set_up_stochastic_solver()

    sim.add(Zeeman((0, 0, 5e5)))
    sim.add(Exchange(mat))
    sim.add(Demag())
    # demag.demag.poisson_solver.parameters["relative_tolerance"] = 1e-8
    # demag.demag.laplace_solver.parameters["relative_tolerance"] = 1e-8

    max_time = 1 * np.pi / (sim.gamma_LL * 1e5)
    ts = np.linspace(0, max_time, num=100)

    mlist = []
    Ms_average = []
    for t in ts:
        print t
        sim.run_until(t)
        mlist.append(sim.m)

    df.plot(sim._m)
    df.interactive()
13,787
30.124153
93
py
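The `schedule` method of `LLB` accepts either a callable or one of the string shortcuts registered in `scheduler_shortcuts` ('save_ndt', 'save_vtk'), plus the timing keywords at/after/every/at_end/realtime. A minimal sketch of a run with scheduled output, assuming `mat` is a Material instance as in the file's `__main__` block (the field value and intervals are illustrative):

sim = LLB(mat)
sim.set_up_stochastic_solver()
sim.add(Zeeman((0, 0, 5e5)))
sim.schedule('save_ndt', every=1e-11)                # string shortcut -> LLB.save_ndt
sim.schedule('save_vtk', every=5e-11, at_end=True)   # string shortcut -> LLB.save_vtk
sim.run_until(1e-9)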
finmag
finmag-master/src/finmag/physics/llb/anisotropy.py
import dolfin as df
import logging
from aeon import timer
from finmag.energies.energy_base import EnergyBase
from material import Material

logger = logging.getLogger('finmag')


class LLBAnisotropy(EnergyBase):

    """
    Compute the anisotropy field for the LLB case:

        H = -(m_x e_x + m_y e_y) / chi_perp
        ==>  E = 0.5 * (m_x^2 + m_y^2) / chi_perp
    """

    def __init__(self, mat, method="box-matrix-petsc"):
        self.e_x = df.Constant([1, 0, 0])
        self.e_y = df.Constant([0, 1, 0])
        self.inv_chi_perp = mat.inv_chi_perp

        super(LLBAnisotropy, self).__init__(method, in_jacobian=True)

    @timer.method
    def setup(self, S3, m, Ms0, unit_length=1):
        #self._m_normed = df.Function(S3)
        self.m = m

        # Testfunction
        self.v = df.TestFunction(S3)

        # Anisotropy energy
        E = 0.5 * \
            ((df.dot(self.e_x, self.m)) ** 2 + df.dot(self.e_y, self.m) ** 2)

        # Needed for energy density
        S1 = df.FunctionSpace(S3.mesh(), "CG", 1)
        w = df.TestFunction(S1)

        # This is only needed if we want the energy density
        # as a df.Function, in order to e.g. probe.
        self.ED = df.Function(S1)

        super(LLBAnisotropy, self).setup(
            E_integrand=E,
            S3=S3,
            m=self.m,
            Ms=Ms0,
            unit_length=unit_length)

    def compute_field(self):
        """
        Compute the field associated with the energy.

        *Returns*
            numpy.ndarray
                The coefficients of the dolfin-function in a numpy array.
        """
        Han = super(LLBAnisotropy, self).compute_field()
        return Han * self.inv_chi_perp


if __name__ == "__main__":
    from dolfin import *
    m = 1e-8
    Ms = 0.8e6
    n = 5
    mesh = BoxMesh(df.Point(0, m, 0), df.Point(m, 0, m), n, n, n)

    mat = Material(mesh)
    mat.set_m((1, 2, 3))

    anis = LLBAnisotropy(mat)
    anis.setup(mat.S3, mat._m, mat.Ms0)

    print anis.compute_field()
    print anis.compute_energy()
    print anis.energy_density()
2,072
23.104651
77
py
finmag
finmag-master/src/finmag/physics/llb/sllg_test.py
import os
import dolfin as df
import numpy as np
import matplotlib as mpl
mpl.use("Agg")
import matplotlib.pyplot as plt
from finmag.physics.llb.sllg import SLLG
from finmag.energies import Zeeman
from finmag.energies import Demag

MODULE_DIR = os.path.dirname(os.path.abspath(__file__))


def plot_random_number():
    from finmag.native.llb import RandomMT19937
    mt = RandomMT19937()
    mt.initial_random(422353390)
    x = np.zeros(10000000, dtype=np.float)
    for i in range(100):
        mt.gaussian_random_np(x)
        if i > 80:
            plt.cla()
            plt.hist(x, 100, normed=1, facecolor='green', alpha=0.75)
            plt.grid(True)
            plt.savefig(os.path.join(MODULE_DIR, "test_mt19937_%d.png" % i))
        print 'step=', i


def plot_random_number_np():
    np.random.seed(422353390)
    for i in range(100):
        x = np.random.randn(10000000)
        plt.cla()
        plt.hist(x, 100, normed=1, facecolor='green', alpha=0.75)
        plt.grid(True)
        plt.savefig(os.path.join(MODULE_DIR, "test_np_%d.png" % i))


if __name__ == "__main__":
    plot_random_number()
    plot_random_number_np()
1,150
22.979167
76
py
finmag
finmag-master/src/finmag/physics/llb/tests/__init__.py
# FinMag - a thin layer on top of FEniCS to enable micromagnetic multi-physics simulations
# Copyright (C) 2012 University of Southampton
# Do not distribute
#
# CONTACT: [email protected]
#
# AUTHOR(S) OF THIS FILE: Dmitri Chernyshenko ([email protected])
270
32.875
90
py
finmag
finmag-master/src/finmag/drivers/llg_integrator.py
import logging
from finmag.field import Field
from finmag.drivers.sundials_integrator import SundialsIntegrator
from finmag.drivers.scipy_integrator import ScipyIntegrator

log = logging.getLogger(name='finmag')


def llg_integrator(llg, m0, backend="sundials", **kwargs):
    # XXX TODO: Passing the tablewriter argument on like this is a
    #           complete hack and this should be refactored. The same
    #           is true with saving snapshots. Neither saving average
    #           fields nor VTK snapshots should probably happen in
    #           this class but rather in the Simulation class (?).
    #           -- Max, 11.12.2012
    #
    # Yes, I think that's right. We could give callback functions
    # to the run_until and relax function to give control back to the
    # simulation class.
    # -- Hans, 17/12/2012
    #
    assert isinstance(m0, Field)
    log.info("Creating integrator with backend {} and arguments {}.".format(
        backend, kwargs))
    if backend == "scipy":
        return ScipyIntegrator(llg, m0, **kwargs)
    elif backend == "sundials":
        return SundialsIntegrator(llg, m0.get_ordered_numpy_array_xxx(), **kwargs)
    else:
        raise ValueError("backend must be either scipy or sundials")
1,284
41.833333
93
py
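`llg_integrator` is a small factory: it dispatches on `backend` and forwards the remaining keyword arguments to the chosen integrator class. Usage as exercised by `test_integrators.py` later in this listing (the `llg` object is assumed to come from a helper such as `setup_domain_wall_cobalt`):

from finmag.drivers.llg_integrator import llg_integrator

integrator = llg_integrator(llg, llg.m_field, "sundials",
                            method="bdf_diag", nsteps=40000)
integrator.advance_time(1e-10)
print integrator.n_rhs_evals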
finmag
finmag-master/src/finmag/drivers/sundials_integrator.py
import logging
from finmag.native import sundials

EPSILON = 1e-15

log = logging.getLogger(name='finmag')


class SundialsIntegrator(object):

    """
    Sundials time integrator. We always start integration from t = 0.

    Attributes:
        cur_t       The time up to which integration has been carried out.
    """

    def __init__(self, llg, m0, t0=0.0, reltol=1e-6, abstol=1e-6,
                 nsteps=10000, method="bdf_gmres_prec_id", tablewriter=None):
        assert method in ("adams", "bdf_diag",
                          "bdf_gmres_no_prec", "bdf_gmres_prec_id")
        self.llg = llg
        self.cur_t = t0
        self.m = m0.copy()
        self.tablewriter = tablewriter

        if method == "adams":
            integrator = sundials.cvode(
                sundials.CV_ADAMS, sundials.CV_FUNCTIONAL)
        else:
            integrator = sundials.cvode(sundials.CV_BDF, sundials.CV_NEWTON)
        self.integrator = integrator

        integrator.init(llg.sundials_rhs, self.cur_t, self.m)

        if method == "bdf_diag":
            integrator.set_linear_solver_diag()
        elif method == "bdf_gmres_no_prec":
            integrator.set_linear_solver_sp_gmr(sundials.PREC_NONE)
            integrator.set_spils_jac_times_vec_fn(self.llg.sundials_jtimes)
        elif method == "bdf_gmres_prec_id":
            integrator.set_linear_solver_sp_gmr(sundials.PREC_LEFT)
            integrator.set_spils_jac_times_vec_fn(self.llg.sundials_jtimes)
            integrator.set_spils_preconditioner(
                llg.sundials_psetup, llg.sundials_psolve)

        integrator.set_scalar_tolerances(reltol, abstol)
        self.max_steps = nsteps

    @property
    def max_steps(self):
        return self._max_steps

    @max_steps.setter
    def max_steps(self, value):
        self._max_steps = value
        self.integrator.set_max_num_steps(value)

    def advance_time(self, t):
        """
        *Arguments*

        t : float
            Target time to integrate to

        Returns ``True`` or ``False`` depending on whether the target time
        ``t`` has been reached.

        Given a target time ``t``, this function integrates towards ``t``.
        If ``max_steps`` was set and the number of steps for the
        integration is reached, we interrupt the calculation and return
        False. If ``t`` is reached within the number of allowed steps, it
        will return True.
        """
        # The following check is required because sundials does not like to
        # integrate up to t=0, if the cvode solver was initialised for t=0.
        if t == 0 and abs(t - self.cur_t) < EPSILON:
            return True
        # if t <= self.cur_t and this is not the value with which we started,
        # we should complain:
        elif t <= self.cur_t:
            raise RuntimeError(
                "t={:.3g}, self.cur_t={:.3g} -- why are we integrating "
                "into the past?".format(t, self.cur_t))

        try:
            self.integrator.advance_time(t, self.m)
        except RuntimeError, msg:
            # if we have reached max_num_steps, the error message will read
            # something like "Error in CVODE:CVode (CV_TOO_MUCH_WORK):
            # At t = 0.258733, mxstep steps taken before reaching tout."
            if "CV_TOO_MUCH_WORK" in msg.message:
                # we have integrated up to cvode's internal time
                self.cur_t = self.integrator.get_current_time()
                log.error("The integrator has reached its maximum of {} steps.\n"
                          "The time is t = {} whereas you requested t = {}.\n"
                          "You can increase the maximum number of steps if "
                          "you really need to with integrator.max_steps = n.".format(
                              self.max_steps, self.integrator.get_current_time(), t))
                # not used, but this would be the right value
                reached_tout = False
                raise
            else:
                reached_tout = False
                raise
        else:
            self.cur_t = t
            reached_tout = True

        # in any case: put integrated degrees of freedom from cvode object
        # back into llg object
        # Weiwei: change the default m to sundials_m since sometimes we need
        # to extend the default equation.
        self.llg.sundials_m = self.m  # actually writes to the field class (c.f. llg.py)
        return reached_tout

    def advance_steps(self, steps):
        """
        Run the integrator for `steps` internal steps.
        """
        old_max_steps = self.max_steps
        self.max_steps = steps
        try:
            # we can't tell sundials to run a certain number of steps
            # so we try integrating for a very long time but set it to
            # stop after the specified number of steps
            self.integrator.advance_time(self.cur_t + 1, self.m)
        except RuntimeError, msg:
            if "CV_TOO_MUCH_WORK" in msg.message:
                pass  # this is the error we expect
            else:
                raise
        self.cur_t = self.integrator.get_current_time()
        self.llg.sundials_m = self.m
        self.max_steps = old_max_steps

    # TODO: Remove debug flag again once we are sure that re-initialising
    # the integrator doesn't cause a performance overhead.
    def reinit(self, debug=True):
        """
        Reinitialise memory for CVODE.

        Useful if there is a drastic (non-continuous) change in the right
        hand side of the ODE. By calling this function, we inform the
        integrator that it should not assume smoothness of the RHS. Should
        be called when we change the applied field abruptly, for example.
        """
        if debug:
            log.debug("Re-initialising CVODE integrator.")
        self.integrator.reinit(self.cur_t, self.llg.sundials_m)  # FIXME: rename sundials_m

    n_rhs_evals = property(lambda self: self.integrator.get_num_rhs_evals(),
                           "Number of function evaluations performed")

    def stats(self):
        """
        Return integrator stats as dictionary. Keys are

        nsteps, nfevals, nlinsetups, netfails, qlast, qcur, hinused, hlast,
        hcur, tcur

        and the meanings are (from CVODE 2.7 documentation, section 4.5,
        page 46)

        nsteps       (long int) number of steps taken by cvode.
        nfevals      (long int) number of calls to the user's f function.
        nlinsetups   (long int) number of calls made to the linear solver
                     setup function.
        netfails     (long int) number of error test failures.
        qlast        (int) method order used on the last internal step.
        qcur         (int) method order to be used on the next internal step.
        hinused      (realtype) actual value of initial step size.
        hlast        (realtype) step size taken on the last internal step.
        hcur         (realtype) step size to be attempted on the next
                     internal step.
        tcur         (realtype) current internal time reached.
        """
        stats = self.integrator.get_integrator_stats()
        nsteps, nfevals, nlinsetups, netfails, qlast, qcur, hinused, hlast, \
            hcur, tcur = stats
        d = {'nsteps': nsteps,
             'nfevals': nfevals,
             'nlinsetups': nlinsetups,
             'netfails': netfails,
             'qlast': qlast,
             'qcur': qcur,
             'hinused': hinused,
             'hlast': hlast,
             'hcur': hcur,
             'tcur': tcur}
        return d
7,695
38.875648
101
py
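Because `advance_time` raises a RuntimeError once CVODE exceeds `max_steps`, callers typically cap the step count and catch the exception; `stats()` exposes CVODE's internal counters. A sketch based on the usage in the driver tests further down in this listing (`sim` is assumed to be a finmag Simulation whose integrator has already been created):

sim.integrator.max_steps = 10
try:
    sim.integrator.advance_time(1e-9)
except RuntimeError:
    # integration was aborted after 10 internal steps
    pass
print sim.integrator.stats()['nsteps'], sim.integrator.cur_t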
finmag
finmag-master/src/finmag/drivers/__init__.py
0
0
0
py
finmag
finmag-master/src/finmag/drivers/scipy_integrator.py
import logging
import numpy as np
from scipy.integrate import ode

EPSILON = 1e-15

log = logging.getLogger(name="finmag")


class ScipyIntegrator(object):

    def __init__(self, llg, m0, reltol=1e-6, abstol=1e-6, nsteps=10000,
                 method="bdf", tablewriter=None, **kwargs):
        self.m_field = m0
        self.solve_for = llg.solve_for
        self.cur_t = 0.0
        self.ode = ode(self.rhs, jac=None)
        self.ode.set_integrator(
            "vode", method=method, rtol=reltol, atol=abstol, nsteps=nsteps,
            **kwargs)
        self.ode.set_initial_value(self.m_field.as_array(), 0)
        self._n_rhs_evals = 0
        self.tablewriter = tablewriter

    n_rhs_evals = property(
        lambda self: self._n_rhs_evals,
        "Number of function evaluations performed")

    def rhs(self, t, y):
        self._n_rhs_evals += 1
        return self.solve_for(y, t)

    def advance_time(self, t):
        if t == 0 and abs(t - self.cur_t) < EPSILON:
            # like sundials, scipy doesn't like integrating to 0 when
            # it was initialized with t = 0
            return
        new_m = self.ode.integrate(t)
        assert self.ode.successful()
        self.m_field.from_array(new_m)
        self.cur_t = t

    def reinit(self):
        log.debug("{}: This integrator doesn't support reinitialisation.".format(
            self.__class__.__name__))
1,359
31.380952
114
py
finmag
finmag-master/src/finmag/drivers/tests/test_integrator_raises_exception_on_exceed_maxsteps.py
import os
import pytest
import finmag

steps = 10


def test_can_change_maxsteps():
    sim = finmag.example.barmini()
    sim.create_integrator()
    sim.integrator.max_steps = steps
    assert sim.integrator.max_steps == steps


def test_time_run_until():
    sim = finmag.example.barmini()
    sim.create_integrator()
    sim.integrator.max_steps = steps
    t = 1e-9
    with pytest.raises(RuntimeError) as exc_info:
        sim.run_until(t)
    assert sim.t < t


def test_time_advance_time():
    sim = finmag.example.barmini()
    sim.create_integrator()
    sim.integrator.max_steps = steps
    t = 1e-9
    with pytest.raises(RuntimeError) as exc_info:
        sim.advance_time(t)
    assert sim.t < t  # check that integration was aborted


def test_time_default_max_steps():
    """The idea for this test was to check the default max_steps, but the
    simulation for this runs about 12 minutes. So I have changed the code
    below to stop after 10 steps. HF, Sept 2014
    """
    sim = finmag.example.barmini()
    t = 20e-9
    # create integrator
    sim.create_integrator()
    # set steps to a small number
    sim.integrator.max_steps = 10
    # now run until we exceed 10 steps
    with pytest.raises(RuntimeError) as exc_info:
        sim.advance_time(t)
    assert sim.t < t
1,301
24.529412
70
py
finmag
finmag-master/src/finmag/drivers/tests/test_relaxation.py
import os
import numpy as np
import dolfin as df
import matplotlib.pyplot as plt
from finmag import Simulation
from finmag.energies import Zeeman, Exchange, Demag
from finmag.util.consts import ONE_DEGREE_PER_NS

MODULE_DIR = os.path.abspath(os.path.dirname(__file__))


def test_easy_relaxation(do_plot=False):
    """
    This is a simulation we expect to relax well, meant to catch
    some obvious errors in the relaxation code.
    """
    mesh = df.BoxMesh(df.Point(0, 0, 0), df.Point(50, 10, 10), 10, 2, 2)
    Ms = 0.86e6
    A = 13.0e-12

    sim = Simulation(mesh, Ms, name="test_relaxation")
    sim.set_m((1, 0, 0))
    sim.add(Zeeman((0, Ms, 0)))
    sim.add(Exchange(A))
    sim.add(Demag())
    sim.schedule(Simulation.save_averages, every=1e-12, at_end=True)
    sim.relax()

    if do_plot:
        plot_averages(sim)
    assert sim.t < 3e-10


def plot_averages(sim):
    fig = plt.figure()
    ax1 = fig.add_subplot(111)

    t, mx, my, mz = np.array(zip(*np.loadtxt("test_relaxation.ndt")))
    t = t * 1e9
    ax1.plot(t, mx, "b-", label="$m_\mathrm{x}$")
    ax1.plot(t, my, "b--", label="$m_\mathrm{y}$")
    ax1.plot(t, mz, "b:", label="$m_\mathrm{z}$")
    ax1.set_xlabel("time (ns)")
    ax1.set_ylabel("average magnetisation", color="b")
    for tl in ax1.get_yticklabels():
        tl.set_color("b")
    ax1.legend()

    ax2 = ax1.twinx()
    t, max_dmdt_norms = np.array(zip(*sim.relaxation['dmdts']))
    ax2.semilogy(t * 1e9, max_dmdt_norms / ONE_DEGREE_PER_NS, "ro")
    ax2.set_ylabel("maximum dm/dt (1/ns)", color="r")
    ax2.axhline(y=1, xmin=0.5, color="red", linestyle="--")
    ax2.annotate("threshold", xy=(0.15, 1.1), color="r")
    #ax2.set_ylabel(r"$\max \left( \left | \frac{\mathrm{d}m}{\mathrm{d}t} \right | \right) \, \left( \mathrm{ns}^{-1} \right )$", color="r", fontsize=16)
    for tl in ax2.get_yticklabels():
        tl.set_color("r")

    plt.savefig(os.path.join(MODULE_DIR, "test_relaxation.png"))


if __name__ == "__main__":
    test_easy_relaxation(do_plot=True)
2,025
30.65625
154
py
finmag
finmag-master/src/finmag/drivers/tests/test_scipy.py
import dolfin as df
from finmag import Simulation


def _test_scipy_advance_time():
    mesh = df.UnitIntervalMesh(10)
    sim = Simulation(mesh, Ms=1, unit_length=1e-9, integrator_backend="scipy")
    sim.set_m((1, 0, 0))
    sim.advance_time(1e-12)
    sim.advance_time(2e-12)
    sim.advance_time(2e-12)
    sim.advance_time(2e-12)


def test_scipy_advance_time_zero_first():
    mesh = df.UnitIntervalMesh(10)
    sim = Simulation(mesh, Ms=1, unit_length=1e-9, integrator_backend="scipy")
    sim.set_m((1, 0, 0))
    sim.advance_time(0)
    sim.advance_time(1e-12)
    sim.advance_time(2e-12)
    sim.advance_time(2e-12)
    sim.advance_time(2e-12)


if __name__ == "__main__":
    test_scipy_advance_time_zero_first()
723
25.814815
78
py
finmag
finmag-master/src/finmag/drivers/tests/sundials_nsteps_test.py
import finmag
import os


def test_integrator_get_set_max_steps(tmpdir):
    """Tests setting and getting of nsteps"""
    os.chdir(str(tmpdir))
    sim = finmag.example.barmini()
    sim.run_until(0)  # create integrator object
    steps = sim.integrator.max_steps
    assert steps != 42  # would be an odd default value
    sim.integrator.max_steps = 42
    steps2 = sim.integrator.max_steps
    assert steps2 == 42
    sim.integrator.max_steps = steps
    assert steps == sim.integrator.max_steps


def test_integrator_stats(tmpdir):
    """Tests the stats"""
    os.chdir(str(tmpdir))
    sim = finmag.example.barmini()
    sim.run_until(0)  # create integrator object
    stats = sim.integrator.stats()
    # All stats should be zero before we do any work
    for key in stats:
        assert stats[key] == 0.0


def test_integrator_n_steps_only(tmpdir):
    """Test integration for a few steps only"""
    os.chdir(str(tmpdir))
    sim = finmag.example.barmini()
    sim.create_integrator()

    assert sim.integrator.stats()['nsteps'] == 0
    sim.integrator.advance_steps(1)
    assert sim.integrator.stats()['nsteps'] == 1
    # check also value of cur_t is up-to-date
    assert sim.integrator.cur_t == sim.integrator.stats()['tcur']
    # expect also last time step size to be the same as current time
    # because we have only done one step
    assert sim.integrator.stats()['tcur'] == sim.integrator.stats()['hlast']

    sim.integrator.advance_steps(1)
    assert sim.integrator.stats()['nsteps'] == 2

    sim.integrator.advance_steps(2)
    assert sim.integrator.stats()['nsteps'] == 4
    # check also value of cur_t is up-to-date
    assert sim.integrator.cur_t == sim.integrator.stats()['tcur']
1,747
28.133333
76
py
finmag
finmag-master/src/finmag/drivers/tests/test_relax_two_times.py
import dolfin as df
from finmag import Simulation
from finmag.energies import Zeeman


def test_relax_two_times():
    """
    Test whether we can call the relax method on Sim two times in a row.
    """
    mesh = df.BoxMesh(df.Point(0, 0, 0), df.Point(10, 10, 10), 2, 2, 2)
    Ms = 0.86e6
    sim = Simulation(mesh, Ms)
    sim.set_m((1, 0, 0))

    external_field = Zeeman((0, Ms, 0))
    sim.add(external_field)
    sim.relax()
    t0 = sim.t  # time needed for first relaxation

    external_field.set_value((0, 0, Ms))
    sim.relax()
    t1 = sim.t - t0  # time needed for second relaxation

    assert sim.t > t0
    assert abs(t1 - t0) < 1e-10
656
22.464286
72
py
finmag
finmag-master/src/finmag/drivers/tests/test_integrators.py
# FinMag - a thin layer on top of FEniCS to enable micromagnetic multi-physics simulations
# Copyright (C) 2012 University of Southampton
# Do not distribute
#
# CONTACT: [email protected]
#
# AUTHOR(S) OF THIS FILE: Dmitri Chernyshenko ([email protected])

import unittest
from finmag.tests.jacobean.domain_wall_cobalt import setup_domain_wall_cobalt, domain_wall_error
from finmag.drivers.llg_integrator import llg_integrator
from datetime import datetime

NODE_COUNT = 100
END_TIME = 1e-10


class IntegratorTests(unittest.TestCase):

    def run_test(self, backend, method, nsteps=40000):
        llg = setup_domain_wall_cobalt(node_count=NODE_COUNT)
        integrator = llg_integrator(
            llg, llg.m_field, backend, method=method, nsteps=nsteps)
        t = datetime.now()
        integrator.advance_time(END_TIME)
        dt = datetime.now() - t
        print "backend=%s, method=%s: elapsed time=%s, n_rhs_evals=%s, error=%g" % (
            backend, method, dt, integrator.n_rhs_evals,
            domain_wall_error(llg.m_field.as_array(), NODE_COUNT))

    def test_scipy_bdf(self):
        self.run_test("scipy", "bdf")

    def test_scipy_adams(self):
        self.run_test("scipy", "adams")

    def test_sundials_adams(self):
        self.run_test("sundials", "adams")

    def test_sundials_bdf_diag(self):
        self.run_test("sundials", "bdf_diag")

    def test_sundials_bdf_gmres_no_prec(self):
        self.run_test("sundials", "bdf_gmres_no_prec")

    def test_sundials_bdf_gmres_prec_id(self):
        self.run_test("sundials", "bdf_gmres_prec_id")
1,632
31.019608
96
py
finmag
finmag-master/src/finmag/drivers/tests/sundials_reinit_test.py
# FinMag - a thin layer on top of FEniCS to enable micromagnetic multi-physics simulations
# Copyright (C) 2012 University of Southampton
# Do not distribute
#
# CONTACT: [email protected]
#
# AUTHOR(S) OF THIS FILE: Hans Fangohr (copied template from Dmitri for
# integrator tests)

"""The purpose of this file is to test whether the reinit() function is
working correctly.

This should tell Sundials' cvode integrator that it should expect some
drastic change in the equation we integrate. While the values of the
dynamic degrees of freedom should remain the same, we want to warn the
integrator that the right hand side may change quickly.

In the magnetic context, this could be a sudden change in the applied
field, which will propagate to the effective field, and thus result in a
sudden change in the torque from the precession term (and others).

After receiving the re-init signal, sundials will forget the history of
the integration, and start to explore the equations again to determine
appropriate time steps.

It would be nice to show that this actually happens, but I have not found
a good example yet. This file can be run directly, and it shows that using
re-init can actually decrease the total number of function evaluations.

It also shows that sundials resets the counters for the evaluations of the
right hand side. So for now, this is the only firm criterion used here for
testing.

Hans, 30 June 2012
"""

from finmag.tests.jacobean.domain_wall_cobalt import setup_domain_wall_cobalt, \
    domain_wall_error
from finmag.drivers.llg_integrator import llg_integrator
from datetime import datetime

NODE_COUNT = 100
END_TIME1 = 0.1e-10
END_TIME2 = END_TIME1 + 0.1e-10


def run_test(backend, method, mode='onego', nsteps=40000):
    llg = setup_domain_wall_cobalt(node_count=NODE_COUNT)
    integrator = llg_integrator(
        llg, llg.m_field, backend, method=method, nsteps=nsteps)
    t = datetime.now()
    if mode == 'onego':
        END_TIME = END_TIME1 + END_TIME2
    elif mode == 'twogoes' or mode == 'twogoesreinit':
        END_TIME = END_TIME1
    else:
        raise ValueError(
            "Can only understand 'onego', 'twogoes', 'twogoesreinit'.")
    integrator.advance_time(END_TIME)
    dt = datetime.now() - t
    print "backend=%s, method=%s: elapsed time=%s, n_rhs_evals=%s, error=%g" % (
        backend, method, dt, integrator.n_rhs_evals,
        domain_wall_error(integrator.m, NODE_COUNT))
    if mode == 'onego':
        return integrator
    if mode == 'twogoesreinit':
        # check that rhs counter goes back to zero
        print "re-initialising"
        integrator.reinit()
        assert integrator.n_rhs_evals == 0
    else:
        print "Not re-initialising"
    integrator.advance_time(END_TIME2)
    print "backend=%s, method=%s: elapsed time=%s, n_rhs_evals=%s, error=%g" % (
        backend, method, dt, integrator.n_rhs_evals,
        domain_wall_error(integrator.m, NODE_COUNT))
    print("second call to integrator.n_rhs_evals ={}".format(
        integrator.n_rhs_evals))
    return integrator


def test_reinit_resets_num_rhs_eval_counter():
    int = run_test("sundials", "bdf_diag", mode='twogoesreinit')
    int = run_test("sundials", "adams", mode='twogoesreinit')
    int = run_test("sundials", "adams", mode='twogoesreinit')
    return


if __name__ == '__main__':
    # the actual test
    test_reinit_resets_num_rhs_eval_counter()

    print "Demo how nhs_rhs_evals changes with and without reinit"
    int = run_test("sundials", "bdf_diag", mode='twogoes')
    int = run_test("sundials", "bdf_gmres_no_prec", mode='twogoesreinit')
    int = run_test("sundials", "adams", mode='onego')

# def not_used_here_test_scipy():
#     return run_test("scipy", "bdf")

# def test_scipy_bdf(self):
#     self.run_test("scipy", "bdf")

# def test_scipy_adams(self):
#     self.run_test("scipy", "adams")

# def test_sundials_adams(self):
#     self.run_test("sundials", "bdf_diag")

# def test_sundials_bdf_diag(self):
#     self.run_test("sundials", "adams")

# def test_sundials_bdf_gmres_no_prec(self):
#     self.run_test("sundials", "bdf_gmres_no_prec")

# def test_sundials_bdf_gmres_prec_id(self):
#     self.run_test("sundials", "bdf_gmres_prec_id")
4,324
33.055118
90
py
finmag
finmag-master/src/finmag/energies/test_energies_in_regions.py
from __future__ import division
import numpy as np
import dolfin as df
import pytest
import os
import finmag
from finmag.field import Field
#from finmag.energies import Zeeman, TimeZeeman, DiscreteTimeZeeman, OscillatingZeeman
from finmag.energies import Zeeman
#from finmag.util.consts import mu0
from finmag.util.meshes import pair_of_disks
from finmag.util.helpers import vector_valued_function
#from math import sqrt, pi, cos, sin


class MultiDomainTest(object):

    def __init__(self, mesh, get_domain_id, m_vals, Ms, unit_length=1e-9):
        """
        `get_domain_id` is a function of the form (x, y, z) -> id which maps
        some point coordinates in the mesh to an integer identifying the
        domain which the point belongs to.
        """
        self.mesh = mesh
        self.get_domain_id = get_domain_id
        self.domain_ids = [get_domain_id(pt) for pt in mesh.coordinates()]
        self.Ms = Field(df.FunctionSpace(mesh, 'DG', 0), Ms)
        self.unit_length = unit_length
        #self.rtol = rtol

        domain_classes = {}
        for k in self.domain_ids:
            class DomainK(df.SubDomain):
                def inside(self, pt, on_boundary):
                    return get_domain_id(pt) == k
            domain_classes[k] = DomainK()
        domains = df.CellFunction("size_t", mesh)
        domains.set_all(0)
        for k, d in domain_classes.items():
            d.mark(domains, k)
        self.submeshes = [df.SubMesh(mesh, domains, i)
                          for i in self.domain_ids]
        self.dx = df.Measure("dx")[domains]

        def m_init(pt):
            return m_vals[self.get_domain_id(pt)]

        self.V = df.VectorFunctionSpace(mesh, 'CG', 1, dim=3)
        self.m = Field(self.V)
        self.m.set(m_init, normalised=True)

    def compute_energies_on_subdomains(self, interaction):
        """
        Given some interaction (such as Zeeman, Demag, Exchange, etc.),
        compute the associated energies on each subdomain as well as the
        total energy.

        *Returns*

        A pair (E_subdmns, E_total), where E_subdmns is a dictionary of
        energies indexed by the subdomain indices, and E_total is the
        total energy of the interaction.
        """
        interaction.setup(self.m, self.Ms, unit_length=self.unit_length)
        return {k: interaction.compute_energy(dx=self.dx(k))
                for k in self.domain_ids}, \
            interaction.compute_energy(df.dx)

    def check_energy_consistency(self, interaction):
        E_domains, E_total = self.compute_energies_on_subdomains(interaction)
        finmag.logger.debug("Energies on subdomains: {}".format(E_domains))
        finmag.logger.debug(
            "Sum of energies on subdomains: {}; total energy: {}".format(
                sum(E_domains.values()), E_total))
        assert np.allclose(
            sum(E_domains.values()), E_total, atol=0, rtol=1e-12)


@pytest.mark.slow
def test_energies_in_separated_subdomains(tmpdir):
    """
    Create a mesh with two subdomains. For each energy class compute the
    energy on each subdomain and compare with the total energy on the
    whole mesh. Also compare with analytical expressions if feasible.
    """
    os.chdir(str(tmpdir))

    # Create a mesh consisting of two disks (with different heights)
    d = 30.0
    h1 = 5.0
    h2 = 10.0
    sep = 10.0
    maxh = 2.5
    Ms = 8.6e5
    unit_length = 1e-9
    RTOL = 5e-3  # achievable relative tolerance depends on maxh

    zeeman = Zeeman(1e6 * np.array([1, 0, 0]))
    mesh = pair_of_disks(d, d, h1, h2, sep, theta=0, maxh=maxh)

    def get_domain_id(pt):
        x, y, z = pt
        return 1 if (np.linalg.norm([x, y]) < 0.5 * (d + sep)) else 2

    m_vals = {1: [1, 0, 0],
              2: [0.5, -0.8, 0]}

    multi_domain_test = MultiDomainTest(
        mesh, get_domain_id, m_vals, Ms, unit_length=unit_length)
    multi_domain_test.check_energy_consistency(zeeman)


# The same test for a mesh with subdomains that touch will fail for some
# reason. XXX TODO: need to investigate this.
@pytest.mark.xfail
def test_energies_in_touching_subdomains():
    # Max, I fixed some things in here (missing m_vals, Ms, Zeeman and
    # unit_length.) Also changed 'get_domain_id' to 'get_domain_id2' in the
    # MultiDomainTest call below. Could you check this is what you meant?
    # (Maybe the test even passes now?)  XXX TODO, 5 Oct 2013, Hans
    zeeman = Zeeman(1e6 * np.array([1, 0, 0]))
    m_vals = {1: [1, 0, 0],
              2: [0.5, -0.8, 0]}
    box_mesh = df.BoxMesh(df.Point(-50, -20, 0), df.Point(50, 20, 5),
                          30, 10, 2)
    Ms = 8.6e5
    unit_length = 1e-9

    def get_domain_id2(pt):
        return 1 if (pt[0] < 0) else 2

    multi_domain_test = MultiDomainTest(
        box_mesh, get_domain_id2, m_vals, Ms, unit_length=unit_length)
    # The next line fails for touching subdomains. Need to investigate this.
    multi_domain_test.check_energy_consistency(zeeman)
4,938
34.278571
89
py
finmag
finmag-master/src/finmag/energies/energy_base.py
import logging
import dolfin as df
import numpy as np
from aeon import timer
from finmag.util.meshes import nodal_volume
from finmag.util import helpers
from finmag.util.consts import mu0
from finmag.field import Field

logger = logging.getLogger('finmag')


class EnergyBase(object):

    """
    Computes a field for a given energy functional.

    It is a class from which particular energies should be derived. The
    derived classes should fill in the necessary details and call methods
    on the parent. See e.g. Exchange and UniaxialAnisotropy for examples.

    *Arguments*

        method
            possible methods are
                * 'box-assemble'
                * 'box-matrix-numpy'
                * 'box-matrix-petsc' [Default]
                * 'project'

        in_jacobian
            True or False -- decides whether the interaction is included in
            the Jacobian.

    At the moment, we think (all) 'box' methods work (and the method is
    used in Magpar and Nmag).

        - 'box-assemble' is a slower version that assembles the field H
          for a given m in every iteration.

        - 'box-matrix-numpy' precomputes a matrix g, so that H = g * m.

        - 'box-matrix-petsc' is the same mathematical scheme as
          'box-matrix-numpy', but uses a PETSc linear algebra backend that
          supports sparse matrices to exploit the sparsity of g (default
          choice).

        - 'project': does not use the box method but 'properly projects'
          the field into the function space. This is provided for reasons
          of completeness but is potentially untested.

    """
    _supported_methods = ['box-assemble', 'box-matrix-numpy',
                          'box-matrix-petsc', 'project', 'direct']

    def __init__(self, method="box-matrix-petsc", in_jacobian=False):
        if method not in self._supported_methods:
            logger.error("Can't create '{}' object with method '{}'. "
                         "Possible choices are {}.".format(
                             self.__class__.__name__, method,
                             self._supported_methods))
            raise ValueError("Unsupported method '{}' should be "
                             "one of {}.".format(method,
                                                 self._supported_methods))
        else:
            logger.debug("Creating {} object with method {},{} in "
                         "Jacobian.".format(self.__class__.__name__, method,
                                            " not " if not in_jacobian else ""))
        self.in_jacobian = in_jacobian
        self.method = method

    def setup(self, E_integrand, m, Ms, unit_length=1):
        """
        Function to be called after the energy object has been constructed.

        *Arguments*

            E_integrand
                dolfin form that represents the term inside the energy
                integral, as a function of m (and maybe Ms) if assembled

            m
                magnetisation field (usually normalised)

            Ms
                Saturation magnetisation (scalar, or scalar dolfin function)

            unit_length
                real length of 1 unit in the mesh

        """
        ###license_placeholder###
        assert isinstance(m, Field)
        assert isinstance(Ms, Field)

        self.E_integrand = E_integrand
        dofmap = m.mesh_dofmap()
        self.S1 = df.FunctionSpace(
            m.mesh(), "CG", 1, constrained_domain=dofmap.constrained_domain)
        self.m = m
        self.Ms = Ms
        self.unit_length = unit_length

        self.E = E_integrand * df.dx
        self.nodal_E = df.dot(E_integrand, df.TestFunction(self.S1)) * df.dx
        self.dE_dm = df.Constant(-1.0 / mu0) * \
            df.derivative(E_integrand / self.Ms.f * df.dx, self.m.f)

        self.dim = m.mesh_dim()
        self.nodal_volume_S1 = nodal_volume(self.S1, self.unit_length)
        # Same as nodal_volume_S1, just three times in an array
        # to have the same number of elements in the array as the
        # field to be able to divide it.
        self.nodal_volume_S3 = nodal_volume(self.m.functionspace)

        if self.method == 'box-assemble':
            self.__compute_field = self.__compute_field_assemble
        elif self.method == 'box-matrix-numpy':
            self.__setup_field_numpy()
            self.__compute_field = self.__compute_field_numpy
        elif self.method == 'box-matrix-petsc':
            self.__setup_field_petsc()
            self.__compute_field = self.__compute_field_petsc
        elif self.method == 'project':
            self.__setup_field_project()
            self.__compute_field = self.__compute_field_project
        elif self.method == 'direct':
            self.__compute_field = self.__compute_field_petsc
        else:
            logger.error("Can't create '{}' object with method '{}'. "
                         "Possible choices are "
                         "{}.".format(self.__class__.__name__, self.method,
                                      self._supported_methods))
            raise ValueError("Unsupported method '{}' should be one of "
                             "{}.".format(self.method,
                                          self._supported_methods))

    @timer.method
    def compute_energy(self):
        """
        Return the total energy, i.e. energy density integrated over the
        whole mesh [in units of Joule].

        *Returns*
            Float
                The energy.

        """
        E = df.assemble(self.E) * self.unit_length ** self.dim
        return E

    @timer.method
    def energy_density(self):
        """
        Compute the energy density,

        .. math::

            \\frac{E}{V},

        where V is the volume of each node.

        *Returns*
            numpy.ndarray
                Coefficients of dolfin vector of energy density.

        """
        nodal_E = df.assemble(self.nodal_E).array() * \
            self.unit_length ** self.dim
        return nodal_E / self.nodal_volume_S1

    def energy_density_function(self):
        """
        Compute the energy density the same way as the energy_density
        function above, but return a dolfin function to allow probing.

        *Returns*
            dolfin.Function
                The energy density function object.

        """
        if not hasattr(self, "E_density_function"):
            self.E_density_function = df.Function(self.S1)
        self.E_density_function.vector()[:] = self.energy_density()
        return self.E_density_function

    @timer.method
    def compute_field(self):
        """
        Compute the field associated with the energy.

        *Returns*
            numpy.ndarray
                The coefficients of the dolfin-function in a numpy array.

        """
        H = self.__compute_field()
        return H

    def average_field(self):
        """
        Compute the average field.
        """
        return helpers.average_field(self.compute_field())

    def __compute_field_assemble(self):
        return df.assemble(self.dE_dm).array() / self.nodal_volume_S3

    def __setup_field_petsc(self):
        """
        Same as __setup_field_numpy but with a petsc backend.
        """
        g_form = df.derivative(self.dE_dm, self.m.f)
        self.g_petsc = df.PETScMatrix()
        df.assemble(g_form, tensor=self.g_petsc)
        self.H_petsc = df.PETScVector()

    def __compute_field_petsc(self):
        if not hasattr(self, "g_petsc"):
            self.__setup_field_petsc()
        self.g_petsc.mult(self.m.f.vector(), self.H_petsc)
        return self.H_petsc.array() / self.nodal_volume_S3

    def __setup_field_numpy(self):
        """
        Linearise dE_dm with respect to m. As we know this is linear
        (at least for exchange, and uniaxial anisotropy? Should add
        reference to Werner Scholz paper and relevant equation for g),
        this creates the right matrix to compute dE_dm later as
        dE_dm = g * m. We essentially compute a Taylor series of the
        energy in m, and know that the first two terms (for exchange:
        dE_dm = Hex, and ddE_dmdm = g) are the only finite ones as we
        know the expression for the energy.
        """
        g_form = df.derivative(self.dE_dm, self.m.f)
        self.g = df.assemble(g_form).array()

    def __compute_field_numpy(self):
        Mvec = self.m.f.vector().array()
        H_ex = np.dot(self.g, Mvec)
        return H_ex / self.nodal_volume_S3

    def __setup_field_project(self):
        # Note that we could make this 'project' method faster by
        # computing the matrices that represent a and L, and only
        # solving the matrix system in 'compute_field'().
        # IF this method is actually useful, we can do that. HF 16 Feb 2012
        self.a = df.dot(df.TrialFunction(self.m.functionspace),
                        df.TestFunction(self.m.functionspace)) * df.dx
        self.L = self.dE_dm
        self.H_project = df.Function(self.m.functionspace)

    def __compute_field_project(self):
        df.solve(self.a == self.L, self.H_project)
        return self.H_project.vector().array()
9,254
34.190114
81
py
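As the docstring says, concrete interactions derive from `EnergyBase` and only supply the energy integrand before delegating to the parent's `setup`; the `Exchange` class below is the canonical example. A hypothetical minimal subclass following the same pattern (the class name and the constant `D` are invented for illustration, not part of finmag):

import dolfin as df
from finmag.energies.energy_base import EnergyBase

class QuadraticEnergy(EnergyBase):
    """Toy energy E = D * m.m, following the Exchange pattern."""
    def __init__(self, D, method='box-matrix-petsc'):
        self.D = D
        super(QuadraticEnergy, self).__init__(method, in_jacobian=True)

    def setup(self, m, Ms, unit_length=1):
        # Only the integrand is specific; field/energy machinery is inherited.
        E_integrand = df.Constant(self.D) * df.inner(m.f, m.f)
        super(QuadraticEnergy, self).setup(E_integrand, m, Ms, unit_length)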
finmag
finmag-master/src/finmag/energies/cubic_anisotropy.py
import logging
import numpy as np
import dolfin as df
from aeon import timer
from finmag.field import Field
from energy_base import EnergyBase
from finmag.util import helpers
from finmag.util.consts import mu0
from finmag.native import llg as native_llg

logger = logging.getLogger('finmag')


class CubicAnisotropy(EnergyBase):

    """
    Compute the cubic anisotropy field.

    *Arguments*

        K1, K2, K3
            The anisotropy constants.

        u1, u2, u3
            The anisotropy axes. Should be unit vectors.

    *Example of Usage*

        Refer to the UniaxialAnisotropy class.
    """

    def __init__(self, u1, u2, K1, K2=0, K3=0, name='CubicAnisotropy',
                 assemble=False):
        """
        Define a cubic anisotropy with anisotropy constants
        `K1`, `K2`, `K3` (in J/m^3) and corresponding axes
        `u1`, `u2` and `u3`.

        If assemble=True, the box-assemble method will be used; note that
        the box-assemble method seems to introduce extra error!
        """
        self.u1_value = u1
        self.u2_value = u2
        self.u3_value = np.cross(u1, u2)  # u3 perpendicular to u1 and u2

        self.K1_value = K1
        self.K2_value = K2
        self.K3_value = K3

        self.uv = 1.0 * np.array([self.u1_value, self.u2_value, self.u3_value])
        self.uv.shape = (-1,)

        self.name = name
        super(CubicAnisotropy, self).__init__("box-assemble", in_jacobian=True)

        self.assemble = assemble

    @timer.method
    def setup(self, m, Ms, unit_length=1):
        dofmap = m.functionspace.dofmap()

        cg_scalar_functionspace = df.FunctionSpace(
            m.mesh(), "Lagrange", 1,
            constrained_domain=dofmap.constrained_domain)
        cg_vector_functionspace = df.VectorFunctionSpace(
            m.mesh(), "Lagrange", 1, 3,
            constrained_domain=dofmap.constrained_domain)

        self.K1_field = Field(cg_scalar_functionspace, self.K1_value, name='K1')
        self.K2_field = Field(cg_scalar_functionspace, self.K2_value, name='K2')
        self.K3_field = Field(cg_scalar_functionspace, self.K3_value, name='K3')

        self.u1_field = Field(cg_vector_functionspace, self.u1_value, name='u1')
        self.u2_field = Field(cg_vector_functionspace, self.u2_value, name='u2')
        self.u3_field = Field(cg_vector_functionspace, self.u3_value, name='u3')

        self.volumes = df.assemble(
            df.TestFunction(cg_scalar_functionspace) * df.dx)
        self.K1 = df.assemble(
            self.K1_field.f * df.TestFunction(cg_scalar_functionspace) *
            df.dx).array() / self.volumes
        self.K2 = df.assemble(
            self.K2_field.f * df.TestFunction(cg_scalar_functionspace) *
            df.dx).array() / self.volumes
        self.K3 = df.assemble(
            self.K3_field.f * df.TestFunction(cg_scalar_functionspace) *
            df.dx).array() / self.volumes

        u1msq = df.dot(self.u1_field.f, m.f) ** 2
        u2msq = df.dot(self.u2_field.f, m.f) ** 2
        u3msq = df.dot(self.u3_field.f, m.f) ** 2

        E_term1 = self.K1_field.f * (u1msq * u2msq + u2msq * u3msq +
                                     u3msq * u1msq)
        E_term2 = self.K2_field.f * (u1msq * u2msq * u3msq)
        E_term3 = self.K3_field.f * (u1msq ** 2 * u2msq ** 2 +
                                     u2msq ** 2 * u3msq ** 2 +
                                     u3msq ** 2 * u1msq ** 2)

        E_integrand = E_term1
        if self.K2_value != 0:
            E_integrand += E_term2
        if self.K3_value != 0:
            E_integrand += E_term3

        super(CubicAnisotropy, self).setup(E_integrand, m, Ms, unit_length)

        if not self.assemble:
            self.H = self.m.get_numpy_array_debug()
            self.Ms = self.Ms.get_numpy_array_debug()
            self.compute_field = self.__compute_field_directly

    def __compute_field_directly(self):
        m = self.m.get_numpy_array_debug()

        m.shape = (3, -1)
        self.H.shape = (3, -1)
        native_llg.compute_cubic_field(
            m, self.Ms, self.H, self.uv, self.K1, self.K2, self.K3)
        m.shape = (-1,)
        self.H.shape = (-1,)

        return self.H
4,052
33.347458
102
py
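The docstring above defers to `UniaxialAnisotropy` for usage; following the same setup pattern as the anisotropy tests later in this listing, a sketch might look as follows (the axes and constants are illustrative, and the import path for `CubicAnisotropy` is assumed to mirror the other energy classes):

import dolfin as df
from finmag.field import Field
from finmag.energies import CubicAnisotropy

mesh = df.UnitCubeMesh(1, 1, 1)
m = Field(df.VectorFunctionSpace(mesh, "Lagrange", 1), (1, 0, 0))
Ms = Field(df.FunctionSpace(mesh, 'DG', 0), 8.6e5)
canis = CubicAnisotropy(u1=(1, 0, 0), u2=(0, 1, 0), K1=1e4)
canis.setup(m, Ms)
H = canis.compute_field()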
finmag
finmag-master/src/finmag/energies/exchange.py
import logging
import dolfin as df
from aeon import timer
from energy_base import EnergyBase
from finmag.field import Field

logger = logging.getLogger('finmag')


class Exchange(EnergyBase):

    """
    Compute the exchange field.

    .. math::

        E_{\\text{exch}} = \\int_\\Omega A (\\nabla M)^2  dx


    *Arguments*

        A
            the exchange constant

        method
            See documentation of EnergyBase class for details.

        name
            name of the object

    *Example of Usage*

        .. code-block:: python

            import dolfin as df
            from finmag.energies.exchange import Exchange
            from finmag.field import Field

            # Define a mesh representing a cube with edge length L
            L = 1e-8  # m
            n = 5
            mesh = df.BoxMesh(df.Point(0, L, 0), df.Point(L, 0, L), n, n, n)

            A = 1.3e-11  # J/m exchange constant
            Ms = 0.8e6  # A/m saturation magnetisation

            # Initial magnetisation
            S3 = df.VectorFunctionSpace(mesh, 'CG', 1)
            m = Field(S3, (1, 0, 0))

            exchange = Exchange(A)
            exchange.setup(m, Ms)

            # Compute exchange energy.
            E_ex = exchange.compute_energy()

            # Compute exchange effective field.
            H_ex = exchange.compute_field()

            # Using 'box-matrix-numpy' method (fastest for small matrices)
            exchange_np = Exchange(A, method='box-matrix-numpy')
            exchange_np.setup(m, Ms)
            H_exch_np = exchange_np.compute_field()

    """

    def __init__(self, A, method='box-matrix-petsc', name='Exchange'):
        self.A_value = A  # Value of A, later converted to a Field object.
        self.name = name
        super(Exchange, self).__init__(method, in_jacobian=True)

    @timer.method
    def setup(self, m, Ms, unit_length=1):
        """
        Function to be called after the energy object has been constructed.

        *Arguments*

            m
                magnetisation field (usually normalised)

            Ms
                Saturation magnetisation (scalar, or scalar dolfin function)

            unit_length
                real length of 1 unit in the mesh

        """
        assert isinstance(m, Field)
        assert isinstance(Ms, Field)

        # Create an exchange constant Field object A in DG0 function space.
        dg_functionspace = df.FunctionSpace(m.mesh(), 'DG', 0)
        self.A = Field(dg_functionspace, self.A_value, name='A')
        del(self.A_value)

        # Multiplication factor used for exchange energy computation.
        self.exchange_factor = df.Constant(1.0 / unit_length ** 2)

        # An expression for computing the exchange energy.
        E_integrand = self.exchange_factor * self.A.f * \
            df.inner(df.grad(m.f), df.grad(m.f))

        super(Exchange, self).setup(E_integrand, m, Ms, unit_length)
2,933
26.942857
76
py
finmag
finmag-master/src/finmag/energies/anisotropy_test.py
import pytest
import textwrap
import dolfin as df
import numpy as np
from finmag.field import Field
from finmag.energies import UniaxialAnisotropy
from finmag.util.consts import mu0

TOLERANCE = 1e-12


@pytest.fixture(scope="module")
def fixt():
    """
    Create a UniaxialAnisotropy object that will be re-used during testing.
    """
    mesh = df.UnitCubeMesh(1, 1, 1)
    functionspace = df.VectorFunctionSpace(mesh, "Lagrange", 1)
    K1 = 1
    Ms = Field(df.FunctionSpace(mesh, 'DG', 0), 1)
    a = df.Constant((0, 0, 1))
    m = Field(functionspace)
    anis = UniaxialAnisotropy(K1, a)
    anis.setup(m, Ms)
    return {"anis": anis, "m": m, "a": a, "Ms": Ms, "K1": K1}


def test_interaction_accepts_name(fixt):
    """
    Check that the interaction accepts a 'name' argument and has a 'name'
    attribute.
    """
    K1 = 1
    a = df.Constant((0, 0, 1))
    anis = UniaxialAnisotropy(K1, a, name='MyAnisotropy')
    assert hasattr(anis, 'name')


@pytest.mark.parametrize(("m", "expected_E"), [
    ((0, 0, 1), 0), ((0, 0, -1), 0), ((0, 1, 0), 1), ((-1, 0, 0), 1)])
def test_anisotropy_energy_simple_configurations(fixt, m, expected_E):
    """
    Test some parallel and orthogonal configurations of m and a.
    """
    mesh = df.UnitCubeMesh(1, 1, 1)
    functionspace = df.VectorFunctionSpace(mesh, "Lagrange", 1)
    K1 = 1
    Ms = Field(df.FunctionSpace(mesh, 'DG', 0), 1)
    a = df.Constant((0, 0, 1))
    m_field = Field(functionspace)
    m_field.set(df.Constant(m))
    anis = UniaxialAnisotropy(K1, a)
    anis.setup(m_field, Ms)

    E = anis.compute_energy()

    print "With m = {}, expecting E = {}. Got E = {}.".format(m, expected_E, E)
    #assert abs(E - expected_E) < TOLERANCE
    assert np.allclose(E, expected_E, atol=1e-14, rtol=TOLERANCE)


def test_anisotropy_energy_analytical(fixt):
    """
    Compare one UniaxialAnisotropy energy with the corresponding analytical
    result.

    The magnetisation is m = (0, sqrt(1 - x^2), x) and the easy axis is
    still a = (0, 0, 1). The squared dot product in the energy integral
    thus gives dot(a, m)^2 = x^2. Integrating x^2 gives (x^3)/3 and the
    analytical result with the constants we have chosen is 1 - 1/3 = 2/3.
    """
    mesh = df.UnitCubeMesh(1, 1, 1)
    functionspace = df.VectorFunctionSpace(mesh, "Lagrange", 1)
    K1 = 1
    Ms = Field(df.FunctionSpace(mesh, 'DG', 0), 1)
    a = df.Constant((0, 0, 1))
    m = Field(functionspace)
    m.set(df.Expression(("0", "sqrt(1 - pow(x[0], 2))", "x[0]"), degree=1))
    anis = UniaxialAnisotropy(K1, a)
    anis.setup(m, Ms)

    E = anis.compute_energy()
    expected_E = float(2) / 3

    print "With m = (0, sqrt(1-x^2), x), expecting E = {}. Got E = {}.".format(expected_E, E)
    #assert abs(E - expected_E) < TOLERANCE
    assert np.allclose(E, expected_E, atol=1e-14, rtol=TOLERANCE)


def test_anisotropy_field(fixt):
    """
    Compute one anisotropy field by hand and compare with the
    UniaxialAnisotropy result.
    """
    TOLERANCE = 1e-14
    c = df.Constant((1 / np.sqrt(2), 0, 1 / np.sqrt(2)))
    fixt["m"].set(c)

    H = fixt["anis"].compute_field()

    v = df.TestFunction(fixt["m"].functionspace)
    g_ani = df.Constant(fixt["K1"] / (mu0 * fixt["Ms"].value)) * (
        2 * df.dot(fixt["a"], fixt["m"].f) * df.dot(fixt["a"], v)) * df.dx
    volume = df.assemble(df.dot(v, df.Constant((1, 1, 1))) * df.dx).array()
    dE_dm = df.assemble(g_ani).array() / volume

    print(textwrap.dedent("""
        With m = (1, 0, 1)/sqrt(2),
            expecting: H = {},
            got:       H = {}.
        """.format(H.reshape((3, -1)).mean(axis=1),
                   dE_dm.reshape((3, -1)).mean(axis=1))))
    assert np.allclose(H, dE_dm, atol=0, rtol=TOLERANCE)


def test_anisotropy_field_supported_methods(fixt):
    """
    Check that all supported methods give the same results as the default
    method.
    """
    TOLERANCE = 1e-13
    fixt["m"].set(df.Constant((1 / np.sqrt(2), 0, 1 / np.sqrt(2))))

    H_default = fixt["anis"].compute_field()

    supported_methods = list(UniaxialAnisotropy._supported_methods)
    # No need to compare default method with itself.
    supported_methods.remove(fixt["anis"].method)

    for method in supported_methods:
        anis = UniaxialAnisotropy(fixt["K1"], fixt["a"], method=method)
        anis.setup(fixt["m"], fixt["Ms"])
        H = anis.compute_field()
        print(textwrap.dedent("""
            With method '{}',
                expecting: H = {},
                got:       H = {}.
            """.format(method,
                       H_default.reshape((3, -1)).mean(axis=1),
                       H.reshape((3, -1)).mean(axis=1))))
        assert np.allclose(H, H_default, atol=0, rtol=TOLERANCE)
4,795
32.538462
93
py
finmag
finmag-master/src/finmag/energies/dw_fixed_energy.py
import os import textwrap import dolfin as df import numpy as np from demag import Demag class FixedEnergyDW(object): def __init__(self, left=(1, 0, 0), right=(-1, 0, 0), repeat_time=5, name='FixedEnergyDW'): self.left = left self.right = right self.repeat_time = repeat_time self.in_jacobian = False self.name = name def write_xml(self, filename, coordinates, cells): """ XXX TODO: According to Weiwei, this function is broken at the moment. Its purpose is to duplicate a given mesh (what for, though?). We should either fix it or remove it. Probably also move it to a more appropriate location (since it doesn't seem to have to do much with the FixedEnergyDW class?). -- Max, 16 Jan 2014 """ f = open(filename, 'w') f.write("""<?xml version="1.0"?>\n""") f.write("""<dolfin xmlns:dolfin="http://fenicsproject.org">\n""") f.write(""" <mesh celltype="tetrahedron" dim="3">\n""") f.write(""" <vertices size="%d">\n""" % len(coordinates)) for i in range(len(coordinates)): f.write(""" <vertex index="%d" x="%0.12f" y="%0.12f" z="%0.12f"/>\n""" % ( i, coordinates[i][0], coordinates[i][1], coordinates[i][2])) f.write(""" </vertices>\n""") f.write(""" <cells size="%d">\n""" % len(cells)) for i in range(len(cells)): f.write(""" <tetrahedron index="%d" v0="%d" v1="%d" v2="%d" v3="%d"/>\n""" % ( i, cells[i][0], cells[i][1], cells[i][2], cells[i][3])) f.write(""" </cells>\n""") f.write(""" </mesh>\n</dolfin>""") def bias_mesh(self, step): cds = np.array(self.mesh.coordinates()) cells = np.array(self.mesh.cells()) cells += len(cds) cells = np.concatenate((self.mesh.cells(), cells)) cds[:, 0] += self.xlength * step cds = np.concatenate((self.mesh.coordinates(), cds)) return cells, cds def setup(self, S3, m, Ms, unit_length=1): self.S3 = S3 self.mesh = S3.mesh() self.Ms = Ms n = self.mesh.num_vertices() self.tmp_field = np.zeros(6 * n) self.field = np.zeros((n, 3)) self.init_m = np.zeros((2 * n, 3)) c = self.mesh.coordinates() self.xlength = np.max(c[:, 0]) - np.min(c[:, 0]) self.__compute_field() tmp = self.tmp_field.reshape((3, -1), order='C') self.field = np.array(tmp[:, :n]) self.field.shape = (1, -1) self.field = self.field[0] def __compute_field(self): n = self.mesh.num_vertices() self.init_m[:n, 0] = 1 self.init_m[n:, :] = self.left for i in range(-self.repeat_time, 0): cells, cds = self.bias_mesh(i - 1e-10) filename = "mesh_%d.xml" % i self.write_xml(filename, cds, cells) demag = Demag(solver='Treecode') mesh = df.Mesh(filename) Vv = df.VectorFunctionSpace(mesh, 'Lagrange', 1) dg = df.FunctionSpace(mesh, "DG", 0) Ms_tmp = df.Function(dg) Ms_list = list(self.Ms.vector().array()) Ms_tmp.vector().set_local(np.array(Ms_list + Ms_list)) m = df.Function(Vv) tmp_init_m = self.init_m.reshape((1, -1), order='F')[0] m.vector().set_local(tmp_init_m) demag.setup(Vv, m, Ms_tmp) self.tmp_field += demag.compute_field() os.remove(filename) self.init_m[:n, 0] = -1 self.init_m[n:, :] = self.right for i in range(1, self.repeat_time + 1): cells, cds = self.bias_mesh(i + 1e-10) filename = "mesh_%d.xml" % i self.write_xml(filename, cds, cells) demag = Demag(solver='Treecode') mesh = df.Mesh(filename) Vv = df.VectorFunctionSpace(mesh, 'Lagrange', 1) dg = df.FunctionSpace(mesh, "DG", 0) Ms_tmp = df.Function(dg) Ms_list = list(self.Ms.vector().array()) Ms_tmp.vector().set_local(np.array(Ms_list + Ms_list)) m = df.Function(Vv) tmp_init_m = self.init_m.reshape((1, -1), order='F')[0] m.vector().set_local(tmp_init_m) demag.setup(Vv, m, Ms_tmp) self.tmp_field += demag.compute_field() os.remove(filename) def compute_field(self): return 
self.field


if __name__ == '__main__':
    mesh = df.BoxMesh(df.Point(0, 0, 0), df.Point(500, 20, 5), 100, 4, 1)
    dw = FixedEnergyDW(repeat_time=5)
    S3 = df.VectorFunctionSpace(mesh, "Lagrange", 1)
    m = df.Function(S3)

    # setup() ends up calling self.Ms.vector(), so Ms must be a dolfin
    # Function (here on a DG0 space) rather than the plain float 8.6e5
    # that this demo used to pass in.
    Ms = df.Function(df.FunctionSpace(mesh, "DG", 0))
    Ms.vector()[:] = 8.6e5

    dw.setup(S3, m, Ms, unit_length=1)
    m.vector().set_local(dw.compute_field())
    print dw.compute_field().reshape((3, -1))
    for x in range(100):
        print x * 5 + 2.5, m(x * 5 + 2.5, 17.5, 2.5)
5,152
33.817568
96
py
finmag
finmag-master/src/finmag/energies/cubic_anisotropy_test.py
import dolfin as df import numpy as np from finmag.field import Field from finmag.energies import CubicAnisotropy from finmag.util.meshes import mesh_volume import pytest unit_length = 1e-9 Ms = 876626 # A/m K1 = -8608726 K2 = -13744132 K3 = 1100269 u1 = (0, -0.7071, 0.7071) u2 = (0, 0.7071, 0.7071) u3 = (-1, 0, 0) # perpendicular to u1 and u2 def compute_cubic_energy(): m = (0, 0, 1) u1m = np.dot(u1, m) u2m = np.dot(u2, m) u3m = np.dot(u3, m) energy = K1 * \ (u1m ** 2 * u2m ** 2 + u1m ** 2 * u3m ** 2 + u2m ** 2 * u3m ** 2) energy += K2 * (u1m ** 2 * u2m ** 2 * u3m ** 2) energy += K3 * \ (u1m ** 4 * u2m ** 4 + u1m ** 4 * u3m ** 4 + u2m ** 4 * u3m ** 4) return energy def test_cubic_anisotropy_energy(): mesh = df.BoxMesh(df.Point(0, 0, 0), df.Point(1, 1, 40), 1, 1, 40) volume = mesh_volume(mesh) * unit_length ** 3 S3 = df.VectorFunctionSpace(mesh, "Lagrange", 1) S1 = df.FunctionSpace(mesh, "Lagrange", 1) m = Field(S3) m.set((0, 0, 1)) Ms_dg = Field(df.FunctionSpace(mesh, 'DG', 0), Ms) ca = CubicAnisotropy(u1, u2, K1, K2, K3) ca.setup(m, Ms_dg, unit_length) energy = ca.compute_energy() # energy_expected = 8.3e-20 # oommf cubicEight_100pc.mif -> ErFe2.odt energy_expected = compute_cubic_energy() * volume print "cubic anisotropy energy = {}, expected {}.".format(energy, energy_expected) rel_diff = abs(energy - energy_expected) / abs(energy_expected) assert rel_diff < 1e-10
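# Hedged sketch (ours, not finmag API): compute_cubic_energy above
# hard-codes m = (0, 0, 1); this generalised helper makes the structure of
# the K1/K2/K3 terms explicit for an arbitrary direction m.
def compute_cubic_energy_density(m):
    m = np.asarray(m, dtype=float)
    m = m / np.linalg.norm(m)  # normalise, so any direction vector works
    u1m = np.dot(u1, m)
    u2m = np.dot(u2, m)
    u3m = np.dot(u3, m)
    energy = K1 * (u1m ** 2 * u2m ** 2 + u1m ** 2 * u3m ** 2 + u2m ** 2 * u3m ** 2)
    energy += K2 * (u1m ** 2 * u2m ** 2 * u3m ** 2)
    energy += K3 * (u1m ** 4 * u2m ** 4 + u1m ** 4 * u3m ** 4 + u2m ** 4 * u3m ** 4)
    return energy


if __name__ == "__main__":
    # Cross-check against the hard-coded version for m = (0, 0, 1).
    assert np.isclose(compute_cubic_energy_density((0, 0, 1)),
                      compute_cubic_energy(), rtol=1e-12)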
1,514
27.584906
86
py
finmag
finmag-master/src/finmag/energies/dmi_test.py
import pytest import numpy as np import dolfin as df from math import pi from finmag.energies import DMI, Exchange from finmag import Simulation from finmag.field import Field from finmag.util.helpers import vector_valued_function from finmag.util.pbc2d import PeriodicBoundary2D def test_dmi_uses_unit_length_2dmesh(): """ Set up a helical state in two meshes (one expressed in SI units the other expressed in nanometers) and compute energies and fields. """ A = 8.78e-12 # J/m D = 1.58e-3 # J/m^2 Ms = 3.84e5 # A/m energies = [] # unit_lengths 1e-9 and 1 are common, let's throw in an intermediate length # just to challenge the system a little: for unit_length in (1, 1e-4, 1e-9): radius = 200e-9 / unit_length maxh = 5e-9 / unit_length helical_period = (4 * pi * A / D) / unit_length k = 2 * pi / helical_period # HF 27 April 2014: The next command fails in dolfin 1.3 # mesh = df.CircleMesh(df.Point(0, 0), radius, maxh) # The actual shape of the domain shouldn't matter for the test, # so let's use a Rectangular mesh which should work the same: nx = ny = int(round(radius / maxh)) mesh = df.RectangleMesh(df.Point(0, 0), df.Point(radius, radius), nx, ny) S3 = df.VectorFunctionSpace(mesh, "CG", 1, dim=3) m_expr = df.Expression(("0", "cos(k * x[0])", "sin(k * x[0])"), k=k, degree=1) m = Field(S3, m_expr, name='m') dmi = DMI(D) Ms_dg = Field(df.FunctionSpace(mesh, 'DG', 0), Ms) dmi.setup(m, Ms_dg, unit_length=unit_length) energies.append(dmi.compute_energy()) H = df.Function(S3) H.vector()[:] = dmi.compute_field() print H(0.0, 0.0) print "Using unit_length = {}.".format(unit_length) print "Helical period {}.".format(helical_period) print "Energy {}.".format(dmi.compute_energy()) rel_diff_energies = abs(energies[0] - energies[1]) / abs(energies[1]) print "Relative difference of energy {}.".format(rel_diff_energies) assert rel_diff_energies < 1e-13 rel_diff_energies2 = abs(energies[0] - energies[2]) / abs(energies[2]) print "Relative difference2 of energy {}.".format(rel_diff_energies2) assert rel_diff_energies2 < 1e-13 def test_interaction_accepts_name(): """ Check that the interaction accepts a 'name' argument and has a 'name' attribute. """ dmi = DMI(1) assert hasattr(dmi, 'name') # We dont use PBC at the moment. If we do, we should make this pass first. @pytest.mark.xfail(reason="unfixed bug") def test_dmi_pbc2d(): mesh = df.BoxMesh(df.Point(0, 0, 0), df.Point(1, 1, 0.1), 2, 2, 1) pbc = PeriodicBoundary2D(mesh) S3 = df.VectorFunctionSpace(mesh, "Lagrange", 1, constrained_domain=pbc) m_expr = df.Expression(("0", "0", "1"), degree=1) m = Field(S3, m_expr, name='m') dmi = DMI(1) dmi.setup(m, Field(df.FunctionSpace(mesh, 'DG', 0), 1)) field = dmi.compute_field() assert np.max(field) < 1e-15 def test_dmi_pbc2d_1D(plot=False): def m_init_fun(p): if p[0] < 10: return [0.5, 0, 1] else: return [-0.5, 0, -1] mesh = df.RectangleMesh(df.Point(0, 0), df.Point(20, 2), 10, 1) m_init = vector_valued_function(m_init_fun, mesh) Ms = 8.6e5 sim = Simulation(mesh, Ms, pbc='2d', unit_length=1e-9) sim.set_m(m_init_fun) A = 1.3e-11 D = 5e-3 sim.add(Exchange(A)) sim.add(DMI(D)) sim.relax(stopping_dmdt=0.0001) if plot: sim.m_field.plot_with_dolfin() mx = [sim.m_field.probe([x + 0.5, 1])[0] for x in range(20)] assert np.max(np.abs(mx)) < 1e-6 if __name__ == "__main__": # test_dmi_pbc2d() test_dmi_pbc2d_1D(plot=True)
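# Hedged numeric aside (ours, not original test code): the helical period
# used in test_dmi_uses_unit_length_2dmesh is L = 4*pi*A/D. For A = 8.78e-12
# J/m and D = 1.58e-3 J/m^2 this comes to roughly 7e-8 m, i.e. ~70 nm, so
# the 200 nm domain accommodates a few full turns of the helix.
def helical_period(A, D):
    return 4 * pi * A / D


if __name__ == "__main__":
    print "helical period = {:.3g} m".format(helical_period(8.78e-12, 1.58e-3))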
3,797
29.878049
86
py
finmag
finmag-master/src/finmag/energies/exchange_test.py
import pytest import numpy as np import dolfin as df from finmag.energies import Exchange from finmag.field import Field from math import sqrt, pi from finmag.util.consts import mu0 from finmag.util.pbc2d import PeriodicBoundary2D @pytest.fixture def fixt(): """ Create an Exchange object that will be re-used during testing. """ mesh = df.UnitCubeMesh(10, 10, 10) functionspace = df.VectorFunctionSpace(mesh, "CG", 1, 3) Ms = Field(df.FunctionSpace(mesh, 'DG', 0), 1) A = 1 m = Field(functionspace) exch = Exchange(A) exch.setup(m, Ms) return {"exch": exch, "m": m, "A": A, "Ms": Ms} def test_interaction_accepts_name(): """ Check that the interaction accepts a 'name' argument and has a 'name' attribute. """ exch = Exchange(13e-12, name='MyExchange') assert hasattr(exch, 'name') def test_there_should_be_no_exchange_for_uniform_m(fixt): """ Check that exchange field and energy are 0 for uniform magnetisation. """ FIELD_TOLERANCE = 6e-7 fixt["m"].set((1, 0, 0)) H = fixt["exch"].compute_field() print "Asserted zero exchange field for uniform m = (1, 0, 0), " + \ "got H =\n{}.".format(H.reshape((3, -1))) print "np.max(np.abs(H)) =", np.max(np.abs(H)) assert np.max(np.abs(H)) < FIELD_TOLERANCE ENERGY_TOLERANCE = 0.0 E = fixt["exch"].compute_energy() print "Asserted zero exchange energy for uniform m = (1, 0, 0), " + \ "got E = {:g}.".format(E) assert abs(E) <= ENERGY_TOLERANCE def test_exchange_energy_analytical(fixt): """ Compare one Exchange energy with the corresponding analytical result. """ REL_TOLERANCE = 1e-7 A = 1 mesh = df.UnitCubeMesh(10, 10, 10) Ms = Field(df.FunctionSpace(mesh, 'DG', 0), 1) functionspace = df.VectorFunctionSpace(mesh, "CG", 1, 3) m = Field(functionspace) m.set(df.Expression(("x[0]", "x[2]", "-x[1]"), degree=1)) exch = Exchange(A) exch.setup(m, Ms) E = exch.compute_energy() # integrating the vector laplacian, the latter gives 3 already expected_E = 3 print "With m = (0, sqrt(1-x^2), x), " + \ "expecting E = {}. Got E = {}.".format(expected_E, E) assert abs(E - expected_E) / expected_E < REL_TOLERANCE def test_exchange_energy_analytical_2(): """ Compare one Exchange energy with the corresponding analytical result. """ REL_TOLERANCE = 5e-5 lx = 6 ly = 3 lz = 2 nx = 300 ny = nz = 1 mesh = df.BoxMesh(df.Point(0, 0, 0), df.Point(lx, ly, lz), nx, ny, nz) unit_length = 1e-9 functionspace = df.VectorFunctionSpace(mesh, "CG", 1, 3) Ms = Ms = Field(df.FunctionSpace(mesh, 'DG', 0), 8e5) A = 13e-12 m = Field(functionspace) m.set( df.Expression(['0', 'sin(2*pi*x[0]/l_x)', 'cos(2*pi*x[0]/l_x)'], l_x=lx, degree=1)) exch = Exchange(A) exch.setup(m, Ms, unit_length=unit_length) E_expected = A * 4 * pi ** 2 * \ (ly * unit_length) * (lz * unit_length) / (lx * unit_length) E = exch.compute_energy() print "expected energy: {}".format(E) print "computed energy: {}".format(E_expected) assert abs((E - E_expected) / E_expected) < REL_TOLERANCE def test_exchange_field_supported_methods(fixt): """ Check that all supported methods give the same results as the default method. 
""" A = 1 REL_TOLERANCE = 1e-12 mesh = df.UnitCubeMesh(10, 10, 10) Ms = Field(df.FunctionSpace(mesh, 'DG', 0), 1) functionspace = df.VectorFunctionSpace(mesh, "CG", 1, 3) m = Field(functionspace) m.set(df.Expression(("0", "sin(x[0])", "cos(x[0])"), degree=1)) exch = Exchange(A) exch.setup(m, Ms) H_default = exch.compute_field() supported_methods = list(Exchange._supported_methods) # no need to compare default method with itself supported_methods.remove(exch.method) # the project method for the exchange is too bad supported_methods.remove("project") for method in supported_methods: exch = Exchange(A, method=method) exch.setup(m, Ms) H = exch.compute_field() print "With method '{}', expecting H =\n{}\n, got H =\n{}.".format( method, H_default.reshape((3, -1)).mean(1), H.reshape((3, -1)).mean(1)) rel_diff = np.abs((H - H_default) / H_default) assert np.nanmax(rel_diff) < REL_TOLERANCE def test_exchange_periodic_boundary_conditions(): mesh1 = df.BoxMesh(df.Point(0, 0, 0), df.Point(1, 1, 0.1), 2, 2, 1) mesh2 = df.UnitCubeMesh(10, 10, 10) print(""" # for debugging, to make sense of output # testrun 0, 1 : mesh1 # testrun 2,3 : mesh2 # testrun 0, 2 : normal # testrun 1,3 : pbc """) testrun = 0 for mesh in [mesh1, mesh2]: pbc = PeriodicBoundary2D(mesh) S3_normal = df.VectorFunctionSpace(mesh, "Lagrange", 1) S3_pbc = df.VectorFunctionSpace(mesh, "Lagrange", 1, constrained_domain=pbc) for S3 in [S3_normal, S3_pbc]: print("Running test {}".format(testrun)) testrun += 1 FIELD_TOLERANCE = 6e-7 ENERGY_TOLERANCE = 0.0 m_expr = df.Expression(("0", "0", "1"), degree=1) m = Field(S3, m_expr, name='m') exch = Exchange(1) exch.setup(m, Field(df.FunctionSpace(mesh, 'DG', 0), 1)) field = exch.compute_field() energy = exch.compute_energy() print("m.shape={}".format(m.vector().array().shape)) print("m=") print(m.vector().array()) print("energy=") print(energy) print("shape=") print(field.shape) print("field=") print(field) H = field print "Asserted zero exchange field for uniform m = (1, 0, 0) " + \ "got H =\n{}.".format(H.reshape((3, -1))) print "np.max(np.abs(H)) =", np.max(np.abs(H)) assert np.max(np.abs(H)) < FIELD_TOLERANCE E = energy print "Asserted zero exchange energy for uniform m = (1, 0, 0), " + \ "Got E = {:g}.".format(E) assert abs(E) <= ENERGY_TOLERANCE if __name__ == "__main__": mesh = df.BoxMesh(df.Point(0, 0, 0), df.Point(2 * np.pi, 1, 1), 10, 1, 1) S = df.FunctionSpace(mesh, "Lagrange", 1) S3 = df.VectorFunctionSpace(mesh, "Lagrange", 1) expr = df.Expression(("0", "cos(x[0])", "sin(x[0])"), degree=1) m = df.interpolate(expr, S3) exch = Exchange(1, pbc2d=True) exch.setup(S3, m, 1) print exch.compute_field() field = df.Function(S3) field.vector().set_local(exch.compute_field()) df.plot(m) df.plot(field) df.interactive()
6,795
29.475336
84
py
finmag
finmag-master/src/finmag/energies/init.py
import logging from demag import Demag, Demag2D, MacroGeometry from energy_base import EnergyBase from exchange import Exchange from anisotropy import UniaxialAnisotropy from cubic_anisotropy import CubicAnisotropy from zeeman import Zeeman, TimeZeeman, DiscreteTimeZeeman, OscillatingZeeman, TimeZeemanPython from dmi import DMI, DMI_interfacial from thin_film_demag import ThinFilmDemag from dw_fixed_energy import FixedEnergyDW log = logging.getLogger("finmag")
466
34.923077
94
py
finmag
finmag-master/src/finmag/energies/zeeman_test.py
from __future__ import division import numpy as np import dolfin as df import pytest import os import finmag import logging from finmag.field import Field from finmag import sim_with from finmag.energies import Zeeman, TimeZeeman, DiscreteTimeZeeman, OscillatingZeeman from finmag.util.consts import mu0 from finmag.util.meshes import pair_of_disks from finmag.example import sphere_inside_airbox from math import sqrt, pi, cos, sin from zeeman import DipolarField mesh = df.UnitCubeMesh(2, 2, 2) S1 = df.FunctionSpace(mesh, "Lagrange", 1) S3 = df.VectorFunctionSpace(mesh, "Lagrange", 1, dim=3) m = Field(S3) m.set(df.Constant((1, 0, 0))) Ms = Field(S1, value=1) TOL = 1e-14 logger = logging.getLogger('finmag') def diff(H_ext, expected_field): """ Helper function which computes the maximum deviation between H_ext and the expected field. """ H = H_ext.compute_field().reshape((3, -1)).mean(1) print "Got H={}, expecting H_ref={}.".format(H, expected_field) return np.max(np.abs(H - expected_field)) def test_interaction_accepts_name(): """ Check that the interaction accepts a 'name' argument and has a 'name' attribute. """ field_expr = df.Expression(("0", "t", "0"), t=0, degree=1) zeeman = Zeeman([0, 0, 1], name='MyZeeman') assert hasattr(zeeman, 'name') zeeman = TimeZeeman(field_expr, name='MyTimeZeeman') assert hasattr(zeeman, 'name') zeeman = DiscreteTimeZeeman( field_expr, dt_update=2, name='MyDiscreteTimeZeeman') assert hasattr(zeeman, 'name') def test_compute_energy(): """ Compute Zeeman energies of a cuboid and sphere for various arrangements. """ lx = 2.0 ly = 3.0 lz = 5.0 nx = ny = nz = 10 # XXX TODO: why does the approximation get # worse if we use a finer mesh?!? unit_length = 1e-9 mesh = df.BoxMesh(df.Point(0, 0, 0), df.Point(lx, ly, lz), nx, ny, nz) Ms = Field(df.FunctionSpace(mesh, 'DG', 0), 8e5) H = 1e6 S3 = df.VectorFunctionSpace(mesh, "Lagrange", 1) def check_energy_for_m(m, E_expected): """ Helper function to compare the computed energy for a given magnetisation with an expected analytical value. """ m_field = Field(S3) m_field.set(df.Constant(m)) H_ext = Zeeman(H * np.array([1, 0, 0])) H_ext.setup(m_field, Ms, unit_length=unit_length) E_computed = H_ext.compute_energy() assert np.allclose(E_computed, E_expected, atol=0, rtol=1e-12) volume = lx * ly * lz * unit_length ** 3 E_aligned = -mu0 * Ms.value * H * volume check_energy_for_m((1, 0, 0), E_aligned) check_energy_for_m((-1, 0, 0), -E_aligned) check_energy_for_m((0, 1, 0), 0.0) check_energy_for_m((0, 0, 1), 0.0) check_energy_for_m( (1 / sqrt(2), 1 / sqrt(2), 0), E_aligned * cos(pi * 45 / 180)) check_energy_for_m((-0.5, 2 / sqrt(3), 0), -E_aligned * cos(pi * 60 / 180)) def test_energy_density_function(): """ Compute the Zeeman energy density over the entire mesh, integrate it, and compare it to the expected result. """ mesh = df.RectangleMesh(df.Point(-50, -50), df.Point(50, 50), 10, 10) unit_length = 1e-9 H = 1e6 # Create simulation object. sim = finmag.Simulation(mesh, 1e5, unit_length=unit_length) # Set uniform magnetisation. def m_ferromagnetic(pos): return np.array([0., 0., 1.]) sim.set_m(m_ferromagnetic) # Assign zeeman object to simulation sim.add(Zeeman(H * np.array([0., 0., 1.]))) # Get energy density function edf = sim.get_interaction('Zeeman').energy_density_function() # Integrate it over the mesh and compare to expected result. 
total_energy = df.assemble(edf * df.dx) * unit_length expected_energy = -mu0 * H assert (total_energy + expected_energy) < 1e-6 def test_compute_energy_in_regions(tmpdir): os.chdir(str(tmpdir)) d = 30.0 h1 = 5.0 h2 = 10.0 sep = 10.0 maxh = 2.5 RTOL = 5e-3 # depends on maxh unit_length = 1e-9 Ms = 8e5 H = 1e6 mesh = pair_of_disks(d, d, h1, h2, sep, theta=0, maxh=maxh) S3 = df.VectorFunctionSpace(mesh, "CG", 1) # Create a mesh function for the two domains (each representing one disk), # where the regions are marked with '0' (first disk) and '1' (second disk). class Disk1(df.SubDomain): def inside(self, pt, on_boundary): x, y, z = pt return np.linalg.norm([x, y]) < 0.5 * (d + sep) class Disk2(df.SubDomain): def inside(self, pt, on_boundary): x, y, z = pt return np.linalg.norm([x, y, z]) > 0.5 * (d + sep) disk1 = Disk1() disk2 = Disk2() domains = df.CellFunction("size_t", mesh) domains.set_all(0) disk1.mark(domains, 1) disk2.mark(domains, 2) dx = df.Measure("dx")[domains] dx_disk_1 = dx(1) dx_disk_2 = dx(2) volume_1 = pi * (0.5 * d) ** 2 * h1 * unit_length ** 3 # volume of disk #1 volume_2 = pi * (0.5 * d) ** 2 * h2 * unit_length ** 3 # volume of disk #2 # E_aligned_1 = -mu0 * Ms * H * volume_1 # energy of disk #1 if m || H_ext # E_aligned_2 = -mu0 * Ms * H * volume_2 # energy of disk #2 if m || H_ext def check_energies(m=None, theta=None): """ Helper function to compare the computed energy for a given magnetisation with an expected analytical value. The argument theta is the angle between the magnetisation vector and the x-axis. """ # Exactly one of m, theta must be given assert((m is None or theta is None) and not ( m is None and theta is None)) if m is None: if theta is None: raise ValueError("Exactly one of m, theta must be given.") theta_rad = theta * pi / 180. m = (cos(theta_rad), sin(theta_rad), 0) else: if theta != None: raise ValueError("Exactly one of m, theta must be given.") m = m / np.linalg.norm(m) m_field = Field(S3) m_field.set(df.Constant(m)) H_ext = Zeeman(H * np.array([1, 0, 0])) H_ext.setup(m_field, Field(df.FunctionSpace(mesh, 'DG', 0), Ms), unit_length=unit_length) #E_analytical_1 = -mu0 * Ms * H * volume_1 * cos(theta_rad) E_analytical_1 = -mu0 * Ms * H * volume_1 * np.dot(m, [1, 0, 0]) E_analytical_2 = -mu0 * Ms * H * volume_2 * np.dot(m, [1, 0, 0]) E_analytical_total = E_analytical_1 + E_analytical_2 E_computed_1 = H_ext.compute_energy(dx=dx_disk_1) E_computed_2 = H_ext.compute_energy(dx=dx_disk_2) E_computed_total = H_ext.compute_energy(dx=df.dx) # Check that the sum of the computed energies for disk #1 and #2 equals # the total computed energy assert np.allclose( E_computed_1 + E_computed_2, E_computed_total, atol=0, rtol=1e-12) # Check that the computed energies are within the tolerance of the # analytical expressions assert np.allclose(E_computed_1, E_analytical_1, atol=0, rtol=RTOL) assert np.allclose(E_computed_2, E_analytical_2, atol=0, rtol=RTOL) assert np.allclose( E_computed_total, E_analytical_total, atol=0, rtol=RTOL) #finmag.logger.debug("E_computed: {}".format(E_computed)) #finmag.logger.debug("E_expected: {}".format(E_expected)) #finmag.logger.debug("E_computed - E_expected: {}".format(E_computed - E_expected)) # Check a bunch of energies for theta in [0, 20, 45, 60, 90, 144, 180]: check_energies(theta=theta) check_energies(theta=-theta) check_energies(m=(0, 0, 1)) check_energies(m=(2, -3, -1)) def test_value_set_update(): """ Test to check that the value member variable updates when set_value is called. """ init_value = [1., 2., 3.] 
second_value = [100., 200., 400.] zeeman = Zeeman(init_value) mesh = df.RectangleMesh(df.Point(0, 0), df.Point(1, 1), 10, 10) sim = finmag.Simulation(mesh, 1e5) sim.add(zeeman) zeeman.set_value(second_value) assert zeeman.value == second_value def test_time_zeeman_init(): field_expr = df.Expression(("0", "t", "0"), t=0, degree=1) field_lst = [1, 0, 0] field_tpl = (1, 0, 0) field_arr = np.array([1, 0, 0]) # These should work TimeZeeman(field_expr) TimeZeeman(field_expr, t_off=1e-9) # These should *not* work, since there is no time update with pytest.raises(ValueError): TimeZeeman(field_lst, t_off=None) with pytest.raises(ValueError): TimeZeeman(field_tpl, t_off=None) with pytest.raises(ValueError): TimeZeeman(field_arr, t_off=None) # These *should* work, since there is a time update (the field is # switched off at some point) TimeZeeman(field_lst, t_off=1e-9) TimeZeeman(field_tpl, t_off=1e-9) TimeZeeman(field_arr, t_off=1e-9) def test_time_dependent_field_update(): field_expr = df.Expression(("0", "t", "0"), t=0, degree=1) H_ext = TimeZeeman(field_expr) H_ext.setup(m, Field(df.FunctionSpace(m.mesh(), 'DG', 0), Ms)) assert diff(H_ext, np.array([0, 0, 0])) < TOL H_ext.update(1) assert diff(H_ext, np.array([0, 1, 0])) < TOL def test_time_dependent_field_switched_off(): # Check the time update (including switching off) with a varying field field_expr = df.Expression(("0", "t", "0"), t=0, degree=1) H_ext = TimeZeeman(field_expr, t_off=1) H_ext.setup(m, Field(df.FunctionSpace(m.mesh(), 'DG', 0), Ms)) assert diff(H_ext, np.array([0, 0, 0])) < TOL assert(H_ext.switched_off == False) H_ext.update(0.9) assert diff(H_ext, np.array([0, 0.9, 0])) < TOL assert(H_ext.switched_off == False) H_ext.update(2) assert diff(H_ext, np.array([0, 0, 0])) < TOL # It's off! assert(H_ext.switched_off == True) # The same again with a constant field a = [42, 0, 5] H_ext = TimeZeeman(a, t_off=1) H_ext.setup(m, Field(df.FunctionSpace(m.mesh(), 'DG', 0), Ms)) assert diff(H_ext, a) < TOL assert(H_ext.switched_off == False) H_ext.update(0.9) assert diff(H_ext, a) < TOL assert(H_ext.switched_off == False) H_ext.update(2) assert diff(H_ext, np.array([0, 0, 0])) < TOL # It's off! assert(H_ext.switched_off == True) def test_discrete_time_zeeman_updates_in_intervals(): field_expr = df.Expression(("0", "t", "0"), t=0, degree=1) H_ext = DiscreteTimeZeeman(field_expr, dt_update=2) H_ext.setup(m, Field(df.FunctionSpace(m.mesh(), 'DG', 0), Ms)) assert diff(H_ext, np.array([0, 0, 0])) < TOL H_ext.update(1) assert diff(H_ext, np.array([0, 0, 0])) < TOL # not yet updating H_ext.update(3) assert diff(H_ext, np.array([0, 3, 0])) < TOL def test_discrete_time_zeeman_check_arguments_are_sane(): """ At least one of the arguments 'dt_update' and 't_off' must be given. """ field_expr = df.Expression(("1", "2", "3"), degree=1) with pytest.raises(ValueError): H_ext = DiscreteTimeZeeman(field_expr, dt_update=None, t_off=None) def test_discrete_time_zeeman_switchoff_only(): """ Check that switching off a field works even if no dt_update is given (i.e. the field is just a pulse that is switched off after a while). 
""" field_expr = df.Expression(("1", "2", "3"), degree=1) H_ext = DiscreteTimeZeeman(field_expr, dt_update=None, t_off=2) H_ext.setup(m, Field(df.FunctionSpace(m.mesh(), 'DG', 0), Ms)) assert diff(H_ext, np.array([1, 2, 3])) < TOL assert(H_ext.switched_off == False) H_ext.update(1) assert diff(H_ext, np.array([1, 2, 3])) < TOL # not yet updating assert(H_ext.switched_off == False) H_ext.update(2.1) assert diff(H_ext, np.array([0, 0, 0])) < TOL assert(H_ext.switched_off == True) def test_oscillating_zeeman(): """ """ def check_field_at_time(t, val): H_osc.update(t) a = H_osc.compute_field().reshape(3, -1).T assert(np.allclose(a, val, atol=0, rtol=1e-8)) H = np.array([1e6, 0, 0]) freq = 2e9 t_off = 10e-9 H_osc = OscillatingZeeman(H0=H, freq=freq, phase=0, t_off=t_off) H_osc.setup(m, Ms) # Check that the field has the original value at the end of the # first few cycles. for i in xrange(19): check_field_at_time(i * 1.0 / freq, H) # Check that the field is switched off at the specified time (and # stays switched off thereafter) assert(H_osc.switched_off == False) check_field_at_time(t_off, [0, 0, 0]) assert(H_osc.switched_off == True) check_field_at_time(t_off + 1e-11, [0, 0, 0]) assert(H_osc.switched_off == True) check_field_at_time(t_off + 1, [0, 0, 0]) assert(H_osc.switched_off == True) # Check that the field values vary sinusoidally as expected phase = 0.1e-9 H_osc = OscillatingZeeman(H0=H, freq=freq, phase=phase, t_off=None) H_osc.setup(m, Ms) for t in np.linspace(0, 20e-9, 100): check_field_at_time(t, H * cos(2 * pi * freq * t + phase)) def test_dipolar_field_class(tmpdir): os.chdir(str(tmpdir)) H_dipole = DipolarField(pos=[0, 0, 0], m=[1, 0, 0], magnitude=3e9) mesh = df.BoxMesh(df.Point(-50, -50, -50), df.Point(50, 50, 50), 20, 20, 20) V = df.VectorFunctionSpace(mesh, 'CG', 1, dim=3) m_field = Field(V, value=df.Constant((1, 0, 0))) H_dipole.setup(m_field, Field(df.FunctionSpace(m.mesh(), 'DG', 0), 8.6e5), unit_length=1e-9) def compute_field_diffs(sim): vals_demag = sim.get_field_as_dolfin_function( 'Demag', region='air').vector().array().reshape(3, -1) vals_dipole = sim.get_field_as_dolfin_function( 'DipolarField', region='air').vector().array().reshape(3, -1) absdiffs = np.linalg.norm(vals_demag - vals_dipole, axis=0) reldiffs = absdiffs / np.linalg.norm(vals_dipole, axis=0) return absdiffs, reldiffs @pytest.mark.slow @pytest.mark.xfail(reason='dolfin 1.5') def test_compare_stray_field_of_sphere_with_dipolar_field(tmpdir, debug=False): """ Check that the stray field of a sphere in an 'airbox' is close to the field of a point dipole with the same magnetic moment. """ os.chdir(str(tmpdir)) # Create a mesh of a sphere enclosed in an "airbox" m_init = [7, -4, 3] # some random magnetisation direction center_sphere = [0, 0, 0] r_sphere = 3 r_shell = 30 l_box = 100 maxh_sphere = 2.5 maxh_shell = None maxh_box = 10.0 Ms_sphere = 8.6e5 sim = sphere_inside_airbox( r_sphere, r_shell, l_box, maxh_sphere, maxh_shell, maxh_box, center_sphere, m_init) if debug: sim.render_scene(field_name='Demag', region='air', representation='Outline', outfile='ddd_demag_field_air.png') sim.render_scene(field_name='Demag', region='sphere', representation='Outline', outfile='ddd_demag_field_sphere.png') # Add an external field representing a point dipole # (with the same magnetic moment as the sphere). 
dipole_magnitude = Ms_sphere * 4 / 3 * pi * r_sphere ** 3 logger.debug("dipole_magnitude = {}".format(dipole_magnitude)) H_dipole = DipolarField( pos=[0, 0, 0], m=m_init, magnitude=dipole_magnitude) sim.add(H_dipole) # Check that the absolute and relative difference between the # stray field of the sphere and the field of the point dipole # are below a given tolerance. absdiffs, reldiffs = compute_field_diffs(sim) assert np.max(reldiffs) < 0.4 assert np.mean(reldiffs) < 0.15 assert np.max(absdiffs) < 140.0 print np.max(reldiffs), np.mean(reldiffs), np.max(absdiffs)
15,728
33.953333
97
py
finmag
finmag-master/src/finmag/energies/dmi.py
import logging
import dolfin as df
from aeon import timer
from finmag.field import Field
from energy_base import EnergyBase
from finmag.util.helpers import times_curl

logger = logging.getLogger('finmag')


class DMI(EnergyBase):

    """
    Compute the Dzyaloshinskii-Moriya Interaction (DMI) field.

    .. math::

        E_{\\text{DMI}} = \\int_\\Omega D \\vec{m} \\cdot
                          (\\nabla \\times \\vec{m}) dx

    *Arguments*

        D
            the DMI constant

        method
            See documentation of EnergyBase class for details.

        dmi_type
            Options are 'auto', '1d', '2d', '3d' and 'interfacial'.
            Default value is 'auto', which means the DMI term is
            automatically selected according to the mesh dimension.


    *Example of Usage*

        .. code-block:: python

            import dolfin as df
            from finmag.energies.dmi import DMI
            from finmag.field import Field

            # Define a mesh representing a cube with edge length L.
            L = 1e-8  # m
            n = 5
            mesh = df.BoxMesh(df.Point(0, 0, 0), df.Point(L, L, L), n, n, n)

            D = 5e-3  # J/m**2 DMI constant
            Ms = 0.8e6  # A/m magnetisation saturation

            # Initial magnetisation
            S3 = df.VectorFunctionSpace(mesh, 'CG', 1)
            m = Field(S3, (1, 0, 0))

            dmi = DMI(D)
            dmi.setup(m, Ms)

            # Compute DMI energy.
            E_dmi = dmi.compute_energy()

            # Compute DMI effective field.
            H_dmi = dmi.compute_field()

            # Using 'box-matrix-numpy' method (fastest for small matrices)
            dmi_np = DMI(D, method='box-matrix-numpy')
            dmi_np.setup(m, Ms)
            H_dmi_np = dmi_np.compute_field()

    """

    def __init__(self, D, method='box-matrix-petsc', name='DMI', dmi_type='auto'):
        self.D_value = D  # Value of D, later converted to a Field object.
        self.name = name
        self.dmi_type = dmi_type

        super(DMI, self).__init__(method, in_jacobian=True)

    @timer.method
    def setup(self, m, Ms, unit_length=1):
        # Create a DMI constant Field object D in the DG0 function space.
        dg_functionspace = df.FunctionSpace(m.mesh(), 'DG', 0)
        self.D = Field(dg_functionspace, self.D_value, name='D')
        del(self.D_value)

        # Multiplication factor used for dmi energy computation.
        self.dmi_factor = df.Constant(1.0/unit_length)

        # Use '==' for string comparison; 'is' only worked here by the
        # accident of CPython interning short string literals.
        if self.dmi_type == '1d':
            dmi_dim = 1
        elif self.dmi_type == '2d':
            dmi_dim = 2
        elif self.dmi_type == '3d':
            dmi_dim = 3
        else:
            dmi_dim = m.mesh_dim()

        # Select the right expression for computing the dmi energy.
        if self.dmi_type == 'interfacial':
            E_integrand = DMI_interfacial(m, self.dmi_factor*self.D.f,
                                          dim=dmi_dim)
        elif self.dmi_type == 'D2D':
            E_integrand = DMI_D2D(m, self.dmi_factor*self.D.f, dim=dmi_dim)
        else:
            E_integrand = self.dmi_factor*self.D.f*times_curl(m.f, dmi_dim)

        super(DMI, self).setup(E_integrand, m, Ms, unit_length)

        if self.method == 'direct':
            self.__setup_field_direct()

    def __setup_field_direct(self):
        dofmap = self.m.mesh_dofmap()
        S3 = df.VectorFunctionSpace(self.m.mesh(), "CG", 1, dim=3,
                                    constrained_domain=dofmap.constrained_domain)

        u3 = df.TrialFunction(S3)
        v3 = df.TestFunction(S3)
        self.g_petsc = df.PETScMatrix()
        df.assemble(-2*self.dmi_factor*self.D.f*df.inner(v3, df.curl(u3))*df.dx,
                    tensor=self.g_petsc)
        self.H_petsc = df.PETScVector()


def DMI_interfacial(m, D, dim):
    """
    Input arguments:

    m is a Field object on a 1d or 2d space,
    D is the DMI constant,
    dim is the mesh dimension.

    Returns the form to compute the DMI energy:

    D(m_x * dm_z/dx - m_z * dm_x/dx) +
    D(m_y * dm_z/dy - m_z * dm_y/dy) * df.dx

    References:
    [1] Rohart, S. and Thiaville A., Phys. Rev. B 88, 184422 (2013)

    """
    gradm = df.grad(m.f)

    dmxdx = gradm[0, 0]
    dmydx = gradm[1, 0]
    dmzdx = gradm[2, 0]

    if dim == 1:
        dmxdy = 0
        dmydy = 0
        dmzdy = 0
    else:
        # Works for both 2d mesh or 3d meshes.
        dmxdy = gradm[0, 1]
        dmydy = gradm[1, 1]
        dmzdy = gradm[2, 1]

    mx = m.f[0]
    my = m.f[1]
    mz = m.f[2]

    return D * (mx * dmzdx - mz * dmxdx) + D * (my * dmzdy - mz * dmydy)


def DMI_D2D(m, D, dim):
    """
    D2D type DMI:

        w = D (L_xz^y + L_yz^x)

    where

        L_ij^k = m_i dm_j/dk - m_j dm_i/dk

    Hence:

        w = D [(m_x dm_z/dy - m_z dm_x/dy) + (m_y dm_z/dx - m_z dm_y/dx)]
          = D (m_x dm_z/dy + m_y dm_z/dx - m_z (dm_x/dy + dm_y/dx))

    """
    if dim != 3:
        raise ValueError("This DMI type only works on 3-D meshes.")

    gradm = df.grad(m.f)

    dmxdy = gradm[0, 1]
    dmydx = gradm[1, 0]
    dmzdx = gradm[2, 0]
    dmzdy = gradm[2, 1]

    mx = m.f[0]
    my = m.f[1]
    mz = m.f[2]

    return D * (mx * dmzdy + my * dmzdx - mz * (dmxdy + dmydx))
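# Hedged usage sketch (ours, not original code): exercising the interfacial
# DMI branch on a 2d mesh, following the pattern of the class docstring.
# The parameter values are arbitrary illustrations.
if __name__ == "__main__":
    mesh = df.RectangleMesh(df.Point(0, 0), df.Point(50, 50), 10, 10)
    S3 = df.VectorFunctionSpace(mesh, 'CG', 1, dim=3)
    m = Field(S3, (0, 0, 1))
    Ms = Field(df.FunctionSpace(mesh, 'DG', 0), 8e5)

    dmi = DMI(3e-3, dmi_type='interfacial')
    dmi.setup(m, Ms, unit_length=1e-9)
    print "interfacial DMI energy:", dmi.compute_energy()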
5,280
29.703488
101
py
finmag
finmag-master/src/finmag/energies/random_thermal.py
import numpy as np
import dolfin as df
from finmag.util.consts import mu0, k_B
from finmag.util.meshes import mesh_volume


class RandomThermal(object):

    """
    Thermal field from

        E. Martínez, L. López-Díaz, L. Torres and C. J. García-Cervera.
        Minimizing cell size dependence in micromagnetics simulations
        with thermal noise. J. Phys. D: Appl. Phys., vol. 40,
        pages 942-948, 2007.

    and Phys. Rev. Lett. 90, 20 (2003) ???

    This field is not derived from an energy functional, and thus not a
    real magnetic field. It's just a mathematical representation of the
    thermal noise in the context of the theory of stochastic processes
    and doesn't have any physical meaning, other than the correct
    statistical properties of the noise.

    """

    def __init__(self, alpha, gamma):
        """
        alpha could be a numpy array or a number

        """
        self.in_jacobian = False
        self.alpha = alpha
        self.gamma = gamma
        self.last_update_t = 0
        self.dt = 0

    def setup(self, S3, m, Ms, unit_length=1):
        mesh = S3.mesh()
        n_dim = mesh.topology().dim()
        self.V = mesh_volume(mesh=mesh) * unit_length ** n_dim
        self.Ms = Ms
        self.output_shape = df.Function(S3).vector().array().shape

    def update(self, t, T):
        self.dt = t - self.last_update_t
        self.last_update_t = t
        self.T = T

    def compute_field(self):
        # update(t, T) must have been called at least once before this,
        # so that self.T is defined and self.dt is non-zero.
        # Note: np.random.normal takes 'size', not 'shape'.
        rnd = np.random.normal(loc=0.0, scale=1.0, size=self.output_shape)
        amplitude = np.sqrt(
            (10 * 2 * self.alpha * k_B * self.T) /
            (self.gamma * mu0 * self.Ms * self.V * self.dt))
        return amplitude * rnd

    def compute_energy(self):
        return 0
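# Hedged usage sketch (ours): RandomThermal needs update(t, T) before every
# compute_field() call so that dt and T are defined. All parameter values
# below are illustrative only.
if __name__ == "__main__":
    mesh = df.UnitCubeMesh(2, 2, 2)
    S3 = df.VectorFunctionSpace(mesh, "Lagrange", 1)
    m = df.Function(S3)

    thermal = RandomThermal(alpha=0.1, gamma=2.21e5)
    thermal.setup(S3, m, Ms=8.6e5, unit_length=1e-9)
    thermal.update(t=1e-13, T=300)  # dt = 1e-13 s, T = 300 K
    print thermal.compute_field().reshape((3, -1)).mean(1)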
1,736
28.948276
99
py
finmag
finmag-master/src/finmag/energies/thin_film_demag.py
import logging
import numpy as np
import dolfin as df

logger = logging.getLogger('finmag')


class ThinFilmDemag(object):

    """
    Demagnetising field for thin films in the i-direction.
    Hj = Hk = 0 and Hi = - Mi.

    """

    def __init__(self, direction="z", field_strength=None, in_jacobian=False, name='ThinFilmDemag'):
        """
        field_strength is Ms by default

        """
        assert direction in ["x", "y", "z"]
        # ord("x") == 120, so this maps "x", "y", "z" to 0, 1, 2.
        self.direction = ord(direction) - 120
        self.strength = field_strength
        self.in_jacobian = in_jacobian
        self.name = name

        in_jacobian_msg = "in Jacobian" if in_jacobian else "not in Jacobian"
        logger.debug("Creating {} object, {}.".format(
            self.__class__.__name__, in_jacobian_msg))

    def setup(self, m, Ms, unit_length):
        self.m = m
        self.H = np.zeros((3, m.mesh().num_vertices()))

        if self.strength is None:
            self.S1 = df.FunctionSpace(m.mesh(), "Lagrange", 1)
            self.volumes = df.assemble(df.TestFunction(self.S1) * df.dx)
            Ms = df.assemble(
                Ms.f * df.TestFunction(self.S1) * df.dx).array() / self.volumes.array()
            self.strength = Ms

    def compute_field(self):
        m = self.m.get_numpy_array_debug().view().reshape((3, -1))
        self.H[self.direction][:] = - self.strength * m[self.direction]
        return self.H.ravel()

    def compute_energy(self):
        return 0
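# Hedged usage sketch (ours): with an explicit field_strength no Ms
# averaging is needed, so setup stays simple. It uses the finmag Field
# wrapper in the same way as the tests; values are illustrative only.
if __name__ == "__main__":
    from finmag.field import Field

    mesh = df.UnitCubeMesh(2, 2, 2)
    S3 = df.VectorFunctionSpace(mesh, "Lagrange", 1)
    m = Field(S3, (0, 0, 1))

    demag = ThinFilmDemag(direction="z", field_strength=8.6e5)
    demag.setup(m, Ms=None, unit_length=1e-9)
    # Expect H = (0, 0, -8.6e5) everywhere for m = (0, 0, 1).
    print demag.compute_field().reshape((3, -1)).mean(1)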
1,491
30.083333
100
py
finmag
finmag-master/src/finmag/energies/magnetostatic_field_test.py
import numpy as np from finmag.util.consts import mu0 from finmag.energies.magnetostatic_field import MagnetostaticField def test_magnetostatic_field_for_uniformly_magnetised_sphere(): """ Our favourite test when it comes to demag! """ Ms = 1 m = np.array((1, 0, 0)) Nxx = Nyy = Nzz = 1.0 / 3.0 # demagnetising factors for sphere demag = MagnetostaticField(Ms, Nxx, Nyy, Nzz) H = demag.compute_field(m) H_expected = np.array((-1.0 / 3.0, 0.0, 0.0)) print "Got demagnetising field H =\n{}.\nExpected mean H = {}.".format(H, H_expected) TOL = 1e-15 diff = np.max(np.abs(H - H_expected)) print "Maximum difference to expected result per axis is {}. Comparing to limit {}.".format(diff, TOL) assert np.max(diff) < TOL def test_magnetostatic_energy_density_for_uniformly_magnetised_sphere(): Ms = 1 m = np.array((1, 0, 0)) Nxx = Nyy = Nzz = 1.0 / 3.0 # demagnetising factors for sphere demag = MagnetostaticField(Ms, Nxx, Nyy, Nzz) E = demag.compute_energy(m) E_expected = (1.0 / 6.0) * mu0 * Ms ** 2 print "Got E = {}. Expected E = {}.".format(E, E_expected) REL_TOL = 1e-15 rel_diff = abs(E - E_expected) / abs(E_expected) print "Relative difference is {:.3g}%. Comparing to limit {:.3g}%.".format( 100 * rel_diff, 100 * REL_TOL) assert rel_diff < REL_TOL
1,372
32.487805
106
py
finmag
finmag-master/src/finmag/energies/__init__.py
from init import *
19
9
18
py
finmag
finmag-master/src/finmag/energies/magnetostatic_field.py
import numpy as np from finmag.util.consts import mu0 EPS = 1e-15 class MagnetostaticField(object): """ Magnetostatic field for uniform magnetisation. The magnetostatic energy density for a uniform magnetisation can be written as: .. math:: U_{\\text{ms}} = \\frac{1}{2} \\mu_0 M_\\text{s}^2 \\left( N_{xx}m_x^2 + N_{yy}m_y^2 + N_{zz}m_z^2 \\right) where :math:`N_{xx}, N_{yy}, N_{zz}` are the shape-dependent demagnetising factors along the x, y, and z directions. The demagnetising factors sum to 1 and are smallest for directions along the longest dimensions of the magnetic element. The magnetostatic field is obtained by differentiating the energy with respect to the magnetisation: .. math:: \\vec{H}_{\\text{ms}} = - M_\\text{s} \\left( N_{xx}m_x \\hat{x} + N_{yy}m_y \\hat{y} + N_{zz}m_z \\hat{z} \\right) The magnetostatic field along a particular axis is proportional to the magnetisation along that axis and points opposite to the direction of the magnetisation. Since this class is more likely to be used in a macrospin model it doesn't try to fit in with the code written with the finite element method in mind. """ def __init__(self, Ms, Nxx, Nyy, Nzz): """ Initialise object with Ms and demagnetising factors. Ms is the saturation magnetisation in A/m and the demagnetising factors should be given as floats that sum to 1. """ assert abs( sum((Nxx, Nyy, Nzz)) - 1) < EPS, "Demagnetising factors do not sum to 1." self.Ms = Ms self.N = np.array((Nxx, Nyy, Nzz)) def compute_energy(self, m): """ Compute the magnetostatic energy density. The unit magnetisation m should be given as some kind of iterable of length 3. """ return mu0 * pow(self.Ms, 2) * np.dot(self.N, np.square(m)) / 2 def compute_field(self, m): """ Compute the magnetostatic field. The unit magnetisation m should be given as some kind of iterable of length 3. """ return - self.Ms * (self.N * m) # element-wise multiplication
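# Hedged usage sketch (ours, not original code): a thin film in the
# xy-plane has demagnetising factors close to (0, 0, 1), so an out-of-plane
# magnetisation feels H = -Ms along z while an in-plane one feels no field.
if __name__ == "__main__":
    demag = MagnetostaticField(Ms=8.6e5, Nxx=0.0, Nyy=0.0, Nzz=1.0)
    print "m along z:  ", demag.compute_field((0, 0, 1))  # -> (0, 0, -8.6e5)
    print "m in plane: ", demag.compute_field((1, 0, 0))  # -> (0, 0, 0)
    print "energy density (out of plane):", demag.compute_energy((0, 0, 1))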
2,238
29.671233
89
py
finmag
finmag-master/src/finmag/energies/thin_film_demag_test.py
import pytest import numpy as np import dolfin as df from finmag import Simulation as Sim from finmag.energies import ThinFilmDemag, Demag Ms = 8.6e5 def test_interaction_accepts_name(): """ Check that the interaction accepts a 'name' argument and has a 'name' attribute. """ demag = ThinFilmDemag() assert hasattr(demag, 'name') def compare_with_demag_from_initial_m(H_gen, m_init, atol=0, rtol=0): sim = Sim(df.UnitCubeMesh(2, 2, 2), Ms, unit_length=1e-9) sim.set_m(m_init) demag = ThinFilmDemag() sim.add(demag) H_computed = demag.compute_field() H_expected = H_gen(sim.m) diff = np.abs(H_computed - H_expected) print "Expected, with shape {}:\n".format(H_expected.shape), H_expected print "Got, with shape {}:\n".format(H_computed.shape), H_computed print "Difference:\n", diff assert np.allclose(H_computed, H_expected, atol=atol, rtol=rtol) def test_zero_thin_film_demag(): compare_with_demag_from_initial_m( lambda m: np.zeros(m.shape), (1, 0, 0), atol=1e-14) compare_with_demag_from_initial_m( lambda m: np.zeros(m.shape), (1, 1, 0), atol=1e-14) def test_thin_film_demag(): compare_with_demag_from_initial_m(lambda m: -Ms * m, (0, 0, 1), rtol=1e-14) @pytest.mark.slow def test_thin_film_demag_against_real_demag(): sim = Sim(df.BoxMesh(df.Point(0, 0, 0), df.Point(500e-9, 500e-9, 1e-9), 50, 50, 1), Ms) sim.set_m((0, 0, 1)) tfdemag = ThinFilmDemag() sim.add(tfdemag) H_tfdemag = tfdemag.compute_field().view().reshape((3, -1)).mean(1) demag = Demag() sim.add(demag) H_demag = demag.compute_field().view().reshape((3, -1)).mean(1) diff = np.abs(H_tfdemag - H_demag) / Ms print "Standard Demag:\n", H_demag print "ThinFilmDemag:\n", H_tfdemag print "Difference relative to Ms:\n", diff assert np.allclose(H_tfdemag, H_demag, atol=0.05 * Ms) # 5% of Ms sim.set_m((1, 0, 0)) H_tfdemag = tfdemag.compute_field().view().reshape((3, -1)).mean(1) H_demag = demag.compute_field().view().reshape((3, -1)).mean(1) print "Running again, changed m in the meantime." diff = np.abs(H_tfdemag - H_demag) / Ms print "Standard Demag:\n", H_demag print "ThinFilmDemag:\n", H_tfdemag print "Difference relative to Ms:\n", diff assert np.allclose(H_tfdemag, H_demag, atol=0.005 * Ms) # 0.5% of Ms
2,383
31.657534
91
py
finmag
finmag-master/src/finmag/energies/anisotropy.py
import logging
import dolfin as df
import numpy as np
from aeon import timer
from energy_base import EnergyBase
from finmag.field import Field
from finmag.util import helpers
from finmag.util.consts import mu0
from finmag.native import llg as native_llg

logger = logging.getLogger('finmag')


class UniaxialAnisotropy(EnergyBase):

    """
    Compute the uniaxial anisotropy field.

    .. math::

        E_{\\text{anis}} = \\int_\\Omega K_1 (1 - (\\vec{a} \\cdot \\vec{m})^2)
                           - K_2 (\\vec{a} \\cdot \\vec{m})^4 dx

    *Arguments*
        K1
            The anisotropy constant
        K2
            The anisotropy constant (default=0)
        axis
            The easy axis. Should be a unit vector.
        Ms
            The saturation magnetisation.
        method
            The method used to compute the anisotropy field.
            For alternatives and explanation, see EnergyBase class.

    *Example of Usage*
        .. code-block:: python

            import dolfin as df
            from finmag import UniaxialAnisotropy
            from finmag.field import Field

            L = 1e-8; nL = 5
            mesh = df.BoxMesh(df.Point(0, 0, 0), df.Point(L, L, L), nL, nL, nL)

            S3 = df.VectorFunctionSpace(mesh, 'Lagrange', 1)
            K = 520e3  # For Co (J/m3)

            a = df.Constant((0, 0, 1))  # Easy axis in z-direction
            m = Field(S3, df.Constant((1, 0, 0)))  # Initial magnetisation
            Ms = Field(df.FunctionSpace(mesh, 'DG', 0), 1e6)

            anisotropy = UniaxialAnisotropy(K, a)
            anisotropy.setup(m, Ms)

            # Print energy
            print anisotropy.compute_energy()

            # Assign anisotropy field
            H_ani = anisotropy.compute_field()

    """

    def __init__(self, K1, axis, K2=0, method="box-matrix-petsc", name='Anisotropy', assemble=True):
        """
        Define a uniaxial anisotropy with (first) anisotropy constant `K1`
        (in J/m^3) and easy axis `axis`.

        K1 and axis can be passed as df.Constant or df.Function, although
        automatic conversion will be attempted from float for K1 and a
        sequence type for axis. It is possible to specify spatially
        varying anisotropy by using df.Functions.

        """
        self.K1_value = K1
        self.K2_value = K2
        self.axis_value = axis
        self.name = name
        self.assemble = assemble
        super(UniaxialAnisotropy, self).__init__(method, in_jacobian=True)

        if K2 != 0:
            self.assemble = False

    @timer.method
    def setup(self, m, Ms, unit_length=1):
        """
        Function to be called after the energy object has been constructed.

        *Arguments*

            m
                magnetisation field (usually normalised)

            Ms
                Saturation magnetisation field

            unit_length
                real length of 1 unit in the mesh

        """
        assert isinstance(m, Field)
        assert isinstance(Ms, Field)

        cg_scalar_functionspace = df.FunctionSpace(m.mesh(), 'CG', 1)
        self.K1 = Field(cg_scalar_functionspace, self.K1_value, name='K1')
        self.K2 = Field(cg_scalar_functionspace, self.K2_value, name='K2')

        cg_vector_functionspace = df.VectorFunctionSpace(m.mesh(), 'CG', 1, 3)
        self.axis = Field(cg_vector_functionspace, self.axis_value, name='axis')

        # Anisotropy energy
        # HF's version inline with nmag, breaks comparison with analytical
        # solution in the energy density test for anisotropy, as this uses
        # the Scholz-Magpar method. Should anyway be an easy fix when we
        # decide on method.
        # FIXME: we should use DG0 space here?
E_integrand = self.K1.f * \ (df.Constant(1) - (df.dot(self.axis.f, m.f)) ** 2) if self.K2_value != 0: E_integrand -= self.K2.f * df.dot(self.axis.f, m.f) ** 4 del(self.K1_value) del(self.K2_value) del(self.axis_value) super(UniaxialAnisotropy, self).setup(E_integrand, m, Ms, unit_length) if not self.assemble: self.H = self.m.get_numpy_array_debug() self.Ms = self.Ms.get_numpy_array_debug() self.u = self.axis.get_numpy_array_debug() self.K1_arr = self.K1.get_numpy_array_debug() self.K2_arr = self.K2.get_numpy_array_debug() self.volumes = df.assemble(df.TestFunction(cg_scalar_functionspace) * df.dx) self.compute_field = self.__compute_field_directly def __compute_field_directly(self): m = self.m.get_numpy_array_debug() m.shape = (3, -1) self.H.shape = (3, -1) self.u.shape = (3, -1) native_llg.compute_anisotropy_field( m, self.Ms, self.H, self.u, self.K1_arr, self.K2_arr) m.shape = (-1,) self.H.shape = (-1,) self.u.shape = (-1,) return self.H
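# Hedged sketch (ours): the class docstring notes that K1 may be a
# spatially varying dolfin Function/Expression. This exercises that with a
# K1 that differs between two halves of a bar; it assumes the EnergyBase
# plumbing shown above, and all values are illustrative only.
if __name__ == "__main__":
    mesh = df.BoxMesh(df.Point(0, 0, 0), df.Point(20, 2, 2), 10, 1, 1)
    S3 = df.VectorFunctionSpace(mesh, 'CG', 1, dim=3)
    m = Field(S3, (1 / 2 ** 0.5, 0, 1 / 2 ** 0.5))  # 45 degrees off easy axis
    Ms = Field(df.FunctionSpace(mesh, 'DG', 0), 8.6e5)

    K1_expr = df.Expression('x[0] < 10 ? 5.2e5 : 1e5', degree=1)
    anis = UniaxialAnisotropy(K1_expr, (0, 0, 1))
    anis.setup(m, Ms, unit_length=1e-9)
    print "anisotropy energy with spatially varying K1:", anis.compute_energy()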
4,846
30.888158
100
py
finmag
finmag-master/src/finmag/energies/zeeman.py
import logging import dolfin as df import numpy as np from finmag.field import Field from finmag.util.consts import mu0 from finmag.util.meshes import nodal_volume from finmag.util import helpers from math import pi, cos log = logging.getLogger("finmag") class Zeeman(object): def __init__(self, H, name='Zeeman', **kwargs): """ Specify an external field (in A/m). H can have any of the forms accepted by the function 'finmag.util.helpers.vector_valued_function' (see its docstring for details). """ self.H_value = H self.name = name self.kwargs = kwargs self.in_jacobian = False def setup(self, m, Ms, unit_length=1): """ Function to be called after the energy object has been constructed. *Arguments* m magnetisation field (usually normalised) Ms Saturation magnetisation (scalar, or scalar dolfin function) unit_length real length of 1 unit in the mesh """ self.m = m self.Ms = Ms self.unit_length = unit_length dofmap = self.m.functionspace.dofmap() self.S1 = df.FunctionSpace( m.mesh(), "Lagrange", 1, constrained_domain=dofmap.constrained_domain) # self.dim = S3.mesh().topology().dim() # self.nodal_volume_S1 = nodal_volume(self.S1, self.unit_length) self.set_value(self.H_value, **self.kwargs) def set_value(self, value, **kwargs): """ Set the value of the field (in A/m). `value` can have any of the forms accepted by the function 'finmag.util.helpers.vector_valued_function' (see its docstring for details). """ self.value = value dofmap = self.m.functionspace.dofmap() dg_vector_functionspace = df.VectorFunctionSpace(self.m.mesh(), 'CG', 1, 3, constrained_domain=dofmap.constrained_domain) self.H = Field(dg_vector_functionspace, value, name='H_ext') self.E = - mu0 * self.Ms.f * df.dot(self.m.f, self.H.f) # Energy density. def average_field(self): """ Compute the average applied field. """ return helpers.average_field(self.compute_field()) def compute_field(self): return self.H.get_numpy_array_debug() def compute_energy(self, dx=df.dx): dim = self.m.mesh_dim() E = df.assemble(self.E * dx) * self.unit_length ** dim return E def energy_density(self): """ Return energy density (as a finmag.Field object). """ # Previous version #return df.project(df.dot(self.m.f, self.H.f) * self.Ms.f * -mu0, self.S1).vector().array() # # New version using the Field class. Note that this is # currently ca. 6-7 times *slower* than the version above. # However, it is slightly more accurate (no rounding errors # due to projection), and we should be able to tune # performance in the Field class. return self.m.dot(self.H) * self.Ms * -mu0 def energy_density_function(self): if not hasattr(self, "E_density_function"): self.E_density_function = self.energy_density().f return self.E_density_function class DipolarField(Zeeman): def __init__(self, pos, m, magnitude=None, name='DipolarField'): """ Magnetostatic field of a point dipole at position `pos` with a fixed magnetic moment. If `magnitude` is `None`, the magnetic moment is simply given by `m`. Otherwise `m` is interpreted only as the *direction* of the magnetic moment and `magnitude` as its magnitude, i.e. the magnetic moment is given by: magnitude * (m / |m|) """ # XXX TODO: Check whether pos coincides with a mesh point and shift it by # an infinitesimal amount if so! 
self.pos = np.asarray(pos) if magnitude is None: self.m = np.asarray(m) else: self.m = magnitude * np.asarray(m) / np.linalg.norm(m) def H_fun(pt): v = self.pos - pt r = np.linalg.norm(v) #n = v / np.linalg.norm(v) return 1.0 / (4 * pi) * (3 * v * np.dot(self.m, v) / r ** 5 - self.m / r ** 3) Hx_expr = '1/(4*pi) * (3*(mx*(x[0]-posx)+my*(x[1]-posy)+mz*(x[2]-posz)) / pow(sqrt((x[0]-posx)*(x[0]-posx)+(x[1]-posy)*(x[1]-posy)+(x[2]-posz)*(x[2]-posz)), 5) * (x[0]-posx) - mx / pow(sqrt((x[0]-posx)*(x[0]-posx)+(x[1]-posy)*(x[1]-posy)+(x[2]-posz)*(x[2]-posz)), 3))' Hy_expr = '1/(4*pi) * (3*(mx*(x[0]-posx)+my*(x[1]-posy)+mz*(x[2]-posz)) / pow(sqrt((x[0]-posx)*(x[0]-posx)+(x[1]-posy)*(x[1]-posy)+(x[2]-posz)*(x[2]-posz)), 5) * (x[1]-posy) - my / pow(sqrt((x[0]-posx)*(x[0]-posx)+(x[1]-posy)*(x[1]-posy)+(x[2]-posz)*(x[2]-posz)), 3))' Hz_expr = '1/(4*pi) * (3*(mx*(x[0]-posx)+my*(x[1]-posy)+mz*(x[2]-posz)) / pow(sqrt((x[0]-posx)*(x[0]-posx)+(x[1]-posy)*(x[1]-posy)+(x[2]-posz)*(x[2]-posz)), 5) * (x[2]-posz) - mz / pow(sqrt((x[0]-posx)*(x[0]-posx)+(x[1]-posy)*(x[1]-posy)+(x[2]-posz)*(x[2]-posz)), 3))' H_expr = df.Expression([Hx_expr, Hy_expr, Hz_expr], mx=self.m[0], my=self.m[1], mz=self.m[2], posx=pos[0], posy=pos[1], posz=pos[2], degree=1) #super(DipolarField, self).__init__(H_fun, name=name) super(DipolarField, self).__init__(H_expr, name=name) class TimeZeeman(Zeeman): def __init__(self, field_expression, t_off=None, name='TimeZeeman'): """ Specify a time dependent external field (in A/m), which gets updated as continuously as possible. Pass in a dolfin expression that depends on time. Make sure the time variable is called t. It will get refreshed by calls to update. The argument t_off can specify a time at which the field will get switched off. Alternatively, `field_expression` can be a 3-array representing a constant field. In this case `t_off` must be specified, otherwise a ValueError is raised (this is a safety measure because in this case there would be no time update at all, so it's likely that the user intended to do something else). """ if isinstance(field_expression, (list, tuple, np.ndarray)): field_expression = np.asarray(field_expression) if not field_expression.shape == (3,): raise ValueError( "If field_expression is not a dolfin expression, it must " "be a 3-array (representing a constant external field)") if t_off is None: raise ValueError( "The argument 'field_expression' is a constant array, but " "t_off was not specified so there will be no time update " "at all. Use the Zeeman class instead of TimeZeeman if " "this is what you really want.") # Convert the array to a dolfin constant so that we can proceed as # normal field_expression = df.Constant(map(str, field_expression)) assert isinstance(field_expression, (df.Expression, df.Constant)) super(TimeZeeman, self).__init__(field_expression, name=name) # TODO: Maybe set a 'checkpoint' for the time integrator at # time t_off? (See comment in update() below.) self.t_off = t_off self.switched_off = False def update(self, t): if not self.switched_off: if self.t_off and t >= self.t_off: # TODO: It might be cleaner to explicitly set a # 'checkpoint' for the time integrator at time # t_off, otherwise there is the possibility of # it slightly "overshooting" and thus missing # the exact time the field is switched off. 
# (This should probably happen in __init__) self.switch_off() return self.value.t = t self.H.set(self.value) self.H.name = 'H_ext' def switch_off(self): # It might be nice to provide the option to remove the Zeeman # interaction from the simulation altogether (or at least # provide an option to do so) in order to avoid computing the # Zeeman energy at all once the field is switched off. log.debug("Switching external field off.") dofmap = self.m.functionspace.dofmap() self.H = Field(df.VectorFunctionSpace(self.m.mesh(), 'CG', 1, 3, constrained_domain=dofmap.constrained_domain), (0, 0, 0)) self.value = None self.switched_off = True class DiscreteTimeZeeman(TimeZeeman): def __init__(self, field_expression, dt_update=None, t_off=None, name='DiscreteTimeZeeman'): """ Specify a time dependent external field which gets updated in discrete time intervals. Pass in a dolfin expression that depends on time. Make sure the time variable is called t. It will get refreshed by calls to update, if more than dt_update time has passed since the last refresh. The argument t_off can specify a time at which the field will get switched off. If t_off is provided, dt_update can be `None` so that the field remains constant until it is switched off. """ if dt_update is None and t_off is None: raise ValueError("At least one of the arguments 'dt_update' and " "'t_off' must be given.") super(DiscreteTimeZeeman, self).__init__( field_expression, t_off, name=name) self.dt_update = dt_update self.t_last_update = 0.0 def update(self, t): if not self.switched_off: if self.t_off and t >= self.t_off: self.switch_off() return if self.dt_update is not None: dt_since_last_update = t - self.t_last_update if dt_since_last_update >= self.dt_update: self.value.t = t dofmap = self.m.functionspace.dofmap() dg_vector_functionspace = df.VectorFunctionSpace(self.m.mesh(), 'CG', 1, 3, constrained_domain=dofmap.constrained_domain) self.H = Field(dg_vector_functionspace, self.value, name='H_ext') log.debug("At t={}, after dt={}, update external field again.".format( t, dt_since_last_update)) class TimeZeemanPython(TimeZeeman): def __init__(self, df_expression, time_fun, t_off=None, name='TimeZeemanPython'): """ Faster version of the TimeZeeman class for the special case that only the amplitude (or the direction) of the field varies over time. That is, if `H_0` denotes the field at time t=0 then the field value at some point `x` at time `t` is assumed to be of the form: H(t, x) = H_0(x) * time_fun(t) In this situation, the dolfin.interpolate method only needs to be evaluated once at the beginning for the spatial expression, which saves a lot of computational effort. *Arguments* df_expression : dolfin.Expression The dolfin Expression representing the inital field value (this can vary spatially but must not depend on time). time_fun : callable Function representing the scaling factor for the amplitude at time. Note that if the given dolfin expression is a scalar, then the time_fun have to return a 3d vector, for example, a spatial rotational field around x-axis could be expressed as, Hy = h0(x,y,z)*cos(wt) Hz = h0(x,y,z)*sin(wt) t_off : float Time at which the field is switched off. 
""" assert isinstance(df_expression, (df.Expression, df.Constant)) self.df_expression = df_expression self.time_fun = time_fun self.t_off = t_off self.switched_off = False self.name = name self.in_jacobian = False self.scalar_df_expression = False if df_expression.value_size() == 1: self.scalar_df_expression = True def setup(self, m, Ms, unit_length=1): self.m = m self.Ms = Ms self.unit_length = unit_length if self.scalar_df_expression: dofmap = m.functionspace.dofmap() self.S1 = df.FunctionSpace( m.mesh(), "Lagrange", 1, constrained_domain=dofmap.constrained_domain) self.h0 = helpers.scalar_valued_function( self.df_expression, self.S1).vector().array() self.H0 = df.Function(m.functionspace) else: self.H0 = helpers.vector_valued_function( self.df_expression, self.m.functionspace) self.E = - mu0 * self.Ms.f * df.dot(self.m.f, self.H0) self.H_init = self.H0.vector().array() self.H = self.H_init.copy() def update(self, t): if not self.switched_off: if self.t_off and t >= self.t_off: self.switch_off() return if self.scalar_df_expression: tx, ty, tz = self.time_fun(t) self.H.shape = (3, -1) self.H[0, :] = self.h0 * tx self.H[1, :] = self.h0 * ty self.H[2, :] = self.h0 * tz self.H.shape = (-1,) else: self.H[:] = self.H_init[:] * self.time_fun(t) return self # for use in list comprehensions def switch_off(self): # It might be nice to provide the option to remove the Zeeman # interaction from the simulation altogether (or at least # provide an option to do so) in order to avoid computing the # Zeeman energy at all once the field is switched off. log.debug("Switching external field off.") self.H = np.zeros_like(self.H) self.value = None self.switched_off = True def average_field(self): """ Compute the average applied field. """ return helpers.average_field(self.compute_field()) def compute_field(self): return self.H def compute_energy(self, dx=df.dx): self.H0.vector().set_local(self.H) E = df.assemble(self.E * dx) * self.unit_length ** 3 return E class OscillatingZeeman(TimeZeemanPython): def __init__(self, H0, freq, phase=0, t_off=None, name='OscillatingZeeman'): """ Create a field which is constant in space but whose amplitude varies sinusoidally with the given frequency and phase. More precisely, the field value at time t is: H(t) = H0 * cos(2*pi*freq*t + phase) Where H0 is a constant 3-vector representing the 'base field'. *Arguments* H0 : 3-vector The constant 'base field' which is scaled by the oscillating amplitude. freq : float The oscillation frequency. phase : float The phase of the oscillation. t_off : float Time at which the field is switched off. """ H0_expr = df.Constant(map(str, H0)) def amplitude(t): return cos(2 * pi * freq * t + phase) super(OscillatingZeeman, self).__init__( H0_expr, amplitude, t_off=t_off, name=name)
15,581
37.474074
276
py