repo (string, lengths 2-99) | file (string, lengths 13-225) | code (string, lengths 0-18.3M) | file_length (int64, 0-18.3M) | avg_line_length (float64, 0-1.36M) | max_line_length (int64, 0-4.26M) | extension_type (string, 1 distinct value)
---|---|---|---|---|---|---
finmag
|
finmag-master/dev/setup.py
|
#!/usr/bin/env python
# 2015-04-24, Created by H Fuchs <[email protected]>
# This does nothing more than installing the already-created
# finmag-tree.
from distutils.core import setup
setup(name = 'Finmag',
version = '1.0',
description = 'Finmag',
author = 'Hans Fangohr et al',
author_email = '[email protected]',
package_dir = {'': '/tmp/finmag/'},
packages = [
'finmag',
'finmag.scheduler',
'finmag.example.normal_modes',
'finmag.example',
'finmag.drivers',
'finmag.normal_modes.eigenmodes',
'finmag.normal_modes',
'finmag.normal_modes.deprecated',
'finmag.energies',
'finmag.energies.demag',
'finmag.tests.jacobean',
'finmag.tests',
'finmag.tests.demag',
'finmag.util',
'finmag.util.ode',
'finmag.util.ode.tests',
'finmag.util.tests',
'finmag.util.oommf',
'finmag.native',
'finmag.sim',
'finmag.physics',
'finmag.physics.llb',
'finmag.physics.llb.tests'
]
)
| 1,208 | 29.225 | 60 |
py
|
finmag
|
finmag-master/dev/sandbox/sumatra_template/main.py
|
import os
import sys
from sumatra.parameters import build_parameters
# The following line is important because Sumatra creates a parameter file
# 'on the fly' and passes its name to the script, so we should *not* use a
# hard-coded filename here.
paramsfile = sys.argv[1]
parameters = build_parameters(paramsfile)
# I like printing the sumatra label of this run:
smt_label = parameters['sumatra_label']
print "Sumatra label for this run: {}".format(smt_label)
sys.stdout.flush()
# Change into the datastore directory to run the simulation there.
# Note that this has to happen *after* reading the parameter above,
# otherwise it won't find the parameter file.
os.chdir(os.path.join('Data', smt_label))
# The variable 'parameters' defined above is a dictionary associating
# each parameter name with its value, so we can use this neat trick to
# make the parameters available as global variables:
globals().update(parameters)
# Alternatively, if we don't want to resort to "black magic", we can
# assign each parameter value separately to a variable:
Msat = parameters['Msat']
H_ext = parameters['H_ext']
A = parameters['A']
# ... etc. ...
#
# The main part of the script follows here.
#
| 1,194 | 31.297297 | 74 |
py
|
finmag
|
finmag-master/dev/sandbox/periodic/1d_pbc.py
|
import dolfin as df
import numpy as np
df.parameters.reorder_dofs_serial = False
#from finmag.energies.exchange import Exchange
#from finmag.util.consts import mu0
mu0=np.pi*4e-7
import matplotlib as mpl
mpl.use("Agg")
import matplotlib.pyplot as plt
class PeriodicBoundary1(df.SubDomain):
def __init__(self, mesh):
super(PeriodicBoundary1, self).__init__()
self.mesh = mesh
self.dim=self.mesh.topology().dim()
def inside(self, x, on_boundary):
return df.near(x[0],0) and on_boundary
def map(self, x, y):
y[0] = x[0] - 1.0
if self.dim > 1:
y[1] = x[1]
if self.dim > 2:
y[2] = x[2]
print x,y
def exchange(mesh,m):
V = df.FunctionSpace(mesh, "Lagrange", 1)
u1 = df.TrialFunction(V)
v1 = df.TestFunction(V)
K = df.assemble(df.inner(df.grad(u1), df.grad(v1)) * df.dx).array()
L = df.assemble(v1 * df.dx).array()
h=-np.dot(K,m)/L
print K.shape,'\n',K
print 'L=',L
print 'm',m
return h
def periodic_exchange(mesh,m):
    V = df.FunctionSpace(mesh, "Lagrange", 1, constrained_domain=PeriodicBoundary1(mesh))
u1 = df.TrialFunction(V)
v1 = df.TestFunction(V)
K = df.assemble(df.inner(df.grad(u1), df.grad(v1)) * df.dx).array()
print K.shape,'\n',K
L = df.assemble(v1 * df.dx).array()
print L
print m
h=-np.dot(K,m)/L
#h[-1]=h[0]
return h
def plot_m(mesh,m,xs,m_an,m2,name):
fig=plt.figure()
plt.plot(xs,m_an,label='Analytical')
openbc=exchange(mesh,m)
plt.plot(xs,openbc,'--',label='OpenBC')
pbc=periodic_exchange(mesh,m2)
plt.plot(xs[:-1],pbc,'^',label='PBC')
plt.legend()
fig.savefig(name)
if __name__ == "__main__":
mesh = df.UnitSquareMesh(1,1)
mesh = df.BoxMesh(0,0,0,1,1,1,1,1,2)
S=df.FunctionSpace(mesh, "Lagrange", 1, constrained_domain=PeriodicBoundary1(mesh))
"""
mesh = df.UnitIntervalMesh(10)
S = df.FunctionSpace(mesh, "Lagrange", 1)
expr = df.Expression('cos(2*pi*x[0])')
M = df.interpolate(expr, S)
#df.plot(mesh)
#df.interactive()
S2 = df.FunctionSpace(mesh, "Lagrange", 1, constrained_domain=PeriodicBoundary1())
M2 = df.interpolate(expr, S2)
xs=mesh.coordinates().flatten()
m_an=-(2*np.pi)**2*np.cos(2*np.pi*xs)
plot_m(mesh,M.vector().array(),xs,m_an,M2.vector().array(),'1d_cos.png')
"""
| 2,492 | 20.491379 | 87 |
py
|
finmag
|
finmag-master/dev/sandbox/periodic/2d_pbc.py
|
import dolfin as df
import numpy as np
df.parameters.reorder_dofs_serial = False
#from finmag.energies.exchange import Exchange
#from finmag.util.consts import mu0
mu0=np.pi*4e-7
import matplotlib as mpl
mpl.use("Agg")
import matplotlib.pyplot as plt
class PeriodicBoundary2D(df.SubDomain):
def inside(self, x, on_boundary):
on_x = bool(df.near(x[0],0) and x[1]<1.0 and on_boundary)
on_y = bool(df.near(x[1],0) and x[0]<1.0 and on_boundary)
return on_x or on_y
def map(self, x, y):
y[0] = x[0] - 1.0
y[1] = x[1] - 1.0
if df.near(x[0],1) and x[1]<1.0:
y[1] = x[1]
if df.near(x[1],1) and x[0]<1.0:
y[0] = x[0]
y[2]=x[2]
print x,y
def exchange(mesh,m):
V = df.FunctionSpace(mesh, "Lagrange", 1)
u1 = df.TrialFunction(V)
v1 = df.TestFunction(V)
K = df.assemble(df.inner(df.grad(u1), df.grad(v1)) * df.dx).array()
L = df.assemble(v1 * df.dx).array()
h=-np.dot(K,m)/L
print K.shape,'\n',K
print 'L=',L
print 'm',m
return h
def periodic_exchange(mesh,m):
V = df.FunctionSpace(mesh, "Lagrange", 1, constrained_domain=PeriodicBoundary2D())
u1 = df.TrialFunction(V)
v1 = df.TestFunction(V)
K = df.assemble(df.inner(df.grad(u1), df.grad(v1)) * df.dx).array()
print K.shape,'\n',K
L = df.assemble(v1 * df.dx).array()
print L
print m
h=-np.dot(K,m)/L
#h[-1]=h[0]
return h
if __name__ == "__main__":
#mesh = df.UnitSquareMesh(4,4)
mesh = df.BoxMesh(0,0,0,1,1,1,2,2,1)
S = df.FunctionSpace(mesh, "Lagrange", 1)
S2 = df.FunctionSpace(mesh, "Lagrange", 1, constrained_domain=PeriodicBoundary2D())
"""
expr = df.Expression('cos(2*pi*x[0])')
M = df.interpolate(expr, S)
df.plot(M)
#df.interactive()
S2 = df.FunctionSpace(mesh, "Lagrange", 1, constrained_domain=PeriodicBoundary2D())
M2 = df.interpolate(expr, S2)
print exchange(mesh,M.vector().array())
print '='*100
print periodic_exchange(mesh,M2.vector().array())
"""
| 2,237 | 20.314286 | 87 |
py
|
finmag
|
finmag-master/dev/sandbox/periodic/demag/energy.py
|
import numpy as np
import matplotlib as mpl
mpl.use("Agg")
import matplotlib.pyplot as plt
import dolfin as df
from finmag import Simulation
from finmag.energies import Exchange, DMI, Demag, Zeeman, UniaxialAnisotropy
from finmag.energies.zeeman import TimeZeemanPython
import matplotlib.pyplot as plt
from finmag.util.fileio import Tablereader
mesh = df.BoxMesh(0, 0, 0, 10, 10, 10, 2, 2, 2)
#mesh = df.BoxMesh(0, 0, 0, 30, 30, 100, 6, 6, 20)
#df.plot(mesh, interactive=True)
def relax_system():
Ms = 8.6e5
sim = Simulation(mesh, Ms, unit_length=1e-9, name = 'dy', pbc='1d')
sim.alpha = 0.01
sim.set_m((0.8,0.6,1))
sim.set_tol(1e-8, 1e-8)
A = 1.3e-11
sim.add(Exchange(A))
parameters = {
'absolute_tolerance': 1e-10,
'relative_tolerance': 1e-10,
'maximum_iterations': int(1e5)
}
Ts = []
for i in range(-9,10):
Ts.append((10*i,0,0))
demag = Demag(solver='Treecode')
demag.parameters['phi_1'] = parameters
demag.parameters['phi_2'] = parameters
sim.add(demag)
demag.compute_field()
sim.schedule('save_ndt', every=2e-12)
#sim.schedule('save_vtk', every=2e-12, filename='vtks/m.pvd')
#sim.schedule('save_m', every=2e-12, filename='npys/m.pvd')
sim.run_until(0.2e-9)
def plot_mx(filename='dy.ndt'):
data = Tablereader(filename)
ts=data['time']/1e-9
fig=plt.figure()
plt.plot(ts, data['E_total'], label='Total')
#plt.plot(ts, data['E_Demag'], label='Demag')
#plt.plot(ts, data['E_Exchange'], label='Exchange')
plt.xlabel('time (ns)')
plt.ylabel('energy')
plt.legend()
fig.savefig('energy.pdf')
if __name__ == '__main__':
#relax()
relax_system()
plot_mx()
| 1,831 | 21.9 | 76 |
py
|
finmag
|
finmag-master/dev/sandbox/periodic/demag/1d_test/demag_pbc_1d_ringdown.py
|
import numpy as np
import matplotlib as mpl
mpl.use("Agg")
import matplotlib.pyplot as plt
import dolfin as df
from finmag import Simulation
from finmag.energies import Exchange, DMI, Demag, Zeeman, UniaxialAnisotropy
from finmag.energies.zeeman import TimeZeemanPython
import matplotlib.pyplot as plt
from finmag.util.fileio import Tablereader
mesh = df.BoxMesh(-5, -5, -5, 5, 5, 5, 5, 5, 5)
def ring_down(m0=(1,0.01,0), pbc=None):
n = 49
assert n>=1 and n%2==1
Ms = 8.6e5
sim = Simulation(mesh, Ms, unit_length=1e-9, name = 'dy', pbc='1d')
sim.alpha = 1e-3
sim.set_m(m0)
sim.set_tol(1e-8, 1e-8)
parameters = {
'absolute_tolerance': 1e-10,
'relative_tolerance': 1e-10,
'maximum_iterations': int(1e5)
}
Ts = []
for i in range(-n/2+1,n/2+1):
Ts.append((10.*i,0,0))
demag = Demag(Ts=Ts)
demag.parameters['phi_1'] = parameters
demag.parameters['phi_2'] = parameters
sim.add(demag)
sim.add(Exchange(1.3e-11))
sim.schedule('save_ndt', every=2e-12)
sim.run_until(4e-9)
def plot_mx(filename='dy.ndt'):
    data = Tablereader(filename)
    ts = data['time']/1e-9
    fig=plt.figure()
    plt.plot(ts, data['E_total'], label='Total')
#plt.plot(ts, data['E_Demag'], label='Demag')
#plt.plot(ts, data['E_Exchange'], label='Exchange')
plt.xlabel('time (ns)')
plt.ylabel('energy')
plt.legend()
fig.savefig('energy.pdf')
def fft(mx, dt=5e-12):
n = len(mx)
freq = np.fft.fftfreq(n, dt)
ft_mx = np.fft.fft(mx)
ft_abs = np.abs(ft_mx)
ft_phase = np.angle(ft_mx)
return freq, ft_abs, ft_phase
def plot_average_fft():
data = Tablereader('dy.ndt')
ts = data['time']
my = data['m_y']
dt = ts[1]-ts[0]
freq, ft_abs, phase = fft(my, dt)
fig=plt.figure()
plt.subplot(2,1,1)
plt.plot(ts*1e9,my,label='Real')
plt.xlabel('Time (ns)')
plt.ylabel('m_y')
plt.subplot(2,1,2)
plt.plot(freq*1e-9,ft_abs,'.-',label='Real')
plt.xlabel('Frequency (GHz)')
plt.ylabel('FFT')
plt.xlim([10,20])
#plt.ylim([0,10])
fig.savefig('average_fft.pdf')
if __name__ == '__main__':
#ring_down()
plot_average_fft()
| 2,294 | 19.491071 | 76 |
py
|
finmag
|
finmag-master/dev/sandbox/periodic/demag/1d_test/demag_pbc_1d.py
|
import numpy as np
import matplotlib as mpl
mpl.use("Agg")
import matplotlib.pyplot as plt
import dolfin as df
from finmag import Simulation
from finmag.energies import Exchange, DMI, Demag, Zeeman, UniaxialAnisotropy
from finmag.energies.zeeman import TimeZeemanPython
import matplotlib.pyplot as plt
from finmag.util.fileio import Tablereader
mesh = df.BoxMesh(-20, -20, -20, 20, 20, 20, 20, 20, 20)
def compute_field(n=1, m0=(1,0,0), pbc=None):
assert n>=1 and n%2==1
Ms = 1e6
sim = Simulation(mesh, Ms, unit_length=1e-9, name = 'dy', pbc=pbc)
sim.set_m(m0)
parameters = {
'absolute_tolerance': 1e-10,
'relative_tolerance': 1e-10,
'maximum_iterations': int(1e5)
}
Ts = []
for i in range(-n/2+1,n/2+1):
Ts.append((40.*i,0,0))
demag = Demag(Ts=Ts)
demag.parameters['phi_1'] = parameters
demag.parameters['phi_2'] = parameters
sim.add(demag)
sim.set_m((1,0,0))
field1 = sim.llg.effective_field.get_dolfin_function('Demag')
sim.set_m((0,0,1))
field2 = sim.llg.effective_field.get_dolfin_function('Demag')
return (field1(0,0,0)/Ms, field2(0,0,0)/Ms)
def plot_field():
ns = [1, 3, 5, 7, 11, 15, 21, 29, 59]
ns = [1,3,5,7, 29]
field1=[]
field2=[]
for n in ns:
f, g=compute_field(n=n)
field1.append(abs(f[0]))
field2.append(abs(g[2]))
#f2 = compute_field(n=n,m0=(1,0,0),pbc='1d')
#field_pbc.append(abs(f2[0]))
fig=plt.figure(figsize=(5, 5))
plt.subplot(2, 1, 1)
plt.plot(ns, field1, '.-')
plt.xlabel('Copies')
plt.ylabel('Field (Ms)')
plt.title('m aligned along x')
plt.subplot(2, 1, 2)
plt.plot(ns, field2, '.-')
plt.xlabel('Copies')
plt.ylabel('Field (Ms)')
plt.title('m aligned along z')
#plt.legend()
fig.savefig('fields.pdf')
if __name__ == '__main__':
plot_field()
| 2,009 | 22.103448 | 76 |
py
|
finmag
|
finmag-master/dev/sandbox/periodic/demag/1d_test/test_belement.py
|
import numpy as np
import matplotlib as mpl
mpl.use("Agg")
import matplotlib.pyplot as plt
import dolfin as df
mesh = df.BoxMesh(-5, -5, -5, 5, 5, 5, 5, 5, 5)
from finmag.native.treecode_bem import compute_solid_angle_single
from finmag.native.treecode_bem import compute_boundary_element
from finmag.native.llg import compute_lindholm_L
def test_boundary_element(z=0.1):
p0 = np.array([ 5., 3., -3.])
p1 = np.array([ 5., 3., -5.])
p2 = np.array([ 5., 5., -3.])
p = np.array( [ 5., 3., -5.])
be = np.array([0.,0.,0.])
T = np.array([0.,0.,0.])
compute_boundary_element(p,p0,p1,p2,be,T)
be2 = compute_lindholm_L(p,p0,p1,p2)
print "[DDD Weiwei]: ", (z, be)
print "[DDD Dmitri]: ", (z, be2)
vert_bsa =np.zeros(4)
mc = np.array([0,1,2,3])
xyz = np.array([p,p0,p1,p2])
for j in range(4):
tmp_omega = compute_solid_angle_single(
xyz[mc[j]],
xyz[mc[(j+1)%4]],
xyz[mc[(j+2)%4]],
xyz[mc[(j+3)%4]])
vert_bsa[mc[j]]+=tmp_omega
print 'solid angle',vert_bsa/(4*np.pi)
return be
def plot_mx():
zs=np.linspace(0,-1e-2,21)
bs = []
for z in zs:
r = test_boundary_element(z)
bs.append(r[0])
fig=plt.figure()
plt.plot(zs, bs, '.-',label='field')
#plt.plot(ns, field_pbc, '.-',label='field2')
#plt.plot(ts, data['E_Demag'], label='Demag')
#plt.plot(ts, data['E_Exchange'], label='Exchange')
#plt.xlabel('copies')
#plt.ylabel('field')
plt.legend()
fig.savefig('res.pdf')
if __name__ == '__main__':
print test_boundary_element(z=.0)
#plot_mx()
| 1,731 | 22.726027 | 65 |
py
|
finmag
|
finmag-master/dev/sandbox/basics-dolfin/show_indexsummationwarning_demo.py
|
"""
Anisotropy Energy
Energy computed with two different, mathematically equivalent forms. The second
one produces the warning
Calling FFC just-in-time (JIT) compiler, this may take some time.
Summation index does not appear exactly twice: ?
when it is compiled.
"""
import dolfin as df
mesh = df.UnitCubeMesh(1, 1, 1)
V = df.VectorFunctionSpace(mesh,"CG",1)
a = df.Constant((0, 0, 1))
M = df.interpolate(df.Constant((0, 0, 8.6e5)), V)
print "Assembling first form."
Eform = 2.3 * df.dot(a, M) * df.dot(a, M) * df.dx
E = df.assemble(Eform)
print "E = ", E
print "Assembling second form."
Eform_alt = 2.3 * df.dot(a, M)**2 * df.dx
E_alt = df.assemble(Eform_alt)
print "E = ", E_alt
rel_diff = 100 * abs(E-E_alt)/E # in %
print "Difference is {} %.".format(rel_diff)
assert rel_diff < 0.1
| 803 | 23.363636 | 79 |
py
|
finmag
|
finmag-master/dev/sandbox/basics-dolfin/mixedspacestest.py
|
import dolfin as df
#Test whether we can combine DG0 and CG1 expressions. Background:
#
#To allow M to vary as a function of space, we would like to write
#M = Ms*m where
#
# Ms is a DG0 space (with values on the centre of the cells) and
# m the normalised magnetisation vector defined on the nodes of the mesh.
#
# Seems to work. Hans & Weiwei, 19 April 2012.
mesh = df.RectangleMesh(0,0,1,1,1,1)
VV = df.VectorFunctionSpace(mesh,"CG",1,dim=3)
V = df.FunctionSpace(mesh,"DG",0)
Ms = df.interpolate(df.Expression("1.0"),V)
m = df.interpolate(df.Expression(("2.0","0.0","0.0")),VV)
#m = df.interpolate(df.Expression("2.0"),VV)
L = df.dot(m,m)*Ms*df.dx
u = df.assemble(L)
print "expect u to be 4 (2*2*1):"
print "u=",u
| 724 | 24.892857 | 73 |
py
|
finmag
|
finmag-master/dev/sandbox/basics-dolfin/assemble_array.py
|
from dolfin import *
mesh = UnitCubeMesh(30,30,30)
V = FunctionSpace(mesh, "CG", 1)
v = TestFunction(V)
F = Function(V)
b = v*dx
L = assemble(b)
print \
"""
In this test case, F is a dolfin.Function, and L is computed as
\tL = assemble(v*dx),
where v is a dolfin.TestFunction. Say we want to divide F by L.
If we simply write
\tA1 = F.vector().array()/L.
This has proved to be much slower than
\tA2 = F.vector().array()/L.array().
Timings that compute A1 and A2 on a 30 x 30 x 30 unit cube
20 times each show the issue:
"""
tic()
for i in range(20):
A1 = F.vector().array()/L
print "A1: %.4f sec." % toc()
tic()
for i in range(20):
A2 = F.vector().array()/L.array()
print "A2: %.4f sec." % toc()
| 717 | 17.410256 | 63 |
py
|
finmag
|
finmag-master/dev/sandbox/basics-dolfin/linear_algebra_timings.py
|
__author__ = "Anders Logg <[email protected]>"
__date__ = "2012-01-20"
__copyright__ = "Copyright (C) 2012 Anders Logg"
__license__ = "GNU LGPL version 3 or any later version"
# Last changed: 2012-01-20
from dolfin import *
import pylab
# Mesh sizes to check
mesh_sizes = [2, 3, 4, 6, 8, 11, 16, 23, 32, 45, 64, 91]
# Global data for plotting
_legends = []
_markers = "ov^<>1234sp*hH+xDd|_"
def create_linear_system(n):
"Create linear system for Poisson's equation on n x n x n mesh"
mesh = UnitCubeMesh(n, n, n)
V = FunctionSpace(mesh, "Lagrange", 1)
u = TrialFunction(V)
v = TestFunction(V)
f = Expression("sin(5*x[0])*sin(5*x[1])*sin(5*x[2])")
a = dot(grad(u), grad(v))*dx
L = f*v*dx
A = assemble(a)
b = assemble(L)
bc = DirichletBC(V, 0.0, DomainBoundary())
bc.apply(A, b)
x = Vector(b.size())
return A, x, b
def bench_solver(solver, preconditioner="default"):
"Benchmark given solver and preconditioner"
global _legends
global _markers
print "Computing timings for (%s, %s)..." % (solver, preconditioner)
# Compute timings
sizes = []
timings = []
for n in mesh_sizes:
A, x, b = create_linear_system(n)
N = b.size()
if N > 20000 and solver in ("lu", "cholesky"): break
sizes.append(N)
print " Solving linear system of size %d x %d" % (N, N)
tic()
solve(A, x, b, solver, preconditioner)
timings.append(toc())
# Plot timings
marker = _markers[len(_legends) % len(_markers)]
pylab.loglog(sizes, timings, "-%s" % marker)
# Store legend
backend = parameters["linear_algebra_backend"]
if preconditioner == "default":
_legends.append("%s %s" % (backend, solver))
else:
_legends.append("%s %s, %s" % (backend, solver, preconditioner))
# Timings for uBLAS
parameters["linear_algebra_backend"] = "uBLAS"
bench_solver("lu")
bench_solver("cholesky")
bench_solver("gmres", "ilu")
# Timings for PETSc
parameters["linear_algebra_backend"] = "PETSc"
bench_solver("lu")
bench_solver("gmres", "none")
bench_solver("gmres", "ilu")
bench_solver("cg", "none")
bench_solver("cg", "ilu")
bench_solver("gmres", "amg")
bench_solver("cg", "amg")
bench_solver("tfqmr", "ilu")
# Finish plot
pylab.grid(True)
pylab.title("Solving Poisson's equation with DOLFIN 1.0.0")
pylab.xlabel("N")
pylab.ylabel("CPU time")
pylab.legend(_legends, "upper left")
pylab.savefig("linear-algebra-timings.pdf")
pylab.savefig("linear-algebra-timings.png")
print("Data plotted in linear-algebra-timings.png/pdf")
pylab.show()
| 2,595 | 25.489796 | 72 |
py
|
finmag
|
finmag-master/dev/sandbox/basics-dolfin/project-interpolate.py
|
from dolfin import *
# Compare the project and interpolate methods for
# a small mesh size. Would assume that both
# methods give the same result in this
# simple case.
# Mesh and functionspace
L = 10e-9 #10 nm
n = 5
mesh = BoxMesh(0,0,0,L,L,L,n,n,n)
V = VectorFunctionSpace(mesh, "CG", 1)
# Initial magnetisation
M0 = Constant((0,0,1))
# Using project
Mp = project(M0, V)
print "This should be an array of 0 on the first 2/3 and 1 on the last 1/3:"
print Mp.vector().array()
print "... but it isn't. The reason is that the default tolerances for "
print "iterative solvers in dolfin (called as part of 'project') are affected"
print "by the scale of the coordinates, and work best if the coordinates are"
print "of order of unity."
print "Change L=1e-8 to L=1, and the accuracy improves."
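# Added illustration (not part of the original script): repeat the projection on
# a unit-scale mesh to check the claim above that the accuracy improves when the
# coordinates are of order unity. mesh_unit, V_unit and Mp_unit are new names.
mesh_unit = BoxMesh(0, 0, 0, 1, 1, 1, n, n, n)
V_unit = VectorFunctionSpace(mesh_unit, "CG", 1)
Mp_unit = project(M0, V_unit)
print "Using project on the unit-scale mesh:"
print Mp_unit.vector().array()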
# Using interpolate
Mi = interpolate(M0, V)
print "Interpolate gives the result we assumed:"
print Mi.vector().array()
| 923 | 27.875 | 78 |
py
|
finmag
|
finmag-master/dev/sandbox/basics-dolfin/eigenvalues.py
|
#based on https://answers.launchpad.net/dolfin/+question/199058
from dolfin import *
import math
mesh = UnitSquareMesh(30,30)
lv = [c.volume() for c in cells(mesh)]
print "ratio of max and min volume: ", max(lv)/min(lv)
V = FunctionSpace(mesh, "CG", 1)
u = TrialFunction(V)
v = TestFunction(V)
a = dot(grad(u), grad(v))*dx
A = PETScMatrix()
assemble(a, tensor=A)
def u0(x,on_boundary): return on_boundary
bc = DirichletBC(V,Constant(0.0),u0)
bc.apply(A)
eigensolver = SLEPcEigenSolver(A)
eigensolver.parameters["spectrum"] = "smallest real"
N=8
eigensolver.solve(N)
r=[]
l=[]
for i in range(N):
rr, c, rx, cx = eigensolver.get_eigenpair(i)
r.append (rr)
u = Function(V)
u.vector()[:] = rx
#HF: not sure what the next two lines are meant to do?
e=project(grad(u),VectorFunctionSpace(mesh,"CG",1))
l.append(assemble(dot(e,e)*dx)/assemble(u*u*dx))
plot(u,title='mode %d, EValue=%f EValue=%f' % (i,l[-1]/math.pi**2,rr/math.pi**2))
#from IPython import embed
#embed()
print "Eigenvalues from solver:\n"
for i in r: print i
print "Eigenvalues from eigenfunctions:\n"
for i in l: print i
interactive()
| 1,122 | 21.019608 | 82 |
py
|
finmag
|
finmag-master/dev/sandbox/basics-dolfin/dofs/dofmaptool.py
|
from dolfin import *
from fenicstools import DofMapPlotter
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
mesh = UnitSquareMesh(5, 5)
V = FunctionSpace(mesh, 'CG', 1)
if rank == 0:
dmp = DofMapPlotter(V)
    print("""Pressing C, E, v with mouse hovering over cells 0 and 1 and d with mouse over cells 40 and 41 will light up cell and edge indices for the entire mesh and vertex and dof indices in cells 0, 1 and 40, 41 respectively. Thus we see that the first mesh vertex is located at [0, 0] while the first dof is located at [0, 1]. The assignment u.vector[0] thus modifies the function's value at [0, 1].""")
dmp.plot()
dmp.show()
| 664 | 46.5 | 403 |
py
|
finmag
|
finmag-master/dev/sandbox/basics-dolfin/dofs/mesh_coordinates_and_function_values_via_dofmap.py
|
#!/usr/bin/env python
# This is a slightly adapted example from the FEniCS Q&A forum [1].
# It creates a 2D vector field 'v' on a 2D mesh (e.g. representing
# velocity of a flow) and then extracts these values out again and
# displays them in the form
#
# (x_i, y_i) --> (vx_i, vy_i)
#
# It also plots the field values using matplotlib and dolfin's own
# plotting functionality.
#
# This example is a good illustration of how to use the dofmap to
# access function data on a mesh. It is instructive to run this
# example in parallel, e.g.:
#
# mpirun -n 4 python foobar.py
#
# This will show that each process only knows about the coordinates
# of the part of the mesh that it owns. The plotting will also only
# plot the vector field on the corresponding part of the mesh.
#
# Note that in parallel the dofmap and mesh on each process also
# use 'ghost nodes'. These are mesh nodes which do not belong to
# the current process but are neighbours of nodes that do. These
# are dealt with in the 'try ... except IndexError' statements
# below.
#
# (Max, 15.5.2014)
#
# [1] http://fenicsproject.org/qa/1460/numpy-arrays-from-fenics-data
import matplotlib
matplotlib.use('wx')
from dolfin import *
import numpy as np
import pylab as pl
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
#mesh = RectangleMesh(0, 0, 20, 50, 4, 10)
mesh = UnitSquareMesh(10, 20)
V = VectorFunctionSpace(mesh, "CG", 1)
u = Function(V)
# Vertex based data
vertex_vector_values = np.zeros(mesh.num_vertices()*2)
vertex_vector_values[::2] = mesh.coordinates().sum(1)
vertex_vector_values[1::2] = 2-mesh.coordinates().sum(1)
d2v_map = dof_to_vertex_map(V)  # avoid shadowing the dolfin function dof_to_vertex_map
u.vector().set_local(vertex_vector_values[d2v_map])
####### NOW GO THE OTHER WAY #####
arr = u.vector().array()
coor = mesh.coordinates()
vtd = vertex_to_dof_map(V)
values = list()
for i, dum in enumerate(coor):
try:
values.append([arr[vtd[2*i]],arr[vtd[2*i+1]]])
except IndexError:
print("[Process {}/{}] IndexError for i={}, dum={}".format(rank, size, i, dum))
values = np.array(values)
x = list()
y = list()
vx = list()
vy = list()
for i, dum in enumerate(coor):
try:
print '(%f,%f) -> (%f,%f)' %(coor[i][0], coor[i][1], values[i][0], values[i][1])
x.append(coor[i][0])
y.append(coor[i][1])
vx.append(values[i][0])
vy.append(values[i][1])
except IndexError:
print("[Process {}/{}] IndexError for i={}, dum={}".format(rank, size, i, dum))
#pl.quiver(x,y,vx,vy)
#pl.axis([0, 1.3, 0, 1.3])
#pl.show()
plot(u, axes=True, interactive=True)
| 2,626 | 28.188889 | 88 |
py
|
finmag
|
finmag-master/dev/sandbox/basics-dolfin/dofs/dofmap_cell_dofs.py
|
# Example motivated by dolfin mailing list (questions 221862):
# http://answers.launchpad.net/dolfin/+question/221862 -- see last reply from Hake
# The entry on the list is not actually very
# informative, but the snippet below seems to show the key idea.
#
# I am gathering this here, to push forward the whole dofmap question
# that was introduced with dolfin 1.1 (HF, Feb 2013)
# Addendum: The main (whole?) point of the dofmap seems to be
# for bookkeeping purposes when dolfin is running in parallel.
# Thus we're now also printing the process number and cell id
# in each process. It is instructive to look at the output of
# a command like this:
#
# mpirun -n 6 python example1.py | grep "cell #" | sort
#
# So it seems that cell id's are local to each process, and
# that V.dofmap().cell_dofs() gives a mapping from the local
# degrees of freedom of the cell to the global degrees of freedom.
#
# (Max, 15.3.2013)
import dolfin as df
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
if rank == 0:
print("2d Example")
mesh = df.UnitSquareMesh(4, 4)
V = df.VectorFunctionSpace(mesh, 'CG', 1, dim=3)
x = V.dofmap()
for cell in df.cells(mesh):
print "Process {}/{}, cell #{}: {}, {}".format(rank, size, cell.index(), cell, V.dofmap().cell_dofs(cell.index()))
#For a 1d example, uncomment the code below and run
# mpirun -np 2 python dofmap_cell_dofs.py | grep "topological" | sort
#
#if rank == 0:
# print("1d Example")
#
#mesh = df.IntervalMesh(20, -1, 1)
#V = df.FunctionSpace(mesh, 'CG', 1)
#x = V.dofmap()
#for cell in df.cells(mesh):
# print("Process {}/{}, cell #{}: {}, {}".format(rank, size, cell.index(), cell, V.dofmap().cell_dofs(cell.index())))
#
#
| 1,738 | 30.053571 | 121 |
py
|
finmag
|
finmag-master/dev/sandbox/basics-dolfin/parallel/instant/test_norm.py
|
from dolfin import *
import numpy as np
import os
def test_norm():
header_file = open("Foo/Foo.h", "r")
code = header_file.read()
header_file.close()
foo_module = compile_extension_module(
code=code, source_directory="Foo", sources=["Foo.cpp", "Bar.cpp"],
include_dirs=[".", os.path.abspath("Foo")])
mesh = UnitIntervalMesh(2)
V = VectorFunctionSpace(mesh, 'CG', 1, dim=3)
field = Function(V)
field.assign(Expression(("1", "2", "3")))
print "field", field.vector().array()
print "shape of field.vector.array", field.vector().array().shape
W = FunctionSpace(mesh, 'CG', 1)
norm = Function(W)
print "norm", norm.vector().array()
print "shape of norm.vector.array", norm.vector().array().shape
foo = foo_module.Foo()
foo.norm(field.vector(), norm.vector())
print "norm after computation", norm.vector().array()
expected = np.zeros(mesh.num_vertices())
expected += np.sqrt(1 + 2*2 + 3*3)
assert np.allclose(norm.vector().array(), expected)
| 1,039 | 29.588235 | 74 |
py
|
finmag
|
finmag-master/dev/sandbox/basics-dolfin/parallel/instant/simple.py
|
import dolfin as df
mesh = df.UnitIntervalMesh(1)
V = df.VectorFunctionSpace(mesh, "CG", 1, dim=3)
f = df.Function(V)
f.assign(df.Expression(("1", "2", "3")))
print f.vector().array()
| 186 | 19.777778 | 48 |
py
|
finmag
|
finmag-master/dev/sandbox/basics-dolfin/parallel/instant/norm_test.py
|
from dolfin import *
import dolfin as df
import instant
cpp_code = """
void dabla(dolfin::Vector& a, dolfin::Vector& b, double c, double d) {
for (unsigned int i=0; i < a.size(); i++) {
b.setitem(i, d*a[i] + c);
}
}
"""
include_dirs, flags, libs, libdirs = instant.header_and_libs_from_pkgconfig("dolfin")
headers= ["dolfin.h"]
func = instant.inline(cpp_code, system_headers=headers, include_dirs=include_dirs, libraries = libs, library_dirs = libdirs)
#func = instant.inline(cpp_code, system_headers=headers)
if __name__ == '__main__':
nx = ny = 1
mesh = df.UnitSquareMesh(nx, ny)
V = df.FunctionSpace(mesh, 'CG', 1)
Vv = df.VectorFunctionSpace(mesh, 'CG', 1, dim=3)
f = df.interpolate(df.Expression("0"),V)
f1 = df.interpolate(df.Expression(("1","0","0")),Vv)
f2 = df.interpolate(df.Expression(("0","1","0")),Vv)
print 'a=',f1.vector().array()
print 'b=',f2.vector().array()
    func(f1.vector(),f2.vector(),1.2,4.5)
print 'a=',f1.vector().array()
print 'b=',f2.vector().array()
| 1,047 | 25.871795 | 125 |
py
|
finmag
|
finmag-master/dev/sandbox/basics-dolfin/parallel/instant/dolfin_test_foo.py
|
"This demo program demonstrates how to include additional C++ code in DOLFIN."
# Copyright (C) 2013 Kent-Andre Mardal, Mikael Mortensen, Johan Hake
#
# This file is part of DOLFIN.
#
# DOLFIN is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DOLFIN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.
#
# First added: 2013-04-02
from dolfin import *
import numpy
import os
header_file = open("Foo/Foo.h", "r")
code = header_file.read()
header_file.close()
foo_module = compile_extension_module(
code=code, source_directory="Foo", sources=["Foo.cpp", "Bar.cpp"],
include_dirs=[".", os.path.abspath("Foo")])
mesh = UnitCubeMesh(10, 10, 10)
V = FunctionSpace(mesh, 'CG', 1)
f = Function(V)
foo = foo_module.Foo()
foo.foo(f)
foo.foo2(f.vector())
| 1,274 | 30.097561 | 78 |
py
|
finmag
|
finmag-master/dev/sandbox/basics-dolfin/parallel/local-arrays/parallel-with-coord.py
|
"""
Script to investigate the ordering process for Dolfin
meshes with periodic boundary conditions.
Want to assert that a local process contains the full
vector for each node and that they are not distributed.
"""
print "Hello World"
import dolfin as df
import numpy as np
# Explicit - do not reorder
df.parameters.reorder_dofs_serial = False
# Generate mesh and boundary conditions
nx = ny = 10
mesh = df.IntervalMesh(9,0,9)
class PeriodicBoundary(df.SubDomain):
def inside(self, x, on_boundary):
return bool(x[0] < 1+df.DOLFIN_EPS and x[0] > 1-df.DOLFIN_EPS and on_boundary)
def map(self, x, y):
y[0] = x[0] - 4.0
pbc = PeriodicBoundary()
# Vector space is populated with numbers representing coordinates
V = df.VectorFunctionSpace(mesh, 'CG', 1, dim=3, constrained_domain=pbc)
expression = df.Expression(['x[0]+0.1', 'x[0]+0.2', 'x[0]+0.3'])
zero_v = df.interpolate(expression, V)
# What does the vector look like?
print("My vector is of shape %s." % zero_v.vector().array().shape)
print zero_v.vector().array()
# Store vector and assert that the full vector is present
my_vec = zero_v.vector().array()
my_vec_floor = np.floor(my_vec)
N = len(my_vec_floor)
for num in np.unique(my_vec_floor):
assert((N - np.count_nonzero(my_vec_floor - num)) == 3)
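# Added sketch (not part of the original script): a pure-numpy illustration of
# converting between the blocked (xxx...yyy...zzz) and interleaved (xyz xyz ...)
# orderings discussed in the observations at the end of this file. The helper
# names below are new here.
def interleaved_to_blocked(v):
    # [x0, y0, z0, x1, y1, z1, ...] -> [x0, x1, ..., y0, y1, ..., z0, z1, ...]
    return v.reshape(-1, 3).T.reshape(-1)
def blocked_to_interleaved(v):
    # [x0, x1, ..., y0, y1, ..., z0, z1, ...] -> [x0, y0, z0, x1, y1, z1, ...]
    return v.reshape(3, -1).T.reshape(-1)
v_blocked = np.array([0.1, 1.1, 2.1, 0.2, 1.2, 2.2, 0.3, 1.3, 2.3])
v_interleaved = blocked_to_interleaved(v_blocked)
assert np.allclose(interleaved_to_blocked(v_interleaved), v_blocked)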
"""
Observations: 6 Feb 2015 HF:
fangohr@osiris:~/hg/finmag/sandbox/basics-dolfin/parallel/local-arrays$ dir
parallel.py parallel.py-n2.out parallel-with-coord.py run_parallel.sh
parallel.py-n1.out parallel.py-n3.out parallel-with-rank.py
fangohr@osiris:~/hg/finmag/sandbox/basics-dolfin/parallel/local-arrays$ cat parallel.py-n1.out
My vector is of shape 30.
[ 0.1 1.1 2.1 3.1 4.1 5.1 6.1 7.1 8.1 9.1 0.2 1.2 2.2 3.2 4.2
5.2 6.2 7.2 8.2 9.2 0.3 1.3 2.3 3.3 4.3 5.3 6.3 7.3 8.3 9.3]
fangohr@osiris:~/hg/finmag/sandbox/basics-dolfin/parallel/local-arrays$ cat parallel.py-n2.out
My vector is of shape 15.
My vector is of shape 15.
[ 0.1 0.2 0.3 1.1 1.2 1.3 2.1 2.2 2.3 3.1 3.2 3.3 4.1 4.2 4.3]
[ 5.1 5.2 5.3 6.1 6.2 6.3 7.1 7.2 7.3 8.1 8.2 8.3 9.1 9.2 9.3]
fangohr@osiris:~/hg/finmag/sandbox/basics-dolfin/parallel/local-arrays$ cat parallel.py-n3.out
My vector is of shape 9.
My vector is of shape 12.
My vector is of shape 9.
[ 3.1 3.2 3.3 4.1 4.2 4.3 5.1 5.2 5.3 6.1 6.2 6.3]
[ 0.1 0.2 0.3 1.1 1.2 1.3 2.1 2.2 2.3]
[ 7.1 7.2 7.3 8.1 8.2 8.3 9.1 9.2 9.3]
fangohr@osiris:~/hg/finmag/sandbox/basics-dolfin/parallel/local-arrays$
Interestingly, this uses the 'old' order xxxxx, yyyyy, zzzzz for the serial run, but
switches to xyz, xyz, xyz, xyz, xyz when run with mpi -n N where N >= 2 .
In other words: the df.parameters.reorder_dofs_serial = False
is ignored for parallel runs.
"""
| 2,777 | 34.615385 | 95 |
py
|
finmag
|
finmag-master/dev/sandbox/basics-dolfin/parallel/local-arrays/parallel-with-rank.py
|
print "Hello World"
import dolfin as df
mpi_world = df.mpi_comm_world()
rank = df.MPI.rank(mpi_world)
size = df.MPI.size(mpi_world)
me_str = "{}/{}".format(rank, size)
print("I am " + me_str)
#df.parameters.reorder_dofs_serial = False
nx = ny = 1
mesh = df.RectangleMesh(0, 0, 1, 1, nx, ny)
V = df.VectorFunctionSpace(mesh, 'CG', 1, dim=3)
zero = df.Constant(list([0,0,1]))
zero_v = df.interpolate(zero, V)
print("{}: My vector is of shape {}.".format(me_str, zero_v.vector().array().shape))
#print zero_v.vector().array()
| 526 | 26.736842 | 84 |
py
|
finmag
|
finmag-master/dev/sandbox/basics-dolfin/parallel/local-arrays/parallel-with-coord-and-map.py
|
"""
Script to investigate the ordering process for Dolfin
meshes with periodic boundary conditions.
Want to assert that a local process contains the full
vector for each node and that they are not distributed.
"""
print "Hello World"
import dolfin as df
import numpy as np
#df.parameters.reorder_dofs_serial = False
df.parameters.reorder_dofs_serial = True
# Generate mesh and boundary conditions
nx = ny = 10
mesh = df.IntervalMesh(9,0,9)
class PeriodicBoundary(df.SubDomain):
def inside(self, x, on_boundary):
return bool(x[0] < 1+df.DOLFIN_EPS and x[0] > 1-df.DOLFIN_EPS and on_boundary)
def map(self, x, y):
y[0] = x[0] - 4.0
pbc = PeriodicBoundary()
# Vector space is populated with numbers representing coordinates
V = df.VectorFunctionSpace(mesh, 'CG', 1, dim=3, constrained_domain=pbc)
expression = df.Expression(['x[0]+0.1', 'x[0]+0.2', 'x[0]+0.3'])
zero_v = df.interpolate(expression, V)
# What does the vector look like?
print("My vector is of shape %s." % zero_v.vector().array().shape)
print zero_v.vector().array()
# what does the vertex to dof map look like?
d2v = df.dof_to_vertex_map(V)
print("dof_to_vertex_map: {}".format(d2v))
v2d = df.vertex_to_dof_map(V)
print("vertex_to_dof_map: {}".format(v2d))
# Store vector and assert that the full vector is present
my_vec = zero_v.vector().array()
my_vec_floor = np.floor(my_vec)
N = len(my_vec_floor)
for num in np.unique(my_vec_floor):
assert((N - np.count_nonzero(my_vec_floor - num)) == 3)
"""
2015/02/06 HF, MAB:
fangohr@osiris:~/hg/finmag/sandbox/basics-dolfin/parallel/local-arrays$ mpirun -n 1 python parallel-with-coord-and-map.py
Hello World
My vector is of shape 30.
[ 0.1 0.2 0.3 1.1 1.2 1.3 2.1 2.2 2.3 3.1 3.2 3.3 4.1 4.2 4.3
5.1 5.2 5.3 6.1 6.2 6.3 7.1 7.2 7.3 8.1 8.2 8.3 9.1 9.2 9.3]
dof_to_vertex_map: [ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24
25 26 27 28 29]
vertex_to_dof_map: [ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24
25 26 27 28 29]
fangohr@osiris:~/hg/finmag/sandbox/basics-dolfin/parallel/local-arrays$ mpirun -n 2 python parallel-with-coord-and-map.py
Hello World
Hello World
Building mesh (dist 0a)
Number of global vertices: 10
Number of global cells: 9
Building mesh (dist 1a)
My vector is of shape 15.
My vector is of shape 15.
[ 5.1 5.2 5.3 6.1 6.2 6.3 7.1 7.2 7.3 8.1 8.2 8.3 9.1 9.2 9.3]
[ 0.1 0.2 0.3 1.1 1.2 1.3 2.1 2.2 2.3 3.1 3.2 3.3 4.1 4.2 4.3]
dof_to_vertex_map: [ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14]
dof_to_vertex_map: [ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14]
vertex_to_dof_map: [ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14]
vertex_to_dof_map: [ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 -15 -14 -13]
Observation: dof_to_vertex_map seems to provide local indices (that's good).
We don't know what the additional negative numbers are (last line above): could be a periodic point, or
a hint that this node is on a different process. Will need to look further into this.
"""
| 3,092 | 32.258065 | 122 |
py
|
finmag
|
finmag-master/dev/sandbox/basics-dolfin/parallel/local-arrays/parallel.py
|
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
print("Hello World from {}/{}".format(rank, size))
import dolfin as df
#df.parameters.reorder_dofs_serial = False
nx = ny = 10
mesh = df.RectangleMesh(df.Point(0, 0), df.Point(1, 1), nx, ny)
V = df.VectorFunctionSpace(mesh, 'CG', 1, dim=3)
vector_value_constant = df.Constant(list([0,1,2]))
our_function = df.interpolate(vector_value_constant, V)
print("{}: My vector is of shape {}.".format(rank, our_function.vector().array().shape))
print(our_function.vector().array())
| 568 | 28.947368 | 88 |
py
|
finmag
|
finmag-master/dev/sandbox/basics-dolfin/parallel/writing_and_reading_meshes/write_mesh.py
|
from dolfin import *
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
print "This is process {}/{}".format(rank, size)
from common import mesh
F = HDF5File('meshfile_{:02d}.h5'.format(size), 'w')
F.write(mesh, 'mymesh')
print "Process {}/{} has written the mesh.".format(rank, size)
| 329 | 21 | 62 |
py
|
finmag
|
finmag-master/dev/sandbox/basics-dolfin/parallel/writing_and_reading_meshes/common.py
|
from dolfin import *
nx = 2
ny = 2
nz = 2
mesh = UnitCubeMesh(nx, ny, nz)
| 76 | 8.625 | 31 |
py
|
finmag
|
finmag-master/dev/sandbox/basics-dolfin/parallel/writing_and_reading_meshes/read_mesh.py
|
import sys
from dolfin import *
from mpi4py import MPI
try:
meshfilename = sys.argv[1]
except IndexError:
print "Error: expecting meshfilename as first argument."
sys.exit(0)
G = HDF5File(meshfilename, 'r')
mesh2 = Mesh()
G.read(mesh2, 'mymesh')
from common import mesh
def sort_nested(lst, level=1):
"""
Sort a list of lists up to the given level:
0 = don't sort
1 = sort outer list but keep inner lists untouched
2 = sort inner lists first and then sort outer list
"""
if level == 0:
return lst
elif level == 1:
return sorted(lst)
elif level == 2:
return sorted(map(sorted, lst))
else:
raise ValueError("Sorting level must be <= 2")
coords1 = mesh.coordinates().tolist()
coords2 = mesh2.coordinates().tolist()
cells1 = mesh.cells().tolist()
cells2 = mesh2.cells().tolist()
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
if coords1 == coords2:
print "Process {}/{}: Coordinates match.".format(rank, size)
elif sort_nested(coords1, 1) == sort_nested(coords2, 1):
print "Process {}/{}: Coordinates match after sorting outer lists.".format(rank, size)
elif sort_nested(coords1, 2) == sort_nested(coords2, 2):
print "Process {}/{}: Coordinates match after complete sorting.".format(rank, size)
else:
print "Process {}/{}: Coordinates do NOT match.".format(rank, size)
if cells1 == cells2:
print "Process {}/{}: Cells match.".format(rank, size)
elif sort_nested(cells1, 1) == sort_nested(cells2, 1):
print "Process {}/{}: Cells match after sorting outer lists.".format(rank, size)
elif sort_nested(cells1, 2) == sort_nested(cells2, 2):
print "Process {}/{}: Cells match after complete sorting.".format(rank, size)
else:
print "Process {}/{}: Cells do NOT match.".format(rank, size)
| 1,825 | 28.934426 | 90 |
py
|
finmag
|
finmag-master/dev/sandbox/basics-dolfin/parallel/writing_and_reading_meshes/read_mesh_serial.py
|
import sys
from dolfin import *
try:
meshfilename = sys.argv[1]
except IndexError:
print "Error: expecting meshfilename as first argument."
sys.exit(0)
G = HDF5File(meshfilename, 'r')
mesh2 = Mesh()
G.read(mesh2, 'mymesh')
from common import mesh
def sort_nested(lst, level=1):
"""
Sort a list of lists up to the given level:
0 = don't sort
1 = sort outer list but keep inner lists untouched
2 = sort inner lists first and then sort outer list
"""
if level == 0:
return lst
elif level == 1:
return sorted(lst)
elif level == 2:
return sorted(map(sorted, lst))
else:
raise ValueError("Sorting level must be <= 2")
coords1 = mesh.coordinates().tolist()
coords2 = mesh2.coordinates().tolist()
cells1 = mesh.cells().tolist()
cells2 = mesh2.cells().tolist()
if coords1 == coords2:
print "Coordinates match."
elif sort_nested(coords1, 1) == sort_nested(coords2, 1):
print "Coordinates match after sorting outer lists."
elif sort_nested(coords1, 2) == sort_nested(coords2, 2):
print "Coordinates match after complete sorting."
else:
print "Coordinates do NOT match."
if cells1 == cells2:
print "Cells match."
elif sort_nested(cells1, 1) == sort_nested(cells2, 1):
print "Cells match after sorting outer lists."
elif sort_nested(cells1, 2) == sort_nested(cells2, 2):
print "Cells match after complete sorting."
else:
print "Cells do NOT match."
| 1,461 | 25.107143 | 60 |
py
|
finmag
|
finmag-master/dev/sandbox/basics-dolfin/parallel/writing_and_reading_meshes/write_mesh_serial.py
|
from dolfin import *
from common import mesh
F = HDF5File('meshfile_serial.h5', 'w')
F.write(mesh, 'mymesh')
| 110 | 17.5 | 39 |
py
|
finmag
|
finmag-master/dev/sandbox/basics-dolfin/parallel/exercises/cells_distribution.py
|
#!/usr/bin/env python
"""
A short example to show how the nodes are distributed on processes
without the knowledge of the data details. To run this script,
mpirun -n 8 python cells_distribution.py
"""
import dolfin as df
mpi_world = df.mpi_comm_world()
rank = df.MPI.rank(mpi_world)
size = df.MPI.size(mpi_world)
def fun(x):
return rank
## David, Becky, Hans 25 July 14: the next few lines are potentially
## useful, but not needed here.
## """
## A wrapper class that could be called by dolfin in parallel
## for a normal python function. In this way, we don't have to
## know the details of the data.
## """
## class HelperExpression(df.Expression):
## def __init__(self,value):
## super(HelperExpression, self).__init__()
## self.fun = value
##
## def eval(self, value, x):
## value[0] = self.fun(x)
##
if __name__=="__main__":
mesh = df.RectangleMesh(0, 0, 20, 20, 100, 100)
V = df.FunctionSpace(mesh, 'DG', 0)
#hexp = HelperExpression(fun) # if we want to use the HelperExpression
hexp = df.Expression("alpha", alpha=rank)
u = df.interpolate(hexp, V)
file = df.File('color_map.pvd')
file << u
| 1,196 | 23.9375 | 80 |
py
|
finmag
|
finmag-master/dev/sandbox/basics-dolfin/parallel/exercises/map_dof_rank.py
|
"""
Visualise the distribution of degrees of freedom.
Invoke with
`mpirun -np N python map_dof_rank.py`
to run with N processes. Creates a `color_map.pvd` file which you can open with
paraview. When the --plot option is set, it will also display plots to screen.
"""
import argparse
import dolfin as df
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
class RankExpression(df.Expression):
"""
The value of this expression is equal to the
MPI rank of the process it is evaluated on.
"""
def eval(self, value, x):
value[0] = rank
if __name__=="__main__":
parser = argparse.ArgumentParser(description=("Visualise distribution of the degrees"
" of freedom. Will create `color_map.pvd` which you can open with paraview."
" Can also plot to screen."))
parser.add_argument('-p', '--plot', action='store_true', help='if set, plots rank to screen')
args = parser.parse_args()
if rank == 0:
print "You are running this example with {} processors.".format(size)
if size == 1:
print "To use N processes: `mpirun -np N python {}`.".format(__file__)
mesh = df.RectangleMesh(0, 0, 20, 20, 20, 20)
V = df.FunctionSpace(mesh, 'DG', 0)
rank_function = df.interpolate(RankExpression(), V)
rank_function.rename("rank", "unique id of process")
file = df.File('color_map.pvd')
file << rank_function
if args.plot:
title = "rank {} of {}".format(rank, size - 1)
df.plot(rank_function, interactive=True, title=title)
| 1,577 | 26.684211 | 97 |
py
|
finmag
|
finmag-master/dev/sandbox/basics-dolfin/parallel/computing_the_maximum_value_of_a_dolfin_vector/compute_max_of_vector.py
|
#!/usr/bin/env python
import dolfin as df
# Define a non-constant function with unique entries.
mesh = df.IntervalMesh(100, 0, 10)
V = df.FunctionSpace(mesh, 'CG', 1)
f = df.interpolate(df.Expression('x[0]', degree=1), V)
rank = df.cpp.common.MPI.rank(df.mpi_comm_world())
size = df.cpp.common.MPI.size(df.mpi_comm_world())
print("[{}/{}] Local vector: {}".format(rank, size, f.vector().array()))
max_f = f.vector().max(); # get the local max (might be different on each node)
#max_f = df.cpp.common.MPI.max(df.mpi_comm_world(), max_f); # get the global max across all nodes
print("[{}/{}] max_f={}".format(rank, size, max_f))
| 635 | 34.333333 | 98 |
py
|
finmag
|
finmag-master/dev/sandbox/basics-dolfin/map/example.py
|
import numpy as np
def v2d_xxx_from_xyz(v2d):
"""
Return the vertex to dof map in the form
v2d_xxx = [x0, x1, x2 , ... y0, y1, y2, ..., z0, z1, z2, ... ]
using the vertex to dof map in the form (we are assuming this)
v2d = [x0, y0, z0, x1 , y1, z1, x2, y2, z2, ... ]
"""
# Copy the original map (to not reference it)
v2d_xxx = v2d.copy()
# Reshape to have 3 rows with n numbers of columns
v2d_xxx.shape=(3, -1)
# Get rows,columns values
(m, n) = v2d_xxx.shape
# For every column of v2d_xxx , copy the corresponding values from
# the xyz arrangement, for the 3 rows. For example:
#
# Row 0 (x values):
# [x0, x1, x2, , ... , xi, ... ]
# | | |
# v2d[3] v2d[6] v2d[3 * i]
#
# Row 1:
# [y0, y1, y2, , ... , yi, ... ]
# | | |
# v2d[4] v2d[7] v2d[3 * i + 1]
#
# Row 2: ...
for i in range(n):
v2d_xxx[0, i]=v2d[3 * i]
v2d_xxx[1, i]=v2d[3 * i + 1]
v2d_xxx[2, i]=v2d[3 * i + 2]
# Return to the usual shape (see top)
v2d_xxx.shape=(-1, )
return v2d_xxx
def d2v_fun(d2v, v2d):
"""
Return an ordered full length dof to vertex map, in case of
using Periodic Boundary Conditions
    The result is similar to that of the v2d_xxx_from_xyz function above
"""
# Copy the dof_to_vertex map
a = d2v.copy()
    # Define one third of the length of the vertex to dof map
    # (with PBC, v2d is larger than the d2v map, since the
    # boundary vertices are considered)
n = len(v2d) / 3
    # Now redefine every value of 'a': each dof index j refers to the
    # interleaved [x0, y0, z0, x1, y1, z1, ...] arrangement assumed above, so
    # (j % 3) selects the component block and (j / 3) the vertex within it,
    # giving the position in the blocked xxx...yyy...zzz ordering
for i in range(len(a)):
j = d2v[i]
a[i]= (j % 3) * n + (j / 3)
a.shape=(-1, )
return a
import dolfin as df
df.parameters.reorder_dofs_serial = True
# Create a 1D mesh (interval) with 4 cells
# from 1 to 5
#
# |------|------|------|------|
# 1 5
#
mesh = df.IntervalMesh(4, 1, 5)
# Define PBCs at the extremes
class PeriodicBoundary(df.SubDomain):
# Define boundary at x = 1
# Left boundary is "target domain" G
def inside(self, x, on_boundary):
return bool(x[0] < 1 + df.DOLFIN_EPS and x[0] > 1 - df.DOLFIN_EPS and on_boundary)
# Map right boundary (H or x=5) to left boundary (G)
# The function -map- maps a coordinate x in domain H to a coordinate y in the domain G
def map(self, x, y):
y[0] = x[0] - 4
# Create periodic boundary condition
pbc = PeriodicBoundary()
# 3 dimensional vector space
V = df.VectorFunctionSpace(mesh, 'CG', 1, 3, constrained_domain=pbc)
# Set a vector field with values (x+0.1, x+0.2, x+0.3), defined on the interval
expression = df.Expression(['x[0]+0.1', 'x[0]+0.2', 'x[0]+0.3'], degree=1)
f = df.interpolate(expression, V)
# Print the maps
# d2v should be smaller than v2d with PBCs
# For this case, the values from one boundary are not shown in
# d2v, since they are the same as the opposite boundary. Thus, for
# a 3d vector field, 3 values are omitted
d2v = df.dof_to_vertex_map(V)
# The full system map. Notice that boundary values are repeated
v2d = df.vertex_to_dof_map(V)
print 'd2v ', 'length =', len(d2v), d2v
print 'v2d ', 'length =', len(v2d), v2d
print 'v2d_old (xxx) ', v2d_xxx_from_xyz(v2d)
a = []
# Unordered collection:
b = set()
# Add all different index values from v2d to 'b' (no repetition) and 'a'
for x in v2d:
if x not in b:
b.add(x)
a.append(x)
# Check if 'a' has the same length than the d2v (reduced) map
assert(len(a)==len(d2v))
v2d_xyz = np.array(a)
print 'v2d_xyz (reduced)', v2d_xyz
print '\n'
# Get the values from the vector field (unordered)
# It does not show one boundary
a = f.vector().get_local()
# Map the values from vertex to dof. Since v2d is larger
# than the reduced vector, we can see that the extra values
# are the repeated boundaries
b = a[v2d]
# Get the ordered form of the v2d map
c = a[v2d_xxx_from_xyz(v2d)]
print '(Vector function values)'
print 'a=', a
print '(Mapped values with boundaries -- > v2d)'
print 'b=', b
print '(Ordered mapped values from the v2d --> v2d_xxx)'
print 'c=', c
print '\n'
print '(Mapped values , no boundaries)'
print 'b2=', a[v2d_xyz]
print '(Mapped values from the reduced v2d_xyz)'
print 'c2=', a[v2d_xxx_from_xyz(v2d_xyz)]
print 'Mapped values --> d2v'
print 'b[d2v]=', b[d2v]
print 'c[...]=', c[d2v_fun(d2v, v2d)]
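# Added check (not part of the original file): a pure-numpy verification of the
# index arithmetic used in d2v_fun above. For a 3-component field on n vertices
# stored interleaved as [x0, y0, z0, x1, y1, z1, ...], dof index j has component
# j % 3 and vertex j // 3, so (j % 3) * n + (j // 3) is its position in the
# blocked [x0, x1, ..., y0, ..., z0, ...] ordering. n_check and j_idx are new names.
n_check = 4
j_idx = np.arange(3 * n_check)
blocked_pos = (j_idx % 3) * n_check + (j_idx // 3)
assert np.array_equal(blocked_pos[0::3], np.arange(0, n_check))                # x block
assert np.array_equal(blocked_pos[1::3], np.arange(n_check, 2 * n_check))      # y block
assert np.array_equal(blocked_pos[2::3], np.arange(2 * n_check, 3 * n_check))  # z block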
| 4,558 | 27.49375 | 90 |
py
|
finmag
|
finmag-master/dev/sandbox/treecode_bem/setup.py
|
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
import numpy
import os
#python setup.py build_ext --inplace
#NFFT_DIR = os.path.expanduser('~/nfft-3.2.0')
ext_modules = [
Extension("fastsum_lib",
sources = ['fast_sum.c','fast_sum_lib.pyx'],
include_dirs = [numpy.get_include()],
libraries=['m','gomp'],
extra_compile_args=["-fopenmp"],
#extra_link_args=["-g"],
)
]
setup(
cmdclass = {'build_ext': build_ext},
ext_modules = ext_modules
)
| 602 | 22.192308 | 58 |
py
|
finmag
|
finmag-master/dev/sandbox/treecode_bem/fastdemag.py
|
import logging
import dolfin as df
import numpy as np
import finmag
import finmag.util.solver_benchmark as bench
import finmag.energies.demag.solver_base as sb
from finmag.util.timings import timings, mtimed
import time
from fastsum_lib import FastSum
from fastsum_lib import compute_solid_angle_single
from fastsum_lib import compute_boundary_element
from finmag.energies import Demag
logger = logging.getLogger(name='finmag')
__all__ = ["TreecodeBEM"]
class TreecodeBEM(sb.FemBemDeMagSolver):
def __init__(self,mesh,m, parameters=sb.default_parameters , degree=1, element="CG",
project_method='magpar', unit_length=1,Ms = 1.0,bench = False,
mac=0.3,p=3,num_limit=100):
sb.FemBemDeMagSolver.__init__(self,mesh,m, parameters, degree, element=element,
project_method = project_method,
unit_length = unit_length,Ms = Ms,bench = bench)
self.__name__ = "Treecode Demag Solver"
#Linear Solver parameters
method = parameters["poisson_solver"]["method"]
pc = parameters["poisson_solver"]["preconditioner"]
self.poisson_solver = df.KrylovSolver(self.poisson_matrix, method, pc)
self.phi1 = df.Function(self.V)
self.phi2 = df.Function(self.V)
# Eq (1) and code-block 2 - two first lines.
b = self.Ms*df.inner(self.w, df.grad(self.v))*df.dx
self.D = df.assemble(b)
self.p=p
self.mac=mac
self.num_limit=num_limit
self.mesh=mesh
self.bmesh = df.BoundaryMesh(mesh,False)
self.b2g_map = self.bmesh.vertex_map().array()
self.compute_triangle_normal()
self.__compute_bsa()
fast_sum=FastSum(p=self.p,mac=self.mac,num_limit=self.num_limit)
coords=self.bmesh.coordinates()
face_nodes=np.array(self.bmesh.cells(),dtype=np.int32)
fast_sum.init_mesh(coords,self.t_normals,face_nodes,self.vert_bsa)
self.fast_sum=fast_sum
self.phi2_b = np.zeros(self.bmesh.num_vertices())
def __compute_bsa(self):
vert_bsa=np.zeros(self.mesh.num_vertices())
mc=self.mesh.cells()
xyz=self.mesh.coordinates()
for i in range(self.mesh.num_cells()):
for j in range(4):
tmp_omega=compute_solid_angle_single(
xyz[mc[i][j]],
xyz[mc[i][(j+1)%4]],
xyz[mc[i][(j+2)%4]],
xyz[mc[i][(j+3)%4]])
vert_bsa[mc[i][j]]+=tmp_omega
vert_bsa=vert_bsa/(4*np.pi)-1
self.vert_bsa=vert_bsa[self.b2g_map]
def compute_triangle_normal(self):
self.t_normals=[]
for face in df.faces(self.mesh):
t=face.normal() #one must call normal() before entities(3),...
cells = face.entities(3)
if len(cells)==1:
self.t_normals.append([t.x(),t.y(),t.z()])
self.t_normals=np.array(self.t_normals)
#print 'diff=',self.t_normals_2-self.t_normals
#print np.array(self.face_nodes),self.bmesh.cells()
#used for debug
def get_B_length(self):
return self.fast_sum.get_B_length(),self.bnd_nodes_number**2
def solve(self):
# Compute phi1 on the whole domain (code-block 1, last line)
timings.start(self.__class__.__name__, "phi1 - matrix product")
g1 = self.D*self.m.vector()
timings.start_next(self.__class__.__name__, "phi1 - solve")
if self.bench:
bench.solve(self.poisson_matrix,self.phi1.vector(),g1, benchmark = True)
else:
timings.start_next(self.__class__.__name__, "1st linear solve")
self.poisson_iter = self.poisson_solver.solve(self.phi1.vector(), g1)
timings.stop(self.__class__.__name__, "1st linear solve")
# Restrict phi1 to the boundary
self.phi1_b = self.phi1.vector()[self.b2g_map]
timings.start_next(self.__class__.__name__, "Compute phi2 at boundary")
self.fast_sum.fastsum(self.phi2_b, self.phi1_b.array())
#self.fast_sum.directsum(self.res,self.phi1.vector().array())
#print 'phi2 at boundary',self.res
self.phi2.vector()[self.b2g_map[:]] = self.phi2_b
# Compute Laplace's equation inside the domain,
# eq. (2) and last code-block
timings.start_next(self.__class__.__name__, "Compute phi2 inside")
self.phi2 = self.solve_laplace_inside(self.phi2)
# phi = phi1 + phi2, eq. (5)
timings.start_next(self.__class__.__name__, "Add phi1 and phi2")
self.phi.vector()[:] = self.phi1.vector() \
+ self.phi2.vector()
timings.stop(self.__class__.__name__, "Add phi1 and phi2")
return self.phi
if __name__ == "__main__":
#n=5
#mesh = df.UnitCubeMesh(n, n, n)
from finmag.util.meshes import elliptic_cylinder,sphere
mesh = elliptic_cylinder(100,150,5,4.5,directory='meshes')
#mesh = sphere(10,2,directory='meshes')
#mesh = df.BoxMesh(0, 0, 0, 1000, 5, 5, 200, 1, 1)
Vv = df.VectorFunctionSpace(mesh, 'Lagrange', 1)
Ms = 8.6e5
expr = df.Expression(('cos(x[0])', 'sin(x[0])','0'))
m = df.interpolate(expr, Vv)
m = df.interpolate(df.Constant((0, 0, 1)), Vv)
demag=TreecodeBEM(mesh,m,mac=0.3,p=4,num_limit=100,Ms=Ms)
f1 = demag.compute_field()
from finmag.energies.demag.solver_fk import FemBemFKSolver as FKSolver
fk = FKSolver(mesh, m, Ms=Ms)
f2 = fk.compute_field()
f3=f1-f2
print f1[0:10],f2[0:10]
print np.average(np.abs(f3[-200:]/f1[-200:]))
| 5,815 | 31.49162 | 88 |
py
|
finmag
|
finmag-master/dev/sandbox/benchmarks/updating_expression_simple.py
|
import time
import math
import dolfin as df
import numpy as np
"""
This script works with dolfin 1.0.1.
In dolfin 1.1.0, the ordering of degrees of freedom does not correspond
to geometrical structures such as vertices anymore. This has to be
investigated further. For now, the re-ordering can be turned off using
the commented line below.
"""
# df.parameters.reorder_dofs_serial = False # for dolfin 1.1.0
"""
The goal is to compute the values of f(r, t) = sin(x) * sin(t) on a mesh for
different points in time. The cost of updating f a couple of times is measured.
"""
L = math.pi / 2; n = 10;
mesh = df.Box(0, 0, 0, L, L, L, n, n, n)
#mesh = df.BoxMesh(0, 0, 0, L, L, L, n, n, n) # dor dolfin 1.1.0
ts = np.linspace(0, math.pi / 2, 100)
# dolfin expression code
S = df.FunctionSpace(mesh, "CG", 1)
expr = df.Expression("sin(x[0]) * sin(t)", t = 0.0)
start = time.time()
for t in ts:
expr.t = t
f_dolfin = df.interpolate(expr, S)
t_dolfin = time.time() - start
print "Time needed for dolfin expression: {:.2g} s.".format(t_dolfin)
# explicit loop code
f_loop = np.empty(mesh.num_vertices())
start = time.time()
for t in ts:
for i, (x, y, z) in enumerate(mesh.coordinates()):
f_loop[i] = math.sin(x) * math.sin(t)
t_loop = time.time() - start
print "Time needed for loop: {:.2g} s.".format(t_loop)
# compute the ratio
ratio = t_dolfin / t_loop
if ratio >= 1:
print "Looping over numpy array is {:.2g} times faster than interpolating dolfin expression.".format(ratio)
else:
print "Interpolating the dolfin expression is {:.2g} times faster than looping over numpy array.".format(ratio)
# check correctness
if False:
print "\nMesh coordinates:\n", mesh.coordinates()
print "Function was sin(x) * sin(t)."
print "Dolfin vector at t=pi/2:\n\t", f_dolfin.vector().array()
print "Numpy vector at t=pi/2:\n\t", f_loop
max_diff = np.max(np.abs(f_dolfin.vector().array() - f_loop))
assert max_diff < 1e-14, "Maximum difference is d = {:.2g}.".format(max_diff)
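# Added variant (not part of the original benchmark): a fully vectorised numpy
# version of the explicit loop above; sin() is applied to the whole coordinate
# array once per time step. xs, f_vec and t_vec are new names.
xs = mesh.coordinates()[:, 0]
start = time.time()
for t in ts:
    f_vec = np.sin(xs) * math.sin(t)
t_vec = time.time() - start
print "Time needed for vectorised numpy: {:.2g} s.".format(t_vec)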
| 2,015 | 28.217391 | 115 |
py
|
finmag
|
finmag-master/dev/sandbox/benchmarks/simple_timer.py
|
import time
class SimpleTimer():
"""
A simple timer which can be used as a context manager.
Usage example:
benchmark = SimpleTimer()
with benchmark:
do_stuff()
print benchmark.elapsed
A SimpleTimer instance can be reused.
"""
def __init__(self):
self.start = 0.0
self.stop = 0.0
def __enter__(self):
self.start = time.time()
def __exit__(self, type, value, traceback):
self.stop = time.time()
self.elapsed = self.stop - self.start
| 546 | 19.259259 | 58 |
py
|
finmag
|
finmag-master/dev/sandbox/benchmarks/test_method_efficiency.py
|
import dolfin as df
import time
from finmag.energies.anisotropy import UniaxialAnisotropy as ani
from finmag.energies.exchange import Exchange as exch
import numpy as np
def efficiency_test(method, n, field='exchange'):
"""
Test the efficiency of different methods
implemented in the different energy classes.
Possible fields so far is 'exchange' and 'anisotropy'.
"""
length = 20e-9
mesh = df.Rectangle(0, 0, length, length, n, n)
print "mesh.shape:",mesh.coordinates().shape
V = df.VectorFunctionSpace(mesh, "CG", 1, dim=3)
# Initial magnetisation
m0 = ("0.0",
"sqrt(1 - ((2*x[0]-L)/L)*((2*x[0]-L)/L))",
"(2*x[0]-L)/L")
m = df.interpolate(df.Expression(m0,L=length), V)
Ms = 0.86e6 # A/m, for example Py
if field == 'exchange':
C = 1.3e-11 # J/m exchange constant
energy = exch(C, method=method)
energy.setup(V, m, Ms)
elif field == 'anisotropy':
a = df.Constant((0, 0, 1)) # Easy axis
K = 520e3 # J/m^3, Co
energy = ani(V, K, a, method=method)
energy.setup(V, m, Ms)
else:
raise NotImplementedError("%s is not implemented." % field)
Ntest = 10
print "Calling exchange calculation %d times." % Ntest
time_start = time.time()
for i in xrange(Ntest):
H_ex = energy.compute_field()
time_end = time.time()
t1 = time_end-time_start
print "Method '%s' took %g seconds" % (method, t1)
return H_ex, t1
def correctness_test(results, ref_method, tol, rtol):
"""
Test correctness of the different methods by comparing
    their results with each other.
    Worst case scenario: Everyone is equal to each other, but
they are all wrong. Then we have a problem...
"""
# Sanity check
if not results.has_key(ref_method):
print "Cannot use %s method as reference." % ref_method
return
ref = results[ref_method]
print "\n\n\n*** Comparisons ***"
for method in results.iterkeys():
if method == ref_method:
continue
Hex = results[method]
assert len(ref) == len(Hex)
max_error, rel_error = 0, 0
for i in range(len(ref)):
diff = abs(ref[i] - Hex[i])
max_error2 = diff.max()
rel_error2 = max_error2/max(abs(ref[i]))
if max_error2 > max_error:
max_error = max_error2
if rel_error2 > rel_error:
rel_error = rel_error2
print "\nBetween '%s' and '%s' methods:" % \
(ref_method, method)
print "Max error: ", max_error
print "Relative error:", rel_error
assert max_error < tol
assert rel_error < rtol
def print_results(results, ns):
"""
Print timings and speedups.
"""
print "\n\n*** Timings ***"
for i, n in enumerate(ns):
print "\nn = %d" % n
for method in results.iterkeys():
print "'%s': %.2f ms" % (method, results[method][i]*1000)
print "\n*** Speedup ***"
print "(Larger than 1 means second method is faster than first method.)"
methods = results.keys()
nomethods = len(methods)
for i in range(nomethods - 1):
for j in range(i + 1, nomethods):
print "\n** '%s' vs '%s' **" % (methods[i], methods[j])
print "n ="
for k, n in enumerate(ns):
t1 = results[methods[i]][k]
t2 = results[methods[j]][k]
sp = t1/t2
print "%6d -> %5.2f" % (n, sp)
if __name__ == '__main__':
# This is the method to compare all
# the other methods against. This
# should be the one we are most sure
# is working correct.
ref_method = 'box-matrix-numpy'
# Field for which to compare efficiency and correctness
# (Comment out the one you are not interested in)
#field = "anisotropy"
field = "exchange"
methods = ['box-assemble', 'box-matrix-numpy', 'box-matrix-petsc']#, 'project']
ns = [1, 5, 10, 20, 50]
methods_times = {}
methods_field = {}
for method in methods:
times = []
fields = []
for n in ns:
H_ex, t = efficiency_test(method, n, field)
fields.append(H_ex)
times.append(t)
methods_times[method] = times
methods_field[method] = fields
correctness_test(methods_field, ref_method, 10e-7, 1e-14)
print_results(methods_times, ns)
print "These results were obtained for the %s field." % field
| 4,546 | 27.597484 | 83 |
py
|
finmag
|
finmag-master/dev/sandbox/benchmarks/vector_initialisation.py
|
#!/usr/bin/python
import numpy as np
import dolfin as df
import timeit
df.parameters.reorder_dofs_serial = False # for dolfin 1.1.0
# Define a rectangular mesh and a vector-function space on it
n1 = 100
n2 = 40
n3 = 25
print "Initialising mesh and function space..."
mesh = df.UnitCubeMesh(n1, n2, n3)
S3 = df.VectorFunctionSpace(mesh, "Lagrange", 1, dim=3)
print "Number of mesh nodes: {}".format(mesh.num_vertices())
# Define two different methods to create a constant vector-valued
# function on the space:
def constant_function_v1(v, S3):
val = np.empty((S3.mesh().num_vertices(), 3))
val[:] = v # we're using broadcasting here
fun = df.Function(S3)
fun.vector()[:] = val.transpose().reshape((-1,))
return fun
def constant_function_v2(v, S3):
val = df.Constant(v)
fun = df.interpolate(val, S3)
return fun
# Now create the 'same' function using each of these methods and time how long it takes:
v = [2,4,3] # some arbitrary vector
f1 = constant_function_v1(v, S3)
f2 = constant_function_v2(v, S3)
print "Performing timing measurements (this may take a while for larger meshes) ...\n"
t1 = min(timeit.Timer('constant_function_v1(v, S3)', 'from __main__ import constant_function_v1, v, S3').repeat(3, number=10))
t2 = min(timeit.Timer('constant_function_v2(v, S3)', 'from __main__ import constant_function_v2, v, S3').repeat(3, number=10))
print "Method 1 took {:g} seconds (using a numpy array to set the function vector directly) (best of 3 runs)".format(t1)
print "Method 2 took {:g} seconds (using df.interpolate) (best of 3 runs)".format(t2)
# Just for safety, check that the two functions have the same values on the nodes
print "\nPerforming safety check that the two functions are actually the same on all nodes ...",
assert(all(f1.vector() == f2.vector()))
print "passed!"
| 1,835 | 31.785714 | 126 |
py
|
finmag
|
finmag-master/dev/sandbox/benchmarks/demag_tolerances.py
|
import dolfin as df
from finmag.energies.demag.fk_demag import FKDemag
from finmag.util.meshes import sphere
from simple_timer import SimpleTimer
# nmag, dict ksp_tolerances in Simulation class constructor
# apparently gets passed to petsc http://www.mcs.anl.gov/petsc/petsc-current/docs/manualpages/KSP/KSPSetTolerances.html
# dolfin
# managed to pass tolerances to KrylovSolver, may want to simply use "solve" instead
# https://answers.launchpad.net/dolfin/+question/193119
# check /usr/share/dolfin/demo
# check help(df.solve)
mesh = sphere(r=10.0, maxh=0.4)
S3 = df.VectorFunctionSpace(mesh, "CG", 1)
m = df.Function(S3)
m.assign(df.Constant((1, 0, 0)))
Ms = 1
unit_length = 1e-9
demag = FKDemag()
demag.setup(S3, m, Ms, unit_length)
benchmark = SimpleTimer()
print "Default df.KrylovSolver tolerances (abs, rel): Poisson ({}, {}), Laplace ({}, {}).".format(
demag._poisson_solver.parameters["absolute_tolerance"],
demag._poisson_solver.parameters["relative_tolerance"],
demag._laplace_solver.parameters["absolute_tolerance"],
demag._laplace_solver.parameters["relative_tolerance"])
with benchmark:
for i in xrange(10):
print demag.compute_field().reshape((3, -1)).mean(axis=1)
print "With default parameters {} s.".format(benchmark.elapsed)
demag._poisson_solver.parameters["absolute_tolerance"] = 1e-5
demag._laplace_solver.parameters["absolute_tolerance"] = 1e-5
demag._poisson_solver.parameters["relative_tolerance"] = 1e-5
demag._laplace_solver.parameters["relative_tolerance"] = 1e-5
with benchmark:
for i in xrange(10):
print demag.compute_field().reshape((3, -1)).mean(axis=1)
print "With higher tolerances {} s.".format(benchmark.elapsed)
| 1,713 | 38.860465 | 119 |
py
|
finmag
|
finmag-master/dev/sandbox/benchmarks/updating_expression.py
|
import numpy as np
import dolfin as df
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from math import sin
from simple_timer import SimpleTimer
benchmark = SimpleTimer()
"""
The goal is to compute the values of f(r, t) = sin(x) * sin(t) on a mesh for
different points in time. The cost of updating f a couple of times is measured.
"""
def time_dolfin(mesh, ts):
"""
Uses a dolfin expression to compute the values of f on the mesh and the times in ts.
"""
S = df.FunctionSpace(mesh, "CG", 1)
expr = df.Expression("sin(x[0]) * sin(t)", t = 0.0)
with benchmark:
for t in ts:
expr.t = t
f = df.interpolate(expr, S)
return benchmark.elapsed, f.vector().array()
def time_numpy_loop(mesh, ts):
"""
Uses numpy and a loop over the mesh coordinates to compute the values of f
on the mesh and the times in ts. This is what we think dolfin is doing
in C++ internally when we call time_dolfin.
"""
f = np.empty(mesh.num_vertices())
S = df.FunctionSpace(mesh, "CG", 1)
xs = df.interpolate(df.Expression("x[0]"), S).vector().array()
with benchmark:
for t in ts:
for i, x in enumerate(xs):
f[i] = sin(x) * sin(t)
return benchmark.elapsed, f
def time_numpy_vectorised(mesh, ts):
"""
Instead of looping over the coordinates like in time_numpy_loop, this
uses vectorised numpy code.
"""
f = np.empty(mesh.num_vertices())
S = df.FunctionSpace(mesh, "CG", 1)
xs = df.interpolate(df.Expression("x[0]"), S).vector().array()
with benchmark:
for t in ts:
f[:] = np.sin(xs) * sin(t)
return benchmark.elapsed, f
def time_numpy_smart(mesh, ts):
"""
This method uses additional knowledge about the function at hand.
The function `f(r, t) = sin(x) * sin(t)` is the product of the
space-dependent part `sin(x)` and the time-dependent part `sin(t)`.
Since the spatial discretisation doesn't change over time, the
space-dependent part of the function only needs to be computed
once. Multiplied by the time-dependent part at each time step, the full
function is reconstructed.
In a way, this method is not fair to the others, because it uses prior
knowledge about the function which the computer can't derive on its own.
"""
f = np.empty(mesh.num_vertices())
S = df.FunctionSpace(mesh, "CG", 1)
xs = df.interpolate(df.Expression("x[0]"), S).vector().array()
f_space_dependent_part = np.sin(xs)
with benchmark:
for t in ts:
f[:] = f_space_dependent_part * sin(t)
return benchmark.elapsed, f
L = np.pi / 2
dLs = [1, 2, 5, 7, 10, 12, 17, 20]
ts = np.linspace(0, np.pi / 2, 100)
vertices = []
runtimes = []
alternate_methods = [time_numpy_loop, time_numpy_vectorised, time_numpy_smart]
for i, dL in enumerate(dLs):
#mesh = df.Box(0, 0, 0, L, L, L, dL, dL, dL)
mesh = df.BoxMesh(0, 0, 0, L, L, L, dL, dL, dL)
print "Running for a mesh with {} vertices [{}/{}].".format(
mesh.num_vertices(), i+1, len(dLs))
# reference
t_dolfin, f_dolfin = time_dolfin(mesh, ts)
# other methods
runtimes_alternate_methods = []
for method in alternate_methods:
t, f = method(mesh, ts)
assert np.max(np.abs(f - f_dolfin)) < 1e-14
runtimes_alternate_methods.append(t)
vertices.append(mesh.num_vertices())
runtimes.append([t_dolfin] + runtimes_alternate_methods)
runtimes = zip(* runtimes)
plt.plot(vertices, runtimes[0], "b", label="dolfin interpolate expression")
plt.plot(vertices, runtimes[1], "r", label="numpy loop over coordinates")
plt.plot(vertices, runtimes[2], "g", label="numpy vectorised code")
plt.plot(vertices, runtimes[3], "c", label="numpy smart")
plt.xlabel("vertices")
plt.ylabel("time (s)")
plt.yscale("log")
plt.legend(loc="upper center", bbox_to_anchor=(0.5, 1.10), prop={'size':10})
plt.savefig("updating_expression.png")
print "Saved plot to 'updating_expression.png'."
| 4,038 | 31.312 | 88 |
py
|
finmag
|
finmag-master/dev/sandbox/cross_product/llg_example.py
|
import dolfin as df
import time
mesh = df.UnitSquareMesh(200, 200)
print mesh
S1 = df.FunctionSpace(mesh, 'CG', 1)
S3 = df.VectorFunctionSpace(mesh, 'CG', 1, 3)
u = df.TrialFunction(S3)
v = df.TestFunction(S3)
m = df.Function(S3) # magnetisation
Heff = df.Function(S3) # effective field
Ms = df.Function(S1) # saturation magnetisation
alpha = df.Function(S1) # damping
gamma = df.Constant(1) # gyromagnetic ratio
m.assign(df.Constant((1, 0, 0)))
Heff.assign(df.Constant((0, 0, 1)))
alpha.assign(df.Constant(1))
Ms.assign(df.Constant(1))
# just assembling it
LLG = -gamma/(1+alpha*alpha)*df.cross(m, Heff) - alpha*gamma/(1+alpha*alpha)*df.cross(m, df.cross(m, Heff))
L = df.dot(LLG, df.TestFunction(S3)) * df.dP
dmdt = df.Function(S3)
start = time.time()
for i in xrange(1000):
df.assemble(L, tensor=dmdt.vector())
stop = time.time()
print "delta = ", stop - start
print dmdt.vector().array()
# more linear algebra, same problem... still need to assemble the cross product
# we're doing even more work than before
a = df.dot(u, v) * df.dP
A = df.assemble(a)
b = df.Function(S3)
dmdt = df.Function(S3)
start = time.time()
for i in xrange(1000):
df.assemble(L, tensor=b.vector()) # this is what should go out of the loop
df.solve(A, dmdt.vector(), b.vector()) # some variation of this could stay in
stop = time.time()
print "delta = ", stop - start
print dmdt.vector().array()
| 1,400 | 28.1875 | 107 |
py
|
finmag
|
finmag-master/dev/sandbox/cross_product/benchmark.py
|
"""
An example code that computes the cross product of two dolfin functions.
This should be replaced in the parts of the code where dmdt is computed.
"""
import dolfin as df
import time
mesh = df.IntervalMesh(1000, 0, 1)
S3 = df.VectorFunctionSpace(mesh, 'CG', 1, 3)
a = df.Function(S3)
b = df.Function(S3)
a.assign(df.Constant((1, 0, 0))) # unit x vector
b.assign(df.Constant((0, 1, 0))) # unit y vector
alpha = 1
gamma = 2.11e5
m = df.Function(S3)
m.assign(df.Constant((0, 0, 1)))
Heff = df.Function(S3)
Heff.assign(df.Constant((0, 0.3, 0.2)))
dmdt = df.Function(S3)
start = time.time()
for i in range(1000):
L = df.dot(-gamma/(1+alpha*alpha)*df.cross(m, Heff) - alpha*gamma/(1+alpha*alpha)*df.cross(m, df.cross(m, Heff)), df.TestFunction(S3)) * df.dP
df.assemble(L, tensor=dmdt.vector())
stop = time.time()
print 'Time:', stop-start
| 852 | 26.516129 | 146 |
py
|
finmag
|
finmag-master/dev/sandbox/cross_product/using_cpp.py
|
import time
import dolfin as df
from finmag.physics.equation import Equation
mesh = df.UnitSquareMesh(200, 200)
S1 = df.FunctionSpace(mesh, "CG", 1)
S3 = df.VectorFunctionSpace(mesh, "CG", 1, dim=3)
alpha = df.Function(S1)
m = df.Function(S3)
H = df.Function(S3)
dmdt = df.Function(S3)
alpha.assign(df.Constant(1))
m.assign(df.Constant((1, 0, 0)))
H.assign(df.Constant((0, 1, 0)))
eq = Equation(m.vector(), H.vector(), dmdt.vector())
eq.set_alpha(alpha.vector())
eq.set_gamma(1.0)
start = time.time()
for i in xrange(10000):
eq.solve()
stop = time.time()
print "delta = ", stop - start
print dmdt.vector().array()
| 625 | 19.866667 | 52 |
py
|
finmag
|
finmag-master/dev/sandbox/cross_product/fem_cross.py
|
import time
import dolfin as df
mesh = df.UnitIntervalMesh(5)
V = df.VectorFunctionSpace(mesh, "CG", 1, dim=3)
u = df.TrialFunction(V)
v = df.TestFunction(V)
f = df.Function(V)
g = df.Function(V)
fxg = df.Function(V)
f.assign(df.Constant((1, 0, 0)))
g.assign(df.Constant((0, 1, 0)))
# FINITE ELEMENT METHOD
# but dP instead of dx (both work)
a = df.dot(u, v) * df.dP
L = df.dot(df.cross(f, g), v) * df.dP
u = df.Function(V)
start = time.time()
for _ in xrange(1000):
df.solve(a == L, u)
stop = time.time()
print "fem, delta = {} s.".format(stop - start)
print u.vector().array()
# LINEAR ALGEBRA FORMULATION
A = df.assemble(a)
b = df.Function(V)
u = df.Function(V)
start = time.time()
for _ in xrange(1000):
df.assemble(L, tensor=b.vector())
df.solve(A, u.vector(), b.vector())
stop = time.time()
print "la, delta = {} s.".format(stop - start)
print u.vector().array()
# JUST ASSEMBLING THE THING IN THE FIRST PLACE
u = df.Function(V)
start = time.time()
for _ in xrange(1000):
df.assemble(L, tensor=u.vector())
stop = time.time()
print "just assembling, delta = {} s.".format(stop - start)
print u.vector().array()
| 1,143 | 20.584906 | 59 |
py
|
finmag
|
finmag-master/dev/sandbox/cross_product/cross_product.py
|
"""
An example code that computes the cross product of two dolfin functions.
This should be replaced in the parts of the code where dmdt is computed.
"""
import dolfin as df
from distutils.version import LooseVersion
if LooseVersion(df.__version__) < LooseVersion('1.5.0'):
raise RuntimeError("This script requires at least dolfin version 1.5. It will run without error in 1.4, but the computed cross product will be zero!")
def cross_product(a, b, S3):
"""
Function computing the cross product of two vector functions.
The result is a dolfin vector.
"""
return df.assemble(df.dot(df.cross(a, b), df.TestFunction(S3)) * df.dP)
mesh = df.IntervalMesh(10, 0, 1)
S3 = df.VectorFunctionSpace(mesh, 'CG', 1, 3)
a = df.Function(S3)
b = df.Function(S3)
a.assign(df.Constant((1, 0, 0))) # unit x vector
b.assign(df.Constant((0, 1, 0))) # unit y vector
axb = cross_product(a, b, S3)
# An expected result is unit z vector and the type is dolfin vector.
print axb.array()
print type(axb)
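# A small sanity check sketch (assumed, not part of the original example): for
# a = e_x and b = e_y the nodal cross product is e_z at every vertex, so the
# assembled vector holds exactly one 1 per vertex and zeros elsewhere,
# independently of the dof ordering.
import numpy as np
values = np.sort(axb.array())
assert np.allclose(values[-mesh.num_vertices():], 1.0)
assert np.allclose(values[:-mesh.num_vertices()], 0.0)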
| 1,009 | 31.580645 | 154 |
py
|
finmag
|
finmag-master/dev/sandbox/gpu/cupstest.py
|
from dolfin import *
from time import time
import math, random
#from finmag.util.meshes import from_geofile
def run_test():
solver = KrylovSolver("cg", "jacobi")
mesh = Box(0,0,0,30,30,100,10,10,30)
#mesh = Mesh(convert_mesh("bar.geo"))
#mesh = UnitCubeMesh(32,32,32)
V = FunctionSpace(mesh, "CG", 1)
W = VectorFunctionSpace(mesh, "CG", 1)
u = TrialFunction(V)
v = TestFunction(V)
w = TrialFunction(W)
A = assemble(inner(grad(u), grad(v))*dx)
D = assemble(inner(w, grad(v))*dx)
m = Function(W)
m.vector()[:] = random.random()
b = D*m.vector()
x = Vector()
start = time()
solver.solve(A, x, b)
return time() - start
parameters["linear_algebra_backend"] = "PETSc"
time1 = run_test()
parameters["linear_algebra_backend"] = "PETScCusp"
time2 = run_test()
print "Backend: PETSc, time: %g" % time1
print "Backend: PETScCusp, time: %g" % time2
print "Speedup: %g" % (time1/time2)
| 952 | 24.078947 | 50 |
py
|
finmag
|
finmag-master/dev/sandbox/gpu/finmag_backends.py
|
import os, time
import dolfin as df
from finmag.util.timings import default_timer
from finmag import Simulation
from finmag.energies import Exchange, Demag
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
# Run the nmag2 example on PETSc and PETScCUSP and compare timings
def run_finmag():
# Setup
setupstart = time.time()
mesh = df.Box(0,0,0,30,30,100,15,15,50)
sim = Simulation(mesh, Ms=0.86e6, unit_length=1e-9)
sim.set_m((1, 0, 1))
demag = Demag("FK")
demag.parameters["poisson_solver"]["method"] = "cg"
demag.parameters["poisson_solver"]["preconditioner"] = "jacobi"
demag.parameters["laplace_solver"]["method"] = "cg"
demag.parameters["laplace_solver"]["preconditioner"] = "bjacobi"
sim.add(demag)
sim.add(Exchange(13.0e-12))
# Dynamics
dynamicsstart = time.time()
sim.run_until(3.0e-10)
endtime = time.time()
# Write output to results.txt
output = open(os.path.join(MODULE_DIR, "results.txt"), "a")
output.write("\nBackend %s:\n" % df.parameters["linear_algebra_backend"])
output.write("\nSetup: %.3f sec.\n" % (dynamicsstart-setupstart))
output.write("Dynamics: %.3f sec.\n\n" % (endtime-dynamicsstart))
output.write(str(default_timer))
output.close()
# Need a clean file
if os.path.isfile(os.path.join(MODULE_DIR, "results.txt")):
os.remove(os.path.join(MODULE_DIR, "results.txt"))
df.parameters["linear_algebra_backend"] = "PETSc"
run_finmag()
df.parameters["linear_algebra_backend"] = "PETScCusp"
run_finmag()
| 1,491 | 30.744681 | 81 |
py
|
finmag
|
finmag-master/dev/sandbox/bulkdmi_direct/run.py
|
import os
import logging
import pylab as p
import numpy as np
import dolfin as df
df.parameters.reorder_dofs_serial = False
from dolfin import *
from finmag import Simulation as Sim
from finmag.field import Field
from finmag.energies import Exchange, Demag, DMI
from finmag.util.meshes import from_geofile, mesh_volume
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
REL_TOLERANCE = 5e-4
Ms = 8.6e5
unit_length = 1e-9
mesh = from_geofile(os.path.join(MODULE_DIR, "cylinder.geo"))
def init_m(pos):
x, y, z = pos
r = np.sqrt(x*x+y*y+z*z)
return 0, np.sin(r), np.cos(r)
def exact_field(pos):
x, y, z = pos
r = np.sqrt(x*x+y*y+z*z)
factor = 4e-3/(4*np.pi*1e-7*Ms)/unit_length
return -2*factor*np.array([-(z*np.cos(r)+y*np.sin(r))/r, x*np.sin(r)/r, x*np.cos(r)/r])
class HelperExpression(df.Expression):
def eval(self, value, x):
value[:] = exact_field(x)[:]
def value_shape(self):
return (3,)
def run_finmag():
sim = Sim(mesh, Ms, unit_length=unit_length)
sim.alpha = 0.5
sim.set_m(init_m)
exchange = Exchange(13.0e-12)
sim.add(exchange)
dmi = DMI(4e-3)
sim.add(dmi)
dmi_direct = DMI(4e-3, method='direct', name='dmi_direct')
sim.add(dmi_direct)
df.plot(sim.m_field.f, title='m')
fun = df.interpolate(HelperExpression(), sim.S3)
df.plot(fun, title='exact')
df.plot(Field(sim.S3, dmi.compute_field()).f, title='dmi_petsc')
df.plot(Field(sim.S3, dmi_direct.compute_field()).f, title='dmi_direct', interactive=True)
if __name__ == '__main__':
run_finmag()
| 1,611 | 19.666667 | 94 |
py
|
finmag
|
finmag-master/dev/sandbox/numpy/sundials_ode.py
|
import os
import numpy as np
from finmag.native import sundials
def call_back(t, y):
return y**2-y**3
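# Note: y' = y**2 - y**3 is the classic "flame propagation" model that is
# commonly used to exercise stiff integrators such as CVODE's BDF method.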
class Test_Sundials(object):
def __init__(self, call_back, x0):
self.sim = call_back
self.x0 = x0
self.x = x0.copy()
self.ode_count = 0
self.create_integrator()
def create_integrator(self, reltol=1e-2, abstol=1e-2, nsteps=10000):
integrator = sundials.cvode(sundials.CV_BDF, sundials.CV_NEWTON)
integrator.init(self.sundials_rhs, 0, self.x0)
integrator.set_linear_solver_sp_gmr(sundials.PREC_NONE)
integrator.set_scalar_tolerances(reltol, abstol)
integrator.set_max_num_steps(nsteps)
self.integrator = integrator
def sundials_rhs(self, t, y, ydot):
self.ode_count+=1
#The following line is very important!!!
ydot[:] = 0
ydot[:] = self.sim(t, y)
return 0
def run_step(self, steps, min_dt=1e-10):
self.t = 0
self.ts=[]
self.ys=[]
for i in range(steps):
cvode_dt = self.integrator.get_current_step()
if cvode_dt < min_dt:
cvode_dt = min_dt
self.t += cvode_dt
self.integrator.advance_time(self.t, self.x)
self.ts.append(self.t)
self.ys.append(self.x[0])
#print i
def print_info(self):
print 'sim t=%0.15g'%self.t
print 'x=',self.x
print 'rhs=%d'%self.ode_count
| 1,599 | 22.880597 | 72 |
py
|
finmag
|
finmag-master/dev/sandbox/numpy/test_ode.py
|
import os
import matplotlib as mpl
mpl.use("Agg")
import matplotlib.pyplot as plt
import numpy as np
from sundials_ode import Test_Sundials
from sundials_ode import call_back
def plot_data(ts,ys):
fig=plt.figure()
plt.plot(ts, ys, '.-')
#plt.legend()
plt.grid()
fig.savefig('ts_y.pdf')
def test1():
y0 = np.array([0.9**i for i in range(100)])
#y0 = np.array([1e-4,100,0,0,0,0,0,0,0,0])
ts = Test_Sundials(call_back, y0)
ts.run_step(50, min_dt=1e-5)
plot_data(ts.ts,ts.ys)
ts.print_info()
if __name__ == '__main__':
test1()
| 645 | 13.355556 | 47 |
py
|
finmag
|
finmag-master/dev/sandbox/numpy/test_weiwei_bug.py
|
import numpy as np
"""
h2 = h - np.dot(h,t)*t + sf*t
self.Heff[i][:] = h2[:]
where h, t are 1d numpy arrays, and sf is a double number, if I change the code to,
h2 = np.dot(h,t)*t
h3 = h - h2 + sf*t
self.Heff[i][:] = h3[:]
does anyone know why h2 = h - np.dot(h,t)*t + sf*t is not safe? Weiwei
"""
def test_different_results():
h = np.random.random(10000)
t = np.random.random(10000)
sf = np.random.random()
one = h - np.dot(h, t) * t + sf * t
temp = np.dot(h, t) * t
two = h - temp + sf * t
print ""
print one
print two
print "Maximum difference: ", np.abs(one - two).max()
assert np.allclose(one, two, atol=1e-14, rtol=0)
if __name__ == "__main__":
test_different_results()
| 745 | 17.65 | 83 |
py
|
finmag
|
finmag-master/dev/sandbox/baryakhter/example.py
|
import dolfin as df
import numpy as np
import matplotlib as mpl
mpl.use("Agg")
import matplotlib.pyplot as plt
import time
from baryakhtar import LLB
from finmag.energies import Zeeman
from finmag.energies import Demag
from exchange import Exchange
from finmag import Simulation as Sim
import finmag.energies as fe
#from finmag.energies import Exchange
def save_plot(x,y,filename):
fig=plt.figure()
plt.plot(x,y,'-.',markersize=3)
fig.savefig(filename)
def example1(Ms=8.6e5):
x0 = y0 = z0 = 0
x1 = y1 = z1 = 10
nx = ny = nz = 1
    mesh = df.Box(x0, y0, z0, x1, y1, z1, nx, ny, nz)
S1 = df.FunctionSpace(mesh, "Lagrange", 1)
S3 = df.VectorFunctionSpace(mesh, "Lagrange", 1,dim=3)
vis = df.Function(S3)
llb = LLB(S1,S3)
llb.alpha = 0.01
llb.beta = 0.0
llb.M0=Ms
llb.set_M((Ms, 0, 0))
llb.set_up_solver(jacobian=False)
llb.chi=1e-4
H_app = Zeeman((0, 0, 1e5))
H_app.setup(S3, llb._M,Ms=1)
llb.interactions.append(H_app)
exchange = Exchange(13.0e-12,1e-2)
exchange.chi=1e-4
exchange.setup(S3,llb._M, Ms, unit_length=1e-9)
llb.interactions.append(exchange)
max_time=2*np.pi/(llb.gamma*1e5)
ts = np.linspace(0, max_time, num=100)
mlist=[]
Ms_average=[]
for t in ts:
llb.run_until(t)
mlist.append(llb.M)
vis.vector()[:]=mlist[-1]
Ms_average.append(llb.M_average)
df.plot(vis)
time.sleep(0.00)
print 'llb times',llb.call_field_times
save_plot(ts,Ms_average,'Ms_%g-time.png'%Ms)
df.interactive()
def example2(Ms):
x0 = y0 = z0 = 0
x1 = 500
y1 = 10
z1 = 100
nx = 50
ny = 1
nz = 1
mesh = df.Box(x0, y0, z0, x1, y1, z1, nx, ny, nz)
S1 = df.FunctionSpace(mesh, "Lagrange", 1)
S3 = df.VectorFunctionSpace(mesh, "Lagrange", 1,dim=3)
llb = LLB(S1,S3)
llb.alpha = 0.01
llb.beta = 0.0
llb.M0=Ms
llb.set_M((Ms, 0, 0))
llb.set_up_solver(jacobian=False)
llb.chi=1e-4
H_app = Zeeman((0, 0, 5e5))
H_app.setup(S3, llb._M,Ms=1)
llb.interactions.append(H_app)
exchange = Exchange(13.0e-12,1e-2)
exchange.chi=1e-4
exchange.setup(S3,llb._M, Ms, unit_length=1e-9)
llb.interactions.append(exchange)
demag = Demag("FK")
demag.setup(S3,llb._M, Ms=1)
llb.interactions.append(demag)
max_time = 1 * np.pi / (llb.gamma * 1e5)
ts = np.linspace(0, max_time, num=100)
for t in ts:
print t
llb.run_until(t)
df.plot(llb._M)
df.interactive()
def example3(Ms):
x0 = y0 = z0 = 0
x1 = 500
y1 = 10
z1 = 500
nx = 50
ny = 1
nz = 1
mesh = df.Box(x0, y0, z0, x1, y1, z1, nx, ny, nz)
sim = Sim(mesh, Ms, unit_length=1e-9)
sim.alpha = 0.01
sim.set_m((1, 0, 0.1))
H_app = Zeeman((0, 0, 5e5))
sim.add(H_app)
exch = fe.Exchange(13.0e-12)
sim.add(exch)
demag = Demag(solver="FK")
sim.add(demag)
llg=sim.llg
max_time = 1 * np.pi / (llg.gamma * 1e5)
ts = np.linspace(0, max_time, num=100)
for t in ts:
print t
sim.run_until(t)
df.plot(llg._m)
df.interactive()
if __name__=="__main__":
example2(8.6e5)
| 3,340 | 19.496933 | 58 |
py
|
finmag
|
finmag-master/dev/sandbox/baryakhter/compare_fd_fem_laplace.py
|
from dolfin import *
import numpy as np
def Laplace_FD_2d(mesh,u_e,gridm,gridn):
data=mesh.coordinates()
m,n=data.shape
assert((gridm+1)*(gridn+1)==m)
x=data[:,0]
y=data[:,1]
x=x.reshape((gridm+1,gridn+1),order='F')
y=y.reshape((gridm+1,gridn+1),order='F')
u=u_e.vector().array().reshape((gridm+1,gridn+1,2),order='F')
dx=(x[-1][0]-x[0][0])/gridm
dy=(y[0][-1]-y[0][0])/gridn
print 'dx*dx=',dx*dx,'dy*dy=',dy*dy
diff=np.zeros((gridm+1,gridn+1,2))
for i in range(gridm+1):
for j in range(gridn+1):
if i>0:
diff[i,j,0]+=(u[i-1,j,0]-u[i,j,0])/(dx*dx)
diff[i,j,1]+=(u[i-1,j,1]-u[i,j,1])/(dx*dx)
if j>0:
diff[i,j,0]+=(u[i,j-1,0]-u[i,j,0])/(dy*dy)
diff[i,j,1]+=(u[i,j-1,1]-u[i,j,1])/(dy*dy)
if i<gridm:
diff[i,j,0]+=(u[i+1,j,0]-u[i,j,0])/(dx*dx)
diff[i,j,1]+=(u[i+1,j,1]-u[i,j,1])/(dx*dx)
if j<gridn:
diff[i,j,0]+=(u[i,j+1,0]-u[i,j,0])/(dy*dy)
diff[i,j,1]+=(u[i,j+1,1]-u[i,j,1])/(dy*dy)
diff=diff.reshape(2*m,order='F')
print diff
Vv = VectorFunctionSpace(mesh, 'Lagrange', 1)
diff_u=Function(Vv)
diff_u.vector()[:]=diff[:]
return diff_u
def Laplace_FEM(mesh,u_e):
V = FunctionSpace(mesh, 'Lagrange', 1)
V3 = VectorFunctionSpace(mesh,'CG',1)
grad_u = project(grad(u_e))
tmp=project(div(grad_u))
return tmp
if __name__=='__main__':
gridm,gridn=100,1
mesh = Rectangle(0, 0, 10*np.pi, 10, gridm, gridn, 'left')
V = FunctionSpace(mesh, 'Lagrange', 1)
V3 = VectorFunctionSpace(mesh,'CG',1)
v0 = Expression(('-cos(x[0])','sin(x[0])'))
v0_e=Expression(('cos(x[0])','-sin(x[0])'))
v = interpolate(v0, V3)
v_e = interpolate(v0_e, V3)
fd=Laplace_FD_2d(mesh,v,gridm,gridn)
fem=Laplace_FEM(mesh,v)
diff=Function(V3)
diff.vector()[:]=fem.vector().array()-v_e.vector().array()
print diff.vector().array().reshape((gridm+1,gridn+1,2),order='F')
diff_fd=Function(V3)
diff_fd.vector()[:]=fd.vector().array()-v_e.vector().array()
plot(fem)
plot(diff)
plot(v_e)
plot(fd)
plot(diff_fd)
interactive()
| 2,272 | 26.059524 | 70 |
py
|
finmag
|
finmag-master/dev/sandbox/baryakhter/exchange.py
|
import dolfin as df
import numpy as np
import logging
from finmag.util.consts import mu0
from aeon import timer
from finmag.native import llg as native_llg
logger=logging.getLogger('finmag')
class Exchange(object):
def __init__(self, C, chi=1, in_jacobian=True):
self.C = C
self.chi = chi
self.in_jacobian=in_jacobian
@timer.method
def setup(self, S3, M, M0, unit_length=1.0):
self.S3 = S3
self.M = M
self.M0 = M0
self.unit_length = unit_length
self.Ms2=np.array(self.M.vector().array())
self.mu0 = mu0
self.exchange_factor = 2.0 * self.C / (self.mu0 * M0**2 * self.unit_length**2)
u3 = df.TrialFunction(S3)
v3 = df.TestFunction(S3)
self.K = df.PETScMatrix()
df.assemble(df.inner(df.grad(u3),df.grad(v3))*df.dx, tensor=self.K)
self.H = df.PETScVector()
self.vol = df.assemble(df.dot(v3, df.Constant([1, 1, 1])) * df.dx).array()
self.coeff1=-self.exchange_factor/(self.vol)
self.coeff2=-0.5/(self.chi*M0**2)
def compute_field(self):
self.K.mult(self.M.vector(), self.H)
v=self.M.vector().array()
native_llg.baryakhtar_helper_M2(v,self.Ms2)
relax = self.coeff2*(self.Ms2-self.M0**2)*v
return self.coeff1*self.H.array()+relax
if __name__ == "__main__":
mesh = df.Interval(3, 0, 1)
S3 = df.VectorFunctionSpace(mesh, "Lagrange", 1, dim=3)
C = 1
expr = df.Expression(('4.0*sin(x[0])', '4*cos(x[0])','0'), degree=1)
Ms = 2
M = df.interpolate(expr, S3)
exch2 = Exchange(C)
exch2.setup(S3, M, Ms)
print 'ex2',exch2.compute_field()
from finmag.energies.exchange import Exchange
exch3 = Exchange(C)
exch3.setup(S3, M, Ms)
print 'ex3',exch3.compute_field()
| 1,895 | 26.478261 | 86 |
py
|
finmag
|
finmag-master/dev/sandbox/baryakhter/simple_demag.py
|
import logging
import numpy as np
import dolfin as df
from finmag.util import helpers
logger = logging.getLogger('finmag')
class SimpleDemag(object):
"""
Demagnetising field for thin films in the i-direction.
Hj = Hk = 0 and Hi = - Mi.
"""
def __init__(self, Ms, Nx=0, Ny=0.5, Nz=0.5, in_jacobian=False, name='SimpleDemag'):
"""
field_strength is Ms by default
"""
self.Ms = Ms
self.Nx = Nx
self.Ny = Ny
self.Nz = Nz
self.name = name
self.in_jacobian = in_jacobian
in_jacobian_msg = "in Jacobian" if in_jacobian else "not in Jacobian"
logger.debug("Creating {} object, {}.".format(
self.__class__.__name__, in_jacobian_msg))
def setup(self, S3, m, Ms, unit_length):
self.m = m
self.H = np.zeros((3, S3.mesh().num_vertices()))
self.Ms_array = np.zeros(3*S3.mesh().num_vertices())
if isinstance(self.Ms, (int,float)):
self.Ms_array[:] = self.Ms
else:
self.Ms_array[:] = self.Ms[:]
self.Ms_array.shape=(3,-1)
def compute_field(self):
m = self.m.vector().array().view().reshape((3, -1))
self.H[0][:] = -self.Nx*m[0][:]*self.Ms_array[0]
self.H[1][:] = -self.Ny*m[1][:]*self.Ms_array[1]
self.H[2][:] = -self.Nz*m[2][:]*self.Ms_array[2]
return self.H.ravel()
def compute_energy(self):
return 0
def average_field(self):
"""
Compute the average field.
"""
return helpers.average_field(self.compute_field())
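if __name__ == "__main__":
    # Minimal usage sketch with assumed parameters (not part of the original
    # module): uniform magnetisation along y on a small box. With the default
    # demagnetising factors this should give roughly H = (0, -Ny*Ms, 0); the
    # reshape inside compute_field assumes the component-blocked dof layout
    # used elsewhere in this sandbox.
    mesh = df.UnitCubeMesh(2, 2, 2)
    S3 = df.VectorFunctionSpace(mesh, "CG", 1, dim=3)
    m = df.Function(S3)
    m.assign(df.Constant((0, 1, 0)))
    demag = SimpleDemag(Ms=1e6)
    demag.setup(S3, m, Ms=1e6, unit_length=1e-9)
    print "average field:", demag.compute_field().reshape((3, -1)).mean(axis=1)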
| 1,636 | 26.283333 | 88 |
py
|
finmag
|
finmag-master/dev/sandbox/baryakhter/baryakhtar_test.py
|
import os
import dolfin as df
import numpy as np
import matplotlib as mpl
mpl.use("Agg")
import matplotlib.pyplot as plt
from finmag.energies import Zeeman
from finmag.energies import Demag
from sandbox.baryakhter.baryakhtar import LLB
from sandbox.baryakhter.exchange import Exchange
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
def test_llb(do_plot=False):
#mesh = df.BoxMesh(0, 0, 0, 2, 2, 2, 1, 1, 1)
mesh = df.IntervalMesh(1,0,2)
Ms = 8.6e5
sim = LLB(mesh)
sim.alpha = 0.5
sim.beta = 0.0
sim.M0 = Ms
sim.set_M((Ms,0,0))
sim.set_up_solver(reltol=1e-7, abstol=1e-7)
sim.add(Exchange(1.3e-11,chi=1e-7))
H0 = 1e5
sim.add(Zeeman((0, 0, H0)))
steps = 100
dt = 1e-12; ts = np.linspace(0, steps * dt, steps+1)
precession_coeff = sim.gamma
mz_ref = []
mz = []
real_ts=[]
for t in ts:
print t,sim.Ms
sim.run_until(t)
real_ts.append(sim.t)
mz_ref.append(np.tanh(precession_coeff * sim.alpha * H0 * sim.t))
#mz.append(sim.M[-1]/Ms) # same as m_average for this macrospin problem
mz.append(sim.m[-1])
mz=np.array(mz)
if do_plot:
ts_ns = np.array(real_ts) * 1e9
plt.plot(ts_ns, mz, "b.", label="computed")
plt.plot(ts_ns, mz_ref, "r-", label="analytical")
plt.xlabel("time (ns)")
plt.ylabel("mz")
plt.title("integrating a macrospin")
plt.legend()
plt.savefig(os.path.join(MODULE_DIR, "test_sllg.png"))
print("Deviation = {}, total value={}".format(
np.max(np.abs(mz - mz_ref)),
mz_ref))
assert np.max(np.abs(mz - mz_ref)) < 2e-7
if __name__ == "__main__":
test_llb(do_plot=True)
| 1,765 | 23.527778 | 79 |
py
|
finmag
|
finmag-master/dev/sandbox/baryakhter/baryakhtar_example.py
|
import dolfin as df
import numpy as np
from scipy.integrate import ode as scipy_ode
import matplotlib.pyplot as plt
import time
from finmag.drivers.llg_integrator import llg_integrator
from llb import LLB
from finmag.energies import Zeeman
from test_exchange import BaryakhtarExchange
def cross_times(a,b):
assert(len(a)==len(b))
assert(len(a)%3==0)
res=np.zeros(len(a))
for i in range(0,len(a),3):
res[i]=a[i+1]*b[i+2]-a[i+2]*b[i+1]
res[i+1]=a[i+2]*b[i]-a[i]*b[i+2]
res[i+2]=a[i]*b[i+1]-a[i+1]*b[i]
return res
def ode45_solve_llg():
ns=[i for i in range(10)]
Hz=1e6
Happ=np.array([[0,0,Hz] for i in ns])
Happ.shape=(-1,)
print Happ
Ms=8.6e2
m0=np.array([1,2,3])
M0=np.array([m0*Ms for i in ns])
M0.shape=(-1,)
gamma=2.21e5
count=[]
count_jac=[]
def ode_rhs(t,M):
tmp=cross_times(M,Happ)
tmp[:]*=-gamma
count.append(1)
return tmp
def jac(t,M):
count_jac.append(1)
B=np.zeros((len(M),len(M)))
for i in range(0,len(M),3):
B[i,i]=0
B[i,i+1]=-Happ[i+2]
B[i,i+2]=Happ[i+1]
B[i+1,i]=Happ[i+2]
B[i+1,i+1]=0
B[i+1,i+2]=-Happ[i]
B[i+2,i]=-Happ[i+1]
B[i+2,i+1]=Happ[i]
B[i+2,i+2]=0
return B
ode45=scipy_ode(ode_rhs)
ode45.set_integrator('vode', method='bdf')
ode45.set_initial_value(M0,0)
max_time=2*np.pi/(gamma*Hz)*5
dt=max_time/100
ts=np.arange(0,max_time,dt)
while ode45.successful() and ode45.t+dt<=ts[-1]:
ode45.integrate(ode45.t+dt)
print ode45.t,ode45.y,len(count),len(count_jac)
def save_plot(x,y,filename):
fig=plt.figure()
plt.plot(x,y,'-.',markersize=3)
fig.savefig(filename)
def example1(Ms=8.6e5):
x0 = y0 = z0 = 0
x1 = y1 = z1 = 10
nx = ny = nz = 1
    mesh = df.Box(x0, y0, z0, x1, y1, z1, nx, ny, nz)
S1 = df.FunctionSpace(mesh, "Lagrange", 1)
S3 = df.VectorFunctionSpace(mesh, "Lagrange", 1,dim=3)
vis = df.Function(S3)
llb = LLB(S1,S3,rtol=1e-6,atol=1e-10)
llb.Ms=Ms
llb.alpha = 0.0
llb.set_m((1, 1, 1))
H_app = Zeeman((0, 0, 1e5))
H_app.setup(S3, llb._m,Ms=Ms)
llb.interactions.append(H_app)
exchange = BaryakhtarExchange(13.0e-12,1e-5)
exchange.setup(S3,llb._m,llb._Ms)
llb.interactions.append(exchange)
max_time=2*np.pi/(llb.gamma*1e5)
ts = np.linspace(0, max_time, num=100)
mlist=[]
Ms_average=[]
for t in ts:
llb.run_until(t)
mlist.append(llb.M)
vis.vector()[:]=mlist[-1]
Ms_average.append(llb.M_average)
df.plot(vis)
time.sleep(0.00)
print llb.count
save_plot(ts,Ms_average,'Ms_%g-time.png'%Ms)
df.interactive()
def example1_sundials(Ms):
x0 = y0 = z0 = 0
x1 = y1 = z1 = 10
nx = ny = nz = 1
    mesh = df.Box(x0, y0, z0, x1, y1, z1, nx, ny, nz)
S1 = df.FunctionSpace(mesh, "Lagrange", 1)
S3 = df.VectorFunctionSpace(mesh, "Lagrange", 1,dim=3)
vis = df.Function(S3)
llb = LLB(S1,S3)
llb.alpha = 0.00
llb.set_m((1, 1, 1))
llb.Ms=Ms
H_app = Zeeman((0, 0, 1e5))
H_app.setup(S3, llb._m,Ms=Ms)
llb.interactions.append(H_app)
exchange = BaryakhtarExchange(13.0e-12,1e-2)
exchange.setup(S3,llb._m,llb._Ms)
llb.interactions.append(exchange)
integrator = llg_integrator(llb, llb.M, abstol=1e-10, reltol=1e-6)
max_time=2*np.pi/(llb.gamma*1e5)
ts = np.linspace(0, max_time, num=50)
mlist=[]
Ms_average=[]
for t in ts:
integrator.advance_time(t)
mlist.append(integrator.m.copy())
llb.M=mlist[-1]
vis.vector()[:]=mlist[-1]
Ms_average.append(llb.M_average)
df.plot(vis)
time.sleep(0.0)
print llb.count
save_plot(ts,Ms_average,'Ms_%g-time-sundials.png'%Ms)
df.interactive()
if __name__=="__main__":
#ode45_solve_llg()
example1(1)
example1(8.6e5)
example1_sundials(8.6e5)
example1_sundials(1)
| 4,070 | 22.668605 | 70 |
py
|
finmag
|
finmag-master/dev/sandbox/baryakhter/baryakhtar.py
|
import logging
import numpy as np
import dolfin as df
from finmag.util import helpers
import finmag.util.consts as consts
from finmag.util.fileio import Tablewriter
from finmag.native import sundials
from finmag.native import llg as native_llg
from finmag.util.timings import default_timer
from finmag.energies import Zeeman
#default settings for logger 'finmag' set in __init__.py
#getting access to logger here
logger = logging.getLogger(name='finmag')
class LLB(object):
"""
Implementation of the Baryakhtar equation
"""
def __init__(self, mesh, chi=0.001, unit_length=1e-9, name='unnamed', auto_save_data=True, type=1.0):
#type=1 : cubic crystal
#type=0 : uniaxial crystal
self.mesh = mesh
self.S1 = df.FunctionSpace(mesh, "Lagrange", 1)
self.S3 = df.VectorFunctionSpace(mesh, "Lagrange", 1,dim=3)
#self._Ms = df.Function(self.S1)
self._M = df.Function(self.S3)
self._m = df.Function(self.S3)
self.M = self._M.vector().array()
self.dm_dt = np.zeros(self.M.shape)
self.H_eff = np.zeros(self.M.shape)
self.call_field_times=0
self.call_field_jtimes=0
self.chi = chi
self.unit_length = unit_length
self.set_default_values()
self.auto_save_data=auto_save_data
self.name = name
self.sanitized_name = helpers.clean_filename(name)
self.type = type
assert (type==0 or type==1.0)
if self.auto_save_data:
self.ndtfilename = self.sanitized_name + '.ndt'
self.tablewriter = Tablewriter(self.ndtfilename, self, override=True)
def set_default_values(self):
self._alpha_mult = df.Function(self.S1)
self._alpha_mult.assign(df.Constant(1.0))
self._beta_mult = df.Function(self.S1)
self._beta_mult.assign(df.Constant(1.0))
self.alpha = 0.5 # alpha for solve: alpha * _alpha_mult
self.beta=0
self.t = 0.0 # s
self.do_precession = True
u3 = df.TrialFunction(self.S3)
v3 = df.TestFunction(self.S3)
self.K = df.PETScMatrix()
df.assemble(1.0/self.unit_length**2*df.inner(df.grad(u3),df.grad(v3))*df.dx, tensor=self.K)
self.H_laplace = df.PETScVector()
self.H_eff_vec = df.PETScVector(len(self.M))
self.vol = df.assemble(df.dot(df.TestFunction(self.S3), df.Constant([1, 1, 1])) * df.dx).array()
self.gamma = consts.gamma
#source for gamma: OOMMF manual, and in Werner Scholz thesis,
#after (3.7), llg_gamma_G = m/(As).
self.c = 1e11 # 1/s numerical scaling correction \
# 0.1e12 1/s is the value used by default in nmag 0.2
self.M0 = 8.6e5 # A/m saturation magnetisation
self.t = 0.0 # s
self._pins=np.zeros(self.S1.mesh().num_vertices(),dtype="int")
self._pre_rhs_callables=[]
self._post_rhs_callables=[]
self.interactions = []
@property
def alpha(self):
"""The damping factor :math:`\\alpha`."""
return self._alpha
@alpha.setter
def alpha(self, value):
fun = df.Function(self.S1)
if not isinstance(value, df.Expression):
value=df.Constant(value)
fun.assign(value)
self.alpha_vec = fun.vector().array()
self._alpha = self.alpha_vec
@property
def beta(self):
return self._beta
@beta.setter
def beta(self, value):
self._beta = value
self.beta_vec = self._beta * self._beta_mult.vector().array()
def add(self,interaction):
interaction.setup(self.S3, self._M, self.M0, self.unit_length)
self.interactions.append(interaction)
if interaction.__class__.__name__=='Zeeman':
            self.zeeman_interaction=interaction
            self.tablewriter.entities['zeeman']={
                        'unit': '<A/m>',
                        'get': lambda sim: sim.zeeman_interaction.average_field(),
'header': ('h_x', 'h_y', 'h_z')}
self.tablewriter.update_entity_order()
@property
def pins(self):
return self._pins
@pins.setter
def pins(self, value):
self._pins[:]=helpers.scalar_valued_function(value,self.S1).vector().array()[:]
def set_M(self, value, **kwargs):
self._M = helpers.vector_valued_function(value, self.S3, normalise=False)
self.M[:]=self._M.vector().array()[:]
def set_up_solver(self, reltol=1e-6, abstol=1e-6, nsteps=100000,jacobian=False):
integrator = sundials.cvode(sundials.CV_BDF, sundials.CV_NEWTON)
integrator.init(self.sundials_rhs, 0, self.M)
if jacobian:
integrator.set_linear_solver_sp_gmr(sundials.PREC_LEFT)
integrator.set_spils_jac_times_vec_fn(self.sundials_jtimes)
integrator.set_spils_preconditioner(self.sundials_psetup, self.sundials_psolve)
else:
integrator.set_linear_solver_sp_gmr(sundials.PREC_NONE)
integrator.set_scalar_tolerances(reltol, abstol)
integrator.set_max_num_steps(nsteps)
self.integrator = integrator
def compute_effective_field(self):
H_eff = np.zeros(self.M.shape)
for interaction in self.interactions:
if interaction.__class__.__name__=='TimeZeemanPython':
interaction.update(self.t)
H_eff += interaction.compute_field()
self.H_eff = H_eff
self.H_eff_vec.set_local(H_eff)
def compute_laplace_effective_field(self):
self.K.mult(self.H_eff_vec, self.H_laplace)
return -1.0*self.H_laplace.array()/self.vol
def run_until(self, t):
if t <= self.t:
return
self.integrator.advance_time(t, self.M)
self._M.vector().set_local(self.M)
self.t = t
if self.auto_save_data:
self.tablewriter.save()
def sundials_rhs(self, t, y, ydot):
self.t = t
self.M[:] = y[:]
self._M.vector().set_local(self.M)
for func in self._pre_rhs_callables:
func(self.t)
self.call_field_times+=1
self.compute_effective_field()
delta_Heff = self.compute_laplace_effective_field()
#print self.H_eff
#print 'delta_Heff',self.H_eff*0.01-delta_Heff*self.beta_vec[0]
default_timer.start("sundials_rhs", self.__class__.__name__)
# Use the same characteristic time as defined by c
native_llg.calc_baryakhtar_dmdt(self.M,
self.H_eff,
delta_Heff,
self.dm_dt,
self.alpha_vec,
self.beta_vec,
self.M0,
self.gamma,
self.type,
self.do_precession,
self.pins)
default_timer.stop("sundials_rhs", self.__class__.__name__)
for func in self._post_rhs_callables:
func(self)
ydot[:] = self.dm_dt[:]
return 0
def sundials_jtimes(self, mp, J_mp, t, m, fy, tmp):
"""
"""
default_timer.start("sundials_jtimes", self.__class__.__name__)
self.call_field_jtimes+=1
self._M.vector().set_local(m)
self.compute_effective_field()
print self.call_field_times,self.call_field_jtimes
native_llg.calc_baryakhtar_jtimes(self._M.vector().array(),
self.H_eff,
mp,
J_mp,
self.gamma,
self.chi,
self.M0,
self.do_precession,
self.pins)
default_timer.stop("sundials_jtimes", self.__class__.__name__)
self.sundials_rhs(t, m, fy)
# Nonnegative exit code indicates success
return 0
def sundials_psetup(self, t, m, fy, jok, gamma, tmp1, tmp2, tmp3):
# Note that some of the arguments are deliberately ignored, but they
# need to be present because the function must have the correct signature
# when it is passed to set_spils_preconditioner() in the cvode class.
return 0, not jok
def sundials_psolve(self, t, y, fy, r, z, gamma, delta, lr, tmp):
# Note that some of the arguments are deliberately ignored, but they
# need to be present because the function must have the correct signature
# when it is passed to set_spils_preconditioner() in the cvode class.
z[:] = r
return 0
@property
def Ms(self):
a = self.M
a.shape=(3,-1)
res = np.sqrt(a[0]*a[0] + a[1]*a[1] + a[2]*a[2])
a.shape=(-1,)
return res
@property
def m(self):
mh = helpers.fnormalise(self.M)
self._m.vector().set_local(mh)
return mh
@property
def m_average(self):
self._m.vector().set_local(helpers.fnormalise(self.M))
mx = df.assemble(df.dot(self._m, df.Constant([1, 0, 0])) * df.dx)
my = df.assemble(df.dot(self._m, df.Constant([0, 1, 0])) * df.dx)
mz = df.assemble(df.dot(self._m, df.Constant([0, 0, 1])) * df.dx)
volume = df.assemble(df.Constant(1)*df.dx, mesh=self.mesh)
return np.array([mx, my, mz])/volume
| 10,120 | 31.543408 | 105 |
py
|
finmag
|
finmag-master/dev/sandbox/baryakhter/__init__.py
| 0 | 0 | 0 |
py
|
|
finmag
|
finmag-master/dev/sandbox/baryakhter/llb.py
|
import logging
import numpy as np
import scipy
import dolfin as df
import finmag.util.helpers as h
import finmag.util.consts as consts
from finmag.native import llg as native_llg
from finmag.util.timings import mtimed, default_timer
#default settings for logger 'finmag' set in __init__.py
#getting access to logger here
logger = logging.getLogger(name='finmag')
class LLB(object):
"""
Solves the Baryakhtar or LLB equation.
The equation reads
.. math::
\\frac{d\\vec{M}}{dt} = -\\gamma_{LL} (\\vec{M} \\times \\vec{H}) - \\alpha \\gamma_{LL} (\\vec{M} \\times [ \\vec{M} \\times \\vec{H}])
where :math:`\\gamma_{LL} = \\frac{\\gamma}{1+\\alpha^2}`. In our code
:math:`-\\gamma_{LL}` is referred to as *precession coefficient* and
:math:`-\\alpha\\gamma_{LL}` as *damping coefficient*.
"""
@mtimed
def __init__(self, S1, S3, do_precession=True,rtol=1e-6,atol=1e-6):
logger.debug("Creating LLG object.")
self.S1 = S1
self.S3 = S3
self.DG = df.FunctionSpace(S1.mesh(), "DG", 0)
self._m = df.Function(self.S3)
self._Ms_cell = df.Function(self.DG)
self._Ms = df.Function(self.S3)
self._M = df.Function(self.S3)
self.dM_dt=np.zeros(len(self._m.vector().array()))
self.rtol=rtol
self.atol=atol
self.count=1
self.count2=1
self.do_precession = do_precession
self.vol = df.assemble(df.dot(df.TestFunction(S3), df.Constant([1, 1, 1])) * df.dx).array()
self.Volume=None #will be computed on demand, and carries volume of the mesh
self.set_default_values()
def set_default_values(self):
self._alpha_mult = df.Function(self.S1)
self._alpha_mult.assign(df.Constant(1))
self.alpha = 0.5 # alpha for solve: alpha * _alpha_mult
self.beta=0
self.gamma = consts.gamma
#source for gamma: OOMMF manual, and in Werner Scholz thesis,
#after (3.7), llg_gamma_G = m/(As).
self.c = 1e11 # 1/s numerical scaling correction \
# 0.1e12 1/s is the value used by default in nmag 0.2
self.Ms = 8.6e5 # A/m saturation magnetisation
self.t = 0.0 # s
self.pins = [] # nodes where the magnetisation gets pinned
self._pre_rhs_callables=[]
self._post_rhs_callables=[]
self.interactions = []
def set_pins(self, nodes):
self._pins = np.array(nodes, dtype="int")
def pins(self):
return self._pins
pins = property(pins, set_pins)
def spatially_varying_alpha(self, baseline_alpha, multiplicator):
"""
Accepts a dolfin function over llg.S1 of values
with which to multiply the baseline alpha to get the spatially
varying alpha.
"""
self._alpha_mult = multiplicator
self.alpha = baseline_alpha
@property
def alpha(self):
"""The damping factor :math:`\\alpha`."""
return self._alpha
@alpha.setter
def alpha(self, value):
self._alpha = value
# need to update the alpha vector as well, which is
# why we have this property at all.
self.alpha_vec = self._alpha * self._alpha_mult.vector().array()
@property
def M(self):
"""The magnetisation, with length Ms."""
return self._M.vector().array()
@M.setter
def M(self, v):
assert(len(self.M)==len(v))
n=len(v)/3
for i1 in range(n):
i2=n+i1
i3=n+i2
tmp=np.sqrt(v[i1]*v[i1]+v[i2]*v[i2]+v[i3]*v[i3])
self._Ms.vector()[i1]=tmp
self._Ms.vector()[i2]=tmp
self._Ms.vector()[i3]=tmp
self._m.vector()[i1]=v[i1]/tmp
self._m.vector()[i2]=v[i2]/tmp
self._m.vector()[i3]=v[i3]/tmp
self._M.vector().set_local(v)
@property
def M_average(self):
"""The average magnetisation, computed with m_average()."""
tmp=self.M
tmp.shape=(3,-1)
res=np.average(tmp,axis=1)
tmp.shape=(-1,)
return res
@property
def Ms(self):
"""
Ms at nodes
"""
return self._Ms.vector().array()
@Ms.setter
def Ms(self, value):
"""
Set the Ms
"""
try:
val = df.Constant(value)
except:
print 'Sorry, only a constant value is acceptable.'
raise AttributeError
tmp_Ms = df.interpolate(val, self.DG)
self._Ms_cell.vector()[:]=tmp_Ms.vector()
tmp = df.assemble(self._Ms_cell*df.dot(df.TestFunction(self.S3), df.Constant([1, 1, 1])) * df.dx)
self._Ms.vector().set_local(tmp/self.vol)
self._M.vector().set_local(self.Ms*self.m)
@property
def m(self):
return self._m.vector().array()
@m.setter
def m(self, value):
# Not enforcing unit length here, as that is better done
# once at the initialisation of m.
self._m.vector()[:] = value
self._M.vector().set_local(self.Ms*self.m)
def set_m(self, value, **kwargs):
"""
Set the magnetisation (scaled automatically).
There are several ways to use this function. Either you provide
a 3-tuple of numbers, which will get cast to a dolfin.Constant, or
a dolfin.Constant directly.
Then a 3-tuple of strings (with keyword arguments if needed) that will
get cast to a dolfin.Expression, or directly a dolfin.Expression.
You can provide a numpy.ndarray of nodal values of shape (3*n,),
where n is the number of nodes.
Finally, you can pass a function (any callable object will do) which
accepts the coordinates of the mesh as a numpy.ndarray of
shape (3, n) and returns the magnetisation like that as well.
You can call this method anytime during the simulation. However, when
providing a numpy array during time integration, the use of
the attribute m instead of this method is advised for performance
reasons and because the attribute m doesn't normalise the vector.
"""
if isinstance(value, tuple):
if isinstance(value[0], str):
# a tuple of strings is considered to be the ingredient
# for a dolfin expression, whereas a tuple of numbers
# would signify a constant
val = df.Expression(value, degree=1, **kwargs)
else:
val = df.Constant(value)
new_m = df.interpolate(val, self.S3)
elif isinstance(value, (df.Constant, df.Expression)):
new_m = df.interpolate(value, self.S3)
elif isinstance(value, (list, np.ndarray)):
new_m = df.Function(self.S3)
new_m.vector()[:] = value
elif hasattr(value, '__call__'):
coords = np.array(zip(* self.S3.mesh().coordinates()))
new_m = df.Function(self.S3)
new_m.vector()[:] = value(coords).flatten()
else:
raise AttributeError
new_m.vector()[:] = h.fnormalise(new_m.vector().array())
self._m.vector()[:] = new_m.vector()[:]
tmp = df.assemble(self._Ms_cell*df.dot(df.TestFunction(self.S3), df.Constant([1, 1, 1])) * df.dx)
self._Ms.vector().set_local(tmp/self.vol)
self._M.vector().set_local(self.Ms*self.m)
self.prepare_solver()
def compute_effective_field(self):
H_eff = np.zeros(self.m.shape)
for interaction in self.interactions:
H_eff += interaction.compute_field()
self.H_eff = H_eff
def solve(self,t,M):
self.M=M
for func in self._pre_rhs_callables:
func(self)
self.compute_effective_field()
default_timer.start("solve", self.__class__.__name__)
self.count+=1
dM_dt=self.dM_dt
h=self.H_eff
dh=self.compute_laplace_effective_field()
m=self.M
Ms=self.Ms
alpha=self.alpha
beta=self.beta
n=len(m)/3
for i1 in range(0,n):
i2=n+i1
i3=n+i2
dM_dt[i1]=self.gamma*Ms[i1]*(alpha*h[i1]-beta*dh[i1])
dM_dt[i2]=self.gamma*Ms[i2]*(alpha*h[i2]-beta*dh[i2])
dM_dt[i3]=self.gamma*Ms[i3]*(alpha*h[i3]-beta*dh[i3])
if self.do_precession:
dM_dt[i1] -= self.gamma*(m[i2]*h[i3]-m[i3]*h[i2])
dM_dt[i2] -= self.gamma*(m[i3]*h[i1]-m[i1]*h[i3])
dM_dt[i3] -= self.gamma*(m[i1]*h[i2]-m[i2]*h[i1])
default_timer.stop("solve", self.__class__.__name__)
for func in self._post_rhs_callables:
func(self)
return dM_dt
def jacobian(self,t,M):
self.M=M
for func in self._pre_rhs_callables:
func(self)
self.compute_effective_field()
B=self.jac
Heff=self.H_eff
Heff2=self.compute_laplace_effective_field()
Ms=self.Ms
gamma=self.gamma
alpha=self.alpha
beta=self.beta
for i in range(0,len(M),3):
B[i,i]=0
B[i,i+1]=-gamma*Heff[i+2]
B[i,i+2]=gamma*Heff[i+1]
B[i+1,i]=gamma*Heff[i+2]
B[i+1,i+1]=0
B[i+1,i+2]=-gamma*Heff[i]
B[i+2,i]=-gamma*Heff[i+1]
B[i+2,i+1]=gamma*Heff[i]
B[i+2,i+2]=0
B[i,i]+=gamma*2*M[i]/Ms[i]*(alpha*Heff[i]-beta*Heff2[i])
B[i,i+1]+=gamma*2*M[i+1]/Ms[i+1]*(alpha*Heff[i]-beta*Heff2[i])
B[i,i+2]+=gamma*2*M[i+2]/Ms[i+2]*(alpha*Heff[i]-beta*Heff2[i])
B[i+1,i]+=gamma*2*M[i]/Ms[i]*(alpha*Heff[i+1]-beta*Heff2[i+1])
B[i+1,i+1]+=gamma*2*M[i+1]/Ms[i+1]*(alpha*Heff[i+1]-beta*Heff2[i+1])
B[i+1,i+2]+=gamma*2*M[i+2]/Ms[i+2]*(alpha*Heff[i+2]-beta*Heff2[i+2])
B[i+2,i]+=gamma*2*M[i]/Ms[i]*(alpha*Heff[i+2]-beta*Heff2[i+2])
B[i+2,i+1]+=gamma*2*M[i+1]/Ms[i+1]*(alpha*Heff[i+2]-beta*Heff2[i+2])
B[i+2,i+2]+=gamma*2*M[i+2]/Ms[i+2]*(alpha*Heff[i+2]-beta*Heff2[i+2])
return B
def compute_laplace_effective_field(self):
grad_u = df.project(df.grad(self._M))
tmp=df.project(df.div(grad_u))
return tmp.vector().array()
def prepare_solver(self):
self.ode=scipy.integrate.ode(self.solve,self.jacobian)
self.jac=np.zeros((len(self.M),len(self.M)))
self.ode.set_integrator('vode', method='bdf',atol=self.atol,rtol=self.rtol)
self.ode.set_initial_value(self.M,0)
def run_until(self,time):
while self.ode.successful() and self.ode.t<time:
self.ode.integrate(time)
self.M=self.ode.y
return self.ode.successful()
def solve_sundials(self):
for func in self._pre_rhs_callables:
func(self.t)
self.compute_effective_field()
self.count+=1
default_timer.start("solve_sundials", self.__class__.__name__)
# Use the same characteristic time as defined by c
char_time = 0.1/self.c
# Prepare the arrays in the correct shape
m = self.M
m.shape = (3, -1)
H_eff = self.H_eff
delta_Heff=self.compute_laplace_effective_field()
H_eff.shape = (3, -1)
delta_Heff.shape = (3, -1)
dMdt = np.zeros(m.shape)
# Calculate dm/dt
native_llg.calc_baryakhtar_dmdt(m, H_eff,delta_Heff, self.t, dMdt, self.pins,
self.gamma, self.alpha_vec, 0,
char_time, self.do_precession)
dMdt.shape = (-1,)
m.shape = (-1,)
H_eff.shape = (-1,)
default_timer.stop("solve_sundials", self.__class__.__name__)
for func in self._post_rhs_callables:
func(self)
return dMdt
# Computes the dm/dt right hand side ODE term, as used by SUNDIALS CVODE
def sundials_rhs(self, t, y, ydot):
ydot[:] = self.solve_for(y, t)
return 0
def sundials_psetup(self, t, m, fy, jok, gamma, tmp1, tmp2, tmp3):
if not jok:
self.m = m
self.compute_effective_field()
self._reuse_jacobean = True
return 0, not jok
def sundials_psolve(self, t, y, fy, r, z, gamma, delta, lr, tmp):
z[:] = r
return 0
# Computes the Jacobian-times-vector product, as used by SUNDIALS CVODE
def sundials_jtimes(self, mp, J_mp, t, m, fy, tmp):
default_timer.start("sundials_jtimes", self.__class__.__name__)
assert m.shape == self.m.shape
assert mp.shape == m.shape
assert tmp.shape == m.shape
# First, compute the derivative H' = dH_eff/dt
self.M = mp
Hp = tmp.view()
Hp[:] = 0.
for inter in self.interactions:
if inter.in_jacobian:
Hp[:] += inter.compute_field()
if not hasattr(self, '_reuse_jacobean') or not self._reuse_jacobean:
# If the field m has changed, recompute H_eff as well
if not np.array_equal(self.M, m):
self.M = m
self.compute_effective_field()
m.shape = (3, -1)
mp.shape = (3, -1)
H = self.H_eff.view()
H.shape = (3, -1)
Hp.shape = (3, -1)
J_mp.shape = (3, -1)
# Use the same characteristic time as defined by c
char_time = 0.1 / self.c
native_llg.calc_baryakhtar_jtimes(m, H, mp, Hp, t, J_mp, self.gamma/(1+self.alpha**2),
self.alpha, char_time, self.do_precession)
# TODO: Store pins in a np.ndarray(dtype=int) and assign 0's in C++ code
J_mp[:, self.pins] = 0.
J_mp.shape = (-1, )
m.shape = (-1,)
mp.shape = (-1,)
tmp.shape = (-1,)
default_timer.stop("sundials_jtimes", self.__class__.__name__)
# Nonnegative exit code indicates success
return 0
def solve_for(self, M, t):
self.M = M
self.t = t
value = self.solve_sundials()
return value
| 14,236 | 31.065315 | 144 |
py
|
finmag
|
finmag-master/dev/sandbox/baryakhter/anisotropy.py
|
import dolfin as df
import logging
from finmag.util.timings import mtimed
from finmag.util.consts import mu0
logger = logging.getLogger('finmag')
class UniaxialAnisotropy(object):
def __init__(self, K1, axis):
self.K1=K1
self.axis = df.Constant(axis)
@mtimed
def setup(self, S3, M, Ms0, unit_length=1):
self.S3 = S3
self.M = M
v3 = df.TestFunction(S3)
E = -df.Constant(self.K1/Ms0**2) *((df.dot(self.axis, self.M)) ** 2) * df.dx
# Gradient
self.dE_dM = df.Constant(-1.0/mu0) * df.derivative(E, self.M)
self.vol = df.assemble(df.dot(v3, df.Constant([1, 1, 1])) * df.dx).array()
self.K = df.PETScMatrix()
self.H = df.PETScVector()
g_form = df.derivative(self.dE_dM, self.M)
df.assemble(g_form, tensor=self.K)
def compute_field(self):
self.K.mult(self.M.vector(), self.H)
return self.H.array()/self.vol
if __name__ == "__main__":
from dolfin import *
m = 1e-8
Ms = 0.8e6
n = 5
    mesh = Box(0, 0, 0, m, m, m, n, n, n)
S3 = VectorFunctionSpace(mesh, "Lagrange", 1)
C = 1.3e-11 # J/m exchange constant
M = project(Constant((Ms, 0, 0)), S3) # Initial magnetisation
uniax = UniaxialAnisotropy(K1=1e11, axis=[1, 0, 0])
uniax.setup(S3, M, Ms)
print uniax.compute_field()
| 1,355 | 24.584906 | 84 |
py
|
finmag
|
finmag-master/dev/sandbox/new_energy_classes/test_zeeman.py
|
import dolfin as df
import numpy as np
from finmag.util.consts import mu0
from finmag import sim_with, Field
np.set_printoptions(precision=2)
def generate_random_vectors(num=10, length=1.0):
v = np.random.random_sample((num, 3))
v_norms = np.linalg.norm(v, axis=1)
return length * v / v_norms[:, np.newaxis]
class TestZeeman(object):
@classmethod
def setup_class(cls):
"""
Create a box mesh and a simulation object on this mesh which
will be used to compute Zeeman energies in the individual tests.
"""
# The mesh and simulation are only created once for the entire
# test class and are re-used in each test method for efficiency.
cls.Lx, cls.Ly, cls.Lz = 100, 30, 10
nx, ny, nz = 30, 10, 5
mesh = df.BoxMesh(0, 0, 0, cls.Lx, cls.Ly, cls.Lz, nx, ny, nz)
unit_length = 1e-9
cls.mesh_vol = cls.Lx * cls.Ly * cls.Lz * unit_length**3
cls.S3 = df.VectorFunctionSpace(mesh, 'CG', 1, dim=3)
# The values for Ms and m_init are arbitrary as they will be
# overwritten in the tests.
cls.sim = sim_with(mesh, Ms=1, m_init=(1, 0, 0), demag_solver=None,
unit_length=unit_length)
@classmethod
def set_simulation_parameters(cls, M, H):
"""
Set m, Ms and H on the pre-created simulation object. This should
be done before calling any of the other functions which compute the
energy, energy density, etc. using Finmag.
"""
Ms = np.linalg.norm(M)
m = M / Ms
cls.sim.m = m
cls.sim.Ms = Ms
cls.sim.set_H_ext(H)
@classmethod
def compute_energy_with_finmag(cls):
"""
Compute Zeeman energy of the pre-created simulation using Finmag.
"""
return cls.sim.get_interaction('Zeeman').compute_energy()
@classmethod
def compute_energy_density_with_finmag(cls):
"""
Compute Zeeman energy density of the pre-created simulation
using Finmag.
"""
return cls.sim.get_interaction('Zeeman').energy_density()
@classmethod
def compute_field_with_finmag(cls):
"""
Compute Zeeman field of the pre-created simulation using Finmag.
"""
return cls.sim.get_interaction('Zeeman').H
@classmethod
def create_linearly_varying_field(cls, H0, H1):
"""
Return a Field which varies linearly from H0 at mesh corner (0, 0, 0)
to H1 at mesh corner (cls.Lx, cls.Ly, cls.Lz).
"""
H_field = Field(cls.S3,
df.Expression(
['(1-x[0]/Lx)*H0x + x[0]/Lx*H1x',
'(1-x[1]/Ly)*H0y + x[1]/Ly*H1y',
'(1-x[2]/Lz)*H0z + x[2]/Lz*H1z'],
H0x=H0[0], H0y=H0[1], H0z=H0[2],
H1x=H1[0], H1y=H1[1], H1z=H1[2],
Lx=cls.Lx, Ly=cls.Ly, Lz=cls.Lz))
return H_field
def test_total_energy_with_constant_field(self):
"""
Check Zeeman energy for some random (but spatially uniform)
values of magnetisation M and external field H. The energy
computed with Finmag is compared to the value obtained from
an analytical expression.
"""
Ms = 8e5 # saturation magnetisation
H_norm = 3e4 # strength of external field
num = 3
M_vals = generate_random_vectors(num, length=Ms)
H_vals = generate_random_vectors(num, length=H_norm)
for M in M_vals:
for H in H_vals:
print("Comparing energy for M={}, H={}".format(M, H))
self.set_simulation_parameters(M, H)
energy_expected = -mu0 * self.mesh_vol * np.dot(M, H)
energy_finmag = self.compute_energy_with_finmag()
energy_density_expected = -mu0 * np.dot(M, H)
energy_density_finmag = self.compute_energy_density_with_finmag().get_ordered_numpy_array()
zeeman_field_expected = Field(self.S3, H)
zeeman_field_computed = self.compute_field_with_finmag()
np.testing.assert_allclose(energy_expected, energy_finmag)
np.testing.assert_allclose(energy_density_expected, energy_density_finmag)
assert zeeman_field_expected.allclose(zeeman_field_computed)
def test_total_energy_linearly_varying_field(self):
"""
Check Zeeman energy for some random values of magnetisation M
and external field H, where M is spatially uniform but H varies
linearly between opposite corners of the
cuboid mesh. The energy computed with Finmag is compared to
the value obtained from an analytical expression.
"""
Ms = 8e5 # saturation magnetisation
H_norm = 3e4 # strength of external field
num = 3
M_vals = generate_random_vectors(num, length=Ms)
H0_vals = generate_random_vectors(num, length=H_norm)
H1_vals = generate_random_vectors(num, length=H_norm)
for M in M_vals:
for H0, H1 in zip(H0_vals, H1_vals):
print("Comparing energy for M={}, H0={}, H1={}".format(M, H0, H1))
H_field = self.create_linearly_varying_field(H0, H1)
M_field = Field(self.S3, M)
self.set_simulation_parameters(M, H_field)
energy_expected = -mu0 * self.mesh_vol * np.dot((H0 + H1)/2, M)
energy_finmag = self.compute_energy_with_finmag()
np.testing.assert_allclose(energy_expected, energy_finmag)
energy_density_expected = -mu0 * M_field.dot(H_field)
energy_density_finmag = self.compute_energy_density_with_finmag()
zeeman_field_expected = H_field
zeeman_field_computed = self.compute_field_with_finmag()
np.testing.assert_allclose(energy_expected, energy_finmag)
assert energy_density_expected.allclose(energy_density_finmag)
assert zeeman_field_expected.allclose(zeeman_field_computed)
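# Minimal reference sketch (added for illustration, not used by the tests
# above): for spatially uniform M and H over a mesh of volume V the Zeeman
# energy is E = -mu0 * V * dot(M, H), which is the analytical expression the
# constant-field test compares against.
def analytical_zeeman_energy(M, H, volume):
    return -mu0 * volume * np.dot(M, H)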
| 6,130 | 36.384146 | 107 |
py
|
finmag
|
finmag-master/dev/sandbox/airbox/test_airbox.py
|
import os
import dolfin as df
import matplotlib.pyplot as plt
from finmag.util.meshes import from_geofile, plot_mesh_regions
from finmag.util.helpers import piecewise_on_subdomains
from finmag import sim_with
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
def test_two_regions_with_same_Ms():
"""
Define a mesh with two regions but set the same value of Ms in
each of them and let the system relax. Then do the same with a
single-region mesh and check that the results are the same.
"""
raise NotImplementedError(
"This is being tested in the .ipynb file, but there are still "
"unresolved issues because the results don't look similar enough, "
"so this needs more investigation before it can be included here.")
def test_airbox_method():
"""
Define a mesh with two regions (Permalloy and 'air'), where
the Permalloy region is e.g. a sphere of radius 1.0 and the 'air'
region is a cube with sides of length 10. Next set Ms in both
regions, where Ms in the 'air' region is either zero or has a
very low value. Then run the simulation and check that the
value of the external field in the 'air' region coincides with the
field of a dipole.
"""
mesh = from_geofile("mesh.geo")
mesh_region = df.MeshFunction("uint", mesh, "mesh_mat.xml")
# Visualise mesh regions to check they look right (this will be
# removed in the final test).
#plot_mesh_regions(mesh_region, regions=[1, 2], colors=["green", "red"],
# alphas=[1.0, 0.25])
#plt.show()
# Define different values for Ms on each subdomain
Ms_vals = (8.6e5, 0)
Ms = piecewise_on_subdomains(mesh, mesh_region, Ms_vals)
sim = sim_with(mesh, Ms=Ms, m_init=(1.0, 0.0, 0), alpha=1.0,
unit_length=1e-9, A=13.0e-12, demag_solver='FK')
print "Computing effective field..."
H_eff = sim.effective_field()
print "Computed field: {}".format(H_eff)
sim.relax(save_snapshots=True, save_every=1e-11,
filename="snapshots/snapshot.pvd")
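# Hedged helper sketch (added, not used by the test above): the point-dipole
# field the docstring refers to, H(r) = (3*(m.rhat)*rhat - m) / (4*pi*|r|^3),
# against which the computed field in the 'air' region could be compared.
import numpy as np

def dipole_field(m, r):
    """H-field (A/m) of a point dipole with moment m (A m^2) at position r (m)."""
    m, r = np.asarray(m, dtype=float), np.asarray(r, dtype=float)
    rnorm = np.linalg.norm(r)
    rhat = r / rnorm
    return (3.0 * np.dot(m, rhat) * rhat - m) / (4.0 * np.pi * rnorm**3)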
if __name__ == '__main__':
test_airbox_method()
| 2,138 | 35.87931 | 76 |
py
|
finmag
|
finmag-master/dev/sandbox/energy_density/run_nmag.py
|
import nmag
from nmag import SI
mat_Py = nmag.MagMaterial(name="Py",
Ms=SI(0.86e6,"A/m"),
exchange_coupling=SI(13.0e-12, "J/m"),
llg_damping=0.5)
sim = nmag.Simulation("bar")
sim.load_mesh("coarse_bar.nmesh.h5",
[("Py", mat_Py)],
unit_length=SI(1e-9,"m"))
#sim.load_mesh("bar.nmesh.h5",
# [("Py", mat_Py)],
# unit_length=SI(1e-9,"m"))
sim.set_m([1,0,1])
dt = SI(5e-12, "s")
######
# After ten time steps, plot the energy density
# from z=0nm to z=100nm through the center of the body.
######
sim.advance_time(dt*10)
f = open("nmag_exch_Edensity.txt", "w")
f2 = open("nmag_demag_Edensity.txt", "w")
for i in range(100):
f.write("%g " % sim.probe_subfield_siv("E_exch_Py", [15e-9, 15e-9, 1e-9*i]))
f2.write("%g " % sim.probe_subfield_siv("E_demag_Py", [15e-9, 15e-9, 1e-9*i]))
f.close()
f2.close()
| 944 | 25.25 | 82 |
py
|
finmag
|
finmag-master/dev/sandbox/energy_density/extract_oommf.py
|
import os
import numpy as np
from finmag.util.oommf.ovf import OVFFile
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
EPSILON = 1e-15
bar_size = (30e-9, 30e-9, 100e-9)
bar_x_mid = bar_size[0]/2; bar_y_mid = bar_size[1]/2; bar_z_mid = bar_size[2]/2;
def extract_line_across_scalar_field(ovf_filename, txt_filename):
ovf_file = OVFFile(ovf_filename)
fl = ovf_file.get_field()
bar_center_idx = fl.lattice.get_closest((bar_x_mid, bar_y_mid, bar_z_mid))
bar_center_coords = fl.lattice.get_pos_from_idx(bar_center_idx)
print "In {}:".format(ovf_filename)
print " The node closest to the center has the indices {} and coordinates\n {}.".format(
bar_center_idx, bar_center_coords)
assert abs(bar_center_coords[0] - bar_x_mid) < EPSILON
assert abs(bar_center_coords[1] - bar_y_mid) < EPSILON
bar_center_x, bar_center_y, _ = bar_center_idx
energies_along_z_direction = fl.field_data[0][bar_center_x][bar_center_y]
coords_of_z_axis = [fl.lattice.get_pos_from_idx((bar_center_x, bar_center_y, i))[2] for i in range(50)]
np.savetxt(os.path.join(MODULE_DIR, txt_filename), energies_along_z_direction)
np.savetxt(os.path.join(MODULE_DIR, "oommf_coords_z_axis.txt"), coords_of_z_axis)
extract_line_across_scalar_field(
"bar-Oxs_Demag-demag-Energy_density-00-0000760.oef",
"oommf_demag_Edensity.txt")
extract_line_across_scalar_field(
"bar-Oxs_UniformExchange-exc-Energy_density-00-0000760.oef",
"oommf_exch_Edensity.txt")
| 1,505 | 39.702703 | 107 |
py
|
finmag
|
finmag-master/dev/sandbox/energy_density/run_finmag.py
|
import numpy as np
import pylab as p
from dolfin import Mesh
from finmag.sim.llg import LLG
from finmag.drivers.llg_integrator import llg_integrator
# Create mesh
mu = 1e-9
mesh = Mesh("coarse_bar.xml.gz")
#mesh = Mesh("bar.xml.gz")
# Setup LLG
llg = LLG(mesh, unit_length=mu)
llg.Ms = 0.86e6
llg.A = 13.0e-12
llg.alpha = 0.5
llg.set_m((1,0,1))
llg.setup(use_exchange=True, use_dmi=False, use_demag=True, demag_method="FK")
# Set up time integrator
integrator = llg_integrator(llg, llg.m)
dt = 5e-12
######
# After ten time steps, plot the energy density
# from z=0 to z=100 through the center of the body.
######
# Integrate
integrator.run_until(dt*10)
exch = llg.exchange.energy_density_function()
demag = llg.demag.energy_density_function()
finmag_exch, finmag_demag = [], []
R = range(100)
for i in R:
finmag_exch.append(exch([15, 15, i]))
finmag_demag.append(demag([15, 15, i]))
# Read nmag data
nmag_exch = [float(i) for i in open("nmag_exch_Edensity.txt", "r").read().split()]
nmag_demag = [float(i) for i in open("nmag_demag_Edensity.txt", "r").read().split()]
# Read oommf data
oommf_exch = np.genfromtxt("oommf_exch_Edensity.txt")
oommf_demag = np.genfromtxt("oommf_demag_Edensity.txt")
oommf_coords = np.genfromtxt("oommf_coords_z_axis.txt") * 1e9
# Plot exchange energy density
p.plot(R, finmag_exch, 'o-', R, nmag_exch, 'x-', oommf_coords, oommf_exch, "+-")
p.xlabel("nm")
p.title("Exchange energy density")
p.legend(["Finmag", "Nmag", "oommf"])
p.savefig("exch.png")
# Plot demag energy density
p.figure()
p.plot(R, finmag_demag, 'o-', R, nmag_demag, 'x-', oommf_coords, oommf_demag, "+-")
p.xlabel("nm")
p.title("Demag energy density")
p.legend(["Finmag", "Nmag", "oommf"])
p.savefig("demag.png")
#p.show()
print "Plots written to exch.png and demag.png"
| 1,788 | 26.523077 | 84 |
py
|
finmag
|
finmag-master/dev/sandbox/energy_density/anisotropy/finmag_anisotropy.py
|
import numpy as np
import dolfin as df
from finmag.sim.anisotropy import UniaxialAnisotropy
TOL = 1e-15
def test_anisotropy_energy_density():
"""
Written in spherical coordinates, the equation for the
anisotropy energy density reads
E/V = K*sin^2(theta),
where theta is the angle between the magnetisation and
the easy axis. With a magnetisation pointing 45 degrees
between the x- and z-axis, and using the z-axis as the
easy axis, theta becomes pi/4. sin^2(pi/4) evaluates
to 1/2, and with K set to 1 in this simple test case,
we expect the energy density to be 1/2 at every node.
"""
# 5 simplices between 0 and 1 nm.
mesh = df.Interval(5, 0, 1e-9)
V = df.VectorFunctionSpace(mesh, "CG", 1, dim=3)
# Initial magnetisation 45 degrees between the x- and z-axis.
m_vec = df.Constant((1/np.sqrt(2), 0, 1/np.sqrt(2)))
m = df.interpolate(m_vec, V)
# Easy axis in z-direction.
a = df.Constant((0, 0, 1))
# These are 1 just to simplify the analytical solution.
K = 1
Ms = 1
anis = UniaxialAnisotropy(V, m, K, a, Ms)
density = anis.energy_density()
deviation = np.abs(density - 0.5)
print "Anisotropy energy density (expect array of 0.5):"
print density
print "Max deviation: %g" % np.max(deviation)
assert np.all(deviation < TOL), \
"Max deviation %g, should be zero." % np.max(deviation)
if __name__ == '__main__':
test_anisotropy_energy_density()
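# Illustrative cross-check of the value quoted in the docstring above (added
# example, not part of the original test): with K = 1 and theta = pi/4 the
# density K*sin(theta)**2 is exactly 0.5.
def expected_uniaxial_density(K=1.0, theta=np.pi/4):
    return K * np.sin(theta)**2

assert abs(expected_uniaxial_density() - 0.5) < 1e-14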
| 1,482 | 28.66 | 63 |
py
|
finmag
|
finmag-master/dev/sandbox/energy_density/anisotropy/nmag_anisotropy.py
|
import numpy as np
import nmag
from nmag import SI
import nmeshlib.unidmesher as unidmesher
mesh_unit = SI(1e-9, "m") # mesh unit (1 nm)
layers = [(0.0, 1.0)] # the mesh
discretization = 0.2 # discretization
def m0(r):
"""Initial magnetisation 45 degrees between x- and z-axis."""
return [1/np.sqrt(2), 0, 1/np.sqrt(2)]
mat_Py = nmag.MagMaterial(name="Py",
Ms=SI(1,"A/m"),
anisotropy=nmag.uniaxial_anisotropy(axis=[0, 0, 1], K1=SI(1, "J/m^3")))
sim = nmag.Simulation("Simple anisotropy", do_demag=False)
# Write mesh to file
mesh_file_name = '1d_x6.nmesh'
mesh_lists = unidmesher.mesh_1d(layers, discretization)
unidmesher.write_mesh(mesh_lists, out=mesh_file_name)
sim.load_mesh(mesh_file_name,
[("Py", mat_Py)],
unit_length=mesh_unit)
sim.set_m(m0)
print sim.get_subfield("E_anis_Py")
| 897 | 27.967742 | 97 |
py
|
finmag
|
finmag-master/dev/sandbox/xdmf_and_hdf5/parallel-xdmf-mesh-and-vector/output.py
|
from dolfin import *
mesh = UnitSquareMesh(5, 5)
V = FunctionSpace(mesh, 'CG', 1)
u = interpolate(Expression('x[0]'), V)
f = File('mesh.xdmf')
f << mesh
print("mesh we have written: {}".format(mesh.coordinates().shape))
del f
fu = File('u.xdmf')
fu << u
print("u we have written: {}".format(u.vector().array().shape))
del fu
| 334 | 14.227273 | 66 |
py
|
finmag
|
finmag-master/dev/sandbox/xdmf_and_hdf5/parallel-xdmf-mesh-and-vector/input.py
|
from dolfin import *
mesh2 = Mesh("mesh.xdmf")
print("Mesh we have read: {}".format(mesh2.coordinates().shape))
print("Can't read back from xdmf file, see\nhttps://answers.launchpad.net/dolfin/+question/222230 ")
#V2 = FunctionSpace(mesh2, 'CG', 1)
#u2 = Function(V2, 'u.xdmf')
#u << f
#print "vector we have read: {}".format(u2.vector().array().shape)
#del f2
| 365 | 25.142857 | 100 |
py
|
finmag
|
finmag-master/dev/sandbox/xdmf_and_hdf5/parallel-hdf5-mesh-and-vector/output.py
|
from dolfin import *
mesh = UnitSquareMesh(5, 5)
V = FunctionSpace(mesh, 'CG', 1)
u = interpolate(Expression('x[0]'), V)
f = HDF5File(mesh.mpi_comm(), 'u.h5', 'w') # works
f.write(mesh, 'mesh')
print "mesh we have written: {}".format(mesh.coordinates().shape)
f.write(u, 'u')
print "vector we have written: {}".format(u.vector().array().shape)
del f
| 358 | 21.4375 | 67 |
py
|
finmag
|
finmag-master/dev/sandbox/xdmf_and_hdf5/parallel-hdf5-mesh-and-vector/input.py
|
from dolfin import *
mesh2 = Mesh()
f2 = HDF5File(mesh2.mpi_comm(), 'u.h5', 'r')
# The 3rd parameter in df.HDF5File.read is use_partition_from_file.
# When dolfin runs in parallel the mesh is divided/partitioned and
# each process has one partition. When a mesh is saved in parallel,
# the details of how the mesh is partitioned are also saved. If the
# data is then read in again, this partitioning information is available,
# but it is naturally only relevant if the same number of processes is
# used for reading as was used to save the data.
f2.read(mesh2, 'mesh', False)
print("Mesh we have read: {}".format(mesh2.coordinates().shape))
V2 = FunctionSpace(mesh2, 'CG', 1)
u2 = Function(V2)
f2.read(u2, 'u')
print "vector we have read: {}".format(u2.vector().array().shape)
del f2
| 777 | 34.363636 | 68 |
py
|
finmag
|
finmag-master/dev/sandbox/bugs/erratic_spins_and_increasing_energy/energy.py
|
import numpy as np
import matplotlib as mpl
mpl.use("Agg")
import matplotlib.pyplot as plt
import dolfin as df
from finmag import Simulation
from finmag.energies import Exchange, DMI, Demag, Zeeman, UniaxialAnisotropy
from finmag.energies.zeeman import TimeZeemanPython
import matplotlib.pyplot as plt
from finmag.util.fileio import Tablereader
mesh = df.Mesh('mesh_a3cc0220d03c921e8cfa9ecf5da5fc74.xml.gz')
def relax_system():
Ms = 8.6e5
sim = Simulation(mesh, Ms, unit_length=1e-9, name = 'dy')
sim.alpha = 0.001
sim.set_m(np.load('m_000088.npy'))
#sim.set_tol(1e-6, 1e-6)
A = 1.3e-11
sim.add(Exchange(A))
parameters = {
'absolute_tolerance': 1e-10,
'relative_tolerance': 1e-10,
'maximum_iterations': int(1e5)
}
demag = Demag()
demag.parameters['phi_1'] = parameters
demag.parameters['phi_2'] = parameters
print demag.parameters
sim.add(demag)
sim.schedule('save_ndt', every=2e-12)
sim.schedule('save_vtk', every=2e-12, filename='vtks/m.pvd')
sim.schedule('save_m', every=2e-12, filename='npys/m.pvd')
sim.run_until(1e-9)
def plot_mx(filename='dy.ndt'):
data = Tablereader(filename)
ts=data['time']/1e-9
fig=plt.figure()
plt.plot(ts, data['E_total'], label='Total')
#plt.plot(ts, data['E_Demag'], label='Demag')
#plt.plot(ts, data['E_Exchange'], label='Exchange')
plt.xlabel('time (ns)')
plt.ylabel('energy')
plt.legend()
fig.savefig('energy.pdf')
if __name__ == '__main__':
#relax()
#relax_system()
plot_mx()
| 1,675 | 21.052632 | 76 |
py
|
finmag
|
finmag-master/dev/sandbox/bugs/energy/exchange/energy.py
|
import numpy as np
import matplotlib as mpl
mpl.use("Agg")
import matplotlib.pyplot as plt
import dolfin as df
from finmag import Simulation
from finmag.energies import Exchange, DMI, Demag, Zeeman, UniaxialAnisotropy
from finmag.energies.zeeman import TimeZeemanPython
import matplotlib.pyplot as plt
from finmag.util.fileio import Tablereader
"""
This script shows that the cell size does make a difference; for example,
the following mesh leads to an energy increase even though its cell size is
only slightly larger than the default one.
mesh = df.BoxMesh(-100,0,0,100,40,4,60,10,1)
"""
mesh = df.BoxMesh(-100,0,0,100,40,4,70,14,1)
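# Illustrative aside (added, not part of the original script): approximate
# in-plane cell edge lengths, in nm, of the two meshes discussed in the
# docstring above (the x extent is 200 nm, the y extent 40 nm).
for _label, (_nx, _ny) in [("problematic (60, 10)", (60, 10)),
                           ("default (70, 14)", (70, 14))]:
    print("%s: dx = %.2f nm, dy = %.2f nm" % (_label, 200.0/_nx, 40.0/_ny))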
def varying_field(pos):
x = pos[0]
return (1e3,100*x,0)
def excite_system():
Ms = 8.0e5
sim = Simulation(mesh, Ms, unit_length=1e-9, name = 'dy')
sim.alpha = 0.001
sim.set_m((1,0,0))
sim.set_tol(1e-6, 1e-6)
A = 1.3e-11
sim.add(Exchange(A))
sim.add(Zeeman(varying_field))
sim.schedule('save_ndt', every=2e-12)
sim.run_until(0.5e-9)
def plot_mx(filename='dy.ndt'):
data = Tablereader(filename)
ts=data['time']/1e-9
fig=plt.figure()
plt.plot(ts, data['E_total'], label='Total')
#plt.plot(ts, data['E_Demag'], label='Demag')
#plt.plot(ts, data['E_Exchange'], label='Exchange')
plt.xlabel('time (ns)')
plt.ylabel('energy')
plt.legend()
fig.savefig('energy.pdf')
if __name__ == '__main__':
#relax()
excite_system()
plot_mx()
| 1,523 | 20.166667 | 76 |
py
|
finmag
|
finmag-master/dev/sandbox/bugs/energy/demag/energy.py
|
import numpy as np
import matplotlib as mpl
mpl.use("Agg")
import matplotlib.pyplot as plt
import dolfin as df
from finmag import Simulation
from finmag.energies import Exchange, DMI, Demag, Zeeman, UniaxialAnisotropy
from finmag.energies.zeeman import TimeZeemanPython
import matplotlib.pyplot as plt
from finmag.util.fileio import Tablereader
mesh = df.BoxMesh(-100,0,0,100,40,4,100,20,2)
def m_init_dw(pos):
x = pos[0]
if x < -10:
return (1,0,0)
elif x > 10:
return (-1,0,0)
else:
return (0,1,0)
def varying_field(pos):
x = pos[0]
return (1e5,100*x,0)
def excite_system():
Ms = 8.0e5
sim = Simulation(mesh, Ms, unit_length=1e-9, name = 'dy')
sim.alpha = 0.001
#sim.set_m(np.load('relaxed.npy'))
sim.set_m((1,0,0))
sim.set_tol(1e-8, 1e-8)
A = 1.3e-11
Kx = 8e4
#sim.add(Exchange(A))
sim.add(Zeeman(varying_field))
#sim.add(UniaxialAnisotropy(Kx,axis=[1,0,0], name='Kx'))
parameters = {
'absolute_tolerance': 1e-10,
'relative_tolerance': 1e-10,
'maximum_iterations': int(1e5)
}
demag = Demag()
demag.parameters['phi_1'] = parameters
demag.parameters['phi_2'] = parameters
sim.add(demag)
sim.schedule('save_ndt', every=2e-12)
sim.run_until(0.2e-9)
def plot_mx(filename='dy.ndt'):
data = Tablereader(filename)
ts=data['time']/1e-9
fig=plt.figure()
plt.plot(ts, data['E_total'], label='Total')
#plt.plot(ts, data['E_Demag'], label='Demag')
#plt.plot(ts, data['E_Exchange'], label='Exchange')
plt.xlabel('time (ns)')
plt.ylabel('energy')
plt.legend()
fig.savefig('energy.pdf')
if __name__ == '__main__':
#relax()
excite_system()
plot_mx()
| 1,858 | 19.428571 | 76 |
py
|
finmag
|
finmag-master/dev/sandbox/bugs/too-many-open-files/many-simulation-objects.py
|
import os
import psutil # sudo apt-get install python-psutils
import dolfin as df
import finmag
# get handle to this process
p = psutil.Process(os.getpid())
def openfiles():
return p.get_open_files()
def openfilescount():
return len(openfiles())
def create_finmag_sim_object(name):
mesh = df.UnitIntervalMesh(1)
sim = finmag.Simulation(mesh, Ms=1, unit_length=1e-9, name=name)
return sim
def create_sims(base='sim', start=0, stop=20):
sims = []
for i in range(start, stop):
name = '%s-%04d' % (base, i)
print("Creating object %s" % name)
sims.append(create_finmag_sim_object(name))
print("name=%s, i=%d, %d files open" % (name, i, openfilescount()))
return sims
sims = create_sims()
sims = sims + create_sims(base='sim2')
| 801 | 20.675676 | 75 |
py
|
finmag
|
finmag-master/dev/sandbox/bugs/too-many-open-files/counting-simulations-prototype.py
|
class Simulation(object):
max_instance_counter = 0
instances = {}
def __init__(self):
self.id = Simulation.max_instance_counter
Simulation.max_instance_counter += 1
assert self.id not in Simulation.instances.keys()
Simulation.instances[self.id] = self
def __str__(self):
return "{} / {}".format(self.id, Simulation.max_instance_counter)
def shutdown(self):
"""Remove all (cyclic) references to this object - should only
be called in preparation for the object going out of
scope and being garbage collected."""
assert self.id in Simulation.instances.keys()
del Simulation.instances[self.id]
def __del__(self):
print("Instance {} is garbage collected".format(self.id))
def instances_list_all(self):
for id_ in sorted(Simulation.instances.keys()):
if id_ != None: # can happen if instances have been deleted
print("sim {}: {}".format(id_, Simulation.instances[id_]))
def instances_delete_all_others(self):
for id_ in sorted(Simulation.instances.keys()):
if id_ != None: # can happen if instances have been deleted
if id_ != self.id: # do not delete ourselves, here
sim = Simulation.instances[id_]
sim.shutdown()
del sim
def instances_in_memory_count(self):
return sum([1 for id_ in Simulation.instances.keys() if id_ != None])
def test_shutdown1():
a = Simulation()
assert a.instances_in_memory_count() == 1
print(a)
a.shutdown()
assert a.instances_in_memory_count() == 0
del a
def test_shutdown2():
a = Simulation()
assert a.instances_in_memory_count() == 1
print(a)
b = Simulation()
assert a.instances_in_memory_count() == 2
a.shutdown()
assert a.instances_in_memory_count() == 1
b.shutdown()
assert a.instances_in_memory_count() == 0
assert Simulation.max_instance_counter == 2 + 1
del a
assert Simulation.max_instance_counter == 2 + 1
del b
assert Simulation.max_instance_counter == 2 + 1
if __name__ == "__main__":
test_shutdown1()
test_shutdown2()
def demo():
b = Simulation()
print(b)
del b
c = Simulation()
print(c)
print("Instances alive: {}".format(c.instances_in_memory_count()))
c.instances_list_all()
c.instances_delete_all_others()
print("Instances alive: {}".format(c.instances_in_memory_count()))
c.instances_list_all()
| 2,562 | 27.797753 | 77 |
py
|
finmag
|
finmag-master/dev/sandbox/parallel_dolfin_integration/array_integrator_parallel.py
|
# This script attempts to solve an unphysical Temperature (T)-based dolfin
# problem within an array-based integrator. It initialises a gaussian, and
# decays all values independently.
#
# Written for dolfin 1.6.0.
#
# Run with mpirun -n 2 python array_intergrator_parallel.py.
import dolfin as df
import dolfinh5tools
import integrators
import numpy as np
import scipy.integrate
import sys
# For parallelness, get rank.
rank = df.mpi_comm_world().Get_rank()
# Specifying the initial problem.
mesh = df.IntervalMesh(20, -1, 1)
funcSpace = df.FunctionSpace(mesh, 'CG', 1)
initState = df.Expression("exp(-(pow(x[0], 2) / 0.2))") # Gaussian
initFuncVal = df.interpolate(initState, funcSpace)
initialArray = initFuncVal.vector().array()
# Gather the initial array
initRecv = df.Vector()
initFuncVal.vector().gather(initRecv, np.array(range(funcSpace.dim()), "intc"))
# Print stuff.
print("{}: My vector is of shape {}.".format(rank, initialArray.shape[0]))
print("{}: My array looks like:\n {}.".format(rank, initialArray))
print("{}: My mesh.coordinates are:\n {}."
.format(rank, funcSpace.mesh().coordinates()))
print("{}: The initial gathered array looks like:\n {}."
.format(rank, initRecv.array()))
# Defining behaviour in time using dolfin.
def dTt_dolfin(T, t):
"""
Finds dT/dt using dolfin.
Arguments:
T: Array representing the temperature at a specific time.
t: Single value of time at which to find the derivative.
Returns:
The derivative of T with respect to t as an array.
"""
# Convert T to dolfin function from array.
TOld = df.Function(funcSpace)
TOld.vector()[:] = T
# Solve the "linear" problem to find dT/dt.
# This 'a' represents the unknown, which contains no new information, but
# will contribute to the solution later.
TNew = df.TrialFunction(funcSpace)
v = df.TestFunction(funcSpace)
a = TNew * v * df.dx
# 'f' here represents an expression (but not a dolfin expression) which
# describes the mathematical function that calculates dT/dt from T.
f = TOld * df.Constant(-0.9) # df.inner(df.grad(TOld), df.grad(TOld)) # <!> Failure here?
# This 'L' represents what we know, and will be used to calculate our
# solution eventually.
L = f * v
L *= df.dx
# This is not actually the solution, but it is where the solution will end
# up eventually, once the solver has done its work.
solutionEventually = df.Function(funcSpace)
# The problem defines what we want to know, what we do know, and where to
# put the solution. The solution argument is not actually the solution
# (yet), but it's where the solution will end up eventually.
problem = df.LinearVariationalProblem(a, L, solutionEventually)
# The solver solves the problem eventually.
solver = df.LinearVariationalSolver(problem)
# Now we solve the problem. solutionEventually is now populated with the
# solution to the problem.
solver.solve()
# Convert and return our solution.
return solutionEventually.vector().array()
# Calculate derivative dT/dx (and by extension dT/dt).
# dTdx = df.inner(df.grad(TOld), df.grad(TOld))
# dTdt = dTdx * df.Constant(0.1)
# outFunc = df.dot(dTdt * df.interpolate(df.Expression(["1."]), funcSpace),
# df.Expression(["1."]))
# dTdx = df.grad(TOld)[0, 0]
# dTdt = df.project(dTdx * 0.1, funcSpace)
# return -0.1 * T
# Convert and return the derivative dT/dt.
# return outFunc.vector().array()
# Defining behaviour in time, clumsily. This behaviour is replicated by
# dTt_dolfin.
# def dTt(T):
# """
# Finds dT/dt clumsily.
# This represents an unphysical linear decay.
# Arguments:
# T: Array representing the temperature at a specific time.
# funcSpace: Dolfin function space to interpolate T to.
# Returns:
# The derivative of T with respect to t as an array.
# """
# return T * -0.9
def run_until(t, T0, steps=100, integrator='odeint'):
"""
Integrates the problem for time t.
Arguments:
t: Float determining total time to integrate over.
T0: Array denoting initial temperature.
steps: Integer number of integration steps to perform over the time t.
integrator: String denoting the type of integrator to use for time
integration. Options are 'odeint' and 'euler'.
Returns integrated quantity as an array.
"""
tSteps = np.linspace(0, t, steps + 1)
T = T0 # Initial Temperature
# Here are two integrators you can choose between, because variety is
# the spice of life.
if integrator == 'odeint':
T = scipy.integrate.odeint(dTt_dolfin, T, tSteps)[-1]
elif integrator == 'euler':
for zI in xrange(1, len(tSteps)):
T = integrators.euler(T, dTt_dolfin(T, t), tSteps[1] - tSteps[0])
else:
raise ValueError('Integrator not recognised. Please use "euler" or '
'"odeint".')
return T
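# Hedged aside (added, not in the original script): for the unphysical decay
# dT/dt = -0.9*T used above, the exact solution is T(t) = T0*exp(-0.9*t);
# this is what the assertion near the bottom of this file checks at t = 1.
def exact_decay(T0, t):
    return T0 * np.exp(-0.9 * t)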
print("{}: Integrating...".format(rank))
tEnd = 1
T = run_until(tEnd, initFuncVal.vector().array())
print("{}: My vector is of shape {}.".format(rank, len(T)))
print("{}: My array looks like:\n {}.".format(rank, T))
# Create function space (and by extension, a vector object) for a fancy Dolfin
# gathering operation, so we can plot data.
TSend = df.Function(funcSpace)
TSend.vector()[:] = T
TRecv = df.Vector()
TSend.vector().gather(TRecv, np.array(range(funcSpace.dim()), "intc"))
print("{}: The gathered array looks like:\n {}.".format(rank, TRecv.array()))
# Plot the curves. This should look bizarre, as the gather reconstructs the
# data in the incorrect order.
if rank == 0:
import matplotlib.pyplot as plt
plt.plot(initRecv.array())
plt.plot(TRecv.array())
plt.title("This should look bizarre\n when running in parallel.")
plt.show()
plt.close()
# Save this data. This stores data in the correct order intelligently.
sd = dolfinh5tools.lib.openh5("array_integrator_parallel", funcSpace, mode="w")
sd.save_mesh()
sd.write(initFuncVal, "T", 0)
sd.write(TSend, "T", tEnd)
sd.close()
# Test our data against a known solution.
T0 = initRecv.array()
T1 = TRecv.array()
try:
assert (np.abs(T0 / T1 - np.exp(0.9)) < 1e-6).all() # Known solution.
print("{}: Solution is correct on this process.".format(rank))
except AssertionError:
print("{}: T0/T1 =\n{}.".format(rank, T0/T1))
raise
# Now that you've got to here, we run the script
# "load_array_integrator_parallel_data.py" to plot the data in the correct
# order, using the data we have just saved.
| 6,663 | 32.656566 | 93 |
py
|
finmag
|
finmag-master/dev/sandbox/parallel_dolfin_integration/load_array_integrator_parallel_data.py
|
import matplotlib.pyplot as plt
from dolfinh5tools import openh5
hFile = openh5("array_integrator_parallel", mode="r")
plt.plot(hFile.read(t=0, field_name="T").vector().array())
plt.plot(hFile.read(t=1, field_name="T").vector().array())
plt.show()
| 249 | 30.25 | 58 |
py
|
finmag
|
finmag-master/dev/sandbox/parallel_dolfin_integration/integrators.py
|
# This library includes some functions for integrating arrays.
def euler(Tn, dTndt, tStep):
"""
Performs Euler integration to obtain T_{n+1}.
Arguments:
Tn: Array-like representing Temperature at time t_n.
dTndt: Array-like representing dT/dt at time t_n.
tStep: Float determining the time to step over.
Returns T_{n+1} as an array-like.
"""
return Tn + dTndt * tStep
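# Tiny usage sketch (added example, not part of the original module): one
# explicit Euler step for dT/dt = -0.9*T starting from T = 1.0 with a step
# of 0.1 gives 1.0 + (-0.9)*0.1 = 0.91.
if __name__ == "__main__":
    print(euler(1.0, -0.9 * 1.0, 0.1)) # expected: 0.91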
| 416 | 26.8 | 62 |
py
|
finmag
|
finmag-master/dev/sandbox/hysteresis/hysteresis_example.py
|
import dolfin as df
from numpy import linspace
from math import cos, sin, pi
from finmag import sim_with
from finmag.util.meshes import ellipsoid
r1 = 30.0
r2 = 10.0
r3 = 10.0
maxh = 3.0
Ms = 1e6 # A/m
A = 13.0e-12 # J/m
alpha = 1.0 # large damping for quick convergence
H = 1e6 # external field strength in A/m
m_init = (1, 0, 0)
# Create a few external field values (at 45 degree angles
# to each other, sweeping a half-circle).
H_ext_list = [(cos(t)*H, sin(t)*H, 0.01*H) for t in linspace(0, pi, 5)]
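# (Added note: the small 0.01*H z-component presumably just breaks the
# symmetry, so the magnetisation is not left sitting in an unstable
# in-plane equilibrium.)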
mesh = ellipsoid(r1, r2, r3, maxh)
sim = sim_with(mesh, Ms, m_init, alpha=alpha, unit_length=1e-9, A=A, demag_solver='FK')
sim.hysteresis(H_ext_list[1:3], filename="snapshots/hysteresis_example/hysteresis_ellipsoid.pvd", save_snapshots=True, save_every=10e-12, force_overwrite=True)
| 817 | 30.461538 | 159 |
py
|
finmag
|
finmag-master/dev/sandbox/hysteresis/hysteresis_loop_example.py
|
import matplotlib
matplotlib.use('Agg')
from numpy import array
from finmag import sim_with
from finmag.util.meshes import ellipsoid
import matplotlib.pyplot as plt
import sys
import logging
logger = logging.getLogger("finmag")
# This example essentially reproduces Example 2.3 in the nmag manual;
# see: http://nmag.soton.ac.uk/nmag/current/manual/html/example_hysteresis_ellipsoid/doc.html
r1 = 30.0
r2 = 10.0
r3 = 10.0
maxh = 3.0
Ms = 1e6 # A/m
A = 13.0e-12 # J/m
alpha = 1.0 # large damping for quick convergence
H = 1e6 # external field strength in A/m
m_init = (1, 0, 0)
H_max = 1e6 # maximum external field strength in A/m
direction = array([1.0, 0.01, 0.0])
N = 20
if len(sys.argv) > 1:
N = int(sys.argv[1])
print "Using N={}".format(N)
if len(sys.argv) > 2:
maxh = float(sys.argv[2])
print "Using maxh={}".format(maxh)
mesh = ellipsoid(r1, r2, r3, maxh)
sim = sim_with(mesh, Ms, m_init, alpha=alpha, unit_length=1e-9, A=A, demag_solver='FK')
print sim.mesh_info()
#(hvals, mvals) = sim.hysteresis_loop(H_max, direction, N, filename="snapshots/hysteresis_loop_example/hysteresis_ellipsoid.pvd", save_snapshots=True, save_every=10e-12, force_overwrite=True)
(hvals, mvals) = sim.hysteresis_loop(H_max, direction, N)
plt.plot(hvals, mvals,'o-', label='maxh={}'.format(maxh))
plt.ylim((-1.1, 1.1))
plt.title("Hysteresis loop: ellipsoid (r1={}, r2={}, r3={})".format(r1,r2,r3,maxh))
plt.legend(loc='best')
plt.savefig('plot_hysteresis_loop__maxh-{:04.1f}_N-{:03d}.pdf'.format(maxh, N))
| 1,534 | 30.326531 | 191 |
py
|
finmag
|
finmag-master/dev/sandbox/timeintegration/macrospin_ode_jac.py
|
from dolfin import *
from scipy.integrate import ode
from numpy import linspace
from values import c, M0
set_log_level(21)
m = 1e-5
mesh = Box(0,m,0,m,0,m,1,1,1)
# Functions
V = VectorFunctionSpace(mesh, "CG", 1)
u = TrialFunction(V)
v = TestFunction(V)
# Parameters
alpha = 0.5
gamma = 2.211e5
p = Constant(gamma/(1 + alpha**2))
c = c()
# Applied field.
H = Constant((0, 1e5, 0))
# Initial direction of the magnetic field.
Ms, M0 = M0()
M = Function(V)
M.assign(M0)
# Variational forms
a = inner(u, v)*dx
L = inner((-p*cross(M,H)
-p*alpha/Ms*cross(M,cross(M,H))
-c*(inner(M,M) - Ms**2)*M/Ms**2), v)*dx
# Time derivative of the magnetic field.
dM = Function(V)
J = derivative(L, M)
counter = 0
def f(t, y):
global counter
counter += 1
M.vector()[:] = y
solve(a==L, dM)
return dM.vector().array()
def j(t, y):
Jac = assemble(J)
return Jac.array()
y0 = M.vector().array()
t0 = 0
r = ode(f, j).set_integrator('vode', method='bdf', with_jacobian=True)
r.set_initial_value(y0, t0)
t1 = 1e-9
dt = 1e-11
while r.successful() and r.t < t1-dt:
r.integrate(r.t + dt)
print r.y
print counter
| 1,149 | 16.96875 | 70 |
py
|
finmag
|
finmag-master/dev/sandbox/timeintegration/macrospin_odeint_nojac.py
|
from dolfin import *
from scipy.integrate import odeint
from numpy import linspace
from values import c, M0
set_log_level(21)
m = 1e-5
mesh = Box(0,m,0,m,0,m,1,1,1)
# Functions
V = VectorFunctionSpace(mesh, "CG", 1)
u = TrialFunction(V)
v = TestFunction(V)
# Parameters
alpha = 0.5
gamma = 2.211e5
p = Constant(gamma/(1 + alpha**2))
c = c()
# Applied field.
H = Constant((0, 1e5, 0))
# Initial direction of the magnetic field.
Ms, M0 = M0()
M = Function(V)
M.assign(M0)
# Variational forms
a = inner(u, v)*dx
L = inner((-p*cross(M,H)
-p*alpha/Ms*cross(M,cross(M,H))
-c*(inner(M,M) - Ms**2)*M/Ms**2), v)*dx
# Time derivative of the magnetic field.
dM = Function(V)
counter = 0
def f(y, t):
global counter
counter += 1
M.vector()[:] = y
solve(a==L, dM)
return dM.vector().array()
ts = linspace(0, 1e-9, 100)
y0 = M.vector().array()
ys, infodict = odeint(f, y0, ts, Dfun=None, full_output=True)
print ys[-1]
print counter
| 971 | 17.692308 | 61 |
py
|
finmag
|
finmag-master/dev/sandbox/timeintegration/timings.py
|
import commands, time, sys
files = ['macrospin_odeint_nojac', 'macrospin_odeint_jac', 'macrospin_ode_nojac', 'macrospin_ode_jac']
names = ['odeint', 'odeint with jacobian', 'ode', 'ode with jacobian']
for nr, f in enumerate(files):
cmd = 'python %s.py' % (f)
# Run all scripts once before timing starts, to avoid compiler timing.
status, output = commands.getstatusoutput(cmd)
if status != 0:
sys.stderr.write(output + '\n')
sys.exit(status)
t0 = time.time()
for i in range(10):
status, output = commands.getstatusoutput(cmd)
if status != 0:
sys.stderr.write(output + '\n')
sys.exit(status)
t1 = time.time()
print 'Time using %s: %.2f sec.' % (names[nr], t1-t0)
| 754 | 31.826087 | 102 |
py
|
finmag
|
finmag-master/dev/sandbox/timeintegration/macrospin_odeint_jac.py
|
from dolfin import *
from scipy.integrate import odeint
from numpy import linspace
from values import c, M0
set_log_level(21)
m = 1e-5
mesh = Box(0,m,0,m,0,m,1,1,1)
# Functions
V = VectorFunctionSpace(mesh, "CG", 1)
u = TrialFunction(V)
v = TestFunction(V)
# Parameters
alpha = 0.5
gamma = 2.211e5
p = Constant(gamma/(1 + alpha**2))
c = c()
# Applied field.
H = Constant((0, 1e5, 0))
# Initial direction of the magnetic field.
Ms, M0 = M0()
M = Function(V)
M.assign(M0)
# Variational forms
a = inner(u, v)*dx
L = inner((-p*cross(M,H)
-p*alpha/Ms*cross(M,cross(M,H))
-c*(inner(M,M) - Ms**2)*M/Ms**2), v)*dx
# Time derivative of the magnetic field.
dM = Function(V)
J = derivative(L, M)
counter = 0
def f(y, t):
global counter
counter += 1
M.vector()[:] = y
solve(a==L, dM)
return dM.vector().array()
def j(y, t):
Jac = assemble(J)
return Jac.array()
ts = linspace(0, 1e-9, 100)
y0 = M.vector().array()
ys, infodict = odeint(f, y0, ts, Dfun=j, full_output=True)
print ys[-1]
print counter
| 1,050 | 17.438596 | 58 |
py
|
finmag
|
finmag-master/dev/sandbox/timeintegration/conflicting_ode_odeint_exchange.py
|
from dolfin import *
from finmag.sim.exchange import Exchange
from numpy import linspace
from scipy.integrate import odeint, ode
set_log_level(21)
# FIXME: Figure out of this extreme inconsistency between ode and odeint
# FIXME: Make odeint convert when using jacobian
# FIXME: Make odeint convert when adding applied field
# FIXME: Figure out why ode gives the same result with and without an
# applied field.
#
# TODO: When the fixmes above are solved, compare runs with and without the
# jacobian. This should show that we get a speedup with the jacobian.
# Parameters
alpha = 0.5
gamma = 2.211e5
c = Constant(1e12)
C = 1.3e-11
p = Constant(gamma/(1 + alpha**2))
Ms = 8.6e5
#Ms = 1
#length = 20e-9
#simplexes = 10
length = 20e-9
simplexes = 10
# Mesh and functions
mesh = Interval(simplexes, 0, length)
V = VectorFunctionSpace(mesh, 'CG', 1, dim=3)
u = TrialFunction(V)
v = TestFunction(V)
# Initially distributed in an arc along the x-axis, pointing in the y-direction.
# Applied field in the z-direction, so we expect the magnetisation to become
# uniform quickly and align in z-direction.
# Orientations
left_right = '2*x[0]/L - 1'
up_down = 'sqrt(1 - (2*x[0]/L - 1)*(2*x[0]/L - 1))'
# Initial
M0 = Expression((left_right, up_down, '0.0'), L=length)
M = interpolate(M0, V)
#
# Exchange
H_exch = Exchange(V, M, C, Ms)
# Applied
H_app = project(Constant((0, 0, 1e5)), V)
# Effective
H_eff = Function(V)
H_eff.vector()[:] = H_exch.compute_field() + H_app.vector()
Ms = Constant(Ms)
# Variational forms
a = inner(u, v)*dx
L = inner((-p*cross(M, H_eff)
-p*alpha/Ms*cross(M, cross(M, H_eff))
-c*(inner(M, M) - 1)*M), v)*dx
# Time derivative of the magnetic field.
dM = Function(V)
# Jacobian
J = derivative(L, M)
def f(y, t):
# Update M and H_eff
M.vector()[:] = y
H_eff.vector()[:] = H_exch.compute_field() + H_app.vector()
#print H_eff.vector().array().reshape((3, -1))
print t
solve(a==L, dM)
return dM.vector().array()
def j(t, y):
return assemble(J).array()
# Using odeint
ts = linspace(0, 1e-9, 2)
y0 = M.vector().array()
ys, infodict = odeint(f, y0, ts, full_output=True)
#ys, infodict = odeint(f, y0, ts, rtol=10, Dfun=j, full_output=True)
print ys[-1]
#print infodict
"""
# Using ode
y0 = M.vector().array()
t0, t1, dt = 0, 1e-9, 1e-11
r = ode(f).set_integrator('vode', method='bdf', with_jacobian=False)
#r = ode(f, j).set_integrator('vode', method='bdf', with_jacobian=True)
r.set_initial_value(y0, t0)
while r.successful() and r.t < t1-dt:
r.integrate(r.t + dt)
print r.y
"""
| 2,558 | 23.141509 | 77 |
py
|
finmag
|
finmag-master/dev/sandbox/timeintegration/values.py
|
from dolfin import *
def c():
return 1e8
def M0():
Ms = 1e8
return Ms, Constant((0.8*Ms, -0.1*Ms*2, 0.1*Ms*0))
| 126 | 11.7 | 54 |
py
|
finmag
|
finmag-master/dev/sandbox/timeintegration/macrospin_ode_nojac.py
|
from dolfin import *
from scipy.integrate import ode
from numpy import linspace
from values import c, M0
set_log_level(21)
m = 1e-5
mesh = Box(0,m,0,m,0,m,1,1,1)
# Functions
V = VectorFunctionSpace(mesh, "CG", 1)
u = TrialFunction(V)
v = TestFunction(V)
# Parameters
alpha = 0.5
gamma = 2.211e5
p = Constant(gamma/(1 + alpha**2))
c = c()
# Applied field.
H = Constant((0, 1e5, 0))
# Initial direction of the magnetic field.
Ms, M0 = M0()
M = Function(V)
M.assign(M0)
# Variational forms
a = inner(u, v)*dx
L = inner((-p*cross(M,H)
-p*alpha/Ms*cross(M,cross(M,H))
-c*(inner(M,M) - Ms**2)*M/Ms**2), v)*dx
# Time derivative of the magnetic field.
dM = Function(V)
counter = 0
def f(t, y):
global counter
counter += 1
M.vector()[:] = y
solve(a==L, dM)
return dM.vector().array()
y0 = M.vector().array()
t0 = 0
r = ode(f).set_integrator('vode', method='bdf', with_jacobian=False)
r.set_initial_value(y0, t0)
t1 = 1e-9
dt = 1e-11
while r.successful() and r.t < t1-dt:
r.integrate(r.t + dt)
print r.y
print counter
| 1,066 | 17.084746 | 68 |
py
|
finmag
|
finmag-master/dev/sandbox/timeintegration/jacobian_test/reduced_problem.py
|
from dolfin import *
import finmag.util.consts as consts
# Mesh and functionspace
L = 10e-9 # 10 nm
mesh = Box(0,0,0,L,L,L,4,4,4)
V = VectorFunctionSpace(mesh, 'CG', 1)
# Parameters
alpha = 0.02
gamma = consts.gamma
# Initial magnetisation
m0_tuple = (("1",
"5 * pow(cos(pi * (x[0] * pow(10, 9) - 11) / 6.), 3) \
* pow(cos(pi * x[1] * pow(10, 9) / 6.), 3)",
"0"))
M = interpolate(Expression(m0_tuple), V)
# Exchange field
Eex = inner(grad(M), grad(M))*dx
Hex = derivative(Eex, M)
# TODO: Figure out a way to do this without using this scheme
H = Function(V)
H.vector()[:] = assemble(Hex)
# because then we (probably) lose the information that H depends on M,
# which is useful later when computing the jacobian.
# LLG equation
p = gamma / (1 + alpha*alpha)
q = alpha * p
u = TrialFunction(V)
v = TestFunction(V)
# L should contain Hex instead of the "manually" assigned H, but this fails.
a = inner(u, v)*dx
L = inner(-p * cross(M, H)
-q * cross(M, cross(M, H)), v) * dx
# Jacobian
J = derivative(L, M)
| 1,066 | 23.813953 | 76 |
py
|
finmag
|
finmag-master/dev/sandbox/timeintegration/jacobian_test/applied_exchange_anisotropy.py
|
from dolfin import *
import numpy as np
import finmag.util.consts as consts
# Mesh and functionspace
x0 = 0; x1 = 15e-9; nx = 30;
y0 = -4.5e-9; y1 = 4.5e-9; ny = 18;
z0 = -0.1e-9; z1 = 0.1e-9; nz = 1;
mesh = Box(x0, y0, z0, x1, y1, z1, nx, ny, nz)
V = VectorFunctionSpace(mesh, 'Lagrange', 1, dim=3)
# Parameters
Ms = 1e6
alpha = 0.02
gamma = consts.gamma
c = 1e11 # 1/s numerical scaling correction
C = 1.3e-11 # J/m exchange constant
K = Constant(520e3) # Anisotropy constant
a = Constant((np.sqrt(0.5), np.sqrt(0.5), 0)) # Easy axis
# Applied field
Happ = interpolate(Constant((Ms,0,0)), V)
# Initial magnetisation
m0_tuple = (("1",
"5 * pow(cos(pi * (x[0] * pow(10, 9) - 11) / 6), 3) \
* pow(cos(pi * x[1] * pow(10, 9) / 6), 3)",
"0"))
M = interpolate(Expression(m0_tuple), V)
# Exchange field
Eex = inner(grad(M), grad(M))*dx
Hex = derivative(Eex, M)
# Anisotropy field
Eani = K*(Constant(1) - (dot(a, M))**2)*dx
Hani = assemble(derivative(Eani, M)) # Try with assemble to see if that changes anything
print type(Happ), type(Hex), type(Hani)
exit()
# Effective field
Heff = Hex + Hani + Happ
# LLG equation
p = gamma / (1 + alpha*alpha)
q = alpha * p
u = TrialFunction(V)
v = TestFunction(V)
a = inner(u, v)*dx
L = inner(-p * cross(M, Heff)
-q * cross(M, cross(M, Heff))
-c * (inner(M, M) - 1) * M , v) * dx
# Jacobian
J = derivative(L, M)
| 1,422 | 23.534483 | 88 |
py
|
finmag
|
finmag-master/dev/sandbox/timeintegration/c-dolfin-compare/dolfin-spinwave2.py
|
from dolfin import *
import instant
from finmag.energies import Exchange
from scipy.integrate import ode
import finmag.util.helpers as h
import finmag.util.consts as consts
import os
import numpy as np
set_log_level(21)
def m_average(y, V, vol):
m = Function(V)
m.vector()[:] = y
mx = assemble(dot(m, Constant([1,0,0])) * dx)
my = assemble(dot(m, Constant([0,1,0])) * dx)
mz = assemble(dot(m, Constant([0,0,1])) * dx)
return np.array([mx, my, mz]) / vol
def dolfinsolve(A, dm, L):
b = assemble(L)
solve(A, dm.vector(), b)
return 0, dm.vector().array()
x0 = 0; x1 = 15e-9; nx = 30;
y0 = -4.5e-9; y1 = 4.5e-9; ny = 18;
z0 = -0.1e-9; z1 = 0.1e-9; nz = 1;
mesh = BoxMesh(x0, y0, z0, x1, y1, z1, nx, ny, nz)
nb_nodes = len(mesh.coordinates())
V = VectorFunctionSpace(mesh, 'Lagrange', 1, dim=3)
Volume = assemble(Constant(1)*dx(mesh))
# Defaults from LLG
alpha = 0.5
gamma = consts.gamma
c = 1e11 # 1/s numerical scaling correction
# 0.1e12 1/s is the value used by default in nmag 0.2
C = 1.3e-11 # J/m exchange constant
Ms = 8.6e5 # A/m saturation magnetisation
t = 0 # s
H_app = (0, 0, 0)
H_app = interpolate(Constant(H_app), V)
pins = []
# Defaults overwrite from spinwave program
Ms = 1e6
C = 1.3e-11
#llg.c = 1e11
alpha = 0.02
m0_tuple = (("1",
"5 * pow(cos(pi * (x[0] * pow(10, 9) - 11) / 6), 3) \
* pow(cos(pi * x[1] * pow(10, 9) / 6), 3)",
"0"))
M = interpolate(Expression(m0_tuple), V)
M.vector()[:] = h.fnormalise(M.vector().array())
m_arr = M.vector().array()
for i in xrange(nb_nodes):
x, y, z = mesh.coordinates()[i]
mx = 1; my = 0; mz = 0;
if 8e-9 < x < 14e-9 and -3e-9 < y < 3e-9:
pass
else:
m_arr[i] = mx; m_arr[i+nb_nodes] = my; m_arr[i+2*nb_nodes] = mz;
M.vector()[:] = m_arr
# LLG setup
exchange = Exchange(C)
exchange.setup(V, M, Ms)
H_eff = Function(V)
H_eff.vector()[:] = exchange.compute_field()
p = gamma / (1 + alpha*alpha)
q = alpha * p
u = TrialFunction(V)
v = TestFunction(V)
a = inner(u, v)*dx
L = inner(-p * cross(M, H_eff)
-q * cross(M, cross(M, H_eff))
-c * (inner(M,M) - 1) * M , v) * dx
dm = Function(V)
A = assemble(a)
# LLG solve_for
def solve_for(t, y):
M.vector()[:] = y
H_ex = exchange.compute_field()
H_eff.vector()[:] = H_ex + H_app.vector().array()
status, dMdt = dolfinsolve(A, dm, L)
if status == 0:
return dMdt
t0 = 0; dt = 0.05e-12; t1 = 10e-12
r = ode(solve_for).set_integrator("vode", method="bdf", rtol=1e-5, atol=1e-5)
r.set_initial_value(M.vector().array(), t0)
fh = open("averages_dolfin.txt", "w")
while r.successful() and r.t <= t1:
print "Integrating time = %gs" % (r.t)
mx, my, mz = m_average(r.y, V, Volume)
print str(r.t), " ", str(mx), " ", str(my), " ", str(mz)
fh.write(str(r.t) + " " + str(mx) + " " + str(my) + " " + str(mz) + "\n")
r.integrate(r.t + dt)
plot(M)
fh.close()
print "Done"
interactive()
| 2,980 | 24.698276 | 77 |
py
|
finmag
|
finmag-master/dev/sandbox/timeintegration/c-dolfin-compare/c-spinwave.py
|
from dolfin import *
import instant
from finmag.energies import Exchange
from scipy.integrate import ode
import finmag.util.helpers as h
import finmag.util.consts as consts
import os
import numpy as np
def m_average(y, V, vol):
m = Function(V)
m.vector()[:] = y
mx = assemble(dot(m, Constant([1,0,0])) * dx)
my = assemble(dot(m, Constant([0,1,0])) * dx)
mz = assemble(dot(m, Constant([0,0,1])) * dx)
return np.array([mx, my, mz]) / vol
def load_c_code():
"""
Loads the C-code in the file dmdt.c, that will later
get called to compute the right-hand side of the LLG equation.
"""
__location__ = os.path.realpath(
os.path.join(os.getcwd(), os.path.dirname(__file__)))
with open(os.path.join(__location__, 'dmdt.c'), "r") as f:
c_code = f.read()
args = [["Mn", "M", "in"], ["Hn", "H", "in"],
["dMdtn", "dMdt", "out"], ["Pn", "P", "in"]]
return instant.inline_with_numpy(c_code, arrays = args)
csolve = load_c_code()
x0 = 0; x1 = 15e-9; nx = 30;
y0 = -4.5e-9; y1 = 4.5e-9; ny = 18;
z0 = -0.1e-9; z1 = 0.1e-9; nz = 1;
mesh = BoxMesh(x0, y0, z0, x1, y1, z1, nx, ny, nz)
nb_nodes = len(mesh.coordinates())
V = VectorFunctionSpace(mesh, 'Lagrange', 1, dim=3)
Volume = assemble(Constant(1)*dx(mesh))
# Defaults from LLG
alpha = 0.5
gamma = consts.gamma
c = 1e11 # 1/s numerical scaling correction
# 0.1e12 1/s is the value used by default in nmag 0.2
C = 1.3e-11 # J/m exchange constant
Ms = 8.6e5 # A/m saturation magnetisation
t = 0 # s
H_app = (0, 0, 0)
H_app = interpolate(Constant(H_app), V)
pins = []
# Defaults overwrite from spinwave program
Ms = 1e6
C = 1.3e-11
#llg.c = 1e11
alpha = 0.02
m0_tuple = (("1",
"5 * pow(cos(pi * (x[0] * pow(10, 9) - 11) / 6), 3) \
* pow(cos(pi * x[1] * pow(10, 9) / 6), 3)",
"0"))
m0 = Expression(m0_tuple)
m0 = interpolate(m0, V)
m0.vector()[:] = h.fnormalise(m0.vector().array())
m_func = m0
m_arr = m0.vector().array()
for i in xrange(nb_nodes):
x, y, z = mesh.coordinates()[i]
mx = 1; my = 0; mz = 0;
if 8e-9 < x < 14e-9 and -3e-9 < y < 3e-9:
pass
else:
m_arr[i] = mx; m_arr[i+nb_nodes] = my; m_arr[i+2*nb_nodes] = mz;
m_func.vector()[:] = m_arr
# LLG setup
exchange = Exchange(C)
exchange.setup(V, m_func, Ms)
# LLG solve_for
def solve_for(t, y):
m_func.vector()[:] = y
H_ex = exchange.compute_field()
H_eff = H_ex + H_app.vector().array()
status, dMdt = csolve(alpha, gamma, c,
m_func.vector().array(), H_eff,
m_func.vector().array().shape[0], pins)
if status == 0:
return dMdt
t0 = 0; dt = 0.05e-12; t1 = 10e-12
r = ode(solve_for).set_integrator("vode", method="bdf", rtol=1e-5, atol=1e-5)
r.set_initial_value(m_func.vector().array(), t0)
fh = open("averages_c.txt", "w")
while r.successful() and r.t <= t1:
print "Integrating time = %gs" % (r.t)
mx, my, mz = m_average(r.y, V, Volume)
fh.write(str(r.t) + " " + str(mx) + " " + str(my) + " " + str(mz) + "\n")
r.integrate(r.t + dt)
plot(m_func)
fh.close()
print "Done"
interactive()
| 3,167 | 26.076923 | 77 |
py
|
finmag
|
finmag-master/dev/sandbox/timeintegration/c-dolfin-compare/dolfin-spinwave.py
|
from dolfin import *
import instant
from finmag.energies import Exchange
from scipy.integrate import ode
import finmag.util.helpers as h
import os
import numpy as np
set_log_level(21)
def m_average(y, V, vol):
m = Function(V)
m.vector()[:] = y
mx = assemble(dot(m, Constant([1,0,0])) * dx)
my = assemble(dot(m, Constant([0,1,0])) * dx)
mz = assemble(dot(m, Constant([0,0,1])) * dx)
return np.array([mx, my, mz]) / vol
def dolfinsolve(alpha, gamma, c,
m_arr, H_eff,
V, pins):
# Ok, this should seriously be set up outside.
# Just for testing purposes.
p = gamma / (1 + alpha*alpha)
q = alpha * p
u = TrialFunction(V)
v = TestFunction(V)
M = Function(V)
M.vector()[:] = m_arr
H = Function(V)
H.vector()[:] = H_eff
a = inner(u, v)*dx
L = inner(-p * cross(M, H)
-q * cross(M, cross(M, H))
-c * (inner(M,M) - 1) * M , v) * dx
dm = Function(V)
solve(a == L, dm)
return 0, dm.vector().array()
x0 = 0; x1 = 15e-9; nx = 30;
y0 = -4.5e-9; y1 = 4.5e-9; ny = 18;
z0 = -0.1e-9; z1 = 0.1e-9; nz = 1;
mesh = BoxMesh(x0, y0, z0, x1, y1, z1, nx, ny, nz)
nb_nodes = len(mesh.coordinates())
V = VectorFunctionSpace(mesh, 'Lagrange', 1, dim=3)
Volume = assemble(Constant(1)*dx(mesh))
# Defaults from LLG
alpha = 0.5
gamma = 2.210173e5 # m/(As)
#source for gamma: OOMMF manual, and in Werner Scholz thesis,
#after (3.7), llg_gamma_G = m/(As).
c = 1e11 # 1/s numerical scaling correction
# 0.1e12 1/s is the value used by default in nmag 0.2
C = 1.3e-11 # J/m exchange constant
Ms = 8.6e5 # A/m saturation magnetisation
t = 0 # s
H_app = (0, 0, 0)
H_app = interpolate(Constant(H_app), V)
pins = []
# Defaults overwrite from spinwave program
Ms = 1e6
C = 1.3e-11
#llg.c = 1e11
alpha = 0.02
m0_tuple = (("1",
"5 * pow(cos(pi * (x[0] * pow(10, 9) - 11) / 6), 3) \
* pow(cos(pi * x[1] * pow(10, 9) / 6), 3)",
"0"))
m0 = Expression(m0_tuple)
m0 = interpolate(m0, V)
m0.vector()[:] = h.fnormalise(m0.vector().array())
m_func = m0
m_arr = m0.vector().array()
for i in xrange(nb_nodes):
x, y, z = mesh.coordinates()[i]
mx = 1; my = 0; mz = 0;
if 8e-9 < x < 14e-9 and -3e-9 < y < 3e-9:
pass
else:
m_arr[i] = mx; m_arr[i+nb_nodes] = my; m_arr[i+2*nb_nodes] = mz;
m_func.vector()[:] = m_arr
# LLG setup
exchange = Exchange(C)
exchange.setup(V, m_func, Ms)
# LLG solve_for
def solve_for(t, y):
m_func.vector()[:] = y
H_ex = exchange.compute_field()
H_eff = H_ex + H_app.vector().array()
status, dMdt = dolfinsolve(alpha, gamma, c,
m_func.vector().array(), H_eff,
V, pins)
if status == 0:
return dMdt
t0 = 0; dt = 0.05e-12; t1 = 10e-12
r = ode(solve_for).set_integrator("vode", method="bdf", rtol=1e-5, atol=1e-5)
r.set_initial_value(m_func.vector().array(), t0)
fh = open("averages_dolfin.txt", "w")
while r.successful() and r.t <= t1:
print "Integrating time = %gs" % (r.t)
mx, my, mz = m_average(r.y, V, Volume)
fh.write(str(r.t) + " " + str(mx) + " " + str(my) + " " + str(mz) + "\n")
r.integrate(r.t + dt)
plot(m_func)
fh.close()
print "Done"
interactive()
| 3,294 | 24.944882 | 77 |
py
|
finmag
|
finmag-master/dev/sandbox/timeintegration/c-dolfin-compare/dolfin-spinwave3.py
|
from dolfin import *
import instant
from finmag.energies import Exchange
from scipy.integrate import ode
import finmag.util.helpers as h
import finmag.util.consts as consts
import os
import numpy as np
parameters["linear_algebra_backend"] = "PETSc"
parameters["form_compiler"]["cpp_optimize"] = True
ffc_options = {"optimize": True, \
"eliminate_zeros": True, \
"precompute_basis_const": True, \
"precompute_ip_const": True, \
"quadrature_degree": 2}
set_log_level(21)
def m_average(y, V, vol):
m = Function(V)
m.vector()[:] = y
mx = assemble(dot(m, Constant([1,0,0])) * dx)
my = assemble(dot(m, Constant([0,1,0])) * dx)
mz = assemble(dot(m, Constant([0,0,1])) * dx)
return np.array([mx, my, mz]) / vol
x0 = 0; x1 = 15e-9; nx = 30;
y0 = -4.5e-9; y1 = 4.5e-9; ny = 18;
z0 = -0.1e-9; z1 = 0.1e-9; nz = 1;
mesh = BoxMesh(x0, y0, z0, x1, y1, z1, nx, ny, nz)
nb_nodes = len(mesh.coordinates())
V = VectorFunctionSpace(mesh, 'Lagrange', 1, dim=3)
Volume = assemble(Constant(1)*dx(mesh))
# Defaults from LLG
alpha = 0.5
gamma = consts.gamma
c = 1e11 # 1/s numerical scaling correction
# 0.1e12 1/s is the value used by default in nmag 0.2
C = 1.3e-11 # J/m exchange constant
Ms = 8.6e5 # A/m saturation magnetisation
t = 0 # s
H_app = (0, 0, 0)
H_app = interpolate(Constant(H_app), V)
pins = []
# Defaults overwrite from spinwave program
Ms = 1e6
alpha = 0.02
m0_tuple = (("1",
"5 * pow(cos(pi * (x[0] * pow(10, 9) - 11) / 6), 3) \
* pow(cos(pi * x[1] * pow(10, 9) / 6), 3)",
"0"))
M = interpolate(Expression(m0_tuple), V)
M.vector()[:] = h.fnormalise(M.vector().array())
m_arr = M.vector().array()
for i in xrange(nb_nodes):
x, y, z = mesh.coordinates()[i]
mx = 1; my = 0; mz = 0;
if 8e-9 < x < 14e-9 and -3e-9 < y < 3e-9:
pass
else:
m_arr[i] = mx; m_arr[i+nb_nodes] = my; m_arr[i+2*nb_nodes] = mz;
M.vector()[:] = m_arr
# LLG setup
exchange = Exchange(C)
exchange.setup(V, M, Ms)
H_eff = Function(V)
H_eff.vector()[:] = exchange.compute_field()
p = gamma / (1 + alpha*alpha)
q = alpha * p
u = TrialFunction(V)
v = TestFunction(V)
a = inner(u, v)*dx
L = inner(-p * cross(M, H_eff)
-q * cross(M, cross(M, H_eff))
-c * (inner(M,M) - 1) * M , v) * dx
dm = Function(V)
problem = LinearVariationalProblem(a, L, dm, form_compiler_parameters=ffc_options)
solver = LinearVariationalSolver(problem)
# LLG solve_for
def solve_for(t, y):
M.vector()[:] = y
H_ex = exchange.compute_field()
H_eff.vector()[:] = H_ex + H_app.vector().array()
solver.solve()
return dm.vector().array()
t0 = 0; dt = 0.05e-12; t1 = 10e-12
r = ode(solve_for).set_integrator("vode", method="bdf", rtol=1e-5, atol=1e-5)
r.set_initial_value(M.vector().array(), t0)
fh = open("averages_dolfin.txt", "w")
while r.successful() and r.t <= t1:
print "Integrating time = %gs" % (r.t)
mx, my, mz = m_average(r.y, V, Volume)
fh.write(str(r.t) + " " + str(mx) + " " + str(my) + " " + str(mz) + "\n")
r.integrate(r.t + dt)
plot(M)
fh.close()
print "Done"
interactive()
| 3,171 | 25.881356 | 82 |
py
|
finmag
|
finmag-master/dev/sandbox/skyrmions/sw_analysis.py
|
import numpy as np
import pylab as plt
import matplotlib.cm as cm
file_name = 'output.npz'
threshold = 3000
def find_max(a, threshold=0.5):
index = []
for i in range(1,len(a)-1):
if a[i-1] < a[i] > a[i+1] and a[i]>threshold:
index.append(i)
return index
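# Quick illustrative check of find_max (added, not part of the original
# script): in [0, 1, 5, 1, 0, 4, 0] the local maxima above a threshold of 3
# sit at indices 2 and 5.
assert find_max([0, 1, 5, 1, 0, 4, 0], threshold=3) == [2, 5]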
#load the file with tsim, mx, my and mz components
npzfile = np.load(file_name)
#extract numpy arrays from file
tsim = npzfile['tsim']
mx = npzfile['mx']
my = npzfile['my']
mz = npzfile['mz']
#calculate and subtract the average value of magnetisation for each component
mx -= np.average(mx)
my -= np.average(my)
mz -= np.average(mz)
#Fourier transform complex numpy arrays for all sample point magnetisation components
fmx = np.zeros([mx.shape[0], mx.shape[1], mx.shape[2]], dtype=complex)
fmy = np.zeros([my.shape[0], my.shape[1], my.shape[2]], dtype=complex)
fmz = np.zeros([mz.shape[0], mz.shape[1], mz.shape[2]], dtype=complex)
#compute the FFT for all mesh points and populate arrays fmx, fmy and fmz
for i in range(mx.shape[0]):
for j in range(mx.shape[1]):
fmx[i,j,:] = np.fft.fft(mx[i,j,:])
fmy[i,j,:] = np.fft.fft(my[i,j,:])
fmz[i,j,:] = np.fft.fft(mz[i,j,:])
#sum the FFT arrays over all mesh points (used for the average below)
s_fmx = 0
s_fmy = 0
s_fmz = 0
for i in xrange(mx.shape[0]):
for j in xrange(mx.shape[1]):
s_fmx += fmx[i,j,:]
s_fmy += fmy[i,j,:]
s_fmz += fmz[i,j,:]
#compute the average of FFT
fmx_av = s_fmx / (mx.shape[0]*mx.shape[1])
fmy_av = s_fmy / (mx.shape[0]*mx.shape[1])
fmz_av = s_fmz / (mx.shape[0]*mx.shape[1])
#compute the frequency axis values
freq = np.fft.fftfreq(tsim.shape[-1], d=tsim[1]-tsim[0])
#create arrays for plotting (upper halves of arrays)
freq_plot = freq[0:len(freq)/2+1]
fmx_av_plot = np.absolute(fmx_av[0:len(fmx_av)/2+1])
fmy_av_plot = np.absolute(fmy_av[0:len(fmy_av)/2+1])
fmz_av_plot = np.absolute(fmz_av[0:len(fmz_av)/2+1])
#plot the average fft for all three components of magnetisation
#x component
plt.figure(1)
p1 = plt.subplot(311)
p1.plot(freq_plot, fmx_av_plot, label='mx')
plt.xlim([0,1e11])
plt.legend()
plt.xlabel('f')
plt.ylabel('|S|')
#y component
p2 = plt.subplot(312)
p2.plot(freq_plot, fmy_av_plot, label='my')
plt.xlim([0,1e11])
plt.legend()
plt.xlabel('f')
plt.ylabel('|S|')
#z component
p3 = plt.subplot(313)
p3.plot(freq_plot, fmz_av_plot, label='mz')
plt.xlim([0,1e11])
plt.legend()
plt.xlabel('f')
plt.ylabel('|S|')
#plt.show()
#find peaks in fft
mode_indices = find_max(fmx_av_plot, threshold = threshold)
print mode_indices
fig = 2
mode_number = 0
if mode_indices != []:
for i in mode_indices:
#plot the x component of the mode
#title_string = 'frequency: ' + str(freq_plot[i]) + ', component: x'
mode_x = np.absolute(fmx[:,:,i])
mode_y = np.absolute(fmy[:,:,i])
mode_z = np.absolute(fmz[:,:,i])
plt.figure(2)
plt.subplot(len(mode_indices),3,mode_number+1)
plt.imshow(mode_x, cmap = cm.Greys_r)
plt.subplot(len(mode_indices),3,mode_number+2)
plt.imshow(mode_y, cmap = cm.Greys_r)
plt.subplot(len(mode_indices),3,mode_number+3)
plt.imshow(mode_z, cmap = cm.Greys_r)
#plt.show()
mode_number += 3
#show the graphs
plt.show()
| 3,258 | 27.094828 | 85 |
py
|
finmag
|
finmag-master/dev/sandbox/skyrmions/sw_dmi.py
|
import dolfin as df
import numpy as np
import pylab as plt
from finmag.energies import Exchange, Zeeman, DMI
from finmag import Simulation as Sim
from math import pi
def plot_excitation(tsinc, Hsinc):
"""Plot the external excitation signal both in time and frequency domain."""
#time domain
plt.subplot(211)
plt.plot(tsinc, Hsinc)
#frequency domain
s_excitation = np.fft.fft(Hsinc)
f_excitation = np.fft.fftfreq(len(s_excitation), d=tsinc[1]-tsinc[0])
plt.subplot(212)
plt.plot(f_excitation, np.absolute(s_excitation))
plt.xlim([-2e12,2e12])
plt.show()
#output npz file name
file_name = 'sw_dmi'
tstart = 0
#time to converge into the static state
tstatic = 1e-9 #1ns
#excitation pulse duration
tpulse = 0.08e-9
#end time for recording oscillations
tend = 5e-9
#number of steps for recording oscillations
n_osc_steps = 5001
#number of time steps in excitation signal
n_sinc_steps = 501
toffset = 1e-20
#dimensions of the square thin-film sample
xdim = 7e-9
ydim = 7e-9
zdim = 1e-9
#mesh size in x, y and z directions
xv = 5
yv = 5
zv = 1
#magnetisation saturation
Ms = 1.567e5 #A/m
#simulated frequency maximum
fc = 200e9 #200GHz
#excitation signal amplitude
Hamp = 0.07*Ms
# Hsinc = f(tsinc) for the duration of tpulse
tsinc = np.linspace(-tpulse/2, tpulse/2, n_sinc_steps)
Hsinc = Hamp * np.sinc(2*pi*fc*tsinc)
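# Optional check (added for illustration): the plot_excitation helper defined
# above can be used to inspect the pulse in the time and frequency domains
# before the simulation is run, e.g.
#
#     plot_excitation(tsinc, Hsinc)
#
# (np.sinc is the normalised sinc, sin(pi*x)/(pi*x).)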
#Gilbert damping used during the spin-wave recording stage
sw_alpha = 1e-20
mesh = df.BoxMesh(0, 0, 0, xdim, ydim, zdim, xv, yv, zv)
sim = Sim(mesh, Ms)
sim.set_m((1,1,1))
#exchange energy constant
A = 3.57e-13 #J/m
#DMI constant
D = 2.78e-3 #J/m**2
#external magnetic field
H = [0,0,0] #A/m
sim.add(Exchange(A)) #exchange interaction
sim.add(DMI(D)) #DMI interaction
sim.add(Zeeman(H)) #Zeeman interaction
############################################################
#time series for the static state simulation
tsim = np.linspace(tstart, tstatic, 101)
#simulation to the ground state
sim.alpha = 1 #large damping so the precessional dynamics die out quickly
for t in tsim:
sim.run_until(t)
df.plot(sim.llg._m)
############################################################
############################################################
#excite the system with an external sinc field
tsim = np.linspace(tstatic+toffset, tstatic+tpulse, n_sinc_steps)
i = 0 #index for the external excitation
for t in tsim:
H = [Hsinc[i], Hsinc[i], Hsinc[i]]
sim.add(Zeeman(H))
sim.run_until(t)
df.plot(sim.llg._m)
i += 1
############################################################
############################################################
#record spin waves
tsim = np.linspace(tstatic+tpulse+toffset, tend, n_osc_steps)
#decrease the Gilbert damping to the previously chosen value
sim.alpha = sw_alpha
#turn off the external field
sim.add(Zeeman([0,0,0]))
#points at which the magnetisation is recorded
xs = np.linspace(0, xdim-1e-20, xv+1)
ys = np.linspace(0, ydim-1e-20, yv+1)
z = 0 #magnetisation read only in one x-y plane at z = 0
#make empty arrays for recording mx, my and mz at the sample points
mx = np.zeros([xv+1, yv+1, n_osc_steps])
my = np.zeros([xv+1, yv+1, n_osc_steps])
mz = np.zeros([xv+1, yv+1, n_osc_steps])
for i in xrange(len(tsim)):
#simulate up to next time step
sim.run_until(tsim[i])
df.plot(sim.llg._m)
#record the magnetisation state
for j in range(len(xs)):
for k in range(len(ys)):
mx[j,k,i] = sim.llg._m(xs[j], ys[k], 0)[0]
my[j,k,i] = sim.llg._m(xs[j], ys[k], 0)[1]
mz[j,k,i] = sim.llg._m(xs[j], ys[k], 0)[2]
#############################################################
tsim = tsim - (tstatic+tpulse+toffset)
#save the file into the file_name.npz file
np.savez(file_name, tsim=tsim, mx=mx, my=my, mz=mz)
| 3,733 | 27.075188 | 80 |
py
|
finmag
|
finmag-master/dev/sandbox/skyrmions/skyrmion_dispersion.py
|
import numpy as np
import dolfin as df
from finmag import Simulation
from finmag.energies import Exchange, DMI, Demag, Zeeman
from finmag.energies.zeeman import TimeZeemanPython
import matplotlib.pyplot as plt
Rx = 5445
Ry = 60
Nx = 2178
Ny = 24
mesh = df.RectangleMesh(0,0,Rx,Ry,Nx,Ny)
def m_init_fun(pos):
return np.random.random(3)-0.5
def m_init_skyrmion(pos):
x = (pos[0])%45 - 22.5
y = pos[1] - 30
if x**2+y**2 < 5**2:
return (0,0,-1)
else:
return (0,0,1)
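# Note (added for illustration): m_init_skyrmion seeds a chain of skyrmion
# cores of radius 5 nm (m_z = -1), one every 45 nm along x, centred on the
# line y = 30 nm, with m_z = +1 elsewhere; it is passed to sim.set_m below,
# e.g.
#
#     sim.set_m(m_init_skyrmion)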
class MyExpression(df.Expression):
def __init__(self, h0, kc):
self.h0 = h0
self.kc = kc
def eval(self, value, x):
vy = 1.0
for i in range(Ny):
vy += np.sin(i*np.pi*x[1]/Ry)
xp = self.kc * (x[0] - Rx/2.0)
vy *= np.sinc(xp)
value[0] = 0
value[1] = vy*self.h0
value[2] = 0
def value_shape(self):
return (3,)
class AlphaExpression(df.Expression):
def eval(self, value, x):
if x[0] < 90:
value[0]=(90-x[0])*100
elif x[0] > 5355:
value[0]=(x[0]-5355)*100
else:
value[0]=1.0
def plot_m(sim):
df.plot(sim.llg._m)
def relax_system(mesh=mesh):
Ms = 8.6e5
sim = Simulation(mesh, Ms, pbc='1d', unit_length=1e-9, name = 'relax' )
sim.set_m(m_init_skyrmion)
A = 1.3e-11
D = 4e-3
sim.add(Exchange(A))
sim.add(DMI(D))
sim.add(Zeeman((0,0,0.4*Ms)))
#sim.run_until(1e-9)
#sim.schedule('save_vtk', at_end=True)
#sim.schedule(plot_m, every=1e-10, at_end=True)
sim.relax()
df.plot(sim.llg._m)
np.save('relaxed.npy',sim.m)
#df.interactive()
def save_data(sim, xs):
    #save the m_y profile along the line y = 30 nm (helper, not used below)
    m = sim.llg._m
    my = np.array([m(x, 30)[1] for x in xs])
    #placeholder filename: the original np.save() call had no arguments
    np.save('my_profile.npy', my)
def find_skyrmion_center(fun):
from scipy.signal import argrelmin
xs = np.linspace(0, Rx, Nx+1)
mzs = np.array([fun(x, 30)[2] for x in xs])
mins = argrelmin(mzs)[0]
all=[]
xs_refine = np.linspace(-1.5, 1.5, 301)
for i in range(len(mins)):
mzs_refine = np.array([fun(x, 30)[2] for x in xs_refine+xs[mins[i]]])
mins_fine = argrelmin(mzs_refine)[0]
all.append(xs_refine[mins_fine[0]]+xs[mins[i]])
#print all, len(all)
for i in range(len(all)-1):
print all[i+1]-all[i]
xmin = all[0]
xmax = all[-1]
print xmin,xmax,len(mins), (xmax-xmin)/(len(mins)-1)
return np.linspace(xmin, xmax, len(mins))
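# Hedged usage sketch (added for illustration): find_skyrmion_center samples
# m_z along the line y = 30 nm, locates the local minima (the skyrmion cores),
# refines each core position on a 0.01 nm grid and returns an evenly spaced
# array spanning the first and last core, e.g.
#
#     xs = find_skyrmion_center(sim.llg._m)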
#fig=plt.figure()
def excite_system():
Ms = 8.6e5
sim = Simulation(mesh, Ms,pbc='1d', unit_length=1e-9)
sim.alpha = 0.0001
sim.set_m(np.load('relaxed.npy'))
alpha_expr = AlphaExpression()
alpha_mult = df.interpolate(alpha_expr, sim.llg.S1)
sim.spatial_alpha(0.0001, alpha_mult)
#df.plot(alpha_mult)
#df.interactive()
#xs=find_skyrmion_center(sim.llg._m)
#
#assert(1==2)
A = 1.3e-11
D = 4e-3
sim.add(Exchange(A))
sim.add(DMI(D))
sim.add(Zeeman((0,0,0.4*Ms)))
GHz = 1e9
omega = 50 * 2 * np.pi * GHz
def time_fun(t):
return np.sinc(omega*(t-50e-12))
h0 = 1e3
kc = 1.0/45.0
H0 = MyExpression(h0,kc)
sim.add(TimeZeemanPython(H0,time_fun))
xs = find_skyrmion_center(sim.llg._m)
ts = np.linspace(0, 8e-9, 4001)
np.save('xs.npy',xs)
sim.create_integrator()
sim.integrator.integrator.set_scalar_tolerances(1e-8, 1e-8)
index = 0
for t in ts:
sim.run_until(t)
np.save('data/m_%d.npy'%index, sim.llg.m)
index += 1
if __name__ == '__main__':
#relax_system()
excite_system()
| 3,783 | 20.622857 | 77 |
py
|