finmag | finmag-master/src/finmag/energies/demag/fk_demag_test.py
import time
import pytest
import dolfin as df
import numpy as np
from math import pi
from finmag.energies.demag.fk_demag import FKDemag
from finmag.field import Field
from finmag.util.consts import mu0
from finmag.util.meshes import sphere, box
radius = 1.0
maxh = 0.2
unit_length = 1e-9
volume = 4 * pi * (radius * unit_length) ** 3 / 3
def setup_demag_sphere(Ms):
mesh = sphere(r=radius, maxh=maxh)
Ms_field = Field(df.FunctionSpace(mesh, 'DG', 0), Ms)
S3 = df.VectorFunctionSpace(mesh, "Lagrange", 1)
m_function = df.Function(S3)
m_function.assign(df.Constant((1, 0, 0)))
m = Field(S3, m_function)
demag = FKDemag()
demag.setup(m, Ms_field, unit_length)
return demag
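# Note (added for clarity, standard magnetostatics): inside a uniformly
# magnetised sphere the demagnetising field is H = -M/3, so with Ms = 1
# and m = (1, 0, 0) the tests below expect H close to (-1/3, 0, 0)
# everywhere in the sphere.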
def test_interaction_accepts_name():
"""
Check that the interaction accepts a 'name' argument and has a 'name' attribute.
"""
demag = FKDemag(name='MyDemag')
assert hasattr(demag, 'name')
def test_demag_field_for_uniformly_magnetised_sphere():
demag = setup_demag_sphere(1)
H = demag.compute_field().reshape((3, -1))
H_expected = np.array([-1.0 / 3.0, 0.0, 0.0])
print "Got demagnetising field H =\n{}.\nExpected mean H = {}.".format(
H, H_expected)
TOL = 7e-3
diff = np.max(np.abs(H - H_expected[:, np.newaxis]), axis=1)
print "Maximum difference to expected result per axis is {}. Comparing to limit {}.".format(diff, TOL)
assert np.max(diff) < TOL
TOL = 8e-3
spread = np.abs(H.max(axis=1) - H.min(axis=1))
print "The values spread {} per axis. Comparing to limit {}.".format(spread, TOL)
assert np.max(spread) < TOL
@pytest.mark.xfail # this test currently fails, probably due to refactoring in the Field class
@pytest.mark.slow # this test needs a minute to complete
def test_thin_film_argument_saves_time_on_thin_film():
mesh = box(0, 0, 0, 500, 50, 1, maxh=2.0, directory="meshes")
Ms = Field(df.FunctionSpace(mesh, 'DG', 0), 8e5)
unit_length = 1e-9
S3 = df.VectorFunctionSpace(mesh, "Lagrange", 1)
m_function = df.Function(S3)
m_function.assign(df.Constant((0, 0, 1)))
m = Field(S3, m_function)
demag = FKDemag()
demag.setup(m, Ms, unit_length)
now = time.time()
H = demag.compute_field()
elapsed = time.time() - now
del(demag)
demag = FKDemag(thin_film=True)
demag.setup(m, Ms, unit_length)
now = time.time()
H = demag.compute_field()
elapsed_thin_film = time.time() - now
saved_relative = (elapsed - elapsed_thin_film) / elapsed
print "FKDemag thin film settings saved {:.1%} of time.".format(saved_relative)
assert elapsed_thin_film < elapsed
# This was 20% initially, but in order to make tests more robust this
# value is reduced to 5%
assert saved_relative > 0.05
def test_demag_energy_for_uniformly_magnetised_sphere():
Ms = 800e3
demag = setup_demag_sphere(Ms)
E = demag.compute_energy()
# -mu0/2 Integral H * M with H = - M / 3
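# i.e. E = -mu0/2 * (-Ms/3) * Ms * volume = (1/6) * mu0 * Ms**2 * volume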
E_expected = (1.0 / 6.0) * mu0 * Ms ** 2 * volume
print "Got E = {}. Expected E = {}.".format(E, E_expected)
REL_TOL = 3e-2
rel_diff = abs(E - E_expected) / abs(E_expected)
print "Relative difference is {:.3g}%. Comparing to limit {:.3g}%.".format(
100 * rel_diff, 100 * REL_TOL)
assert rel_diff < REL_TOL
def test_energy_density_for_uniformly_magnetised_sphere():
Ms = 800e3
demag = setup_demag_sphere(Ms)
rho = demag.energy_density()
# -mu0/2 Integral H * M with H = - M / 3
E_expected = (1.0 / 6.0) * mu0 * Ms ** 2 * volume
rho_expected = E_expected / volume
print "Got mean rho = {:.3e}. Expected rho = {:.3e}.".format(np.mean(rho), rho_expected)
REL_TOL = 1.7e-2
rel_diff = np.max(np.abs(rho - rho_expected)) / abs(rho_expected)
print "Maximum relative difference = {:.3g}%. Comparing to limit {:.3g}%.".format(
100 * rel_diff, 100 * REL_TOL)
assert rel_diff < REL_TOL
def test_energy_density_for_uniformly_magnetised_sphere_as_function():
Ms = 800e3
demag = setup_demag_sphere(Ms)
rho = demag.energy_density_function()
print "Probing the energy density at the center of the sphere."
rho_center = rho([0.0, 0.0, 0.0])
# -mu0/2 Integral H * M with H = - M / 3
E_expected = (1.0 / 6.0) * mu0 * Ms ** 2 * volume
rho_expected = E_expected / volume
print "Got rho = {:.3e}. Expected rho = {:.3e}.".format(rho_center, rho_expected)
REL_TOL = 1.3e-2
rel_diff = np.max(np.abs(rho_center - rho_expected)) / abs(rho_expected)
print "Maximum relative difference = {:.3g}%. Comparing to limit {:.3g}%.".format(
100 * rel_diff, 100 * REL_TOL)
assert rel_diff < REL_TOL
def test_regression_Ms_numpy_type():
mesh = sphere(r=radius, maxh=maxh)
S3 = df.VectorFunctionSpace(mesh, "Lagrange", 1)
m_function = df.Function(S3)
m_function.assign(df.Constant((1, 0, 0)))
m = Field(S3, m_function)
Ms = np.sqrt(6.0 / mu0) # math.sqrt(6.0 / mu0) would work
demag = FKDemag()
Ms_field = Field(df.FunctionSpace(mesh, 'DG', 0), Ms)
demag.setup(m, Ms_field, unit_length) # this used to fail
finmag | finmag-master/src/finmag/energies/demag/fk_demag_pbc.py
"""
Computation of the demagnetising field using the Fredkin-Koehler
technique and the infamous magpar method.
Rationale: The previous implementation in FemBemFKSolver (child class
of FemBemDeMagSolver) was kind of a mess. This does the same thing in the
same amount of time with less code, and should be more conducive to further
optimisation as well as serving as a template for other techniques like the GCR.
"""
import logging
import numpy as np
import dolfin as df
from finmag.native.treecode_bem import compute_solid_angle_single
from finmag.native.treecode_bem import compute_boundary_element
from finmag.native.treecode_bem import build_boundary_matrix
logger = logging.getLogger('finmag')
class MacroGeometry(object):
def __init__(self, nx=None, ny=None, dx=None, dy=None, Ts=None):
"""
If Ts is not None the other parameters will be ignored.
"""
self.nx = nx or 1
self.ny = ny or 1
self.dx = dx
self.dy = dy
self.Ts = Ts
if Ts is not None and (nx is not None or ny is not None or dx is not None or dy is not None):
logger.warning(
"Ignoring arguments 'nx', 'ny', 'dx', 'dy' because 'Ts' is explicitly provided.")
else:
if self.nx < 1 or self.nx % 2 == 0 or self.ny < 1 or self.ny % 2 == 0:
raise Exception(
'Both nx and ny should be larger than 0 and must be odd.')
def compute_Ts(self, mesh):
if self.Ts is not None:
return self.Ts
dx, dy = self.find_mesh_info(mesh)
if self.dx is None:
self.dx = dx
if self.dy is None:
self.dy = dy
Ts = []
for i in range(-self.nx // 2 + 1, self.nx // 2 + 1):
for j in range(-self.ny // 2 + 1, self.ny // 2 + 1):
Ts.append([self.dx * i * 1.0, self.dy * j * 1.0, 0])
logger.debug("Creating macro-geometry with demag {} x {} tiles (dxdy: {} x {})".format(
self.nx, self.ny, self.dx, self.dy))
self.Ts = Ts
return self.Ts
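# A worked example of the tiling above (illustrative values, not part of
# the original code): with nx=3, ny=1, dx=100.0 and dy=50.0, the loops run
# over i in (-1, 0, 1) and j in (0,), so compute_Ts returns
# Ts = [[-100.0, 0.0, 0], [0.0, 0.0, 0], [100.0, 0.0, 0]],
# i.e. the mesh itself plus one translated copy on either side along x.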
def find_mesh_info(self, mesh):
xt = mesh.coordinates()
max_v = xt.max(axis=0)
min_v = xt.min(axis=0)
sizes = max_v - min_v
return sizes[0], sizes[1]
class BMatrixPBC(object):
def __init__(self, mesh, Ts=[(0., 0, 0)]):
self.mesh = mesh
self.bmesh = df.BoundaryMesh(self.mesh, 'exterior', False)
self.b2g_map = self.bmesh.entity_map(0).array()
#self.g2b_map = np.NaN * np.ones(self.mesh.num_vertices(), dtype=int)
# for (i, val) in enumerate(self.b2g_map):
#self.g2b_map[val] = i
self.__compute_bsa()
self.Ts = np.array(Ts, dtype=np.float)
n = self.bmesh.num_vertices()
self.bm = np.zeros((n, n))
self.compute_bmatrix()
def __compute_bsa(self):
vert_bsa = np.zeros(self.mesh.num_vertices())
mc = self.mesh.cells()
xyz = self.mesh.coordinates()
for i in range(self.mesh.num_cells()):
for j in range(4):
tmp_omega = compute_solid_angle_single(
xyz[mc[i][j]],
xyz[mc[i][(j + 1) % 4]],
xyz[mc[i][(j + 2) % 4]],
xyz[mc[i][(j + 3) % 4]])
vert_bsa[mc[i][j]] += tmp_omega
vert_bsa = vert_bsa / (4 * np.pi) - 1.0
self.vert_bsa = vert_bsa[self.b2g_map]
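# Sanity check for the diagonal term above (a standard boundary-element
# fact, added for clarity): at a vertex on a locally flat piece of the
# boundary the interior solid angle is 2*pi, so
# vert_bsa = 2*pi / (4*pi) - 1 = -0.5, the usual -1/2 diagonal
# contribution of the double-layer operator.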
def __compute_bmatrix_T(self, T):
cds = self.bmesh.coordinates()
face_nodes = np.array(self.bmesh.cells(), dtype=np.int32)
be = np.array([0., 0., 0.])
#dof_indices = self.llg.S1.dofmap().dofs()
#d2v = df.dof_to_vertex_map(self.llg.S1)
#v2d = df.vertex_to_dof_map(self.llg.S1)
#vertex_indices_reduced = [d2v[i] for i in dof_indices]
for p in range(len(cds)):
for c in face_nodes:
i, j, k = c
compute_boundary_element(cds[p], cds[i], cds[j], cds[k], be, T)
self.bm[p][i] += be[0]
self.bm[p][j] += be[1]
self.bm[p][k] += be[2]
def compute_bmatrix(self):
cds = self.bmesh.coordinates()
face_nodes = np.array(self.bmesh.cells(), dtype=np.int32)
self.bm[:, :] = 0.0
for T in self.Ts:
build_boundary_matrix(
cds, face_nodes, self.bm, T, len(cds), len(face_nodes))
for p in range(self.bmesh.num_vertices()):
self.bm[p][p] += self.vert_bsa[p]
if __name__ == '__main__':
mesh = df.UnitCubeMesh(1, 1, 1)
b = BMatrixPBC(mesh)
b.compute_bmatrix()
print b.b2g_map  # note: g2b_map is commented out in __init__, so only b2g_map is printed
#bem, b2g_map = compute_bem_fk(df.BoundaryMesh(mesh, 'exterior', False))
# print bem
# print b2g_map
finmag | finmag-master/src/finmag/energies/demag/demag_treecode.py
from dolfin import *
import dolfin as df
import numpy as np
from finmag.native.fast_sum_lib import FastSum
_nodes = (
(0.,),
(-0.5773502691896257,
0.5773502691896257),
(-0.7745966692414834,
0.,
0.7745966692414834),
(-0.861136311594053,
-0.3399810435848562,
0.3399810435848562,
0.861136311594053),
(-0.906179845938664,
-0.5384693101056829,
0.,
0.5384693101056829,
0.906179845938664))
_weights = (
(2.,),
(1.,
1.),
(0.5555555555555553,
0.888888888888889,
0.5555555555555553),
(0.3478548451374539,
0.6521451548625462,
0.6521451548625462,
0.3478548451374539),
(0.2369268850561887,
0.4786286704993665,
0.5688888888888889,
0.4786286704993665,
0.2369268850561887))
dunavant_x = (
(0.333333333333333,),
(0.666666666666667, 0.166666666666667, 0.166666666666667),
(0.333333333333333, 0.600000000000000,
0.200000000000000, 0.200000000000000),
(0.108103018168070, 0.445948490915965, 0.445948490915965,
0.816847572980459, 0.091576213509771, 0.091576213509771)
)
dunavant_y = (
(0.333333333333333,),
(0.166666666666667, 0.166666666666667, 0.666666666666667),
(0.333333333333333, 0.200000000000000,
0.200000000000000, 0.600000000000000),
(0.445948490915965, 0.445948490915965, 0.108103018168070,
0.091576213509771, 0.091576213509771, 0.816847572980459)
)
dunavant_w = (
(1.0,),
(0.333333333333333, 0.333333333333333, 0.333333333333333),
(-0.562500000000000, 0.520833333333333,
0.520833333333333, 0.520833333333333),
(0.223381589678011, 0.223381589678011, 0.223381589678011,
0.109951743655322, 0.109951743655322, 0.109951743655322)
)
dunavant_n = [1, 2, 3, 6]
tet_x = (
(0.25,),
(0.1381966011250110, 0.5854101966249680,
0.1381966011250110, 0.1381966011250110)
)
tet_y = (
(0.25,),
(0.1381966011250110, 0.1381966011250110,
0.5854101966249680, 0.1381966011250110)
)
tet_z = (
(0.25,),
(0.1381966011250110, 0.1381966011250110,
0.1381966011250110, 0.5854101966249680)
)
tet_w = (
(1.0,),
(0.25, 0.25, 0.25, 0.25)
)
def length(p1, p2):
return np.sqrt((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2 + (p1[2] - p2[2]) ** 2)
def length2(p1, p2):
return (p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2 + (p1[2] - p2[2]) ** 2
def G(r1, r2):
r = length(r1, r2)
if r < 1e-12:
print 'warning: G(r1, r2) called with (nearly) coincident points, returning 0'
return 0
return 1.0 / (r)
def compute_area(p1, p2, p3):
a = length(p1, p2)
b = length(p1, p3)
c = length(p2, p3)
s = (a + b + c) / 2.0
return np.sqrt(s * (s - a) * (s - b) * (s - c))
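# Example (added for clarity): the right triangle with vertices (0, 0, 0),
# (3, 0, 0) and (0, 4, 0) has side lengths 3, 4 and 5, so s = 6 and
# Heron's formula gives sqrt(6 * 3 * 2 * 1) = 6.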
def compute_cell_volume(mesh):
V = df.FunctionSpace(mesh, 'DG', 0)
v = df.TestFunction(V)
tet_vol = df.assemble(v * df.dx)
return tet_vol.array()
def compute_minus_node_volume_vector(mesh):
V = VectorFunctionSpace(mesh, 'Lagrange', 1)
v = df.TestFunction(V)
node_vol = df.assemble(df.dot(v,
df.Constant([-1, -1, -1])) * df.dx)
return node_vol.array()
def compute_node_volume(mesh):
V = FunctionSpace(mesh, 'Lagrange', 1)
v = df.TestFunction(V)
node_vol = df.assemble(v * df.dx)
return node_vol.array()
def compute_node_area(mesh):
V = FunctionSpace(mesh, 'Lagrange', 1)
v = df.TestFunction(V)
node_area = df.assemble(v * df.ds)
tmp = node_area.array()
print 'area is: ', sum(tmp)
for i in range(len(tmp)):
if tmp[i] == 0:
tmp[i] = 1
return tmp
def compute_det(x1, y1, z1, x2, y2, z2, x3, y3, z3):
J1 = (x2 - x1) * (z3 - z1) - (x3 - x1) * (z2 - z1)
J2 = (x2 - x1) * (y3 - y1) - (x3 - x1) * (y2 - y1)
J3 = (y2 - y1) * (z3 - z1) - (y3 - y1) * (z2 - z1)
return J1, J2, J3
def compute_correction_simplified(sa, sb, sc, p1, p2, p3):
x1, y1, z1 = p1
x2, y2, z2 = p2
x3, y3, z3 = p3
J1, J2, J3 = compute_det(x1, y1, z1, x2, y2, z2, x3, y3, z3)
r1 = np.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2 + (z2 - z1) ** 2)
r2 = np.sqrt((x3 - x1) ** 2 + (y3 - y1) ** 2 + (z3 - z1) ** 2)
r3 = np.sqrt((x3 - x2) ** 2 + (y3 - y2) ** 2 + (z3 - z2) ** 2)
fa = np.sqrt(J1 * J1 + J2 * J2 + J3 * J3)
fb = (sb - sc) * (r1 - r2) / (2 * r3 * r3)
fc = (sb + sc + 2 * sa) / (4.0 * r3) + \
(r2 * r2 - r1 * r1) * (sb - sc) / (4.0 * r3 ** 3)
fd = np.log(r1 + r2 + r3) - np.log(r1 + r2 - r3)
return fa * (fb + fc * fd)
class FastDemag():
def __init__(self, Vv, m, Ms, triangle_p=1, tetrahedron_p=0, p=6, mac=0.5):
self.m = m
self.Vv = Vv
self.Ms = Ms
self.triangle_p = triangle_p
self.tetrahedron_p = tetrahedron_p
self.mesh = Vv.mesh()
self.V = FunctionSpace(self.mesh, 'Lagrange', 1)
self.phi = Function(self.V)
self.phi_charge = Function(self.V)
self.field = Function(self.Vv)
u = TrialFunction(self.V)
v = TestFunction(self.Vv)
a = inner(grad(u), v) * dx
self.G = df.assemble(a)
self.L = compute_minus_node_volume_vector(self.mesh)
# self.compute_gauss_coeff_triangle()
# self.compute_gauss_coeff_tetrahedron()
# self.compute_affine_transformation_surface()
# self.compute_affine_transformation_volume()
# self.nodes=np.array(self.s_nodes+self.v_nodes)
# self.weights=np.array(self.s_weight+self.v_weight)
# self.charges=np.array(self.s_charge+self.v_charge)
self.compute_triangle_normal()
fast_sum = FastSum(
p=p, mac=mac, num_limit=500, triangle_p=triangle_p, tetrahedron_p=tetrahedron_p)
xt = self.mesh.coordinates()
tet_nodes = np.array(self.mesh.cells(), dtype=np.int32)
fast_sum.init_mesh(
xt, self.t_normals, self.face_nodes_array, tet_nodes)
self.fast_sum = fast_sum
self.res = np.zeros(len(self.mesh.coordinates()))
def compute_gauss_coeff_triangle(self):
n = self.triangle_p
self.s_x = dunavant_x[n]
self.s_y = dunavant_y[n]
self.s_w = np.array(dunavant_w[n]) / 2.0
print self.s_x, self.s_y
def compute_gauss_coeff_tetrahedron(self):
n = self.tetrahedron_p
self.v_x = tet_x[n]
self.v_y = tet_y[n]
self.v_z = tet_z[n]
self.v_w = np.array(tet_w[n]) / 6.0
def compute_triangle_normal(self):
self.face_nodes = []
self.face_norms = []
self.t_normals = []
for face in df.faces(self.mesh):
t = face.normal() # one must call normal() before entities(3),...
cells = face.entities(3)
if len(cells) == 1:
face_nodes = face.entities(0)
self.face_nodes.append(face_nodes)
self.face_norms.append(t)
self.t_normals.append([t.x(), t.y(), t.z()])
self.t_normals = np.array(self.t_normals)
self.face_nodes_array = np.array(self.face_nodes, dtype=np.int32)
def compute_affine_transformation_surface(self):
m = self.m.vector().array()
m = m.reshape((-1, 3), order='F')
cs = self.mesh.coordinates()
self.face_nodes = []
self.face_norms = []
self.t_normals = []
for face in df.faces(self.mesh):
t = face.normal() # one must call normal() before entities(3),...
cells = face.entities(3)
if len(cells) == 1:
face_nodes = face.entities(0)
self.face_nodes.append(face_nodes)
self.face_norms.append(t)
self.t_normals.append([t.x(), t.y(), t.z()])
self.t_normals = np.array(self.t_normals)
self.face_nodes_array = np.array(self.face_nodes, dtype=np.int32)
self.s_nodes = []
self.s_weight = []
self.s_charge = []
def compute_det_xy(x1, y1, z1, x2, y2, z2, x3, y3, z3):
a = y2 * z1 - y3 * z1 - y1 * z2 + y3 * z2 + y1 * z3 - y2 * z3
b = x2 * z1 - x3 * z1 - x1 * z2 + x3 * z2 + x1 * z3 - x2 * z3
c = x2 * y1 - x3 * y1 - x1 * y2 + x3 * y2 + x1 * y3 - x2 * y3
det = abs((x2 - x1) * (y3 - y1) - (x3 - x1) * (y2 - y1))
det *= np.sqrt((a * a + b * b) / (c * c) + 1)
return det
for i in range(len(self.face_nodes)):
f_c = self.face_nodes[i]
x1, y1, z1 = cs[f_c[0]]
x2, y2, z2 = cs[f_c[1]]
x3, y3, z3 = cs[f_c[2]]
c11 = x2 - x1
c12 = x3 - x1
c21 = y2 - y1
c22 = y3 - y1
c31 = z2 - z1
c32 = z3 - z1
t = self.face_norms[i]
if abs(t.z()) > abs(t.x()) and abs(t.z()) > abs(t.y()):
det = compute_det_xy(x1, y1, z1, x2, y2, z2, x3, y3, z3)
elif abs(t.y()) > abs(t.x()):
det = compute_det_xy(z1, x1, y1, z2, x2, y2, z3, x3, y3)
else:
det = compute_det_xy(y1, z1, x1, y2, z2, x2, y3, z3, x3)
sa = (m[f_c[0]][0] * t.x() + m[f_c[0]][1]
* t.y() + m[f_c[0]][2] * t.z())
sb = (m[f_c[1]][0] * t.x() + m[f_c[1]][1]
* t.y() + m[f_c[1]][2] * t.z())
sc = (m[f_c[2]][0] * t.x() + m[f_c[2]][1]
* t.y() + m[f_c[2]][2] * t.z())
for j in range(len(self.s_x)):
x = c11 * self.s_x[j] + c12 * self.s_y[j] + x1
y = c21 * self.s_x[j] + c22 * self.s_y[j] + y1
z = c31 * self.s_x[j] + c32 * self.s_y[j] + z1
self.s_nodes.append([x, y, z])
self.s_weight.append(det * self.s_w[j])
self.s_charge.append(
sa + (sb - sa) * self.s_x[j] + (sc - sa) * self.s_y[j])
def compute_affine_transformation_volume(self):
v = TestFunction(self.V)
K = df.assemble(df.div(self.m) * v * df.dx)
L = df.assemble(v * df.dx)
rho = K.array() / L.array()
cs = self.mesh.coordinates()
m = self.m.vector().array()
n = len(m) / 3
def compute_divergence(cell):
i = cell.entities(0)
x1, y1, z1 = cs[i[1]] - cs[i[0]]
x2, y2, z2 = cs[i[2]] - cs[i[0]]
x3, y3, z3 = cs[i[3]] - cs[i[0]]
m0 = np.array([m[i[0]], m[i[0] + n], m[i[0] + 2 * n]])
m1 = np.array([m[i[1]], m[i[1] + n], m[i[1] + 2 * n]]) - m0
m2 = np.array([m[i[2]], m[i[2] + n], m[i[2] + 2 * n]]) - m0
m3 = np.array([m[i[3]], m[i[3] + n], m[i[3] + 2 * n]]) - m0
a1 = [y3 * z2 - y2 * z3, -x3 * z2 + x2 * z3, x3 * y2 - x2 * y3]
a2 = [-y3 * z1 + y1 * z3, x3 * z1 - x1 * z3, -x3 * y1 + x1 * y3]
a3 = [y2 * z1 - y1 * z2, -x2 * z1 + x1 * z2, x2 * y1 - x1 * y2]
v = x3 * y2 * z1 - x2 * y3 * z1 - x3 * y1 * z2 + \
x1 * y3 * z2 + x2 * y1 * z3 - x1 * y2 * z3
tmp = 0
for j in range(3):
tmp += a1[j] * m1[j] + a2[j] * m2[j] + a3[j] * m3[j]
tmp = -1.0 * tmp / v
return tmp, abs(v)
self.v_nodes = []
self.v_weight = []
self.v_charge = []
for cell in df.cells(self.mesh):
i = cell.entities(0)
rho, det = compute_divergence(cell)
x0, y0, z0 = cs[i[0]]
c11, c12, c13 = cs[i[1]] - cs[i[0]]
c21, c22, c23 = cs[i[2]] - cs[i[0]]
c31, c32, c33 = cs[i[3]] - cs[i[0]]
for j in range(len(self.v_w)):
x = c11 * self.v_x[j] + c21 * \
self.v_y[j] + c31 * self.v_z[j] + x0
y = c12 * self.v_x[j] + c22 * \
self.v_y[j] + c32 * self.v_z[j] + y0
z = c13 * self.v_x[j] + c23 * \
self.v_y[j] + c33 * self.v_z[j] + z0
self.v_charge.append(rho)
self.v_nodes.append([x, y, z])
self.v_weight.append(det * self.v_w[j])
def sum_directly(self):
cs = self.mesh.coordinates()
m = len(cs)
n = len(self.nodes)
res = np.zeros(m)
for i in range(m):
for j in range(n):
res[i] += G(cs[i], self.nodes[j]) * \
self.weights[j] * self.charges[j]
print 'directly', res
self.phi.vector().set_local(res)
def compute_field(self):
m = self.m.vector().array()
self.fast_sum.update_charge(m)
self.fast_sum.fastsum(self.res)
# self.fast_sum.exactsum(res)
self.fast_sum.compute_correction(m, self.res)
self.phi.vector().set_local(self.res * self.Ms / (4 * np.pi))
demag_field = self.G * self.phi.vector()
return demag_field.array() / self.L
class Demag():
def __init__(self, triangle_p=1, tetrahedron_p=1, p=3, mac=0.3, num_limit=100):
self.triangle_p = triangle_p
self.tetrahedron_p = tetrahedron_p
self.p = p
self.mac = mac
self.num_limit = num_limit
self.in_jacobian = False
def setup(self, Vv, m, Ms, unit_length=1):
self.m = m
self.Vv = Vv
self.Ms = Ms
self.mesh = Vv.mesh()
self.find_max_d()
# Note that the following change to the mesh coordinates will
# be reflected in the vector returned by self.mesh.coordinates().
# Unfortunately, we cannot say:
#
# self.mesh.coordinates()[:] /= self.max_d
#
# because Cython complains about this when compiling the
# binary distribution.
coords = self.mesh.coordinates()
coords /= self.max_d
self.V = FunctionSpace(self.mesh, 'Lagrange', 1)
self.phi = Function(self.V)
self.phi_charge = Function(self.V)
self.field = Function(self.Vv)
u = TrialFunction(self.V)
v = TestFunction(self.Vv)
a = inner(grad(u), v) * dx
self.G = df.assemble(a)
self.L = compute_minus_node_volume_vector(self.mesh)
self.compute_triangle_normal()
fast_sum = FastSum(p=self.p, mac=self.mac, num_limit=self.num_limit,
triangle_p=self.triangle_p, tetrahedron_p=self.tetrahedron_p)
xt = self.mesh.coordinates()
tet_nodes = np.array(self.mesh.cells(), dtype=np.int32)
fast_sum.init_mesh(
xt, self.t_normals, self.face_nodes_array, tet_nodes)
self.fast_sum = fast_sum
self.res = np.zeros_like(xt)
xt *= self.max_d # this changes self.mesh.coordinates() in-place!
def compute_triangle_normal(self):
self.face_nodes = []
self.face_norms = []
self.t_normals = []
for face in df.faces(self.mesh):
t = face.normal() # one must call normal() before entities(3),...
cells = face.entities(3)
if len(cells) == 1:
face_nodes = face.entities(0)
self.face_nodes.append(face_nodes)
self.face_norms.append(t)
self.t_normals.append([t.x(), t.y(), t.z()])
self.t_normals = np.array(self.t_normals)
self.face_nodes_array = np.array(self.face_nodes, dtype=np.int32)
def find_max_d(self):
xt = self.mesh.coordinates()
max_v = xt.max(axis=0)
min_v = xt.min(axis=0)
max_d = max(max_v - min_v)
self.max_d = max_d
def compute_field(self):
m = self.m.vector().array()
self.fast_sum.update_charge(m)
self.fast_sum.fastsum(self.res)
self.fast_sum.compute_correction(m, self.res)
self.phi.vector().set_local(self.res * self.Ms / (4 * np.pi))
demag_field = self.G * self.phi.vector()
return demag_field.array() / self.L
if __name__ == "__main__":
n = 10
#mesh = UnitCubeMesh(n, n, n)
#mesh = BoxMesh(df.Point(-1, 0, 0), df.Point(1, 1, 1), 10, 2, 2)
mesh = UnitSphere(5)
Vv = df.VectorFunctionSpace(mesh, 'Lagrange', 1)
Ms = 8.6e5
#expr = df.Expression(('cos(x[0])', 'sin(x[0])', '0'), degree=1)
m = interpolate(Constant((1, 0, 0)), Vv)
demag = Demag(triangle_p=1, tetrahedron_p=1, mac=0)
demag.setup(Vv, m, Ms)
demag.compute_field()
print '=' * 100, 'exact\n', demag.res
exact = demag.res
for p in [2, 3, 4, 5, 6, 7]:
demag = Demag(triangle_p=1, tetrahedron_p=1, mac=0.4, p=p)
demag.setup(Vv, m, Ms)
demag.compute_field()
print '=' * 100, 'mac=0.4 p=%d\n' % p, np.average(np.abs((demag.res - exact) / exact))
finmag | finmag-master/src/finmag/energies/demag/fk_demag_2d.py
import numpy as np
import dolfin as df
from aeon import timer
from finmag.field import Field
from finmag.native.llg import compute_bem_fk
from finmag.util.consts import mu0
from finmag.util.meshes import nodal_volume
from finmag.util import helpers
from fk_demag import FKDemag
class Demag2D(FKDemag):
"""
To compute the demagnetisation field on a 2d mesh using the normal
Fredkin-Koehler method, the idea is to construct a 3d mesh based on the
given 2d mesh.
"""
def __init__(self, name='Demag2D', thickness=1, thin_film=False):
self.name = name
self.thickness = thickness
super(Demag2D, self).__init__(name=name, thin_film=thin_film)
def create_3d_mesh(self, mesh):
nv = mesh.num_vertices()
nc = mesh.num_cells()
h = self.thickness
mesh3 = df.Mesh()
editor = df.MeshEditor()
editor.open(mesh3, 3, 3)
editor.init_vertices(2 * nv)
editor.init_cells(3 * nc)
for v in df.vertices(mesh):
i = v.index()
p = v.point()
editor.add_vertex(i, p.x(), p.y(), 0)
editor.add_vertex(i + nv, p.x(), p.y(), h)
gid = 0
for c in df.cells(mesh):
i, j, k = c.entities(0)
editor.add_cell(gid, i, j, k, i + nv)
gid = gid + 1
editor.add_cell(gid, j, j + nv, k, i + nv)
gid = gid + 1
editor.add_cell(gid, k, k + nv, j + nv, i + nv)
gid = gid + 1
editor.close()
return mesh3
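# A small sketch of what create_3d_mesh does (illustrative numbers, not
# part of the original code): each 2d triangle (i, j, k) is extruded to a
# prism with top vertices (i + nv, j + nv, k + nv), which the loop above
# cuts into three tetrahedra. A unit-square mesh with 4 vertices and
# 2 triangles therefore becomes a slab mesh with 8 vertices and 6 cells.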
def build_mapping(self, S3, V3):
"""
S3 is the vector function space of the 2d mesh
V3 is the vector function space of the corresponding 3d mesh
"""
vert_to_dof2 = df.vertex_to_dof_map(S3)
dof_to_vert2 = df.dof_to_vertex_map(S3)
vert_to_dof3 = df.vertex_to_dof_map(V3)
dof_to_vert3 = df.dof_to_vertex_map(V3)
map_2d_to_3d = np.zeros(V3.dim(), dtype=np.int32)
n2d = S3.dim()
for i in range(n2d):
map_2d_to_3d[i] = vert_to_dof3[dof_to_vert2[i]]
map_2d_to_3d[i + n2d] = vert_to_dof3[dof_to_vert2[i] + n2d]
self.map_2d_to_3d = map_2d_to_3d
# print map_2d_to_3d
n3d = V3.dim()
map_3d_to_2d = np.zeros(V3.dim(), dtype=np.int32)
for i in range(V3.dim()):
map_3d_to_2d[i] = vert_to_dof2[dof_to_vert3[i] % n2d]
self.map_3d_to_2d = map_3d_to_2d
# print map_3d_to_2d
def create_dg3_from_dg2(self, mesh, dg2):
self.dg3 = df.FunctionSpace(mesh, 'DG', 0)
class HelperExpression(df.Expression):
def __init__(self, value):
super(HelperExpression, self).__init__()
self.fun = value
def eval(self, value, x):
value[0] = self.fun((x[0], x[1]))
hexp = HelperExpression(dg2)
fun = df.interpolate(hexp, self.dg3)
return fun
def setup(self, m, Ms, unit_length=1):
"""
Setup the FKDemag instance. Usually called automatically by the
Simulation object.
*Arguments*
m: finmag.Field
The unit magnetisation on a finite element space.
Ms: float
The saturation magnetisation in A/m.
unit_length: float
The length (in m) represented by one unit on the mesh. Default 1.
"""
self.m = m
self.unit_length = unit_length
mesh = m.mesh()
mesh3 = self.create_3d_mesh(mesh)
V1 = df.FunctionSpace(mesh3, "Lagrange", 1)
V3 = df.VectorFunctionSpace(mesh3, "Lagrange", 1)
S3 = df.VectorFunctionSpace(mesh, "Lagrange", 1, dim=3)
mm = Field(V3, df.Function(V3))
self.build_mapping(S3, V3)
Ms_dg3_value = self.create_dg3_from_dg2(mesh3, Ms)
Ms_dg3 = Field(self.dg3, Ms_dg3_value)
super(Demag2D, self).setup(mm, Ms_dg3, unit_length)
def compute_energy(self):
"""
Compute the total energy of the field.
.. math::
E_\\mathrm{d} = -\\frac12 \\mu_0 \\int_\\Omega
H_\\mathrm{d} \\cdot \\vec M \\mathrm{d}x
*Returns*
Float
The energy of the demagnetising field.
"""
self._H_func.vector()[:] = self.__compute_field()
return df.assemble(self._E) * self.unit_length ** self.m.mesh_dim()
@timer.method
def energy_density(self):
"""
Compute the energy density in the field.
.. math::
\\rho = \\frac{E_{\\mathrm{d}, i}}{V_i},
where V_i is the volume associated with the node i.
*Returns*
numpy.ndarray
The energy density of the demagnetising field.
"""
self._H_func.vector()[:] = self.__compute_field()
nodal_E = df.assemble(self._nodal_E).array() * \
self.unit_length ** self.m.mesh_dim()
return nodal_E / self._nodal_volumes
def __compute_field(self):
self.m.set_with_numpy_array_debug(
self.m.get_numpy_array_debug()[self.map_3d_to_2d])
self._compute_magnetic_potential()
return self._compute_gradient()
def compute_field(self):
"""
Compute the demagnetising field.
*Returns*
numpy.ndarray
The demagnetising field.
"""
f = self.__compute_field()[self.map_2d_to_3d]
f.shape = (2, -1)
f_avg = (f[0] + f[1]) / 2.0
f.shape = (-1,)
return f_avg
finmag | finmag-master/src/finmag/energies/demag/benchmark.py
import time
import numpy as np
import dolfin as df
from finmag.energies import Demag
from finmag.field import Field
from finmag.util.meshes import sphere
import matplotlib.pyplot as plt
radius = 5.0
maxhs = [0.2, 0.4, 0.5, 0.6, 0.7, 0.8, 0.85, 0.9, 0.95, 1.0]
unit_length = 1e-9
m_0 = (1, 0, 0)
Ms = 1
H_ref = np.array((- Ms / 3.0, 0, 0))
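# For a uniformly magnetised sphere the demagnetising field is H = -M/3
# (standard magnetostatics); this is the analytic reference used for the
# relative error below.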
vertices = []
solvers = ["FK", "FK", "GCR", "Treecode"]
solvers_label = ["FK", "FK opt", "GCR", "Treecode"]
timings = [[], [], [], []]
errors = [[], [], [], []]
for maxh in maxhs:
mesh = sphere(r=radius, maxh=maxh, directory="meshes")
vertices.append(mesh.num_vertices())
S3 = df.VectorFunctionSpace(mesh, "Lagrange", 1)
m_function = df.Function(S3)
m_function.assign(df.Constant(m_0))
m = Field(S3, m_function)
for i, solver in enumerate(solvers):
demag = Demag(solver)
if solver == "FK":
if i == 0:
demag.parameters["phi_1_solver"] = "default"
demag.parameters["phi_1_preconditioner"] = "default"
demag.parameters["phi_2_solver"] = "default"
demag.parameters["phi_2_preconditioner"] = "default"
if i == 1:
demag.parameters["phi_1_solver"] = "cg"
demag.parameters["phi_1_preconditioner"] = "ilu"
demag.parameters["phi_2_solver"] = "cg"
demag.parameters["phi_2_preconditioner"] = "ilu"
demag.setup(m, Ms, unit_length)
start = time.time()
for j in xrange(10):
H = demag.compute_field()
elapsed = (time.time() - start) / 10.0
H = H.reshape((3, -1)).mean(axis=1)
error = abs(H[0] - H_ref[0]) / abs(H_ref[0])
timings[i].append(elapsed)
errors[i].append(error)
fig = plt.figure()
ax = fig.add_subplot(211)
ax.set_title("Runtime")
for i, solver in enumerate(solvers):
ax.plot(vertices, timings[i], label=solvers_label[i])
ax.legend(loc=2)
ax.set_xlabel("vertices")
ax.set_ylabel("time (s)")
ax = fig.add_subplot(212)
ax.set_title("Inaccuracy")
for i, solver in enumerate(solvers):
ax.plot(vertices, errors[i], label=solvers_label[i])
ax.legend(loc=2)
ax.set_xlabel("vertices")
ax.set_ylabel("relative error (%)")
fig.tight_layout()
fig.savefig("benchmark.png")
finmag | finmag-master/src/finmag/energies/demag/treecode_bem.py
import logging
import numpy as np
import dolfin as df
import distutils
from finmag.field import Field
from finmag.util.consts import mu0
from finmag.util.meshes import nodal_volume
from finmag.native.treecode_bem import FastSum
from finmag.native.treecode_bem import compute_solid_angle_single
from finmag.util import helpers
from fk_demag import FKDemag
logger = logging.getLogger(name='finmag')
class TreecodeBEM(FKDemag):
def __init__(self, mac=0.3, p=3, num_limit=100, correct_factor=10,
type_I=True, name='Demag', macrogeometry=None, thin_film=False):
super(TreecodeBEM, self).__init__(
name=name, macrogeometry=macrogeometry, thin_film=thin_film)
self.mac = mac
self.p = p
self.num_limit = num_limit
self.correct_factor = correct_factor
self.type_I = type_I
def setup(self, m, Ms, unit_length=1):
self.m = m
self.Ms = Ms
self.unit_length = unit_length
mesh = m.mesh()
self.S1 = df.FunctionSpace(mesh, "Lagrange", 1)
self.dim = mesh.topology().dim()
self._test1 = df.TestFunction(self.S1)
self._trial1 = df.TrialFunction(self.S1)
self._test3 = df.TestFunction(self.m.functionspace)
self._trial3 = df.TrialFunction(self.m.functionspace)
# for computation of energy
self._nodal_volumes = nodal_volume(self.S1, unit_length)
# we will copy field into this when we need the energy
self._H_func = df.Function(self.m.functionspace)
self._E_integrand = -0.5 * mu0 * \
df.dot(self._H_func, self.m.f * self.Ms.f)
self._E = self._E_integrand * df.dx
self._nodal_E = df.dot(self._E_integrand, self._test1) * df.dx
self._nodal_E_func = df.Function(self.S1)
# for computation of field and scalar magnetic potential
self._poisson_matrix = self._poisson_matrix()
self._poisson_solver = df.KrylovSolver(self._poisson_matrix.copy(),
self.parameters['phi_1_solver'], self.parameters['phi_1_preconditioner'])
self._poisson_solver.parameters.update(self.parameters['phi_1'])
self._laplace_zeros = df.Function(self.S1).vector()
self._laplace_solver = df.KrylovSolver(
self.parameters['phi_2_solver'], self.parameters['phi_2_preconditioner'])
self._laplace_solver.parameters.update(self.parameters['phi_2'])
# We're setting 'same_nonzero_pattern=True' to enforce the
# same matrix sparsity pattern across different demag solves,
# which should speed up things.
self._laplace_solver.parameters["preconditioner"][
"structure"] = "same_nonzero_pattern"
# solution of inhomogeneous Neumann problem
self._phi_1 = df.Function(self.S1)
# solution of Laplace equation inside domain
self._phi_2 = df.Function(self.S1)
self._phi = df.Function(self.S1) # magnetic potential phi_1 + phi_2
# To be applied to the vector field m as first step of computation of _phi_1.
# This gives us div(M), which is equal to Laplace(_phi_1), equation
# which is then solved using _poisson_solver.
self._Ms_times_divergence = df.assemble(
self.Ms.f * df.inner(self._trial3, df.grad(self._test1)) * df.dx)
# we move the boundary condition here to avoid creating a new instance
# each time the magnetic potential is computed
self.boundary_condition = df.DirichletBC(
self.S1, self._phi_2, df.DomainBoundary())
self.boundary_condition.apply(self._poisson_matrix)
self._setup_gradient_computation()
self.mesh = self.m.mesh()
self.bmesh = df.BoundaryMesh(self.mesh, 'exterior', False)
#self.b2g_map = self.bmesh.vertex_map().array()
self._b2g_map = self.bmesh.entity_map(0).array()
self.compute_triangle_normal()
self.__compute_bsa()
fast_sum = FastSum(p=self.p, mac=self.mac, num_limit=self.num_limit,
correct_factor=self.correct_factor, type_I=self.type_I)
coords = self.bmesh.coordinates()
face_nodes = np.array(self.bmesh.cells(), dtype=np.int32)
fast_sum.init_mesh(coords, self.t_normals, face_nodes, self.vert_bsa)
self.fast_sum = fast_sum
self.phi2_b = np.zeros(self.bmesh.num_vertices())
def __compute_bsa(self):
vert_bsa = np.zeros(self.mesh.num_vertices())
mc = self.mesh.cells()
xyz = self.mesh.coordinates()
for i in range(self.mesh.num_cells()):
for j in range(4):
tmp_omega = compute_solid_angle_single(
xyz[mc[i][j]],
xyz[mc[i][(j + 1) % 4]],
xyz[mc[i][(j + 2) % 4]],
xyz[mc[i][(j + 3) % 4]])
vert_bsa[mc[i][j]] += tmp_omega
vert_bsa = vert_bsa / (4 * np.pi) - 1
self.vert_bsa = vert_bsa[self._b2g_map]
def compute_triangle_normal(self):
self.t_normals = []
for face in df.faces(self.mesh):
t = face.normal() # one must call normal() before entities(3),...
cells = face.entities(3)
if len(cells) == 1:
self.t_normals.append([t.x(), t.y(), t.z()])
self.t_normals = np.array(self.t_normals)
def _compute_magnetic_potential(self):
# compute _phi_1 on the whole domain
g_1 = self._Ms_times_divergence * self.m.f.vector()
self._poisson_solver.solve(self._phi_1.vector(), g_1)
# compute _phi_2 on the boundary using the Dirichlet boundary
# conditions we get from BEM * _phi_1 on the boundary.
phi_1 = self._phi_1.vector()[self._b2g_map]
# In dolfin 1.4 and lower, '.array()' is required to access the array of
# a dolfin function. This is not needed in dolfin 1.5 and will cause an
# error if present. We therefore check which dolfin version is in use and
# act accordingly in the following lines.
if distutils.version.LooseVersion(df.__version__) >= '1.5.0':
self.fast_sum.fastsum(self.phi2_b, phi_1)
else:
self.fast_sum.fastsum(self.phi2_b, phi_1.array())
self._phi_2.vector()[self._b2g_map[:]] = self.phi2_b
A = self._poisson_matrix
b = self._laplace_zeros
self.boundary_condition.set_value(self._phi_2)
self.boundary_condition.apply(A, b)
# compute _phi_2 on the whole domain
self._laplace_solver.solve(A, self._phi_2.vector(), b)
# add _phi_1 and _phi_2 to obtain magnetic potential
self._phi.vector()[:] = self._phi_1.vector() + self._phi_2.vector()
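# Note (added for clarity): this mirrors FKDemag._compute_magnetic_potential,
# except that the dense boundary-element product np.dot(self._bem, phi_1)
# used there is replaced by the treecode approximation
# self.fast_sum.fastsum(...), avoiding storage of the full dense BEM matrix.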
def compare_field(f1, f2):
f1.shape = (3, -1)
f2.shape = (3, -1)
d = f1 - f2
res = []
for i in range(d.shape[1]):
v = f1[0][i] ** 2 + f1[1][i] ** 2 + f1[2][i] ** 2
t = d[0][i] ** 2 + d[1][i] ** 2 + d[2][i] ** 2
res.append(t / v)
f1.shape = (-1,)
f2.shape = (-1,)
return np.max(np.sqrt(res))
if __name__ == "__main__":
n = 4
#mesh = UnitCubeMesh(n, n, n)
#mesh = BoxMesh(df.Point(-1, 0, 0), df.Point(1, 1, 1), 10, 2, 2)
# mesh=sphere(3.0,0.3)
# mesh=df.Mesh('tet.xml')
#
#expr = df.Expression(('4.0*sin(x[0])', '4*cos(x[0])','0'), degree=1)
from finmag.util.meshes import elliptic_cylinder, sphere
mesh = elliptic_cylinder(100, 150, 5, 4.5, directory='meshes')
# mesh=box(0,0,0,5,5,100,5,directory='meshes')
#mesh = df.BoxMesh(df.Point(0, 0, 0), df.Point(100, 2, 2), 400, 2, 2)
mesh = sphere(15, 1, directory='meshes')
Vv = df.VectorFunctionSpace(mesh, "Lagrange", 1)
Ms = 8.6e5
expr = df.Expression(('cos(x[0])', 'sin(x[0])', '0'), degree=1)
m = Field(Vv, value=expr)
#m = df.interpolate(df.Constant((1, 0, 0)), Vv)
from finmag.energies.demag.fk_demag import FKDemag
import time
demag = TreecodeBEM(
mac=0.4, p=5, num_limit=1, correct_factor=10, type_I=False)
demag.setup(m, Ms, unit_length=1e-9)
start2 = time.time()
f2 = demag.compute_field()
stop2 = time.time()
fk = FKDemag()
fk.setup(m, Ms, unit_length=1e-9)
start = time.time()
f1 = fk.compute_field()
stop = time.time()
f3 = f1 - f2
print f1[0:10], f2[0:10]
print np.average(np.abs(f3[:200] / f1[:200]))
print 'max error:', compare_field(f1, f2)
finmag | finmag-master/src/finmag/energies/demag/benchmark_fk.py
import time
import pickle
import numpy as np
import dolfin as df
import matplotlib as mpl
mpl.use("Agg")
import matplotlib.pyplot as plt
from finmag.energies import Demag
from finmag.field import Field
from finmag.native.llg import compute_bem_fk
from finmag.util.meshes import box
now = time.time
create_mesh = lambda maxh: box(0, 0, 0, 500, 25, 1, maxh, directory="meshes")
maxhs = np.arange(1.0, 3.7, 0.2)
Ms = 1
m_0 = (1, 0, 0)
unit_length = 1e-9
default_params = ("default", "default", "default", "default")
opt_params = ("cg", "ilu", "cg", "ilu")
repetitions = 10
results_file = "results_fk_benchmark.txt"
def run_demag(repetitions, params, m, Ms, unit_length, bem=None, b2g_map=None):
demag = Demag("FK")
demag.parameters["phi_1_solver"] = params[0]
demag.parameters["phi_1_preconditioner"] = params[1]
demag.parameters["phi_2_solver"] = params[2]
demag.parameters["phi_2_preconditioner"] = params[3]
if bem is not None:
demag.precomputed_bem(bem, b2g_map)
demag.setup(m, Ms, unit_length)
start = now()
for j in xrange(repetitions):
H = demag.compute_field()
runtime = (now() - start) / repetitions
return H, runtime
try:
results = np.loadtxt(results_file)
except IOError:
results = np.zeros((len(maxhs), 4))
for i, maxh in enumerate(maxhs):
print "Mesh {}/{} with maxh = {:.3}.".format(i + 1, len(maxhs), maxh)
mesh = create_mesh(maxh)
S3 = df.VectorFunctionSpace(mesh, "Lagrange", 1)
m_function = df.Function(S3)
m_function.assign(df.Constant(m_0))
m = Field(S3, m_function)
# Pre-compute BEM to save time.
bem, b2g_map = compute_bem_fk(df.BoundaryMesh(mesh, 'exterior', False))
print "Computing demagnetising field with default solver parameters..."
H_default, runtime_default = run_demag(
repetitions, default_params, m, Ms, unit_length, bem, b2g_map)
print "Computing demagnetising field with optimised solver parameters..."
H_opt, runtime_opt = run_demag(
repetitions, opt_params, m, Ms, unit_length, bem, b2g_map)
results[i, 0] = mesh.num_vertices()
results[i, 1] = runtime_default
results[i, 2] = runtime_opt
results[i, 3] = np.max(np.abs(H_default - H_opt))
np.savetxt(results_file, results) # Save results after every step.
vertices = results[:, 0]
runtimes_default = results[:, 1]
runtimes_opt = results[:, 2]
deviation = results[:, 3]
fig = plt.figure()
ax = fig.add_subplot(211)
ax.set_title("Runtime Comparison")
ax.plot(vertices, runtimes_default, label="default FK settings")
ax.plot(vertices, runtimes_opt, label="optimised FK settings")
ax.legend(loc=2)
ax.set_xlabel("vertices")
ax.set_ylabel("time (s)")
ax = fig.add_subplot(212)
ax.set_title("Deviation from Solution Obtained with Default Settings")
ax.plot(vertices, deviation)
ax.set_xlabel("vertices")
ax.set_ylabel("max. deviation")
fig.tight_layout()
fig.savefig("results_fk_benchmark.png")
finmag | finmag-master/src/finmag/energies/demag/fk_demag.py
"""
Computation of the demagnetising field using the Fredkin-Koehler
technique and the infamous magpar method.
Rationale: The previous implementation in FemBemFKSolver (child class
of FemBemDeMagSolver) was kind of a mess. This does the same thing in the
same amount of time with less code, and should be more conducive to further
optimisation as well as serving as a template for other techniques like the GCR.
"""
import numpy as np
import dolfin as df
import logging
from aeon import timer, Timer
from finmag.util.consts import mu0
from finmag.native.llg import compute_bem_fk
from finmag.util.meshes import nodal_volume
from finmag.util import helpers, configuration
from finmag.field import Field
from fk_demag_pbc import BMatrixPBC
logger = logging.getLogger('finmag')
fk_timer = Timer()
class FKDemag(object):
"""
Computation of the demagnetising field using the Fredkin-Koehler hybrid
FEM/BEM technique.
Fredkin, D.R. and Koehler, T.R., "`Hybrid method for computing
demagnetizing fields`_", IEEE Transactions on Magnetics, vol.26, no.2,
pp.415-417, Mar 1990.
.. _Hybrid method for computing demagnetizing fields:
http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=106342
"""
def __init__(self, name='Demag', thin_film=False, macrogeometry=None,
solver_type=None, parameters=None):
"""
Create a new FKDemag instance.
The attribute `parameters` is a dict that contains the settings for the
solvers for the Neumann (potential phi_1) and Laplace (potential phi_2)
problems.
Setting the method used by the solvers:
Change the entries `phi_1_solver` and `phi_2_solver` to a value from
`df.list_krylov_solver_methods()`. Default is dolfin's default.
Setting the preconditioners:
Change the entries `phi_1_preconditioner` and `phi_2_preconditioner` to
a value from `df.list_krylov_solver_preconditioners()`. Default is
dolfin's default. There is a set of parameters optimised for thin films
(cg/ilu followed by default without preconditioner) that can be used by
passing in the argument `thin_film` set to True.
Setting the tolerances:
Change the existing entries inside `phi_1` and `phi_2` which are
themselves dicts. You can add new entries to these dicts as well.
Everything which is understood by `df.KrylovSolver` is valid.
Allowed values for `solver_type` are 'Krylov', 'LU' and `None` (the
latter uses the value set in the .finmagrc file, defaulting to 'Krylov'
if no value is provided there).
"""
self.name = name
self.in_jacobian = False
default_parameters = {
'absolute_tolerance': 1e-6,
'relative_tolerance': 1e-6,
'maximum_iterations': int(1e4)
}
self.parameters = {
'phi_1_solver': 'default',
'phi_1_preconditioner': 'default',
'phi_1': default_parameters,
'phi_2_solver': 'default',
'phi_2_preconditioner': 'default',
'phi_2': default_parameters.copy()
}
if parameters is not None:
for (k, v) in parameters.items():
logger.debug(
"Setting demag solver parameter {}='{}'".format(k, v))
if k in ['phi_1', 'phi_2']:
# Since self.parameters['phi_1'] is a dictionary itself,
# only update the keys that are given (and similarly for
# 'phi_2').
for (k2, v2) in v.items():
self.parameters[k][k2] = v2
else:
self.parameters[k] = v
logger.debug("Demag parameters now: {}".format(self.parameters))
self.solver_type = solver_type
if thin_film:
self.parameters["phi_1_solver"] = "cg"
self.parameters["phi_1_preconditioner"] = "ilu"
self.parameters["phi_2_preconditioner"] = "none"
self.macrogeometry = macrogeometry
@timer.method
def setup(self, m, Ms, unit_length=1):
"""
Setup the FKDemag instance. Usually called automatically by the
Simulation object.
*Arguments*
m: finmag.Field
The unit magnetisation on a finite element space.
Ms: float
The saturation magnetisation in A/m.
unit_length: float
The length (in m) represented by one unit on the mesh. Default 1.
"""
assert isinstance(m, Field)
assert isinstance(Ms, Field)
self.m = m
self.Ms = Ms
self.unit_length = unit_length
self.S1 = df.FunctionSpace(self.m.mesh(), "Lagrange", 1)
self._test1 = df.TestFunction(self.S1)
self._trial1 = df.TrialFunction(self.S1)
self._test3 = df.TestFunction(self.m.functionspace)
self._trial3 = df.TrialFunction(self.m.functionspace)
# for computation of energy
self._nodal_volumes = nodal_volume(self.S1, unit_length)
self._H_func = df.Function(m.functionspace) # we will copy field into
# this when we need the
# energy
self._E_integrand = -0.5 * mu0 * \
df.dot(self._H_func, self.m.f * self.Ms.f)
self._E = self._E_integrand * df.dx
self._nodal_E = df.dot(self._E_integrand, self._test1) * df.dx
self._nodal_E_func = df.Function(self.S1)
# for computation of field and scalar magnetic potential
self._poisson_matrix = self._poisson_matrix()
self._laplace_zeros = df.Function(self.S1).vector()
# determine the solver type to be used (Krylov or LU); if the kwarg
# 'solver_type' is not provided, try to read the setting from the
# .finmagrc file; use 'Krylov' if this fails.
solver_type = self.solver_type
if solver_type is None:
solver_type = configuration.get_config_option(
'demag', 'solver_type', 'Krylov')
if solver_type == 'None': # if the user set 'solver_type = None' in
# the .finmagrc file, solver_type will be a
# string so we need to catch this here.
solver_type = 'Krylov'
logger.debug("Using {} solver for demag.".format(solver_type))
if solver_type == 'Krylov':
self._poisson_solver = df.KrylovSolver(self._poisson_matrix.copy(),
self.parameters['phi_1_solver'], self.parameters['phi_1_preconditioner'])
self._poisson_solver.parameters.update(self.parameters['phi_1'])
self._laplace_solver = df.KrylovSolver(
self.parameters['phi_2_solver'], self.parameters['phi_2_preconditioner'])
self._laplace_solver.parameters.update(self.parameters['phi_2'])
# We're setting 'same_nonzero_pattern=True' to enforce the
# same matrix sparsity pattern across different demag solves,
# which should speed up things.
#self._laplace_solver.parameters["preconditioner"][
# "structure"] = "same_nonzero_pattern"
elif solver_type == 'LU':
self._poisson_solver = df.LUSolver(self._poisson_matrix.copy())
self._laplace_solver = df.LUSolver()
self._poisson_solver.parameters["reuse_factorization"] = True
self._laplace_solver.parameters["reuse_factorization"] = True
else:
raise ValueError("Argument 'solver_type' must be either 'Krylov' or 'LU'. "
"Got: '{}'".format(solver_type))
with fk_timer('compute BEM'):
if not hasattr(self, "_bem"):
if self.macrogeometry is not None:
Ts = self.macrogeometry.compute_Ts(self.m.mesh())
pbc = BMatrixPBC(self.m.mesh(), Ts)
self._b2g_map = np.array(pbc.b2g_map, dtype=np.int)
self._bem = pbc.bm
else:
self._bem, self._b2g_map = compute_bem_fk(
df.BoundaryMesh(self.m.mesh(), 'exterior', False))
logger.debug("Boundary element matrix uses {:.2f} MB of memory.".format(
self._bem.nbytes / 1024. ** 2))
# solution of inhomogeneous Neumann problem
self._phi_1 = df.Function(self.S1)
# solution of Laplace equation inside domain
self._phi_2 = df.Function(self.S1)
self._phi = df.Function(self.S1) # magnetic potential phi_1 + phi_2
# To be applied to the vector field m as first step of computation of
# _phi_1. This gives us div(M), which is equal to Laplace(_phi_1),
# equation which is then solved using _poisson_solver.
self._Ms_times_divergence = df.assemble(
self.Ms.f * df.inner(self._trial3, df.grad(self._test1)) * df.dx)
# we move the boundary condition here to avoid creating a new instance
# each time the magnetic potential is computed
self.boundary_condition = df.DirichletBC(
self.S1, self._phi_2, df.DomainBoundary())
self.boundary_condition.apply(self._poisson_matrix)
self._setup_gradient_computation()
@timer.method
def precomputed_bem(self, bem, b2g_map):
"""
If the BEM and a boundary to global vertices map are known, they can be
passed to the FKDemag object with this method so it will skip
re-computing them.
"""
self._bem, self._b2g_map = bem, b2g_map
@timer.method
def compute_potential(self):
"""
Compute the magnetic potential.
*Returns*
df.Function
The magnetic potential.
"""
self._compute_magnetic_potential()
return self._phi
@timer.method
def compute_field(self):
"""
Compute the demagnetising field.
*Returns*
numpy.ndarray
The demagnetising field.
"""
self._compute_magnetic_potential()
return self._compute_gradient()
def average_field(self):
"""
Compute the average demag field.
"""
return helpers.average_field(self.compute_field())
@timer.method
def compute_energy(self):
"""
Compute the total energy of the field.
.. math::
E_\\mathrm{d} = -\\frac12 \\mu_0 \\int_\\Omega
H_\\mathrm{d} \\cdot \\vec M \\mathrm{d}x
*Returns*
Float
The energy of the demagnetising field.
"""
self._H_func.vector()[:] = self.compute_field()
return df.assemble(self._E) * self.unit_length ** self.m.mesh_dim()
@timer.method
def energy_density(self):
"""
Compute the energy density in the field.
.. math::
\\rho = \\frac{E_{\\mathrm{d}, i}}{V_i},
where V_i is the volume associated with the node i.
*Returns*
numpy.ndarray
The energy density of the demagnetising field.
"""
self._H_func.vector()[:] = self.compute_field()
nodal_E = df.assemble(self._nodal_E).array() * \
self.unit_length ** self.m.mesh_dim()
return nodal_E / self._nodal_volumes
@timer.method
def energy_density_function(self):
"""
Returns the energy density in the field as a dolfin function to allow probing.
*Returns*
dolfin.Function
The energy density of the demagnetising field.
"""
self._nodal_E_func.vector()[:] = self.energy_density()
return self._nodal_E_func
@fk_timer.method
def _poisson_matrix(self):
A = df.dot(df.grad(self._trial1), df.grad(self._test1)) * df.dx
return df.assemble(A) # stiffness matrix for Poisson equation
def _compute_magnetic_potential(self):
# compute _phi_1 on the whole domain
g_1 = self._Ms_times_divergence * self.m.f.vector()
with fk_timer("first linear solve"):
self._poisson_solver.solve(self._phi_1.vector(), g_1)
# compute _phi_2 on the boundary using the Dirichlet boundary
# conditions we get from BEM * _phi_1 on the boundary.
with fk_timer("using boundary conditions"):
phi_1 = self._phi_1.vector()[self._b2g_map]
self._phi_2.vector()[self._b2g_map[:]] = np.dot(
self._bem, phi_1)
#boundary_condition = df.DirichletBC(self.S1, self._phi_2, df.DomainBoundary())
#A = self._poisson_matrix.copy()
#b = self._laplace_zeros
#boundary_condition.apply(A, b)
A = self._poisson_matrix
b = self._laplace_zeros
self.boundary_condition.set_value(self._phi_2)
self.boundary_condition.apply(A, b)
# compute _phi_2 on the whole domain
with fk_timer("second linear solve"):
self._laplace_solver.solve(A, self._phi_2.vector(), b)
# add _phi_1 and _phi_2 to obtain magnetic potential
self._phi.vector()[:] = self._phi_1.vector() + self._phi_2.vector()
@fk_timer.method
def _setup_gradient_computation(self):
"""
Prepare the discretised gradient to use in :py:meth:`FKDemag._compute_gradient`.
We don't need the gradient field as a continuous field, we are only
interested in the values at specific points. It is thus a waste of
computational effort to use a projection of the gradient field, since
it performs the fairly large operation of assembling a matrix and
solving a linear system of equations.
"""
A = df.inner(self._test3, - df.grad(self._trial1)) * df.dx
# This can be applied to scalar functions.
self._gradient = df.assemble(A)
# The `A` above is in fact not quite the gradient, since we integrated
# over the volume as well. We will divide by the volume later, after
# the multiplication of the scalar magnetic potential. Since the two
# operations are symmetric (multiplying by volume, dividing by volume)
# we don't have to care for the units, i.e. unit_length.
b = df.dot(self._test3, df.Constant((1, 1, 1))) * df.dx
self._nodal_volumes_S3_no_units = df.assemble(b).array()
@fk_timer.method
def _compute_gradient(self):
"""
Get the demagnetising field from the magnetic scalar potential.
.. math::
\\vec{H}_{\\mathrm{d}} = - \\nabla \\phi (\\vec{r})
Using dolfin, we would translate this to
.. sourcecode::
H_d = df.project(- df.grad(self._phi), self.m.functionspace)
but the method used here is computationally less expensive.
"""
H = self._gradient * self._phi.vector()
return H.array() / self._nodal_volumes_S3_no_units
finmag | finmag-master/src/finmag/energies/demag/belement_magpar.py
import instant
def return_bele_magpar():
args = [["n_bvert", "bvert", "in"], ["facv1_n", "facv1", "in"], [
"facv2_n", "facv2", "in"], ["facv3_n", "facv3", "in"], ["matele_n", "matele"]]
return instant.inline_with_numpy(C_CODE, arrays=args)
C_CODE = """
int Bele(int n_bvert,double* bvert,int facv1_n, double* facv1, int facv2_n, double* facv2,
int facv3_n,double* facv3,int matele_n,double* matele);
#define ND 3 /**< space dimensions (no of cartesian coordinates) */
#define NV 4 /**< number of vertices(=degrees of freedom) per element */
#define NF 4 /**< number of faces per element */
#define NN 3 /**< number of vertices per face */
#define C_BND -1 /**< indicator for boundary node/face */
#define C_INT -2 /**< indicator for interior node/face */
#define C_UNK -4 /**< indicator for unknown state */
#define D_EPS 1e-14 /**< threshold for equality of two real numbers */
#define PETSC_PI 3.1415926535897932384626433832795L
#define my_daxpy(a,b,c,d,e,f) {(e)[0]+=b*(c)[0];(e)[1]+=b*(c)[1];(e)[2]+=b*(c)[2];}
#define my_dcopy(a,b,c,d,e) {(d)[0]=(b)[0];(d)[1]=(b)[1];(d)[2]=(b)[2];}
#define my_dnrm2(a,b,c) sqrt((b)[0]*(b)[0]+(b)[1]*(b)[1]+(b)[2]*(b)[2])
#define my_dscal(a,b,c,d) {(c)[0]*=b;(c)[1]*=b;(c)[2]*=b;}
#define my_ddot(a,b,c,d,e) ((b)[0]*(d)[0]+(b)[1]*(d)[1]+(b)[2]*(d)[2])
#define douter(a,b,c,d) {(d)[0]=(b)[1]*(c)[2]-(b)[2]*(c)[1];(d)[1]=(b)[2]*(c)[0]-(b)[0]*(c)[2];(d)[2]=(b)[0]*(c)[1]-(b)[1]*(c)[0];}
int PointFromPlane(double *x, double *v1, double *v2, double *v3, double *d)
{
// computes the distance between the point x and the plane defined by v1, v2, v3
// note that x, v1, v2 and v3 are 3-dimensional arrays (pointer to double)
double ab[ND],ac[ND]; // vectors ab and ac
double n[ND]; // vector n, normal to the plane
/* calculate edge vectors */
my_dcopy(ND,v1,1,ab,1); // ab is now v1
my_daxpy(ND,-1.0,v2,1,ab,1); // ab = ab - v2
my_dcopy(ND,v1,1,ac,1); // ac is now v1
my_daxpy(ND,-1.0,v3,1,ac,1); // ac = ac - v3
// summary: ab = v1 - v2
// ac = v1 - v3
/* calculate normal vector */
douter(ND,ab,ac,n); // n = cross(ab, ac)
/* calculate distance */
// normally, this would have to be divided by norm(n), because n is not a unit vector
*d=my_ddot(ND,x,1,n,1)-my_ddot(ND,v1,1,n,1); // d = x \dot n - v1 \dot n
// or (x-v1) \dot n
return(0);
}
int Bele(int n_bvert,double* bvert,int facv1_n, double* facv1, int facv2_n, double* facv2,
int facv3_n,double* facv3,int matele_n,double* matele)
{
double *rr,zeta[ND],zetal;
double rho1[ND],rho2[ND],rho3[ND];
double rho1l,rho2l,rho3l;
double s1,s2,s3;
double eta1[ND],eta2[ND],eta3[ND];
double eta1l,eta2l,eta3l;
double xi1[ND],xi2[ND],xi3[ND];
double gamma1[ND],gamma2[ND],gamma3[ND];
double p[ND],a,omega;
matele[0]=matele[1]=matele[2]=0.0;
/* get coordinates of face's vertices */
my_dcopy(ND,facv1,1,rho1,1);
my_dcopy(ND,facv2,1,rho2,1);
my_dcopy(ND,facv3,1,rho3,1);
/* calculate edge vectors and store them in xi_j */
my_dcopy(ND,rho2,1,xi1,1);
my_daxpy(ND,-1.0,rho1,1,xi1,1);
my_dcopy(ND,rho3,1,xi2,1);
my_daxpy(ND,-1.0,rho2,1,xi2,1);
my_dcopy(ND,rho1,1,xi3,1);
my_daxpy(ND,-1.0,rho3,1,xi3,1);
/* calculate zeta direction */
douter(ND,xi1,xi2,zeta);
/* calculate area of the triangle */
zetal=my_dnrm2(ND,zeta,1);
a=0.5*zetal;
/* renorm zeta */
my_dscal(ND,1.0/zetal,zeta,1);
/* calculate s_j and normalize xi_j */
s1=my_dnrm2(ND,xi1,1);
my_dscal(ND,1.0/s1,xi1,1);
s2=my_dnrm2(ND,xi2,1);
my_dscal(ND,1.0/s2,xi2,1);
s3=my_dnrm2(ND,xi3,1);
my_dscal(ND,1.0/s3,xi3,1);
douter(ND,zeta,xi1,eta1);
douter(ND,zeta,xi2,eta2);
douter(ND,zeta,xi3,eta3);
gamma1[0]=gamma3[1]=my_ddot(ND,xi2,1,xi1,1);
gamma1[1]=my_ddot(ND,xi2,1,xi2,1);
gamma1[2]=gamma2[1]=my_ddot(ND,xi2,1,xi3,1);
gamma2[0]=gamma3[2]=my_ddot(ND,xi3,1,xi1,1);
gamma2[2]=my_ddot(ND,xi3,1,xi3,1);
gamma3[0]=my_ddot(ND,xi1,1,xi1,1);
/* get R=rr */
rr=bvert;
double d;
PointFromPlane(rr,rho1,rho2,rho3,&d);
if (fabs(d)<D_EPS) return(0);
/* calculate rho_j */
my_daxpy(ND,-1.0,rr,1,rho1,1);
my_daxpy(ND,-1.0,rr,1,rho2,1);
my_daxpy(ND,-1.0,rr,1,rho3,1);
/* zetal gives ("normal") distance of R from the plane of the triangle */
zetal=my_ddot(ND,zeta,1,rho1,1);
/* skip the rest if zetal==0 (R in plane of the triangle)
-> omega==0 and zetal==0 -> matrix entry=0
*/
if (fabs(zetal)<=D_EPS) {
return(0);
}
rho1l=my_dnrm2(ND,rho1,1);
rho2l=my_dnrm2(ND,rho2,1);
rho3l=my_dnrm2(ND,rho3,1);
double t_nom,t_denom;
t_nom=
rho1l*rho2l*rho3l+
rho1l*my_ddot(ND,rho2,1,rho3,1)+
rho2l*my_ddot(ND,rho3,1,rho1,1)+
rho3l*my_ddot(ND,rho1,1,rho2,1);
t_denom=
sqrt(2.0*
(rho2l*rho3l+my_ddot(ND,rho2,1,rho3,1))*
(rho3l*rho1l+my_ddot(ND,rho3,1,rho1,1))*
(rho1l*rho2l+my_ddot(ND,rho1,1,rho2,1))
);
/* catch special cases where the argument of acos
is close to -1.0 or 1.0 or even outside this interval
use 0.0 instead of D_EPS?
fixes problems with demag field calculation
suggested by Hiroki Kobayashi, Fujitsu
*/
if (t_nom/t_denom<-1.0) {
omega=(zetal >= 0.0 ? 1.0 : -1.0)*2.0*M_PI;
}
/* this case should not occur, but does - e.g. examples1/headfield */
else if (t_nom/t_denom>1.0) {
return(0);
}
else {
omega=(zetal >= 0.0 ? 1.0 : -1.0)*2.0*acos(t_nom/t_denom);
}
eta1l=my_ddot(ND,eta1,1,rho1,1);
eta2l=my_ddot(ND,eta2,1,rho2,1);
eta3l=my_ddot(ND,eta3,1,rho3,1);
p[0]=log((rho1l+rho2l+s1)/(rho1l+rho2l-s1));
p[1]=log((rho2l+rho3l+s2)/(rho2l+rho3l-s2));
p[2]=log((rho3l+rho1l+s3)/(rho3l+rho1l-s3));
matele[0]=(eta2l*omega-zetal*my_ddot(ND,gamma1,1,p,1))*s2/(8.0*PETSC_PI*a);
matele[1]=(eta3l*omega-zetal*my_ddot(ND,gamma2,1,p,1))*s3/(8.0*PETSC_PI*a);
matele[2]=(eta1l*omega-zetal*my_ddot(ND,gamma3,1,p,1))*s1/(8.0*PETSC_PI*a);
return(0);
}
"""
finmag | finmag-master/src/finmag/energies/demag/belement.py
import dolfin as df
import numpy as np
import belement_magpar
import finmag.util.solid_angle_magpar as solid_angle_solver
compute_belement = belement_magpar.return_bele_magpar()
compute_solid_angle = solid_angle_solver.return_csa_magpar()
def GetDet3(x, y, z):
"""
helper function
"""
d = x[0] * y[1] * z[2] + x[1] * y[2] * z[0] \
+ x[2] * y[0] * z[1] - x[0] * y[2] * z[1] \
- x[1] * y[0] * z[2] - x[2] * y[1] * z[0]
return d
def GetTetVol(x1, x2, x3, x4):
"""
helper fuctioen
"""
v = GetDet3(x2, x3, x4) - GetDet3(x1, x3, x4) + \
GetDet3(x1, x2, x4) - GetDet3(x1, x2, x3)
return 1.0 / 6.0 * v
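# Note: GetTetVol returns the *signed* volume,
# 1/6 * det(x2 - x1, x3 - x1, x4 - x1); the sign encodes the orientation of
# the vertex ordering, which compute_bnd_mapping below uses to order boundary
# faces consistently.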
def compute_bnd_mapping(mesh, debug=False):
mesh.init()
number_nodes = mesh.num_vertices()
number_cells = mesh.num_cells()
number_nodes_bnd = 0
number_faces_bnd = 0
bnd_face_verts = []
gnodes_to_bnodes = np.zeros(number_nodes, int)
node_at_boundary = np.zeros(number_nodes, int)
nodes_xyz = mesh.coordinates()
for face in df.faces(mesh):
cells = face.entities(3)
if len(cells) == 1:
face_nodes = face.entities(0)
cell = df.Cell(mesh, cells[0])
cell_nodes = cell.entities(0)
# print set(cell_nodes)-set(face_nodes),face_nodes
tmp_set = set(cell_nodes) - set(face_nodes)
x1 = nodes_xyz[tmp_set.pop()]
x2 = nodes_xyz[face_nodes[0]]
x3 = nodes_xyz[face_nodes[1]]
x4 = nodes_xyz[face_nodes[2]]
tmp_vol = GetTetVol(x1, x2, x3, x4)
local_nodes = [face_nodes[0]]
if tmp_vol < 0:
local_nodes.append(face_nodes[2])
local_nodes.append(face_nodes[1])
else:
local_nodes.append(face_nodes[1])
local_nodes.append(face_nodes[2])
bnd_face_verts.append(local_nodes)
for i in face_nodes:
node_at_boundary[i] = 1
number_faces_bnd += 1
bnd_face_verts = np.array(bnd_face_verts)
number_nodes_bnd = 0
for i in range(number_nodes):
if node_at_boundary[i] > 0:
gnodes_to_bnodes[i] = number_nodes_bnd
number_nodes_bnd += 1
else:
gnodes_to_bnodes[i] = -1
if debug:
print 'cells number:', mesh.num_cells()
print 'nodes number:', mesh.num_vertices()
# print mesh.coordinates()
print 'faces:', mesh.num_faces()
print 'faces number at the boundary:', number_faces_bnd
print 'nodes number at the boundary:', number_nodes_bnd
for i in range(number_nodes):
tmp = gnodes_to_bnodes[i]
print 'global id=', i, nodes_xyz[i][0], nodes_xyz[i][1], nodes_xyz[i][2], tmp
for i in range(number_faces_bnd):
print ' ', bnd_face_verts[i][0], bnd_face_verts[i][1], bnd_face_verts[i][2]
return (bnd_face_verts, gnodes_to_bnodes, number_faces_bnd, number_nodes_bnd)
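# compute_bnd_mapping returns, in order: the boundary faces as vertex index
# triples (ordered via the tetrahedron-volume sign check above, which we read
# as giving outward-pointing face normals), the global-to-boundary vertex map
# (-1 for interior vertices), and the boundary face and vertex counts.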
def BEM_matrix(mesh):
bnd_face_verts,\
gnodes_to_bnodes,\
number_faces_bnd,\
number_nodes_bnd = compute_bnd_mapping(mesh, debug=False)
B = np.zeros((number_nodes_bnd, number_nodes_bnd))
nodes_xyz = mesh.coordinates()
tmp_bele = np.array([0., 0., 0.])
number_nodes = mesh.num_vertices()
for i in range(number_nodes):
        # skip nodes that are not on the boundary
if gnodes_to_bnodes[i] < 0:
continue
for j in range(number_faces_bnd):
            # skip faces that contain node i itself
if i in set(bnd_face_verts[j]):
continue
compute_belement(nodes_xyz[i],
nodes_xyz[bnd_face_verts[j][0]],
nodes_xyz[bnd_face_verts[j][1]],
nodes_xyz[bnd_face_verts[j][2]], tmp_bele)
"""print 'tmp_bele',tmp_bele"""
for k in range(3):
tmp_i = gnodes_to_bnodes[i]
tmp_j = gnodes_to_bnodes[bnd_face_verts[j][k]]
B[tmp_i][tmp_j] += tmp_bele[k]
# the solid angle term ...
vert_bsa = np.zeros(number_nodes)
mapping_cell_nodes = mesh.cells()
for i in range(mesh.num_cells()):
for j in range(4):
tmp_omega = compute_solid_angle(
nodes_xyz[mapping_cell_nodes[i][j]],
nodes_xyz[mapping_cell_nodes[i][(j + 1) % 4]],
nodes_xyz[mapping_cell_nodes[i][(j + 2) % 4]],
nodes_xyz[mapping_cell_nodes[i][(j + 3) % 4]])
vert_bsa[mapping_cell_nodes[i][j]] += tmp_omega
for i in range(number_nodes):
tmp_i = gnodes_to_bnodes[i]
if tmp_i < 0:
continue
B[tmp_i][tmp_i] += vert_bsa[i] / (4 * np.pi) - 1
return B, gnodes_to_bnodes
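# Minimal usage sketch ('mesh.xml' is a placeholder for a tetrahedral mesh):
#
# mesh = df.Mesh('mesh.xml')
# B, g2b = BEM_matrix(mesh)
#
# B is a dense (n_bnd x n_bnd) array over the boundary vertices; g2b maps a
# global vertex index to its boundary index (-1 for interior vertices).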
def test_order():
mesh = df.Mesh('tet.xml')
xs = mesh.coordinates()
print xs
print mesh.cells()
print 'volume:', GetTetVol(xs[0], xs[1], xs[2], xs[3])
print 'volume:', GetTetVol(xs[1], xs[0], xs[2], xs[3])
print 'solid angle 1', compute_solid_angle(xs[0], xs[1], xs[2], xs[3])
print 'solid angle 2', compute_solid_angle(xs[0], xs[2], xs[1], xs[3])
bele = np.array([0, 0.0, 0])
compute_belement(xs[0], xs[1], xs[2], xs[3], bele)
print 'belement 1', bele
compute_belement(xs[0], xs[2], xs[1], xs[3], bele)
print 'belement 2', bele
if __name__ == "__main__":
# test_order()
mesh = df.Mesh('cube.xml')
print BEM_matrix(mesh)
| 5,506 | 28.929348 | 89 |
py
|
finmag
|
finmag-master/src/finmag/energies/demag/fk_demag_2d_test.py
|
import dolfin as df
import pytest
from finmag.field import Field
from finmag.energies.demag.fk_demag_2d import Demag2D
def test_create_mesh():
mesh = df.UnitSquareMesh(20, 2)
demag = Demag2D(thickness=0.1)
mesh3 = demag.create_3d_mesh(mesh)
coord1 = mesh.coordinates()
coord2 = mesh3.coordinates()
nv = len(coord1)
eps = 1e-16
for i in range(nv):
assert abs(coord1[i][0] - coord2[i][0]) < eps
assert abs(coord1[i][0] - coord2[i + nv][0]) < eps
assert abs(coord1[i][1] - coord2[i][1]) < eps
assert abs(coord1[i][1] - coord2[i + nv][1]) < eps
@pytest.mark.xfail
def test_demag_2d(plot=False):
mesh = df.UnitSquareMesh(4, 4)
Ms = 1.0
S3 = df.VectorFunctionSpace(mesh, "Lagrange", 1, dim=3)
m0 = df.Expression(("0", "0", "1"), degree=1)
m = Field(S3, m0)
h = 0.001
demag = Demag2D(thickness=h)
demag.setup(m, Ms)
print demag.compute_field()
f0 = demag.compute_field()
m.set_with_numpy_array_debug(f0)
print demag.m.probe(0., 0., 0)
print demag.m.probe(1., 0., 0)
print demag.m.probe(0., 1., 0)
print demag.m.probe(1., 1., 0)
print '=' * 50
print demag.m.probe(0., 0., h)
print demag.m.probe(1., 0., h)
print demag.m.probe(0., 1., h)
print demag.m.probe(1., 1., h)
if plot:
df.plot(m.f)
df.interactive()
if __name__ == "__main__":
test_create_mesh()
test_demag_2d(plot=True)
| 1,461 | 21.492308 | 59 |
py
|
finmag
|
finmag-master/src/finmag/energies/demag/__init__.py
|
import logging
from fk_demag import FKDemag
from fk_demag_pbc import MacroGeometry
from fk_demag_2d import Demag2D
from treecode_bem import TreecodeBEM
log = logging.getLogger("finmag")
KNOWN_SOLVERS = {
'FK': FKDemag, 'Treecode': TreecodeBEM}
def Demag(solver='FK', *args, **kwargs):
    if solver not in KNOWN_SOLVERS:
log.error(
"Tried to create a Demag object with unknown solver '{}'".format(solver))
raise NotImplementedError(
"Solver '{}' not implemented. Valid choices: one of '{}'.".format(solver, KNOWN_SOLVERS.keys()))
log.debug("Creating Demag object with solver '{}'.".format(solver))
return KNOWN_SOLVERS[solver](*args, **kwargs)
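# Usage sketch:
#
# demag = Demag()  # the default Fredkin-Koehler (FK) solver
# demag = Demag('Treecode')  # the treecode-accelerated BEM variant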
| 700 | 32.380952 | 108 |
py
|
finmag
|
finmag-master/src/finmag/energies/demag/demag_pbc_test.py
|
import pytest
import numpy as np
import dolfin as df
from finmag import Simulation
from finmag.energies import Exchange, DMI, Demag
from finmag import MacroGeometry
mesh_1 = df.BoxMesh(df.Point(-10, -10, -10), df.Point(10, 10, 10), 10, 10, 10)
mesh_3 = df.BoxMesh(df.Point(-30, -10, -10), df.Point(30, 10, 10), 30, 10, 10)
mesh_9 = df.BoxMesh(df.Point(-30, -30, -10), df.Point(30, 30, 10), 30, 30, 10)
def compute_field(mesh, nx=1, ny=1, m0=(1, 0, 0), pbc=None):
Ms = 1e6
sim = Simulation(mesh, Ms, unit_length=1e-9, name='dy', pbc=pbc)
sim.set_m(m0)
parameters = {
'absolute_tolerance': 1e-10,
'relative_tolerance': 1e-10,
'maximum_iterations': int(1e5)
}
demag = Demag(macrogeometry=MacroGeometry(nx=nx, ny=ny))
demag.parameters['phi_1'] = parameters
demag.parameters['phi_2'] = parameters
sim.add(demag)
field = sim.llg.effective_field.get_dolfin_function('Demag')
# XXX TODO: Would be good to compare all the field values, not
# just the value at a single point! (Max, 25.7.2014)
return field(0, 0, 0) / Ms
def test_field_1d():
m0 = (1, 0, 0)
f1 = compute_field(mesh_1, nx=3, m0=m0)
f2 = compute_field(mesh_3, nx=1, m0=m0)
error = abs((f1 - f2) / f2)
print f1, f2, error
assert max(error) < 0.012
m0 = (0, 0, 1)
f1 = compute_field(mesh_1, nx=3, m0=m0)
f2 = compute_field(mesh_3, nx=1, m0=m0)
error = abs((f1 - f2) / f2)
print f1, f2, error
assert max(error) < 0.02
@pytest.mark.slow
def test_field_2d():
m0 = (1, 0, 0)
f1 = compute_field(mesh_1, nx=3, ny=3, m0=m0)
f2 = compute_field(mesh_9, m0=m0)
error = abs((f1 - f2) / f2)
print f1, f2, error
assert max(error) < 0.01
m0 = (0, 0, 1)
f1 = compute_field(mesh_1, nx=3, ny=3, m0=m0)
f2 = compute_field(mesh_9, m0=m0)
error = abs((f1 - f2) / f2)
print f1, f2, error
assert max(error) < 0.004
if __name__ == '__main__':
test_field_1d()
test_field_2d()
| 2,013 | 25.853333 | 78 |
py
|
finmag
|
finmag-master/src/finmag/energies/demag/solver_base.py
|
import dolfin as df
import numpy as np
from aeon import Timer
from finmag.field import Field
from finmag.util import helpers
import finmag.util.solver_benchmark as bench
# Define default parameters for the fembem solvers
default_parameters = df.Parameters("demag_options")
poisson = df.Parameters("poisson_solver")
poisson.add("method", "default")
poisson.add("preconditioner", "default")
laplace = df.Parameters("laplace_solver")
laplace.add("method", "default")
laplace.add("preconditioner", "default")
default_parameters.add(poisson)
default_parameters.add(laplace)
demag_timer = Timer()
class FemBemDeMagSolver(object):
"""
Base Class for FEM/BEM Demag Solvers containing shared methods. For a top
level demag solver interface see class Demag in finmag/energies/demag.
*Arguments*
m
the finmag object representing the (unit) magnetisation field.
Ms
the saturation magnetisation
parameters
dolfin.Parameters of method and preconditioner to linear solvers
        If not specified, the default parameters contained in solver_base.py
are used.
degree
polynomial degree of the function space
element
        finite element type; default is "CG", i.e. Lagrange polynomials.
unit_length
the scale of the mesh, defaults to 1.
project_method
method to calculate the demag field from the potential
possible methods are
* 'magpar'
* 'project'
bench
set to True to run a benchmark of linear solvers
"""
def __init__(self, m, parameters=None, degree=1, element="CG",
project_method='magpar', unit_length=1, Ms=None, bench=False,
normalize=True, solver_type=None):
assert isinstance(m, Field)
assert isinstance(Ms, Field) # currently this means that Ms must be passed in (we don't have a default value)
self.m = m
# Problem objects and parameters
self.name = "Demag"
self.in_jacobian = False
self.unit_length = unit_length
self.degree = degree
self.bench = bench
self.parameters = parameters
# This is used in energy density calculations
self.mu0 = np.pi * 4e-7 # Vs/(Am)
# Mesh Facet Normal
self.n = df.FacetNormal(self.m.mesh())
# Spaces and functions for the Demag Potential
self.V = df.FunctionSpace(self.m.mesh(), element, degree)
self.v = df.TestFunction(self.V)
self.u = df.TrialFunction(self.V)
self.phi = df.Function(self.V)
# Space and functions for the Demag Field
self.W = df.VectorFunctionSpace(self.m.mesh(), element, degree, dim=3)
self.w = df.TrialFunction(self.W)
self.vv = df.TestFunction(self.W)
self.H_demag = df.Function(self.W)
        # Interpolate the unit magnetisation field if necessary.
        # A try block was not used since it might lead to an unnecessary
        # (and potentially bad) interpolation.
# if isinstance(m, df.Expression) or isinstance(m, df.Constant):
# self.m = df.interpolate(m,self.W)
# elif isinstance(m,tuple):
# self.m = df.interpolate(df.Expression(m, degree=1),self.W)
# elif isinstance(m,list):
# self.m = df.interpolate(df.Expression(tuple(m, degree=1)),self.W)
# else:
# self.m = m
# Normalize m (should be normalized anyway).
# if normalize:
# self.m.vector()[:] = helpers.fnormalise(self.m.vector().array())
assert isinstance(Ms, Field)
self.Ms = Ms
        # Initialize the boundary element matrix variable
self.bem = None
# Objects that are needed frequently for linear solves.
self.poisson_matrix = self.build_poisson_matrix()
self.laplace_zeros = df.Function(self.V).vector()
        # Linear solver for the second (Laplace) step of the FK method.
if parameters:
method = parameters["laplace_solver"]["method"]
pc = parameters["laplace_solver"]["preconditioner"]
else:
method, pc = "default", "default"
if solver_type is None:
solver_type = 'Krylov'
solver_type = solver_type.lower()
if solver_type == 'lu':
self.laplace_solver = df.LUSolver()
self.laplace_solver.parameters["reuse_factorization"] = True
elif solver_type == 'krylov':
self.laplace_solver = df.KrylovSolver(method, pc)
            # Setting 'same_nonzero_pattern=True' would enforce the same
            # matrix sparsity pattern across different demag solves, which
            # should speed things up (currently disabled):
            #self.laplace_solver.parameters["preconditioner"][
            #    "structure"] = "same_nonzero_pattern"
else:
raise ValueError(
"Wrong solver type specified: '{}' (allowed values: 'Krylov', 'LU')".format(solver_type))
# Objects needed for energy density computation
self.nodal_vol = df.assemble(self.v * df.dx).array()
self.ED = df.Function(self.V)
# Method to calculate the Demag field from the potential
self.project_method = project_method
if self.project_method == 'magpar':
self.__setup_field_magpar()
self.__compute_field = self.__compute_field_magpar
elif self.project_method == 'project':
self.__compute_field = self.__compute_field_project
else:
raise NotImplementedError("""Only methods currently implemented are
* 'magpar',
* 'project'""")
    def solve(self):
        # To be overridden by subclasses; the base class has nothing to solve.
        return
def compute_field(self):
"""
Compute the demag field.
.. note::
The interface has to be changed to this later anyway, so
we can just keep it this way so we don't need to change the
examples later.
*Returns*
numpy.ndarray
The demag field.
"""
self.solve()
return self.__compute_field()
def compute_potential(self):
self.solve()
return self.phi
def scalar_potential(self):
"""Return the scalar potential."""
return self.phi
def compute_energy(self):
"""
Compute the demag energy defined by
.. math::
E_\\mathrm{demag} = -\\frac12 \\mu_0 \\int_\\Omega
H_\\mathrm{demag} \\cdot \\vec M \\mathrm{d}x
*Returns*
Float
The demag energy.
"""
self.H_demag.vector()[:] = self.compute_field()
E = -0.5 * self.mu0 * df.dot(self.H_demag, self.m.f * self.Ms.f) * df.dx
return df.assemble(E) * self.unit_length ** self.m.mesh_dim()
def energy_density(self):
"""
Compute the demag energy density,
.. math::
\\frac{E_\\mathrm{demag}}{V},
where V is the volume of each node.
*Returns*
numpy.ndarray
The demag energy density.
"""
self.H_demag.vector()[:] = self.compute_field()
E = df.dot(-0.5 * self.mu0 * df.dot(self.H_demag, self.m.f * self.Ms.f),
self.v) * df.dx
nodal_E = df.assemble(E).array()
return nodal_E / self.nodal_vol
def energy_density_function(self):
"""
Compute the demag energy density the same way as the
function above, but return a Function to allow probing.
*Returns*
dolfin.Function
The demag energy density.
"""
self.ED.vector()[:] = self.energy_density()
return self.ED
def build_poisson_matrix(self):
"""assemble a poisson equation 'stiffness' matrix"""
a = df.dot(df.grad(self.u), df.grad(self.v)) * df.dx
return df.assemble(a)
def solve_laplace_inside(self, function, solverparams=None):
"""Take a functions boundary data as a dirichlet BC and solve
a laplace equation"""
bc = df.DirichletBC(self.V, function, df.DomainBoundary())
A = self.poisson_matrix.copy()
b = self.laplace_zeros # .copy()
bc.apply(A, b)
if self.bench:
bench.solve(A, function.vector(), b, benchmark=True)
else:
demag_timer.start("2nd linear solve", self.__class__.__name__)
self.laplace_iter = self.laplace_solver.solve(
A, function.vector(), b)
demag_timer.stop("2nd linear solve", self.__class__.__name__)
return function
def __compute_field_project(self):
"""
Dolfin method of projecting the scalar potential
onto a dolfin.VectorFunctionSpace.
"""
Hdemag = df.project(-df.grad(self.phi), self.W)
return Hdemag.vector().array()
def __setup_field_magpar(self):
"""Needed by the magpar method we may use instead of project."""
# FIXME: Someone with a bit more insight in this method should
# write something about it in the documentation.
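        # Our reading of the scheme (a sketch, not a definitive account):
        # G maps nodal values of phi to the integrals \int grad(phi) . vv dx,
        # and L holds the negated lumped nodal volumes, so G*phi / L in
        # __compute_field_magpar is a volume-weighted nodal average of
        # -grad(phi).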
a = df.inner(df.grad(self.u), self.vv) * df.dx
b = df.dot(self.vv, df.Constant([-1, -1, -1])) * df.dx
self.G = df.assemble(a)
self.L = df.assemble(b).array()
def __compute_field_magpar(self):
"""Magpar method used by Weiwei."""
Hd = self.G * self.phi.vector()
Hd = Hd.array() / self.L
return Hd
def get_demagfield(self, phi=None, use_default_function_space=True):
"""
Returns the projection of the negative gradient of
phi onto a DG0 space defined on the same mesh
        Note: Do not trust the viper solver to plot the demag field,
        it can give some weird results; paraview is recommended instead.
use_default_function_space - If true project into self.W,
if false project into a Vector DG0 space
over the mesh of phi.
"""
if phi is None:
phi = self.phi
Hdemag = -df.grad(phi)
if use_default_function_space == True:
Hdemag = df.project(Hdemag, self.W)
else:
if self.D == 1:
Hspace = df.FunctionSpace(phi.function_space().mesh(), "DG", 0)
else:
Hspace = df.VectorFunctionSpace(
phi.function_space().mesh(), "DG", 0)
Hdemag = df.project(Hdemag, Hspace)
return Hdemag
| 10,596 | 33.858553 | 118 |
py
|
finmag
|
finmag-master/src/finmag/util/visualization_test.py
|
import finmag
import pytest
import textwrap
import subprocess
import os
import sys
import numpy as np
import dolfin as df
from glob import glob
from finmag.util.visualization import *
from finmag.util.visualization_impl import *
# Skipping this test for now because it still doesn't work on aleph0
# (although it works on my machine) -- Max, 7.6.2013
@pytest.mark.skipif("True")
def test_render_paraview_scene(tmpdir):
"""
This simply checks whether we can call the function. No check on
the output image produced is performed.
"""
tmpdir = str(tmpdir)
os.chdir(tmpdir)
# Check whether 'xpra' is installed
try:
subprocess.check_call(['xpra', '--version'])
except OSError:
finmag.logger.error("Could not find the 'xpra' executable, but it is needed to run this test. "
"Please install it using: 'sudo apt-get install xpra' (on Debian/Ubuntu-based systems).")
sys.exit(1)
sim = finmag.example.barmini()
sim.save_vtk('initial_state.pvd')
sim.schedule('save_vtk', filename='sim_relax.pvd', every=1e-10)
sim.run_until(4.2e-10)
display = find_unused_X_display()
finmag.logger.debug(
"Rendering Paraview scene on display :{} for test.".format(display))
subprocess.check_call(['xpra', 'start', ':{}'.format(display)])
try:
# XXX TODO: Maybe check various choices for all the individual
# arguments as well?
# Render a single snapshots of the initial state
render_paraview_scene('initial_state.pvd', 'initial_state.png', color_by_axis='Z',
rescale_colormap_to_data_range=False, debugging=False, use_display=display)
assert(os.path.exists('initial_state.png'))
# Render all snapshots captured during the relaxation
render_paraview_scene('sim_relax.pvd', 'sim_relaxation_all_snapshots.png', timesteps=None, color_by_axis='Z',
rescale_colormap_to_data_range=False, debugging=False, use_display=display)
assert(len(glob('sim_relaxation_all_snapshots*.png')) == 5)
# Render only selected snapshots captured during the relaxation
render_paraview_scene('sim_relax.pvd', 'sim_relaxation_selected_snapshots.png', timesteps=[0.0, 3.0], color_by_axis='Z',
rescale_colormap_to_data_range=False, debugging=False, use_display=display)
assert(len(glob('sim_relaxation_selected_snapshots*.png')) == 2)
# Here we test exporting timesteps that lie between the ones that are actually present in the .pvd file.
# This works but isn't very useful, because Paraview does not interpolate the snapshots, it simply renders
# the timesteps which are present in the .pvd file multiple times.
render_paraview_scene('sim_relax.pvd', 'sim_relaxation_intermediate_snapshots.png', timesteps=np.linspace(0, 3.0, 10), color_by_axis='Z',
rescale_colormap_to_data_range=False, debugging=False, use_display=display)
assert(len(glob('sim_relaxation_intermediate_snapshots*.png')) == 10)
finally:
subprocess.check_call(['xpra', 'stop', ':{}'.format(display)])
def test_flight_path_rotation():
# Random starting position and axis
p0 = [-1, 3, 2]
axis = [2.4, 5.2, 8.0]
f = flight_path_rotation(start_pos=p0, axis=axis, angle=360)
# A few sanity checks:
# For a full rotation, the inital and end points should coincide with p0.
assert(np.allclose(f(0), p0))
assert(np.allclose(f(1), p0))
# All connecting vectors between the initial position and any
# other should be orthogonal to the rotation axis.
for t in np.linspace(0, 1, 100):
v = f(t) - f(0)
assert(abs(np.dot(v, axis)) < 1e-8)
# If the starting position lies on the axis then all points on the
# flight path should coincide.
f = flight_path_rotation(start_pos=p0, axis=p0, angle=42.0)
for t in np.linspace(0, 1, 100):
assert(np.allclose(f(t), p0))
def test_flight_path_straight_line():
# Random starting position and axis
P0 = np.array([-1, 3, 2])
P1 = np.array([2.4, 5.2, 8.0])
f = flight_path_straight_line(P0, P1)
# The start and end point should coincide with P0 and P1
assert(np.allclose(f(0), P0))
assert(np.allclose(f(1), P1))
# Compare f(t) with an explicit linear interpolation between P0 and P1
t_vals = np.linspace(0, 1, 200)
for t in t_vals:
pt_expected = (1 - t) * P0 + t * P1
assert(np.allclose(f(t), pt_expected))
@pytest.mark.skipif("True")
def test_plot_dolfin_function(tmpdir):
os.chdir(str(tmpdir))
interval_mesh = df.UnitIntervalMesh(2)
square_mesh = df.UnitSquareMesh(2, 2)
cube_mesh = df.UnitCubeMesh(2, 2, 2)
S = df.FunctionSpace(cube_mesh, 'CG', 1)
V2 = df.VectorFunctionSpace(square_mesh, 'CG', 1, dim=3)
V3 = df.VectorFunctionSpace(cube_mesh, 'CG', 1, dim=3)
s = df.Function(S)
v2 = df.Function(V2)
v3 = df.Function(V3)
v3.vector()[:] = 1.0
# Wrong function space dimension
with pytest.raises(TypeError):
plot_dolfin_function(s, outfile='buggy.png')
# Plotting a 3D function on a 3D mesh should work
plot_dolfin_function(v3, outfile='plot.png')
assert(os.path.exists('plot.png'))
# Try 2-dimensional mesh as well
plot_dolfin_function(v2, outfile='plot_2d_mesh.png')
assert(os.path.exists('plot_2d_mesh.png'))
| 5,488 | 36.59589 | 145 |
py
|
finmag
|
finmag-master/src/finmag/util/fft_test.py
|
from __future__ import division
from fft import *
from numpy import sqrt, sin, cos, pi, exp, real, conj
from finmag.util.consts import gamma
import numpy as np
import os
import pytest
import matplotlib.pyplot as plt
import fft_test_helpers
def test_analytical_inverse_DFT(tmpdir, debug=False):
"""
We construct a simple signal as a superposition of two sinusoidal
oscillations with different frequencies. Then filter this signal
in the following two ways and check that we get the same result.
1) Set all Fourier coefficients except one to zero and do an
inverse Fourier transform.
2) Compute the inverse Fourier transform with a single coefficient
"analytically" (by applying the formula at [1] to the case of
just a single summand and writing the complex exponential as a
sum of a single sine and cosine).
[1] http://docs.scipy.org/doc/numpy/reference/routines.fft.html
"""
os.chdir(str(tmpdir))
n = 1000
tmin = 0.23 * pi
tmax = 4.23 * pi
dt = (tmax - tmin) / (n - 1)
# Time steps of the signal
ts = np.linspace(tmin, tmax, n)
# Define a simple signal that is a superposition of two waves
signal = sin(ts) + 2 * cos(3 * ts)
if debug:
# Plot the signal and its sin/cos components
plt.figure()
plt.plot(signal, 'x-', label='signal')
plt.plot(sin(ts), label='sin(t)')
plt.plot(cos(3 * ts), label='cos(3t)')
plt.legend()
plt.savefig('fft_test_01_signal.pdf')
# Perform a (real-valued) Fourier transform. Also store the
# frequencies corresponding to the Fourier coefficients.
rfft_vals = np.fft.rfft(signal)
rfft_freqs = np.arange(n // 2 + 1) / (dt * n)
# Determine indices of the two peaks
idx_peaks = sorted(abs(rfft_vals).argsort()[-2:])
assert(idx_peaks == [2, 6]) # sanity check that the peaks are as expected
# For each peak coefficient, filter the signal both using the
# inverse DFT and manually/analytically.
for k in idx_peaks:
# Filter the signal using the inverse DFT
rfft_vals_filtered = np.zeros_like(rfft_vals)
rfft_vals_filtered[k] = rfft_vals[k]
signal_filtered = np.fft.irfft(rfft_vals_filtered)
# Manually construct a filtered signal in various ways
A_k = rfft_vals[k] # Fourier coefficient at the peak
B_k = A_k.real
C_k = A_k.imag
print "Fourier coefficient at index k={} is: {}".format(k, A_k)
tt = 2 * pi * k * np.arange(n) / n
signal_analytical_1 = np.squeeze(
filter_frequency_component(signal, k, tmin, tmax))
signal_analytical_2 = 2.0 / n * (B_k * cos(tt) - C_k * sin(tt))
signal_analytical_3 = real(
1.0 / n * (A_k * exp(1j * tt) + conj(A_k) * exp(-1j * tt)))
base_oscillation = sin(ts) if (k == 2) else 2 * cos(3 * ts)
print "Maximum deviation of filtered signal from the base sinusoidal oscillation: {}".format(max(abs(base_oscillation - signal_filtered)))
assert np.allclose(
base_oscillation, signal_filtered, atol=0.05, rtol=0)
assert np.allclose(
signal_filtered, signal_analytical_1, atol=1e-11, rtol=0)
assert np.allclose(
signal_filtered, signal_analytical_2, atol=1e-11, rtol=0)
assert np.allclose(
signal_filtered, signal_analytical_3, atol=1e-11, rtol=0)
if debug:
plt.figure()
plt.plot(ts, base_oscillation, '-', label='sin(t)')
plt.plot(ts, signal_filtered, 'x', label='filtered (iDFT)')
plt.plot(
ts, signal_analytical_1, '-', label='filtered (analytical #1)')
plt.plot(
ts, signal_analytical_2, '.', label='filtered (analytical #1)')
plt.legend()
plt.savefig('fft_test_02_filtered_signal_for_k_{}.pdf'.format(k))
def test_wrong_file_suffix_for_power_spectral_density():
with pytest.raises(ValueError):
compute_power_spectral_density('datafile.foo')
def test_power_spectral_density_from_averaged_magnetisation(tmpdir):
"""
Write a time series of artificial magnetisation data (created from
a damped harmonic oscillator) to a .ndt file and use it to compute
the power spectral densities. Then compare them with the manually
computed ones.
"""
os.chdir(str(tmpdir))
RTOL = 1e-10
H = 1e6 # external field in A/m
omega = gamma * H # precession frequency
alpha = 0.5 # some sort of damping constant
##
# Step 1: Construct a time series of artificial magnetisation
# data and save it to a .ndt file.
##
t_step = 1e-11
t_ini = 0
t_end = 10e-9
ndt_filename = fft_test_helpers.create_test_ndt_file(
str(tmpdir), t_step, t_ini, t_end, omega, alpha)
##
# Step 2: compute the PSDs of a resampled time series, both by
# hand and using compute_power_spectral_density() and check that
# the results are the same.
##
t_step_res = 2e-11
t_ini_res = 1e-10
t_end_res = 9.9e-9
ts_resampled = np.arange(t_ini_res, t_end_res, t_step_res)
N = len(ts_resampled) // 2 + 1 # expected length of real-valued FFT
# Compute time series based on resampled timesteps
mx_res = exp(-ts_resampled * 1e8 / alpha) * sin(omega * ts_resampled)
my_res = exp(-ts_resampled * 1e8 / alpha) * cos(omega * ts_resampled)
mz_res = 1 - sqrt(mx_res ** 2 + my_res ** 2)
# Compute 'analytical' power spectral densities of resampled time series
psd_mx_res_expected = abs(np.fft.rfft(mx_res)) ** 2
psd_my_res_expected = abs(np.fft.rfft(my_res)) ** 2
psd_mz_res_expected = abs(np.fft.rfft(mz_res)) ** 2
# Compute Fourier transform of resampled time series using FFT_m
freqs_res, psd_mx_res, psd_my_res, psd_mz_res = \
compute_power_spectral_density(
ndt_filename, t_step_res, t_ini=t_ini_res, t_end=t_end_res, subtract_values=None)
# Compare both results
assert(np.allclose(psd_mx_res, psd_mx_res_expected, atol=0, rtol=RTOL))
assert(np.allclose(psd_my_res, psd_my_res_expected, atol=0, rtol=RTOL))
assert(np.allclose(psd_mz_res, psd_mz_res_expected, atol=0, rtol=RTOL))
# Also check that the frequency range is as expected
freqs_np = np.fft.fftfreq(len(ts_resampled), d=t_step_res)[:N]
assert(np.allclose(freqs_res, freqs_np, atol=0, rtol=RTOL))
def test_power_spectral_density_from_spatially_resolved_magnetisation(tmpdir, debug=False):
"""
First we write some 'forged' spatially resolved magnetisation
dynamics to a bunch of .npy files (representing the time series).
The oscillation is exactly the same at every location, so that we
don't lose any information in the averaging process and can
compare with the analytical solution as in the previous test.
"""
os.chdir(str(tmpdir))
RTOL = 1e-10
H = 1e6 # external field in A/m
alpha = 0.5 # some sort of damping constant
omega = gamma * H # precession frequency
##
# Step 1: Construct a time series of artificial magnetisation
# data and save it to a bunch of .npy files.
##
t_step = 1e-11
t_ini = 0
t_end = 10e-9
# in a real application this would be the number of mesh vertices
num_vertices = 42
fft_test_helpers.create_test_npy_files(
str(tmpdir), t_step, t_ini, t_end, omega, alpha, num_vertices)
##
# Step 2: compute the FFT of a resampled time series, both by
# hand and using FFT_m.
##
# XXX TODO: Resampling timesteps is not supported when using .npy
# files. Either simplify the code below, or implement saving to
# .h5 files so that it's easier to implement resampling for
# spatially resolved data, too.
##
t_step_res = t_step
t_ini_res = t_ini
t_end_res = t_end
ts_resampled = np.arange(t_ini_res, t_end_res, t_step_res)
# Compute time series based on resampled timesteps
mx_res = exp(-ts_resampled * 1e8 / alpha) * sin(omega * ts_resampled)
my_res = exp(-ts_resampled * 1e8 / alpha) * cos(omega * ts_resampled)
mz_res = 1 - sqrt(mx_res ** 2 + my_res ** 2)
# Compute 'analytical' Fourier transform of resampled time series and
# determine the power of the spectrum for each component. We also need
# to multiply by the number of mesh nodes because the numerical algorithm
# sums up all contributions at the individual nodes (but we can just
# multiply because they are all identical by construction).
psd_mx_expected = num_vertices * np.absolute(np.fft.rfft(mx_res)) ** 2
psd_my_expected = num_vertices * np.absolute(np.fft.rfft(my_res)) ** 2
psd_mz_expected = num_vertices * np.absolute(np.fft.rfft(mz_res)) ** 2
# Compute Fourier transform of resampled time series using
# compute_power_spectral_density.
freqs_computed, psd_mx_computed, psd_my_computed, psd_mz_computed = \
compute_power_spectral_density(
'm_ringdown*.npy', t_step_res, t_ini=t_ini_res, t_end=t_end_res, subtract_values=None)
# Check that the analytically determined power spectra are the same as the
# computed ones.
assert(np.allclose(psd_mx_expected, psd_mx_computed, atol=0, rtol=RTOL))
assert(np.allclose(psd_my_expected, psd_my_computed, atol=0, rtol=RTOL))
assert(np.allclose(psd_mz_expected, psd_mz_computed, atol=0, rtol=RTOL))
if debug:
# Plot the spectra for debugging
fig = plt.figure(figsize=(20, 5))
ax = fig.gca()
ax.plot(freqs_computed, psd_mx_expected, label='psd_mx_expected')
ax.plot(freqs_computed, psd_my_expected, label='psd_my_expected')
ax.plot(freqs_computed, psd_mz_expected, label='psd_mz_expected')
ax.plot(freqs_computed, psd_mx_computed, label='psd_mx_computed')
ax.plot(freqs_computed, psd_my_computed, label='psd_my_computed')
ax.plot(freqs_computed, psd_mz_computed, label='psd_mz_computed')
ax.legend(loc='best')
fig.savefig('psd_m_McMichaelStiles.png')
def test_power_spectral_density_from_spatially_resolved_magnetisation_confined_to_mesh_region(tmpdir, debug=False):
"""
First we write some 'forged' spatially resolved magnetisation
dynamics to a bunch of .npy files (representing the time series).
we use two mesh regions with different oscillations, but we
compute the spectrum for the first region only and compare with an
analytical solution as in the previous tests.
"""
os.chdir(str(tmpdir))
RTOL = 1e-10
H1 = 1e6 # external field in A/m
alpha1 = 0.5 # some sort of damping constant
omega1 = gamma * H1 # precession frequency
H2 = 2.8e4 # external field in A/m
alpha2 = 0.3 # some sort of damping constant
omega2 = gamma * H2 # precession frequency
##
# Step 1: Construct a time series of artificial magnetisation
# data and save it to a bunch of .npy files.
##
t_step = 1e-11
t_ini = 0
t_end = 10e-9
N1 = 42 # in a real application this would be the number of mesh vertices
N2 = 23 # in a real application this would be the number of mesh vertices
fft_test_helpers.create_test_npy_files_with_two_regions(
str(tmpdir), t_step, t_ini, t_end, omega1, alpha1, N1, omega2, alpha2, N2)
##
# Step 2: compute the FFT of a resampled time series, both by
# hand and using FFT_m.
##
# XXX TODO: Resampling timesteps is not supported when using .npy
# files. Either simplify the code below, or implement saving to
# .h5 files so that it's easier to implement resampling for
# spatially resolved data, too.
##
t_step_res = t_step
t_ini_res = t_ini
t_end_res = t_end
ts_resampled = np.arange(t_ini_res, t_end_res, t_step_res)
# Compute time series based on resampled timesteps
mx_res = exp(-ts_resampled * 1e8 / alpha1) * sin(omega1 * ts_resampled)
my_res = exp(-ts_resampled * 1e8 / alpha1) * cos(omega1 * ts_resampled)
mz_res = 1 - sqrt(mx_res ** 2 + my_res ** 2)
# Compute 'analytical' Fourier transform of resampled time series and
# determine the power of the spectrum for each component. We also need
# to multiply by the number of mesh nodes because the numerical algorithm
# sums up all contributions at the individual nodes (but we can just
# multiply because they are all identical by construction).
psd_mx_expected = N1 * np.absolute(np.fft.rfft(mx_res)) ** 2
psd_my_expected = N1 * np.absolute(np.fft.rfft(my_res)) ** 2
psd_mz_expected = N1 * np.absolute(np.fft.rfft(mz_res)) ** 2
# Compute Fourier transform of resampled time series using FFT_m
freqs_computed, psd_mx_computed, psd_my_computed, psd_mz_computed = \
compute_power_spectral_density('m_ringdown*.npy', t_step_res, t_ini=t_ini_res,
t_end=t_end_res, subtract_values=None, restrict_to_vertices=xrange(N1))
# Check that the analytically determined power spectra are the same as the
# computed ones.
assert(np.allclose(psd_mx_expected, psd_mx_computed, atol=0, rtol=RTOL))
assert(np.allclose(psd_my_expected, psd_my_computed, atol=0, rtol=RTOL))
assert(np.allclose(psd_mz_expected, psd_mz_computed, atol=0, rtol=RTOL))
if debug:
# Plot the spectra for debugging
fig = plt.figure(figsize=(20, 5))
ax = fig.gca()
ax.plot(freqs_computed, psd_mx_expected, label='psd_mx_expected')
ax.plot(freqs_computed, psd_my_expected, label='psd_my_expected')
ax.plot(freqs_computed, psd_mz_expected, label='psd_mz_expected')
ax.plot(freqs_computed, psd_mx_computed, label='psd_mx_computed')
ax.plot(freqs_computed, psd_my_computed, label='psd_my_computed')
ax.plot(freqs_computed, psd_mz_computed, label='psd_mz_computed')
ax.legend(loc='best')
fig.savefig('psd_m_McMichaelStiles.png')
def test_find_peak_near_frequency(tmpdir, debug=False):
"""
Check that `find_peak_near_frequency` works as expected, including
special cases, boundary cases etc.
"""
fft_freqs = [0, 1e9, 2e9, 3e9, 4e9, 5e9, 6e9, 7e9, 8e9]
fft_mx = [1, 4, 3, 2, 1.5, 1.3, 2.5, 1.1, 1.0]
fft_my = [1, 4, 3, 2, 1.5, 1.3, 1.1, 1.7, 1.0]
fft_mz = [2, 1, 1, 1, 1, 1, 1, 1, 1.5]
if debug == True:
# Plot the arrays for debugging
os.chdir(str(tmpdir))
fig = plt.figure()
ax = fig.gca()
ax.plot(fft_freqs, fft_mx, label='fft_mx')
ax.plot(fft_freqs, fft_my, label='fft_my')
ax.plot(fft_freqs, fft_mz, label='fft_mz')
ax.legend()
fig.savefig('fft_vals.png')
assert find_peak_near_frequency(1.5e9, fft_freqs, fft_mx) == (1e9, 1)
#assert find_peak_near_frequency(1.5e9, fft_freqs, [fft_mx, fft_my]) == (1, 1e9)
assert find_peak_near_frequency(5e9, fft_freqs, fft_mx) == (6e9, 6)
assert find_peak_near_frequency(5e9, fft_freqs, fft_my) == (7e9, 7)
assert find_peak_near_frequency(3.7e9, fft_freqs, fft_mx) == (6e9, 6)
# assert find_peak_near_frequency(4e9, fft_freqs, [fft_mx, fft_my]) ==
# None # no simultaneous peak
# Just to check special cases, boundary cases etc.
assert find_peak_near_frequency(1e9, fft_freqs, fft_mx) == (1e9, 1)
assert find_peak_near_frequency(0.9e9, fft_freqs, fft_mx) == (1e9, 1)
assert find_peak_near_frequency(1.1e9, fft_freqs, fft_mx) == (1e9, 1)
assert find_peak_near_frequency(-0.1e9, fft_freqs, fft_mx) == (1e9, 1)
assert find_peak_near_frequency(20e9, fft_freqs, fft_mx) == (6e9, 6)
assert find_peak_near_frequency(-0.5e9, fft_freqs, fft_mz) == (0e9, 0)
assert find_peak_near_frequency(0.5e9, fft_freqs, fft_mz) == (0e9, 0)
assert find_peak_near_frequency(1e9, fft_freqs, fft_mz) == (0e9, 0)
assert find_peak_near_frequency(6e9, fft_freqs, fft_mz) == (8e9, 8)
assert find_peak_near_frequency(8e9, fft_freqs, fft_mz) == (8e9, 8)
assert find_peak_near_frequency(9e9, fft_freqs, fft_mz) == (8e9, 8)
with pytest.raises(ValueError):
# An error should be raised if fft_vals doesn't have the same
# length as fft_freqs.
find_peak_near_frequency(2.5e9, fft_freqs, fft_vals=[0, 1])
@pytest.mark.requires_X_display
def test_plot_power_spectral_density(tmpdir):
os.chdir(str(tmpdir))
H = 1e6 # external field in A/m
omega = gamma * H # precession frequency
alpha = 0.5 # some sort of damping constant
t_step = 1e-11
t_ini = 0
t_end = 10e-9
# in a real application this would be the number of mesh vertices
num_vertices = 42
# Write a sample .ndt file with some artifical magnetisation data
ndt_filename = fft_test_helpers.create_test_ndt_file(
str(tmpdir), t_step, t_ini, t_end, omega, alpha)
# Write a sample .ndt file with some artifical magnetisation data
npy_filenames = fft_test_helpers.create_test_npy_files(
str(tmpdir), t_step, t_ini, t_end, omega, alpha, num_vertices)
kwargs = dict(t_step=t_step, t_ini=t_ini, t_end=t_end,
subtract_values=None, components="xy", figsize=(5, 4),
ticks=5, title="Power spectral densities")
fig1 = plot_power_spectral_density(
ndt_filename, log=False, outfilename='psd_ndt_nolog.png', **kwargs)
fig2 = plot_power_spectral_density(
ndt_filename, log=True, outfilename='psd_ndt_log.png', **kwargs)
fig3 = plot_power_spectral_density(
npy_filenames, log=False, outfilename='psd_npy_nolog.png', **kwargs)
fig4 = plot_power_spectral_density(
npy_filenames, log=True, outfilename='psd_npy_log.png', **kwargs)
assert(isinstance(fig1, plt.Figure))
assert(isinstance(fig2, plt.Figure))
assert(isinstance(fig3, plt.Figure))
assert(isinstance(fig4, plt.Figure))
assert(os.path.exists('psd_ndt_nolog.png'))
assert(os.path.exists('psd_ndt_log.png'))
assert(os.path.exists('psd_npy_nolog.png'))
assert(os.path.exists('psd_npy_log.png'))
| 18,029 | 40.639723 | 146 |
py
|
finmag
|
finmag-master/src/finmag/util/solver_benchmark.py
|
# Modified by Gabriel Balaban, April 20, 2012
import dolfin
import ufl
import operator
import numpy as np
def solver_parameters(solver_exclude, preconditioner_exclude):
linear_solver_set = ["lu"]
linear_solver_set += [e[0] for e in dolfin.krylov_solver_methods()]
preconditioner_set = [e[0] for e in dolfin.krylov_solver_preconditioners()]
solver_parameters_set = []
for l in linear_solver_set:
if l in solver_exclude:
continue
for p in preconditioner_set:
if p in preconditioner_exclude:
continue
if (l == "lu" or l == "default") and p != "none":
continue
solver_parameters_set.append(
{"linear_solver": l, "preconditioner": p})
return solver_parameters_set
def print_benchmark_report(solver_timings, failed_solvers):
# Let's analyse the result of the benchmark test:
solver_timings = sorted(
solver_timings.iteritems(), key=operator.itemgetter(1))
failed_solvers = sorted(
failed_solvers.iteritems(), key=operator.itemgetter(1))
dolfin.info_blue("***********************************************")
dolfin.info_blue("********** Solver benchmark results: **********")
dolfin.info_blue("***********************************************")
for solver, timing in solver_timings:
dolfin.info_blue("%s: %.6f s" % (solver, timing))
for solver, reason in failed_solvers:
dolfin.info_red("%s: %s" % (solver, reason))
def replace_solver_settings(args, kwargs, parameters):
    ''' Replace the arguments of a solve call and replace the solver settings with the ones given in solver_settings. '''
    # Work on a copy and strip benchmark-only options that dolfin.solve
    # would not understand.
    kwargs = dict(kwargs)
    kwargs.pop('return_best', None)
    # The way how to set the solver settings depends on how the system is solved:
# Adaptive solve
if "tol" in kwargs:
raise NotImplementedError(
'The benchmark solver is currently not implemented for adaptive solver calls.')
# Variational problem solver
elif isinstance(args[0], ufl.classes.Equation):
kwargs['solver_parameters'] = parameters
# Default case: call the c++ solve routine
else:
args = args[0:3] + \
(parameters['linear_solver'], parameters['preconditioner'])
return args, kwargs
def solve(*args, **kwargs):
    ''' This function overrides the dolfin.solve function and provides additional functionality to benchmark
        different solver/preconditioner settings. The arguments are equivalent to dolfin.solve, with some (optional) additional parameters:
- benchmark = [True, False]: If True, the problem will be solved with all different solver/precondition combinations and the results reported.
If False, the problem is solved using the default solver settings.
- solve: An optional function parameter that is called instead of dolfin.solve. This parameter is useful if dolfin.solve is overwritten by a custom solver routine.
- solver_exclude: A list of solvers that are to be excluded from the benchmark.
- preconditioner_exclude: A list of preconditioners that are to be excluded from the benchmark.
- return_best: Option to return the fastest solver result only.
'''
# Retrieve the extended benchmark arguments.
if kwargs.has_key('benchmark'):
benchmark = kwargs.pop('benchmark')
else:
benchmark = False
if kwargs.has_key('solve'):
solve = kwargs.pop('solve')
else:
solve = dolfin.fem.solving.solve
if kwargs.has_key('solver_exclude'):
solver_exclude = kwargs.pop('solver_exclude')
else:
solver_exclude = []
if kwargs.has_key('preconditioner_exclude'):
preconditioner_exclude = kwargs.pop('preconditioner_exclude')
else:
preconditioner_exclude = []
if benchmark:
dolfin.info_blue("Running solver benchmark...")
solver_parameters_set = solver_parameters(
solver_exclude, preconditioner_exclude)
solver_timings = {}
failed_solvers = {}
ret = None
# Perform the benchmark
for parameters in solver_parameters_set:
solver_failed = False
# Replace the existing solver setting with the benchmark one's.
new_args, new_kwargs = replace_solver_settings(
args, kwargs, parameters)
# print "args,", new_args
# print "kwargs;", new_kwargs
# Solve the problem
timer = dolfin.Timer("Solver benchmark")
timer.start()
            try:
                ret = solve(*new_args, **new_kwargs)
            except RuntimeError as e:
                solver_failed = True
                if 'diverged' in e.message.lower():
                    failure_reason = 'diverged'
                else:
                    failure_reason = 'unknown'
timer.stop()
# Check to see if the solver returned a zero solution
if np.all(args[1].array() == 0.0):
solver_failed = True
failure_reason = 'Zero Solution'
# Save the result
parameters_str = parameters[
"linear_solver"] + ", " + parameters["preconditioner"]
if solver_failed:
if not kwargs.has_key("return_best"):
dolfin.info_red(parameters_str + ": solver failed.")
failed_solvers[parameters_str] = failure_reason
else:
# print parameters_str
if not kwargs.has_key("return_best"):
dolfin.info(
parameters_str + ": " + str(timer.value()) + "s.")
solver_timings[parameters_str] = timer.value()
# Print the report
if kwargs.has_key("return_best"):
sortedtimings = sorted(
solver_timings.iteritems(), key=operator.itemgetter(1))
ret = {k[0]: solver_timings[k[0]]
for k in sortedtimings[:int(kwargs["return_best"])]}
print_benchmark_report(ret, {})
else:
print_benchmark_report(solver_timings, failed_solvers)
    else:
        kwargs.pop('return_best', None)
        ret = solve(*args, **kwargs)
return ret
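# Usage sketch (A, x, b are placeholders for an assembled dolfin system,
# with x the solution vector):
#
# import finmag.util.solver_benchmark as bench
# bench.solve(A, x, b, benchmark=True, solver_exclude=['lu'])
#
# With benchmark=False the call simply falls through to dolfin.solve.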
| 6,181 | 37.6375 | 171 |
py
|
finmag
|
finmag-master/src/finmag/util/set_function_values.py
|
"""
Gathers all the ways we know about how to set the values of a dolfin function.
"""
import dolfin as df
import numpy as np
def from_constant(function, constant):
"""
Set function values using dolfin constant.
"""
function.assign(constant)
def from_expression(function, expression):
"""
Set function values using dolfin expression.
"""
temp_function = df.interpolate(expression, function.function_space())
function.vector().set_local(temp_function.vector().get_local())
def from_field(function, field):
"""
Set function values using instance of Field class.
"""
if function.function_space() != field.function_space:
raise ValueError("function spaces do not match")
    function.vector().set_local(field.function.vector().get_local())
def from_function(function, other_function):
"""
Set function values using another dolfin function.
"""
if function.function_space() != other_function.function_space():
raise ValueError("function spaces do not match")
    function.vector().set_local(other_function.vector().get_local())
def from_iterable(function, iterable):
    """
    Set function values using iterable (like list, tuple, numpy array).
    """
    # A sketch: a full nodal array is written to the underlying vector
    # directly; anything shorter (e.g. one constant per vector component)
    # is delegated to df.Constant.
    values = np.asarray(iterable, dtype=float)
    if values.size == function.vector().local_size():
        function.vector().set_local(values.ravel())
    else:
        function.assign(df.Constant(iterable))
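# Usage sketch:
#
# mesh = df.UnitIntervalMesh(2)
# V = df.FunctionSpace(mesh, "CG", 1)
# f = df.Function(V)
# from_constant(f, df.Constant(42.0))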
| 1,300 | 22.654545 | 78 |
py
|
finmag
|
finmag-master/src/finmag/util/mesh_templates_test.py
|
#!/usr/bin/env python
import pytest
import os
import numpy as np
import dolfin as df
from math import pi
from meshes import mesh_volume
from mesh_templates import *
import logging
# loose tolerance for bad approximations (e.g. for a spherical mesh)
TOL1 = 1e-2
TOL2 = 1e-7 # intermediate tolerance (used e.g. for the sum of two meshes;
# the strict tolerance won't work here because Netgen seems to
# mesh combined meshes slightly differently than their components)
# strict tolerance where we expect almost exact values (e.g. for a box mesh)
TOL3 = 1e-14
logger = logging.getLogger("finmag")
def check_mesh_volume(mesh, vol_expected, rtol, atol=0.0):
vol_mesh = mesh_volume(mesh)
logger.debug("Checking mesh volume. Expected: {}, got: {} (relative error: {})".format(
vol_expected, vol_mesh, abs((vol_expected - vol_mesh) / vol_expected)))
if not (np.allclose(vol_mesh, vol_expected, atol=atol, rtol=rtol)):
print "[DDD] Expected volume: {}".format(vol_expected)
print "[DDD] Computed volume: {}".format(vol_mesh)
assert(np.allclose(vol_mesh, vol_expected, atol=atol, rtol=rtol))
def test_mesh_templates(tmpdir):
os.chdir(str(tmpdir))
proto = MeshTemplate()
with pytest.raises(NotImplementedError):
proto.create_mesh('generic_mesh.xml.gz')
def test_disallowed_names(tmpdir):
"""
Check that a ValueError is raised if the user tried to use a name
for the mesh template that coincides with a Netgen primitive.
"""
for name in netgen_primitives:
with pytest.raises(ValueError):
_ = Sphere(r=10, name=name)
def test_hash():
sphere = Sphere(r=10, name='MySphere')
h1 = sphere.hash(maxh=3.0)
h2 = sphere.hash(maxh_MySphere=3.0)
h3 = sphere.hash(maxh=4.0)
assert h1 == '50f3b55770e40ba7a5f8e62d7ff7d327'
assert h1 == h2
assert h3 == '1ee55186811cfc21f22e17fbad35bfed'
def test_sphere(tmpdir):
os.chdir(str(tmpdir))
r = 20.0
sphere = Sphere(r, center=(2, 3, -4))
sphere.create_mesh(maxh=8.0, save_result=True, directory='foo')
sphere.create_mesh(
maxh=10.0, save_result=True, filename='bar/sphere.xml.gz')
assert(
os.path.exists('foo/sphere__center_2_0_3_0_-4_0__r_20_0__maxh_8_0.xml.gz'))
assert(os.path.exists('bar/sphere.xml.gz'))
mesh = sphere.create_mesh(maxh=2.5, save_result=False)
check_mesh_volume(mesh, 4. / 3 * pi * r ** 3, TOL1)
def test_elliptical_nanodisk(tmpdir):
os.chdir(str(tmpdir))
d1 = 30.0
d2 = 20.0
h = 5.0
nanodisk1 = EllipticalNanodisk(
d1, d2, h, center=(2, 3, -4), valign='bottom')
assert(nanodisk1.valign == 'bottom')
nanodisk2 = EllipticalNanodisk(
d1, d2, h, center=(2, 3, -4), valign='center')
assert(nanodisk2.valign == 'center')
nanodisk3 = EllipticalNanodisk(d1, d2, h, center=(2, 3, -4), valign='top')
assert(nanodisk3.valign == 'top')
with pytest.raises(ValueError):
# 'valign' must be one of 'top', 'bottom', 'center'
EllipticalNanodisk(d1, d2, h, center=(2, 3, -4), valign='foo')
mesh = nanodisk1.create_mesh(maxh=2.5)
assert(os.path.exists(
'elliptical_nanodisk__d1_30_0__d2_20_0__h_5_0__center_2_0_3_0_-4_0__valign_bottom__maxh_2_5.xml.gz'))
check_mesh_volume(mesh, pi * (0.5 * d1) * (0.5 * d2) * h, TOL1)
def test_nanodisk(tmpdir):
os.chdir(str(tmpdir))
d = 20.0
h = 5.0
nanodisk1 = Nanodisk(d, h, center=(2, 3, -4), valign='bottom')
assert(nanodisk1.valign == 'bottom')
nanodisk2 = Nanodisk(d, h, center=(2, 3, -4), valign='center')
assert(nanodisk2.valign == 'center')
nanodisk3 = Nanodisk(d, h, center=(2, 3, -4), valign='top')
assert(nanodisk3.valign == 'top')
with pytest.raises(ValueError):
Nanodisk(d, h, center=(2, 3, -4), valign='foo')
mesh = nanodisk1.create_mesh(maxh=2.5)
assert(os.path.exists(
'nanodisk__d_20_0__h_5_0__center_2_0_3_0_-4_0__valign_bottom__maxh_2_5.xml.gz'))
check_mesh_volume(mesh, pi * (0.5 * d) ** 2 * h, TOL1)
def test_mesh_sum(tmpdir):
os.chdir(str(tmpdir))
r1 = 10.0
r2 = 18.0
r3 = 12.0
maxh = 2.0
# This should raise an error because the two spheres have the same name
# (which is given automatically)
sphere1 = Sphere(r1, center=(-30, 0, 0))
sphere2 = Sphere(r2, center=(+30, 0, 0))
with pytest.raises(ValueError):
_ = sphere1 + sphere2
# Same again, but with different names
sphere1 = Sphere(r1, center=(-30, 0, 0), name='sphere_1')
sphere2 = Sphere(r2, center=(+30, 0, 0), name='sphere_2')
sphere3 = Sphere(r3, center=(0, 10, 0), name='sphere_3')
three_spheres = sphere1 + sphere2 + sphere3
mesh = three_spheres.create_mesh(
maxh=maxh, save_result=True, directory=str(tmpdir))
meshfilename = "mesh_sum__3c528b79a337a0ffa711746e7d346c81.xml.gz"
import glob # for debugging only; will be removed again soon
print("[DDD] Potential mesh files found: {}".format(glob.glob('*.xml.gz')))
assert(os.path.exists(os.path.join(str(tmpdir), meshfilename)))
vol1 = mesh_volume(sphere1.create_mesh(maxh=maxh))
vol2 = mesh_volume(sphere2.create_mesh(maxh=maxh))
vol3 = mesh_volume(sphere3.create_mesh(maxh=maxh))
vol_exact = sum([4. / 3 * pi * r ** 3 for r in [r1, r2, r3]])
check_mesh_volume(mesh, vol_exact, TOL1)
check_mesh_volume(mesh, vol1 + vol2 + vol3, TOL2)
def test_mesh_difference(tmpdir):
"""
Create two boxes with some overlap and subtract the second from the first.
Then check that the volume of the remaining part is as expected.
"""
os.chdir(str(tmpdir))
# Coordinates of the top-right-rear corner of box1 and
# the bottom-left-front corner of box2.
x1, y1, z1 = 50.0, 30.0, 20.0
x2, y2, z2 = 30.0, 20.0, 15.0
# Create the overlapping boxes
box1 = Box(0, 0, 0, x1, y1, z1, name='box1')
box2 = Box(x2, y2, z2, x1 + 10, y1 + 10, z1 + 10, name='box2')
box1_minus_box2 = box1 - box2
mesh = box1_minus_box2.create_mesh(
maxh=10.0, save_result=True, directory=str(tmpdir))
meshfilename = "mesh_difference__dd77171c4364ace36c40e5f5fe94951f.xml.gz"
assert(os.path.exists(os.path.join(str(tmpdir), meshfilename)))
vol_box1_exact = x1 * y1 * z1
vol_overlap_exact = (x1 - x2) * (y1 - y2) * (z1 - z2)
vol_exact = vol_box1_exact - vol_overlap_exact
check_mesh_volume(mesh, vol_exact, TOL3)
def test_maxh_with_mesh_primitive(tmpdir):
os.chdir(str(tmpdir))
prim = MeshPrimitive(name='foo')
assert(prim._get_maxh(maxh=2.0, maxh_foo=5.0) == 5.0)
assert(prim._get_maxh(maxh=2.0, maxh_bar=5.0) == 2.0)
with pytest.raises(ValueError):
prim._get_maxh(random_arg=42)
# We don't use full CSG strings here because we only want to test the maxh
# functionality
prim = MeshPrimitive(name='foo', csg_string='-maxh = {maxh_foo}')
assert(prim.csg_stub(maxh=2.0) == '-maxh = 2.0')
assert(prim.csg_stub(maxh_foo=3.0) == '-maxh = 3.0')
# 'personal' value of maxh should take precedence over generic one
assert(prim.csg_stub(maxh=2.0, maxh_foo=3.0) == '-maxh = 3.0')
with pytest.raises(ValueError):
prim.csg_stub(maxh_bar=4.0)
s = Sphere(r=10.0)
s.csg_stub(maxh=2.0)
s = Sphere(r=5.0, name='my_sphere')
s.csg_stub(maxh_my_sphere=3.0)
def test_mesh_specific_maxh(tmpdir):
"""
Check that we can pass in mesh-specific values of maxh by
providing a keyword argument of the form 'maxh_NAME', where
NAME is the name of the MeshTemplate.
"""
os.chdir(str(tmpdir))
sphere = Sphere(r=10.0, name='foobar')
mesh1 = sphere.create_mesh(maxh=5.0)
mesh2 = sphere.create_mesh(maxh_foobar=5.0)
with pytest.raises(ValueError):
sphere.create_mesh(maxh_quux=5.0)
def test_global_maxh_can_be_omitted_if_specific_maxh_is_provided(tmpdir):
os.chdir(str(tmpdir))
# Providing a global value for maxh or only the value specific to the
# sphere should both work.
sphere = Sphere(r=10.0, name='foobar')
mesh1 = sphere.create_mesh(maxh=3.0)
mesh2 = sphere.create_mesh(maxh_foobar=3.0)
# Same with a combined mesh: if all specific values for maxh are
# given then the global maxh can be omitted.
sphere1 = Sphere(r=10, name='sphere1')
sphere2 = Sphere(r=10, center=(20, 0, 0), name='sphere2')
two_spheres = sphere1 + sphere2
mesh = two_spheres.create_mesh(maxh_sphere1=4.0, maxh_sphere2=5.0)
def test_different_mesh_discretisations_for_combined_meshes(tmpdir):
"""
Check that we can create a mesh consisting of two spheres for which
we provide a generic value of maxh as well as a specific value for
the second spheres.
"""
os.chdir(str(tmpdir))
r1 = 10.0
r2 = 20.0
sphere1 = Sphere(r1, center=(-30, 0, 0), name='sphere1')
sphere2 = Sphere(r2, center=(+30, 0, 0), name='sphere2')
two_spheres = sphere1 + sphere2
# This should render the two spheres with different mesh discretisations.
# XXX TODO: How to best check that this worked correctly?!? Currently my best idea is
# to create the mesh twice, once with a fine and once with a coarse discretisation
# for the second sphere, and to check that the second mesh has fewer
# vertices.
mesh1 = two_spheres.create_mesh(
maxh=5.0, maxh_sphere2=8.0, save_result=True, directory=str(tmpdir))
mesh2 = two_spheres.create_mesh(
maxh=5.0, maxh_sphere2=10.0, save_result=True, directory=str(tmpdir))
assert(mesh1.num_vertices() > mesh2.num_vertices())
def test_box(tmpdir):
os.chdir(str(tmpdir))
x0, y0, z0 = 0, 0, 0
x1, y1, z1 = 10, 20, 30
box = Box(x0, y0, z0, x1, y1, z1)
box.create_mesh(maxh=8.0, save_result=True, directory='foo')
box.create_mesh(maxh=10.0, save_result=True, filename='bar/box.xml.gz')
assert(
os.path.exists('foo/box__0_0__0_0__0_0__10_0__20_0__30_0__maxh_8_0.xml.gz'))
assert(os.path.exists('bar/box.xml.gz'))
mesh = df.Mesh('bar/box.xml.gz')
check_mesh_volume(mesh, (x1 - x0) * (y1 - y0) * (z1 - z0), TOL3)
| 10,135 | 34.564912 | 109 |
py
|
finmag
|
finmag-master/src/finmag/util/pbc_test.py
|
import dolfin as df
from pbc2d import PeriodicBoundary2D, PeriodicBoundary1D
def test_pbc1d_2dmesh():
mesh = df.UnitSquareMesh(2, 2)
pbc = PeriodicBoundary1D(mesh)
S = df.FunctionSpace(mesh, "Lagrange", 1, constrained_domain=pbc)
expr = df.Expression('cos(x[0])', degree=1)
M = df.interpolate(expr, S)
assert abs(M(0, 0.1) - M(1, 0.1)) < 1e-15
assert abs(M(0, 0) - M(1, 0)) < 1e-15
assert abs(M(0, 1) - M(1, 1)) < 2e-15
def test_pbc2d_2dmesh():
mesh = df.UnitSquareMesh(2, 2)
pbc = PeriodicBoundary2D(mesh)
S = df.FunctionSpace(mesh, "Lagrange", 1, constrained_domain=pbc)
expr = df.Expression('cos(x[0])+x[1]', degree=1)
M = df.interpolate(expr, S)
assert abs(M(0, 0.1) - M(1, 0.1)) < 1e-15
assert abs(M(0, 0) - M(1, 0)) < 1e-15
assert abs(M(0, 0) - M(1, 1)) < 5e-15
# leftover statement, unused by the tests:
# mesh = df.BoxMesh(df.Point(0, 0, 0), df.Point(1, 1, 1), 1, 1, 2)
def test_pbc2d_3dmesh():
mesh = df.BoxMesh(df.Point(0, 0, 0), df.Point(1, 1, 1), 2, 2, 1)
pbc = PeriodicBoundary2D(mesh)
S = df.FunctionSpace(mesh, "Lagrange", 1, constrained_domain=pbc)
expr = df.Expression('cos(x[0])+x[1]', degree=1)
M = df.interpolate(expr, S)
assert abs(M(0, 0, 0) - M(1, 0, 0)) < 1e-15
assert abs(M(0, 0, 0) - M(1, 1, 0)) < 1e-15
assert abs(M(0, 0.1, 0) - M(1, 0.1, 0)) < 1e-15
assert abs(M(0, 0, 0) - M(0.5, 0.5, 0)) > 0.1
assert abs(M(0, 0, 1) - M(0.5, 0.5, 1)) > 0.1
def test_pbc2d_3dmesh2():
    # mesh = df.BoxMesh(df.Point(0, 0, 0), df.Point(50, 50, 3), 15, 15, 1)  # shadowed by the line below
mesh = df.BoxMesh(df.Point(0, 0, 0), df.Point(1, 1, 1), 2, 2, 1)
pbc = PeriodicBoundary2D(mesh)
S = df.VectorFunctionSpace(mesh, "Lagrange", 1, constrained_domain=pbc)
expr = df.Expression(('cos(x[0]+x[2])', 'sin(x[0]+x[2])', '0'), degree=1)
M = df.interpolate(expr, S)
#file = df.File('poisson.pvd')
#file << M
# df.plot(M)
# df.interactive()
print abs(M(0, 0, 1) - M(0.5, 0.5, 1))
if __name__ == "__main__":
test_pbc1d_2dmesh()
test_pbc2d_2dmesh()
test_pbc2d_3dmesh()
test_pbc2d_3dmesh2()
| 2,111 | 24.756098 | 77 |
py
|
finmag
|
finmag-master/src/finmag/util/progress_bar.py
|
import progressbar
from datetime import datetime, timedelta
DISPLAY_DELAY = timedelta(seconds=1)
class ProgressBar(object):
def __init__(self, maximum_value):
self.pb = progressbar.ProgressBar(maxval=maximum_value,
widgets=[progressbar.ETA(), progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()])
self.display_time = datetime.now() + DISPLAY_DELAY
def update(self, value):
if datetime.now() > self.display_time:
self.pb.update(value)
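# Usage sketch: the bar stays invisible until DISPLAY_DELAY has passed,
# so very short runs never flash a progress bar.
#
# bar = ProgressBar(100)
# for i in range(100):
#     do_work()  # placeholder
#     bar.update(i)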
| 535 | 30.529412 | 133 |
py
|
finmag
|
finmag-master/src/finmag/util/time_counter.py
|
# FinMag - a thin layer on top of FEniCS to enable micromagnetic multi-physics simulations
# Copyright (C) 2012 University of Southampton
# Do not distribute
#
# CONTACT: [email protected]
#
# AUTHOR(S) OF THIS FILE: Dmitri Chernyshenko ([email protected])
from datetime import datetime, timedelta
import math
def td_seconds(td):
return td.microseconds * 1e-6 + td.seconds + td.days * 24 * 3600
# Note to self: using Student's t distribution here makes no sense since subsequent
# time measurements are not independent. Need to find a better way to
# produce confidence intervals...
STUDENT_T_095 = [1e100,
6.31375, 2.91999, 2.35336, 2.13185, 2.01505, 1.94318, 1.89458,
1.85955, 1.83311, 1.81246, 1.79588, 1.78229, 1.77093, 1.76131,
1.75305, 1.74588, 1.73961, 1.73406, 1.72913, 1.72472, 1.72074,
1.71714, 1.71387, 1.71088, 1.70814, 1.70562, 1.70329, 1.70113,
1.69913, 1.69726, 1.69552, 1.69389, 1.69236, 1.69092, 1.68957,
1.6883, 1.68709, 1.68595, 1.68488, 1.68385, 1.68288, 1.68195,
1.68107, 1.68023, 1.67943, 1.67866, 1.67793, 1.67722, 1.67655,
1.67591, 1.67528, 1.67469, 1.67412, 1.67356, 1.67303, 1.67252,
1.67203, 1.67155, 1.67109, 1.67065, 1.67022, 1.6698, 1.6694, 1.66901,
1.66864, 1.66827, 1.66792, 1.66757, 1.66724, 1.66691, 1.6666,
1.66629, 1.666, 1.66571, 1.66543, 1.66515, 1.66488, 1.66462, 1.66437,
1.66412, 1.66388, 1.66365, 1.66342, 1.6632, 1.66298, 1.66277,
1.66256, 1.66235, 1.66216, 1.66196, 1.66177, 1.66159, 1.6614,
1.66123, 1.66105, 1.66088, 1.66071, 1.66055, 1.66039, 1.66023
]
class counter:
def __init__(self, max_time_sec=1., min_time_sec=0.3, min_error=0.01, min_groups=3, max_groups=10, flops_per_iter=None, bytes_per_iter=None):
self.min_time_sec = min_time_sec
self.max_time_sec = max_time_sec
self.min_error_ratio = min_error
self.start_time = self.rep_start_time = self.rep_end_time = datetime.now()
self.min_groups = min_groups
self.max_groups = max_groups
self.group_times = []
self.rep_curr = -1
self.rep_count = 1
self.group_size = 0
self.group_count = 0
self.sum_times = 0
self.sum_times_sq = 0
self.flops_per_iter = flops_per_iter
self.bytes_per_iter = bytes_per_iter
def next(self):
self.rep_curr += 1
if self.rep_curr < self.rep_count:
return True
return self.advance_rep()
def advance_rep(self):
self.rep_end_time = datetime.now()
diff = td_seconds(self.rep_end_time - self.rep_start_time)
if not self.group_count:
# still need to find how many times to run the test until it takes
# at least max_time_sec/MAX_GROUPS seconds
if diff < self.min_time_sec / self.max_groups:
self.rep_count *= 2
self.rep_curr = 0
self.rep_start_time = self.rep_end_time
return True
else:
self.group_size = self.rep_count
self.add_time(diff)
self.rep_curr = 0
self.rep_start_time = self.rep_end_time
if self.group_count < self.min_groups or td_seconds(self.rep_start_time - self.start_time) < self.min_time_sec:
return True
if td_seconds(self.rep_start_time - self.start_time) > self.max_time_sec:
return False
return self.confidence_time_sec() / self.mean_time_sec() > self.min_error_ratio
def add_time(self, time):
self.sum_times += time
self.sum_times_sq += time * time
self.group_count += 1
self.group_times.append(time)
def mean_time_sec(self):
return self.sum_times / self.group_size / self.group_count
def calls_per_sec(self, factor):
return 1. * factor / self.mean_time_sec()
def confidence_time_sec(self):
if self.group_count < len(STUDENT_T_095):
cutoff = STUDENT_T_095[self.group_count]
else:
cutoff = 1.64485
s2 = (self.sum_times_sq - self.sum_times **
2 / self.group_count) / (self.group_count - 1)
conf = cutoff * math.sqrt(s2 / self.group_count)
return conf / self.group_size
def group_times_str(self, fmt, mult):
return " ".join(fmt % (mult * x / self.group_size) for x in self.group_times)
def flops_str(self):
if not self.flops_per_iter:
return ""
mean = self.mean_time_sec()
return ", %.2f Gflops" % (self.flops_per_iter / mean / 1e9)
def bytes_str(self):
if not self.bytes_per_iter:
return ""
mean = self.mean_time_sec()
return ", %.2f GB/sec" % (self.bytes_per_iter / mean / 1e9)
def __str__(self):
mean = self.mean_time_sec()
conf = self.confidence_time_sec()
if mean > 1e-3:
return "%.1f calls/sec, %.1f ms/call%s%s (%.1f%% error), [%s] (%d per group)" % (1 / mean, mean * 1e3, self.flops_str(), self.bytes_str(), conf / mean * 100, self.group_times_str("%.1f", 1e3), self.group_size)
else:
return "%.0f calls/sec, %.1f us/call%s%s (%.1f%% error), [%s] (%d per group)" % (1 / mean, mean * 1e6, self.flops_str(), self.bytes_str(), conf / mean * 100, self.group_times_str("%.1f", 1e6), self.group_size)
def time_str(time_sec):
if type(time_sec) is timedelta:
time_sec = time_sec.total_seconds()
if time_sec >= 1.:
return "%.1f s" % (time_sec,)
if time_sec >= 1e-3:
return "%.0fms" % (time_sec * 1e3,)
return "%.0fus" % (time_sec * 1e6,)
if __name__ == "__main__":
c = counter()
while c.next():
pass
print c
| 5,889 | 38.797297 | 221 |
py
|
finmag
|
finmag-master/src/finmag/util/visualization.py
|
from __future__ import division
import StringIO
import sh
import numpy as np
import textwrap
import logging
import tempfile
import shutil
import sys
import os
import re
import dolfin as df
import IPython.core.display
from visualization_impl import _axes, find_unused_X_display
from math import sin, cos, pi
logger = logging.getLogger("finmag")
# Unfortunately, there is an incompatibility between the Python
# modules 'paraview.servermanager' and 'vtk' which can cause a
# segfault if the vtk module is imported first. Since we cannot
# control whether this is done by the user (e.g. implicitly by
# importing dolfin), the workaround for now is to save a mini-script
# with the Paraview rendering command to a temporary file and execute
# that in a separate process.
#
# The actual plotting code is contained in the string 'plotting_code'
# below. Note that some of the code in there is intentionally
# duplicated in the rest of this file. This is so that once the
# segfault issue is resolved we can simply remove everything else and
# only take the code from the string, and it should hopefully work
# without changes.
# XXX TODO: The function 'find_valid_X_display' should ideally be
# defined in this module, but this isn't possible due to the paraview
# incompatibilities mentioned above. To avoid code duplication and
# errors due to not keeping the two in sync, we only define it in
# visualization_impl.py and import it here.
from visualization_impl import find_valid_X_display
from finmag.util import configuration
def flight_path_rotation(start_pos, axis=[0, 0, 1], angle=360):
"""
Return a function `f(t)` which defines a 'flight path' of a
rotating camera at time `t` (where `t` runs from 0 to 1).
*Arguments*
start_pos:
Starting position of the camera at time t=0.
axis:
Rotation axis. Default: [0, 0, 1] (i.e., the z-axis)
angle:
The angle (in degrees) of the entire rotating motion.
Default: 360 (= one full rotation).
"""
start_pos = np.asarray(start_pos)
axis_normalised = np.asarray(axis / np.linalg.norm(axis))
angle_rad = angle * pi / 180.
# Find the radial component of the starting position vector
r1 = start_pos - np.dot(start_pos, axis_normalised) * axis_normalised
# P0 is the 'anchor point' on the rotation axis
P0 = start_pos - r1
# Find another vector orthogonal to both the axis and to r1 (of
# the same length as r1). Together, r1 and r2 define the rotation
# plane.
r2 = np.cross(axis_normalised, r1)
print "P0: {}".format(P0)
print "r1: {}".format(r1)
print "r2: {}".format(r2)
def flight_path(t):
pos = P0 + cos(t * angle_rad) * r1 + sin(t * angle_rad) * r2
return pos
return flight_path
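# Illustrative check (not part of the original module): after a full 360
# degree rotation the camera should be back at its starting position.
def _example_flight_path_rotation():
    f = flight_path_rotation([10.0, 0.0, 5.0], axis=[0, 0, 1], angle=360)
    assert np.allclose(f(0.0), f(1.0))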
def flight_path_straight_line(start_pos, end_pos):
"""
Return a function `f(t)` which defines a 'flight path' of a camera
moving along a straight line between `start_pos` and `end_pos`
(where `t` runs from 0 to 1).
"""
start_pos = np.asarray(start_pos)
end_pos = np.asarray(end_pos)
def flight_path(t):
return (1 - t) * start_pos + t * end_pos
return flight_path
def render_paraview_scene(
pvd_file,
outfile=None,
field_name='m',
timesteps=None,
camera_position=[0, -200, +200],
camera_focal_point=[0, 0, 0],
camera_view_up=[0, 0, 1],
view_size=(800, 600),
magnification=1,
fit_view_to_scene=True,
color_by_axis=0,
colormap='coolwarm',
rescale_colormap_to_data_range=True,
show_colorbar=True,
colorbar_label_format='%-#5.2g',
add_glyphs=True,
glyph_type='cones',
glyph_scale_factor=1.0,
glyph_random_mode=True,
glyph_mask_points=True,
glyph_max_number_of_points=10000,
show_orientation_axes=False,
show_center_axes=False,
representation="Surface With Edges",
palette='screen',
use_parallel_projection=False,
trim_border=True,
rescale=None,
diffuse_color=None,
debug=False,
use_display=None):
# Convert color_by_axis to integer and store the name separately
try:
color_by_axis = _axes[color_by_axis.lower()]
except AttributeError:
        if color_by_axis not in [0, 1, 2, -1]:
raise ValueError("color_by_axis must have one of the values "
"[0, 1, 2, -1] or ['x', 'y', 'z', 'magnitude']. "
"Got: {}".format(color_by_axis))
# Use absolute path for filenames because the script will be
# written to a temporary directory in a different location.
pvd_file = os.path.abspath(pvd_file)
if outfile is None:
_, outfile = tempfile.mkstemp(suffix='.png')
outfile_is_temporary = True
else:
outfile_is_temporary = False
outfile = os.path.abspath(outfile)
outdir = os.path.dirname(outfile)
if not os.path.exists(outdir):
logger.debug(
"Creating non-existing directory component '{}' of output filename.".format(outdir))
os.makedirs(outdir)
logger.debug("Done.")
#
# Create the temporary script. The string 'script_string' will
# contain a call to the function in 'visualization_impl.py' which
# has all the parameter values filled in correctly.
#
tmpdir = tempfile.mkdtemp()
scriptfile = os.path.join(tmpdir, 'render_scene.py')
script_string = textwrap.dedent("""
from visualization_impl import render_paraview_scene, find_valid_X_display
import os
if not os.environ.has_key('DISPLAY'):
display = find_valid_X_display()
if display is None:
raise RuntimeError("Could not render Paraview scene as no valid X display was found.")
else:
os.environ['DISPLAY'] = ':' + str(display)
render_paraview_scene(
'{}', '{}', {}, {},
{}, {}, {},
{}, {}, {}, {},
'{}', {}, {},
'{}', {}, '{}',
{}, {}, {},
{}, {},
{}, '{}', '{}', {},
{}, {}, {})
""".format(
pvd_file, outfile, repr(field_name), re.sub('\n', '', repr(timesteps)),
camera_position, camera_focal_point, camera_view_up,
view_size, magnification, fit_view_to_scene, color_by_axis,
colormap, rescale_colormap_to_data_range, show_colorbar,
colorbar_label_format, add_glyphs, glyph_type,
glyph_scale_factor, glyph_random_mode, glyph_mask_points,
glyph_max_number_of_points, show_orientation_axes,
show_center_axes, representation, palette, use_parallel_projection,
trim_border, rescale, diffuse_color))
with open(scriptfile, 'w') as f:
f.write(script_string)
vis_impl_script = os.path.join(
os.path.dirname(__file__), './visualization_impl.py')
if not os.path.exists(vis_impl_script):
vis_impl_script = os.path.join(
os.path.dirname(__file__), './visualization_impl.so')
if not os.path.exists(vis_impl_script):
raise RuntimeError(
"Cannot use Paraview visualisation. This should not happen.")
shutil.copy(vis_impl_script, tmpdir)
# Execute the script in a separate process
curdir_bak = os.getcwd()
xpra_display = None
use_xpra = configuration.get_config_option(
"visualization", "use_xpra", "True")
try:
display_bak = os.environ['DISPLAY']
except KeyError:
display_bak = None
    # Create the output buffers before the try block so that the error handler
    # below can always read them, even if the failure happens before the
    # script is actually run (e.g. while starting xpra).
    script_stdout = StringIO.StringIO()
    script_stderr = StringIO.StringIO()
    try:
os.chdir(tmpdir)
if use_display is None:
use_display = configuration.get_config_option(
"visualization", "use_display", None)
if use_display == 'None':
use_display = None
if use_display is None and use_xpra.lower() != "false":
# Try to create a display using 'xpra'
try:
# Check whether 'xpra' is installed
sh.xpra('--version')
xpra_display = find_unused_X_display(xrange(10, 100))
sh.xpra('start', ':{}'.format(xpra_display))
use_display = xpra_display
logger.debug(
"Rendering Paraview scene on display :{} using xpra.".format(xpra_display))
except sh.CommandNotFound:
logger.warning(
"Could not find the 'xpra' executable. You may want to "
"install it to avoid annoying pop-up windows from "
"Paraview. Under Debian/Ubuntu you can install it via "
"'sudo apt-get install xpra'.")
xpra_display = None
if use_display is not None:
os.environ['DISPLAY'] = ':{}'.format(use_display)
        sh.python('render_scene.py', _out=script_stdout, _err=script_stderr)
except sh.ErrorReturnCode as ex:
logger.error("Could not render Paraview scene. Stdout and stderr of the script: "
"'{}', '{}'".format(script_stdout.getvalue(), script_stderr.getvalue()))
# raise
finally:
        if debug:
logger.debug("Temporary directory '{}' kept for debugging. You "
"can try to run 'render_script.py' manually "
"there.".format(tmpdir))
else:
shutil.rmtree(tmpdir)
os.chdir(curdir_bak) # change back into the original directory
if xpra_display is not None:
# XXX TODO: It may be nice to keep the xpra display open
# until Finmag exits, because we are likely to
# render more than one snapshot.
sh.xpra('stop', ':{}'.format(xpra_display))
if display_bak is not None:
os.environ['DISPLAY'] = display_bak
else:
os.environ.pop('DISPLAY', None)
try:
image = IPython.core.display.Image(filename=outfile)
except IOError:
# Something went wrong (missing X display?); let's not choke but return
# None instead.
image = None
if outfile_is_temporary:
# Clean up temporary file
os.remove(outfile)
return image
def plot_dolfin_function(f, **kwargs):
"""
Uses Paraview to plot the given dolfin Function (currently this
only works for a dolfin.Function representing a 3D vector field).
Returns an IPython.display.Image object containing the rendered
scene. All keyword arguments are passed on to the function
`finmag.util.visualization.render_paraview_scene`, which is used
internally.
"""
# Check that f represents a 3D vector field defined on a 3D mesh.
if not f.element().value_shape() == (3,):
raise TypeError(
"The function to be plotted must represent a 3D vector field.")
f.rename('f', 'f')
tmpdir = tempfile.mkdtemp()
tmpfilename = os.path.join(tmpdir, 'dolfin_function.pvd')
try:
# Save the function to a temporary file
funcfile = df.File(tmpfilename)
funcfile << f
# ignore this argument as we are using our own field_name
kwargs.pop('field_name', None)
return render_paraview_scene(tmpfilename, field_name='f', **kwargs)
finally:
shutil.rmtree(tmpdir)
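# Illustrative usage sketch (not part of the original module; assumes an
# existing dolfin mesh):
#
#   mesh = df.UnitCubeMesh(4, 4, 4)
#   V = df.VectorFunctionSpace(mesh, 'CG', 1, dim=3)
#   f = df.interpolate(df.Constant((1.0, 0.0, 0.0)), V)
#   img = plot_dolfin_function(f, color_by_axis='x')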
# Set the docstring of the wrapped function so that it reflects the
# actual implementation.
from visualization_impl import render_paraview_scene as render_scene_impl
render_paraview_scene.__doc__ = render_scene_impl.__doc__
| 11,824 | 35.16208 | 108 |
py
|
finmag
|
finmag-master/src/finmag/util/dmi_helper_test.py
|
import pytest
import numpy as np
import dolfin as df
from finmag import Simulation
from finmag.energies import Exchange, DMI, Zeeman
from dmi_helper import compute_skyrmion_number_2d
def init_skx_down(pos):
x = pos[0]
y = pos[1]
if (x - 50) ** 2 + (y - 50) ** 2 < 15 ** 2:
return (0, 0, -1)
else:
return (0, 0, 1)
def compute_skyrmion_number_2d_example():
mesh = df.CircleMesh(df.Point(0, 0), 20, 4)
Ms = 3.84e5
mu0 = 4 * np.pi * 1e-7
Hz = 0.2
sim = Simulation(mesh, Ms, unit_length=1e-9, name='sim')
sim.do_precession = False
sim.set_m(init_skx_down)
sim.add(Exchange(8.78e-12))
sim.add(DMI(-1.58e-3))
sim.add(Zeeman((0, 0, Hz / mu0)))
sim.relax(stopping_dmdt=1, dt_limit=1e-9)
#sim.m_field.plot_with_dolfin(interactive=True)
print compute_skyrmion_number_2d(sim.m_field.f)
def test_compute_skyrmion_number_2d_pbc():
mesh = df.RectangleMesh(df.Point(0, 0), df.Point(100, 100), 40, 40)
Ms = 8.6e5
sim = Simulation(mesh, Ms, pbc='2d', unit_length=1e-9)
sim.set_m(init_skx_down)
sim.add(Exchange(1.3e-11))
sim.add(DMI(D=4e-3))
sim.add(Zeeman((0, 0, 0.45 * Ms)))
sim.do_precession = False
sim.relax(stopping_dmdt=1, dt_limit=1e-9)
#df.plot(sim.m_field.f)
#df.interactive()
print np.max(sim.m_field.as_array())
sky_num = compute_skyrmion_number_2d(sim.m_field.f)
print 'sky_num = %g' % sky_num
assert sky_num < -0.95 and sky_num > -1.0
if __name__ == "__main__":
#compute_skyrmion_number_2d_example()
test_compute_skyrmion_number_2d_pbc()
| 1,611 | 21.704225 | 71 |
py
|
finmag
|
finmag-master/src/finmag/util/run_native_tests.py
|
if __name__ == "__main__":
import os
import sys
from finmag.util.native_compiler import pipe_output
os.chdir(os.path.dirname(os.path.abspath(__file__)) + "/../../../native")
res = pipe_output("make test")
sys.exit(res)
| 244 | 26.222222 | 77 |
py
|
finmag
|
finmag-master/src/finmag/util/fft.py
|
from __future__ import division
from scipy.interpolate import InterpolatedUnivariateSpline
from finmag.util.helpers import probe
from finmag.util.fileio import Tablereader
from glob import glob
from time import time
import numpy as np
import dolfin as df
import logging
from numpy import sin, cos, pi
logger = logging.getLogger("finmag")
def _aux_fft_m(filename, t_step=None, t_ini=None, t_end=None, subtract_values='first', vertex_indices=None):
"""
Helper function to compute the Fourier transform of magnetisation
data, which is either read from a single .ndt file (for spatially
averaged magnetisation) or from a series of .npy files (for
spatially resolved data). If necessary, the data is first
resampled at regularly spaced time intervals.
"""
# Load the data; extract time steps and magnetisation
if filename.endswith('.ndt'):
data = Tablereader(filename)
ts = data['time']
mx = data['m_x']
my = data['m_y']
mz = data['m_z']
elif filename.endswith('.npy'):
        if t_ini is None or t_end is None or t_step is None:
raise ValueError(
"If 'filename' represents a series of .npy files then t_ini, t_end and t_step must be given explicitly.")
num_steps = int(np.round((t_end - t_ini) / t_step)) + 1
ts = np.linspace(t_ini, t_end, num_steps)
npy_files = sorted(glob(filename))
N = len(npy_files)
if (N != len(ts)):
raise RuntimeError(
"Number of timesteps (= {}) does not match number of .npy files found ({}). Aborting.".format(len(ts), N))
logger.debug("Found {} .npy files.".format(N))
        if vertex_indices is not None:
num_vertices = len(vertex_indices)
else:
num_vertices = len(np.load(npy_files[0])) // 3
vertex_indices = np.arange(num_vertices)
mx = np.zeros((N, num_vertices))
my = np.zeros((N, num_vertices))
mz = np.zeros((N, num_vertices))
for (i, npyfile) in enumerate(npy_files):
a = np.load(npyfile)
aa = a.reshape(3, -1)
mx[i, :] = aa[0, vertex_indices]
my[i, :] = aa[1, vertex_indices]
mz[i, :] = aa[2, vertex_indices]
else:
raise ValueError(
"Expected a single .ndt file or a wildcard pattern referring to a series of .npy files. Got: {}.".format(filename))
# If requested, subtract the first value of the time series
# (= relaxed state), or the average, or some other value.
if subtract_values == 'first':
mx -= mx[0]
my -= my[0]
mz -= mz[0]
elif subtract_values == 'average':
mx -= mx.mean(axis=0)
my -= my.mean(axis=0)
mz -= mz.mean(axis=0)
    elif subtract_values is not None:
try:
(sx, sy, sz) = subtract_values
mx -= sx
my -= sy
mz -= sz
except:
raise ValueError(
"Unsupported value for 'subtract_values': {}".format(subtract_values))
# Try to guess sensible values of t_ini, t_end and t_step if none
# were specified.
if t_step is None:
t_step = ts[1] - ts[0]
if not(np.allclose(t_step, np.diff(ts))):
raise ValueError("A value for t_step must be explicitly provided "
"since timesteps in the file '{}' are not "
"equidistantly spaced.".format(filename))
f_sample = 1. / t_step # sampling frequency
if t_ini is None:
t_ini = ts[0]
if t_end is None:
t_end = ts[-1]
# Resample the magnetisation if it was recorded at uneven
# timesteps (or not at the timesteps specified for the Fourier
# transform).
eps = 1e-8
num_steps = int(np.round((t_end - t_ini) / t_step)) + 1
ts_resampled = np.linspace(t_ini, t_end, num_steps)
if (ts.shape == ts_resampled.shape and np.allclose(ts, ts_resampled, atol=0, rtol=1e-7)):
#logger.debug("Data already given at the specified regular intervals. No need to resample.")
mx_resampled = mx
my_resampled = my
mz_resampled = mz
else:
logger.debug("Resampling data at specified timesteps.")
# Interpolating functions for mx, my, mz
f_mx = InterpolatedUnivariateSpline(ts, mx)
f_my = InterpolatedUnivariateSpline(ts, my)
f_mz = InterpolatedUnivariateSpline(ts, mz)
# Sample the interpolating functions at regularly spaced time steps
mx_resampled = np.array([f_mx(t) for t in ts_resampled])
my_resampled = np.array([f_my(t) for t in ts_resampled])
mz_resampled = np.array([f_mz(t) for t in ts_resampled])
fft_mx = np.fft.rfft(mx_resampled, axis=0)
fft_my = np.fft.rfft(my_resampled, axis=0)
fft_mz = np.fft.rfft(mz_resampled, axis=0)
# When using np.fft.fftfreq, the last frequency sometimes becomes
# negative; to avoid this we compute the frequencies by hand.
n = len(fft_mx)
freqs = np.arange(n) / (t_step * len(ts_resampled))
return freqs, fft_mx, fft_my, fft_mz
def filter_frequency_component(signal, k, t_start, t_end, ts_sampling=None):
"""
Filter the given signal by only keeping the frequency component
corresponding to the k-th Fourier coefficient.
XXX TODO: This is probably not the best interface. We should require
a frequency as the input and compute the index automatically.
*Arguments*
signal : numpy array
Must be a 2d array, where the first index represents time and
the second index the data. Thus `signal[i, :]` is the signal
at time `i`.
k : int
The index of the Fourier coefficient which should be used for
filtering.
t_start, t_end : float
First and last time step of the signal. Note that this function
assumes that the time steps are evenly spaced.
ts_sampling : numpy array
The time steps at which the filtered signal should be evaluated.
"""
n = len(signal)
# Fourier transform the signal
t0 = time()
rfft_vals = np.fft.rfft(signal, axis=0)
t1 = time()
logger.debug(
"Computing the Fourier transform took {:.2g} seconds".format(t1 - t0))
#rfft_freqs = np.arange(n // 2 + 1) / (dt*n)
# Only keep the Fourier coefficients for the given frequency component
A_k = rfft_vals[k]
    # Since the DFT formula knows nothing about the true timesteps at which the
# signal is given, we need to rescale the sampling timesteps so that they
# lie in the interval [0, 2*pi*k]
if ts_sampling is None:
ts_rescaled = (2 * pi * k * np.arange(n) / n)
else:
ts_rescaled = (ts_sampling - t_start) / \
(t_end - t_start) * 2 * pi * k * (n - 1) / n
# 'Transpose' the 1D vector so that the linear combination below
# produces the correct 2D output format.
ts_rescaled = ts_rescaled[:, np.newaxis]
signal_filtered = 2.0 / n * \
(A_k.real * cos(ts_rescaled) - A_k.imag * sin(ts_rescaled))
return signal_filtered
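# Illustrative sanity check (not part of the original module): a pure cosine
# with k oscillations over the sampling window should be reproduced, up to
# floating point error, when filtered with that same k.
def _check_filter_frequency_component():
    n, k = 128, 4
    ts = np.linspace(0, 1, n, endpoint=False)
    signal = cos(2 * pi * k * ts)[:, np.newaxis]
    filtered = filter_frequency_component(signal, k, ts[0], ts[-1])
    assert np.allclose(signal, filtered)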
def compute_power_spectral_density(filename, t_step=None, t_ini=None, t_end=None, subtract_values='first', restrict_to_vertices=None):
"""
Compute the power spectral densities (= squares of the absolute
values of the Fourier coefficients) of the x, y and z components
of the magnetisation m, where the magnetisation data is either
    read from a .ndt file (for spatially averaged magnetisation; not
recommended, see below) or from a series of data files in .npy
format (recommended). If necessary, the data is first resampled at
regularly spaced time intervals.
Note that this performs a real-valued Fourier transform (i.e. it
uses np.fft.rfft internally) and thus does not return Fourier
coefficients belonging to negative frequencies.
If `filename` is the name of a single .ndt file then the Fourier
    transform of the average magnetisation is computed. Note that this
is *not* recommended since it may not detect modes that have
certain symmetries which are averaged out by this method. A better
way is to pass in a series of .npy files, which takes the
spatially resolved magnetisation into account.
*Arguments*
filename:
The .ndt file or .npy files containing the magnetisation values.
In the second case a pattern should be given (e.g. 'm_ringdown*.npy').
t_step:
Interval between consecutive time steps in the resampled data.
If the timesteps in the .ndt file are equidistantly spaced,
this distance is used as the default value.
t_ini:
Initial time for the resampled data (all input data before
this time is discarded). Defaults to the first time step saved
in the .ndt data file.
t_end:
Last time step for the resampled data (all input data after
this time is discarded). Defaults to the last time step saved
in the .ndt data file.
subtract_values: None | 3-tuple of floats | 'first' | 'average'
If specified, the given values are subtracted from the data
before computing the Fourier transform. This can be used to
avoid potentially large peaks at zero frequency. If a 3-tuple
is given then it is interpreted as the three values to
subtract from mx, my and mz, respectively. If 'first' or
'average' is given, the first/average values of mx, my, mz are
determined and subtracted.
restrict_to_vertices:
This argument only has an effect when computing the spectrum
from spatially resolved magnetisation data (i.e., from a
series of .npy files). If `restrict_to_vertices` is `None`
(the default), all mesh vertices are taken into account.
Otherwise `restrict_to_vertices` should be a list of vertex
        indices. The spectrum is then computed for the
magnetisation dynamics at these particular vertices only.
*Returns*
Returns a tuple (freqs, psd_mx, psd_my, psd_mz), where psd_mx,
psd_my, psd_mz are the power spectral densities of the x/y/z-component
of the magnetisation and freqs are the corresponding frequencies.
"""
    if not filename.endswith('.npy') and restrict_to_vertices is not None:
logger.warning("Ignoring argument 'restrict_to_vertices' because it "
"can only be used when reading the spatially resolved "
"magnetisation from a list of .npy files.")
freqs, fft_mx, fft_my, fft_mz = \
_aux_fft_m(filename, t_step=t_step, t_ini=t_ini, t_end=t_end,
subtract_values=subtract_values, vertex_indices=restrict_to_vertices)
psd_mx = np.absolute(fft_mx) ** 2
psd_my = np.absolute(fft_my) ** 2
psd_mz = np.absolute(fft_mz) ** 2
if filename.endswith('.npy'):
# Compute the power spectra and then do the spatial average
psd_mx = psd_mx.sum(axis=-1)
psd_my = psd_my.sum(axis=-1)
psd_mz = psd_mz.sum(axis=-1)
return freqs, psd_mx, psd_my, psd_mz
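# Illustrative usage sketch (filenames and parameter values are hypothetical;
# not part of the original module):
#
#   freqs, psd_mx, psd_my, psd_mz = compute_power_spectral_density(
#       'm_ringdown*.npy', t_step=5e-12, t_ini=0.0, t_end=5e-9,
#       subtract_values='average')
#   f_peak, idx = find_peak_near_frequency(8e9, freqs, psd_my)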
def find_peak_near_frequency(f_approx, fft_freqs, fft_vals):
"""
Given the Fourier spectrum of one or multiple magnetisation
components, find the peak closest to the given frequency.
    This is a helper function for interactive use that allows one to
quickly determine the exact location of a peak for which an
approximate location is known (e.g from a plot).
*Example*
>>> fft_freqs, fft_mx, fft_my, fft_mz = FFT_m('simulation.ndt', t_step=1e-11)
>>> # Let's say that we have identified a peak near 5.4 GHz in fft_my (e.g. from a plot)
>>> idx = find_peak_near_frequency(5.4e9, fft_freqs, fft_my)
*Arguments*
f_approx : float
The frequency near which a peak is to be found.
fft_freqs : array
An array of frequencies (as returned by FFT_m, for example).
The values are assumed to be ordered from smallest to largest.
fft_vals : array or list of arrays
The Fourier transform of one magnetisation component (m_x, m_y or m_z).
*Returns*
A pair `(freq, idx)`, where `idx` is the index of the exact peak
in the array fft_freqs and `freq` is the associated frequency,
i.e. freq=fft_freqs[idx].
"""
try:
from scipy.signal import argrelmax
except ImportError:
raise NotImplementedError(
"Need scipy >= 0.11, please install the latest version via: 'sudo pip install -U scipy'")
if not len(fft_freqs) == len(fft_vals):
raise ValueError("The arrays `fft_freqs` and `fft_vals` "
"must have the same length, "
"but {} != {}".format(len(fft_freqs), len(fft_vals)))
fft_freqs = np.asarray(fft_freqs)
fft_vals = np.asarray(fft_vals)
N = len(fft_freqs) - 1 # last valid index
peak_indices = list(argrelmax(fft_vals)[0])
# Check boundary extrema
if fft_vals[0] > fft_vals[1]:
peak_indices.insert(0, 0)
if fft_vals[N - 1] < fft_vals[N]:
peak_indices.append(N)
closest_peak_idx = peak_indices[
np.argmin(np.abs(fft_freqs[peak_indices] - f_approx))]
logger.debug("Found peak at {:.3f} GHz (index: {})".format(
fft_freqs[closest_peak_idx] / 1e9, closest_peak_idx))
return fft_freqs[closest_peak_idx], closest_peak_idx
def _plot_spectrum(freqs, psd_mx, psd_my, psd_mz, components="xyz", log=False,
xlim=None, ylim=None, ticks=21, figsize=None, title="", outfilename=None):
"""
Internal helper function to plot certain components of the spectrum.
This is only separated out from plot_power_spectral_density so that
it can be re-used elsewhere, e.g. in the NormalModeSimulation class.
"""
import matplotlib.pyplot as plt
freqs_GHz = freqs / 1e9
if log:
psd_mx = np.log(psd_mx)
psd_my = np.log(psd_my)
psd_mz = np.log(psd_mz)
fig = plt.figure(figsize=figsize)
ax = fig.gca()
if 'x' in components:
ax.plot(freqs_GHz, psd_mx, '.-', label=r'$m_x$')
if 'y' in components:
ax.plot(freqs_GHz, psd_my, '.-', label=r'$m_y$')
if 'z' in components:
ax.plot(freqs_GHz, psd_mz, '.-', label=r'$m_z$')
ax.set_xlabel('Frequency (GHz)')
ax.set_ylabel('Power spectral density{}'.format(' (log)' if log else ''))
ax.set_xlim(xlim)
ax.set_ylim(ylim)
xmin, xmax = ax.get_xlim()
ax.set_xticks(np.linspace(xmin, xmax, ticks))
ax.legend(loc='best')
ax.grid()
if title:
ax.set_title(title)
if outfilename is not None:
fig.savefig(outfilename)
return fig
def plot_power_spectral_density(filename, t_step=None, t_ini=None, t_end=None, subtract_values='first', components="xyz",
log=False, xlim=None, ylim=None, ticks=21, figsize=None, title="", outfilename=None, restrict_to_vertices=None):
"""
Plot the power spectral density of the components of the magnetisation m.
The arguments `t_step`, `t_ini`, `t_end` and `subtract_values` have the
same meaning as in the function `FFT_m`.
*Arguments*
components: string | list
A string or list containing the components to plot. Default: 'xyz'.
log: bool
        If True, the y-axis is plotted on a log scale (default: False).
figsize: pair of floats
The size of the resulting figure.
title: string
The figure title.
*Returns*
The matplotlib Figure instance containing the plot. If
`outfilename` is not None, it also saves the plot to the
specified file.
"""
if not set(components).issubset("xyz"):
raise ValueError("Components must only contain 'x', 'y' and 'z'. "
"Got: {}".format(components))
freqs, psd_mx, psd_my, psd_mz = \
compute_power_spectral_density(filename, t_step, t_ini=t_ini, t_end=t_end,
subtract_values=subtract_values, restrict_to_vertices=restrict_to_vertices)
return _plot_spectrum(freqs, psd_mx, psd_my, psd_mz, components=components,
log=log, xlim=xlim, ylim=ylim, ticks=ticks,
figsize=figsize, title=title, outfilename=outfilename)
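# Illustrative call (the filename is hypothetical; not part of the original
# module):
#
#   fig = plot_power_spectral_density('simulation.ndt', t_step=1e-11,
#                                     components='xy', log=True,
#                                     outfilename='psd.png')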
def fft_at_probing_points(dolfin_funcs, pts):
"""
Given a list of dolfin Functions (on the same mesh) representing
field values at different time steps, as well as the x-, y- and
z-coordinates of some probing points, compute and return the
discrete Fourier transforms (over time) of these functions at each
point.
*Arguments*
dolfin_funcs: list of dolfin.Function
List of functions representing (say) the field values at
different time steps.
pts: points: numpy.array
An array of points where the FFT should be computed. Can
have arbitrary shape, except that the last axis must have
dimension 3. For example, if pts.shape == (10,20,5,3) then
        the FFT is computed at all points on a regular grid of
size 10 x 20 x 5.
*Returns*
A numpy.array `res` of the same shape as X, Y and Z, but with an
    additional first axis which contains the coefficients of the Fourier
transform. For example, res[:, ...] represents the Fourier transform
over time as probed at the point (X[...], Y[...], Z[...]).
"""
vals_probed = np.ma.masked_array([probe(f, pts) for f in dolfin_funcs])
# vals_fft = np.ma.masked_array(np.fft.fft(vals_probed, axis=0),
# mask=np.ma.getmask(vals_probed))
# freqs = np.fft.fftfreq(
n = (len(dolfin_funcs) // 2) + 1
vals_fft = np.ma.masked_array(np.fft.rfft(vals_probed, axis=0),
mask=np.ma.getmask(vals_probed[:n, ...]))
return vals_fft
def plot_spatially_resolved_normal_modes(m_vals_on_grid, idx_fourier_coeff,
t_step=None, figsize=None, yshift_title=1.5,
show_colorbars=True, cmap=None):
"""
XXX Warning: The interface for this function hasn't matured yet,
so be prepared for it to change in the future.
Given the time evolution of the magnetisation (probed on a regular
grid), compute and plot the normal mode shapes corresponding to a
certain frequency. More precisely, this plots the absolute values
and the phase of the Fourier coefficients at each probing point
for each of m_x, m_y and m_z.
m_vals_on_grid : numpy.array of shape (nt, nx, ny, nz, 3)
Array containing the magnetisation values probed at regular
time steps on a regular grid. Here `nt` is the number of time
steps and `nx`, `ny`, `nz` determine the size of the probing
        grid. Thus the 3-vector m_vals_on_grid[t0, x0, y0, z0]
contains the magnetisation at time `t0` probed at the grid
point with coordinates (x0, y0, z0).
idx_fourier_coeff : int
Index of the Fourier coefficient for which to compute the normal
modes. This should be between 0 and (number of files - 1).
t_step : float (optional)
The interval between subsequent time steps of the probed
magnetisation values. This is only relevant to print the
frequency of the normal mode in the figure title.
figsize : pair of float
The size of the resulting figure.
yshift_title : float
Amount by which the title should be shifted up (this can be
used to tweak the figure if the title overlaps with one of the
colorbars, say).
show_colorbars : bool
Whether to show a colorbar in each subplot (default: True).
cmap :
The colormap to use.
*Returns*
    The matplotlib figure containing the plot.
"""
import matplotlib.pyplot as plt
from matplotlib import cm
if cmap is None:
cmap = cm.jet
n = (m_vals_on_grid.shape[0] // 2) + 1
fft_vals = np.ma.masked_array(np.fft.rfft(m_vals_on_grid, axis=0),
mask=np.ma.getmask(m_vals_on_grid[:n, ...]))
fig = plt.figure(figsize=figsize)
axes = []
for k in [0, 1, 2]:
ax = fig.add_subplot(2, 3, k + 1)
ax.set_title('m_{}'.format('xyz'[k]))
im = ax.imshow(
abs(fft_vals[idx_fourier_coeff, :, :, 0, k]), origin='lower', cmap=cmap)
if show_colorbars:
fig.colorbar(im)
axes.append(ax)
ax = fig.add_subplot(2, 3, k + 3 + 1)
axes.append(ax)
ax.set_title('m_{} (phase)'.format('xyz'[k]))
im = ax.imshow(np.angle(
fft_vals[idx_fourier_coeff, :, :, 0, k], deg=True), origin='lower', cmap=cmap)
if show_colorbars:
fig.colorbar(im)
    if t_step is not None:
# XXX TODO: Which value of nn is the correct one?
#nn = n
nn = len(m_vals_on_grid)
fft_freqs = np.fft.fftfreq(nn, t_step)[:nn]
figure_title = "Mode shapes for frequency f={:.2f} GHz".format(
fft_freqs[idx_fourier_coeff] / 1e9)
plt.text(0.5, yshift_title, figure_title,
horizontalalignment='center',
fontsize=20,
transform=axes[2].transAxes)
else:
logger.warning(
"Omitting figure title because no t_step argument was specified.")
plt.tight_layout()
return fig
def export_normal_mode_animation_from_ringdown(npy_files, outfilename, mesh, t_step, k, scaling=0.2, dm_only=False, num_cycles=1, num_frames_per_cycle=20):
"""
Read a bunch of .npy files (containing the magnetisation sampled
at regular time steps) and export an animation of the normal mode
corresponding to a specific frequency.
npy_files : string (including shell wildcards) or list of filenames
The list of files containing the magnetisation values sampled
        at the mesh vertices. There should be one file per time step.
outfilename : string
Name of the .pvd file to which the animation is exported.
mesh : dolfin.Mesh or string
The mesh (or name of the .xml.gz file containing the mesh) on
which the magnetisation was sampled.
t_step : float
The interval between subsequent time steps.
k: int
Index of the frequency for which the normal mode is to be plotted.
scaling : float
If `dm_only` is False, this determines the maximum size of the
oscillation (relative to the magnetisation vector) in the
visualisation. If `dm_only` is True, this has no effect.
dm_only : bool (optional)
If False (the default), plots `m0 + scaling*dm(t)`, where m0 is the
average magnetisation and dm(t) the (spatially varying)
oscillation corresponding to the frequency of the normal mode.
If True, only `dm(t)` is plotted.
num_cycles : int
The number of cycles to be animated (default: 1).
num_frames_per_cycle : int
        The number of snapshots per cycle to be exported (default: 20). Thus the
total number of exported frames is (num_frames_per_cycle * num_cycles).
"""
files = sorted(glob(npy_files)) if isinstance(
npy_files, str) else list(npy_files)
if len(files) == 0:
logger.error("Cannot produce normal mode animation. No input .npy "
"files found matching '{}'".format(npy_files))
return
if isinstance(mesh, str):
mesh = df.Mesh(mesh)
N = len(files) # number of timesteps
num_nodes = mesh.num_vertices()
# Read in the magnetisation dynamics from each .npy file and store
# it as successive time steps in the array 'signal'.
signal = np.empty([N, 3 * num_nodes])
for (i, filename) in enumerate(files):
signal[i, :] = np.load(filename)
logger.debug("Array with magnetisation dynamics occupies "
"{} MB of memory".format(signal.nbytes / 1024 ** 2))
# Fourier transform the signal
t0 = time()
fft_vals = np.fft.rfft(signal, axis=0)
t1 = time()
logger.debug(
"Computing the Fourier transform took {:.2g} seconds".format(t1 - t0))
    # Compute the frequencies by hand (as in _aux_fft_m above) so that the
    # last frequency cannot come out negative, which np.fft.fftfreq allows.
    fft_freqs = np.arange(len(fft_vals)) / (t_step * N)
# Only keep the k-th Fourier coefficient at each mesh node
# (combined in the array A_k).
A_k = fft_vals[k]
abs_k = np.abs(A_k)[np.newaxis, :]
theta_k = np.angle(A_k)[np.newaxis, :]
num_frames = num_frames_per_cycle * num_cycles
signal_filtered = np.empty([num_frames, 3 * num_nodes])
# frequency associated with the k-th Fourier coefficient
omega = fft_freqs[k]
cycle_length = 1.0 / omega
timesteps = np.linspace(
0, num_cycles * cycle_length, num_frames, endpoint=False)[:, np.newaxis]
t_end = (N - 1) * t_step
# Compute 'snapshots' of the oscillation and store them in signal_filtered
#
# TODO: Write a unit test for this formula, just to be 100% sure
# that it is correct!
signal_filtered = 2.0 / N * abs_k * \
cos(k * 2 * pi * timesteps / t_end + theta_k)
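    # (Reading of the formula, for reference: keeping only the k-th real-FFT
    #  coefficient A_k, its contribution to the inverse transform is
    #  2/N * |A_k| * cos(2*pi*f_k*t + arg(A_k)) with f_k ~ k / t_end, which
    #  is what the expression evaluates at the chosen snapshot times.)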
# Determine a sensible scaling factor so that the oscillations are
# visible but not too large. (Note that, even though it looks
# convoluted, computing the maximum value in this iterated way is
# actually much faster than doing it directly.)
maxval = max(np.max(signal_filtered, axis=0))
logger.debug("Maximum value of the signal: {}".format(maxval))
V = df.VectorFunctionSpace(mesh, 'CG', 1, dim=3)
func = df.Function(V)
func.rename('m', 'magnetisation')
    if dm_only:
signal_normal_mode = 1 / maxval * signal_filtered
else:
signal_normal_mode = signal.mean(
axis=0).T + scaling / maxval * signal_filtered
# XXX TODO: Should check for an existing file and ask for confirmation
# whether it should be overwritten!
logger.debug(
"Saving normal mode animation to file '{}'.".format(outfilename))
t0 = time()
f = df.File(outfilename, 'compressed')
# XXX TODO: We need the strange temporary array 'aaa' here because
# if we write the values directly into func.vector()
# then they end up being different (as illustrated in
# the code that is commented out)!!!
aaa = np.empty(3 * num_nodes)
# for i in xrange(len(ts)):
# for i in xrange(20):
for i in xrange(num_frames):
# if i % 20 == 0:
# print "i={} ".format(i),
# import sys
# sys.stdout.flush()
aaa[:] = signal_normal_mode[i][:]
func.vector()[:] = aaa
f << func
t1 = time()
logger.debug("Saving the data took {} seconds".format(t1 - t0))
# Shorter aliases that are easier to type
compute_psd = compute_power_spectral_density
plot_psd = plot_power_spectral_density
| 26,961 | 35.484438 | 155 |
py
|
finmag
|
finmag-master/src/finmag/util/consts.py
|
"""
A collection of constants we use (in SI units).
"""
from __future__ import division
from math import pi, sqrt
import numbers
mu0 = 4 * pi * 1e-7 # Vs/(Am)
k_B = 1.3806488e-23 # Boltzmann constant in J / K
h_bar = 1.054571726e-34  # reduced Planck constant in Js
e = 1.602176565e-19 # elementary charge in As
# m/(As) (source: OOMMF manual, and in Werner Scholz thesis, after (3.7),
# llg_gamma_G = m/(As))
gamma = 2.210173e5
ONE_DEGREE_PER_NS = 17453292.52 # rad/s
def exchange_length(A, Ms):
"""
Computes the exchange length, when given the exchange constant A
and the saturation magnetisation Ms.
"""
assert isinstance(A, numbers.Number)
assert isinstance(Ms, numbers.Number)
return sqrt(2 * A / (mu0 * Ms ** 2))
def bloch_parameter(A, K1):
"""
Computes the Bloch parameter, when given the exchange constant A
and the anisotropy energy density K1.
"""
return sqrt(A / K1)
def helical_period(A, D):
"""
    Computes the helical period of a skyrmion system, given the exchange
    constant A and the DMI strength D.
"""
return 4 * pi * A / abs(D)
def flux_density_to_field_strength(B):
"""
Converts the magnetic flux density to the magnetic field strength.
Magnetic flux density B is expressed in Tesla, and the returned field
strength H is expressed in A/m.
"""
H = B / mu0
return H
def Oersted_to_SI(H):
"""
Converts the magnetic field strength H from Oersted to A/m.
"""
return H * 1e3 / (4 * pi)
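# Illustrative check (not part of the original module): for Permalloy-like
# values A = 13e-12 J/m and Ms = 8e5 A/m the exchange length comes out at
# roughly 5.7 nm.
def _demo_exchange_length():
    l_ex = exchange_length(13e-12, 8e5)
    print "exchange length: {:.3g} m".format(l_ex)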
| 1,525 | 22.476923 | 74 |
py
|
finmag
|
finmag-master/src/finmag/util/fileio.py
|
import os
import logging
import types
import numpy as np
from glob import glob
from types import TupleType, StringType
from aeon import timer
logger = logging.getLogger(name='finmag')
class Tablewriter(object):
# It is recommended that the comment symbol should end with a
# space so that there is no danger that it gets mangled up with
# the 'time' field because some of the code below relies on them
# being separated by some whitespace.
comment_symbol = '# '
def __init__(self, filename, simulation, override=False, entity_order=None, entities=None):
logger.debug("Creating DataWriter for file '%s'" % (filename))
# formatting for columns (could in principle be customized
# through extra arguments here)
precision = 12
charwidth = 18
self.float_format = "%" + str(charwidth) + '.' + str(precision) + "g "
self.string_format = "%" + str(charwidth) + "s "
# save_head records whether the headings (name and units)
# have been saved already
self.save_head = False
# entities:
# Idea is to have a dictionary of keys where the keys
# are reference names for the entities and
# the value is another dictionary, which has keys 'unit', 'get' and 'header':
# 'get' is the a function that takes a simulation object as the argument
# and returns the data to be saved.
#
# No doubt this can be done neater, more general, etc.
# For example, it would be desirable if we could get ALL
# the fields from the simulation object, i.e. demag, exchange,
# anisotropy and also the corresponding energies.
#
        # Ideally this would have the flexibility to realise when we have
# two different anisotropies in the simulation, and provide both of
# these. It may be that we need create a 'fieldname' that the user
# can provide when creating interactions which summarises what the
# field is about, and which can be used as a useful column header
# here for the ndt file.
if entities is None:
self._entities = {}
self.add_entity('time', {'unit': '<s>',
'get': lambda sim: sim.t,
'header': 'time'})
self.add_entity('m', {'unit': '<>',
'get': lambda sim: sim.m_average,
'header': ('m_x', 'm_y', 'm_z')})
        # add time integrator dummy tokens that return NAN as we haven't got
# the integrator yet (or may never create one).
self.add_entity('steps', {
'unit': '<1>',
#'get': lambda sim: sim.integrator.stats()['nsteps'],
'get': lambda sim: np.NAN,
'header': 'steps'})
self.add_entity('last_step_dt', {
'unit': '<1>',
#'get': lambda sim: sim.integrator.stats()['hlast'],
'get': lambda sim: np.NAN,
'header': 'last_step_dt'})
self.add_entity('dmdt', {
'unit': '<A/ms>',
#'get': lambda sim: sim.dmdt_max,
'get': lambda sim: np.array([np.NAN, np.NAN, np.NAN]),
'header': ('dmdt_x', 'dmdt_y', 'dmdt_z')})
else:
self._entities = entities
self.filename = filename
self.sim = simulation
# in what order to write data
if entity_order:
self.entity_order = entity_order
else:
self.entity_order = self.default_entity_order()
# if file exists, cowardly stop
if os.path.exists(filename) and not override:
msg = "File %s exists already; cowardly stopping" % filename
raise RuntimeError(msg)
def add_entity(self, name, dic):
"""
Add an entity to be saved to this ndt file at the next data saving instance. The
arguments are:
name : a reference name for this entity (used to order the entities in the ndt file)
dic : a dictionary containing data for the header lines and a function to retrieve the data.
Examples:
For the time entity, we have
name = 'time'
dic = {'unit': '<s>',
'get': lambda sim: sim.t,
'header': 'time'},
For the magnetisation entity, we have
name = 'm'
dic = {'unit': '<>',
'get': lambda sim: sim.m_average,
'header': ('m_x', 'm_y', 'm_z')}
"""
if self.save_head:
raise RuntimeError("Attempt to add entity '{}'->'{}' to ndt file {}" +
"after file has been created -- this is impossible".
format(name, dic, self.filename))
assert name not in self._entities.keys(), \
"Attempt to add a second '{}' to entities for {}".\
format(name, self.filename)
# check that right keywords are given
entity_descr = "entity '{}' -> '{}'".format(name, dic)
assert 'header' in dic, "Missing 'header' in " + entity_descr
assert 'unit' in dic, "Missing 'unit' in " + entity_descr
assert 'get' in dic, "Missing 'get' in " + entity_descr
self._entities[name] = dic
self.update_entity_order()
def modify_entity_get_method(self, name, new_get_method):
"""Allows changing the get method. Is used for integrators at the moment: we register
        dummy get methods when the tablewriter file is created, and then update those if and
when an integrator has been created."""
assert name in self._entities, "Couldn't find '{}' in {}".format(
name, self._entities.keys())
logger.debug("Updating get method for {} in TableWriter(name={})".format(
name, self.filename))
# logger.debug("Updating get method for {} in TableWriter(name={}) old method: {}, new method: {}".format(
# name, self.filename, self._entities[name]['get'], new_get_method))
self._entities[name]['get'] = new_get_method
def delete_entity_get_method(self, name):
"""We cannot delete entities once they are created (as this would change the number of columns in the
        data file). Instead, we register a replacement 'get' function that returns numpy.NAN.
"""
assert name in self._entities, "Couldn't find '{}' in {}".format(
name, self._entities.keys())
logger.debug("'Deleting' get method for {} in TableWriter(name={})".format(
name, self.filename))
self._entities[name]['get'] = lambda sim: np.NAN
def delete_entity_get_methods(self):
"""Method to delete all get_methods.
        Might need this when trying to find where references to the simulation object are hiding.
        """
        logger.debug("Deleting all get methods in TableWriter(name={})".format(self.filename))
keys = self._entities.keys()
for key in keys:
self.delete_entity_get_method(key)
def default_entity_order(self):
keys = self._entities.keys()
# time needs to go first
if 'time' in keys:
keys.remove('time')
return ['time'] + sorted(keys)
elif 'step' in keys:
keys.remove('step')
return ['step'] + sorted(keys)
else:
return keys
def update_entity_order(self):
self.entity_order = self.default_entity_order()
def headers(self):
"""return line one and two of ndt data file as string"""
line1 = [self.comment_symbol]
line2 = [self.comment_symbol]
for entityname in self.entity_order:
colheaders = self._entities[entityname]['header']
# colheaders can be a 3-tuple ('mx','my','mz'), say
# or a string ('time'). Avoid iterating over string:
if isinstance(colheaders, str):
colheaders = [colheaders]
for colhead in colheaders:
line1.append(self.string_format % colhead)
line2.append(self.string_format %
self._entities[entityname]['unit'])
return "".join(line1) + "\n" + "".join(line2) + "\n"
@timer.method
def save(self):
"""Append data (spatial averages of fields) for current
configuration"""
if not self.save_head:
f = open(self.filename, 'w')
# Write header
f.write(self.headers())
f.close()
self.save_head = True
# open file
with open(self.filename, 'a') as f:
f.write(' ' * len(self.comment_symbol)) # account for comment
# symbol width
# The commented lines below are Hans' initial attempt to catch when the
# number of columns to be written changes
# but this seems to never happen. So it's not quite right.
# Also, if this was the right place to catch it, i.e. if watching
# self._entities is the critical object that shouldn't change after
# the header has been written, then we should convert this into a
# 'property' which raises an error if called for writing once the
# header lines have been written. HF, 9 June 2014.
# if len(self._entities) == self.ncolumn_headings_written:
# msg = "It seems number of columns to be written" + \
# "to {} has changed".format(self.filename)
# msg += "from {} to {}. This is not supported.".format(
# self.ncolumn_headings_written, len(self.entity_order))
# logger.error(msg)
# raise ValueError(msg)
for entityname in self.entity_order:
value = self._entities[entityname]['get'](self.sim)
if isinstance(value, np.ndarray):
for v in value:
f.write(self.float_format % v)
elif isinstance(value, float) or isinstance(value, int):
f.write(self.float_format % value)
elif isinstance(value, types.NoneType):
#f.write(self.string_format % value)
f.write(self.string_format % "nan")
else:
msg = "Can only deal with numpy arrays, float and int " + \
"so far, but type is %s" % type(value)
raise NotImplementedError(msg)
f.write('\n')
class Tablereader(object):
# open ndt file
def __init__(self, filename):
self.filename = filename
# if file exists, cowardly stop
if not os.path.exists(filename):
raise RuntimeError("Cannot see file '%s'" % self.filename)
# immediatey read file
self.reload()
def reload(self):
"""Read Table data file"""
try:
self.f = open(self.filename, 'r')
except IOError:
raise RuntimeError("Cannot see file '%s'" % self.filename)
line1 = self.f.readline()
line2 = self.f.readline()
headers = line1.split()
units = line2.split()
assert len(headers) == len(units)
# use numpy to read remaining data (genfromtxt will
# complain if there are rows with different sizes)
try:
self.data = np.genfromtxt(self.f)
except ValueError:
raise RuntimeError("Cannot load data from file '{}'." +
"Maybe the file was incompletely written?".
format(self.f))
self.f.close()
# Make sure we have a 2d array even if the file only contains a single
# line (or none)
if self.data.ndim == 1:
self.data = self.data[np.newaxis, :]
# Check if the number of data columns is equal to the number of headers
assert self.data.shape[1] == len(headers) - 1
datadic = {}
# now wrap up data conveniently
for i, entity in enumerate(headers[1:]):
datadic[entity] = self.data[:, i]
self.datadic = datadic
def entities(self):
"""Returns list of available entities"""
return self.datadic.keys()
def timesteps(self):
"""Returns list of available time steps"""
return self.datadic['time']
def __getitem__(self, entity):
"""
Given the entity name, return the data as a 1D numpy array.
If multiple entity names (separated by commas) are given
then a 2D numpy array is returned where the columns represent
the data for the entities.
"""
if isinstance(entity, StringType):
res = self.datadic[entity]
elif isinstance(entity, TupleType):
res = [self.datadic[e] for e in entity]
else:
raise TypeError("'entity' must be a string or a tuple. "
"Got: {} ({})".format(entity, type(entity)))
return res
class FieldSaver(object):
"""
Wrapper class which can incrementally save data to one file or
multiple files (depending on the file type). Internally, this
keeps a counter which is included in the file name if multiple
files need to be created.
Supported file types:
.npy -- Creates multiple, incrementally numbered .npy files.
"""
cnt_pattern = '_{:06d}'
def __init__(self, filename, overwrite=False, incremental=False):
if not filename.endswith('.npy'):
filename += '.npy'
# Create any non-existing directory components
dirname = os.path.dirname(filename)
if dirname != '' and not os.path.exists(dirname):
os.makedirs(dirname)
self.filename = filename
self.basename, self.ext = os.path.splitext(filename)
self.incremental = incremental
self.counter = 0
if incremental:
existing_files = glob(self.basename + '_*' + self.ext)
else:
existing_files = glob(self.filename)
if len(existing_files) > 0:
            if not overwrite:
                raise IOError(
                    "Will not overwrite existing file(s) matching '{}*'. Use "
                    "'overwrite=True' if this is what you want.".format(self.basename))
else:
logger.debug("Overwriting {} existing file(s) "
"'{}*.npy'.".format(len(existing_files), self.basename))
for f in existing_files:
os.remove(f)
def save(self, data):
"""
Save the given data (which should be a numpy array).
"""
if self.incremental:
cur_filename = self.basename + \
self.cnt_pattern.format(self.counter) + self.ext
else:
cur_filename = self.filename
logger.debug("Saving field data to file '{}'.".format(cur_filename))
np.save(cur_filename, data)
self.counter += 1
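# Illustrative usage sketch (not part of the original module): incremental
# saving writes m_000000.npy, m_000001.npy, ... next to the given filename.
def _demo_field_saver():
    saver = FieldSaver('m.npy', overwrite=True, incremental=True)
    for i in range(3):
        saver.save(np.zeros(6) + i)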
def demo2():
import finmag
sim = finmag.example.barmini(name='demo2-fileio')
sim.save_averages()
# and write some more data
sim.schedule("save_ndt", every=10e-12)
sim.run_until(0.1e-9)
# read the data
data = Tablereader('demo2_fileio.ndt')
for t, mx, my, mz in zip(data['time'], data['m_x'], data['m_y'], data['m_z']):
print("t={:10g}, m = {:12}, {:12}, {:12}".format(t, mx, my, mz))
def demo1():
# create example simulation
import finmag
import dolfin as df
xmin, ymin, zmin = 0, 0, 0 # one corner of cuboid
xmax, ymax, zmax = 6, 6, 11 # other corner of cuboid
nx, ny, nz = 3, 3, 6 # number of subdivisions (use ~2nm edgelength)
mesh = df.BoxMesh(df.Point(xmin, ymin, zmin), df.Point(xmax, ymax, zmax), nx, ny, nz)
# standard Py parameters
sim = finmag.sim_with(
mesh, Ms=0.86e6, alpha=0.5, unit_length=1e-9, A=13e-12, m_init=(1, 0, 1))
filename = 'data.txt'
ndt = Tablewriter(filename, sim, override=True)
times = np.linspace(0, 3.0e-11, 6 + 1)
for i, time in enumerate(times):
print("In iteration {}, computing up to time {}".format(i, time))
sim.run_until(time)
ndt.save()
# now open file for reading
f = Tablereader(filename)
print f.timesteps()
print f['m_x']
if __name__ == "__main__":
print("Demo 1")
demo1()
print("Demo 2")
demo2()
| 16,523 | 36.049327 | 114 |
py
|
finmag
|
finmag-master/src/finmag/util/dmi_helper.py
|
import numpy as np
import dolfin as df
def find_skyrmion_center_2d(fun, point_up=False):
"""
    Find the centre of the skyrmion. Assumes there is only one skyrmion
    present, and only works for a 2d mesh.
    `fun` accepts a dolfin function.
    `point_up` : whether the skyrmion core points up (True) or down (False).
"""
V = fun.function_space()
mesh = V.mesh()
coods = V.dofmap().tabulate_all_coordinates(mesh).reshape(3, -1)[0]
coods.shape = (-1, mesh.topology().dim())
xs = coods[:, 0]
ys = coods[:, 1]
mxys = fun.vector().array().reshape(3, -1)
mzs = mxys[2]
if point_up:
mzs = - mxys[2]
mins = [i for i, u in enumerate(mzs) if u < -0.9]
xs_max = np.max(xs[mins])
xs_min = np.min(xs[mins])
ys_max = np.max(ys[mins])
ys_min = np.min(ys[mins])
xs_refine = np.linspace(xs_min, xs_max, 101)
ys_refine = np.linspace(ys_min, ys_max, 101)
coods_refine = np.array([(x, y) for x in xs_refine for y in ys_refine])
mzs_refine = np.array([fun(xy)[2] for xy in coods_refine])
min_id = np.argmin(mzs_refine)
if point_up:
min_id = np.argmax(mzs_refine)
center = coods_refine[min_id]
return center[0], center[1]
def compute_skyrmion_number_2d(m):
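    # Assembles S = 1/(4*pi) * \int m . (dm/dx x dm/dy) dA over the mesh;
    # tx, ty and tz below spell out the components of the triple product.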
gradm = df.grad(m)
dmx_dx = gradm[0, 0]
dmy_dx = gradm[1, 0]
dmz_dx = gradm[2, 0]
dmx_dy = gradm[0, 1]
dmy_dy = gradm[1, 1]
dmz_dy = gradm[2, 1]
mx = m[0]
my = m[1]
mz = m[2]
tx = mx * (-dmy_dy * dmz_dx + dmy_dx * dmz_dy)
ty = my * (dmx_dy * dmz_dx - dmx_dx * dmz_dy)
tz = mz * (-dmx_dy * dmy_dx + dmx_dx * dmy_dy)
total = (tx + ty + tz) * df.dx
sky_num = df.assemble(total) / (4 * np.pi)
return sky_num
| 1,714 | 20.987179 | 75 |
py
|
finmag
|
finmag-master/src/finmag/util/vtk_saver.py
|
import os
import re
import glob
import time
import logging
import dolfin as df
log = logging.getLogger("finmag")
class VTKSaver(object):
def __init__(self, filename=None, overwrite=False):
self.filename = filename
self.f = None
self.counter = 0
        if filename is not None:
self.open(filename, overwrite)
def open(self, filename, overwrite=False):
ext = os.path.splitext(filename)[1]
if ext != '.pvd':
raise ValueError(
"File extension for vtk snapshot file must be '.pvd', "
"but got: '{}'".format(ext))
self.filename = filename
self.basename = re.sub('\.pvd$', '', self.filename)
if os.path.exists(self.filename):
if overwrite:
log.warning(
"Removing file '{}' and all associated .vtu files "
"(because overwrite=True).".format(self.filename))
os.remove(self.filename)
for f in glob.glob(self.basename + "*.vtu"):
os.remove(f)
else:
raise IOError(
"Aborting snapshot creation. File already exists and "
"would overwritten: '{}' (use overwrite=True if this "
"is what you want)".format(self.filename))
# Open the file here so that it stays open during all calls to
# save(), otherwise consecutive calls will overwrite previously
# written data.
self.f = df.File(self.filename, "compressed")
def save_field(self, field_data, t):
"""
Save the given field data to the .pvd file associated with
this VTKSaver.
*Arguments*
field_data: dolfin.Function
The data to be saved.
t: float
The time step with which the data is associated
"""
t0 = time.time()
self.f << field_data
t1 = time.time()
log.debug("Saved field at t={} to file '{}' (snapshot #{}; saving took "
"{:.3g} seconds).".format(t, self.filename, self.counter, t1 - t0))
self.counter += 1
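# Illustrative usage sketch (not part of the original module):
#
#   saver = VTKSaver('snapshots/m.pvd', overwrite=True)
#   saver.save_field(m_function, 0.0)
#   saver.save_field(m_function, 1e-12)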
| 2,171 | 29.591549 | 85 |
py
|
finmag
|
finmag-master/src/finmag/util/fft_test_helpers.py
|
import os
import numpy as np
import matplotlib.pyplot as plt
import subprocess as sp
from numpy import sqrt, sin, cos, pi, exp, real, conj
def create_test_ndt_file(dirname, t_step, t_ini, t_end, omega, alpha, debug=False):
"""
Create a .ndt file with a time series of artificial magnetisation
data for testing purposes.
"""
print "Precessional frequency: {} GHz".format(omega / 1e9)
ts = np.arange(t_ini, t_end, t_step)
    print "Number of timesteps: {}".format(len(ts))
# Use damped harmonic oscillator to create fake magnetisation dynamics
mx = exp(-ts * 1e8 / alpha) * sin(omega * ts)
my = exp(-ts * 1e8 / alpha) * cos(omega * ts)
mz = 1 - sqrt(mx ** 2 + my ** 2)
data = np.array([ts, mx, my, mz]).T
if debug:
# Plot the dynamics for debugging purposes
fig = plt.figure(figsize=(20, 5))
ax = fig.gca()
ax.plot(ts, mx)
ax.plot(ts, my)
ax.plot(ts, mz)
fig.savefig(os.path.join(dirname, 'm_vs_t.png'))
# Save the data to a .ndt file. The sed commands add the two header lines
# which are required by the file format.
ndt_filename = os.path.join(dirname, 'fake_relaxation.ndt')
np.savetxt(ndt_filename, data)
    sp.check_call(
        "sed -i '1 i # time m_x m_y m_z' '{}'".format(ndt_filename), shell=True)
    sp.check_call(
        "sed -i '2 i # <s> <> <> <>' '{}'".format(ndt_filename), shell=True)
return ndt_filename
def create_test_npy_files(dirname, t_step, t_ini, t_end, omega, alpha, num_vertices):
"""
Construct a time series of artificial magnetisation data and save
it to a bunch of .npy files.
"""
print "Precessional frequency: {} GHz".format(omega / 1e9)
ts = np.arange(t_ini, t_end, t_step)
num_timesteps = len(ts)
print "Number of timesteps: {}".format(num_timesteps)
# Use damped harmonic oscillator to create fake magnetisation dynamics
mx = exp(-ts * 1e8 / alpha) * sin(omega * ts)
my = exp(-ts * 1e8 / alpha) * cos(omega * ts)
mz = 1 - sqrt(mx ** 2 + my ** 2)
# Write the data to a series of .npy files
a = np.zeros((3, num_vertices))
for i in xrange(num_timesteps):
a[0, :] = mx[i]
a[1, :] = my[i]
a[2, :] = mz[i]
filename = os.path.join(dirname, 'm_ringdown_{:06d}.npy'.format(i))
np.save(filename, a.ravel())
npy_files = os.path.join(dirname, 'm_ringdown_*.npy')
return npy_files
def create_test_npy_files_with_two_regions(dirname, t_step, t_ini, t_end, omega1, alpha1, num_vertices1, omega2, alpha2, num_vertices2):
"""
Construct a time series of artificial magnetisation data and save
it to a bunch of .npy files.
"""
print "Precessional frequency in region 1: {} GHz".format(omega1 / 1e9)
print "Precessional frequency in region 2: {} GHz".format(omega2 / 1e9)
ts = np.arange(t_ini, t_end, t_step)
num_timesteps = len(ts)
print "Number of timesteps: {}".format(num_timesteps)
# Use damped harmonic oscillator to create fake magnetisation dynamics
mx1 = exp(-ts * 1e8 / alpha1) * sin(omega1 * ts)
my1 = exp(-ts * 1e8 / alpha1) * cos(omega1 * ts)
mz1 = 1 - sqrt(mx1 ** 2 + my1 ** 2)
mx2 = exp(-ts * 1e8 / alpha2) * sin(omega2 * ts)
my2 = exp(-ts * 1e8 / alpha2) * cos(omega2 * ts)
mz2 = 1 - sqrt(mx2 ** 2 + my2 ** 2)
# Write the data to a series of .npy files
N = num_vertices1 + num_vertices2
a = np.zeros((3, N))
for i in xrange(num_timesteps):
# Write values in region 1
a[0, :num_vertices1] = mx1[i]
a[1, :num_vertices1] = my1[i]
a[2, :num_vertices1] = mz1[i]
# Write values in region 2
a[0, num_vertices1:] = mx2[i]
a[1, num_vertices1:] = my2[i]
a[2, num_vertices1:] = mz2[i]
filename = os.path.join(dirname, 'm_ringdown_{:06d}.npy'.format(i))
np.save(filename, a.ravel())
npy_files = os.path.join(dirname, 'm_ringdown_*.npy')
return npy_files
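if __name__ == "__main__":
    # Minimal usage sketch; all parameter values below are illustrative
    # assumptions, not part of the original helpers.
    import tempfile
    dirname = tempfile.mkdtemp()
    pattern = create_test_npy_files(dirname, t_step=1e-12, t_ini=0.0,
                                    t_end=1e-10, omega=2 * pi * 10e9,
                                    alpha=1e-2, num_vertices=100)
    print "Created .npy files matching pattern: {}".format(pattern)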
| 3,967 | 33.504348 | 136 |
py
|
finmag
|
finmag-master/src/finmag/util/versions.py
|
import os
import re
import sh
import sys
import logging
import finmag
logger = logging.getLogger('finmag')
def get_linux_issue():
try:
f = open("/etc/issue")
except IOError:
logger.error("Can't read /etc/issue -- this is odd?")
raise RuntimeError("Cannot establish linux version")
    issue = f.readline()  # only return first line
    f.close()
issue = issue.replace('\\l', '')
issue = issue.replace('\\n', '')
#logger.debug("Linux OS = '%s'" % issue)
return issue.strip() # get rid of white space left and right
def get_version_python():
version = sys.version.split(' ')[0]
assert version.count('.') == 2, "Unknown version format: %s" % version
return version
def get_module_version(name):
try:
m = __import__(name)
return m.__version__
except ImportError:
return None
def get_version_ipython():
try:
return get_module_version('IPython')
except ValueError:
# This is needed due to a strange error seen in some test runs:
#
# /usr/lib/python2.7/dist-packages/IPython/utils/io.py:32: in __init__
# > raise ValueError("fallback required, but not specified")
# E ValueError: fallback required, but not specified
#
# It seems that this can happen because standard output is caught by
# py.test, but providing the -s switch didn't help either.
return None
def get_version_dolfin():
return get_module_version('dolfin')
def get_version_numpy():
return get_module_version('numpy')
def get_version_matplotlib():
# this will only do a look-up of matplotlib's version if it is already
# imported. If matplotlib hasn't been imported yet, it won't do so either.
if "matplotlib" not in sys.modules:
return "lazily loaded"
return get_module_version('matplotlib')
def get_version_scipy():
return get_module_version('scipy')
def get_version_boostpython():
"""
Determine and return the boost-python version.
We check the name of the symlink of libboost_python.
If libboost_python.so is installed, returns a string with the version
number, otherwise returns None. Raises NotImplementedError if
the version cannot be determined. This may mean the file is not available,
or not available in the standard place (/usr/lib).
"""
# get version number as string
maj, min_, rev = get_version_python().split('.')
# libfile = /usr/lib/libboost_python-py27.so' or similar
libfile = '/usr/lib/libboost_python-py%s%s.so' % (maj, min_)
try:
filename = os.readlink(libfile)
except OSError:
raise NotImplementedError(
"Cannot locate %s. Cannot determine boost-python version." % libfile)
# expect filename to be something like 'libboost_python-py27.so.1.49.0'
version = filename.split(".so.")[1]
return version
def get_debian_package_version(pkg_name):
"""
Determine and return the version of the given Debian package (as a string).
This only works on Debian-derived systems (such as Debian or Ubuntu) as
it internally calls 'dpkg -s' to determine the version number.
If the package is installed, returns a string with the version number,
otherwise returns None. Warns if the version cannot be determined due to
an unsupported system.
"""
import subprocess
import re
version = None
try:
with open(os.devnull, 'w') as devnull:
output = subprocess.check_output(
['dpkg', '-s', pkg_name], stderr=devnull)
except subprocess.CalledProcessError as e:
logger.warning(
"Could not determine version of {} using dpkg.".format(pkg_name))
if e.returncode == 1:
logger.warning(
"The package {} is probably not installed.".format(pkg_name))
elif e.returncode == 127:
logger.warning(
"This does not seem to be a debian-derived Linux distribution.")
else:
logger.warning("Can't determine cause of error.")
return None
lines = output.split('\n')
version_str = filter(lambda s: s.startswith('Version'), lines)[0]
version = re.sub('Version: ', '', version_str)
return version
def get_version_sundials():
return finmag.native.sundials.get_sundials_version()
def get_version_paraview():
try:
# XXX TODO: There should be a more cross-platform way of
# determining the Paraview version, but the only method I could
# find is in the thread [1], and it doesn't work any more for
# recent versions of Paraview. It's quite annoying that something
# as simple as "import paraview; paraview.__version__" doesn't
# work...
#
# [1] http://blog.gmane.org/gmane.comp.science.paraview.user/month=20090801/page=34
version = get_debian_package_version('paraview')
    except:
        # Fall back to parsing the output of 'pvpython --version'.
        version = None
        try:
sh.pvpython('--version')
except sh.ErrorReturnCode_1 as ex:
# This is fine. (Oddly, pvpython returns
# with exit code 1 if successful...)
m = re.match('paraview version (.*)', ex.stderr.strip())
version = m.group(1)
return version
def running_binary_distribution():
"""Return True if this is the cython-based binary
distribution or False if it is source distribtion
"""
thefile = __file__
if thefile.endswith('.py') or thefile.endswith('.pyc'):
#logger.debug("Running source code version")
return False
elif thefile.endswith('.so'):
#logger.debug("Binary finmag distribution")
return True
else:
logger.error("thefile=%s" % thefile)
raise RuntimeError("Checking running_binary_distribution failed!")
def loose_compare_ubuntu_version(v1, v2):
if not v1.startswith('Ubuntu') or not v2.startswith('Ubuntu'):
return False
from distutils.version import LooseVersion
t1 = LooseVersion(v1).version
t2 = LooseVersion(v2).version
    # Indices 3 and 4 of the LooseVersion component list hold the major
    # and minor release numbers, so the patch level is ignored.
    if t1[3] == t2[3] and t1[4] == t2[4]:
return True
return False
if __name__ == "__main__":
linux_issue = get_linux_issue()
print("__file__ = %s" % __file__)
print("Linux issue: %s" % linux_issue)
print("Binary distribution: %s" % running_binary_distribution())
print("Sundials version: %s" % get_version_sundials())
print loose_compare_ubuntu_version('Ubuntu 12.04.1 LTS', "Ubuntu 12.04.2 LTS")
| 6,533 | 30.114286 | 91 |
py
|
finmag
|
finmag-master/src/finmag/util/visualization_impl.py
|
# This file contains the actual implementation of the Paraview-based
# plotting code. Unfortunately, we cannot import this directly into
# Finmag due to an incompatibility between the Python modules
# 'paraview.servermanager' and 'vtk' which can lead to segfaults (see
# comment in visualization.py). Instead, we need to call this as an
# external process (using the subprocess module) in order to avoid the
# conflict.
#
# NOTE: If you make any changes in the signature (or docstring) of
# this function, make sure to make the corresponding changes in
# visualization.py. (both in the signature of 'render_paraview_scene'
# and in the definition of 'script_string').
from __future__ import division
import os
import sh
import sys
import shutil
import textwrap
import tempfile
import logging
import IPython.core.display
import subprocess as sp
import numpy as np
import numbers
import shlex
import StringIO
from threading import Timer
logger = logging.getLogger("finmag")
# This is a copy of the function in finmag.util.helpers, but we can't import finmag
# in this file without throwing a crash so we define the function here
# separately.
def run_cmd_with_timeout(cmd, timeout_sec):
"""
Runs the given shell command but kills the spawned subprocess
if the timeout is reached.
    Returns a tuple (returncode, stdout, stderr) of the shell command.
    Raises OSError if the command does not exist. If the timeout is
    reached and the process is killed, the return code is -9.
"""
proc = sp.Popen(shlex.split(cmd), stdout=sp.PIPE, stderr=sp.PIPE)
kill_proc = lambda p: p.kill()
timer = Timer(timeout_sec, kill_proc, [proc])
timer.start()
stdout, stderr = proc.communicate()
timer.cancel()
return proc.returncode, stdout, stderr
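# Example (illustrative): 'sleep 10' is killed after two seconds, so the
# returned exit code is -9.
#
#     returncode, stdout, stderr = run_cmd_with_timeout('sleep 10', 2)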
def find_valid_X_display(displays_to_try=xrange(10, 100)):
"""
    Sequentially checks all X displays in the given list (default: 10 through 99)
    and returns the number of the first valid display that is found. Returns None
    if no valid display could be found.
    *Arguments*
    displays_to_try: list of displays to search (default: [10, ..., 99])
"""
# A (probably faster) alternative way would be to write a temporary
# shell script which contains the loop and run that script using a
# single subprocess call. However, since usually display :0 will be
# available the loop below should terminate quite quickly.
for display in displays_to_try:
try:
sh.xdpyinfo('-display', ':{}'.format(display))
# This display is available since the command finished successfully
logger.debug("Found valid display :{}".format(display))
return display
except sh.ErrorReturnCode:
# This display is not available
continue
logger.debug("No valid display found.")
return None
def find_unused_X_display(displays_to_try=xrange(10, 100)):
"""
    Sequentially checks all X displays in the given list (default: 10 through 99)
    and returns the number of the first unused display that is found. Returns None
    if no unused display could be found.
    *Arguments*
    displays_to_try: list of displays to search (default: [10, ..., 99])
"""
for display in displays_to_try:
try:
sh.xdpyinfo('-display', ':{}'.format(display))
# If the command finished successfully, this display is already in
# use.
except sh.ErrorReturnCode:
logger.debug("Found unused display :{}".format(display))
return display
logger.debug("No unused display found.")
return None
class ColorMap(object):
def __init__(self, color_space, rgb_points, nan_color):
self.color_space = color_space
self.rgb_points = rgb_points
self.nan_color = nan_color
_color_maps = {
"coolwarm":
ColorMap('Diverging',
[0.0, 0.231373, 0.298039, 0.752941,
1.0, 0.705882, 0.0156863, 0.14902],
[0.247059, 0, 0]),
"heated_body":
ColorMap('RGB',
[0.0, 0, 0, 0,
0.4, 0.901961, 0, 0,
0.8, 0.901961, 0.901961, 0,
1.0, 1, 1, 1],
[0, 0.498039, 1]),
"blue_to_red_rainbow":
ColorMap('HSV',
[0.0, 0, 0, 1,
1.0, 1, 0, 0],
[0.498039, 0.498039, 0.498039]),
}
_axes = {'x': 0, 'y': 1, 'z': 2, 'magnitude': -1}
_axes_names = {0: 'x', 1: 'y', 2: 'z', -1: 'magnitude'}
_representations = ['3D Glyphs', 'Outline', 'Points', 'Surface',
'Surface With Edges', 'Volume', 'Wireframe']
def render_paraview_scene(
pvd_file,
outfile,
field_name='m',
timesteps=None,
camera_position=[0, -200, +200],
camera_focal_point=[0, 0, 0],
camera_view_up=[0, 0, 1],
view_size=(800, 600),
magnification=1,
fit_view_to_scene=True,
color_by_axis=0,
colormap='coolwarm',
rescale_colormap_to_data_range=True,
show_colorbar=True,
colorbar_label_format='%-#5.2g',
add_glyphs=True,
glyph_type='cones',
glyph_scale_factor=1.0,
glyph_random_mode=True,
glyph_mask_points=True,
glyph_max_number_of_points=10000,
show_orientation_axes=False,
show_center_axes=False,
representation="Surface With Edges",
palette='screen',
use_parallel_projection=False,
trim_border=True,
rescale=None,
diffuse_color=None):
"""
Load a *.pvd file, render the scene in it and save the result to an image file.
*Returns*
An IPython.core.display.Image object containing the output image.
*Arguments*
pvd_file:
Input filename (must be in *.pvd format).
outfile:
Name of the output image file (may be None, which is the
default). The image type (e.g. PNG) is derived from the file
extension. If multiple timesteps are to be animated, the
output files will have additional suffixes of the form
'_N_TIMESTEP', where N represents the index of the timestep
(in the array passed as the argument `timesteps`) and TIMESTEP
is the actual timestep itself.
field_name:
The field to plot. Default: 'm' (= the normalised magnetisation).
Note that this field must of course have been saved in the .pvd
file.
timesteps:
The timesteps for which to render the scene. The default is
None, which means to animate all timesteps (and save them as a
sequence of images if `outfile` is specified). Other valid
values are either a single number or a list of numbers.
camera_position: 3-vector
camera_focal_point: 3-vector
camera_view_up: 3-vector
These variables control the position and orientation of the
camera.
view_size: pair of int
Controls the size of the view. This can be used to adjust the
size and aspect ratio of the visible scene (useful for example
        if a colorbar is present). Default: (800, 600).
magnification: int
Magnification factor which controls the size of the saved image.
Note that due to limitations in Paraview this must be an integer.
fit_view_to_scene: True | False
If True (the default), the view is automatically adjusted so
that the entire scene is visible. In this case the exact
location of the camera is ignored and only its relative
position w.r.t. the focal point is taken into account.
color_by_axis: integer or string (allowed values: 0, 1, 2, -1,
or 'x', 'y', 'z', 'magnitude')
The vector components in the direction of this axis are used
to color the plot. If '-1' is given, the vector magnitudes
are used instead of any vector components.
colormap:
The colormap to use. Supported values:
{}.
rescale_colormap_to_data_range: True | False
If False (default: True), the colormap corresponds to the data
range [-1.0, +1.0]. If set to True, the colormap is rescaled
so that it corresponds to the minimum/maximum data values *over
all specified timesteps*.
show_colorbar: True | False
If True (the default), a colorbar is added to the plot.
colorbar_label_format: string
Controls how colorbar labels are formatted (e.g., how many
digits are displayed, etc.). This can be any formatting string
for floating point numbers as understood by Python's 'print'
statement. Default: '%-#5.2g'.
add_glyphs: True | False
If True (the default), little glyphs are added at the mesh
vertices to indicate the direction of the field vectors.
glyph_type: string
Type of glyphs to use. The only currently supported glyph type
is 'cones'.
glyph_scale_factor: float
Controls the glyph size. The default value of 1.0 corresponds
to a value automatically determined by the plotting function
which makes the glyphs visible but keeps them small enough so
that glyphs at different vertices don't overlap. This argument
can be used to tweak that size (e.g. '0.5' means to use half
the automatically determined size, and '3.0' three times that
size, etc.).
glyph_mask_points: True | False
If True (the default), limit the maximum number of glyphs to
the value indicated by glyph_max_number_of_points.
glyph_max_number_of_points: int
Specifies the maximum number of glyphs that should appear in
the output dataset if glyph_mask_points is True.
glyph_random_mode: True | False
If True (the default), the glyph positions are chosen
randomly. Otherwise the point IDs to which glyphs are attached
are evenly spaced. This setting only has an effect if
glyph_mask_points is True.
show_orientation_axes: False | True
If True (default: False), a set of three small axes is added
to the scene to indicate the directions of the coordinate axes.
show_center_axes: False | True
If True (default: False), a set of three axes is plotted at
the center of rotation.
representation: string
        Controls the way in which the bodies in the scene are
        represented visually. Allowed values:
{}
palette: 'print' | 'screen'
The color scheme to be used. The main difference is that
'print' uses white as the background color whereas 'screen'
uses dark grey.
use_parallel_projection: True | False
If False (the default), perspective projection is used to
render the scene. Otherwise parallel projection is used.
trim_border: True | False
If True (the default), any superfluous space around the scene
will be trimmed from the saved image. This requires imagemagick
to be installed.
rescale: float | None
Factor by which the output image will be rescaled. For example,
using 'rescale=0.4' will rescale the image by 40%.
diffuse_color: 3-tuple of RGB values
The solid color of the body. If given, this overrides any
colormap-related values.
"""
from paraview import servermanager
import paraview.simple as pv
# Paraview crashes if there is no X server running, so we check
# whether this is the case.
    if 'DISPLAY' not in os.environ:
logger.warning(
"Could not detect a running X server (this may happen, for "
"example, if you are on a ssh connection without X forwarding; "
"use 'ssh -X' in this case). Aborting because Paraview is "
"likely to crash.")
    if representation not in _representations:
raise ValueError("Unsupported representation: '{}'. Allowed values: "
"{}".format(representation, _representations))
if abs(magnification - int(magnification)) > 1e-6:
logger.warning("Due to limitations in Paraview, the 'magnification' "
"argument must be an integer (got: {}). Using nearest "
"integer value.".format(magnification))
magnification = int(round(magnification))
if not os.path.exists(pvd_file):
raise IOError("File does not exist: '{}'.".format(pvd_file))
servermanager.Disconnect()
servermanager.Connect()
reader = servermanager.sources.PVDReader(FileName=pvd_file)
reader.UpdatePipeline()
view = servermanager.CreateRenderView()
repr = servermanager.CreateRepresentation(reader, view)
repr.Representation = representation
view.CameraPosition = camera_position
view.CameraFocalPoint = camera_focal_point
view.CameraViewUp = camera_view_up
if fit_view_to_scene:
# N.B.: this email describes a more sophisticated (= proper?)
# way of doing this, but it's probably overkill for now:
#
# http://www.paraview.org/pipermail/paraview/2012-March/024352.html
#
view.ResetCamera()
view.OrientationAxesVisibility = (1 if show_orientation_axes else 0)
view.CenterAxesVisibility = (1 if show_center_axes else 0)
if palette == 'print':
view.Background = [1.0, 1.0, 1.0]
view.OrientationAxesLabelColor = [0.0, 0.0, 0.0]
repr.AmbientColor = [0.0, 0.0, 0.0]
elif palette == 'screen':
view.Background = [0.32, 0.34, 0.43]
view.OrientationAxesLabelColor = [1.0, 1.0, 1.0]
repr.AmbientColor = [1.0, 1.0, 1.0]
else:
raise ValueError("Palette argument must be either 'print' "
"or 'screen'. Got: {}".format(palette))
view.CameraParallelProjection = 1 if use_parallel_projection else 0
# Convert color_by_axis to integer and store the name separately
try:
color_by_axis = _axes[color_by_axis.lower()]
except AttributeError:
if not color_by_axis in [0, 1, 2, -1]:
raise ValueError("color_by_axis must have one of the values "
"[0, 1, 2, -1] or ['x', 'y', 'z', 'magnitude']. "
"Got: {}".format(color_by_axis))
color_by_axis_name = _axes_names[color_by_axis]
if timesteps is None:
timesteps = reader.TimestepValues
elif not isinstance(timesteps, (list, tuple, np.ndarray)):
if not isinstance(timesteps, numbers.Number):
raise TypeError(
"Argument 'timesteps' must be either None or a number or a list of numbers. Got: '{}'".format(timesteps))
timesteps = [timesteps]
data_range = (-1.0, 1.0)
if rescale_colormap_to_data_range:
dmin, dmax = np.infty, -np.infty
for t in timesteps:
reader.UpdatePipeline(t)
dataInfo = reader.GetDataInformation()
pointDataInfo = dataInfo.GetPointDataInformation()
arrayInfo = pointDataInfo.GetArrayInformation(field_name)
cur_data_range = arrayInfo.GetComponentRange(color_by_axis)
dmin = min(cur_data_range[0], dmin)
dmax = max(cur_data_range[1], dmax)
data_range = (dmin, dmax)
logger.debug("Rescaling colormap to data range: {}".format(data_range))
# Set the correct colormap and rescale it if necessary.
try:
cmap = _color_maps[colormap]
if colormap == 'blue_to_red_rainbow':
print(textwrap.dedent("""
Use of the 'rainbow' color map is discouraged as it has a number of distinct
disadvantages. Use at your own risk! For details see, e.g., [1], [2].
[1] K. Moreland, "Diverging Color Maps for Scientific Visualization"
http://www.sandia.gov/~kmorel/documents/ColorMaps/ColorMapsExpanded.pdf
[2] http://www.paraview.org/ParaView3/index.php/Default_Color_Map
"""))
except KeyError:
raise ValueError("Unsupported colormap: '{}'. Allowed values: "
"{}".format(colormap, _color_maps.keys()))
lut = servermanager.rendering.PVLookupTable()
lut.ColorSpace = cmap.color_space
    # Copy the points so that the rescaling below does not mutate the
    # global colormap definition in _color_maps.
    rgb_points = list(cmap.rgb_points)
dmin, dmax = data_range
cmin = rgb_points[0]
cmax = rgb_points[-4]
if cmin == cmax:
# workaround for the case that the data range only
# contains a single value
cmax += 1e-8
for i in xrange(0, len(rgb_points), 4):
rgb_points[i] = (rgb_points[i] - cmin) / \
(cmax - cmin) * (dmax - dmin) + dmin
lut.RGBPoints = rgb_points
lut.NanColor = cmap.nan_color
if color_by_axis in [0, 1, 2]:
lut.VectorMode = "Component"
lut.VectorComponent = color_by_axis
elif color_by_axis == -1:
lut.VectorMode = "Magnitude"
lut.VectorComponent = color_by_axis
if diffuse_color is not None:
print "diffuse_color: {} ({})".format(diffuse_color, type(diffuse_color))
repr.DiffuseColor = diffuse_color
else:
repr.LookupTable = lut
if field_name is not None:
repr.ColorArrayName = ("POINT_DATA", field_name)
if add_glyphs:
logger.debug("Adding cone glyphs.")
glyph = pv.servermanager.filters.Glyph(Input=reader)
# Try to determine an appropriate scale_factor automatically
import vtk.util.numpy_support as VN
grid = servermanager.Fetch(reader)
# Determine approximate mesh spacing
def mesh_spacing_for_cell(cell):
cell_bounds = np.array(cell.GetBounds()).reshape((3, 2))
return float(min(filter(lambda x: x != 0.0, cell_bounds[:, 1] - cell_bounds[:, 0])))
mesh_spacing = np.average(
[mesh_spacing_for_cell(grid.GetCell(i)) for i in range(grid.GetNumberOfCells())])
# Determine maximum field magnitude
m = VN.vtk_to_numpy(grid.GetPointData().GetArray(field_name))
max_field_magnitude = float(max(map(np.linalg.norm, m)))
glyph_scale_factor_internal = mesh_spacing / max_field_magnitude
logger.debug(
"Using automatically determined glyph_scale_factor_internal = {:.2g} "
"(determined from approximate mesh spacing {:.2g} and maximum "
"field magnitude {:.2g}). This may need manual tweaking in case "
"glyphs appear very large or very small.".format(
glyph_scale_factor_internal, mesh_spacing, max_field_magnitude))
glyph.ScaleFactor = glyph_scale_factor * glyph_scale_factor_internal
glyph.ScaleMode = 'vector'
glyph.Vectors = ['POINTS', field_name]
try:
# only relevant for animation IIUC, but can't hurt setting it
glyph.KeepRandomPoints = 1
except AttributeError:
# Older version of Paraview which doesn't support this setting.
# Ignoring for now.
pass
#glyph.MaskPoints = glyph_mask_points
#glyph.MaximumNumberofPoints = glyph_max_number_of_points
if glyph_type != 'cones':
glyph_type = 'cones'
logger.warning("Unsupported glyph type: '{}'. "
"Falling back to 'cones'.".format(glyph_type))
if glyph_type == 'cones':
cone = servermanager.sources.Cone()
cone.Resolution = 20
cone.Radius = 0.2
else:
# This should not happen as we're catching it above.
raise NotImplementedError()
glyph.SetPropertyWithName('Source', cone)
glyph_repr = servermanager.CreateRepresentation(glyph, view)
glyph_repr.LookupTable = lut
glyph_repr.ColorArrayName = ("POINT_DATA", 'GlyphVector')
if show_colorbar:
# XXX TODO: Remove the import of paraview.simple once I know why
from paraview.simple import CreateScalarBar
scalarbar = CreateScalarBar(
Title=field_name, ComponentTitle=color_by_axis_name.capitalize(),
Enabled=1, LabelFontSize=12, TitleFontSize=12)
        scalarbar.LabelFormat = colorbar_label_format
if palette == 'print':
scalarbar.LabelColor = [0.0, 0.0, 0.0] # black labels for print
else:
scalarbar.LabelColor = [1.0, 1.0, 1.0] # white labels for screen
view.Representations.append(scalarbar)
scalarbar.LookupTable = lut
reader.UpdatePipelineInformation()
if outfile is None:
_, outfile = tempfile.mkstemp(suffix='.png')
view.ViewSize = view_size
def write_image(outfilename):
_, suffix = os.path.splitext(outfilename)
if suffix == '.png':
view.WriteImage(outfilename, "vtkPNGWriter", magnification)
elif suffix in ['.jpg', '.jpeg']:
view.WriteImage(outfilename, "vtkJPEGWriter", magnification)
else:
raise ValueError("Output image must have extension '.png' or "
"'.jpg'. Got: {}".format(suffix))
if trim_border:
if palette == 'print':
bordercolor = '"rgb(255,255,255)"'
else:
# Strangely, we get a slightly different background
# color for PNG than for JPG.
bordercolor = '"rgb(82,87,110)"' if (
suffix == '.png') else '"rgb(82,87,109)"'
cmd = 'mogrify -bordercolor {} -border 1x1 -trim {}'.format(
bordercolor, outfilename)
try:
sp.check_output(cmd, stderr=sp.STDOUT, shell=True)
logger.debug("Trimming border from rendered scene.")
except OSError:
logger.warning(
"Using the 'trim' argument requires ImageMagick to be installed.")
except sp.CalledProcessError as ex:
logger.warning("Could not trim border from image. "
"The error message was: {}".format(ex.output))
if rescale:
rescale_factor = int(rescale * 100.0)
cmd = 'mogrify -resize {:d}% {}'.format(
rescale_factor, outfilename)
try:
sp.check_output(cmd, stderr=sp.STDOUT, shell=True)
logger.debug(
"Resizing output image by {:d}%".format(rescale_factor))
except OSError:
logger.warning(
"Using the 'rescale' argument requires ImageMagick to be installed.")
except sp.CalledProcessError as ex:
logger.warning(
"Could not rescale image. The error message was: {}".format(ex.output))
if len(timesteps) == 1:
# If a single timestep is rendered, we return the resulting image.
view.ViewTime = timesteps[0]
write_image(outfile)
res = IPython.core.display.Image(filename=outfile)
else:
# Otherwise we export a bunch of images with sequentially
# numbered suffixes.
#
# TODO: What should we return? Just the image for the first
# timestep as we currently do? Or can we somehow create
# a video and return that?
outbasename, outsuffix = os.path.splitext(outfile)
def generate_outfilename(i, t):
return outbasename + '_{:04d}_'.format(i) + str(t) + outsuffix
for (i, t) in enumerate(timesteps):
view.ViewTime = t
cur_outfilename = generate_outfilename(i, t)
logger.debug(
"Saving timestep {} to file '{}'.".format(t, cur_outfilename))
write_image(cur_outfilename)
res = IPython.core.display.Image(
filename=generate_outfilename(0, timesteps[0]))
servermanager.Disconnect()
    return res
# Automatically add supported colormaps and representations to the docstring:
render_paraview_scene.__doc__ = \
render_paraview_scene.__doc__.format(_color_maps.keys(), _representations)
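# Example usage (illustrative; 'm.pvd' stands for an existing .pvd file,
# e.g. one written by finmag's VTKSaver, and Paraview must be available):
#
#     img = render_paraview_scene('m.pvd', 'scene.png', field_name='m',
#                                 color_by_axis='z', add_glyphs=True)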
| 24,132 | 36.242284 | 121 |
py
|
finmag
|
finmag-master/src/finmag/util/vtk_saver_test.py
|
import os
import pytest
import dolfin as df
import numpy as np
from vtk_saver import VTKSaver
from finmag.util.helpers import assert_number_of_files
class TestVTKSaver(object):
def setup_class(self):
"""
        Create a dummy field (a dolfin Function on a 3d vector function
        space) which the tests below can save.
"""
mesh = df.BoxMesh(df.Point(0, 0, 0), df.Point(1, 1, 1), 5, 5, 5)
S3 = df.VectorFunctionSpace(mesh, "Lagrange", 1, dim=3)
N = mesh.num_vertices()
self.field_data = df.Function(S3)
# The next line is a hack and not recommended for real work
self.field_data.vector().array()[:] = np.zeros(3 * N)
def test_constructor(self, tmpdir):
"""
Check various methods of creating a VTKSaver object.
"""
os.chdir(str(tmpdir))
v1 = VTKSaver()
v2 = VTKSaver('myfile.pvd')
def test_file_extension_is_correct(self, tmpdir):
"""
Check that only filenames with extension '.pvd' are accepted.
"""
os.chdir(str(tmpdir))
VTKSaver("myfile.pvd")
with pytest.raises(ValueError):
VTKSaver("myfile.vtk")
with pytest.raises(ValueError):
VTKSaver("myfile.vtu")
with pytest.raises(ValueError):
VTKSaver("myfile.txt")
def test_existing_files_are_deleted_if_requested(self, tmpdir):
"""
"""
os.chdir(str(tmpdir))
# Create a few (empty) dummy .pvd and .vtu files
with open("myfile.pvd", 'w'):
pass
with open("myfile000001.vtu", 'w'):
pass
with open("myfile000002.vtu", 'w'):
pass
with open("myfile000003.vtu", 'w'):
pass
with open("myfile000004.vtu", 'w'):
pass
# Trying to re-open an existing .pvd file should raise an error:
with pytest.raises(IOError):
VTKSaver("myfile.pvd")
# Unless 'overwrite' is set to True, in which case the .vtu
# files should be deleted:
v = VTKSaver("myfile.pvd", overwrite=True)
# just to create a single .pvd and .vtu file
v.save_field(self.field_data, t=0)
assert_number_of_files("myfile.pvd", 1)
assert_number_of_files("myfile*.vtu", 1)
def test_save_field(self, tmpdir):
"""
        Check that calling save_field with the given field data works
        and creates the expected .vtu files.
"""
os.chdir(str(tmpdir))
v = VTKSaver("myfile.pvd")
v.save_field(self.field_data, t=0.0)
assert_number_of_files("myfile.pvd", 1)
assert_number_of_files("myfile*.vtu", 1)
v.save_field(self.field_data, t=1e-12)
assert_number_of_files("myfile.pvd", 1)
assert_number_of_files("myfile*.vtu", 2)
v.save_field(self.field_data, t=3e-12)
v.save_field(self.field_data, t=8e-12)
assert_number_of_files("myfile.pvd", 1)
assert_number_of_files("myfile*.vtu", 4)
def test_saving_to_file_with_different_name(self, tmpdir):
os.chdir(str(tmpdir))
v = VTKSaver("myfile1.pvd")
v.save_field(self.field_data, t=0.0)
v.save_field(self.field_data, t=0.1)
v.open("myfile2.pvd")
v.save_field(self.field_data, t=0.0)
v.save_field(self.field_data, t=0.1)
v.save_field(self.field_data, t=0.2)
assert_number_of_files("myfile1.pvd", 1)
assert_number_of_files("myfile1*.vtu", 2)
assert_number_of_files("myfile2.pvd", 1)
assert_number_of_files("myfile2*.vtu", 3)
| 3,619 | 31.321429 | 72 |
py
|
finmag
|
finmag-master/src/finmag/util/batch_task.py
|
import os
import time
import numpy as np
from multiprocessing import Process, Queue, Lock
lock = Lock()
class TaskState(object):
def __init__(self, taskname):
self.taskname = taskname
self.state = {}
self.load()
def load(self):
if not os.path.exists(self.taskname):
return
        with open(self.taskname, 'r') as f:
            data = f.read()
        for line in data.splitlines():
            k, v = line.split(':', 1)
            self.state[k.strip()] = v.strip()
def save_state(self):
        with open(self.taskname, 'w') as f:
            for (k, v) in self.state.items():
                f.write(u'%s : %s\n' % (k, v))
def update_state(self, k, v, save=True):
key = self.dict2str(k)
if save:
self.load()
if v:
self.state[key] = 'Done!'
else:
self.state[key] = 'Waiting!'
if save:
self.save_state()
def dict2str(self, d):
res = []
for k in d:
res.append(k)
res.append(str(d[k]))
return '_'.join(res)
def done(self, k):
key = self.dict2str(k)
if key in self.state:
if 'Done' in self.state[key]:
return True
else:
self.update_state(k, False, False)
return False
class BatchTasks(object):
def __init__(self, fun, processes=4, taskname='task', waiting_time=1):
self.fun = fun
self.tasks = [{}]
self.parameters = []
self.current_directory = os.getcwd()
self.ts = TaskState(taskname + '.txt')
self.waiting_time = waiting_time
self.dims = []
self.processes = processes
self.process_res = []
def add_parameters(self, name, values):
new_tasks = []
self.parameters.append(name)
self.dims.append(len(values))
for task in self.tasks:
for v in values:
t = dict(task)
t[name] = v
new_tasks.append(t)
self.tasks = list(new_tasks)
def generate_directory(self, task):
base = self.current_directory
for name in self.parameters:
base = os.path.join(base, name + '_' + str(task[name]))
return base
def run_single(self):
while not self.task_q.empty():
task = self.task_q.get()
dirname = self.generate_directory(task)
if not os.path.exists(dirname):
os.makedirs(dirname)
os.chdir(dirname)
self.fun(**task)
os.chdir(self.current_directory)
lock.acquire()
self.ts.update_state(task, True)
lock.release()
time.sleep(self.waiting_time)
def start(self):
self.task_q = Queue()
for task in self.tasks:
if not self.ts.done(task):
self.task_q.put(task)
self.ts.save_state()
self.threads = []
for _ in range(self.processes):
t = Process(target=self.run_single)
t.start()
self.threads.append(t)
time.sleep(self.waiting_time)
for t in self.threads:
t.join()
def post_process(self, fun):
for task in self.tasks:
dirname = self.generate_directory(task)
os.chdir(dirname)
try:
self.process_res.append(fun(**task))
except:
print 'error:', task
os.chdir(self.current_directory)
def get_res(self, key=None, value=None):
res = []
par = []
if len(self.parameters) == 1:
for i, task in enumerate(self.tasks):
par.append(task.values()[0])
res.append(self.process_res[i])
elif len(self.parameters) == 2:
for i, task in enumerate(self.tasks):
if key in task and task[key] == value:
res.append(self.process_res[i])
tmp_task = dict(task)
del tmp_task[key]
par.append(tmp_task.values()[0])
else:
            raise NotImplementedError(
                'Only one- and two-parameter cases are supported!')
if key is None and value is None and len(self.parameters) == 2:
v0 = self.parameters[0]
v1 = self.parameters[1]
for i, task in enumerate(self.tasks):
par.append((task[v0], task[v1]))
res.append(self.process_res[i])
return np.array(par), np.array(res)
def task(p1, p2):
print 'current directory:', os.getcwd()
res = 'p1=' + str(p1) + ' p2=' + str(p2)
    # res = 1 / 0  # uncomment to exercise the error handling in post_process
with open('res.txt', 'w') as f:
f.write(res)
time.sleep(1)
if __name__ == "__main__":
tasks = BatchTasks(task, 2)
tasks.add_parameters('p1', ['a', 'b', 'c'])
tasks.add_parameters('p2', range(1, 5))
tasks.start()
| 4,992 | 25.005208 | 74 |
py
|
finmag
|
finmag-master/src/finmag/util/configuration.py
|
import ConfigParser as configparser
import os
__all__ = ["get_configuration"]
CONFIGURATION_FILES = [
os.path.expanduser("~/.finmagrc"),
os.path.expanduser("~/.finmag/finmagrc")
]
def get_configuration():
_parser = configparser.SafeConfigParser()
_parser.read(CONFIGURATION_FILES)
return _parser
def get_config_option(section, name, default_value=None):
try:
return get_configuration().get(section, name)
except configparser.NoOptionError:
return default_value
except configparser.NoSectionError:
return default_value
def write_finmagrc_template_to_file(filename):
"""
Write some default finmag configuration options to the given file.
"""
with open(filename, 'w') as f:
f.write(FINMAGRC_TEMPLATE)
def create_default_finmagrc_file():
"""
Check whether a configuration file already exists at any of the
supported locations. If this is not the case, the file
'~/.finmagrc' is created with some default configuration options.
Supported locations for configuration files are:
~/.finmagrc
~/.finmag/finmagrc
"""
import logging
logger = logging.getLogger("finmag")
if not any([os.path.exists(f) for f in CONFIGURATION_FILES]):
try:
write_finmagrc_template_to_file(os.path.expanduser('~/.finmagrc'))
logger.info(
"Created default configuration in '~/.finmagrc' because no "
"Finmag configuration file was found. Please review the "
"settings and adapt them to your liking.")
except IOError as e:
logger.info(
"Could not create default configuration file '~/.finmagrc' "
"(reason: {}). Please create one manually.".format(e.strerror))
# Template for the '.finmagrc' file. This is used in the documentation
# and to create a default configuration file if none exists yet.
FINMAGRC_TEMPLATE = \
"""\
[logging]
# Color_scheme choices: [dark_bg, light_bg, none]
color_scheme = dark_bg
# Logfiles entries:
#
# - Files with an absolute path name (such as '~/.finmag/global.log')
# define global logfiles to which all finmag programs will add
# log statements. Their file size can be limited by setting appropriate
# values for 'maxBytes' and 'backupCount' (see the documentation of
# 'logging.handlers.RotatingFileHander' for details on what they mean).
#
# - Filenames without an absolute path (such as 'session.log') result in
# a logfile of that name being created in the current working
# directory when the finmag module is loaded with 'import finmag'.
#
# For example:
#
#logfiles =
# ~/.finmag/global.log
# session.log
logfiles =
~/.finmag/global.log
# Logfile size limit in bytes (default: 50 MB)
maxBytes = 52428800
# Number of backups for logfiles when they exceed the size limit
backupCount = 1
# Useful logging level choices: [DEBUG, INFO, WARNING]
console_logging_level = DEBUG
# Set this to False to suppress the initial debugging messages
# reporting the versions of various modules.
display_module_versions_at_startup = True
[visualization]
# When using Paraview for visualization, it usually uses the default
# X display :0, which leads to an annoying pop-up window. If 'xpra'
# is installed, this can be used instead to render the scene off-screen.
# However, sometimes this leads to errors and/or broken images.
# Therefore we provide this configuration option to turn the use of
# xpra off. If xpra is not installed then this setting has no effect.
use_xpra = True
# Tell xpra which display to use ('None' means try to find any available
# unused X display). This setting has no effect if xpra is disabled.
use_display = None
"""
| 3,749 | 30.779661 | 79 |
py
|
finmag
|
finmag-master/src/finmag/util/plot_helpers_test.py
|
import os
import pytest
from finmag.example import barmini
from finmag.util.plot_helpers import *
@pytest.mark.requires_X_display
def test_plot_ndt_columns_and_plot_dynamics(tmpdir):
"""
    Simply check that we can call `plot_ndt_columns` and `plot_dynamics`
    with various arguments.
os.chdir(str(tmpdir))
sim = barmini()
sim.schedule('save_ndt', every=1e-12)
sim.run_until(1e-11)
plot_ndt_columns('barmini.ndt', columns=['m_x', 'm_y', 'm_z', 'E_Demag', 'H_Exchange_x'],
outfile='barmini.png', title="Some awesome title",
show_legend=True, legend_loc='center', figsize=(10, 4))
plot_dynamics('barmini.ndt', components='xz',
outfile='barmini2.png', xlim=(0, 0.8e-11), ylim=(-1, 1))
assert(os.path.exists('barmini.png'))
assert(os.path.exists('barmini2.png'))
| 856 | 30.740741 | 93 |
py
|
finmag
|
finmag-master/src/finmag/util/macrospin.py
|
import numpy
import numbers
from finmag.util.consts import gamma as gamma_llg
def make_analytic_solution(H, alpha, gamma=gamma_llg):
"""
Returns a function `m(t)` which computes the magnetisation vector
of a macrospin as a function of time, i.e. the typical precession
under the influence of an applied field `H`. Assumes the initial
condition m(0) = (1,0,0).
Arguments:
- H: the magnitude of the applied field (in A/m)
- alpha: Gilbert damping factor (dimensionless)
- gamma (alias OOMMF's gamma_G): gyromagnetic ratio (in m/A*s)
"""
if not isinstance(H, numbers.Number):
raise TypeError("H must be a single number, but got: {}".format(H))
p = float(gamma) / (1 + alpha ** 2)
theta0 = numpy.pi / 2
t0 = numpy.log(
numpy.sin(theta0) / (1 + numpy.cos(theta0))) / (p * alpha * H)
# Matteo uses spherical coordinates,
# which have to be converted to cartesian coordinates.
def phi(t):
return p * H * t
def cos_theta(t):
return numpy.tanh(p * alpha * H * (t - t0))
def sin_theta(t):
return 1 / (numpy.cosh(p * alpha * H * (t - t0)))
def x(t):
return sin_theta(t) * numpy.cos(phi(t))
def y(t):
return sin_theta(t) * numpy.sin(phi(t))
def z(t):
return cos_theta(t)
def m(t):
return numpy.array([x(t), y(t), z(t)])
return m
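if __name__ == "__main__":
    # Minimal usage sketch; the field strength and damping constant are
    # illustrative assumptions. m(0) should equal (1, 0, 0).
    m = make_analytic_solution(H=1e6, alpha=0.02)
    print "m(0) =", m(0.0)
    print "m(10 ps) =", m(10e-12)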
| 1,413 | 25.185185 | 75 |
py
|
finmag
|
finmag-master/src/finmag/util/plot_helpers.py
|
"""
Easy way to plot values of a function R2 -> R in 2D or 3D.
Does not offer much flexibility, but can be helpful for quick visualisation
of data in an ipython notebook for instance.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
# don't let the pyflakes error "unused import" in the next line fool you
from mpl_toolkits.mplot3d import axes3d # used in fig.gca(projection="3d")
from finmag.util.fileio import Tablereader
import types
from .helpers import *
def surface_2d(x, y, u, labels=("", "", ""), title="",
ylim=None, xlim=None, clim=None, cmap=cm.coolwarm, path="", **kwargs):
fig = plt.figure(**kwargs)
ax = fig.add_subplot(111)
X, Y = np.meshgrid(x, y)
surf = ax.pcolormesh(X, Y, u, cmap=cmap)
if xlim is not None:
ax.set_xlim(xlim)
if ylim is not None:
ax.set_ylim(ylim)
if clim is not None:
surf.set_clim(vmin=clim[0], vmax=clim[1])
cb = fig.colorbar(surf)
cb.ax.yaxis.set_label_position('right')
ax.set_xlabel(labels[0])
ax.set_ylabel(labels[1])
cb.ax.set_ylabel(labels[2])
plt.title(title)
plt.savefig(path) if path else plt.show()
def surface_3d(x, y, u, labels=("", "", ""), title="", path=""):
fig = plt.figure()
ax = fig.gca(projection='3d')
X, Y = np.meshgrid(x, y)
surf = ax.plot_surface(X, Y, u, cmap=cm.coolwarm)
ax.set_xlabel(labels[0])
ax.set_ylabel(labels[1])
ax.set_zlabel(labels[2])
plt.title(title)
plt.savefig(path) if path else plt.show()
def quiver(f, mesh, filename=None, title="", **kwargs):
"""
Takes a numpy array of the values of a vector-valued function, defined
over a mesh (either a dolfin mesh, or one from finmag.util.oommf.mesh)
and shows a quiver plot of the data.
Accepts mlab quiver3d keyword arguments as keywords,
which it will pass down.
"""
assert isinstance(f, np.ndarray)
from mayavi import mlab
from dolfin.cpp import Mesh as dolfin_mesh
from finmag.util.oommf.mesh import Mesh as oommf_mesh
if isinstance(mesh, dolfin_mesh):
coords = mesh.coordinates()
elif isinstance(mesh, oommf_mesh):
coords = np.array(list(mesh.iter_coords()))
elif isinstance(mesh, np.ndarray) or isinstance(mesh, list):
# If you know what the data has to look like, why not
# be able to pass it in directly.
coords = mesh
else:
raise TypeError("Don't know what to do with mesh of class {0}.".format(
mesh.__class__))
r = coords.reshape(coords.size, order="F").reshape((coords.shape[1], -1))
# All 3 coordinates of the mesh points must be known to the plotter,
# even if the mesh is one-dimensional. If not all coordinates are known,
# fill the rest up with zeros.
codimension = 3 - r.shape[0]
if codimension > 0:
r = np.append(r, [np.zeros(r[0].shape[0])] * codimension, axis=0)
if f.size == f.shape[0]:
# dolfin provides a flat numpy array, but we would like
# one with the x, y and z components as individual arrays.
f = components(f)
figure = mlab.figure(bgcolor=(1, 1, 1), fgcolor=(0, 0, 0))
q = mlab.quiver3d(*(tuple(r) + tuple(f)), figure=figure, **kwargs)
q.scene.isometric_view()
mlab.title(title)
mlab.axes(figure=figure)
if filename:
mlab.savefig(filename)
else:
mlab.show()
mlab.close(all=True)
def plot_ndt_columns(ndt_file, columns=['m_x', 'm_y', 'm_z'], style='-',
xlim=None, ylim=None, outfile=None, title=None,
show_legend=True, legend_loc='best', figsize=None):
"""
Helper function to quickly plot the time evolution of the specified
columns in an .ndt file (default: m_x, m_y, m_z) and optionally save
the output to a file.
*Arguments*
columns : list of strings
The names of the columns to plot. These must coincide with the
names in the first header line of the .ndt file.
outfile : None | string
If given, save the plot to a file with this name.
style : string
The plotting style. Default: '-'.
title : None | string
Title text to use for the plot.
show_legend : boolean
If True, a legend with the column names is displayed.
legend_loc : string | integer
Optional location code for the legend (same as for pyplot's
legend() command).
figsize : None | pair of float
Optional argument to set the figure size of the output plot.
"""
f = Tablereader(ndt_file)
ts = f.timesteps()
column_vals = f[tuple(columns)]
fig = plt.figure(figsize=figsize)
ax = fig.gca()
for col_name, col in zip(columns, column_vals):
ax.plot(ts, col, style, label=col_name)
if title:
ax.set_title(title)
if show_legend:
ax.legend(loc=legend_loc)
ymin, ymax = ylim or ax.get_ylim()
ax.set_xlim(xlim)
ax.set_ylim(ymin - 0.05, ymax + 0.05)
if outfile:
fig.savefig(outfile)
return fig
def plot_dynamics(ndt_file, components='xyz', **kwargs):
"""
Convenience wrapper around `plot_ndt_columns` with a slightly
easier to remember name. The main difference is that this function
can only plot the dynamics of the magnetisation, not other fields.
The magnetisation components to plot are specified in the argument
`components`. All other keyword arguments are the same as for
`plot_ndt_columns`.
"""
columns = ['m_{}'.format(c) for c in components]
return plot_ndt_columns(ndt_file, columns=columns, **kwargs)
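# Example (illustrative; 'barmini.ndt' stands for any table file written by
# a finmag simulation, cf. plot_helpers_test.py):
#
#     plot_dynamics('barmini.ndt', components='xz', outfile='dynamics.png')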
def plot_dynamics_3d(ndt_file, field='m', style='-', elev=None, azim=None,
outfile=None, title=None, show_legend=True,
legend_loc='best', figsize=None):
"""
Plot the time evolution of a 3D vector field (default: 'm') as a 3D
trajectory and optionally save the output to a file.
*Arguments*
field : string
The field to be plotted (default: 'm'). The .ndt file must
contain three columns corresponding to the x, y, z components
of this field (e.g., 'm_x', 'm_y', 'm_z').
style : string
The plotting style. Default: '-'.
elev : float
The 'elevation' (= polar angle) of the camera position (in degrees).
Sensible values are between -90 and +90.
azim : float
The azimuthal angle of the camera position (in degrees).
Sensible values are between 0 and 360.
outfile : None | string
If given, save the plot to a file with this name.
title : None | string
Title text to use for the plot.
show_legend : boolean
If True, a legend with the field names is displayed.
legend_loc : string | integer
Optional location code for the legend (same as for pyplot's
legend() command).
figsize : None | pair of float
Optional argument to set the figure size of the output plot.
"""
f = Tablereader(ndt_file)
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot('111', projection='3d')
ax.view_init(elev=elev, azim=azim)
fld_x = f[field + '_x']
fld_y = f[field + '_y']
fld_z = f[field + '_z']
ax.plot(fld_x, fld_y, fld_z, label=field if show_legend else '')
if title:
ax.set_title(title)
if show_legend:
ax.legend(loc=legend_loc)
if outfile:
fig.savefig(outfile)
return fig
def plot_hysteresis_loop(H_vals, m_vals,
style='x-', add_point_labels=False, point_labels=None,
infobox=[], infobox_loc='bottom right',
filename=None, title="Hysteresis loop",
xlabel="H_ext (A/m)", ylabel="m_avg",
figsize=(10, 7)):
"""
Produce a hysteresis plot.
Arguments:
H_vals -- list of scalar values; the values of the applied field used for the relaxation
stages of the hysteresis loop
m_vals -- list of scalar values; the magnetisation obtained at the end of each relaxation
stage in the hysteresis loop
Keyword arguments:
style -- the plot style (default: 'x-')
add_point_labels -- if True (default: False), every point is labeled with a number which
indicates the relaxation stage of the hysteresis loop it represents
point_labels -- list or None; each list element can either be an integer or a pair of the
form (index, label), where index is an integer and label is a string. If
not None, only the points whose index appears in this list are labelled
(either with their index, or with the given label string if provided).
For example, if only every 10th point should be labeled, one might say
'point_labels=xrange(0, NUM_POINTS, 10)'.
infobox -- list; each entry can be either a string or a pair of the form (name, value).
If not empty, an info box will added to the plot with the list elements appearing
on separate lines. Strings are printed verbatim, whereas name/value pairs are
converted to a string of the form "name = value".
filename -- if given, save the resulting plot to a file with the specified name;
can also be a list of files
"""
if not all([isinstance(x, (types.IntType, types.FloatType)) for x in m_vals]):
raise ValueError(
"m_vals must be a list of scalar values, got: {}".format(m_vals))
fig = plt.figure(figsize=figsize)
ax = fig.gca()
N = len(H_vals) // 2
H_max = max(H_vals)
ax.plot(H_vals, m_vals, style)
ax.set_xlim(-1.1 * H_max, 1.1 * H_max)
ax.set_ylim((-1.2, 1.2))
if point_labels is None:
point_labels = xrange(len(H_vals))
# Convert point_labels into a dictionary where the keys are the point indices
# and the values are the respective labels to be used.
point_labels = dict(
map(lambda i: i if isinstance(i, tuple) else (i, str(i)), point_labels))
if add_point_labels:
for i in xrange(len(H_vals)):
            if i in point_labels:
x = H_vals[i]
y = m_vals[i]
ax.annotate(point_labels[i], xy=(
x, y), xytext=(-10, 5) if i < N else (0, -15), textcoords='offset points')
# draw the info box
if infobox != []:
box_text = ""
for elt in infobox:
if isinstance(elt, types.StringType):
box_text += elt + '\n'
else:
try:
name, value = elt
box_text += "{} = {}\n".format(name, value)
except ValueError:
raise ValueError(
"All list elements in 'infobox' must be either strings or pairs of the form (name, value). Got: '{}'".format(elt))
box_text = box_text.rstrip()
if infobox_loc not in ["top left", "top right", "bottom left", "bottom right"]:
raise ValueError(
"'infobox_loc' must be one of 'top left', 'top right', 'bottom left', 'bottom right'.")
vpos, hpos = infobox_loc.split()
x = H_max if hpos == "right" else -H_max
y = 1.0 if vpos == "top" else -1.0
ax.text(x, y, box_text, size=12,
# transform = ax.transAxes,
horizontalalignment=hpos, verticalalignment=vpos, multialignment="left",
bbox=dict(boxstyle="round, pad=0.3", facecolor="white", edgecolor="green", linewidth=1))
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_title(title)
plt.tight_layout()
if filename:
filenames = [filename] if isinstance(
filename, basestring) else filename
for name in filenames:
create_missing_directory_components(name)
fig.savefig(name)
def boxplot(arr, filename, **kwargs):
plt.boxplot(list(arr), **kwargs)
plt.savefig(filename)
if __name__ == "__main__":
# --------------------------- DEMO ----------------------------------------#
xs = np.linspace(-300, 300, 201)
ts = np.linspace(0, 100, 101)
my = np.zeros((len(ts), len(xs)))
    for i, t in enumerate(ts):  # fake some magnetisation data
        my[i][:] = t * np.sin(
            2 * np.pi * 3 * xs / abs(np.min(xs) - np.max(xs))) / 100
print "# values on x-axis: {}, # values on y-axis (time): {}.".format(
len(xs), len(ts))
print "Shape of the plotted array: {}.".format(my.shape)
print "Minimum: {}, Maximum: {}.".format(np.min(my), np.max(my))
labels = ("x (nm)", "time (ps)", "m_y")
surface_2d(xs, ts, my, labels, "2D surface", path="surface_2d.png")
surface_3d(xs, ts, my, labels, "3D surface", path="surface_3d.png")
print "Saved plots in 'surface_2d.png' and 'surface_3d.png'."
| 13,058 | 32.570694 | 138 |
py
|
finmag
|
finmag-master/src/finmag/util/dispersion.py
|
import os
import math
import numpy as np
import dolfin as df
import fenicstools as tls
from joblib import Memory
from tempfile import mkdtemp
__all__ = ['points_on_line', 'points_on_axis', 'probe',
'magnetisation_deviation', 'spinwaves', 'dispersion_relation',
'spinwaves_to_vtk']
# important, since we manipulate the np.array directly
df.parameters.reorder_dofs_serial = False
CACHE = mkdtemp()
memory = Memory(cachedir=CACHE, verbose=0)
def points_on_line(r0, r1, spacing):
"""
Coordinates of points spaced `spacing` apart between points `r0` and `r1`.
The dimensionality is inferred from the length of the tuples `r0` and `r1`,
while the specified `spacing` will be an upper bound to the actual spacing.
"""
dim = len(r0)
v = np.array(r1) - np.array(r0)
length = np.linalg.norm(v)
    steps = int(math.ceil(1.0 * length / spacing)) + 1
points = np.zeros((steps, dim))
for i in xrange(dim):
points[:, i] = np.linspace(r0[i], r1[i], steps)
return points
def points_on_axis(mesh, axis, spacing, offset=0):
"""
The points along `axis` spaced `spacing` apart with `offset` from the edge
of the mesh. Axis should be one of x, y, or z.
"""
    # ord('x') == 120, so this maps axis 'x'/'y'/'z' to index 0/1/2.
    axis_i = ord(axis) - 120
coords_i = mesh.coordinates()[:, axis_i]
cmin, cmax = coords_i.min(), coords_i.max()
cleft, cright = cmin + offset, cmax - offset
distance = cright - cleft
    steps = int(math.ceil(distance / spacing)) + 1
coordinates = np.zeros((steps, mesh.geometry().dim()))
coordinates[:, axis_i] = np.linspace(cleft, cright, steps)
return coordinates
@memory.cache
def probe(points, mesh, data_fun):
"""
Returns the recorded magnetisation dynamics on the given points on mesh.
The callable `data_fun` should return the time and the recorded
magnetisation for an integer timestep or False if no magnetisation exists
for that timestep.
"""
S3 = df.VectorFunctionSpace(mesh, "CG", 1, dim=3)
m = df.Function(S3)
probes = tls.Probes(points.flatten(), S3)
ts = []
timestep = 0
while True:
data = data_fun(timestep)
if data:
ts.append(data[0])
m.vector()[:] = data[1]
probes(m)
else:
break
timestep += 1
return ts, np.swapaxes(probes.array(), 0, 2)
def magnetisation_deviation(m_t, j):
"""
Returns the deviation of the `j` component of the
magnetisation from the starting magnetisation.
"""
    # ord('x') == 120, so this maps component 'x'/'y'/'z' to index 0/1/2.
    j = ord(j) - 120
mj_t = m_t[:, j]
delta_mj_t = mj_t - mj_t[0]
return delta_mj_t
@memory.cache
def spinwaves(points, mesh, data_fun, component):
"""
Returns ingredients for surface plot of magnetisation deviation dynamics.
"""
length = np.linalg.norm(points[-1] - points[0])
rs = np.linspace(0, length, points.shape[0])
ts, m_t = probe(points, mesh, data_fun)
delta_mj_t = magnetisation_deviation(m_t, component)
return rs, ts, delta_mj_t
@memory.cache
def dispersion_relation(points, mesh, data_fun, component):
"""
Returns ingredients for plot of dispersion relation.
"""
rs, ts, delta_mj_t = spinwaves(points, mesh, data_fun, component)
dr = abs(rs[1] - rs[0])
k = 2 * math.pi * np.fft.fftshift(np.fft.fftfreq(len(rs), dr))
dt = abs(ts[1] - ts[0])
freq = np.fft.fftshift(np.fft.fftfreq(len(ts), dt))
amplitude = np.abs(np.fft.fftshift(np.fft.fft2(delta_mj_t)))
return k, freq, amplitude
def spinwaves_to_vtk(points, mesh, data_fun, component, directory=""):
rs, ts, delta_mj_t = spinwaves(points, mesh, data_fun, component)
mesh = df.RectangleMesh(
df.Point(ts[0] * 1e12, rs[0] * 1e9),
df.Point(ts[-1] * 1e12, rs[-1] * 1e9),
len(ts) - 1, len(rs) - 1)
excitation_data = np.swapaxes(delta_mj_t, 0, 1).reshape(-1)
S1 = df.FunctionSpace(mesh, "CG", 1)
excitation = df.Function(S1)
excitation.rename("delta_m{}_t".format(component), "excitation")
excitation.vector()[:] = excitation_data
f = df.File(os.path.join(directory, "excitation.pvd"), "compressed")
f << excitation
| 4,216 | 29.338129 | 80 |
py
|
finmag
|
finmag-master/src/finmag/util/meshes.py
|
"""
This module contains convenience functions to create common types of
meshes. The execution time may be relatively slow (in particular for
fine meshes) because the mesh creation is done externally via Netgen.
Caveat: Netgen only saves the first 5-6 digits (or so) of each
coordinate during the mesh creation process. Thus it is not advisable
to use these functions to create meshes on the nanoscale. Instead,
create a "macroscopic" mesh and use something like unit_length=1e-9 to
set the desired length scale when reading the mesh into Finmag!
"""
import os
import re
import sys
import copy
import math
import shutil
import commands
import logging
import textwrap
import hashlib
import tempfile
import dolfin as df
import numpy as np
from types import ListType, TupleType
from math import sin, cos, pi
logger = logging.getLogger(name='finmag')
def from_geofile(geofile, save_result=True):
"""
Using netgen, returns a dolfin mesh object built from the given geofile.
*Arguments*
geofile (str)
Filename of a .geo file which is compatible with netgen.
save_result (bool) [optional]
Controls whether the resulting dolfin mesh is saved to
disk (default: True). Doing so greatly speeds up later
calls to the function with the same geofile. If the
geofile has been modified since the last run of the mesh
generation, the saved version is disregarded. The file
will have the same basename as the geofile, just with the
extension .xml.gz instead of .geo, and will be placed in
the same directory.
*Return*
mesh
dolfin mesh object, instance of the dolfin.cpp.Mesh class.
*Example*
The return value of this function can be used like a conventional
dolfin mesh.
.. code-block:: python
import dolfin
from finmag.util.meshes import from_geofile
mesh = from_geofile(path_to_my_geofile)
dolfin.plot(mesh, interactive=True)
"""
result_filename = os.path.splitext(geofile)[0] + ".xml.gz"
result_file_exists = False
skip_mesh_creation = False
if os.path.isfile(result_filename):
result_file_exists = True
if os.path.getmtime(result_filename) < os.path.getmtime(geofile):
# TODO: If save_result is False but the .xml.gz file
# already exists, it will be updated (hence, saved)
# anyway. Is this desired?
logger.warn("The mesh file '{}' is outdated (since it is "
"older than the .geo file '{}') and will be "
"overwritten.".format(result_filename, geofile))
else:
logger.debug("The mesh '{}' already exists and is "
"automatically returned.".format(result_filename))
skip_mesh_creation = True
if not skip_mesh_creation:
xml = convert_diffpack_to_xml(run_netgen(geofile))
change_xml_marker_starts_with_zero(xml)
result_filename = compress(xml)
mesh = df.Mesh(result_filename)
if not save_result and not result_file_exists:
# We delete the .xml.gz file only if it didn't exist previously
os.remove(result_filename)
logger.debug(
"Removing file '%s' because mesh is created on the fly." % result_filename)
return mesh
def from_csg(csg, save_result=True, filename="", directory=""):
"""
Using netgen, returns a dolfin mesh object built from the given CSG string.
If `save_result` is True (which is the default), both the generated
geofile and the dolfin mesh are saved to disk. By default, the
filename will use the md5 hash of the csg string, but can be
specified by passing a name (without suffix) into `filename`.
The `directory` argument can be used to control where the
generated files are saved. The `filename` argument may
contain path components, too (which are simply appended to
`directory`).
A word of caution: It will not overwrite an existing geofile with the same
name, so remember this when calling the function with save_result=True and
a custom filename.
Caveat: if `filename` contains an absolute path then value of
`directory` is ignored.
"""
if filename == "":
filename = hashlib.md5(csg).hexdigest()
if os.path.isabs(filename) and directory != "":
logger.warning("Ignoring 'directory' argument (value given: '{}') because 'filename' contains an absolute path: '{}'".format(
directory, filename))
if save_result:
if directory == "":
# TODO: Is there a reason why we can't use os.curdir
# directly as the default in the function definition
# above? I seem to remember that there was an issue
# related to the test suite (where files need to be
# created in MODULE_DIR), but it would be good to
# double-check that.
directory = os.curdir
# strip '.xml.gz' extension if present
        filename = re.sub(r'\.xml\.gz$', '', filename)
geofile = os.path.abspath(os.path.join(directory, filename) + ".geo")
# Make sure that 'directory' actually contains all the
# directory components of the path:
directory, _ = os.path.split(geofile)
if not os.path.exists(directory):
logger.debug(
"Creating directory '{}' as it does not exist.".format(directory))
os.mkdir(directory)
if not os.path.exists(geofile):
with open(geofile, "w") as f:
f.write(csg)
mesh = from_geofile(geofile, save_result=True)
else:
tmp = tempfile.NamedTemporaryFile(suffix='.geo', delete=False)
tmp.write(csg)
tmp.close()
mesh = from_geofile(tmp.name, save_result=False)
# Since we used delete=False in NamedTemporaryFile, we are
# responsible for the deletion of the file.
os.remove(tmp.name)
logger.debug(
"Removing file '%s' because mesh is created on the fly." % tmp.name)
return mesh
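# Minimal usage sketch (assumes netgen is installed and on the PATH):
# build a 10 x 10 x 10 cube directly from a CSG description.
def _example_from_csg():
    csg = textwrap.dedent("""\
        algebraic3d
        solid main = orthobrick ( 0, 0, 0; 10, 10, 10 ) -maxh = 2.0;
        tlo main;
        """)
    return from_csg(csg, save_result=False)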
def run_netgen(geofile):
"""
Runs netgen on the geofile and returns a file in DIFFPACK format.
"""
if not os.path.isfile(geofile):
raise ValueError("Can't find file: '{}'".format(geofile))
basename, extension = os.path.splitext(geofile)
diffpackfile = basename + ".grid"
if not extension == ".geo":
raise ValueError("Input needs to be a .geo file.")
logger.debug(
"Using netgen to convert {} to DIFFPACK format.".format(geofile))
netgen_cmd = "netgen -geofile={} -meshfiletype='DIFFPACK Format' -meshfile={} -batchmode".format(
geofile, diffpackfile)
status, output = commands.getstatusoutput(netgen_cmd)
if status == 34304:
logger.warning("Warning: Ignoring netgen's output status of 34304.")
elif status != 0:
print output
print "netgen failed with exit code", status
sys.exit(2)
elif output.lower().find("error") != -1:
logger.warning(
"Netgen's exit status was 0, but an error seems to have occurred anyway (since Netgen's output contains the word 'error').")
logger.warning("Netgen output:")
logger.warning("\n====>")
logger.warning(output)
logger.warning("<====\n")
logger.debug('Done!')
return diffpackfile
def convert_diffpack_to_xml(diffpackfile):
"""
Converts the diffpackfile to xml using dolfin-convert.
"""
if not os.path.isfile(diffpackfile):
raise ValueError("Can't find file: '{}'".format(diffpackfile))
logger.debug(
'Using dolfin-convert to convert {} to xml format.'.format(diffpackfile))
basename = os.path.splitext(diffpackfile)[0]
xmlfile = basename + ".xml"
dolfin_conv_cmd = 'dolfin-convert {0} {1}'.format(diffpackfile, xmlfile)
status, output = commands.getstatusoutput(dolfin_conv_cmd)
if status != 0:
print output
print "dolfin-convert failed with exit code", status
sys.exit(3)
files = ["%s.xml.bak" % basename,
"%s_bi.xml" % basename,
diffpackfile]
for f in files:
if os.path.isfile(f):
os.remove(f)
return xmlfile
def change_xml_marker_starts_with_zero(xmlfile):
"""
    In dolfin 1.1 (unlike dolfin 1.0) the xml file also contains a
    mesh_value_collection, and its marker indices start at 1, whereas the
    default df.dx refers to dx(0). This function shifts the markers so that
    they start at zero. (Could we report this as a very small bug? It seems
    that the dolfin community will abandon netgen later anyway.)
f = open(xmlfile, 'r')
data = f.read()
f.close()
data_begin = False
values = []
for line in data.splitlines():
if 'mesh_value_collection' in line:
if 'dim="3"' in line:
data_begin = True
else:
data_begin = False
if data_begin and 'value="' in line:
v = line.split('value="')[1]
v = v.split('"')[0]
values.append(int(v))
if len(values) == 0:
return
if min(values) == 0:
return
elif min(values) < 0:
raise ValueError("Mesh markers are wrong?!")
min_index = min(values)
f = open(xmlfile, 'w')
data_begin = False
for line in data.splitlines():
if 'mesh_value_collection' in line:
if 'dim="3"' in line:
data_begin = True
else:
data_begin = False
if data_begin and 'value="' in line:
v = line.split('value="')
v_bak = v[0]
v = v[1].split('"')[0]
v = int(v) - min_index
f.write(v_bak + 'value="%d"/>\n' % v)
else:
f.write(line + '\n')
f.close()
def compress(filename):
"""
Compress file using gzip.
"""
logger.debug("Compressing {}".format(filename))
compr_cmd = 'gzip -f %s' % filename
status, output = commands.getstatusoutput(compr_cmd)
if status != 0:
print output
print "gzip failed with exit code", status
sys.exit(4)
return filename + ".gz"
def box(x0, x1, x2, y0, y1, y2, maxh, save_result=True, filename='', directory=''):
"""
Returns a dolfin mesh object describing an axis-parallel box.
The two points (x0, x1, x2) and (y0, y1, y2) are interpreted as
two diagonally opposite corners of the box.
If `save_result` is True (the default), both the generated geofile
and the dolfin mesh will be saved to disk. By default, the
    filename will be automatically generated based on the corner
    coordinates and `maxh` (for example, 'box-0_0-0_0-0_0-1_0-2_0-3_0-0_5.geo'),
but a different one can be specified by passing a name (without
suffix) into `filename`. If `save_result` is False, passing a
filename has no effect.
The `directory` argument can be used to control where the files
should be saved in case no filename is given explicitly.
Note that this function uses Netgen to produce the mesh. There is
also the 'native' Dolfin method dolfin.cpp.BoxMesh() which creates a
regularly-spaced mesh (whereas the mesh produced by Netgen is more
irregularly spaced). Depending on the context, either can be
useful.
"""
# Make sure that each x_i < y_i
[x0, y0] = sorted([x0, y0])
[x1, y1] = sorted([x1, y1])
[x2, y2] = sorted([x2, y2])
csg = textwrap.dedent("""\
algebraic3d
solid main = orthobrick ( {}, {}, {}; {}, {}, {} ) -maxh = {maxh};
tlo main;
""").format(x0, x1, x2, y0, y1, y2, maxh=maxh)
if save_result == True and filename == '':
filename = "box-{:.1f}-{:.1f}-{:.1f}-{:.1f}-{:.1f}-{:.1f}-{:.1f}".format(
x0, x1, x2, y0, y1, y2, maxh).replace(".", "_")
return from_csg(csg, save_result=save_result, filename=filename, directory=directory)
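# Sketch: mesh a "macroscopic" 100 x 20 x 5 box; the nanoscale is applied
# later via unit_length=1e-9 (see the module docstring caveat about
# Netgen's limited coordinate precision).
def _example_box():
    return box(0, 0, 0, 100, 20, 5, maxh=3.0, save_result=False)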
def sphere(r, maxh, save_result=True, filename='', directory=''):
"""
Returns a dolfin mesh object describing a sphere with radius `r`
and given mesh coarseness maxh.
If `save_result` is True (the default), both the generated geofile
and the dolfin mesh will be saved to disk. By default, the
filename will be automatically generated based on the values of
`r` and `maxh` (for example, 'sphere-10_0-0_2.geo'), but a
different one can be specified by passing a name (without suffix)
into `filename`. If `save_result` is False, passing a filename has
no effect.
The `directory` argument can be used to control where the files
should be saved in case no filename is given explicitly.
"""
csg = textwrap.dedent("""\
algebraic3d
solid main = sphere ( 0, 0, 0; {r} ) -maxh = {maxh};
tlo main;
""").format(r=r, maxh=maxh)
if save_result == True and filename == '':
filename = "sphere-{:g}-{:g}".format(r, maxh).replace(".", "_")
return from_csg(csg, save_result=save_result, filename=filename, directory=directory)
def cylinder(r, h, maxh, save_result=True, filename='', directory=''):
"""
Return a dolfin mesh representing a cylinder of radius `r` and
height `h`. The argument `maxh` controls the maximal element size
in the mesh (see the Netgen manual 4.x, Chapter 2).
If `save_result` is True (the default), both the generated geofile
and the dolfin mesh will be saved to disk. By default, the
filename will be automatically generated based on the values of
`r`, `h` and `maxh` (for example, 'cyl-50_0-10_0-0_2.geo'), but a
different one can be specified by passing a name (without suffix)
    into `filename`. If `save_result` is False, passing a filename has
no effect.
The `directory` argument can be used to control where the files
should be saved in case no filename is given explicitly.
"""
csg_string = textwrap.dedent("""\
algebraic3d
solid fincyl = cylinder (0, 0, 1; 0, 0, -1; {r} )
and plane (0, 0, 0; 0, 0, -1)
and plane (0, 0, {h}; 0, 0, 1) -maxh = {maxh};
tlo fincyl;
""").format(r=r, h=h, maxh=maxh)
if save_result == True and filename == '':
filename = "cyl-{:.1f}-{:.1f}-{:.1f}".format(
r, h, maxh).replace(".", "_")
return from_csg(csg_string, save_result=save_result, filename=filename, directory=directory)
def nanodisk(d, h, maxh, save_result=True, filename='', directory=''):
"""
This is almost exactly the same as `cylinder`, but the first
argument is the *diameter* of the disk, not the radius. This is
often more intuitive than specifying the radius.
    *Arguments*
d: disk diameter
h: disk height (= thickness)
maxh: mesh discretization
For the other keyword arguments, see the documentation of `cylinder`.
*Returns*
The mesh for a cylindrical nanodisk of the specified shape.
"""
return cylinder(0.5 * d, h, maxh, save_result=save_result, filename=filename, directory=directory)
def elliptical_nanodisk(d1, d2, h, maxh, save_result=True, filename='', directory=''):
"""
    Almost exactly the same as `elliptic_cylinder`, except that the dimensions of the
    axes are given by the *diameters*, not the radii.
"""
return elliptic_cylinder(0.5 * d1, 0.5 * d2, h, maxh, save_result=save_result, filename=filename, directory=directory)
def elliptical_nanodisk_with_cuboid_shell(d1, d2, h, maxh_disk, lx, ly, lz, maxh_shell, valign='center', sep=1.0, save_result=True, filename='', directory=''):
"""
Return a dolfin mesh representing an elliptical nanodisk surrounded by a cuboid 'shell'.
This is useful to compute the stray field of a nanodisk using the 'airbox' method.
The vertical alignment of the cuboid shell with the nanodisk can be controlled using
the argument `valign` (valid values are: 'center', 'bottom', 'top').
*Arguments*
d1, d2: major and minor diameter of the elliptical nanodisk
h: disk height
maxh_disk: mesh discretisation of the nanodisk
lx, ly, lz: edge length of the cuboid shell
maxh_shell: mesh discretisation of the cuboid shell
valign: 'center' | 'bottom' | 'top'
sep: width of the gap between the nanodisk and the shell (default: 1.0)
"""
r1 = 0.5 * d1
r2 = 0.5 * d2
snegx = -0.5 * lx
snegy = -0.5 * ly
snegz = 0.0
sx = 0.5 * lx
sy = 0.5 * ly
sz = lz
    # We may have to use a very small non-zero value here if vertices
    # are missing from the inner mesh due to rounding errors.
    EPS = 0.0
if valign == 'bottom':
vdiff = EPS
elif valign == 'top':
vdiff = (lz - h) - EPS
elif valign == 'center':
vdiff = 0.5 * (lz - h)
else:
raise ValueError(
"Argument 'valign' must be one of 'center', 'top', 'bottom'. Got: '{}'.".format(valign))
snegz = snegz - vdiff
sz = sz - vdiff
csg_string = textwrap.dedent("""\
algebraic3d
solid disk = ellipticcylinder (0, 0, 0; {r1}, 0, 0; 0, {r2}, 0 )
and plane (0, 0, 0; 0, 0, -1)
and plane (0, 0, {h}; 0, 0, 1) -maxh = {maxh_disk};
solid shell = ellipticcylinder (0, 0, 0; {r1_shell}, 0, 0; 0, {r2_shell}, 0 )
and plane (0, 0, {negsep}; 0, 0, -1)
and plane (0, 0, {h_shell}; 0, 0, 1) -maxh = {maxh_disk};
solid box = orthobrick ( {snegx}, {snegy}, {snegz}; {sx}, {sy}, {sz} ) -maxh = {maxh_shell};
solid air = box and not shell;
tlo disk;
tlo air -transparent;
""").format(r1=r1, r2=r2, h=h, r1_shell=r1 + sep, r2_shell=r2 + sep, negsep=-sep, h_shell=h + sep,
snegx=snegx, snegy=snegy, snegz=snegz, sx=sx, sy=sy, sz=sz,
maxh_disk=maxh_disk, maxh_shell=maxh_shell)
if save_result == True and filename == '':
filename = "ellcyl-with-shell-{:.1f}-{:.1f}-{:.1f}-{:.1f}-{:.1f}-{:.1f}-{:.1f}-{:.1f}-{:.1f}-{}".format(
r1, r2, h, lx, ly, lz, maxh_disk, maxh_shell, sep, valign).replace(".", "_")
return from_csg(csg_string, save_result=save_result, filename=filename, directory=directory)
def pair_of_disks(d1, d2, h1, h2, sep, theta, maxh, save_result=True, filename='', directory=''):
"""
Return a dolfin mesh representing a pair of disks. The first disk
is always centered at the origin; the center of the second one is
at a distance from the origin so that the edge-to-edge separation
of the two disks equals `sep` (i.e. this distance is equal to
0.5*d1 + sep + 0.5*d2), and the angle between the x-axis and the
line joining the disk centers is given by `theta`.
*Arguments*
d1, d2: float
Diameters of the two disks.
h1, h2: float
Heights of the two disks.
sep: float
Edge-to-edge separation between the disks (note that this is *not* the distance
between the disk centers).
theta: float
Angle (in degrees) between the x-axis and the line joining the disk centers.
maxh: float
Mesh discretisation.
For the meaning of the arguments `save_result`, `filename`, and `directory` see
the docstring of the `cylinder` function.
"""
theta_rad = theta * pi / 180.0
r1 = 0.5 * d1
r2 = 0.5 * d2
sep_centers = r1 + sep + r2
x2 = sep_centers * cos(theta_rad)
y2 = sep_centers * sin(theta_rad)
csg_string = textwrap.dedent("""\
algebraic3d
solid disk1 = cylinder (0, 0, 1; 0, 0, -1; {r1} )
and plane (0, 0, 0; 0, 0, -1)
and plane (0, 0, {h1}; 0, 0, 1) -maxh = {maxh};
solid disk2 = cylinder ({x2}, {y2}, 1; {x2}, {y2}, -1; {r2} )
and plane (0, 0, 0; 0, 0, -1)
and plane (0, 0, {h2}; 0, 0, 1) -maxh = {maxh};
tlo disk1;
tlo disk2;
""").format(r1=r1, h1=h1, x2=x2, y2=y2, r2=r2, h2=h2, maxh=maxh)
if save_result == True and filename == '':
filename = "diskpair-{:.1f}-{:.1f}-{:.1f}-{:.1f}-{:.1f}-{:.1f}-{:.1f}".format(
r1, r2, h1, h2, sep, theta, maxh).replace(".", "_")
return from_csg(csg_string, save_result=save_result, filename=filename, directory=directory)
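# Sketch: two disks of diameter 30 and height 5 (in mesh units), separated
# by a 10-unit edge-to-edge gap, with the line of centers at 45 degrees to
# the x-axis.
def _example_pair_of_disks():
    return pair_of_disks(30, 30, 5, 5, sep=10, theta=45, maxh=3.0,
                         save_result=False)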
def elliptic_cylinder(r1, r2, h, maxh, save_result=True, filename='', directory=''):
"""
Return a dolfin mesh representing an elliptic cylinder with semi-major
axis r1, semi-minor axis r2 and height `h`. The argument `maxh` controls
the maximal element size in the mesh (see the Netgen manual 4.x, Chapter 2).
If `save_result` is True (the default), both the generated geofile
and the dolfin mesh will be saved to disk. By default, the
filename will be automatically generated based on the values of
    `r1`, `r2`, `h` and `maxh` (for example, 'ellcyl-50_0-25_0-10_0-0_2.geo'), but a
    different one can be specified by passing a name (without suffix)
    into `filename`. If `save_result` is False, passing a filename has
no effect.
The `directory` argument can be used to control where the files
should be saved in case no filename is given explicitly.
"""
csg_string = textwrap.dedent("""\
algebraic3d
solid fincyl = ellipticcylinder (0, 0, 0; {r1}, 0, 0; 0, {r2}, 0 )
and plane (0, 0, 0; 0, 0, -1)
and plane (0, 0, {h}; 0, 0, 1) -maxh = {maxh};
tlo fincyl;
""").format(r1=r1, r2=r2, h=h, maxh=maxh)
if save_result == True and filename == '':
filename = "ellcyl-{:.1f}-{:.1f}-{:.1f}-{:.1f}".format(
r1, r2, h, maxh).replace(".", "_")
return from_csg(csg_string, save_result=save_result, filename=filename, directory=directory)
def ellipsoid(r1, r2, r3, maxh, save_result=True, filename='', directory=''):
"""
Return a dolfin mesh representing an ellipsoid with main axes lengths
r1, r2, r3. The argument `maxh` controls the maximal element size in
the mesh (see the Netgen manual 4.x, Chapter 2).
If `save_result` is True (the default), both the generated geofile and
the dolfin mesh will be saved to disk. By default, the filename will be
    automatically generated based on the values of `r1`, `r2`, `r3` and `maxh`
    (for example, 'ellipsoid-50_0-25_0-10_0-0_2.geo'), but a different one can be
    specified by passing a name (without suffix) into `filename`. If `save_result`
is False, passing a filename has no effect.
The `directory` argument can be used to control where the files should be
saved in case no filename is given explicitly.
"""
csg_string = textwrap.dedent("""\
algebraic3d
solid ell = ellipsoid (0, 0, 0; {r1}, 0, 0; 0, {r2}, 0; 0, 0, {r3}) -maxh = {maxh};
tlo ell;
""").format(r1=r1, r2=r2, r3=r3, maxh=maxh)
if save_result == True and filename == '':
filename = "ellipsoid-{:.1f}-{:.1f}-{:.1f}-{:.1f}".format(
r1, r2, r3, maxh).replace(".", "_")
return from_csg(csg_string, save_result=save_result, filename=filename, directory=directory)
def ring(r1, r2, h, maxh, save_result=True, filename='', directory='', with_middle_plane=False):
"""
Return a dolfin mesh representing a ring with inner radius `r1`, outer
radius `r2` and height `h`. The argument `maxh` controls the maximal element size
in the mesh (see the Netgen manual 4.x, Chapter 2).
If `save_result` is True (the default), both the generated geofile
and the dolfin mesh will be saved to disk. By default, the
    filename will be automatically generated based on the values of
    `r1`, `r2`, `h` and `maxh` (for example, 'ring-10_0-50_0-10_0-0_2.geo'), but a
    different one can be specified by passing a name (without suffix)
    into `filename`. If `save_result` is False, passing a filename has
no effect.
The `directory` argument can be used to control where the files
should be saved in case no filename is given explicitly.
"""
csg_string = textwrap.dedent("""\
algebraic3d
solid fincyl = cylinder (0, 0, -{h}; 0, 0, {h}; {r1} )
and plane (0, 0, -{h}; 0, 0, -1)
and plane (0, 0, {h}; 0, 0, 1);
solid fincyl2 = cylinder (0, 0, -{h}; 0, 0, 0; {r2} )
and plane (0, 0, -{h}; 0, 0, -1)
and plane (0, 0, {h}; 0, 0, 1);
solid ring = fincyl2 and not fincyl -maxh = {maxh};
tlo ring;
""").format(r1=r1, r2=r2, h=h / 2.0, maxh=maxh)
if with_middle_plane:
csg_string = textwrap.dedent("""\
algebraic3d
solid fincyl = cylinder (0, 0, -{h}; 0, 0, {h}; {r1} )
and plane (0, 0, -{h}; 0, 0, -1)
and plane (0, 0, {h}; 0, 0, 1);
solid fincyl2 = cylinder (0, 0, -{h}; 0, 0, 0; {r2} )
and plane (0, 0, -{h}; 0, 0, -1)
and plane (0, 0, 0; 0, 0, 1);
solid fincyl3 = cylinder (0, 0, 0; 0, 0, {h}; {r2} )
and plane (0, 0, 0; 0, 0, -1)
and plane (0, 0, {h}; 0, 0, 1);
solid ring = (fincyl2 or fincyl3) and not fincyl -maxh = {maxh};
tlo ring;
""").format(r1=r1, r2=r2, h=h / 2.0, maxh=maxh)
if save_result == True and filename == '':
filename = "ring-{:.1f}-{:.1f}-{:.1f}-{:.1f}".format(
r1, r2, h, maxh).replace(".", "_")
return from_csg(csg_string, save_result=save_result, filename=filename, directory=directory)
def sphere_inside_box(r_sphere, r_shell, l_box, maxh_sphere, maxh_box, maxh_shell=None, center_sphere=(0, 0, 0), save_result=True, filename='', directory=''):
"""
Create a mesh representing a sphere inside a box. The sphere and box belong to two
different mesh regions and there is a small gap between the sphere and the outer
region. The box is centered at (0, 0, 0) and has edge length `l_box`. The sphere
is centered at `center_sphere` and has radius `r_sphere`. The gap between the sphere
and the mesh region belonging to the box outside the sphere is defined by the radius
`r_shell`, i.e. the width of the gap is `(r_shell - r_sphere)`.
"""
x, y, z = center_sphere
if maxh_shell is None:
maxh_shell = maxh_sphere
mesh_descr = textwrap.dedent("""\
algebraic3d
solid ball = sphere ( {x}, {y}, {z}; {r_sphere} ) -maxh = {maxh_sphere};
solid shell = sphere ( {x}, {y}, {z}; {r_shell} ) -maxh = {maxh_sphere};
solid box = orthobrick ( {sneg}, {sneg}, {sneg}; {s}, {s}, {s} ) -maxh = {maxh_box};
solid air = box and not shell;
tlo ball;
tlo air -transparent;
""").format(x=x, y=y, z=z, r_sphere=r_sphere, r_shell=r_shell, sneg=-0.5 * l_box, s=0.5 * l_box,
maxh_sphere=maxh_sphere, maxh_box=maxh_box)
if save_result == True and filename == '':
filename = 'sphere_inside_box__{:.1f}_{:.1f}_{:.1f}__{:.1f}__{:.1f}__{:.1f}__{:.1f}__{:.1f}'.format(
x, y, z, r_sphere, r_shell, l_box, maxh_sphere, maxh_box)
mesh = from_csg(
mesh_descr, save_result=save_result, filename=filename, directory=directory)
return mesh
def truncated_cone(r_base, r_top, h, maxh, save_result=True, filename='', directory=''):
"""
Return a dolfin mesh representing a truncated-cone (a cone with the top
sliced off) of base-radius, `r_base` and top-radius, `r_top` with height,
`h`. The argument `maxh` controls the maximal element size in the mesh
(see the Netgen manual 4.x, Chapter 2).
If `save_result` is True (the default), both the generated geofile and the
dolfin mesh will be saved to disk. By default, the filename will be
automatically generated based on the values of `r_base`, `r_top`, `h` and
`maxh` (for example, 'cutcone-50_0-20_0-10_0-2_0.geo'), but a different
one can be specified by passing a name (without suffix) into `filename`.
If `save_result` is False, passing a filename has no effect. The
`directory` argument can be used to control where the files should be
saved in case no filename is given explicitly.
"""
csg_string = textwrap.dedent("""\
algebraic3d
solid cutcone = cone ( 0, 0, 0; {r_base}; 0, 0, {h}; {r_top})
and plane (0, 0, 0; 0, 0, -1)
and plane (0, 0, {h}; 0, 0, 1) -maxh = {maxh};
tlo cutcone;
""").format(r_base=r_base, r_top=r_top, h=h, maxh=maxh)
if save_result == True and filename == '':
filename = "cutcone-{:.1f}-{:.1f}-{:.1f}-{:.1f}".format(
r_base, r_top, h, maxh).replace(".", "_")
return from_csg(csg_string,
save_result=save_result,
filename=filename,
directory=directory)
def mesh_volume(mesh):
"""
Computes the total volume of all tetrahedral cells in the mesh.
alternatively, volume = assemble(Constant(1)*dx(mesh))
"""
return sum([c.volume() for c in df.cells(mesh)])
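# Sketch: sanity check against the known volume of the unit cube.
def _example_mesh_volume():
    mesh = df.UnitCubeMesh(4, 4, 4)
    assert abs(mesh_volume(mesh) - 1.0) < 1e-12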
def nodal_volume(space, unit_length=1):
"""
Computes the volume of each node of the mesh of the provided (Vector)FunctionSpace.
    The returned numpy.array will be compatible with functions of that FunctionSpace,
so will be one-dimensional for a FunctionSpace, and three-dimensional
for a VectorFunctionSpace.
"""
v = df.TestFunction(space)
dim = space.mesh().topology().dim()
if isinstance(space, df.FunctionSpace) and space.num_sub_spaces() == 3:
return df.assemble(df.dot(v, df.Constant((1, 1, 1))) * df.dx).array() * unit_length ** dim
else:
return df.assemble(v * df.dx).array() * unit_length ** dim
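# Sketch: for a scalar CG1 space the nodal volumes form a partition of the
# mesh volume, so they must sum to mesh_volume(mesh).
def _example_nodal_volume():
    mesh = df.UnitCubeMesh(2, 2, 2)
    V = df.FunctionSpace(mesh, "CG", 1)
    assert abs(nodal_volume(V).sum() - mesh_volume(mesh)) < 1e-12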
def mesh_info(mesh):
"""
Return a string containing some basic information about the mesh
(such as the number of cells/vertices/interior and surface triangles)
as well as the distribution of edge lengths.
"""
# Note: It might be useful for this function to return the 'raw' data
# (number of cells, vertices, triangles, edge length distribution,
# etc.) instead of a string; this could then be used by another
    # function to generate such an info string (or to print the data
# directly). However, until such a need arises we leave it as it is.
# Remark: the number of surface triangles is computed below as follows:
#
# F_s = 4*C - F_i,
#
# where we use the abbreviation:
#
# C = number of cells/tetrahedra
# F_i = number of interior facets
# F_s = number of surface facets
#
# Proof: Suppose that each tetrahedron was separated from its neighbours
# by a small distance. Then the number of surface facets F_s would
# be exactly 4*C (since each tetrahedron has four surface triangles).
# To get the number of surface facets in the "true" mesh (without
# space between neighbouring cells), all the facets at which two
# tetrahedra are "glued together" (i.e., precisely the interior
# facets) need to be subtracted from this because otherwise they
# would be counted twice.
edges = [e for e in df.edges(mesh)]
facets = [f for f in df.facets(mesh)]
C = mesh.num_cells()
F = len(facets)
F_i = 4 * C - F
F_s = F - F_i
E = len(edges)
V = mesh.num_vertices()
lens = [e.length() for e in df.edges(mesh)]
vals, bins = np.histogram(lens, bins=20)
# to ensure that 'vals' and 'bins' have the same number of elements
vals = np.insert(vals, 0, 0)
vals_normalised = 70.0 / max(vals) * vals
info_string = textwrap.dedent("""\
===== Mesh info: ==============================
{:6d} cells (= volume elements)
{:6d} facets
{:6d} surface facets
{:6d} interior facets
{:6d} edges
{:6d} vertices
===== Distribution of edge lengths: ===========
""".format(C, F, F_s, F_i, E, V))
for (b, v) in zip(bins, vals_normalised):
info_string += "{:.3f} {}\n".format(b, int(round(v)) * '*')
return info_string
def mesh_quality(mesh):
"""
    Returns a histogram string about the quality of the cells of a mesh.
The cell quality is measured by
cell_dimension * inradius / circumradius
which can take values between 0 and 1, where 1 is the best quality
(e.g. a triangular/tetrahedral cell would be equilateral/regular).
"""
ratios = df.MeshQuality.radius_ratios(mesh).array()
vals, bins = np.histogram(ratios, bins=20)
# to ensure that 'vals' and 'bins' have the same number of elements
vals = np.insert(vals, 0, 0)
vals_normalised = 70.0 / max(vals) * vals
info_string = "======== Mesh quality info: ========\n"
for (b, v) in zip(bins, vals_normalised):
info_string += "{:.3f} {}\n".format(b, int(round(v)) * '*')
return info_string
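# Sketch: print both diagnostic histograms for a quick look at a mesh.
def _example_mesh_diagnostics():
    mesh = df.UnitCubeMesh(5, 5, 5)
    print mesh_info(mesh)
    print mesh_quality(mesh)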
def longest_edges(mesh):
"""
Return a df.Function over the cells of `mesh` where the value
is equal to the length of the longest edge of the cell.
"""
V = df.FunctionSpace(mesh, "DG", 0)
f = df.Function(V)
for c in df.cells(mesh):
f.vector()[c.global_index()] = max([e.length() for e in df.edges(c)])
return f
def print_mesh_info(mesh):
print mesh_info(mesh)
def order_of_magnitude(value):
return int(math.floor(math.log10(value)))
def mesh_size(mesh, unit_length):
"""
Return the maximum extent of the mesh along any of the x/y/z axes.
"""
coords = mesh.coordinates()
max_extent = max(coords.max(axis=0) - coords.min(axis=0))
return max_extent * unit_length
def mesh_size_plausible(mesh, unit_length):
"""
    Try to detect whether unit_length fits the mesh.
"""
mesh_size_magnitude = order_of_magnitude(mesh_size(mesh, unit_length))
    # we expect mesh sizes between a nanometer and tens of microns
plausible = (mesh_size_magnitude >= -9) and (mesh_size_magnitude <= -5)
return plausible
def describe_mesh_size(mesh, unit_length):
"""
Describe the size of the mesh in words.
    Returns a string which can be read after `Your mesh is...`.
"""
magn = order_of_magnitude(mesh_size(mesh, unit_length))
if magn <= -15:
# happens when mesh expressed in meters and unit_length=1e-9
# nevertheless
return "smaller than a femtometer"
if magn < -9:
return "smaller than a nanometer"
if magn == -9:
return "a few nanometers large"
if magn == -8:
return "tens of nanometers large"
if magn == -7:
return "hundreds of nanometers large"
if magn == -6:
return "a micrometer large or more"
if magn == -5:
return "tens of micrometers large"
if magn < 0:
return "so large! Such wow. Very mesh."
# the following happens when mesh expressed in nanometers and unit_length=1
if magn == 0:
return "a few meters large"
if magn == 1:
return "dozens of meters large"
if magn >= 2:
return "hundreds of meters large"
def plot_mesh(mesh, scalar_field=None, ax=None, figsize=None, elev=None, azim=None, dg_fun=None,
xlim=None, ylim=None, zlim=None, **kwargs):
"""
Plot the given mesh.
Note that for fine meshes it may be necessary to adjust the
`linewidth` argument because if the mesh edges are drawn too thick
compared to the entire mesh then the figure will appear all black.
FIXME: For 2D meshes we currently draw a wireframe mesh by default
because I haven't figured out yet how to use `tripcolor` properly.
This should be changed so that 2D and 3D plotting are consistent.
Also, once this is done it might be nice to provide a `wireframe`
keyword which enables/disables wireframe-style plotting.
TODO: It might be nice to automatically adjust the linewidth, e.g.
based on the ratio mesh.num_cells()/mesh_volume(mesh).
*Arguments*
scalar_field: None or array (of scalar vertex values) or function
If given, the triangle colors will be derived from the field
values (using the specified cmap). In this case the `color`
argument is ignored. If scalar_field is a function, it is
first applied to all vertices and should expect an array of
(x, y) (for 2D meshes) or (x, y, z) (for 3d meshes)
coordinates as its argument.
ax : None or matplotlib.axes.AxesSubplot (for 2D meshes)
or matplotlib.axes.Axes3DSubplot (for 3D meshes)
If `ax` is not given, an appropriate Axes object is created
automatically. Note that a 3D Axes object can be created as
follows:
import matplotlib.pyplot as plt
ax = plt.gca(projection='3d')
figsize : pair of floats
Size of the figure in which the mesh is to be plotted. If the
`ax` argument is provided, this is ignored.
elev : float | None
Elevation angle (in degrees) of the 'camera view'. Only meaningful
for 3D plots and is ignored for 2D meshes.
azim : float | None
Azimuthal angle (in degrees) of the 'camera view' in the x,y plane.
Only meaningful for 3D plots and is ignored for 2D meshes.
All other keyword arguments are passed on to matplotlib's `plot_trisurf`
(for 3D meshes) or to `triplot` (for 2D meshes). The following defaults
are used:
color = 'blue'
cmap = matplotlib.cm.jet
*Returns*
The Axes object in which the mesh was plotted (either the one
provided by the user or the one which was automatically created).
"""
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
top_dim = mesh.topology().dim()
geom_dim = mesh.geometry().dim()
# If the user doesn't explicitly specify a linewidth, we
# heuristically adapt it so that the plot doesn't appear all black
# because the lines are drawn too thick.
#
# There are certainly better ways to do this, but this seems to
# work reasonably well for most cases. (However, for very oblong
# structures it may make more sense to check the extent in each
# dimension individually rather than the mesh volume as a whole.)
    if 'linewidth' not in kwargs:
lw_threshold = 500.0 if geom_dim == 2 else 5000.0
a = mesh.num_cells() / mesh_volume(mesh)
if a > lw_threshold:
kwargs['linewidth'] = pow(lw_threshold / a, 1.0 / geom_dim)
logger.debug("Automatically adapting linewidth to improve plot quality "
"(new value: linewidth = {})".format(kwargs['linewidth']))
# Set default values for some keyword arguments
    if 'color' not in kwargs:
        kwargs['color'] = 'blue'
    if 'cmap' in kwargs:
        if scalar_field is None:
            kwargs.pop('cmap')
            logger.warning("Ignoring 'cmap' argument since no 'scalar_field' "
                           "argument was given")
    else:
        if scalar_field is not None:
            # cmap should only be set when a field was given
            kwargs['cmap'] = cm.jet
# Create Axis if none was provided
    if ax is None:
        logger.debug("Creating new figure with figsize '{}'".format(figsize))
        fig = plt.figure(figsize=figsize)
        ax = fig.gca(aspect='equal', projection=(None if (geom_dim == 2) else '3d'))
        if geom_dim == 3:
            # view_init only exists on 3D axes; elev/azim are documented as
            # being ignored for 2D meshes.
            ax.view_init(elev=elev, azim=azim)
    else:
        if figsize is not None:
            logger.warning("Ignoring argument `figsize` because `ax` was "
                           "provided explicitly.")
    if dg_fun is None:
        dg_fun = df.Function(df.FunctionSpace(mesh, 'DG', 0))
        dg_fun.vector()[:] = 1
if geom_dim == 2:
coords = mesh.coordinates()
x = coords[:, 0]
y = coords[:, 1]
triangs = np.array([[v.index() for v in df.vertices(s)]
for s in df.faces(mesh)])
xmid = x[triangs].mean(axis=1)
ymid = y[triangs].mean(axis=1)
zfaces = np.array([dg_fun(xmid[i], ymid[i]) for i in range(len(xmid))])
        if scalar_field is not None:
logger.warning(
"Ignoring the 'scalar_field' argument as this is not implemented for 2D meshes yet.")
# XXX TODO: It would be nice to have the triangles coloured.
# This should be possible using 'tripcolor', but I haven't
# figured out yet how to pass it the color information (e.g.,
# uniformly coloured if we just want to plot the mesh, or
# passing an array of color values if we want to plot a
# scalar function on a mesh).
#ax.tripcolor(x, y, triangles=triangs)
ax.tripcolor(
x, y, triangles=triangs, facecolors=zfaces, edgecolors='k', **kwargs)
ax.set_xlim(xlim)
ax.set_ylim(ylim)
elif geom_dim == 3:
# TODO: Remove this error message once matplotlib 1.3 has been
# released!
import matplotlib
if matplotlib.__version__[:3] < '1.3':
raise NotImplementedError(
"Plotting 3D meshes is only supported with versions of "
"matplotlib >= 1.3.x. Unfortunately, the latest stable "
"release is 1.2.0, so you have to install the development "
"version manually. Apologies for the inconvenience!")
if top_dim == 3:
# Extract the boundary mesh because we need a mesh
# of topological dimension 2 for plotting.
mesh = df.BoundaryMesh(mesh, 'exterior')
elif top_dim != 2:
raise NotImplementedError(
"Cannot plot 3D mesh of topological dimension {}"
"".format(top_dim))
coords = mesh.coordinates()
x = coords[:, 0]
y = coords[:, 1]
z = coords[:, 2]
        if scalar_field is not None:
try:
scalar_field = np.array(map(scalar_field, coords))
except TypeError:
scalar_field = np.array(scalar_field)
# Set shade = False by default because it looks nicer
        if 'shade' not in kwargs:
kwargs['shade'] = False
try:
ax.plot_trisurf(
x, y, z, triangles=mesh.cells(), vertex_vals=scalar_field, **kwargs)
except AttributeError:
            if scalar_field is not None:
logger.warning("Ignoring 'scalar_field' argument because this "
"version of matplotlib doesn't support it.")
ax.plot_trisurf(x, y, z, triangles=mesh.cells(), **kwargs)
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.set_zlim(zlim)
else:
raise ValueError(
"Plotting is only supported for 2- and 3-dimensional meshes.")
ax.set_aspect('equal')
return ax
def plot_mesh_with_paraview(mesh, **kwargs):
"""
Use Paraview to render the mesh. This is just a convenience
function which saves the mesh to a temporary .vtu file and then
calls `finmag.util.visualization.render_paraview_scene` on that
file. Any keyword arguments given are passed on to
`render_paraview_scene` - see its docstring for more information.
Use the `diffuse_color` keyword to set the mesh color and the
`outfile` keyword to give a output filename in case the plot should
be saved to a PNG file.
"""
import tempfile
from finmag.util.visualization import render_paraview_scene
tmpdir = tempfile.mkdtemp()
tmp_meshfile = os.path.join(tmpdir, 'mesh.pvd')
F = df.File(tmp_meshfile)
F << mesh
diffuse_color = kwargs.pop('diffuse_color', [0, 0.9, 0])
image = render_paraview_scene(
tmp_meshfile, field_name=None, add_glyphs=False,
rescale_colormap_to_data_range=False, show_colorbar=False,
diffuse_color=diffuse_color, **kwargs)
shutil.rmtree(tmpdir)
return image
def plot_mesh_regions(fun_mesh_regions, regions, colors=None, alphas=None,
markers='.', marker_sizes=None, zoom_to_mesh_size=True,
ax=None, **kwargs):
"""
Visualise particular regions/subdomains of a mesh by plotting
markers at the midpoints of all cells belonging to the specified
region(s). If multiple regions are to be plotted, different
colours are used to distinguish them.
*Arguments*
fun_mesh_regions : dolfin.CellFunction
A dolfin.MeshFunction defined on the cells of the mesh. It
should take integer values which indicate for each cell which
mesh region it belongs to.
regions : int or list of ints
The region(s) to plot.
colors : single color or list of colors
Colors to be used for the markers of the individual regions.
If the number of supplied colors is shorter than the number of
regions, colors from the beginning of the list will be reused.
alphas : float or list of floats
Alpha (= transparency) values to be used for the markers of
the individual regions. If the number of supplied alpha values
is shorter than the number of regions to be plotted, values
from the beginning of the list will be reused.
markers : single marker or list of markers
Markers to be used for the markers of the individual regions.
If the number of supplied markers is shorter than the number
of regions to be plotted, values from the beginning of the
list will be reused.
marker_sizes : float or list of float
Sizes for the markers of the individual regions.
If the number of supplied markers is shorter than the number
of regions to be plotted, values from the beginning of the
list will be reused.
zoom_to_mesh_size : boolean
If this is True then the x-, y- and z-axis limits are
automatically adjusted to the minimum/maximum x-coordinate of
the mesh so that the visible region covers the extent of the
mesh. Note that if not all mesh regions are plotted, this
means that parts of the plot will appear to be empty.
The reason for this behaviour is that it can be quite
confusing if a region appears to fill the entire screen (due
to matplotlib automatically adjusting the axis limits) when it
    is only supposed to cover a small part of the mesh. If this
    behaviour is undesired, set `zoom_to_mesh_size` to False. If
necessary, you can also explicitly call 'ax.set_xlim3d()' (and
similarly for y and z limits) on the Axis object which is
returned from this function.
ax : None or matplotlib.axes.Axes3DSubplot
If `ax` is not given, an appropriate Axes object is created
automatically.
**kwargs
All keyword arguments are passed on to the matplotlib's
`scatter3d` function.
*Returns*
The Axes object in which the mesh was plotted (either the one
provided by the user or the one which was automatically created).
"""
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def _ensure_is_list(arg):
res = arg
        if res is None:
res = []
elif not isinstance(arg, (ListType, TupleType)):
res = [res]
return res
regions = _ensure_is_list(regions)
colors = _ensure_is_list(colors)
alphas = _ensure_is_list(alphas)
markers = _ensure_is_list(markers)
sizes = _ensure_is_list(marker_sizes)
if not isinstance(regions, (ListType, TupleType)):
raise TypeError("Argument 'region' must be a single integer "
"or a list of integers. "
"Got: '{}' ({})".format(regions, type(regions)))
if ax is None:
ax = plt.gca(projection='3d')
mesh = fun_mesh_regions.mesh()
midpoints = [[c.midpoint() for c in df.cells(mesh)
if fun_mesh_regions[c.index()] == r] for r in regions]
pts = [[(pt.x(), pt.y(), pt.z()) for pt in m] for m in midpoints]
num_regions = len(regions)
# TODO: More generic would be to create a dictionary here to which
# we add a color/alpha/... argument iff colors/alphas/... is not
# None. This allows us to leave the default to matplotlib if no
# value was explicitly set by the user (instead of creating an
# artificial default value).
    def _supply_args(arg_dict, name, lst, i):
        if lst != []:
            val = lst[i % len(lst)]
            if val is not None:
                arg_dict[name] = val
    for i in xrange(num_regions):
        arg_dict = copy.copy(kwargs)
        _supply_args(arg_dict, 'color', colors, i)
        _supply_args(arg_dict, 'alpha', alphas, i)
        _supply_args(arg_dict, 'marker', markers, i)
        _supply_args(arg_dict, 's', sizes, i)
pts_region = pts[i]
ax.scatter3D(*zip(*pts_region), **arg_dict)
if zoom_to_mesh_size:
logger.debug("Adjusting axis limits in order to zoom to mesh size")
coords = mesh.coordinates()
xs = coords[:, 0]
ys = coords[:, 1]
zs = coords[:, 2]
ax.set_xlim3d(min(xs), max(xs))
ax.set_ylim3d(min(ys), max(ys))
ax.set_zlim3d(min(zs), max(zs))
return ax
def line_mesh(vertices):
"""
Construct a mesh representing a (potentially curved) line. The
resulting mesh simply consists of an (ordered) list of nodes in
which adjacent ones are connected by an edge.
The geometrical dimension can be arbitrary, i.e. the mesh can
either represent a 1D interval, a line/curve in a 2D plane, or
even a line/curve in 3D space.
*Examples*
.. code-block:: python
# 1D interval with 5 sub-intervals and 6 nodes
line_mesh([0.0, 0.1, 0.3, 0.4, 0.8, 1.0])
# Line in 2D plane consisting of three points
line_mesh([[0.0, 0.0],
[1.0, 2.4],
[3.0, 7.2]])
# Spiral in 3D space
vertices = [(cos(t), sin(t), t) for t in linspace(0, 2*pi, 20)]
line_mesh(vertices)
*Arguments*
vertices:
List of coordinates (for 1D meshes only) or list of mesh nodes.
*Returns*
A dolfin.Mesh of topological dimension 1 and geometrical dimension
equal to the dimension of the space containing the mesh nodes.
"""
n = len(vertices)
vertices = np.array(vertices, dtype=float)
if vertices.ndim == 1:
vertices = vertices[:, np.newaxis]
if vertices.ndim != 2:
raise ValueError(
"Argument 'vertices' must either be a list of mesh coordinates "
"(for 1D meshes) or a list of mesh nodes. Got: {}".format(vertices))
dim = vertices.shape[-1]
# The 'cells' of the mesh are simply the intervals connecting adjacent
# nodes
cells = [[i, i + 1] for i in xrange(n - 1)]
return build_mesh(vertices, cells)
def embed3d(mesh, z_embed=0.0):
"""
Given a mesh of geometrical dimension 2, create a 3D mesh
via the following embedding of the 2d vertex coordinates:
(x, y) --> (x, y, z_embed)
Here, `z_embed` is the value passed in by the user.
"""
geom_dim = mesh.geometry().dim()
if geom_dim != 2:
raise NotImplementedError(
"Mesh currently must have geometrical dimension 2. Got: {}".format(geom_dim))
vertices = mesh.coordinates()
cells = mesh.cells()
# Create an array with the desired 3d coordinates
vertices_3d = np.zeros((len(vertices), 3))
vertices_3d[:, :2] = vertices
vertices_3d[:, 2] = z_embed
return build_mesh(vertices_3d, cells)
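# Sketch: lift a 2D rectangle mesh into the z = 5.0 plane.
def _example_embed3d():
    mesh_2d = df.RectangleMesh(df.Point(0, 0), df.Point(20, 10), 10, 5)
    return embed3d(mesh_2d, z_embed=5.0)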
def build_mesh(vertices, cells):
"""
    Helper function to create a mesh from the given lists of vertices and cells.
"""
vertices = np.asarray(vertices, dtype='double')
cells = np.asarray(cells, dtype='uintp')
assert vertices.ndim == 2
assert cells.ndim == 2
geom_dim = vertices.shape[-1]
top_dim = cells.shape[-1] - 1
mesh = df.Mesh()
editor = df.MeshEditor()
editor.open(mesh, top_dim, geom_dim)
editor.init_vertices(len(vertices))
editor.init_cells(len(cells))
for i, pt in enumerate(vertices):
editor.add_vertex(i, pt)
for i, c in enumerate(cells):
editor.add_cell(i, c)
editor.close()
return mesh
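# Sketch: a single 2D triangle built directly from vertex/cell arrays.
def _example_build_mesh():
    vertices = [(0.0, 0.0), (1.0, 0.0), (0.0, 1.0)]
    cells = [(0, 1, 2)]
    return build_mesh(vertices, cells)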
def mesh_is_periodic(mesh, axes):
"""
    Check whether the given mesh is periodic. The argument `axes` can be
    either 'x' or 'xy', indicating that periodicity should be checked in
    the x-direction only or in both the x- and y-directions.
"""
coords = mesh.coordinates()
cells = mesh.cells()
# Convert 'axes' into a list of values between 0 and 2
try:
axes = map(lambda val: {'x': 0, 'y': 1, 'z': 2}[val], axes)
except KeyError:
raise ValueError(
"Argument 'axes' should be a string containing only 'x', 'y' and 'z'.")
min_coords = coords.min(axis=0)
max_coords = coords.max(axis=0)
# Generate dictionary which associates each axis direction with the indices
    # of the minimal and maximal vertices along that axis direction.
extremal_vertex_indices = {
# XXX TODO: Maybe avoid the repeated loops if speed becomes a problem
# for large meshes?
axis: {'min': [i for i in xrange(len(coords)) if coords[i][axis] == min_coords[axis]],
'max': [i for i in xrange(len(coords)) if coords[i][axis] == max_coords[axis]],
} for axis in axes}
mesh_extents = [b - a for (a, b) in zip(min_coords, max_coords)]
# Using dolfin's bounding box tree to speed things up
bbt = df.BoundingBoxTree()
bbt.build(mesh)
def find_matching_vertex_index(idx, axis, a):
"""
Find index of the vertex which is identified with the vertex `idx`
on the other side of the mesh.
"""
        # Need a copy because otherwise we would edit the mesh coordinates in-place.
        pt_coords = coords[idx].copy()
        pt_coords[axis] += a * mesh_extents[axis]  # move point to other edge
pt = df.Point(*pt_coords)
cell_idx, distance = bbt.compute_closest_entity(pt)
for v_idx in cells[cell_idx]:
if (np.linalg.norm(pt_coords - coords[v_idx]) < 1e-14):
return v_idx
return None
for axis in axes:
idcs_edge1 = extremal_vertex_indices[axis]['min']
idcs_edge2 = extremal_vertex_indices[axis]['max']
# If we don't have the same number of vertices on the two edges then
# the mesh is clearly not periodic
if len(idcs_edge1) != len(idcs_edge2):
return False
def all_matching_vertices_exist_on_other_edge(indices1, indices2, a):
"""
Helper function to check whether all vertices with index in 'indices1' have
a corresponding vertex on the other side of the mesh with index in 'indices2'.
"""
for idx1 in indices1:
idx2 = find_matching_vertex_index(idx1, axis, a)
if idx2 is None or idx2 not in indices2:
# No matching vertex found on other edge, hence mesh is not
# periodic
return False
return True
if not all_matching_vertices_exist_on_other_edge(idcs_edge1, idcs_edge2, +1):
return False
if not all_matching_vertices_exist_on_other_edge(idcs_edge2, idcs_edge1, -1):
return False
# No a-periodicity found, hence the mesh is periodic
return True
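# Sketch: dolfin's regular box meshes are periodic in x and y by
# construction (cf. the corresponding assertions in meshes_test.py).
def _example_mesh_is_periodic():
    mesh = df.BoxMesh(df.Point(0, 0, 0), df.Point(20, 10, 5), 4, 2, 1)
    assert mesh_is_periodic(mesh, 'x')
    assert mesh_is_periodic(mesh, 'xy')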
def regular_polygon(n, r, f):
"""
Returns a dolfin mesh representing a 2D polygon with the
following parameters
n = number of sides
r = distance from centre to a vertex
f = fineness of mesh
"""
theta = 2*math.pi/n
csg = ""
for i in range(0, n):
csg = csg + "Point({}) = {{{}, {}, 0, {}}};\n".format(i+1,
r*math.cos(theta*i),
r*math.sin(theta*i),
f)
csg = csg + "\n"
for i in range(1, n+1):
if (i==n):
csg = csg + "Line({}) = {{{},{}}}; \n".format(i,i,1)
else:
csg = csg + "Line({}) = {{{},{}}}; \n".format(i,i,i+1)
csg = csg + "\nLine Loop(1) = {"
for i in range(1, n+1):
csg = csg + "{}".format(i)
if (i!=n):
csg += ","
csg+="};\n\nPlane Surface(1) = {1};\n\nPhysical Surface = {1};"
    filename = "polygon_{}_{}_{}".format(n, r, f)
    csg_saved = open(filename + ".geo", 'w')
csg_saved.write(csg)
csg_saved.close()
cmd="gmsh " + filename + ".geo -2 -o "+filename+".msh"
os.system(cmd)
cmd="dolfin-convert "+filename+".msh "+filename+".xml"
os.system(cmd)
mesh = df.Mesh(filename+".xml")
cmd = "rm " + filename +".xml " + filename + ".geo " + filename +".msh"
os.system(cmd)
return mesh
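# Sketch (assumes gmsh and dolfin-convert are on the PATH): a hexagon of
# circumradius 50 with mesh fineness 5, as exercised in the test suite.
def _example_regular_polygon():
    return regular_polygon(6, 50, 5)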
def regular_polygon_extruded(n,r,t,f):
"""
    Returns a dolfin mesh representing a regular polygon extruded along z
    to thickness `t` (for a purely 2D mesh set t = 0). The number of
    layers is calculated by taking the ceiling of the value t/f.
n = number of sides
r = distance from centre to a vertex
f = fineness of mesh
t = extrude thickness
"""
theta = 2*math.pi/n
csg = ""
for i in range(0,n):
csg = csg + "Point({}) = {{{}, {}, 0, {}}}; \n".format(i+1, r*math.cos(theta*i), r*math.sin(theta*i),f)
csg = csg + "\n"
for i in range(1,n+1):
if (i==n):
csg = csg+"Line({}) = {{{},{}}}; \n".format(i,i,1)
else:
csg = csg+"Line({}) = {{{},{}}}; \n".format(i,i,i+1)
csg = csg + "\nLine Loop(1) = {"
for i in range(1,n+1):
csg=csg+"{}".format(i)
if (i!=n):
csg += ","
csg += "};\n\nPlane Surface(1) = {1};\n\nPhysical Surface = {1};"
if (t!=0):
n_layers = math.ceil(t/f)
csg += "\n\nExtrude {{0,0,{}}} {{\nSurface{{1}}; \nLayers{{{}}};\n}}".format(t,n_layers)
filename = "polygon_ext_{}_{}_{}_{}".format(n,r,t,f)
else:
filename = "polygon_{}_{}_{}".format(n,r,f)
    csg_saved = open(filename + ".geo", 'w')
csg_saved.write(csg)
csg_saved.close()
if (t==0):
cmd = "gmsh " + filename + ".geo -2 -o " + filename + ".msh"
else:
cmd = "gmsh " + filename + ".geo -3 -o " + filename + ".msh"
os.system(cmd)
cmd = "dolfin-convert "+filename+".msh "+filename+".xml"
os.system(cmd)
mesh = df.Mesh(filename+".xml")
cmd = "rm " + filename +".xml " + filename + ".geo " + filename +".msh"
os.system(cmd)
return mesh
def disk_with_internal_layers(d, layer_positions, lmax, name=''):
"""Creates a disk mesh with a flat interface inside.
Args:
d - disk diameter
layer_positions - assuming layer 0 is at height 0, layer 1 is at layer_positions[0], etc...
lmax - discretisation
"""
# First create the disk:
geo_script = textwrap.dedent("""\
lmax = DefineNumber[ $lmax$, Name "Parameters/lmax" ];
rad = DefineNumber[ $rad$, Name "Parameters/rad" ];
Point(1) = {0, 0, 0, lmax};
Point(2) = {rad, 0, 0, lmax};
Point(3) = {-rad, 0, 0, lmax};
Point(4) = {0, rad, 0, lmax};
Point(5) = {0, -rad, 0, lmax};
Circle(1) = {4, 1, 2};
Circle(2) = {2, 1, 5};
Circle(3) = {5, 1, 3};
Circle(4) = {3, 1, 4};
Line Loop(5) = {4, 1, 2, 3};
Ruled Surface(6) = {5};
""")
    # Now extrude the disk surface once for each layer:
for i, l in enumerate(layer_positions):
if i == 0:
geo_script += textwrap.dedent("""\
out1[] = Extrude {{0, 0, {}}} {{
Surface{{6}};
}};
""").format(l)
else:
geo_script += textwrap.dedent("""\
out{}[] = Extrude {{0, 0, {}}} {{
Surface{{out{}[0]}};
}};
""").format(i+1, l-layer_positions[i-1], i)
# Replace parameters in the gmsh geometry script.
geo_script = geo_script.replace('$rad$', str(d/2.))
geo_script = geo_script.replace('$lmax$', str(lmax))
#print(geo_script)
# Write the geometry script to the .geo file.
# basename = 'disk_with_boundary-{}-{}-{}-{}-{}'.format(name, str(d/2.0), jid, aid, '_'.join(layers))
basename = 'disk_with_boundary-{}-{}'.format(name, str(d/2.0))
print('\n\nMESH FILENAMES = {}\n\n'.format(basename))
geo_file = open(basename + '.geo', 'w')
geo_file.write(geo_script)
geo_file.close()
# Create a 3d mesh.
gmsh_command = 'gmsh {}.geo -3 -optimize_netgen -o {}.msh'.format(basename, basename)
os.system(gmsh_command)
# Convert msh mesh format to the xml (required by dolfin).
dc_command = 'dolfin-convert {}.msh {}.xml'.format(basename, basename)
os.system(dc_command)
# Load the mesh and create a dolfin object.
mesh = df.Mesh('{}.xml'.format(basename))
# Delete all temporary files.
# os.system('rm {}.geo {}.msh {}.xml'.format(basename, basename, basename))
return mesh
| 61,359 | 35.502082 | 159 |
py
|
finmag
|
finmag-master/src/finmag/util/meshes_test.py
|
import dolfin as df
import numpy as np
import pytest
import os
from meshes import *
from mesh_templates import *
from math import sin, cos, pi
import mshr
def test_mesh_size():
"""
Construct a couple of meshes (box, sphere) and check that
the mesh size is reported as expected.
"""
RTOL = 1e-3
box_mesh = df.BoxMesh(df.Point(-20, -30, 10), df.Point(30, 42, 20), 4, 4, 4)
assert(np.isclose(mesh_size(box_mesh, unit_length=1.0), 72.0, rtol=RTOL))
assert(
np.isclose(mesh_size(box_mesh, unit_length=3e-5), 216e-5, rtol=RTOL))
s = Sphere(12.0, center=(34, 12, 17))
sphere_mesh = s.create_mesh(maxh=3.0, save_result=False)
assert(
np.isclose(mesh_size(sphere_mesh, unit_length=1.0), 24.0, rtol=RTOL))
assert(
np.isclose(mesh_size(sphere_mesh, unit_length=2e4), 48e4, rtol=RTOL))
def test_line_mesh():
"""
Create vertices lying on a spiral in 3D space, build a line-mesh from it
and check that it has the correct vertices.
"""
vertices = [(sin(t), cos(t), t) for t in np.linspace(-2 * pi, 4 * pi, 100)]
mesh = line_mesh(vertices)
assert np.allclose(vertices, mesh.coordinates())
def test_embed3d():
# Create a 2D mesh which we want to embed in 3D space
mesh_2d = df.RectangleMesh(df.Point(0, 0), df.Point(20, 10), 10, 5)
coords_2d = mesh_2d.coordinates()
z_embed = 4.2
# Create array containing the expected 3D coordinates
coords_3d_expected = z_embed * np.ones((len(coords_2d), 3))
coords_3d_expected[:, :2] = coords_2d
# Create the embedded 3D mesh
mesh_3d = embed3d(mesh_2d, z_embed)
coords_3d = mesh_3d.coordinates()
# Check that the coordinates coincide
assert(np.allclose(coords_3d, coords_3d_expected))
# Check that we can't embed a 1D or 3D mesh (TODO: we could make these
# work, but currently they are not implemented)
with pytest.raises(NotImplementedError):
embed3d(df.UnitIntervalMesh(4))
with pytest.raises(NotImplementedError):
embed3d(df.UnitCubeMesh(4, 4, 4))
def test_sphere_inside_box(tmpdir, debug=False):
"""
TODO: Currently this test doesn't do much; it only checks whether we can execute the command `sphere_inside_box`.
"""
os.chdir(str(tmpdir))
mesh = sphere_inside_box(r_sphere=10, r_shell=15, l_box=50,
maxh_sphere=5.0, maxh_box=10.0, center_sphere=(10, -5, 8))
if debug:
plot_mesh_with_paraview(
mesh, representation='Wireframe', outfile='mesh__sphere_inside_box.png')
f = df.File('mesh__sphere_inside_box.pvd')
f << mesh
del f
def test_build_mesh():
"""
Create a few meshes, extract the vertices and cells from them and pass them
to build_mesh() to rebuild the mesh. Then check that the result is the same
as the original.
"""
def assert_mesh_builds_correctly(mesh):
coords = mesh.coordinates()
cells = mesh.cells()
mesh_new = build_mesh(coords, cells)
assert np.allclose(coords, mesh_new.coordinates())
assert np.allclose(cells, mesh_new.cells())
mesh1 = df.RectangleMesh(df.Point(0, 0), df.Point(20, 10), 12, 8)
assert_mesh_builds_correctly(mesh1)
mesh2_temp = mshr.Circle(df.Point(2.0, -3.0), 10)
mesh2 = mshr.generate_mesh(mesh2_temp, 10)
assert_mesh_builds_correctly(mesh2)
mesh3 = df.BoxMesh(df.Point(0, 0, 0), df.Point(20, 10, 5), 12, 8, 3)
assert_mesh_builds_correctly(mesh3)
mesh4_temp = mshr.Sphere(df.Point(2.0, 3.0, -4.0), 10)
mesh4 = mshr.generate_mesh(mesh4_temp, 10)
assert_mesh_builds_correctly(mesh4)
def create_periodic_mesh(periodicity='none', dim=3):
"""
Helper function to create a mesh which is either non-periodic
(if periodicity='none'), periodic in one direction if
(periodicity='x' or periodicity='y') or periodic in both
directions (if periodicity='xy').
    The argument `dim` specifies the dimension of the mesh (allowed
values: dim=2 or dim=3).
"""
if dim == 2 or dim == 3:
if periodicity == 'none':
# Unit square with added 'asymmetric' points on the four sides (to break periodicity)
#vertices = [(0, 0), (1, 0), (1, 1), (0.5, 1), (0, 1), (0, 0.5)]
#cells = [(0, 1, 5), (1, 2, 3), (3, 4, 5), (1, 3, 5)]
vertices = [
(0, 0), (0.7, 0), (1, 0), (1, 0.8), (1, 1), (0.3, 1), (0, 1), (0, 0.2)]
cells = [
(0, 1, 7), (1, 2, 3), (1, 3, 7), (3, 4, 5), (3, 5, 7), (5, 6, 7)]
elif periodicity == 'x':
# Unit square with added 'asymmetric' points on top/bottom side
#vertices = [(0, 0), (1, 0), (1, 1), (0.5, 1), (0, 1)]
#cells = [(0, 1, 3), (1, 2, 3), (0, 3, 4)]
vertices = [(0, 0), (0.2, 0), (1, 0), (1, 1), (0.7, 1), (0, 1)]
cells = [(0, 1, 5), (1, 2, 4), (2, 3, 4), (1, 4, 5)]
elif periodicity == 'y':
# Unit square with added point on left edge
vertices = [(0, 0), (1, 0), (1, 1), (0, 1), (0, 0.5)]
cells = [(0, 1, 4), (1, 2, 4), (2, 3, 4)]
elif periodicity == 'xy':
# Unit square
vertices = [(0, 0), (1, 0), (1, 1), (0, 1)]
cells = [(0, 1, 2), (0, 2, 3)]
else:
            raise ValueError(
                "Argument 'periodicity' must have one of the values 'none', 'x', 'y', 'xy'")
else:
raise NotImplementedError(
'Can only create 2d and 3d meshes with predefined periodicity.')
mesh = build_mesh(vertices, cells)
if dim == 3:
# XXX TODO: It would be better to build a truly 3D mesh,
# but this should suffice for now.
mesh = embed3d(mesh)
return mesh
def test_mesh_is_periodic(tmpdir):
"""
"""
os.chdir(str(tmpdir))
# Create a bunch of 2D meshes with different periodicity
# and check that we detect this periodicity correctly.
mesh1 = create_periodic_mesh(periodicity='none', dim=2)
assert not mesh_is_periodic(mesh1, 'x')
#assert not mesh_is_periodic(mesh1, 'y')
assert not mesh_is_periodic(mesh1, 'xy')
mesh2 = create_periodic_mesh(periodicity='x', dim=2)
assert mesh_is_periodic(mesh2, 'x')
#assert not mesh_is_periodic(mesh2, 'y')
assert not mesh_is_periodic(mesh2, 'xy')
mesh3 = create_periodic_mesh(periodicity='y', dim=2)
assert not mesh_is_periodic(mesh3, 'x')
#assert mesh_is_periodic(mesh3, 'y')
assert not mesh_is_periodic(mesh3, 'xy')
mesh4 = create_periodic_mesh(periodicity='xy', dim=2)
assert mesh_is_periodic(mesh4, 'x')
#assert mesh_is_periodic(mesh4, 'y')
assert mesh_is_periodic(mesh4, 'xy')
mesh_rectangle = df.RectangleMesh(df.Point(0, 0), df.Point(20, 10), 12, 8)
assert mesh_is_periodic(mesh_rectangle, 'x')
#assert mesh_is_periodic(mesh_rectangle, 'y')
assert mesh_is_periodic(mesh_rectangle, 'xy')
# Repeat this process for a bunch of 3D meshes with
# different periodicity.
mesh5 = create_periodic_mesh(periodicity='none', dim=3)
assert not mesh_is_periodic(mesh5, 'x')
#assert not mesh_is_periodic(mesh5, 'y')
assert not mesh_is_periodic(mesh5, 'xy')
mesh6 = create_periodic_mesh(periodicity='x', dim=3)
assert mesh_is_periodic(mesh6, 'x')
#assert not mesh_is_periodic(mesh6, 'y')
assert not mesh_is_periodic(mesh6, 'xy')
mesh7 = create_periodic_mesh(periodicity='y', dim=3)
assert not mesh_is_periodic(mesh7, 'x')
#assert mesh_is_periodic(mesh7, 'y')
assert not mesh_is_periodic(mesh7, 'xy')
mesh8 = create_periodic_mesh(periodicity='xy', dim=3)
assert mesh_is_periodic(mesh8, 'x')
#assert mesh_is_periodic(mesh8, 'y')
assert mesh_is_periodic(mesh8, 'xy')
mesh_box = df.BoxMesh(df.Point(0, 0, 0), df.Point(20, 10, 5), 12, 8, 3)
assert mesh_is_periodic(mesh_box, 'x')
#assert mesh_is_periodic(mesh_box, 'y')
assert mesh_is_periodic(mesh_box, 'xy')
def test_regular_polygon():
    testmesh = regular_polygon(6, 50, 5)
assert np.max(testmesh.coordinates()) == 50
assert np.min(testmesh.coordinates()) == -50
def test_regular_polygon_extruded():
    testmesh = regular_polygon_extruded(5, 50, 30, 10)
    assert np.amax(testmesh.coordinates(), axis=0)[0] == 50
| 8,361 | 35.837004 | 117 |
py
|
finmag
|
finmag-master/src/finmag/util/pbc2d.py
|
import dolfin as df
import numpy as np
class PeriodicBoundary1D(df.SubDomain):
"""
    Periodic boundary condition in the x direction.
"""
def __init__(self, mesh):
super(PeriodicBoundary1D, self).__init__()
self.mesh = mesh
self.find_mesh_info()
def inside(self, x, on_boundary):
on_x = bool(df.near(x[0], self.xmin) and on_boundary)
return on_x
def map(self, x, y):
y[0] = x[0] - self.width
if self.dim > 1:
y[1] = x[1]
if self.dim > 2:
y[2] = x[2]
def find_mesh_info(self):
coords = self.mesh.coordinates()
self.length = len(coords)
max_v = coords.max(axis=0)
min_v = coords.min(axis=0)
self.xmin = min_v[0]
self.xmax = max_v[0]
self.width = self.xmax - self.xmin
self.dim = self.mesh.topology().dim()
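# A minimal usage sketch (mesh is illustrative): passing the subdomain as
# 'constrained_domain' makes dolfin identify the two x-boundaries, so that
# functions on V are periodic in x:
#     mesh = df.UnitIntervalMesh(10)
#     pbc = PeriodicBoundary1D(mesh)
#     V = df.FunctionSpace(mesh, "Lagrange", 1, constrained_domain=pbc)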
class PeriodicBoundary2D(df.SubDomain):
"""
Periodic Boundary condition in xy-plane.
"""
def __init__(self, mesh):
super(PeriodicBoundary2D, self).__init__()
self.mesh = mesh
self.find_mesh_info()
def inside(self, x, on_boundary):
on_x = bool(
df.near(x[0], self.xmin) and x[1] < self.ymax and on_boundary)
on_y = bool(
df.near(x[1], self.ymin) and x[0] < self.xmax and on_boundary)
return on_x or on_y
def map(self, x, y):
y[0] = x[0] - self.width
y[1] = x[1] - self.height
if self.dim == 3:
y[2] = x[2]
if df.near(x[0], self.xmax) and x[1] < self.ymax:
y[1] = x[1]
if df.near(x[1], self.ymax) and x[0] < self.xmax:
y[0] = x[0]
def find_mesh_info(self):
coords = self.mesh.coordinates()
self.length = len(coords)
max_v = coords.max(axis=0)
min_v = coords.min(axis=0)
self.xmin = min_v[0]
self.xmax = max_v[0]
self.ymin = min_v[1]
self.ymax = max_v[1]
self.width = self.xmax - self.xmin
self.height = self.ymax - self.ymin
self.dim = self.mesh.topology().dim()
# Collect all vertices that lie on one of the periodic
# boundaries of the mesh.
px_mins = []
px_maxs = []
py_mins = []
py_maxs = []
mesh = self.mesh
for vertex in df.vertices(mesh):
if vertex.point().x() == self.xmin:
px_mins.append(df.Vertex(mesh, vertex.index()))
elif vertex.point().x() == self.xmax:
px_maxs.append(df.Vertex(mesh, vertex.index()))
if vertex.point().y() == self.ymin:
py_mins.append(df.Vertex(mesh, vertex.index()))
elif vertex.point().y() == self.ymax:
py_maxs.append(df.Vertex(mesh, vertex.index()))
# Collect the indices of vertices on the 'min' boundary
# and find all vertices on the 'max' boundary which match
# one of those 'min' vertices.
indics = []
indics_pbc = []
for v1 in px_mins:
indics.append(v1.index())
for v2 in px_maxs:
if v1.point().y() == v2.point().y() and v1.point().z() == v2.point().z():
indics_pbc.append(v2.index())
px_maxs.remove(v2)
for v1 in py_mins:
indics.append(v1.index())
for v2 in py_maxs:
if v1.point().x() == v2.point().x() and v1.point().z() == v2.point().z():
indics_pbc.append(v2.index())
py_maxs.remove(v2)
"""
print self.xmin,self.xmax,self.ymin,self.ymax,self.height,self.width
print '='*100
print indics,indics_pbc
"""
ids = np.array(indics, dtype=np.int32)
ids_pbc = np.array(indics_pbc, dtype=np.int32)
#assert len(indics) == len(indics_pbc)
self.ids = np.array(
[ids[:], ids[:] + self.length, ids[:] + self.length * 2], dtype=np.int32)
self.ids_pbc = np.array(
[ids_pbc[:], ids_pbc[:] + self.length, ids_pbc[:] + self.length * 2], dtype=np.int32)
self.ids.shape = (-1,)
self.ids_pbc.shape = (-1,)
def modify_m(self, m):
"""
        This method might not be necessary ...
"""
for i in range(len(self.ids_pbc)):
j = self.ids_pbc[i]
k = self.ids[i]
m[j] = m[k]
def modify_field(self, v):
"""
        Modify the corresponding fields, e.g. the magnetisation m or the nodal volumes.
"""
for i in range(len(self.ids_pbc)):
v[self.ids_pbc[i]] = v[self.ids[i]]
if __name__ == "__main__":
mesh = df.BoxMesh(df.Point(0, 0, 0), df.Point(10, 5, 1), 10, 5, 1)
#mesh = df.UnitSquareMesh(2, 2)
V = df.FunctionSpace(mesh, "Lagrange", 1)
#V3 = df.VectorFunctionSpace(mesh, "Lagrange", 1)
u1 = df.TrialFunction(V)
v1 = df.TestFunction(V)
K = df.assemble(df.inner(df.grad(u1), df.grad(v1)) * df.dx)
L = df.assemble(v1 * df.dx)
print 'before:', K.array()
#print 'length:', len(K.array())
pbc = PeriodicBoundary2D(mesh)
V = df.FunctionSpace(mesh, "Lagrange", 1, constrained_domain=pbc)
u1 = df.TrialFunction(V)
v1 = df.TestFunction(V)
K = df.assemble(df.inner(df.grad(u1), df.grad(v1)) * df.dx)
print 'after', K.array()
#print 'length:', len(K.array())
| 5,424 | 28.483696 | 97 |
py
|
finmag
|
finmag-master/src/finmag/util/nmesh_to_dolfin.py
|
import re
def convert(nmesh_file, xml_file):
"""
Convert a mesh from nmesh ASCII format to dolfin xml format.
Note that region information is read from nmesh, but then discarded.
You would need to extend the Emitter class to output a second xml file
if you wanted to preserve the region information.
"""
p = Parser(nmesh_file)
e = Emitter(xml_file)
nb_vertices = p.vertices()
e.vertices(nb_vertices)
for i in range(int(nb_vertices)):
e.vertex(p.vertex())
nb_simplices = p.simplices()
e.simplices(nb_simplices)
for i in range(int(nb_simplices)):
e.simplex(p.simplex())
p.done()
e.done()
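# A minimal usage sketch (filenames are illustrative):
#     convert("bar.nmesh", "bar.xml")
# The result can then be loaded with dolfin, e.g. df.Mesh("bar.xml").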
class Parser(object):
"""
Encapsulates what we know about nmesh ASCII files.
"""
def __init__(self, nmesh_file):
"""
Create a parser for the nmesh ASCII file in `nmesh_file`.
"""
print "Reading from '{}'.".format(nmesh_file)
self._fi = open(nmesh_file, 'r')
self._version_string(self._next())
dim, self._vertices, self._simplices = self._summary(self._next())
print "Found a mesh with {} dimensions, {} nodes and {} tetrahedra.".format(
dim, self._vertices, self._simplices)
def vertices(self):
"""
Returns the number of vertices in the mesh.
"""
self._vertices_heading(self._next())
return self._vertices
def vertex(self):
"""
Returns the x, y and z coordinates of the next vertex in the file.
"""
return self._vertex(self._next())
def simplices(self):
"""
Returns the number of simplices (tetrahedra) in the mesh.
"""
self._simplices_heading(self._next())
return self._simplices
def simplex(self):
"""
Returns the tuple (region_number, v0, v1, v2, v3), where the vi
are the indices of the vertices that make up the simplex.
"""
return self._simplex(self._next())
def done(self):
"""
Allow the parser to perform necessary clean-up operations like
closing its handle on the nmesh ASCII file.
"""
self._fi.close()
def _next(self):
return self._fi.readline().rstrip()
def _version_string(self, s):
VERSION_STRING = "# PYFEM mesh file version 1.0"
if not s == VERSION_STRING:
raise ValueError("Version string should be '{}', is '{}'.".format(
VERSION_STRING, s))
def _summary(self, s):
        pattern = r'^# dim = (\d)\s+nodes = (\d+)\s+simplices = (\d+)'
match = re.compile(pattern).match(s)
return match.groups()
def _vertices_heading(self, s):
if not s == self._vertices:
raise ValueError("Info 'nodes = {}' in summary doesn't match header '{}'.".format(
self._vertices, s))
def _vertex(self, s):
coords = s.split()
if not len(coords) == 3:
raise ValueError(
"Can't parse coordinates from string '{}'.".format(s))
return coords
def _simplices_heading(self, s):
if not s == self._simplices:
raise ValueError("Info 'simplices = {}' in summary doesn't match header '{}'.".format(
self._simplices, s))
def _simplex(self, s):
try:
region, v0, v1, v2, v3 = s.split()
except ValueError as e:
print "Expected line with region number followed by indices of four nodes, got '{}'.".format(s)
raise
return region, v0, v1, v2, v3
class Emitter(object):
"""
Write a dolfin xml mesh file.
"""
def __init__(self, xml_file):
"""
Create the emitter.
"""
self.vi = 0 # index of vertex
self.si = 0 # index of simplex
self._vertices = 0 # number of vertices in total
self._simplices = 0 # number of simplices in total
self._done_with_vertices = False
self._done_with_simplices = False
print "Writing to '{}'.".format(xml_file)
self._fo = open(xml_file, 'w')
# None of the python xml libraries I checked supports emitting chunks
# of XML. They all wanted to have the full document in memory before
# comitting it to disk. Since the meshfiles can be potentially very,
# very long and the format is simple I'm using custom code to emit the
# XML. I promise to offer a round of beer if this causes issues down
# the line. (Offer expires 31.12.2019).
self._write('<?xml version="1.0"?>')
self._write('<dolfin xmlns:dolfin="http://fenicsproject.org">')
self._write(' <mesh celltype="tetrahedron" dim="3">')
def vertices(self, n):
"""
Save the number `n` of vertices in the mesh.
"""
self._vertices = int(n)
s = ' <vertices size="{}">'
self._write(s.format(n))
def vertex(self, coords):
"""
Save a single vertex with the coordinates `x`, `y` and `z`.
Indexed automatically.
"""
x, y, z = coords
if not self._done_with_vertices:
s = ' <vertex index="{}" x="{}" y="{}" z="{}" />'
self._write(s.format(self.vi, x, y, z))
self.vi += 1
if self.vi == self._vertices:
self._close_vertices()
self._done_with_vertices = True
else:
raise ValueError(
"Reached number of {} vertices already. Aborting.".format(self._vertices))
def simplices(self, n):
"""
Save the number of simplices (tetrahedra) in the mesh.
"""
self._simplices = int(n)
s = ' <cells size="{}">'
self._write(s.format(n))
def simplex(self, argtuple):
"""
Save a single simplex, identified by its vertices `v0`, `v1`, `v2` and `v3`.
Region number is discarded as of now.
Indexed automatically.
"""
region, v0, v1, v2, v3 = argtuple
if not self._done_with_simplices:
s = ' <tetrahedron index="{}" v0="{}" v1="{}" v2="{}" v3="{}" />'
self._write(s.format(self.si, v0, v1, v2, v3))
self.si += 1
if self.si == self._simplices:
self._close_simplices()
self._close_all()
self._done_with_simplices = True
else:
raise ValueError(
"Reached number of {} simplices already. Aborting.".format(self._simplices))
def done(self):
"""
Allow the parser to perform necessary clean-up operations like
closing its handle on the xml file.
"""
self._fo.close()
def _write(self, s):
self._fo.write(s + "\n")
def _close_vertices(self):
self._write(' </vertices>')
def _close_simplices(self):
self._write(' </cells>')
def _close_all(self):
self._write(' </mesh>')
self._write('</dolfin>')
| 7,063 | 28.556485 | 107 |
py
|
finmag
|
finmag-master/src/finmag/util/test_dmi_from_helix.py
|
import DMI_from_helix as dfh
import numpy as np
import pytest
a = 3.53e-13
ms = 1.56e5
l = 22e-9
d0 = 4 * np.pi * a / l
hDirection = np.array([1., 0., 0.])
h = hDirection * 0.
def test_dmi_from_helix_solution():
# Test the helix length solution with an example that is known to produce a
# certain value.
l = dfh.Find_Helix_Length(d0, a, ms, H=h)
expectedSolution = 2.50e-8
assert abs(l - expectedSolution) < 1.5e-9
def test_helix_strong_field():
# Test whether a helix is found (it shouldn't be) in a very strong magnetic
# field.
h = hDirection * ms
with pytest.raises(ValueError):
dfh.Find_Helix_Length(d0, a, ms, H=h)
# Perform the test with a strong negative field also.
h -= 2 * h
with pytest.raises(ValueError):
dfh.Find_Helix_Length(d0, a, ms, H=h)
# As well as a field in a funky direction.
h = np.array([1., 1., 1.]) * ms
with pytest.raises(ValueError):
dfh.Find_Helix_Length(d0, a, ms, H=h)
def test_zero_helix():
# Test whether a DMI value can be found for a helix of length zero (in the
# absence of a magnetic field)
l = 0.
with pytest.raises(ValueError):
dfh.Find_DMI(a, ms, l, H=h)
| 1,220 | 23.42 | 79 |
py
|
finmag
|
finmag-master/src/finmag/util/point_contacts.py
|
import dolfin as df
def point_contacts(origins, radius, J, debug=False):
"""
Returns a dolfin expression that locates one or more point contacts.
The expression takes the value *J* for coordinates belonging to a point
contact and the value 0 everywhere else. 'Belonging to a point contact'
means that the coordinates are within *radius* of one of the
specified *origins*.
"""
if radius > 1e-6:
# Assuming this is a macroscopic mesh coming from netgen, with the
# circular part cut out at the right position.
# Need to smooth the boundary.
radius += 1e-4
distance_to_origin = "sqrt(pow(x[0] - {0}, 2) + pow(x[1] - {1}, 2))"
point_contact_conditions = [
"(" + distance_to_origin.format(pos[0], pos[1]) + " <= r)" for pos in origins]
expr_str = " || ".join(point_contact_conditions) + " ? J : 0"
if debug:
print expr_str
return df.Expression(expr_str, r=radius, J=J, degree=1)
if __name__ == "__main__":
mesh = df.RectangleMesh(df.Point(0, 0), df.Point(100, 100), 500, 500)
S1 = df.FunctionSpace(mesh, "DG", 0)
pc_expr = point_contacts(
[(25, 50), (75, 50)], radius=10, J=1e10, debug=True)
f = df.interpolate(pc_expr, S1)
df.plot(f)
df.interactive()
| 1,284 | 35.714286 | 86 |
py
|
finmag
|
finmag-master/src/finmag/util/test_set_function_values.py
|
import pytest
import numpy as np
import dolfin as df
from finmag.field import Field
from set_function_values import *
EPSILON = 1e-14
@pytest.fixture
def f():
mesh = df.UnitIntervalMesh(1)
V = df.FunctionSpace(mesh, "CG", 1)
f = df.Function(V)
return f
@pytest.fixture
def vf():
mesh = df.UnitIntervalMesh(1)
W = df.VectorFunctionSpace(mesh, "CG", 1, dim=3)
vf = df.Function(W)
return vf
def test_function_from_constant(f):
value = 1
from_constant(f, df.Constant(value))
assert abs(f(0) - value) < EPSILON
def test_vector_function_from_constant(vf):
value = np.array((1, 2, 3))
from_constant(vf, df.Constant(value))
assert np.max(np.abs((vf(0) - value))) < EPSILON
def test_function_from_expression(f):
value = 1
from_expression(f, df.Expression(str(value), degree=1))
assert abs(f(0) - value) < EPSILON
def test_vector_function_from_expression(vf):
value = np.array((1, 2, 3))
from_expression(vf, df.Expression(map(str, value), degree=1))
assert np.max(np.abs(vf(0) - value)) < EPSILON
def test_function_from_field(f):
value = 1
# why does the following line fail? it prevents us from creating
# a Field instance from a dolfin function
#assert isinstance(f.function_space(), df.FunctionSpace)
#from_field(f, Field(f.function_space(), value))
#assert abs(f(0) - value) < EPSILON
| 1,399 | 23.137931 | 68 |
py
|
finmag
|
finmag-master/src/finmag/util/mesh_templates.py
|
#!/usr/bin/env python
import textwrap
import hashlib
from finmag.util.meshes import from_csg
from finmag.util.helpers import vec2str
netgen_primitives = ['plane', 'cylinder', 'sphere',
'ellipticcylinder', 'ellipsoid', 'cone', 'orthobrick', 'polyhedron']
class MeshTemplate(object):
# Internal counter. It is 0 for mesh primitives but is
    # increased for combined shapes (e.g. created via MeshSum)
    # to create unique names for those combined domains.
counter = 0
def __init__(self, name=None, csg_string=None):
self.name = name
self._csg_stub = csg_string
def _get_name(self):
return self._name
def _set_name(self, value):
if value in netgen_primitives:
raise ValueError(
"Cannot use name '{}' for mesh template as it coincides "
"with one of Netgen's primitives. Please choose a different "
"name (or use the uppercase version.")
self._name = value
name = property(_get_name, _set_name)
def __add__(self, other):
return MeshSum(self, other)
def __sub__(self, other):
return MeshDifference(self, other)
def hash(self, maxh=None, **kwargs):
csg = self.csg_string(maxh=maxh, **kwargs)
return hashlib.md5(csg).hexdigest()
def generic_filename(self, maxh, **kwargs):
raise NotImplementedError(
"Generic mesh prototyp does not provide a filename. Please build a mesh by combining mesh primitives.")
def csg_string(self, maxh=None, **kwargs):
csg_string = textwrap.dedent("""\
algebraic3d
{csg_stub}
tlo {name};
""").format(csg_stub=self.csg_stub(maxh, **kwargs), name=self.name)
return csg_string
def create_mesh(self, maxh=None, save_result=True, filename='', directory='', **kwargs):
if save_result == True and filename == '':
filename = self.generic_filename(maxh, **kwargs)
csg_string = self.csg_string(maxh, **kwargs)
return from_csg(csg_string, save_result=save_result, filename=filename, directory=directory)
class MeshSum(MeshTemplate):
def __init__(self, mesh1, mesh2, name=None):
if mesh1.name == mesh2.name:
raise ValueError(
"Cannot combine mesh templates with the same name ('{}'). Please explicitly "
"rename one or both of them (either by using the 'name' argument in the "
"constructor or by setting their 'name' attribute).".format(mesh1.name))
self.counter = max(mesh1.counter, mesh2.counter) + 1
if name is None:
#name = 'mesh_sum__{}__{}'.format(mesh1.name, mesh2.name)
# create a unique name for this combined domain
name = 'dom_' + str(self.counter)
self.name = name
self.mesh1 = mesh1
self.mesh2 = mesh2
def csg_stub(self, maxh=None, **kwargs):
csg_stub = (self.mesh1.csg_stub(maxh, **kwargs) +
self.mesh2.csg_stub(maxh, **kwargs) +
"solid {name} = {name1} or {name2};\n".format(
name=self.name,
name1=self.mesh1.name,
name2=self.mesh2.name))
return csg_stub
def generic_filename(self, maxh, **kwargs):
filename = "mesh_sum__{}".format(self.hash(maxh, **kwargs))
return filename
class MeshDifference(MeshTemplate):
def __init__(self, mesh1, mesh2, name=None):
if mesh1.name == mesh2.name:
raise ValueError(
"Cannot combine mesh templates with the same name ('{}'). Please explicitly "
"rename one or both of them (either by using the 'name' argument in the "
"constructor or by setting their 'name' attribute).".format(mesh1.name))
self.counter = max(mesh1.counter, mesh2.counter) + 1
if name is None:
#name = 'mesh_sum__{}__{}'.format(mesh1.name, mesh2.name)
# create a unique name for this combined domain
name = 'dom_' + str(self.counter)
self.name = name
self.mesh1 = mesh1
self.mesh2 = mesh2
def csg_stub(self, maxh=None, **kwargs):
csg_stub = (self.mesh1.csg_stub(maxh, **kwargs) +
self.mesh2.csg_stub(maxh, **kwargs) +
"solid {name} = {name1} and not {name2};\n".format(
name=self.name,
name1=self.mesh1.name,
name2=self.mesh2.name))
return csg_stub
def generic_filename(self, maxh, **kwargs):
filename = "mesh_difference__{}".format(
self.mesh1.hash(maxh, **kwargs))
return filename
class MeshPrimitive(MeshTemplate):
def _get_maxh(self, maxh=None, **kwargs):
"""
If `kwargs` contains an item with key 'maxh_NAME' (where NAME
is equal to self.name), returns this value and the associated key.
Otherwise returns the value associated with the key 'maxh'.
"""
try:
key = 'maxh_' + self.name
maxh = kwargs[key]
except KeyError:
if maxh == None:
raise ValueError(
"Please provide a valid value for 'maxh' (or maxh_... for each of the components of the mesh template).")
return maxh
def csg_stub(self, maxh=None, **kwargs):
maxh = self._get_maxh(maxh, **kwargs)
key = 'maxh_{}'.format(self.name)
fmtdict = {key: maxh}
return self._csg_stub.format(**fmtdict)
class Sphere(MeshPrimitive):
def __init__(self, r, center=(0, 0, 0), name='Sphere'):
self.r = r
self.center = center
self.name = name
self._csg_stub = textwrap.dedent("""\
solid {name} = sphere ( {center}; {r} ) -maxh = {{maxh_{name}}};
""".format(name=name, center=vec2str(center, delims=''), r=r))
def generic_filename(self, maxh, **kwargs):
maxh = self._get_maxh(maxh, **kwargs)
return "sphere__center_{}__r_{:.1f}__maxh_{:.1f}".format(
vec2str(self.center, fmt='{:.1f}', delims='', sep='_'), self.r, maxh).replace(".", "_")
class Box(MeshPrimitive):
def __init__(self, x0, y0, z0, x1, y1, z1, name='Box'):
self.x0 = x0
self.y0 = y0
self.z0 = z0
self.x1 = x1
self.y1 = y1
self.z1 = z1
self.name = name
self._csg_stub = textwrap.dedent("""\
solid {name} = orthobrick ( {x0}, {y0}, {z0}; {x1}, {y1}, {z1} ) -maxh = {{maxh_{name}}};
""".format(name=name, x0=x0, y0=y0, z0=z0, x1=x1, y1=y1, z1=z1))
def generic_filename(self, maxh, **kwargs):
maxh = self._get_maxh(maxh, **kwargs)
return "box__{:.1f}__{:.1f}__{:.1f}__{:.1f}__{:.1f}__{:.1f}__maxh_{:.1f}".format(
self.x0, self.y0, self.z0, self.x1, self.y1, self.z1, maxh).replace(".", "_")
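# A minimal usage sketch (parameters are illustrative): primitives can be
# combined with the '+' and '-' operators (which build MeshSum and
# MeshDifference CSG descriptions) before meshing via Netgen:
#     outer = Sphere(r=20, name='outer')
#     hole = Sphere(r=10, name='hole')
#     mesh = (outer - hole).create_mesh(maxh=2.0, save_result=False)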
class EllipticalNanodisk(MeshPrimitive):
def __init__(self, d1, d2, h, center=(0, 0, 0), valign='bottom', name='EllipticalNanodisk'):
self.d1 = d1
self.d2 = d2
self.h = h
self.center = center
self.valign = valign
self.name = name
r1 = 0.5 * d1
r2 = 0.5 * d2
try:
h_bottom = {'bottom': center[2],
'center': center[2] - 0.5 * h,
'top': center[2] - h,
}[valign]
except KeyError:
raise ValueError(
"Argument 'valign' must be one of 'center', 'top', 'bottom'. Got: '{}'".format(valign))
h_top = h_bottom + h
self._csg_stub = textwrap.dedent("""\
solid {name} = ellipticcylinder ({center}; {r1}, 0, 0; 0, {r2}, 0 )
and plane (0, 0, {h_bottom}; 0, 0, -1)
and plane (0, 0, {h_top}; 0, 0, 1) -maxh = {{maxh_{name}}};
""".format(name=name, center=vec2str(self.center, delims=''), r1=r1, r2=r2, h_bottom=h_bottom, h_top=h_top))
def generic_filename(self, maxh, **kwargs):
maxh = self._get_maxh(maxh, **kwargs)
return "elliptical_nanodisk__d1_{:.1f}__d2_{:.1f}__h_{:.1f}__center_{}__valign_{}__maxh_{:.1f}".format(
self.d1, self.d2, self.h, vec2str(self.center, fmt='{:.1f}', delims='', sep='_'), self.valign, maxh).replace(".", "_")
class Nanodisk(EllipticalNanodisk):
def __init__(self, d, h, center=(0, 0, 0), valign='bottom', name='Nanodisk'):
super(Nanodisk, self).__init__(
d, d, h, center=center, valign=valign, name=name)
def generic_filename(self, maxh, **kwargs):
maxh = self._get_maxh(maxh, **kwargs)
return "nanodisk__d_{:.1f}__h_{:.1f}__center_{}__valign_{}__maxh_{:.1f}".format(
self.d1, self.h, vec2str(self.center, fmt='{:.1f}', delims='', sep='_'), self.valign, maxh).replace(".", "_")
| 8,942 | 36.894068 | 130 |
py
|
finmag
|
finmag-master/src/finmag/util/solid_angle_magpar.py
|
import instant
def return_csa_magpar():
args = [["xn", "x", "in"], ["v1n", "v1", "in"],
["v2n", "v2", "in"], ["v3n", "v3", "in"]]
return instant.inline_with_numpy(C_CODE, arrays=args)
C_CODE = """
double SolidAngle(int xn, double *x, int v1n, double *v1, int v2n, double *v2, int v3n, double *v3);
#define my_daxpy(a,b,c,d,e,f) {(e)[0]+=b*(c)[0];(e)[1]+=b*(c)[1];(e)[2]+=b*(c)[2];}
#define my_dcopy(a,b,c,d,e) {(d)[0]=(b)[0];(d)[1]=(b)[1];(d)[2]=(b)[2];}
#define my_dnrm2(a,b,c) sqrt((b)[0]*(b)[0]+(b)[1]*(b)[1]+(b)[2]*(b)[2])
#define my_dscal(a,b,c,d) {(c)[0]*=b;(c)[1]*=b;(c)[2]*=b;}
#define my_ddot(a,b,c,d,e) ((b)[0]*(d)[0]+(b)[1]*(d)[1]+(b)[2]*(d)[2])
#define douter(a,b,c,d) {(d)[0]=(b)[1]*(c)[2]-(b)[2]*(c)[1];(d)[1]=(b)[2]*(c)[0]-(b)[0]*(c)[2];(d)[2]=(b)[0]*(c)[1]-(b)[1]*(c)[0];}
// was PETSC_MACHINE_EPSILON*100 which yields 1.e-12 using doubles.
#define D_EPS 1.e-12
int ND=3;
const double PETSC_PI=atan2(0.0, -1.0);
int PointFromPlane(double *x, double *v1, double *v2, double *v3, double *d)
{
// computes the distance between the point x and the plane defined by v1, v2, v3
// note that x, v1, v2 and v3 are 3-dimensional arrays (pointer to double)
double ab[ND],ac[ND]; // vectors ab and ac
double n[ND]; // vector n, normal to the plane
/* calculate edge vectors */
my_dcopy(ND,v1,1,ab,1); // ab is now v1
my_daxpy(ND,-1.0,v2,1,ab,1); // ab = ab - v2
my_dcopy(ND,v1,1,ac,1); // ac is now v1
my_daxpy(ND,-1.0,v3,1,ac,1); // ac = ac - v3
// summary: ab = v1 - v2
// ac = v1 - v3
/* calculate normal vector */
douter(ND,ab,ac,n); // n = cross(ab, ac)
/* calculate distance */
// normally, this would have to be divided by norm(n), because n is not a unit vector
*d=my_ddot(ND,x,1,n,1)-my_ddot(ND,v1,1,n,1); // d = x \dot n - v1 \dot n
// or (x-v1) \dot n
return(0);
}
double SolidAngle(int xn, double *x, int v1n, double *v1, int v2n, double *v2, int v3n, double *v3)
{
double omega;
/* http://en.wikipedia.org/wiki/Solid_angle */
double d;
PointFromPlane(x,v1,v2,v3,&d);
if (fabs(d)<D_EPS) {
omega=0.0;
return(0);
}
double t_ea[ND],t_eb[ND],t_ec[ND];
double t_nab[ND],t_nbc[ND],t_nca[ND];
double t_norm;
/* calculate edge vectors */
my_dcopy(ND,v1,1,t_ea,1);
my_daxpy(ND,-1.0,x,1,t_ea,1);
my_dcopy(ND,v2,1,t_eb,1);
my_daxpy(ND,-1.0,x,1,t_eb,1);
my_dcopy(ND,v3,1,t_ec,1);
my_daxpy(ND,-1.0,x,1,t_ec,1);
/* calculate normal vectors */
douter(ND,t_ea,t_eb,t_nab);
douter(ND,t_eb,t_ec,t_nbc);
douter(ND,t_ec,t_ea,t_nca);
/* normalize vectors */
t_norm=my_dnrm2(ND,t_nab,1);
if (t_norm < D_EPS) {
omega=0.0;
return(omega);
}
my_dscal(ND,1.0/t_norm,t_nab,1);
t_norm=my_dnrm2(ND,t_nbc,1);
if (t_norm < D_EPS) {
omega=0.0;
return(omega);
}
my_dscal(ND,1.0/t_norm,t_nbc,1);
t_norm=my_dnrm2(ND,t_nca,1);
if (t_norm < D_EPS) {
omega=0.0;
return(omega);
}
my_dscal(ND,1.0/t_norm,t_nca,1);
/* calculate dihedral angles between facets */
/* TODO source of this formula ? */
double t_a_abbc,t_a_bcca,t_a_caab;
t_a_abbc=t_nab[0]*t_nbc[0]+t_nab[1]*t_nbc[1]+t_nab[2]*t_nbc[2];
t_a_bcca=t_nbc[0]*t_nca[0]+t_nbc[1]*t_nca[1]+t_nbc[2]*t_nca[2];
t_a_caab=t_nca[0]*t_nab[0]+t_nca[1]*t_nab[1]+t_nca[2]*t_nab[2];
if (t_a_abbc>1) t_a_abbc=PETSC_PI; else if (t_a_abbc<-1) t_a_abbc=0; else t_a_abbc=PETSC_PI-acos(t_nab[0]*t_nbc[0]+t_nab[1]*t_nbc[1]+t_nab[2]*t_nbc[2]);
if (t_a_bcca>1) t_a_bcca=PETSC_PI; else if (t_a_bcca<-1) t_a_bcca=0; else t_a_bcca=PETSC_PI-acos(t_nbc[0]*t_nca[0]+t_nbc[1]*t_nca[1]+t_nbc[2]*t_nca[2]);
if (t_a_caab>1) t_a_caab=PETSC_PI; else if (t_a_caab<-1) t_a_caab=0; else t_a_caab=PETSC_PI-acos(t_nca[0]*t_nab[0]+t_nca[1]*t_nab[1]+t_nca[2]*t_nab[2]);
omega=t_a_abbc+t_a_bcca+t_a_caab-PETSC_PI;
return(omega);
}
"""
| 3,963 | 32.033333 | 154 |
py
|
finmag
|
finmag-master/src/finmag/util/plot.py
|
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import ImageGrid
from collections import Iterable
import numpy as np
def plot_m(sim, component='x', filename=None, figsize=(10, 5),
extent=None, z=0.0, gridpoints=[100, 100], cbar=True, ncbarticks=5,
cmap='RdBu', bgcolor='w', scale_by_Ms=False, axis_units='nm'):
"""
Plotting function for the magnetisation.
Inputs
------
sim: simulation object of type Simulation or NormalModeSimulation
component: str
'x', 'y', 'z', 'all' or 'angle'
filename: str (None)
File to save the figure to - does not save if not specified.
figsize: 2-tuple
Matplotlib figure size specifiers in inches.
If not 'all', the x-size is 1/3 of what is specified,
such that consistency in size is maintained between
'all' and other plots.
extent: None, float, or 4-tuple
If None, the bounds are calculated from the
finite element mesh.
If a single number, the calculated extent is
multiplied by that - e.g. if extent=1.1, if the
mesh bounds are [-50, 50, -50, 50] then the new
bounds are [-55, 55, -55, 55].
If specified, directly, must be in
the format:
[-ve x bounds of plot, +ve x-bounds of plot,
-ve y bounds of plot, +ve y-bounds of plot]
z: float
Height at which to sample the field.
gridpoints: 2-tuple of integers
[Number of gridpoints in x, gridpoints in y]
Because this is finite elements, we can sample
at arbitrary places. However, sampling can
be fairly costly, so the higher the number of
samples, the longer plotting will take.
cbar: Boolean
Plot a colorbar, or not...
ncbarticks:
Number of values listed on colorbar axis.
Ignored if no colorbar.
cmap:
Matplotlib colormap name. For magnetisation,
divergent colormaps, like RdBu, tend to work
well.
For spin angles, a cyclic map like 'hsv' works
better.
See the full list here:
https://matplotlib.org/examples/color/colormaps_reference.html
    bgcolor: str
Color specifier for background. Areas outside of
the mesh are set to this color. 'w' for white,
'k' for black, or use a hexadecimal color code,
or a tuple of RGB values.
axis_units: str
Units for the x and y axis labels, in case
nm does not make sense.
"""
# If the user doesn't specify plot bounds,
# calculate them from the mesh.
if not isinstance(extent, Iterable):
xs = sim.mesh.coordinates()[:, 0]
ys = sim.mesh.coordinates()[:, 1]
xmin = np.min(xs)
xmax = np.max(xs)
ymin = np.min(ys)
ymax = np.max(ys)
calc_bounds = np.array([xmin, xmax, ymin, ymax])
if type(extent) is float:
extent *= calc_bounds
else:
extent = calc_bounds
components = ['x', 'y', 'z']
x = np.linspace(extent[0], extent[1], gridpoints[0])
y = np.linspace(extent[2], extent[3], gridpoints[1])
X, Y = np.meshgrid(x, y)
X = X.flatten()
Y = Y.flatten()
Z = np.zeros_like(X) + z
mx = []
my = []
mz = []
Ms = []
for xv, yv, zv in zip(X, Y, Z):
try:
# Sample the field at the mesh.
mxv, myv, mzv = sim.llg.m_field.f(xv, yv, zv)
if scale_by_Ms is True:
msv = sim.Ms((xv, yv, zv))
except RuntimeError:
# Set nan's for places outside of the mesh,
# as this allows us to set the colormap
# for bad values and hence set a background color
# not in the colorrange of a given colormap.
mxv, myv, mzv = np.nan, np.nan, np.nan
msv = 0.0
# Note - nan * 0.0 = nan,
# so this is fine and doesn't mess with our
# bounds later!
mx.append(mxv)
my.append(myv)
mz.append(mzv)
Ms.append(msv)
mx = np.array(mx).reshape(gridpoints[0], gridpoints[1])
my = np.array(my).reshape(gridpoints[0], gridpoints[1])
mz = np.array(mz).reshape(gridpoints[0], gridpoints[1])
Ms = np.array(Ms).reshape(gridpoints[0], gridpoints[1])
if scale_by_Ms is True:
mx *= Ms
my *= Ms
mz *= Ms
m = [mx, my, mz]
if component in ['x', 'y', 'z', 'angle']:
fig = plt.figure(figsize=(figsize[0]/3, figsize[1]))
# Have to use ImageGrid in order to get the Colorbar
# to scale in size with the subplots!
grid = ImageGrid(fig, 111,
nrows_ncols=(1, 1),
axes_pad=0.15,
share_all=True,
cbar_location="right",
cbar_mode="single",
cbar_size="7%",
cbar_pad=0.15,
)
ax = grid[0]
ax.set_xlabel('$x$ ({})'.format(axis_units))
ax.set_ylabel('$y$ ({})'.format(axis_units))
# Note: By default, imshow plots like a matrix rather
# than a Cartesian axis, and so below we have to set
# origin = 'lower' everywhere.
        if component == 'angle':
theta = np.arctan2(my, mx)
theta[theta < 0] += 2*np.pi
cmap_edited = plt.get_cmap(cmap)
# Edit the colormap and set bad values to bgcolor
cmap_edited.set_bad(color=bgcolor, alpha=1.0)
# Here we set the bounds between 0 and 2*pi for the angle,
# though there's no reason why it couldn't be -pi and pi
# really.
im = ax.imshow(theta, origin='lower',
extent=extent, vmin=0,
vmax=2*np.pi, cmap=cmap_edited)
ax.set_title('$xy$ angle')
else:
cmap_edited = plt.get_cmap(cmap)
cmap_edited.set_bad(color=bgcolor, alpha=1.0)
if scale_by_Ms is True:
vmin = -np.max(Ms)
vmax = np.max(Ms)
else:
vmin = -1.0
vmax = 1.0
im = ax.imshow(m[components.index(component)], origin='lower',
extent=extent, vmin=vmin, vmax=vmax,
cmap=cmap_edited)
ax.set_title('$m_{}$'.format(component))
    elif component == 'all':
fig = plt.figure(figsize=figsize)
grid = ImageGrid(fig, 111,
nrows_ncols=(1, 3),
axes_pad=0.15,
share_all=True,
cbar_location="right",
cbar_mode="single",
cbar_size="7%",
cbar_pad=0.15,
)
if scale_by_Ms is True:
vmin = -np.max(Ms)
vmax = np.max(Ms)
else:
vmin = -1.0
vmax = 1.0
for ax, comp, label in zip(grid, [mx, my, mz], ['x', 'y', 'z']):
im = ax.imshow(comp, origin='lower', extent=extent,
vmin=vmin, vmax=vmax, cmap=cmap)
ax.set_title('$m_{}$'.format(label))
ax.set_xlabel('$x$ ({})'.format(axis_units))
ax.set_ylabel('$y$ ({})'.format(axis_units))
else:
raise ValueError("Component is not valid")
if cbar is True:
        if component == 'angle':
# Some special handling to print \pi
# rather than the numbers!
cbar = ax.cax.colorbar(im,
ticks=np.linspace(0, 2*np.pi, ncbarticks))
cbarlabels = ['${:.1f} \pi$'.format(x/(np.pi))
if x != 0.0 else '0.0'
for x in np.linspace(0, 2*np.pi, ncbarticks)]
cbar.ax.set_yticklabels(cbarlabels)
else:
cbar = ax.cax.colorbar(im,
ticks=np.linspace(vmin, vmax, ncbarticks))
if scale_by_Ms:
cbar.ax.set_ylabel('A / m', rotation=0)
ax.cax.toggle_label(True)
if filename:
fig.savefig(filename, dpi=1000)
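# A minimal usage sketch (assuming `sim` is an existing thin-film
# simulation object with a relaxed magnetisation):
#     plot_m(sim, component='z', filename='m_z.png', scale_by_Ms=True)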
| 8,257 | 32.433198 | 77 |
py
|
finmag
|
finmag-master/src/finmag/util/__init__.py
|
# FinMag - a thin layer on top of FEniCS to enable micromagnetic multi-physics simulations
# Copyright (C) 2012 University of Southampton
# Do not distribute
#
# CONTACT: [email protected]
#
# AUTHOR(S) OF THIS FILE: Dmitri Chernyshenko ([email protected])
from .plot import *
| 291 | 28.2 | 90 |
py
|
finmag
|
finmag-master/src/finmag/util/vtk_export.py
|
import os
import re
import glob
import time
import logging
import dolfin as df
from aeon import timer
log = logging.getLogger(name="finmag")
class VTK(object):
"""
Can save dolfin functions to VTK files and allows for sequential snapshots.
This object can be used in two modes:
1. To save a single snapshot.
Pass a ``filename`` to the constructor. Should that file exist, it will not
be overwritten unless ``force_overwrite`` is set to True.
2. To save a series of snapshots.
In that case, don't pass in a ``filename``, but only a ``prefix``. The
filename will then be built using the ``prefix`` and the current number of
the snapshot.
"""
def __init__(self, filename="", directory="", force_overwrite=False, prefix=""):
"""
Force the object into one of the two modes described in the class documentation.
If `filename` is empty, a default filename will be generated
based on a sequentially increasing counter.
A user-defined string can be inserted into the generated filename by
passing `prefix`. The prefix will be ignored if a filename is passed.
Note that `filename` is also allowed to contain directory
components (for example filename='snapshots/foo.pvd'), which
are simply appended to `directory`. However, if `filename`
contains an absolute path then the value of `directory` is
ignored. If a file with the same filename already exists, the
method will abort unless `force_overwrite` is True, in which
case the existing .pvd and all associated .vtu files are
deleted before saving the snapshot.
If `directory` is non-empty then the file will be saved in the
specified directory.
All directory components present in either `directory` or
`filename` are created if they do not already exist.
"""
self.filename = filename
self.directory = directory
self.force_overwrite = force_overwrite
self.prefix = prefix
self.counter = 1
if filename == "":
prefix_insert = "" if self.prefix == "" else self.prefix + "_"
filename = "{}.pvd".format(prefix_insert, self.counter)
ext = os.path.splitext(filename)[1]
if ext != '.pvd':
raise ValueError(
"File extension for vtk snapshot file must be '.pvd', "
"but got: '{}'".format(ext))
if os.path.isabs(filename) and self.directory != "":
log.warning(
"Ignoring 'directory' argument (value given: '{}') because "
"'filename' contains an absolute path: '{}'".format(
self.directory, filename))
self.output_file = os.path.join(self.directory, filename)
if os.path.exists(self.output_file):
if self.force_overwrite:
log.warning(
"Removing file '{}' and all associated .vtu files "
"(because force_overwrite=True).".format(self.output_file))
os.remove(self.output_file)
basename = re.sub('\.pvd$', '', self.output_file)
for f in glob.glob(basename + "*.vtu"):
os.remove(f)
else:
raise IOError(
"Aborting snapshot creation. File already exists and "
"would overwritten: '{}' (use force_overwrite=True if "
"this is what you want)".format(self.output_file))
# We need to open the file here so that it stays open during
# all calls to save(), otherwise consecutive calls will
# overwrite previously written data.
self.f = df.File(self.output_file, "compressed")
@timer.method
def save(self, dolfin_function, t):
"""
Save the ``dolfin_function`` to a .pvd file (in VTK format) which can
later be inspected using Paraview, for example.
"""
t0 = time.time()
self.f << dolfin_function
t1 = time.time()
log.debug("Saved snapshot at t={} to file '{}' (saving took "
"{:.3g} seconds).".format(t, self.output_file, t1 - t0))
self.counter += 1
| 4,264 | 37.080357 | 88 |
py
|
finmag
|
finmag-master/src/finmag/util/native_compiler.py
|
# FinMag - a thin layer on top of FEniCS to enable micromagnetic multi-physics simulations
# Copyright (C) 2012 University of Southampton
# Do not distribute
#
# CONTACT: [email protected]
#
# AUTHOR(S) OF THIS FILE: Dmitri Chernyshenko ([email protected])
"""
Automatic compilation of C/C++ extension modules.
Invoking make_modules() will run the Makefile in the native code directory.
If make returns an error code, an exception will be raised.
Only the first call to make_modules() will invoke make; subsequent calls are ignored.
This module should not be used directly. Use
from finmag.native import [symbol]
when a native function or class is required.
"""
import subprocess
import logging
import sys
import os
import re
__all__ = ["make_modules"]
NATIVE_DIR = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "../../../native")
MODULES_OUTPUT_DIR = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "../../finmag/native")
MAKEFILE = os.path.join(NATIVE_DIR, "Makefile")
logger = logging.getLogger("finmag")
def replace_c_errors_with_python_errors(s):
repl = lambda m: r'File "%s", line %s (%s): ' % (
os.path.abspath(os.path.join(NATIVE_DIR, m.group(1))), m.group(2), m.group(3))
return re.sub(r"([^\s:]+):(\d+):(\d+): ", repl, s)
def run_make(cmd, **kwargs):
try:
subprocess.check_output(cmd, stderr=subprocess.STDOUT, **kwargs)
except subprocess.CalledProcessError, ex:
print(ex.output)
output = replace_c_errors_with_python_errors(ex.output)
with open(MODULES_OUTPUT_DIR + "/compiler_errors.log", "w") as f:
f.write(output)
print "If you can't see the error message below, either set your term to deal with utf8, or check the file src/finmag/native/compiler_errors.log"
sys.stderr.write(output)
raise Exception("make_modules: Make failed")
modules_compiled = False
def make_modules():
global modules_compiled
if not modules_compiled:
if not os.environ.has_key('DISABLE_PYTHON_MAKE') and os.path.exists(MAKEFILE):
# FIXME: The next line always prints, even if modules are built.
# It may be possible to fix this by running 'make -q' first and
# checking its exit status, but this seems to require some
# restructuring of the build logic in 'native'.
logger.debug("Building modules in 'native'...")
run_make(["make"], cwd=NATIVE_DIR)
modules_compiled = True
def pipe_output(cmd):
process = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True, bufsize=1)
while True:
line = process.stdout.readline()
if not line:
break
print replace_c_errors_with_python_errors(line),
process.communicate()
return process.poll()
| 2,858 | 33.035714 | 153 |
py
|
finmag
|
finmag-master/src/finmag/util/helpers.py
|
from __future__ import division
from datetime import datetime
from glob import glob
from contextlib import contextmanager
from finmag.util.fileio import Tablereader
from finmag.util.visualization import render_paraview_scene
from finmag.util.versions import get_version_dolfin
from finmag.util import ansistrm
from threading import Timer
from distutils.version import LooseVersion
import subprocess as sp
import shlex
import itertools
import tempfile
import shutil
import logging.handlers
import numpy as np
import dolfin as df
import math
import types
import sys
import os
import re
import sh
logger = logging.getLogger("finmag")
def expression_from_python_function(func, function_space):
class ExpressionFromPythonFunction(df.Expression):
"""
Turn a python function to a dolfin expression over given functionspace.
"""
def __init__(self, python_function, **kwargs):
self.func = python_function
def eval(self, eval_result, x):
eval_result[:] = self.func(x)
def value_shape(self):
# () for scalar field, (N,) for N dimensional vector field
return function_space.ufl_element().value_shape()
return ExpressionFromPythonFunction(func, degree=1)
def create_missing_directory_components(filename):
"""
Creates any directory components in 'filename' which don't exist yet.
For example, if filename='/foo/bar/baz.txt' then the directory /foo/bar
will be created.
"""
# Create directory part if it does not exist
dirname = os.path.dirname(filename)
if dirname != '':
if not os.path.exists(dirname):
os.makedirs(dirname)
def logging_handler_str(handler):
"""
Return a string describing the given logging handler.
"""
if handler.__class__ == logging.StreamHandler:
handlerstr = str(handler.stream)
elif handler.__class__ in [logging.FileHandler, logging.handlers.RotatingFileHandler]:
handlerstr = str(handler.baseFilename)
else:
handlerstr = str(handler)
return handlerstr
def logging_status_str():
"""
Return a string that shows all known loggers and their current levels.
This is useful for debugging of the logging module.
"""
rootlog = logging.getLogger('')
msg = ("Current logging status: "
"rootLogger level=%2d\n" % rootlog.level)
# This keeps the loggers (with the exception of root)
loggers = logging.Logger.manager.loggerDict
for loggername, logger in [('root', rootlog)] + loggers.items():
# check that we have any handlers at all before we attempt
# to iterate
if hasattr(logger, 'handlers'):
for i, handler in enumerate(logger.handlers):
handlerstr = logging_handler_str(handler)
msg += (" %15s (lev=%2d, eff.lev=%2d) -> handler %d: lev=%2d %s\n"
% (loggername, logger.level, logger.getEffectiveLevel(),
i, handler.level, handlerstr))
else:
msg += (" %15s -> %s\n"
% (loggername, "no handlers found"))
return msg
def set_logging_level(level):
"""
Set the level for finmag log messages.
*Arguments*
level: string
One of the levels supported by Python's `logging` module.
Supported values: 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL' and
the finmag specific level 'EXTREMEDEBUG'.
"""
if level not in ['EXTREMEDEBUG', 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']:
raise ValueError("Logging level must be one of: 'DEBUG', 'INFO', "
"'WARNING', 'ERROR', 'CRITICAL'")
logger.setLevel(level)
supported_color_schemes = ansistrm.level_maps.keys()
supported_color_schemes_str = ", ".join(
["'{}'".format(s) for s in supported_color_schemes])
def set_color_scheme(color_scheme):
"""
Set the color scheme for finmag log messages in the terminal.
*Arguments*
color_scheme: string
One of the color schemes supported by Python's `logging` module.
Supported values: {}.
"""
if color_scheme not in supported_color_schemes:
raise ValueError(
"Color scheme must be one of: {}".format(supported_color_schemes_str))
for h in logger.handlers:
if not isinstance(h, ansistrm.ColorizingStreamHandler):
continue
h.level_map = ansistrm.level_maps[color_scheme]
# Insert supported color schemes into docstring
set_color_scheme.__doc__ = set_color_scheme.__doc__.format(
supported_color_schemes_str)
def start_logging_to_file(filename, formatter=None, mode='a', level=logging.DEBUG, rotating=False, maxBytes=0, backupCount=1):
"""
Add a logging handler to the "finmag" logger which writes all
(future) logging output to the given file. It is possible to call
this multiple times with different filenames. By default, if the
file already exists then new output will be appended at the end
(use the 'mode' argument to change this).
*Arguments*
formatter: instance of logging.Formatter
        For details, see the section 'Formatter Objects' in the
documentation of the logging module.
mode: ['a' | 'w']
Determines whether new content is appended at the end ('a') or
whether logfile contents are overwritten ('w'). Default: 'a'.
rotating: bool
If True (default: False), limit the size of the logfile to
`maxBytes` (0 means unlimited). Once the file size is near
this limit, a 'rollover' will occur. See the docstring of
`logging.handlers.RotatingFileHandler` for details.
*Returns*
The newly created logging hander is returned.
"""
if formatter is None:
formatter = logging.Formatter(
'[%(asctime)s] %(levelname)s: %(message)s', datefmt='%H:%M:%S')
filename = os.path.abspath(os.path.expanduser(filename))
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
os.makedirs(dirname)
h = logging.handlers.RotatingFileHandler(
filename, mode=mode, maxBytes=maxBytes, backupCount=backupCount)
h.setLevel(level)
h.setFormatter(formatter)
if mode == 'a':
logger.info("Finmag logging output will be appended to file: "
"'{}'".format(filename))
else:
# XXX FIXME: There is still a small bug here: if we create
# multiple simulations with the same name from the same
# ipython session, the logging output of the first will not be
# deleted. For example:
#
# from finmag import sim_with
# import dolfin as df
# import logging
#
# logger = logging.getLogger("finmag")
# mesh = df.BoxMesh(df.Point(0, 0, 0), df.Point(1, 1, 1), 5, 5, 5)
#
# logger.debug("Creating first simulation")
# sim = sim_with(mesh, 1e6, m_init=(1,0,0), name="sim1")
#
# logger.debug("Creating second simulation")
# sim = sim_with(mesh, 1e6, m_init=(1,0,0), name="sim1")
#
# At the end the output of the first simulation is still
# present in the logfile "sim1.log".
logger.info("Finmag logging output will be written to file: '{}' "
"(any old content will be overwritten).".format(filename))
logger.addHandler(h)
return h
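# For example (a sketch; the filename is illustrative): append to a
# rotating logfile capped at roughly 1 MB:
#     start_logging_to_file('finmag.log', rotating=True, maxBytes=10**6)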
def get_hg_revision_info(repo_dir, revision='tip'):
"""
Returns the revision number, revision ID and date of a revision in
the given Mercurial repository. For example, the information
returned might be:
(3486, '18e7def5e18a', '2013-04-25')
"""
cwd_bak = os.getcwd()
try:
os.chdir(os.path.expanduser(repo_dir))
except OSError:
raise ValueError(
"Expected a valid repository, but directory does not exist: '{}'".format(repo_dir))
try:
rev_nr = int(
sp.check_output(['hg', 'id', '-n', '-r', revision]).strip())
rev_id = sp.check_output(['hg', 'id', '-i', '-r', revision]).strip()
#rev_log = sp.check_output(['hg', 'log', '-r', revision]).strip()
rev_date = sp.check_output(
['hg', 'log', '-r', revision, '--template', '{date|isodate}']).split()[0]
except sp.CalledProcessError:
raise ValueError(
"Invalid revision '{}', or invalid Mercurial repository: '{}'".format(revision, repo_dir))
os.chdir(cwd_bak)
return rev_nr, rev_id, rev_date
def get_git_revision_info(repo_dir, revision='HEAD'):
"""
    Returns the revision number, revision ID and date of a revision in
    the given Git repository, as a tuple (rev_nr, rev_id, rev_date).
"""
cwd_bak = os.getcwd()
try:
os.chdir(os.path.expanduser(repo_dir))
except OSError:
raise ValueError(
"Expected a valid repository, but directory does not exist: '{}'".format(repo_dir))
try:
rev_id = sp.check_output(['git', 'rev-parse', revision]).strip()
rev_date = sp.check_output(
['git', 'show', '-s', '--format=%ci',revision]).split()[0]
rev_nr = int(sp.check_output(['git', 'rev-list', '--count', revision]))
except sp.CalledProcessError:
raise ValueError(
"Invalid revision '{}', or invalid Git repository: '{}'".format(revision, repo_dir))
os.chdir(cwd_bak)
return rev_nr, rev_id, rev_date
def binary_tarball_name(repo_dir, revision='HEAD', suffix=''):
"""
Returns the name of the Finmag binary tarball if built from the
given repository and revision.
The general pattern is something like:
FinMag-dist__2013-04-25__rev3486_18e7def5e18a_suffix.tar.bz2
If specified, the suffix is inserted immediately before '.tar.bz2'.
*Arguments*
repo_dir : name of a directory containing a valid Finmag repository.
revision : the revision to be bundled in the tarball.
suffix : string to be appended to the tarball
"""
# XXX TODO: Should we also check whether the repo is actually a Finmag
# repository?!?
rev_nr, rev_id, rev_date = get_git_revision_info(repo_dir, revision)
tarball_name = "FinMag-dist__{}__{}{}.tar.bz2".format(
rev_date, rev_id, suffix)
return tarball_name
def clean_filename(filename):
"""
Remove non-alphanumeric characters from filenames.
*Parameters*
filename : str
The filename to be sanitized.
*Returns*
clean : str
A sanitized filename that contains only alphanumeric
characters and underscores.
"""
filename = re.sub(r'[^a-zA-Z0-9_]', '_', filename)
return filename
def assert_number_of_files(files, n):
"""
Check that there are exactly `n` files matching the pattern in
`files` (which may contain wildcards, such as 'foo/bar*.txt') and
raise an AssertionError otherwise.
"""
assert(len(glob(files)) == n)
def times_curl(v, dim):
"""
Returns v times curl of v on meshes with dimensions 1, 2, or 3.
Arguments:
- v is a dolfin function on a 1d, 2d or 3d vector function space.
- dim is the number of dimensions of the mesh.
On three-dimensional meshes, dolfin supports computing the integrand
of the DMI energy using
df.inner(v, df.curl(v)) eq.1
However, the curl operator is not implemented on 1d and 2d meshes.
With the expansion of the curl in cartesian coordinates:
curlx = dmzdy - dmydz
curly = dmxdz - dmzdx
curlz = dmydx - dmxdy
we can compute eq. 1 with
(vx * curlx + vy * curly + vz * curlz),
including only existing derivatives. Derivatives that do not exist
are set to 0.
"""
if dim == 3:
return df.inner(v, df.curl(v))
gradv = df.grad(v)
# Derivatives along x direction exist in both 1d and 2d cases.
dvxdx = gradv[0, 0]
dvydx = gradv[1, 0]
dvzdx = gradv[2, 0]
# Derivatives along z direction do not exist in 1d and 2d cases,
# so they are set to zero.
dvxdz = 0
dvydz = 0
dvzdz = 0
# Derivatives along y direction exist only in a 2d case.
# For 1d case, these derivatives are set to zero.
if dim == 1:
dvxdy = 0
dvydy = 0
dvzdy = 0
elif dim == 2:
dvxdy = gradv[0, 1]
dvydy = gradv[1, 1]
dvzdy = gradv[2, 1]
# Components of the curl(v).
curlx = dvzdy - dvydz
curly = dvxdz - dvzdx
curlz = dvydx - dvxdy
# Return v*curl(v).
return v[0] * curlx + v[1] * curly + v[2] * curlz
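# A minimal usage sketch (mesh is illustrative): assemble the DMI-style
# integrand on a 2d mesh, where df.curl is not available:
#     mesh = df.UnitSquareMesh(4, 4)
#     V = df.VectorFunctionSpace(mesh, 'CG', 1, dim=3)
#     v = df.interpolate(df.Constant((0, 0, 1)), V)
#     E = df.assemble(times_curl(v, dim=2) * df.dx)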
def components(vs):
"""
For a list of vectors of the form [x0, ..., xn, y0, ..., yn, z0, ..., zn]
this will return a list of vectors with the shape
    [[x0, ..., xn], [y0, ..., yn], [z0, ..., zn]].
"""
return vs.view().reshape((3, -1))
def vectors(vs):
"""
For a list of vectors of the form [x0, ..., xn, y0, ..., yn, z0, ..., zn]
this will return a list of vectors with the shape
[[x0, y0, z0], ..., [xn, yn, zn]].
"""
number_of_nodes = len(vs) // 3
return vs.view().reshape((number_of_nodes, -1), order="F")
def for_dolfin(vs):
"""
The opposite of the function vectors.
Takes a list with the shape [[x0, y0, z0], ..., [xn, yn, zn]]
and returns [x0, ..., xn, y0, ..., yn, z0, ..., zn].
"""
return rows_to_columns(vs).flatten()
def norm(vs):
"""
    Returns the Euclidean norm of one or several vectors in three dimensions.
When passing an array of vectors, the shape is expected to be in the form
[[x0, y0, z0], ..., [xn, yn, zn]].
"""
if not type(vs) == np.ndarray:
vs = np.array(vs)
if vs.shape == (3, ):
return np.linalg.norm(vs)
return np.sqrt(np.add.reduce(vs * vs, axis=1))
def crossprod(v, w):
"""
Compute the point-wise cross product of two (3D) vector fields on a mesh.
The arguments `v` and `w` should be numpy.arrays representing
dolfin functions, i.e. they should be of the form [x0, ..., xn,
y0, ..., yn, z0, ..., zn]. The return value is again a numpy array
of the same form.
"""
if df.parameters.reorder_dofs_serial != False:
raise RuntimeError(
"Please ensure that df.parameters.reorder_dofs_serial is set to False.")
assert(v.ndim == 1 and w.ndim == 1)
a = v.reshape(3, -1)
b = w.reshape(3, -1)
return np.cross(a, b, axisa=0, axisb=0, axisc=0).reshape(-1)
def fnormalise(arr, ignore_zero_vectors=False):
"""
Returns a normalised copy of vectors in arr.
Expects arr to be a numpy.ndarray in the form that dolfin
provides: [x0, ..., xn, y0, ..., yn, z0, ..., zn].
If `ignore_zero_vectors` is True (default: False) then any
3-vector of norm zero will be left alone. Thus the resulting array
will still have zero vectors in the same places. Otherwise the
entries in the resulting array will be filled with NaN.
"""
a = arr.astype(np.float64) # this copies
a = a.reshape((3, -1))
a_norm = np.sqrt(a[0] * a[0] + a[1] * a[1] + a[2] * a[2])
if ignore_zero_vectors:
a_norm[np.where(a_norm == 0)] = 1.0
a /= a_norm
a = a.ravel()
return a
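# For example (doctest-style sketch):
#     >>> fnormalise(np.array([3., 0., 4., 1., 0., 0.]))
#     array([ 0.6,  0. ,  0.8,  1. ,  0. ,  0. ])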
def angle(v1, v2):
"""
Returns the angle between two three-dimensional vectors.
"""
return np.arccos(np.dot(v1, v2) / (norm(v1) * norm(v2)))
def rows_to_columns(arr):
"""
For an array of the shape [[x1, y1, z1], ..., [xn, yn, zn]]
returns an array of the shape [[x1, ..., xn],[y1, ..., yn],[z1, ..., zn]].
"""
return arr.reshape(arr.size, order="F").reshape((3, -1))
def stats(arr, axis=1):
median = np.median(arr)
average = np.mean(arr)
minimum = np.nanmin(arr)
maximum = np.nanmax(arr)
spread = np.std(arr)
stats = " min, median, max = {0}, {1}, {2}\n mean, std = {3}, {4}".format(
minimum, median, maximum, average, spread)
return stats
def frexp10(x, method="string"):
"""
Same as math.frexp but in base 10, will return (m, e)
such that x == m * 10 ** e.
"""
if method == "math":
lb10 = math.log10(x)
return 10 ** (lb10 - int(lb10)), int(lb10)
else:
nb = ("%e" % x).split("e")
return float(nb[0]), int(nb[1])
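# For example (doctest-style sketch):
#     >>> frexp10(1234.5)
#     (1.2345, 3)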
def tex_sci(x, p=2):
"""
Returns LaTeX code for the scientific notation of a floating point number.
"""
m, e = frexp10(x)
return "{:.{precision}f} \\times 10^{{{}}}".format(m, e, precision=p)
def sphinx_sci(x, p=2):
"""
Returns the code you need to have math mode in sphinx and the
    scientific notation of the floating point number x.
"""
return ":math:`{}`".format(tex_sci(x, p))
def mesh_and_space(mesh_or_space):
"""
    Return a (df.Mesh, df.VectorFunctionSpace) tuple where one of the two items
was passed in as argument and the other one built/extracted from it.
"""
if isinstance(mesh_or_space, df.FunctionSpace) and mesh_or_space.num_sub_spaces() == 3:
S3 = mesh_or_space
mesh = S3.mesh()
else:
mesh = mesh_or_space
S3 = df.VectorFunctionSpace(mesh, "Lagrange", 1, dim=3)
return mesh, S3
def verify_function_space_type(function_space, family, degree, dim):
"""
Check that `function_space` is a dolfin FunctionSpace or VectorFunctionSpace
of the correct type. It checks for:
- the finite element family (e.g. 'CG' for a CG1 space)
    - the finite element degree (e.g. 1 for a CG1 space)
- the dimension `dim` of the function values of elements of that function space;
this would be `None` if `function_space` is a FunctionSpace and a single number
if `function_space` is a VectorFunctionSpace.
"""
ufl_element = function_space.ufl_element()
mesh = function_space.mesh()
# Allow abbreviations 'DG' and 'CG'
if family == 'DG':
family = 'Discontinuous Lagrange'
elif family == 'CG':
family = 'Lagrange'
family_and_degree_are_correct = \
(family == ufl_element.family() and
degree == ufl_element.degree())
if dim == None:
# `function_space` should be a dolfin.FunctionSpace
return (function_space.num_sub_spaces() == 0 and
family_and_degree_are_correct)
else:
# `function_space` should be a dolfin.VectorFunctionSpace
# for VectorFunctionSpace this should be a 1-tuple of the form (dim,)
value_shape = ufl_element.value_shape()
if len(value_shape) != 1:
return False
else:
return (function_space.num_sub_spaces() > 0 and
family_and_degree_are_correct and
dim == value_shape[0])
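# For example (a sketch, assuming `mesh` is an existing dolfin mesh):
#     V = df.VectorFunctionSpace(mesh, 'CG', 1, dim=3)
#     verify_function_space_type(V, 'CG', 1, dim=3)   # -> True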
def mesh_equal(mesh1, mesh2):
cds1 = mesh1.coordinates()
cds2 = mesh2.coordinates()
return np.array_equal(cds1, cds2)
# TODO: In dolfin 1.2 if the pbc are used, the degree of freedom for functionspace
# is different from the number of mesh coordinates, so we need to consider this
# problem as well
def vector_valued_function(value, mesh_or_space, normalise=False, **kwargs):
"""
Create a vector-valued function on the given mesh or VectorFunctionSpace.
Returns an object of type 'df.Function'.
`value` can be any of the following:
- tuple, list or numpy.ndarray of length 3
- dolfin.Constant representing a 3-vector
- dolfin.Expression
- 3-tuple of strings (with keyword arguments if needed),
which will get cast to a dolfin.Expression where any variables in
the expression are substituted with the values taken from 'kwargs'
- numpy.ndarray of nodal values of the shape (3*n,), where n
is the number of nodes. Note that the elements in this array
should follow dolfin's convention (i.e., the x-coordinates
of all function values should be listed first, then the y-
and z-values). The shape can also be (n, 3) with one vector
per node.
- function (any callable object will do) of the form:
f: (x, y, z) -> v
where v is the 3-vector that is the function value at the
point (x, y, z).
*Arguments*
value -- the value of the function as described above
mesh_or_space -- either a dolfin.VectorFunctionSpace of dimension 3
or a dolfin.Mesh
normalise -- if True then the function values are normalised to
unit length (default: False)
kwargs -- if `value` is a 3-tuple of strings (which will be
cast to a dolfin.Expression), then any variables
occurring in them will be substituted with the
values in kwargs; otherwise kwargs is ignored
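
    Example::

        mesh = df.UnitCubeMesh(2, 2, 2)
        m = vector_valued_function((1, 0, 0), mesh, normalise=True)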
"""
mesh, S3 = mesh_and_space(mesh_or_space)
assert(S3.ufl_element().family() == 'Lagrange')
assert(S3.ufl_element().degree() == 1)
if isinstance(value, (df.Constant, df.Expression)):
fun = df.interpolate(value, S3)
elif isinstance(value, (tuple, list, np.ndarray)) and len(value) == 3:
        # We recognise a sequence of strings as the ingredients for a df.Expression.
if all(isinstance(item, basestring) for item in value):
expr = df.Expression(value, degree=1, **kwargs)
fun = df.interpolate(expr, S3)
else:
#fun = df.Function(S3)
#vec = np.empty((fun.vector().size()/3, 3))
# vec[:] = value # using broadcasting
# fun.vector().set_local(vec.transpose().reshape(-1))
expr = df.Constant(list(value))
fun = df.interpolate(expr, S3)
elif isinstance(value, np.ndarray):
fun = df.Function(S3)
if value.ndim == 2:
assert value.shape[1] == 3
value = value.reshape(value.size, order="F")
if not value.dtype == np.double:
value = value.astype(np.double)
fun.vector().set_local(value)
    # if it's a normal function, we wrap it in a dolfin expression
elif hasattr(value, '__call__'):
class HelperExpression(df.Expression):
def __init__(self, value, **kwargs):
self.fun = value
def eval(self, value, x):
value[:] = self.fun(x)[:]
def value_shape(self):
return (3,)
hexp = HelperExpression(value, degree=1)
fun = df.interpolate(hexp, S3)
else:
raise TypeError("Cannot set value of vector-valued function from "
"argument of type '{}'".format(type(value)))
if normalise:
fun.vector().set_local(fnormalise(fun.vector().array()))
return fun
# XXX TODO: This should perhaps be merged with scalar_valued_dg_function
# to avoid code duplication (but only if it doesn't obfuscate the
# interface and it is clear how to distinguish whether a Lagrange or
# DG function space should be used).
def scalar_valued_function(value, mesh_or_space):
"""
Create a scalar function on the given mesh or VectorFunctionSpace.
If mesh_or_space is a FunctionSpace, it should be of type "Lagrange"
(for "DG" spaces use the function `scalar_valued_dg_function`
instead). Returns an object of type 'df.Function'.
`value` can be any of the following (see `vector_valued_function`
for more details):
- a number
- numpy.ndarray or a common list
- dolfin.Constant or dolfin.Expression
- function (or any callable object)
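
    Example (illustrative; `mesh` is an existing dolfin mesh)::

        f = scalar_valued_function(lambda pt: pt[0] + 2 * pt[1], mesh)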
"""
if isinstance(mesh_or_space, df.FunctionSpace):
S1 = mesh_or_space
assert(S1.ufl_element().family() == 'Lagrange')
assert(S1.ufl_element().degree() == 1)
mesh = S1.mesh()
else:
mesh = mesh_or_space
S1 = df.FunctionSpace(mesh, "Lagrange", 1)
if isinstance(value, (df.Constant, df.Expression)):
fun = df.interpolate(value, S1)
elif isinstance(value, (np.ndarray, list)):
fun = df.Function(S1)
assert(len(value) == fun.vector().size())
fun.vector().set_local(value)
elif isinstance(value, (int, float, long)):
fun = df.Function(S1)
fun.vector()[:] = value
elif hasattr(value, '__call__'):
        # if it's a normal function, we wrap it in a dolfin expression
class HelperExpression(df.Expression):
def __init__(self, value, **kwargs):
self.fun = value
def eval(self, value, x):
value[0] = self.fun(x)
hexp = HelperExpression(value, degree=1)
fun = df.interpolate(hexp, S1)
else:
raise TypeError("Cannot set value of scalar-valued function from "
"argument of type '{}'".format(type(value)))
return fun
def scalar_valued_dg_function(value, mesh_or_space):
"""
Create a scalar function on the given mesh or VectorFunctionSpace.
If mesh_or_space is a FunctionSpace, it should be of type "DG"
(for "Lagrange" spaces use the function `scalar_valued_function`
instead). Returns an object of type 'df.Function'.
`value` can be any of the following (see `vector_valued_function`
for more details):
- a number
- numpy.ndarray or a common list
- dolfin.Constant or dolfin.Expression
- function (or any callable object)
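
    Example (illustrative; `mesh` is an existing dolfin mesh)::

        f = scalar_valued_dg_function(lambda pt: 1.0 if pt[2] <= 0.5 else 10.0, mesh)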
"""
if isinstance(mesh_or_space, df.FunctionSpace):
dg = mesh_or_space
assert(dg.ufl_element().family() == 'Discontinuous Lagrange')
assert(dg.ufl_element().degree() == 0)
mesh = dg.mesh()
else:
mesh = mesh_or_space
dg = df.FunctionSpace(mesh, "DG", 0)
if isinstance(value, (df.Constant, df.Expression)):
fun = df.interpolate(value, dg)
elif isinstance(value, (np.ndarray, list)):
fun = df.Function(dg)
assert(len(value) == fun.vector().size())
fun.vector().set_local(value)
elif isinstance(value, (int, float, long)):
fun = df.Function(dg)
fun.vector()[:] = value
elif isinstance(value, df.Function):
mesh1 = value.function_space().mesh()
fun = df.Function(dg)
if mesh_equal(mesh, mesh1) and value.vector().size() == fun.vector().size():
fun = value
else:
raise RuntimeError("Meshes are not compatible for given function.")
elif hasattr(value, '__call__'):
class HelperExpression(df.Expression):
def __init__(self, value, **kwargs):
self.fun = value
def eval(self, value, x):
value[0] = self.fun(x)
hexp = HelperExpression(value, degree=1)
fun = df.interpolate(hexp, dg)
else:
raise TypeError("Cannot set value of scalar-valued DG function from "
"argument of type '{}'".format(type(value)))
return fun
def vector_valued_dg_function(value, mesh_or_space, normalise=False):
"""
Create a vector function on the given mesh or VectorFunctionSpace.
If mesh_or_space is a FunctionSpace, it should be of type "DG"
(for "Lagrange" spaces use the function `scalar_valued_function`
instead). Returns an object of type 'df.Function'.
`value` can be any of the following (see `vector_valued_function`
for more details):
- a number
- numpy.ndarray or a common list
- dolfin.Constant or dolfin.Expression
- function (or any callable object)
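
    Example (illustrative; `mesh` is an existing dolfin mesh)::

        m = vector_valued_dg_function((0, 0, 1), mesh)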
"""
if isinstance(mesh_or_space, df.VectorFunctionSpace):
dg = mesh_or_space
mesh = dg.mesh()
else:
mesh = mesh_or_space
dg = df.VectorFunctionSpace(mesh, "DG", 0)
if isinstance(value, (df.Constant, df.Expression)):
fun = df.interpolate(value, dg)
elif isinstance(value, (tuple, list, np.ndarray)) and len(value) == 3:
# We recognise a sequence of strings as ingredient for a df.Expression.
if all(isinstance(item, basestring) for item in value):
expr = df.Expression(value, degree=1)
fun = df.interpolate(expr, dg)
else:
fun = df.Function(dg)
vec = np.empty((fun.vector().size() / 3, 3))
vec[:] = value # using broadcasting
fun.vector().set_local(vec.transpose().reshape(-1))
elif isinstance(value, (np.ndarray, list)):
fun = df.Function(dg)
assert(len(value) == fun.vector().size())
fun.vector().set_local(value)
elif isinstance(value, (int, float, long)):
fun = df.Function(dg)
fun.vector()[:] = value
elif isinstance(value, df.Function):
mesh1 = value.function_space().mesh()
fun = df.Function(dg)
if mesh_equal(mesh, mesh1) and value.vector().size() == fun.vector().size():
fun = value
else:
raise RuntimeError("Meshes are not compatible for given function.")
elif hasattr(value, '__call__'):
class HelperExpression(df.Expression):
def __init__(self, value, **kwargs):
super(HelperExpression, self).__init__()
self.fun = value
def eval(self, value, x):
value[:] = self.fun(x)[:]
def value_shape(self):
return (3,)
hexp = HelperExpression(value, degree=1)
fun = df.interpolate(hexp, dg)
else:
raise TypeError("Cannot set value of vector-valued DG function from "
"argument of type '{}'".format(type(value)))
if normalise:
fun.vector()[:] = fnormalise(fun.vector().array())
return fun
def value_for_region(mesh, value, region_no, default_value=0, project_to_CG=False):
"""
    Returns a dolfin.Function `f` (which by default is defined on the cells)
    such that the value of `f` is `value` on any cell whose region number (as
    stored in the mesh file, e.g. when produced by Netgen) is equal to `region_no`.
The value of `f` for all other cells will be set to `default_value`.
The returned function will be defined on the cells unless project_to_CG
has been set to True (then it will be defined on the nodes).
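
    Example (illustrative; `mesh` must carry cell region markers)::

        f = value_for_region(mesh, 42.0, region_no=1, default_value=0.0)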
"""
DG0 = df.FunctionSpace(mesh, "DG", 0)
f = df.Function(DG0)
regions = mesh.domains().cell_domains()
    for cell_no, cell_region_no in enumerate(regions.array()):
        # this assumes that the dofs are ordered like the regions information
        if cell_region_no == region_no:
f.vector()[cell_no] = value
else:
f.vector()[cell_no] = default_value
    if project_to_CG:
return df.project(f, df.FunctionSpace(mesh, "CG", 1))
return f
def restriction(mesh, submesh):
"""
Return a Python function `r` of the form
r: f -> f_submesh
whose first argument `f` is either a `dolfin.Function` or a
`numpy.array` and which returns another `dolfin.Function` or
`numpy.array` which has the same values as `f` but is only
defined on `submesh`.
`submesh` must be of type `dolfin.SubMesh` and be a proper
submesh of `mesh`.
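
    Example (illustrative; `f` is a CG1 dolfin.Function defined on `mesh`)::

        restrict = restriction(mesh, submesh)
        f_submesh = restrict(f)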
"""
if not isinstance(submesh, df.SubMesh):
raise TypeError("Argument 'submesh' must be of type `dolfin.SubMesh`. "
"Got: {}".format(type(submesh)))
try:
# This is the correct syntax now, see:
# http://fenicsproject.org/qa/185/entity-mapping-between-a-submesh-and-the-parent-mesh
parent_vertex_indices = submesh.data().array(
'parent_vertex_indices', 0)
except RuntimeError:
# Legacy syntax (for dolfin <= 1.2 or so).
# TODO: This should be removed in the future once dolfin 1.3 is
# released!
parent_vertex_indices = submesh.data().mesh_function(
'parent_vertex_indices').array()
V = df.FunctionSpace(mesh, 'CG', 1)
V_submesh = df.FunctionSpace(submesh, 'CG', 1)
def restrict_to_submesh(f):
# Remark: We can't use df.interpolate here to interpolate the
# function values from the full mesh on the submesh because it
# sometimes crashes (probably due to rounding errors), even if we
# set df.parameters["allow_extrapolation"]=True as they recommend
# in the error message.
#
# Therefore we manually interpolate the function values here using
# the vertex mappings determined above. This works fine if the
# dofs are not re-ordered, but will probably cause problems in
# parallel (or with dof reordering enabled).
if isinstance(f, np.ndarray):
if f.ndim == 1:
return f[parent_vertex_indices]
elif f.ndim == 2:
return f[:, parent_vertex_indices]
else:
raise TypeError(
"Array must be 1- or 2-dimensional. Got: dim={}".format(f.ndim))
else:
assert(isinstance(f, df.Function))
f_arr = f.vector().array()
f_submesh = df.Function(V_submesh)
f_submesh.vector()[:] = f_arr[parent_vertex_indices]
return f_submesh
return restrict_to_submesh
def mark_subdomain_by_function(fun, mesh_or_space, domain_index, subdomains):
"""
    Mark the subdomains with the given index if the user provides the
    region as a function, for example:
def region1(coords):
if coords[2]<0.5:
return 1
else:
return 0
"""
if isinstance(mesh_or_space, df.FunctionSpace):
dg = mesh_or_space
mesh = dg.mesh()
else:
mesh = mesh_or_space
if hasattr(fun, '__call__'):
cds = mesh.coordinates()
index = 0
for cell in df.cells(mesh):
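            # assumes a tetrahedral mesh: use the cell barycentre as probe point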
p1, p2, p3, p4 = cell.entities(0)
coord = (cds[p1] + cds[p2] + cds[p3] + cds[p4]) / 4.0
if fun(coord):
subdomains.array()[index] = domain_index
index += 1
else:
        raise AttributeError("Expected a callable as the 'fun' argument.")
def duplicate_output_to_file(filename, add_timestamp=False, timestamp_fmt='__%Y-%m-%d_%H.%M.%S'):
"""
Redirect all (future) output to a file with the given filename.
This redirects both to stdout and stderr.
If `add_timestamp` is True (default: False) then a timestamp will
be added to the filename indicating the time when the call to this
function occurred (for example, the filename 'output.txt' might be
changed into 'output_2012-01-01_14.33.52.txt'). The timestamp
format can be controlled via `timestamp_fmt`, which should be a
formatting string as accepted by `datetime.strftime`.
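
    Example::

        duplicate_output_to_file('output.txt', add_timestamp=True)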
"""
create_missing_directory_components(filename)
if add_timestamp:
name, ext = os.path.splitext(filename)
filename = '{}{}{}'.format(
name, datetime.strftime(datetime.now(), timestamp_fmt), ext)
logger.debug("Duplicating output to file '{}'".format(filename))
sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
tee = sp.Popen(["tee", filename], stdin=sp.PIPE)
os.dup2(tee.stdin.fileno(), sys.stdout.fileno())
os.dup2(tee.stdin.fileno(), sys.stderr.fileno())
def cartesian_to_spherical(vector):
"""
Converts cartesian coordinates to spherical coordinates.
    Returns a numpy array (r, theta, phi) where r is the radial distance,
    theta is the inclination (or elevation) and phi is the azimuth
    (ISO standard 31-11).
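
    Example::

        cartesian_to_spherical((0, 0, 1))   # -> array([1., 0., 0.])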
"""
r = np.linalg.norm(vector)
unit_vector = np.array(vector) / r
theta = np.arccos(unit_vector[2])
phi = np.arctan2(unit_vector[1], unit_vector[0])
return np.array((r, theta, phi))
def spherical_to_cartesian(v):
"""
Converts spherical coordinates to cartesian.
    Expects a sequence (r, theta, phi) of radial distance, inclination
    theta and azimuth phi.
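
    Example::

        spherical_to_cartesian((1, np.pi / 2, 0))   # -> approx. array([1., 0., 0.])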
"""
r, theta, phi = v
x = r * np.sin(theta) * np.cos(phi)
y = r * np.sin(theta) * np.sin(phi)
z = r * np.cos(theta)
return np.array((x, y, z))
def pointing_upwards((x, y, z)):
"""
Returns a boolean that is true when the vector is pointing upwards.
    Upwards is defined as having a polar angle of at most 45 degrees.
"""
_, theta, _ = cartesian_to_spherical((x, y, z))
return theta <= (np.pi / 4)
def pointing_downwards((x, y, z)):
"""
Returns a boolean that is true when the vector is pointing downwards.
Downwards is defined as having a polar angle between 135 and 225 degrees.
"""
_, theta, _ = cartesian_to_spherical((x, y, z))
return abs(theta - np.pi) < (np.pi / 4)
def piecewise_on_subdomains(mesh, mesh_function, fun_vals):
"""
Constructs and returns a dolfin Function which is piecewise constant on
certain subdomains of a mesh.
*Arguments*
mesh : dolfin Mesh
The mesh on which the new function will be defined.
mesh_function : dolfin MeshFunction
A function assigning to each cell the subdomain number to
which this cell belongs.
fun_vals : sequence
The function values for the returned function, where the first
value provided corresponds to the first region defined in the mesh
and so on.
*Returns*
    A dolfin function which is piecewise constant on subdomains and which on
    the subdomain with index `idx` assumes the value given by
    `fun_vals[idx - 1]` (i.e. subdomain indices are assumed to start at 1).
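
    Example (illustrative; cells marked 1 get the value 42.0, cells
    marked 2 get 23.0)::

        f = piecewise_on_subdomains(mesh, cell_markers, (42.0, 23.0))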
"""
V = df.FunctionSpace(mesh, 'DG', 0)
f = df.Function(V)
a = np.asarray(mesh_function.array() - 1, dtype=np.int32)
f.vector()[:] = np.choose(a, fun_vals)
return f
def vector_field_from_dolfin_function(f, xlims=None, ylims=None, zlims=None,
nx=None, ny=None, nz=None):
"""
Probes a (vector-valued) dolfin.Function `f`: R^3 -> R^3 on a
rectangular grid.
It returns six arrays `x`, `y`, `z`, `u`, `v`, `w` representing
the grid coordinates and vector field components, respectively,
which can be used for plotting, etc.
The arguments `xlims`, `ylims`, `zlims`, `nx`, `ny`, `nz` can be
used to control the extent and coarseness of the grid.
*Arguments*
xlims, ylims, zlims : pair of floats
The extent of the grid along the x-/y-/z-axis. If no value is
provided, the minimum/maximum mesh coordinates are used along
each axis.
nx, ny, nz : int
        The number of grid points along the x-/y-/z-axis. If no value
        is provided, a sensible default is derived from the average
        cell size of the mesh.
*Returns*
The arrays `x`, `y`, `z`, `u`, `v`, `w`. Each of these has shape
(nx, ny, nz). The first three are the same as would be returned by
the command:
numpy.mgrid[xmin:xmax:nx*1j, ymin:ymax:ny*1j, zmin:zmax:nz*1j]
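
    Example (illustrative)::

        X, Y, Z, U, V, W = vector_field_from_dolfin_function(
            f, xlims=(0, 1), ylims=(0, 1), zlims=(0, 1), nx=10, ny=10, nz=10)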
"""
mesh = f.function_space().mesh()
coords = mesh.coordinates()
def _find_limits(limits, i):
        return limits if (limits is not None) else (min(coords[:, i]), max(coords[:, i]))
(xmin, xmax) = _find_limits(xlims, 0)
(ymin, ymax) = _find_limits(ylims, 1)
(zmin, zmax) = _find_limits(zlims, 2)
print "Limits:"
print "xmin, xmax: {}, {}".format(xmin, xmax)
print "ymin, ymax: {}, {}".format(ymin, ymax)
print "zmin, zmax: {}, {}".format(zmin, zmax)
    if nx is None or ny is None or nz is None:
raise NotImplementedError("Please provide specific values "
"for nx, ny, nz for now.")
X, Y, Z = np.mgrid[xmin:xmax:nx * 1j, ymin:ymax:ny * 1j, zmin:zmax:nz * 1j]
U = np.empty((nx, ny, nz))
V = np.empty((nx, ny, nz))
W = np.empty((nx, ny, nz))
for i in xrange(nx):
for j in xrange(ny):
for k in xrange(nz):
val = f(X[i, j, k], Y[i, j, k], Z[i, j, k])
U[i, j, k] = val[0]
V[i, j, k] = val[1]
W[i, j, k] = val[2]
return X, Y, Z, U, V, W
def probe(dolfin_function, points, apply_func=None):
"""
Probe the dolfin function at the given points.
*Arguments*
dolfin_function: dolfin.Function
A dolfin function.
points: numpy.array
An array of points where the field should be probed. Can
have arbitrary shape, except that the last axis must have
        dimension 3. For example, if points.shape == (10, 20, 5, 3) then
the field is probed at all points on a regular grid of
size 10 x 20 x 5.
apply_func: any callable
Optional function to be applied to the returned values. If not
provided, the values are returned unchanged.
*Returns*
    A numpy.ma.masked_array of the same shape as `points`, where the last
    axis contains the field values instead of the point locations (or
    `apply_func` applied to the field values in case it is provided).
    Elements in the output array corresponding to probing points outside
    the mesh are masked out.
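
    Example (illustrative; values at points outside the mesh are masked out)::

        vals = probe(dolfin_function, [[0.5, 0.5, 0.5], [10.0, 0.0, 0.0]])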
"""
points = np.array(points)
if not points.shape[-1] == 3:
        raise ValueError(
            "Argument 'points' must be a numpy array of 3D points, "
            "i.e. the last axis must have dimension 3. Shape of "
            "'points' is: {}".format(points.shape))
    if apply_func is None:
# use the identity operation by default
apply_func = lambda x: x
output_shape = np.array(apply_func([0, 0, 0])).shape
res = np.ma.empty(points.shape[:-1] + output_shape)
# N.B.: setting the mask to a full matrix right from the start (as
# we do in the next line) might be slightly memory-inefficient; if
# that becomes a problem we can always set it to 'np.ma.nomask'
# here, but then we need a check for res.mask == np.ma.nomask in
# the 'except' branch below and set it to a full mask if we
# actually need to mask out any values during the loop.
res.mask = np.zeros_like(res, dtype=bool)
loop_indices = itertools.product(*map(xrange, points.shape[:-1]))
for idx in loop_indices:
try:
# XXX TODO: The docstring of a df.Function says at the very
# end that it's possible to pass (slices of) a larger array
# in order to fast fill up an array with multiple evaluations.
# This might be worth investigating!
#
# Alternatively, it may be good to write special helper functions
# For the most common cases Nx3 and (nx x ny x nz x 3). Or can
# we even reshape the array in the beginning and then only use the
# first case?
pt = points[idx]
res[idx] = apply_func(dolfin_function(pt))
except RuntimeError:
res.mask[idx] = True
return res
def probe_along_line(dolfin_function, pt_start, pt_end, N, apply_func=None):
"""
Probe the dolfin function at the `N` equidistant points along a straight
line connecting `pt_start` and `pt_end`.
*Arguments*
dolfin_function: dolfin.Function
A dolfin function.
pt_start, pt_end:
        Start and end point of the straight line along which to probe.
N: int
Number of probing points.
apply_func: any callable
Optional function to be applied to the returned values. If not
provided, the values are returned unchanged.
*Returns*
A tuple `(pts, vals)` where `pts` is the list of probing points
(i.e., the `N` equidistant points between `pt_start` and `pt_end`)
and `vals` is a numpy.ma.masked_array of shape `(N, 3)` containing
    the field values at the probed points (or `apply_func` applied to
    the field values in case it is provided). Elements in the output
    array corresponding to probing points outside the mesh are masked out.
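
    Example (illustrative)::

        pts, vals = probe_along_line(dolfin_function, (-10, 0, 0), (10, 0, 0), N=20)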
"""
pt_start = np.asarray(pt_start)
pt_end = np.asarray(pt_end)
pts = np.array(
[(1 - t) * pt_start + t * pt_end for t in np.linspace(0, 1, N)])
vals = probe(dolfin_function, pts, apply_func=apply_func)
return pts, vals
def compute_dmdt(t0, m0, t1, m1):
"""
Returns the maximum of the L2 norm of dm/dt.
Arguments:
t0, t1: two points in time (floats)
m0, m1: the magnetisation at t0, resp. t1 (np.arrays of shape 3*n)
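
    Example::

        max_dmdt = compute_dmdt(0.0, m0, 1e-12, m1)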
"""
dm = (m1 - m0).reshape((3, -1))
max_dm = np.max(np.sqrt(np.sum(dm ** 2, axis=0))) # max of L2-norm
dt = abs(t1 - t0)
max_dmdt = max_dm / dt
return max_dmdt
def npy_to_dolfin_function(filename, mesh_or_space):
"""
Create a dolfin function on the given mesh or function space whose
coefficients are stored in the given file.
*Arguments*
filename : str
The .npy file in which the function coefficients are stored.
mesh_or_space :
Either a dolfin.Mesh or a dolfin.VectorFunctionSpace of dimension 3.
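
    Example (illustrative filename and mesh)::

        fun = npy_to_dolfin_function('m_values.npy', mesh)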
"""
# XXX TODO: Implement a test which writes a function, then reads
# it back in and checks that it's the same. Also vice versa.
a = np.load(filename)
_, V = mesh_and_space(mesh_or_space)
fun = df.Function(V)
fun.vector().set_local(a)
return fun
def average_field(field_vals):
"""
    Return the average value of the given field. `field_vals` must be
    a numpy.array of shape (3*n,) following the dolfin convention for
    the field values (i.e. all x-components first, then y, then z).
"""
assert field_vals.ndim == 1
field_vals.shape = (3, -1)
av = np.average(field_vals, axis=1)
field_vals.shape = (-1,)
return av
def save_dg_fun(fun, name='unnamed.vtk', dataname='m', binary=False):
"""
    Save a DG function to a vtk file using cell data.
    Note: saving a DG function to vtk does not seem to work properly;
    ParaView does not seem to like cell data (see `save_dg_fun_points`
    for a variant that uses point data).
"""
import pyvtk
funspace = fun.function_space()
mesh = funspace.mesh()
points = mesh.coordinates()
tetras = np.array(mesh.cells(), dtype=np.int)
grid = pyvtk.UnstructuredGrid(points,
tetra=tetras)
m = fun.vector().array()
m.shape = (3, -1)
print m
data = pyvtk.CellData(pyvtk.Vectors(np.transpose(m)))
m.shape = (-1,)
vtk = pyvtk.VtkData(grid, data, dataname)
if binary:
vtk.tofile(name, 'binary')
else:
vtk.tofile(name)
def save_dg_fun_points(fun, name='unnamed.vtk', dataname='m', binary=False):
"""
    Save a DG function to a vtk file using point data, since saving
    DG functions to vtk as cell data does not seem to work properly.
"""
import pyvtk
V = fun.function_space()
mesh = V.mesh()
points = []
for cell in df.cells(mesh):
points.append(V.dofmap().tabulate_coordinates(cell)[0])
verts = [i for i in range(len(points))]
grid = pyvtk.UnstructuredGrid(points,
vertex=verts)
m = fun.vector().array()
m.shape = (3, -1)
data = pyvtk.PointData(pyvtk.Vectors(np.transpose(m), dataname))
m.shape = (-1,)
vtk = pyvtk.VtkData(grid, data, 'Generated by Finmag')
if binary:
vtk.tofile(name, 'binary')
else:
vtk.tofile(name)
def vec2str(a, fmt='{}', delims='()', sep=', '):
"""
Convert a 3-sequence (e.g. a numpy array) to a string, optionally
with some formatting options. The argument `a` is also allowed to
have the value `None`, in which case the string 'None' is returned.
The argument `delims` can be used to specify different left and right
delimiters (default: opening and closing parentheses). If only one
delimiter is given (e.g. "|") then this is used both as left and right
delimiter. If `delims` is empty, no delimiters will be used.
Examples::
a = numpy.array([1, 200, 30000])
vec2str(a) --> (1, 200, 30000)
vec2str(a, fmt='{:.3g}') --> (1, 200, 3e+04)
vec2str(a, fmt='{:.2f}') --> (1.00, 200.00, 30000.00)
vec2str(a, delims='[]') --> [1, 200, 30000]
vec2str(a, delims='|', sep='__') --> |1__200__30000|
vec2str(a, delims='', sep=' - ') --> 1 - 200 - 30000
"""
if a is None:
res = 'None'
else:
try:
ldelim = delims[0]
except IndexError:
ldelim = ""
try:
rdelim = delims[1]
except IndexError:
rdelim = ldelim
res = ("{ldelim}{fmt}{sep}{fmt}{sep}{fmt}{rdelim}".format(
fmt=fmt, ldelim=ldelim, rdelim=rdelim, sep=sep)).format(a[0], a[1], a[2])
return res
def pairwise(iterable):
"""
s -> (s0,s1), (s1,s2), (s2, s3), ...
"""
a, b = itertools.tee(iterable)
next(b, None)
return itertools.izip(a, b)
def apply_vertexwise(f, *args):
"""
Apply the function f to the values of the given arguments
at each vertex separately and aggregate the result into a
new dolfin.Function.
*Arguments*
f : callable
The function that is to be applied to the vertex values.
    \*args: collection of dolfin.Functions
        The fields to which `f` is applied vertexwise.
"""
# XXX TODO: Some of this is horribly inefficient. Should clean it up...
mesh = args[0].function_space().mesh()
# XXX TODO: the following check(s) seems to fail even if the meshes are the same. How to do this properly in dolfin?
# assert(all((mesh == u.function_space().mesh() for u in args))) # check that all meshes coincide
# assert(all(u.function_space().mesh() == u.function_space().mesh() for
# (u, v) in pairwise(args))) # check that all meshes coincide
# Extract the array for each dolfin.Function
aa = [u.vector().array() for u in args]
#V = args[0].function_space()
# assert(all([V == a.function_space() for a in aa])) # XXX TODO: how to
# deal with functions defined on different function spaces?!?
# Reshape each array according to the dimension of the VectorFunctionSpace
dims = [u.domain().geometric_dimension() for u in args]
aa_reshaped = [
a.reshape(dim, -1).T for (a, dim) in itertools.izip(aa, dims)]
# Evaluate f on successive rows of the reshaped arrays
aa_evaluated = [f(*args) for args in itertools.izip(*aa_reshaped)]
#import ipdb; ipdb.set_trace()
try:
dim_out = len(aa_evaluated[0])
except TypeError:
dim_out = None
if dim_out is None:
W = df.FunctionSpace(mesh, 'CG', 1)
else:
assert(all(dim_out == len(x) for x in aa_evaluated))
# XXX TODO: should we use df.FunctionSpace if dim_out == 1 ?
W = df.VectorFunctionSpace(mesh, 'CG', 1, dim=dim_out)
res = df.Function(W)
res.vector().set_local(np.array(aa_evaluated).T.reshape(-1))
return res
class TemporaryDirectory(object):
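    """
    Context manager which creates a temporary directory on entering the
    'with' block and deletes it again on exit (unless `keep=True` is
    passed). A minimal usage sketch::

        with TemporaryDirectory() as tmpdir:
            pass  # files created under tmpdir are deleted afterwards
    """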
def __init__(self, keep=False):
self.keep = keep
def __enter__(self):
self.tmpdir = tempfile.mkdtemp()
return self.tmpdir
def __exit__(self, type, value, traceback):
if not self.keep:
shutil.rmtree(self.tmpdir)
self.tmpdir = None
class run_in_tmpdir(object):
def __init__(self, keep=False):
self.keep = keep
self.cwd_bak = os.getcwd()
def __enter__(self):
self.tmpdir = tempfile.mkdtemp()
os.chdir(self.tmpdir)
return self.tmpdir
def __exit__(self, type, value, traceback):
if not self.keep:
shutil.rmtree(self.tmpdir)
self.tmpdir = None
os.chdir(self.cwd_bak)
@contextmanager
def ignored(*exceptions):
"""
Ignore the given exceptions within the scope of the context.
Example::
with ignored(OSError):
os.remove('non_existing_file.txt')
"""
try:
yield
except exceptions:
pass
def run_cmd_with_timeout(cmd, timeout_sec):
"""
Runs the given shell command but kills the spawned subprocess
if the timeout is reached.
    Returns a tuple (returncode, stdout, stderr). Raises OSError if
the command does not exist. If the timeout is reached and the
process is killed, the return code is -9.
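
    Example::

        returncode, stdout, stderr = run_cmd_with_timeout('echo hello', timeout_sec=10)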
"""
proc = sp.Popen(shlex.split(cmd), stdout=sp.PIPE, stderr=sp.PIPE)
kill_proc = lambda p: p.kill()
timer = Timer(timeout_sec, kill_proc, [proc])
timer.start()
stdout, stderr = proc.communicate()
timer.cancel()
return proc.returncode, stdout, stderr
def jpg2avi(jpg_filename, outfilename=None, duration=1, fps=25):
"""
Convert a series of .jpg files into an animation file in .avi format.
*Arguments*
jpg_filename:
The 'basename' of the series of image files. For example, if
the image files are called 'foo_0000.jpg', 'foo_0001.jpg', etc.
then `jpg_filename` should be 'foo.jpg'. Internally, the image
files are found via a wildcard expression by replacing the suffix
``.jpg`` with ``*.jpg``, so the basename could also have been
``foo_.jpg`` or even ``f.jpg``. (However, it should be restrictive
enough so that only the desired images are found.)
outfilename:
The filename of the resulting .avi file. If None (the default),
uses the basename of the .jpg file.
duration:
Duration of the created animation (in seconds).
fps:
Framerate (in frames per second) of the created animation.
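
    Example (illustrative filenames)::

        jpg2avi('foo/quux.jpg', outfilename='animation.avi', duration=10, fps=25)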
"""
if not jpg_filename.endswith('.jpg'):
raise ValueError(
"jpg_filename must end in '.jpg'. Got: '{}'".format(jpg_filename))
pattern = re.sub('\.jpg$', '*.jpg', jpg_filename)
pattern_escaped = re.sub('\.jpg$', '\*.jpg', jpg_filename)
jpg_files = sorted(glob(pattern))
logger.debug('Found {} jpg files.'.format(len(jpg_files)))
if outfilename is None:
outfilename = re.sub('\.jpg$', '.avi', jpg_filename)
logger.debug("Using outfilename='{}'".format(outfilename))
create_missing_directory_components(outfilename)
# Use mencoder with two-pass encoding to convert the files.
# See http://mariovalle.name/mencoder/mencoder.html
try:
mencoder_options = "vbitrate=2160000:mbd=2:keyint=132:v4mv:vqmin=3:lumi_mask=0.07:dark_mask=0.2:mpeg_quant:scplx_mask=0.1:tcplx_mask=0.1:naq"
sh.mencoder('-ovc', 'lavc', '-lavcopts',
'vcodec=mpeg4:vpass=1:' + mencoder_options,
                    '-mf', 'type=jpg:fps={}'.format(fps / duration),
                    '-nosound', '-o', '/dev/null', 'mf://' + pattern)
sh.mencoder('-ovc', 'lavc', '-lavcopts',
'vcodec=mpeg4:vpass=2:' + mencoder_options,
                    '-mf', 'type=jpg:fps={}'.format(fps / duration),
                    '-nosound', '-o', outfilename, 'mf://' + pattern)
        os.remove('divx2pass.log')  # tidy up output from the two-pass encoding
except sh.CommandNotFound:
logger.error("mencoder does not seem to be installed but is needed for "
"movie creation. Please install it (e.g. on Debian/Ubuntu: "
"'sudo apt-get install mencoder').")
except sh.ErrorReturnCode as exc:
logger.warning(
"mencoder had non-zero exit status: {} (diagnostic message: '{}')".format(exc.exit_code, exc.message))
def pvd2avi(pvd_filename, outfilename=None, duration=1, fps=25, **kwargs):
"""
Export a .pvd animation to a movie file in .avi format.
*Arguments*
pvd_filename:
The name of the .pvd file to be converted.
outfilename:
The filename of the resulting .avi file. If None (the default),
the basename of the .pvd file is used.
duration:
Duration of the created animation (in seconds).
fps:
Framerate (in frames per second) of the created animation.
All other keyword arguments are passed on to the function
`finmag.util.visualization.render_paraview_scene` and can
be used to tweak the appearance of the animation.
"""
if not pvd_filename.endswith('.pvd'):
raise ValueError(
"pvd_filename must end in '.pvd'. Got: '{}'".format(pvd_filename))
    if outfilename is None:
outfilename = re.sub('\.pvd$', '.avi', pvd_filename)
    if kwargs.get('trim_border') == True:
logger.warning(
"Cannot use 'trim_border=True' when converting a .pvd time series to .avi; using 'trim_border=False'.")
kwargs['trim_border'] = False
with TemporaryDirectory() as tmpdir:
jpg_tmpfilename = os.path.join(tmpdir, 'animation.jpg')
render_paraview_scene(pvd_filename, outfile=jpg_tmpfilename, **kwargs)
jpg2avi(jpg_tmpfilename, outfilename=outfilename,
duration=duration, fps=fps)
def warn_about_outdated_code(min_dolfin_version, msg):
"""
If the current dolfin version is >= min_dolfin_version, print the
given warning message. This is useful to warn about outdated code
which is temporarily kept for backwards compatibility but should
eventually be removed. Remember that the call to this function
should of course occur from the new code that will be executed in
later dolfin versions (otherwise the warning will never get
printed when it's relevant).
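
    Example (illustrative message)::

        warn_about_outdated_code('1.3.0', "Remove this workaround once "
                                 "support for dolfin < 1.3.0 is dropped.")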
"""
if LooseVersion(get_version_dolfin()) >= LooseVersion(min_dolfin_version):
logger.warning(msg)
def format_time(num_seconds):
"""
Given a number of seconds, return a string with `num_seconds`
converted into a more readable format (including minutes and
hours if appropriate).
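
    Example::

        format_time(3750.5)   # -> '1 h 2 min 30.50 seconds'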
"""
hours = int(num_seconds / 3600.0)
r = num_seconds - 3600 * hours
minutes = int(r / 60.0)
seconds = r - 60 * minutes
res = "{} h ".format(hours) if (hours > 0) else ""
res += "{} min ".format(minutes) if (minutes >
0 or (minutes == 0 and hours > 0)) else ""
res += "{:.2f} seconds".format(seconds)
return res
def make_human_readable(nbytes):
"""
Given a number of bytes, return a string of the form "12.2 MB" or "3.44 GB"
which makes the number more digestible by a human reader. Everything less
than 500 MB will be displayed in units of MB, everything above in units of GB.
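
    Example::

        make_human_readable(3 * 1024 ** 3)   # -> '3.00 GB'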
"""
    if nbytes < 500 * 1024 ** 2:
        res = '{:.2f} MB'.format(nbytes / 1024.0 ** 2)
    else:
        res = '{:.2f} GB'.format(nbytes / 1024.0 ** 3)
return res
def print_boundary_element_matrix_size(mesh, generalised=False):
"""
Given a 3D mesh, print the amount of memory that the boundary element matrix
will occupy in memory. This is useful when treating very big problems in order
to "interactively" adjust a mesh until the matrix fits in memory.
"""
bm = df.BoundaryMesh(mesh, 'exterior', False)
N = bm.num_vertices()
byte_size_float = np.zeros(1, dtype=float).nbytes
memory_usage = N ** 2 * byte_size_float
logger.debug("Boundary element matrix for mesh with {} vertices and {} "
"surface nodes will occupy {} in memory.".format(
mesh.num_vertices(), N, make_human_readable(memory_usage)))
def build_maps(functionspace, dim=3, scalar=False):
v2d_xyz = df.vertex_to_dof_map(functionspace)
d2v_xyz = df.dof_to_vertex_map(functionspace)
# Since version 2016, Dolfin returns the constrained boundary
# elements in the dof_to_vertex_map(). They are added as
# mapped-to-zero indices at the end. Simply cut them off.
d2v_xyz = np.trim_zeros(d2v_xyz, trim='b')
n1, n2 = len(v2d_xyz), len(d2v_xyz)
v2d_xxx = ((v2d_xyz.reshape(int(n1/dim), dim)).transpose()).reshape(-1,)
d2v_xxx = d2v_xyz.copy()
for i in xrange(n2):
j = d2v_xyz[i]
d2v_xxx[i] = (j%dim)*n1/dim + (j/dim)
n = n1 - n2
"""
#in the presence of pbc, n1 > n2, here we try to reduce the length of v2d_xyz.
if n>0:
#next, we reduce the length of v2d_xyz to n2
a = []
b = set()
for x in v2d_xyz:
if x not in b:
b.add(x)
a.append(x)
assert(len(a) == n2)
v2d_xyz2 = np.array(a)
#we need both d2v_xyz and v2d_xyz2 to make sure the values in d2v_xyz is less than n2.
d2v_xyz2 = d2v_xyz.copy()
for i in range(n2):
if d2v_xyz[i]>n2:
j = v2d_xyz[d2v_xyz[i]]
for k in range(n2):
if v2d_xyz2[k] == j:
d2v_xyz2[i] = k
break
v2d_xxx2 = ((v2d_xyz2.reshape(n2/dim, dim)).transpose()).reshape(-1,)
d2v_xxx2 = d2v_xyz2.copy()
for i in xrange(n2):
j = d2v_xyz2[i]
d2v_xxx2[i] = (j%dim)*n2/dim + (j/dim)
"""
if scalar:
return v2d_xyz, d2v_xyz
    #we then build new mappings for the ordering xxx rather than xyz
#return v2d_xyz2, v2d_xxx2, d2v_xyz2, d2v_xxx2
return v2d_xyz, v2d_xxx, d2v_xyz, d2v_xxx
def get_source(file):
if file.endswith(('.pyc', '.pyo')):
file = file[:-1]
with open(file, 'rb') as f:
return f.read()
| 60,902 | 31.326433 | 149 |
py
|
finmag
|
finmag-master/src/finmag/util/length_scales_test.py
|
import dolfin as df
import numpy as np
import length_scales as ls
from finmag.field import Field
from finmag.util.consts import mu0
class TestLengthScales(object):
def setup(self):
# Create a 3d mesh.
self.mesh3d = df.UnitCubeMesh(11, 10, 10)
# Create a DG scalar function space.
        self.functionspace = df.FunctionSpace(self.mesh3d, "DG", 1)
def test_exchange_length_constant(self):
A = Field(self.functionspace, 2/mu0)
Ms = Field(self.functionspace, 1/mu0)
lex = ls.exchange_length(A, Ms)
assert np.allclose(lex.get_numpy_array_debug(), 2)
def test_exchange_length_varying(self):
A_expression = df.Expression('4/mu0*x[0] + 1e-100', mu0=mu0, degree=1)
Ms_expression = df.Expression('2/mu0*x[0] + 1e-100', mu0=mu0, degree=1)
A = Field(self.functionspace, A_expression)
Ms = Field(self.functionspace, Ms_expression)
lex = ls.exchange_length(A, Ms)
assert abs(lex.probe((0.5, 0.5, 0.5)) - 2) < 0.05
def test_bloch_parameter_constant(self):
A = Field(self.functionspace, 2)
K1 = Field(self.functionspace, 0.5)
bloch_parameter = ls.bloch_parameter(A, K1)
assert np.allclose(bloch_parameter.get_numpy_array_debug(), 2)
def test_bloch_parameter_varying(self):
A_expression = df.Expression('4*x[0] + 1e-100', degree=1)
K1_expression = df.Expression('x[0] + 1e-100', degree=1)
A = Field(self.functionspace, A_expression)
K1 = Field(self.functionspace, K1_expression)
bloch_parameter = ls.bloch_parameter(A, K1)
assert abs(bloch_parameter.probe((0.5, 0.5, 0.5)) - 2) < 0.05
def test_helical_period_constant(self):
A = Field(self.functionspace, 1/np.pi)
D = Field(self.functionspace, 4)
helical_period = ls.helical_period(A, D)
assert np.allclose(helical_period.get_numpy_array_debug(), 1)
def test_helical_period_varying(self):
A_expression = df.Expression('2/pi*x[0] + 1e-100', pi=np.pi, degree=1)
D_expression = df.Expression('8*x[0] + 1e-100', degree=1)
A = Field(self.functionspace, A_expression)
D = Field(self.functionspace, D_expression)
helical_period = ls.helical_period(A, D)
assert abs(helical_period.probe((0.5, 0.5, 0.5)) - 1) < 0.05
| 2,405 | 31.958904 | 79 |
py
|
finmag
|
finmag-master/src/finmag/util/helpers_test.py
|
import numpy as np
import dolfin as df
import tempfile
import logging
import pytest
import os
import re
from finmag.util.helpers import *
from finmag.util.meshes import box, cylinder
from finmag.util.mesh_templates import Sphere
from finmag.util.visualization import render_paraview_scene
from finmag.example import barmini
import finmag
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
TOLERANCE = 1e-15
def test_logging_handler_str():
"""
"""
hdlr = logging.NullHandler()
hdlr_str = logging_handler_str(hdlr)
print hdlr_str
assert(re.match("^<logging.NullHandler object at .*>$", hdlr_str) != None)
def test_logging_status_str():
"""
Test that we can call the function `logging_status_str()` and it returns a
non-empty string.
"""
status_str = logging_status_str()
print status_str
assert(isinstance(status_str, str) and (status_str != ""))
def test_components():
x = np.array([1, 1, 1, 2, 2, 2, 3, 3, 3])
y = np.array([[1, 1, 1], [2, 2, 2], [3, 3, 3]])
assert np.array_equal(y, components(x))
def test_vectors():
x1 = np.array([1, 1, 2, 2, 3, 3])
y1 = np.array([[1, 2, 3], [1, 2, 3]])
assert np.array_equal(y1, vectors(x1))
x2 = np.array([0, 1, 1, 0, 2, 3, 3, 2, 4, 5, 5, 4])
y2 = np.array([[0, 2, 4], [1, 3, 5], [1, 3, 5], [0, 2, 4]])
assert np.array_equal(y2, vectors(x2))
def test_norms():
v = [1, 1, 0]
assert abs(norm(v) - np.sqrt(2)) < TOLERANCE
v = np.array([[1, 1, 0], [1, -2, 3]])
assert np.allclose(
norm(v), np.array([np.sqrt(2), np.sqrt(14)]), rtol=TOLERANCE)
def test_fnormalise():
a = np.array([1., 1., 2., 2., 0., 0.])
norm = np.sqrt(1 + 2 ** 2 + 0 ** 2)
expected = a[:] / norm
assert np.allclose(fnormalise(a), expected, rtol=TOLERANCE)
a = np.array([1., 2., 0, 0., 1., 3.])
n1 = np.sqrt(1 + 0 + 1)
n2 = np.sqrt(2 ** 2 + 0 + 3 ** 2)
expected = a[:] / np.array([n1, n2, n1, n2, n1, n2])
assert np.allclose(fnormalise(a), expected, rtol=TOLERANCE)
a = np.array([5 * [1.], 5 * [0], 5 * [0]])
expected = a.copy().ravel()
assert np.allclose(fnormalise(a), expected, rtol=TOLERANCE)
a2 = np.array([5 * [2.], 5 * [0], 5 * [0]])
assert np.allclose(fnormalise(a2), expected, rtol=TOLERANCE)
#a3=np.array([0,0,3,4., 0,2,0,5, 1,0,0,0])
# this is 0 0 3 4
# 0 2 0 5
# 1 0 0 0
#
# can also write as
a3 = np.array([[0, 0, 1.], [0, 2, 0], [3, 0, 0], [4, 5, 0]]).transpose()
c = np.sqrt(4 ** 2 + 5 ** 2)
expected = np.array([0, 0, 1, 4 / c, 0, 1, 0, 5 / c, 1, 0, 0, 0])
print "a3=\n", a3
print "expected=\n", expected
print "fnormalise(a3)=\n", fnormalise(a3)
assert np.allclose(fnormalise(a3), expected, rtol=TOLERANCE)
# check that normalisation also works if input vector happens to be an
# integer array
# first with floats
a4 = np.array([0., 1., 1.])
c = np.sqrt(1 ** 2 + 1 ** 2) # sqrt(2)
expected = np.array([0, 1 / c, 1 / c])
print "a4=\n", a4
print "expected=\n", expected
print "fnormalise(a4)=\n", fnormalise(a4)
assert np.allclose(fnormalise(a4), expected, rtol=TOLERANCE)
# the same test with ints (i.e.
a5 = np.array([0, 1, 1])
expected = a5 / np.sqrt(2)
assert np.allclose(fnormalise(a5), expected, rtol=TOLERANCE)
# test that zero vectors in the input result in NaN if 'ignore_zero_vectors=False'
a6 = np.array([2, 0, 0, 0, 0, 0])
a6_normalised = fnormalise(a6)
assert a6_normalised.shape == (6,)
assert np.allclose(a6_normalised[[0, 2, 4]], [1, 0, 0])
assert np.isnan(a6_normalised[[1, 3, 5]]).all()
    # test that zero vectors in the input are left as zero if 'ignore_zero_vectors=True'
a7 = np.array([3, 0, 4, 0, 0, 0])
expected = np.array([0.6, 0, 0.8, 0, 0, 0])
assert np.allclose(fnormalise(a7, ignore_zero_vectors=True), expected, rtol=TOLERANCE)
def test_vector_valued_function():
"""
Test that the different ways of initialising a vector-valued
function on a 3d mesh work and that they produce the expected
results.
"""
mesh = df.UnitCubeMesh(2, 2, 2)
# shift mesh coords to avoid dividing by zero when normalising below
mesh.coordinates()[:] = mesh.coordinates() + 1.0
S3 = df.VectorFunctionSpace(mesh, "Lagrange", 1, dim=3)
num_vertices = mesh.num_vertices()
vec = np.array([3, 1, 4]) # an arbitrary vector
a = 42
b = 5
c = 23
# Reference vector for the constant-valued functions
x = np.empty((num_vertices, 3))
x[:] = vec
v_ref = x.transpose().reshape(-1)
v_ref_normalised = fnormalise(v_ref[:])
# Reference vector for f_expr and f_callable
v_ref_expr = (mesh.coordinates() * [a, b, c]).transpose().reshape((-1,))
v_ref_expr_normalised = fnormalise(v_ref_expr)
# Create functions using the various methods
f_tuple = vector_valued_function(tuple(vec), S3) # 3-tuple
f_list = vector_valued_function(list(vec), S3) # 3-list
# numpy array representing a 3-vector
f_array3 = vector_valued_function(np.array(vec), S3)
# df. Constant representing a 3-vector
f_dfconstant = vector_valued_function(df.Constant(vec), S3)
# tuple of strings (will be cast to df.Expression)
f_expr = vector_valued_function(
('a*x[0]', 'b*x[1]', 'c*x[2]'), S3, a=a, b=b, c=c)
# numpy array of nodal values shape (3*n,)
f_array3xN = vector_valued_function(v_ref, S3)
f_arrayN3 = vector_valued_function(
np.array([vec for r in mesh.coordinates()]), S3) # numpy array of shape (n, 3)
# f_callable = vector_valued_function(lambda coords: v_ref_expr, S3) # callable accepting mesh node coordinates and yielding the function values
# Cython 0.17.1 does not like this
# f_callable = vector_valued_function(lambda (x,y,z): (a*x, b*y, c*z), S3) # callable accepting mesh node coordinates and yielding the function values
# but this one is okay
# callable accepting mesh node coordinates and yielding the function values
f_callable = vector_valued_function(
lambda t: (a * t[0], b * t[1], c * t[2]), S3)
# A few normalised versions, too
f_tuple_normalised = vector_valued_function(tuple(vec), S3, normalise=True)
f_expr_normalised = vector_valued_function(
('a*x[0]', 'b*x[1]', 'c*x[2]'), S3, a=a, b=b, c=c, normalise=True)
# Cython 0.17.1 does not like this
#f_callable_normalised = vector_valued_function(lambda (x,y,z): (a*x, b*y, c*z), S3, normalise=True)
# but accepts this rephrased version:
f_callable_normalised = vector_valued_function(
lambda t: (a * t[0], b * t[1], c * t[2]), S3, normalise=True)
# Check that the function vectors are as expected
#import ipdb; ipdb.set_trace()
assert(all(f_tuple.vector() == v_ref))
assert(all(f_list.vector() == v_ref))
assert(all(f_array3.vector() == v_ref))
assert(all(f_dfconstant.vector() == v_ref))
assert(all(f_expr.vector() == v_ref_expr))
assert(all(f_array3xN.vector() == v_ref))
assert(all(f_arrayN3.vector() == v_ref))
assert(all(f_callable.vector() == v_ref_expr))
assert(all(f_tuple_normalised.vector() == v_ref_normalised))
print "[DDD] #1: {}".format(f_expr_normalised.vector().array())
print "[DDD] #2: {}".format(v_ref_expr_normalised)
assert(all(f_expr_normalised.vector() == v_ref_expr_normalised))
assert(all(f_callable_normalised.vector() == v_ref_expr_normalised))
def test_scalar_valued_dg_function():
mesh = df.UnitCubeMesh(2, 2, 2)
def init_f(coord):
x, y, z = coord
if z <= 0.5:
return 1
else:
return 10
f = scalar_valued_dg_function(init_f, mesh)
assert f(0, 0, 0.51) == 10.0
assert f(0.5, 0.7, 0.51) == 10.0
assert f(0.4, 0.3, 0.96) == 10.0
assert f(0, 0, 0.49) == 1.0
fa = f.vector().array().reshape(2, -1)
assert np.min(fa[0]) == np.max(fa[0]) == 1
assert np.min(fa[1]) == np.max(fa[1]) == 10
dg = df.FunctionSpace(mesh, "DG", 0)
dgf = df.Function(dg)
dgf.vector()[0] = 9.9
f = scalar_valued_dg_function(dgf, mesh)
assert f.vector().array()[0] == 9.9
def test_angle():
assert abs(angle([1, 0, 0], [1, 0, 0])) < TOLERANCE
assert abs(angle([1, 0, 0], [0, 1, 0]) - np.pi / 2) < TOLERANCE
assert abs(angle([1, 0, 0], [1, 1, 0]) - np.pi / 4) < TOLERANCE
def test_rows_to_columns():
x = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3]])
y = np.array([[1, 1, 1], [2, 2, 2], [3, 3, 3]])
assert np.array_equal(y, rows_to_columns(x))
def test_cartesian_to_spherical():
hapi = np.pi / 2
test_vectors = np.array((
(1, 0, 0), (0, 1, 0), (0, 0, 1),
(-1, 0, 0), (0, -2, 0), (0, 0, -1)))
expected = np.array((
(1, hapi, 0), (1, hapi, hapi), (1, 0, 0),
(1, hapi, np.pi), (2, hapi, -hapi), (1, np.pi, 0)))
for i, v in enumerate(test_vectors):
v_spherical = cartesian_to_spherical(v)
print "Testing vector {}. Got {}. Expected {}.".format(v, v_spherical, expected[i])
assert np.max(np.abs(v_spherical - expected[i])) < TOLERANCE
def test_pointing_upwards():
assert pointing_upwards((0, 0, 1))
assert pointing_upwards((0.5, 0.5, 0.8))
assert pointing_upwards((-0.5, 0.5, 0.8))
assert not pointing_upwards((0, 0, -1))
assert not pointing_upwards((-0.5, 0.5, -0.8))
assert not pointing_upwards((-0.5, 0.5, 0.4))
def test_pointing_downwards():
assert pointing_downwards((0, 0, -1))
assert pointing_downwards((-0.5, -0.5, -0.8))
assert pointing_downwards((-0.5, 0.5, -0.8))
assert not pointing_downwards((0, 0, 1))
assert not pointing_downwards((-0.5, -0.5, 0.8))
assert not pointing_downwards((-0.5, 0.5, -0.4))
def test_piecewise_on_subdomains():
"""
Define a simple cubic mesh with three subdomains, create a function
which takes different values on these subdomains and check that the
resulting function really has the right values.
"""
mesh = df.UnitCubeMesh(1, 1, 1)
fun_vals = (42, 23, -3.14)
g = df.MeshFunction('size_t', mesh, 3)
g.array()[:] = [1, 1, 2, 3, 1, 3]
p = piecewise_on_subdomains(mesh, g, fun_vals)
# check that p is a proper Function, not a MeshFunction
assert(isinstance(p, df.Function))
assert(
np.allclose(p.vector().array(), np.array([42, 42, 23, -3.14, 42, -3.14])))
def test_vector_field_from_dolfin_function():
"""
Create a dolfin.Function representing a vector field on a mesh and
convert it to a vector field on a regular grid using
`vector_field_from_dolfin_function()`. Then compare the resulting
values with the ones obtained by directly computing the field
values from the grid coordinates and check that they coincide.
"""
(xmin, xmax) = (-2, 3)
(ymin, ymax) = (-1, 2.5)
(zmin, zmax) = (0.3, 5)
(nx, ny, nz) = (10, 10, 10)
# Create dolfin.Function representing the vector field. Note that
# we use linear expressions so that they can be accurately
# represented by the linear interpolation on the mesh.
mesh = box(xmin, ymin, zmin, xmax, ymax, zmax, maxh=1.0)
V = df.VectorFunctionSpace(mesh, 'CG', 1, dim=3)
e = df.Expression(('-1.0 - 3*x[0] + x[1]',
'+1.0 + 4*x[1] - x[2]',
'0.3 - 0.8*x[0] - 5*x[1] + 0.2*x[2]'), degree=1)
f = df.interpolate(e, V)
X, Y, Z = np.mgrid[xmin:xmax:nx * 1j, ymin:ymax:ny * 1j, zmin:zmax:nz * 1j]
# Evaluate the vector field on the grid to create the reference arrays.
U = -1.0 - 3 * X + Y
V = +1.0 + 4 * Y - Z
W = 0.3 - 0.8 * X - 5 * Y + 0.2 * Z
# Now convert the dolfin.Function to a vector field and compare to
# the reference arrays.
X2, Y2, Z2, U2, V2, W2 = \
vector_field_from_dolfin_function(f, (xmin, xmax), (ymin, ymax),
(zmin, zmax), nx=nx, ny=ny, nz=nz)
assert(np.allclose(X, X2))
assert(np.allclose(Y, Y2))
assert(np.allclose(Z, Z2))
assert(np.allclose(U, U2))
assert(np.allclose(V, V2))
assert(np.allclose(W, W2))
def test_probe():
"""
Define a function on a cylindrical mesh which decays linearly in
x-direction. Then probe this function at a number of points along
the x-axis. This probing is done twice, once normally and once by
supplying a function which should be applied to the probed field
points. The results are compared with the expected values.
"""
# Define a vector-valued function on the mesh
mesh = cylinder(10, 1, 3)
V = df.VectorFunctionSpace(mesh, 'Lagrange', 1, dim=3)
f = df.interpolate(df.Expression(['x[0]', '0', '0'], degree=1), V)
# Define the probing points along the x-axis
xs = np.linspace(-9.9, 9.9, 20)
pts = [[x, 0, 0] for x in xs]
def square_x_coord(pt):
return pt[0] ** 2
# Probe the field (once normally and once with an additional
# function applied to the result). Note that the results have
# different shapes because apply_func returns a scalar, not a
# 3-vector.
res1 = probe(f, pts)
res2 = probe(f, pts, apply_func=square_x_coord)
# Check that we get the expected results.
res1_expected = [[x, 0, 0] for x in xs]
res2_expected = xs ** 2
assert(np.allclose(res1, res1_expected))
assert(np.allclose(res2, res2_expected))
# Probe at points which lie partly outside the sample to see if we
# get masked values in the result.
pts = [[20, 20, 0], [5, 2, 1]]
res1 = probe(f, pts)
res2 = probe(f, pts, apply_func=square_x_coord)
res1_expected = np.ma.masked_array([[np.NaN, np.NaN, np.NaN],
[5, 0, 0]],
mask=[[True, True, True],
[False, False, False]])
res2_expected = np.ma.masked_array([np.NaN, 25], mask=[True, False])
# Check that the arrays are masked out at the same location
assert((np.ma.getmask(res1) == np.ma.getmask(res1_expected)).all())
assert((np.ma.getmask(res2) == np.ma.getmask(res2_expected)).all())
# Check that the non-masked values are the same
assert(np.ma.allclose(res1, res1_expected))
assert(np.ma.allclose(res2, res2_expected))
@pytest.mark.skipif(True, reason="test for hg")
def test_get_hg_revision_info(tmpdir):
finmag_repo = MODULE_DIR
os.chdir(str(tmpdir))
os.mkdir('invalid_repo')
with pytest.raises(ValueError):
get_hg_revision_info('nonexisting_directory')
with pytest.raises(ValueError):
get_hg_revision_info('invalid_repo')
with pytest.raises(ValueError):
get_hg_revision_info(finmag_repo, revision='invalid_revision')
id_string = 'd330c151a7ce'
rev_nr, rev_id, rev_date = get_hg_revision_info(
finmag_repo, revision=id_string)
assert(rev_nr == 4)
assert(rev_id == id_string)
assert(rev_date == '2012-02-02')
@pytest.mark.skip(reason='Commit does not exist anymore (sensitive data removed)')
def test_binary_tarball_name(tmpdir):
finmag_repo = MODULE_DIR
expected_tarball_name = 'FinMag-dist__2017-06-16__3127873bac77fbade1ec4ed0f9017b3cb0204a1f_foobar.tar.bz2'
assert(binary_tarball_name(finmag_repo, revision='3127873bac77fbade1ec4ed0f9017b3cb0204a1f',
suffix='_foobar') == expected_tarball_name)
def test_crossprod():
"""
Compute the cross product of two functions f and g numerically
using `helpers.crossprod` and compare with the analytical
expression.
"""
xmin = ymin = zmin = -2
xmax = ymax = zmax = 3
nx = ny = nz = 10
mesh = df.BoxMesh(df.Point(xmin, ymin, zmin), df.Point(xmax, ymax, zmax), nx, ny, nz)
V = df.VectorFunctionSpace(mesh, 'CG', 1, dim=3)
u = df.interpolate(df.Expression(['x[0]', 'x[1]', '0'], degree=1), V)
v = df.interpolate(df.Expression(['-x[1]', 'x[0]', 'x[2]'], degree=1), V)
w = df.interpolate(
df.Expression(['x[1]*x[2]', '-x[0]*x[2]', 'x[0]*x[0]+x[1]*x[1]'], degree=1), V)
a = u.vector().array()
b = v.vector().array()
c = w.vector().array()
axb = crossprod(a, b)
assert(np.allclose(axb, c))
@pytest.mark.skip(reason='Broken, but not used anywhere')
def test_apply_vertexwise():
xmin = ymin = zmin = -2
xmax = ymax = zmax = 3
nx = ny = nz = 10
mesh = df.BoxMesh(df.Point(xmin, ymin, zmin), df.Point(xmax, ymax, zmax), nx, ny, nz)
V = df.VectorFunctionSpace(mesh, 'CG', 1, dim=3)
u = df.interpolate(df.Expression(['x[0]', 'x[1]', '0'], degree=1), V)
v = df.interpolate(df.Expression(['-x[1]', 'x[0]', 'x[2]'], degree=1), V)
w = df.interpolate(
df.Expression(['x[1]*x[2]', '-x[0]*x[2]', 'x[0]*x[0]+x[1]*x[1]'], degree=1), V)
#W = df.VectorFunctionSpace(mesh, 'CG', 1)
#w2 = df.interpolate(df.Expression(['-x[0]*x[1]', 'x[1]*x[0]', '0'], degree=1), W)
uxv = apply_vertexwise(np.cross, u, v)
#udotv = apply_vertexwise(np.dot, u, v)
assert(np.allclose(uxv.vector().array(), w.vector().array()))
#assert(np.allclose(udotv.vector().array(), w2.vector().array()))
def test_TemporaryDirectory():
# Check that the directory is created as expected and destroyed
# when leaving the with-block.
with TemporaryDirectory() as tmpdir:
assert(os.path.exists(tmpdir))
assert(not os.path.exists(tmpdir))
# With 'keep=True' the directory should not be deleted.
with TemporaryDirectory(keep=True) as tmpdir2:
assert(os.path.exists(tmpdir2))
assert(os.path.exists(tmpdir2))
# Tidy up
os.rmdir(tmpdir2)
def test_run_in_tmpdir():
cwd_bak = os.getcwd()
# Inside the 'with' block we should be in the
# newly created temporary directory.
with run_in_tmpdir() as tmpdir:
assert os.getcwd() == tmpdir
assert tmpdir != cwd_bak
# Outside the 'with' block we should be back in
# the previous working directory and the temporary
# directory should be destroyed
assert os.getcwd() == cwd_bak
assert not os.path.exists(tmpdir)
# Using 'keep=True' the temporary directory should
# not be deleted.
with run_in_tmpdir(keep=True) as tmpdir2:
pass
assert os.path.exists(tmpdir2)
# Tidy up
os.rmdir(tmpdir2)
def test_contextmanager_ignored(tmpdir):
d = {} # dummy dictionary
s = 'foobar'
with pytest.raises(KeyError):
with ignored(OSError):
d.pop('non_existing_key')
# This should work because we are ignoring the right kind of error.
with ignored(KeyError):
d.pop('non_existing_key')
# Check that we can ignore multiple errors
with ignored(IndexError, KeyError):
d.pop('non_existing_key')
s[42]
def test_run_cmd_with_timeout():
# A successfully run command should have exit code 0
returncode, stdout, _ = run_cmd_with_timeout('echo hello', timeout_sec=100)
assert(returncode == 0)
assert stdout == 'hello\n'
# A non-existing command should raise OSError
with pytest.raises(OSError):
returncode, _, _ = run_cmd_with_timeout('foo', timeout_sec=1)
# This command should be killed due to the timeout, resulting in a return
# code of -9.
returncode, _, _ = run_cmd_with_timeout('sleep 10', timeout_sec=0)
assert(returncode == -9)
@pytest.mark.skipif("True")
def test_jpg2avi(tmpdir):
"""
Test whether we can create an animation from a series of .jpg images.
"""
os.chdir(str(tmpdir))
sim = finmag.example.normal_modes.disk()
sim.compute_normal_modes(n_values=3)
sim.export_normal_mode_animation(k=0, filename='foo/bar.pvd')
    # note that we must not trim the border because otherwise the resulting
    # .jpg files will have different sizes, which confuses mencoder
render_paraview_scene(
'foo/bar.pvd', outfile='foo/quux.jpg', trim_border=False)
# Test the bare-bones export
jpg2avi('foo/quux.jpg')
assert(os.path.exists('foo/quux.avi'))
# Test a few keywords
jpg2avi('foo/quux.jpg', outfilename='animation.avi', duration=10, fps=10)
assert(os.path.exists('animation.avi'))
@pytest.mark.skipif("True")
def test_pvd2avi(tmpdir):
"""
Test whether we can create an animation from the timesteps in a .pvd file.
"""
os.chdir(str(tmpdir))
sim = finmag.example.normal_modes.disk()
sim.compute_normal_modes(n_values=3)
sim.export_normal_mode_animation(k=0, filename='foo/bar.pvd')
# Test the bare-bones export
pvd2avi('foo/bar.pvd')
assert(os.path.exists('foo/bar.avi'))
# Test a few keywords
pvd2avi('foo/bar.pvd', outfilename='animation.avi', duration=10,
fps=10, add_glyphs=False, colormap='heated_body')
assert(os.path.exists('animation.avi'))
def test_set_color_scheme():
"""
Just check that we can call 'set_color_scheme' with allowed values.
We don't check that the color scheme is actually changed, since it's
not obvious how to do that.
"""
set_color_scheme('light_bg')
set_color_scheme('dark_bg')
set_color_scheme('none')
with pytest.raises(ValueError):
set_color_scheme('foobar')
def test_restriction(tmpdir):
"""
Create a mesh consisting of two separate regions and define a
dolfin Function on it which is constant in either region.
Then extract the two subfunctions corresponding to these regions
and check that they are constant and their function vectors
have the correct lengths.
"""
os.chdir(str(tmpdir))
sphere1 = Sphere(10, center=(-20, 0, 0), name="sphere1")
sphere2 = Sphere(20, center=(+30, 0, 0), name="sphere2")
mesh = (sphere1 + sphere2).create_mesh(maxh=5.0)
class Sphere1(df.SubDomain):
def inside(self, pt, on_boundary):
return pt[0] < 0
class Sphere2(df.SubDomain):
def inside(self, pt, on_boundary):
return pt[0] > 0
region_markers = df.CellFunction('size_t', mesh)
subdomain1 = Sphere1()
subdomain2 = Sphere2()
subdomain1.mark(region_markers, 1)
subdomain2.mark(region_markers, 2)
submesh1 = df.SubMesh(mesh, region_markers, 1)
submesh2 = df.SubMesh(mesh, region_markers, 2)
r1 = restriction(mesh, submesh1)
r2 = restriction(mesh, submesh2)
# Define a Python function which is constant in either subregion
def fun_f(pt):
return 42.0 if (pt[0] < 0) else 23.0
# Convert the Python function to a dolfin.Function
f = scalar_valued_function(fun_f, mesh)
# Restrict the function to each of the subregions
f1 = r1(f)
f2 = r2(f)
assert(np.allclose(f1.vector().array(), 42.0))
assert(np.allclose(f2.vector().array(), 23.0))
assert(len(f1.vector().array()) == submesh1.num_vertices())
assert(len(f2.vector().array()) == submesh2.num_vertices())
a = f.vector().array()
a1 = r1(a)
a2 = r2(a)
assert(set(a) == set([23.0, 42.0]))
assert(np.allclose(a1, 42.0))
assert(np.allclose(a2, 23.0))
assert(len(a1) == submesh1.num_vertices())
assert(len(a2) == submesh2.num_vertices())
# Check a multi-dimensional array, too
b = np.concatenate([a, a])
b.shape = (2, -1)
b1 = r1(b)
b2 = r2(b)
assert(set(b.ravel()) == set([23.0, 42.0]))
assert(np.allclose(b1, 42.0))
assert(np.allclose(b2, 23.0))
assert(b1.shape == (2, submesh1.num_vertices()))
assert(b2.shape == (2, submesh2.num_vertices()))
def test_verify_function_space_type():
N = 10
mesh1d = df.UnitIntervalMesh(N)
mesh2d = df.UnitSquareMesh(N, N)
mesh3d = df.UnitCubeMesh(N, N, N)
V1 = df.FunctionSpace(mesh3d, 'DG', 0)
V2 = df.VectorFunctionSpace(mesh1d, 'DG', 0, dim=1)
V3 = df.VectorFunctionSpace(mesh2d, 'CG', 1, dim=3)
# Check that verifying the known function space types works as expected
assert(verify_function_space_type(V1, 'DG', 0, dim=None))
assert(verify_function_space_type(V2, 'DG', 0, dim=1))
assert(verify_function_space_type(V3, 'CG', 1, dim=3))
# Check that the verification function returns 'False' if we pass in a
# non-matching function space type.
# wrong 'dim' (should be None)
assert(not verify_function_space_type(V1, 'DG', 0, dim=1))
# wrong degree
assert(not verify_function_space_type(V1, 'DG', 1, dim=None))
# wrong family
assert(not verify_function_space_type(V1, 'CG', 0, dim=None))
# wrong 'dim' (should be 1)
assert(not verify_function_space_type(V2, 'DG', 0, dim=None))
# wrong 'dim' (should be 1)
assert(not verify_function_space_type(V2, 'DG', 0, dim=42))
assert(not verify_function_space_type(V2, 'DG', 42, dim=1)) # wrong degree
assert(not verify_function_space_type(V2, 'CG', 0, dim=1)) # wrong family
# wrong dimension
assert(not verify_function_space_type(V3, 'CG', 1, dim=42))
assert(not verify_function_space_type(V3, 'CG', 42, dim=1)) # wrong degree
assert(not verify_function_space_type(V3, 'DG', 1, dim=3)) # wrong family
if __name__ == '__main__':
pass
# test_scalar_valued_dg_function()
# test_pvd2avi('.')
| 25,171 | 34.156425 | 154 |
py
|
finmag
|
finmag-master/src/finmag/util/magpar.py
|
##import io
import gzip
import numpy as np
import dolfin as df
import os
import logging
import subprocess
from finmag.util.helpers import run_in_tmpdir
logger = logging.getLogger(name='finmag')
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
def gen_magpar_conf(base_name, init_m, Ms=8.6e5, A=13e-12, K1=0,
a=[0, 0, 1], alpha=0.1, use_demag=False):
"""
Generate magpar configuration files (in the current directory) which
can be used to run magpar and compute the various micromagnetic fields
for a simulation with the given parameters.
If base_name='foo', the following files are created in the current directory:
foo.inp
foo.0000.inp
foo.krn
    allopt.txt
"""
norm_a = (a[0] ** 2 + a[1] ** 2 + a[2] ** 2) ** 0.5
tmp_mz = a[2] / norm_a
theta = np.arccos(tmp_mz)
if a[0] == 0:
phi = 0
else:
phi = np.arctan(a[1] / a[0])
# theta phi K1 K2 Js A alpha psi # parameter
# (rad) (rad) (J/m^3) (J/m^3) (T) (J/m) (1) (rad) # units
krn_info = " %f %f %e 0 %f %e %f uni" % (
theta, phi, K1, np.pi * 4e-7 * Ms, A, alpha
)
with open(base_name + ".krn", "w") as krn_file:
krn_file.write(krn_info)
allopt = ["-simName ", base_name + "\n",
"-init_mag ", "0\n",
"-inp ", "0000\n",
"-demag ", "1" if use_demag else "0"]
with open("allopt.txt", "w") as allopt_file:
allopt_file.write("".join(allopt))
file_name = base_name + ".inp"
save_inp_of_inital_m(init_m, file_name)
file_name = base_name + ".0000.inp"
save_inp_of_inital_m(init_m, file_name)
logger.debug("Completed gen_magpar_conf()")
def run_magpar(base_name):
magpar_cmd = "magpar.exe"
logger.info("About to call {}".format(magpar_cmd))
subprocess.check_call(magpar_cmd, stdout=subprocess.PIPE)
gzcmd = ("gunzip", base_name + ".0001.gz")
subprocess.check_call(gzcmd)
def read_femsh(file_name):
f = open(file_name, 'r')
a = f.readline()
n_node = int(a.split()[0])
n_cell = int(a.split()[1])
node_coord = []
for i in range(n_node):
line = f.readline()
tmp = line.split()
t2 = [float(tmp[1]), float(tmp[2]), float(tmp[3])]
node_coord.append(t2)
connectivity = []
for i in range(n_cell):
t = f.readline().split()
t2 = [int(t[3]), int(t[4]), int(t[5]), int(t[6])]
connectivity.append(t2)
return np.array(node_coord), np.array(connectivity)
def read_inp(file_name):
# magpar produces gzip-ed files, so look for file_name.gz as well as file_name
if os.path.isfile(file_name):
f = open(file_name, 'r')
elif os.path.isfile(file_name + '.gz'):
f = gzip.open(file_name + '.gz', 'r')
else:
raise OSError("No such file: %s" % file_name)
with f:
a = f.readline()
num = int(a.split()[0])
names = []
for i in range(num):
names.append(f.readline().split(',')[0])
lines = f.readlines()
data = []
for line in lines:
data.append([float(t) for t in line.split()])
data = np.array(data)
fields = {}
for i in range(num):
fields[names[i]] = data[:, i + 1]
return fields
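# Hedged example (sketch): read_inp returns a dict mapping the column names
# declared in the .inp header to flat per-node numpy arrays, e.g.
#
#     fields = read_inp("bar.0001")    # falls back to gzipped "bar.0001.gz"
#     hx = fields["Hexch_x"]           # available names depend on the file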
def save_inp_of_inital_m(m, file_name):
mesh = m.mesh()
data_type_number = 3
f = open(file_name, "w")
head = "%d %d %d 0 0\n" % (
mesh.num_vertices(),
mesh.num_cells(),
data_type_number)
f.write(head)
xyz = mesh.coordinates()
if np.max(xyz) < 0.5:
print "Converting unit_length from m to nm."
xyz = xyz * 1e9
for i in range(len(xyz)):
f.write("%d %0.15e %0.15e %0.15e\n"
% (i + 1,
xyz[i][0],
xyz[i][1],
xyz[i][2]))
for c in df.cells(mesh):
id = c.index()
ce = c.entities(0)
f.write("%d 1 tet %d %d %d %d\n"
% (id + 1,
ce[0] + 1,
ce[1] + 1,
ce[2] + 1,
ce[3] + 1))
f.write("3 1 1 1\nM_x, none\nM_y, none\nM_z, none\n")
data = m.get_numpy_array_debug().reshape(3, -1)
for i in range(mesh.num_vertices()):
f.write("%d %0.15e %0.15e %0.15e\n"
% (i + 1,
data[0][i],
data[1][i],
data[2][i]))
f.close()
def get_field(base_name, field="anis"):
"""
Read values for the given field from file with the given
base name. Returns a pair `(nodes, field_vals)` where
`nodes` is an array of shape Nx3 containing the coordinates
of the mesh nodes and `field_vals` is a flat array.
TODO: Why are we returning a flat array here? Shouldn't this
be changed so that the field values are in the same
shape and order as the returned mesh nodes? (Max, 18.9.2014)
"""
file_name = base_name + ".0001"
fields = read_inp(file_name)
if field == "anis":
fx = fields["Hani_x"]
fy = fields["Hani_y"]
fz = fields["Hani_z"]
elif field == "exch":
fx = fields["Hexch_x"]
fy = fields["Hexch_y"]
fz = fields["Hexch_z"]
elif field == "demag":
fx = fields["Hdemag_x"]
fy = fields["Hdemag_y"]
fz = fields["Hdemag_z"]
else:
raise NotImplementedError(
"only exch, anis or demag field can be extracted now")
field = np.array([fx, fy, fz]).reshape(1, -1, order='C')[0]
field = field / (np.pi * 4e-7)
file_name = base_name + ".0001.femsh"
nodes, connectivity = read_femsh(file_name)
return nodes, field
def get_m0(file_name):
fields = read_inp(file_name)
fx = fields["M_x"]
fy = fields["M_y"]
fz = fields["M_z"]
field = np.array([fx, fy, fz]).reshape(1, -1)[0]
return field
def compute_anis_magpar(m, **kwargs):
return compute("anis", m, **kwargs)
def compute_exch_magpar(m, **kwargs):
return compute("exch", m, **kwargs)
def compute_demag_magpar(m, **kwargs):
return compute("demag", m, use_demag=True, **kwargs)
def compute(field_name, m, **kwargs):
with run_in_tmpdir():
base_name = "test_" + field_name
gen_magpar_conf(base_name, m, **kwargs)
run_magpar(base_name)
nodes, field = get_field(base_name, field_name)
return nodes, field
def compare_field(aNodes, aField, bNodes, bField):
"""
Compares two vector fields aField and bField defined over the meshes
aNodes and bNodes respectively.
When n is the number of nodes, we expect aField and bField to be
ndarrays of shape 3n, and aNodes and bNodes to be ndarrays of shape (n, 3).
"""
assert aNodes.shape == bNodes.shape
assert aField.shape == bField.shape
aField = aField.reshape((3, -1))
bField = bField.reshape((3, -1))
bFieldOrdered = np.zeros(bField.shape)
for i, aNode in enumerate(aNodes):
closest_bNode_index = np.abs(bNodes - aNode).mean(axis=1).argmin()
for dim in range(3):
bFieldOrdered[dim][i] = bField[dim][closest_bNode_index]
diff = np.abs(bFieldOrdered - aField)
rel_diff = diff / \
np.sqrt(
np.max(bFieldOrdered[0] ** 2 + bFieldOrdered[1] ** 2 + bFieldOrdered[2] ** 2))
return aField, bFieldOrdered, diff, rel_diff
def compare_field_directly(node1, field1, node2, field2):
"""
Compares field1 defined over the nodes on node1 to field2 over the nodes
node2. Note that the nodes should be the same, but are allowed to be
in different order.
    Acceptable fields should look like this:
[fx0, ..., fxn, fy0, ..., fyn, fz0, ..., fzn]
"""
assert node1.shape == node2.shape
assert field1.shape == field2.shape
field1 = field1.reshape(3, -1)
field2 = field2.reshape(3, -1)
# build two dicts of coordinates -> values in order
# to sort the data of field2 by the ordering of the data in field1.
key1 = []
key2 = []
data2 = {}
for i in range(len(node1)):
tmp1 = "%g%g%g" % (node1[i][0], node1[i][1], node1[i][2])
tmp2 = "%g%g%g" % (node2[i][0], node2[i][1], node2[i][2])
key1.append(tmp1)
key2.append(tmp2)
data2[tmp2] = [field2[0][i], field2[1][i], field2[2][i]]
assert(set(key1) == set(key2))
field2_ordered = np.array([data2[k] for k in key1])
field2_ordered = field2_ordered.reshape(1, -1)[0]
field2_ordered = field2_ordered.reshape(3, -1, order='F')
difference = np.abs(field2_ordered - field1)
relative_difference = difference / np.max(np.sqrt(
field2_ordered[0] ** 2 + field2_ordered[1] ** 2 + field2_ordered[2] ** 2))
return field1, field2_ordered, difference, relative_difference
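# Hedged usage sketch: comparing a finmag field with a magpar field computed
# on the same nodes (e.g. as returned by get_field() above); both fields are
# flat arrays of length 3n:
#
#     f1, f2, diff, rel_diff = compare_field_directly(nodes, field_finmag,
#                                                     nodes, field_magpar)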
| 8,862 | 27.775974 | 90 |
py
|
finmag
|
finmag-master/src/finmag/util/fileio_test.py
|
import numpy as np
import pytest
import os
from glob import glob
from fileio import *
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
def test_Table_writer_and_reader(tmpdir):
os.chdir(str(tmpdir))
import finmag
import dolfin as df
xmin, ymin, zmin = 0, 0, 0 # one corner of cuboid
xmax, ymax, zmax = 6, 6, 11 # other corner of cuboid
nx, ny, nz = 3, 3, 6 # number of subdivisions (use ~2nm edgelength)
mesh = df.BoxMesh(df.Point(xmin, ymin, zmin), df.Point(xmax, ymax, zmax), nx, ny, nz)
# standard Py parameters
sim = finmag.sim_with(mesh, Ms=0.86e6, alpha=0.5, unit_length=1e-9,
A=13e-12, m_init=(1, 0, 1))
filename = 'test-save_averages-data.ndt'
ndt = Tablewriter(filename, sim)
times = np.linspace(0, 3.0e-11, 6 + 1)
for i, time in enumerate(times):
print("In iteration {}, computing up to time {}".format(i, time))
sim.run_until(time)
ndt.save()
# now open file for reading
data = Tablereader(filename)
    print data.timesteps() - times
    assert np.all(np.abs(data.timesteps() - times) < 1e-25)
mx, my, mz = sim.m_average
assert abs(data['m_x'][-1] - mx) < 1e-11
assert abs(data['m_y'][-1] - my) < 1e-11
assert abs(data['m_z'][-1] - mz) < 1e-11
# Try reading multiple columns at once by indexing 'data'
# with multiple indices (in the assert statement).
dmx = data['m_x']
dmy = data['m_y']
dmz = data['m_z']
dm = np.vstack([dmx, dmy, dmz]).T # stack the arrays together
assert np.allclose(dm, np.array(data['m_x', 'm_y', 'm_z']).T)
# Reading an incomplete dataset should raise a runtime error
with pytest.raises(RuntimeError):
Tablereader(os.path.join(MODULE_DIR, 'test-incomplete-data.ndt'))
def test_Tablewriter_complains_about_changing_entities():
import finmag
sim = finmag.example.barmini(name='tmp-test-fileio2')
# create ndt file
sim.save_averages()
import pytest
with pytest.raises(RuntimeError):
# add entity (should raise RuntimeError)
sim.tablewriter.add_entity(
'test', {'header': 'test', 'unit': '<>', 'get': lambda s: -1})
def test_Tablewriter_complains_about_incomplete_entities():
import pytest
import finmag
sim = finmag.example.barmini(name='tmp-test-fileio2')
# this should pass
sim.tablewriter.add_entity(
'test', {'header': 'test', 'unit': '<>', 'get': lambda s: -1})
# this should fail because we add 'test' the second time
with pytest.raises(AssertionError):
sim.tablewriter.add_entity(
'test', {'header': 'test', 'unit': '<>', 'get': lambda s: -1})
with pytest.raises(AssertionError):
# should fail because 'header' is missing
sim.tablewriter.add_entity(
'test2', {'unknown-keyword': 'test', 'unit': '<>', 'get': lambda s: -1})
with pytest.raises(AssertionError):
# should fail because 'unit' is missing
sim.tablewriter.add_entity(
'test3', {'header': 'test', 'unit-is-missing': '<>', 'get': lambda s: -1})
with pytest.raises(AssertionError):
# should fail because 'get' is missing
sim.tablewriter.add_entity(
'test4', {'header': 'test', 'unit': '<>', 'get-is-missing': lambda s: -1})
with pytest.raises(AssertionError):
# should fail because dictionary is not complete
sim.tablewriter.add_entity('test4', {})
def test_field_saver(tmpdir):
os.chdir(str(tmpdir))
a1 = np.arange(10)
a2 = np.arange(20)
    # Test saving to .npy
s = FieldSaver('data_npy.npy', incremental=True)
s.save(a1)
s.save(a1)
s.save(a2) # nothing wrong with saving arrays of different shapes
assert(len(glob('data_npy*.npy')) == 3)
assert(np.allclose(a1, np.load('data_npy_000000.npy')))
assert(np.allclose(a1, np.load('data_npy_000001.npy')))
assert(np.allclose(a2, np.load('data_npy_000002.npy')))
with pytest.raises(IOError):
# Existing files should not be overwritten
FieldSaver('data_npy.npy', incremental=True)
# Using 'overwrite=True' should remove existing files
s = FieldSaver('data_npy.npy', overwrite=True, incremental=True)
assert(len(glob('data_npy*.npy')) == 0)
s.save(a1)
s.save(a1)
assert(len(glob('data_npy*.npy')) == 2)
# Tidying up: remove files created so far
for f in glob('data_npy*.npy'):
os.remove(f)
# Non-incremental saving
s = FieldSaver('data_npy.npy', overwrite=True, incremental=False)
assert(len(glob('data_npy*.npy')) == 0)
s.save(a1)
s.save(a1)
s.save(a1)
assert(len(glob('data_npy*.npy')) == 1)
# Extension is added automatically
s = FieldSaver('data_npy.foo')
s.save(a1)
assert(len(glob('data_npy.foo*.npy')) == 1)
if __name__ == "__main__":
test_Table_writer_and_reader()
test_field_saver()
| 4,952 | 32.241611 | 89 |
py
|
finmag
|
finmag-master/src/finmag/util/length_scales.py
|
import dolfin as df
from finmag.field import Field
from finmag.util.consts import mu0
from numpy import pi
def exchange_length(A, Ms):
"""
Computes the exchange length when the exchange constant A
and the saturation magnetisation Ms are given. Both Ms and A
are Field objects.
"""
dg_functionspace = df.FunctionSpace(A.mesh(), 'DG', 0)
exchange_length = Field(dg_functionspace)
function = df.project(df.sqrt(2*A.f/(mu0*Ms.f**2)), dg_functionspace)
exchange_length.set(function)
return exchange_length
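# Hedged example (sketch): exchange length for permalloy-like parameters
# (A = 13e-12 J/m, Ms = 8e5 A/m); the mesh below is an arbitrary choice.
#
#     import dolfin as df
#     from finmag.field import Field
#     mesh = df.UnitCubeMesh(5, 5, 5)
#     V = df.FunctionSpace(mesh, 'DG', 0)
#     l_ex = exchange_length(Field(V, 13e-12), Field(V, 8e5))
#     # l_ex holds sqrt(2A/(mu0*Ms^2)) everywhere, about 5.7e-9 m here.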
def bloch_parameter(A, K1):
"""
Computes the Bloch parameter when the exchange constant A
and the anisotropy constant K1 are given. Both A and K1
are Field objects.
"""
dg_functionspace = df.FunctionSpace(A.mesh(), 'DG', 0)
bloch_parameter = Field(dg_functionspace)
function = df.project(df.sqrt(A.f/K1.f), dg_functionspace)
bloch_parameter.set(function)
return bloch_parameter
def helical_period(A, D):
"""
Computes the helical period when exchange constant A and
the constant D are given. Both A and D are Field objects.
"""
dg_functionspace = df.FunctionSpace(A.mesh(), 'DG', 0)
helical_period = Field(dg_functionspace)
    # The helical period is L = 4*pi*A/D, which has units of metres
    # (A in J/m, D in J/m^2).
    function = df.project(4*pi*A.f/D.f, dg_functionspace)
helical_period.set(function)
return helical_period
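# Hedged example: with FeGe-like values A = 8.78e-12 J/m and D = 1.58e-3
# J/m^2 (an assumption), the period 4*pi*A/D evaluates to about 7.0e-8 m.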
| 1,368 | 25.843137 | 73 |
py
|
finmag
|
finmag-master/src/finmag/util/DMI_from_helix.py
|
"""
This file contains three functions that use finmag simulations on a 1D mesh
to obtain certain material characteristics. It also contains an example
showing usage of two of the functions.
"""
import dolfin as df
import finmag
import numpy as np
import scipy.optimize
def Find_Helix_Length(D, A, Ms, H=[0, 0, 0], K1=0, KAxis=[0, 0, 0],
plot=False, plotFactor=0.1, plotFileOut="Helix_Length",
pvPath=None):
"""Function that takes some material parameters and returns the
estimated helix length.
@parameters
A : Isotropic exchange energy constant
(J/m)
D : Dzyaloshinskii-Moriya exchange energy constant
(J/m^2)
Ms : Saturation magnetisation
(A/m)
H : External magnetic field strength (a three-dimensional array)
(A/m)
    K1 : Easy-axis anisotropy strength (negative yields hard axis)
(J/m^3)
KAxis : Uniaxial anisotropy axis (a three-dimensional array)
(Unitless)
plot : Boolean to decide whether or not a plot should be displayed.
plotFactor : Float showing what fraction of the dataset to show in the
plot.
plotFileOut : String denoting filepath to write plots to.
pvPath : String denoting the path to save paraview files of the
magnetisation to.
"""
# Make a mesh, with lengths measured in unitLength.
unitLength = 1e-9
    meshX = 1000  # Length of mesh (nm). This should preferably be an even
    # number for the halving calculation below.
meshXHalf = meshX / 2
meshN = 1000 # Number of points in the desired mesh. Increasing this
# improves the accuracy of the output, but also increases the
# execution time. TODO: Optional parameter? <!>
mesh = df.IntervalMesh(meshN - 1, -meshXHalf, meshXHalf)
# Creating simulation object.
simName = "Finding_Helix_Length"
sim = finmag.Simulation(mesh, Ms, name=simName, unit_length=unitLength)
# Create energy objects and add them to the simulation.
# Isotropic exchange interaction energy object to use in the simulation.
eA = finmag.energies.Exchange(A)
sim.add(eA)
# Dzyaloshinskii-Moriya exchange interaction energy object to use in the
# simulation.
eD = finmag.energies.DMI(D)
sim.add(eD)
# Zeeman energy object to use in the simulation.
eH = finmag.energies.Zeeman(H)
sim.add(eH)
# Anisotropy energy object to use in the simulation.
if K1 != 0:
if np.linalg.norm(KAxis) == 0:
raise ValueError("Anisotropy strength provided without " +
"non-zero axis.")
eK = finmag.energies.UniaxialAnisotropy(K1, KAxis)
sim.add(eK)
# Define initial magnetisation and set it.
np.random.seed(1)
def m_rand(pos):
"""This function returns a consistent random vector direction."""
out = np.random.random(3) - 0.5
return out / np.linalg.norm(out)
sim.set_m(m_rand)
# Run the simulation.
if pvPath is not None:
sim.schedule('save_vtk', at_end=True, overwrite=True,
filename=pvPath)
tEnd = 1e-8 # Time the simulation will take (s)
sim.run_until(tEnd)
# Extract the magnetisation vectors of the relaxed mesh.
xs = np.ndarray([meshN])
ys = np.ndarray([meshN])
zs = np.ndarray([meshN])
for zI in xrange(meshN):
xs[zI] = sim.m[zI]
ys[zI] = sim.m[zI + meshN]
zs[zI] = sim.m[zI + 2 * meshN]
# Check to see if the ferromagnetic state has been encountered. This
# corresponds to all vectors having a strong component perpendicular to the
# helix in the direction of the external field. If the ferromagnetic state
# has been encountered, an exception should be raised.
ferromagnetic = True
for x in xs:
if abs(x) < 0.3:
ferromagnetic = False
break
    # Also check to see if all values have the same sign (so that they point
    # in the same direction).
if ferromagnetic is True:
for zI in xrange(len(xs) - 1):
if xs[zI] * xs[zI + 1] < 0:
ferromagnetic = False
break
# Do the same for Y and Z directions.
if ferromagnetic is False:
ferromagnetic = True
for y in ys:
if abs(y) < 0.3:
ferromagnetic = False
break
if ferromagnetic is True:
for zI in xrange(len(ys) - 1):
if ys[zI] * ys[zI + 1] < 0:
ferromagnetic = False
break
if ferromagnetic is False:
ferromagnetic = True
for z in zs:
if abs(z) < 0.3:
ferromagnetic = False
break
if ferromagnetic is True:
for zI in xrange(len(zs) - 1):
if zs[zI] * zs[zI + 1] < 0:
ferromagnetic = False
break
if ferromagnetic is True:
msg = "Ferromagnetic relaxed state encountered. This suggests " + \
"that the external magnetic field is too strong for these " + \
"material parameters."
raise ValueError(msg)
# Find the fourier transform of the two magnetisation vector components.
    finmag.logger.info("Calculating the Fourier transform " +
                       "of the magnetisation data.")
ffty = np.fft.fft(ys)
ffty = abs(ffty[:len(ffty) / 2])
fftz = np.fft.fft(zs)
fftz = abs(fftz[:len(fftz) / 2])
# Calculate the discrete wavenumber domain fs of the magnetisation data
# after it is transformed.
fPrecision = 1 / (meshX * unitLength)
fs = np.linspace(0, meshN, meshN) * fPrecision
fs = fs[:len(fs) / 2]
# Find the wavenumber peak that corresponds to the helix length.
ly = fs[list(ffty).index(max(ffty))]
lz = fs[list(fftz).index(max(fftz))]
# Do some plotting.
if plot is True:
import matplotlib.pyplot as plt
        # Slice indices must be integers, so cast the float arithmetic below.
        meshXs = np.linspace(-meshXHalf * plotFactor, meshXHalf * plotFactor,
                             int(meshN * plotFactor))
        ys = ys[int(len(ys) / 2. - len(ys) * (plotFactor / 2)):
                int(len(ys) / 2. + len(ys) * (plotFactor / 2))]
        zs = zs[int(len(zs) / 2. - len(zs) * (plotFactor / 2)):
                int(len(zs) / 2. + len(zs) * (plotFactor / 2))]
# Plot it and make it look good (subjectively).
plt.rc('font', family='serif')
plt.figure()
(fig, ax) = plt.subplots(1)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.xaxis.set_ticks_position('none')
ax.yaxis.set_ticks_position('none')
ax.spines['left'].set_linewidth(0.5)
ax.spines['left'].set_color('#262626')
ax.spines['bottom'].set_linewidth(0.5)
ax.spines['bottom'].set_color('#262626')
plt.plot(meshXs * unitLength, ys, 'x-', label="Y")
plt.plot(meshXs * unitLength, zs, 'x-', label="Z")
plt.xlabel("Distance from centre of mesh $(\mathrm{m})$")
plt.ylabel("Magnetisation component")
plt.axis("tight")
plt.legend(loc=1)
        plt.savefig(plotFileOut + "_magnetization_components.png")
plt.figure()
(fig, ax) = plt.subplots(1)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.xaxis.set_ticks_position('none')
ax.yaxis.set_ticks_position('none')
ax.spines['left'].set_linewidth(0.5)
ax.spines['left'].set_color('#262626')
ax.spines['bottom'].set_linewidth(0.5)
ax.spines['bottom'].set_color('#262626')
plt.semilogy(fs, ffty, 'x-')
plt.xlabel("Wavenumber $(\mathrm{m}^{-1})$")
plt.ylabel("Discrete fourier transform of the magnetisation data.")
plt.axis("tight")
plt.savefig(plotFileOut + "_fourier_transform.png")
# Calculate the wavenumber that matches the waveform, as well as the helix
# length (analogous to the period).
fOut = (ly + lz) / 2.
out = 1 / fOut
# Cleanup and return
return out
def Find_DMI(A, Ms, l, H=[0, 0, 0], K1=0, KAxis=[0, 0, 0], D0=None, tol=1e-6,
verbose=False):
"""Function that takes some material parameters and returns the estimated
DMI constant correct to a given tolerance.
@parameters
A : Isotropic exchange energy constant (J/m)
Ms : Saturation magnetisation (A/m)
l : Helix length (m)
H : External magnetic field strength (three-dimensional array) (A/m)
    K1 : Easy-axis anisotropy strength (negative yields hard axis) (J/m^3)
KAxis : Uniaxial anisotropy axis (a three-dimensional array) (Unitless)
D0 : Estimated Dzyaloshinskii-Moriya exchange energy constant (J/m^2)
tol : Tolerance to which the DMI constant should be found, if any.
verbose : Boolean to dictate whether or not simulation output is provided.
"""
if verbose is False:
logLevel = finmag.logger.level
finmag.logger.setLevel(finmag.logging.ERROR)
def Find_DMI_Signchange(A, Ms, l, H, K1, KAxis, D0, tol):
"""Function that takes some material parameters and returns a range in
which the DMI value can exist.
@parameters
A : Isotropic exchange energy constant (J/m)
Ms : Saturation magnetisation (A/m)
l : Helix length (m)
H : External magnetic field strength (three-dimensional array).
        K1 : Easy-axis anisotropy strength (negative yields hard axis).
KAxis : Uniaxial anisotropy axis (a three-dimensional array) (Unitless)
D0 : Estimated Dzyaloshinskii-Moriya exchange energy constant.
tol : Tolerance to which the DMI constant should be found.
"""
# Two values of d (with a helix lengths greater than and less than the
# desired helix length l) need to be found.
# Initialise two arrays to hold d and l values,
ds = []
ls = []
ds.append(D0)
# Find the sign of the evaluated helix length for the D0 guess,
# subtracted from the actual helix length.
ls.append(Find_Helix_Length(ds[0], A, Ms, H, K1, KAxis) - l)
# Find an increment size bigger than the desired tolerance to search
# for these lengths. The increment should be positive if the
# calculated length is too small and vice versa (due to the nature of
# the relationship).
if ls[0] > 0:
dIncrement = tol * -1
else:
dIncrement = tol
# Find the sign of the evaluated helix length for another guess that's
# a bit far out from the desired tolerance, subtracted from the actual
# helix length.
ds.append(D0 + dIncrement)
ls.append(Find_Helix_Length(ds[1], A, Ms, H, K1, KAxis) - l)
# Keep doing this until two different values have been found.
while ls[-1] == ls[-2]:
ds.append(ds[-1] + dIncrement)
ls.append(Find_Helix_Length(ds[-1], A, Ms, H, K1, KAxis) - l)
# Once the second value has been found, see if the sign change is
# different. If it is, then use those two as the interval. If not,
# proceed searching in the other direction until this situation is
# encountered.
dRange = [0, 0]
if ls[-1] * ls[-2] < 0:
dRange[0] = ds[-2]
dRange[1] = ds[-1]
else:
# It's unfortunate, but now we must do the same as before,
# but in reverse.
dIncrement *= -1
ds = [ds[0]]
ls = [ls[0]]
# Find the sign of the evaluated helix length for another guess
# that's a bit far out from the desired tolerance, subtracted
# from the actual helix length.
ds.append(D0 + dIncrement)
ls.append(Find_Helix_Length(ds[1], A, Ms, H, K1, KAxis) - l)
# Keep doing this until two different values have been found.
while ls[-1] == ls[-2]:
ds.append(ds[-1] + dIncrement)
ls.append(Find_Helix_Length(ds[-1], A, Ms, H, K1, KAxis) - l)
# Pray that a sign change has been found this time.
if ls[-1] * ls[-2] >= 0:
raise RuntimeError("D Range cannot be found for the provided" +
" value of l!")
else:
dRange[0] = ds[-2]
dRange[1] = ds[-1]
# Now that the range has been found, it can be returned.
return dRange
#===Find DMI function starts here...===#
# Check for non-zero, non-negative helix length
if l <= 0:
raise ValueError("Positive helix length required for DMI estimation" +
" if an initial DMI guess is not provided.")
# Suggest an initial guess for D0 if one is not already there. This guess
# comes from the ipython notebook "ref-dmi-constant" without reference,
# but it is used here because it performs well in most examples.
if D0 is None:
D0 = 4 * np.pi * A / l
# Find the range that d can exist in.
dRange = Find_DMI_Signchange(A, Ms, l, H, K1, KAxis, D0, tol * 1e2)
# Use an optimization routine to find d.
def Helix_Length_Difference(D, A, Ms, l, H, K1, KAxis):
return Find_Helix_Length(D, A, Ms, H, K1, KAxis) - l
D = scipy.optimize.brentq(Helix_Length_Difference, dRange[0], dRange[1],
args=(A, Ms, l, H, K1, KAxis), xtol=tol)
# Cleanup and return
if verbose is False:
finmag.logger.setLevel(logLevel)
return D
if __name__ == "__main__":
# Example material properties that work. This is expected to print
# d = 2.21e-4 and l = 2.50e-8, though the process is somewhat stochastic due
# to the random initialisation of the magnetisation each iteration.
A = 3.53e-13 # Isotropic exchange energy constant
# (J/m)
Ms = 1.56e5 # Magnetisation Saturaton (A/m)
l = 22e-9 # Observed helix length (m)
H = np.array([1., 0., 0.]) * Ms * 0. # External magnetic field strength
# (A/m)
    D0 = 4 * np.pi * A / l  # Dzyaloshinskii-Moriya exchange
# energy constant (J/m^2)
dFound = Find_DMI(A, Ms, l, H=H, D0=D0, verbose=True)
print("DMI given Helix length: {:.2e} J/m^2.".format(dFound))
lFound = Find_Helix_Length(D0, A, Ms, H=H)
print("Helix length given DMI: {:.2e} m.".format(lFound))
| 14,784 | 36.813299 | 80 |
py
|
finmag
|
finmag-master/src/finmag/util/ansistrm.py
|
#
# Copyright (C) 2010-2012 Vinay Sajip. All rights reserved. Licensed under the new BSD license.
# got it from
# http://plumberjack.blogspot.co.uk/2010/12/colorizing-logging-output-in-terminals.html
# probably part of logutils as well
#
import ctypes
import logging
import os
level_maps = {
'dark_bg':
{
logging.DEBUG: (None, 'blue', True),
logging.INFO: (None, 'white', False),
logging.WARNING: (None, 'yellow', True),
logging.ERROR: (None, 'red', True),
logging.CRITICAL: ('red', 'white', True),
},
'light_bg':
{
logging.DEBUG: (None, 'black', False),
logging.INFO: (None, 'blue', False),
logging.WARNING: (None, 'yellow', False),
logging.ERROR: (None, 'red', False),
logging.CRITICAL: ('red', 'white', True),
},
'none': # Don't do any colouring -- sometimes colours can't be processed
    # by specific tools (such as nbconvert, at the moment, Dec 2012, HF)
{
logging.DEBUG: (None, None, False), # do not add any colours
logging.INFO: (None, None, False),
logging.WARNING: (None, None, False),
logging.ERROR: (None, None, False),
logging.CRITICAL: (None, None, False),
}
}
class ColorizingStreamHandler(logging.StreamHandler):
# color names to indices
color_map = {
'black': 0,
'red': 1,
'green': 2,
'yellow': 3,
'blue': 4,
'magenta': 5,
'cyan': 6,
'white': 7,
}
# levels to (background, foreground, bold/intense)
if os.name == 'nt':
level_map = level_maps['dark_bg']
else:
level_map = level_maps['light_bg']
csi = '\x1b['
reset = '\x1b[0m'
@property
def is_tty(self):
isatty = getattr(self.stream, 'isatty', None)
return isatty and isatty()
def emit(self, record):
try:
message = self.format(record)
stream = self.stream
if not self.is_tty:
stream.write(message)
else:
self.output_colorized(message)
stream.write(getattr(self, 'terminator', '\n'))
self.flush()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
if os.name != 'nt':
def output_colorized(self, message):
self.stream.write(message)
else:
import re
ansi_esc = re.compile(r'\x1b\[((?:\d+)(?:;(?:\d+))*)m')
nt_color_map = {
0: 0x00, # black
1: 0x04, # red
2: 0x02, # green
3: 0x06, # yellow
4: 0x01, # blue
5: 0x05, # magenta
6: 0x03, # cyan
7: 0x07, # white
}
def output_colorized(self, message):
parts = self.ansi_esc.split(message)
write = self.stream.write
h = None
fd = getattr(self.stream, 'fileno', None)
if fd is not None:
fd = fd()
if fd in (1, 2): # stdout or stderr
h = ctypes.windll.kernel32.GetStdHandle(-10 - fd)
while parts:
text = parts.pop(0)
if text:
write(text)
if parts:
params = parts.pop(0)
if h is not None:
params = [int(p) for p in params.split(';')]
color = 0
for p in params:
if 40 <= p <= 47:
color |= self.nt_color_map[p - 40] << 4
elif 30 <= p <= 37:
color |= self.nt_color_map[p - 30]
elif p == 1:
color |= 0x08 # foreground intensity on
elif p == 0: # reset to default color
color = 0x07
else:
pass # error condition ignored
ctypes.windll.kernel32.SetConsoleTextAttribute(
h, color)
def colorize(self, message, record):
if record.levelno in self.level_map:
bg, fg, bold = self.level_map[record.levelno]
params = []
if bg in self.color_map:
params.append(str(self.color_map[bg] + 40))
if fg in self.color_map:
params.append(str(self.color_map[fg] + 30))
if bold:
params.append('1')
if params:
message = ''.join((self.csi, ';'.join(params),
'm', message, self.reset))
return message
def format(self, record):
message = logging.StreamHandler.format(self, record)
if self.is_tty:
# Don't colorize any traceback
parts = message.split('\n', 1)
parts[0] = self.colorize(parts[0], record)
message = '\n'.join(parts)
return message
def main():
root = logging.getLogger()
root.setLevel(logging.DEBUG)
root.addHandler(ColorizingStreamHandler())
logging.debug('DEBUG')
logging.info('INFO')
logging.warning('WARNING')
logging.error('ERROR')
logging.critical('CRITICAL')
root.setLevel(5)
logging.log(5, "EXTREMEDEBUG")
if __name__ == '__main__':
main()
| 5,575 | 31.231214 | 95 |
py
|
finmag
|
finmag-master/src/finmag/util/oommf/comparison.py
|
import dolfin as df
import numpy as np
from finmag.field import Field
from finmag.energies import UniaxialAnisotropy, Exchange
from finmag.util.oommf import oommf_uniform_exchange, oommf_uniaxial_anisotropy
def compare_anisotropy(m_gen, Ms, K1, axis, dolfin_mesh, oommf_mesh, dims=3, name=""):
finmag_anis_field = compute_finmag_anis(
m_gen, Ms, K1, norm_axis(axis), dolfin_mesh)
finmag_anis = finmag_to_oommf(finmag_anis_field, oommf_mesh, dims)
oommf_anis = oommf_uniaxial_anisotropy(
oommf_m0(m_gen, oommf_mesh), Ms, K1, axis).flat
difference = np.abs(finmag_anis - oommf_anis)
relative_difference = difference / np.sqrt(
oommf_anis[0] ** 2 + oommf_anis[1] ** 2 + oommf_anis[2] ** 2)
return dict(name=name,
mesh=dolfin_mesh, oommf_mesh=oommf_mesh,
anis=finmag_anis, oommf_anis=oommf_anis,
diff=difference, rel_diff=relative_difference)
def norm_axis(a):
a = 1.0 * np.array(a)
a /= np.sqrt(a[0] ** 2 + a[1] ** 2 + a[2] ** 2)
return tuple(a)
def compute_finmag_anis(m_gen, Ms, K1, axis, dolfin_mesh):
S3 = df.VectorFunctionSpace(dolfin_mesh, "Lagrange", 1, dim=3)
coords = np.array(zip(* dolfin_mesh.coordinates()))
m0 = m_gen(coords).flatten()
m = Field(S3)
m.set_with_numpy_array_debug(m0)
anis = UniaxialAnisotropy(K1, axis)
anis.setup(m, Field(df.FunctionSpace(dolfin_mesh, 'DG', 0), Ms))
anis_field = df.Function(S3)
anis_field.vector()[:] = anis.compute_field()
return anis_field
def compare_exchange(m_gen, Ms, A, dolfin_mesh, oommf_mesh, dims=3, name=""):
finmag_exc_field = compute_finmag_exc(dolfin_mesh, m_gen, Ms, A)
finmag_exc = finmag_to_oommf(finmag_exc_field, oommf_mesh, dims)
oommf_exc = oommf_uniform_exchange(oommf_m0(m_gen, oommf_mesh), Ms, A).flat
difference = np.abs(finmag_exc - oommf_exc)
relative_difference = difference / np.sqrt(
oommf_exc[0] ** 2 + oommf_exc[1] ** 2 + oommf_exc[2] ** 2)
return dict(name=name,
mesh=dolfin_mesh, oommf_mesh=oommf_mesh,
exc=finmag_exc, oommf_exc=oommf_exc,
diff=difference, rel_diff=relative_difference)
def compute_finmag_exc(dolfin_mesh, m_gen, Ms, A):
S3 = df.VectorFunctionSpace(dolfin_mesh, "Lagrange", 1, dim=3)
coords = np.array(zip(* dolfin_mesh.coordinates()))
m0 = m_gen(coords).flatten()
m = Field(S3)
m.set_with_numpy_array_debug(m0)
exchange = Exchange(A)
exchange.setup(m, Field(df.FunctionSpace(dolfin_mesh, 'DG', 0), Ms))
finmag_exc_field = df.Function(S3)
finmag_exc_field.vector()[:] = exchange.compute_field()
return finmag_exc_field
def oommf_m0(m_gen, oommf_mesh):
coords = np.array(zip(* oommf_mesh.iter_coords()))
m0 = oommf_mesh.new_field(3)
m0.flat = m_gen(coords)
m0.flat /= np.sqrt(
m0.flat[0] ** 2 + m0.flat[1] ** 2 + m0.flat[2] ** 2)
return m0
def finmag_to_oommf(f, oommf_mesh, dims=1):
"""
Given a dolfin.Function f and a mesh oommf_mesh as defined in
finmag.util.oommf.mesh, it will probe the values of f at the coordinates
of oommf_mesh and return the resulting, oommf_compatible mesh_field.
"""
f_for_oommf = oommf_mesh.new_field(3)
for i, (x, y, z) in enumerate(oommf_mesh.iter_coords()):
if dims == 1:
f_x, f_y, f_z = f(x)
else:
f_x, f_y, f_z = f(x, y, z)
f_for_oommf.flat[0, i] = f_x
f_for_oommf.flat[1, i] = f_y
f_for_oommf.flat[2, i] = f_z
return f_for_oommf.flat
| 3,580 | 34.107843 | 86 |
py
|
finmag
|
finmag-master/src/finmag/util/oommf/test_mesh.py
|
import unittest
import numpy as np
from finmag.util.oommf import mesh
class TestIterCoordsInt(unittest.TestCase):
def test_zyx_ordering(self):
m = mesh.Mesh((3, 1, 1), cellsize=(1, 1, 1))
indices = [r for r in m.iter_coords_int()]
expected = [[0, 0, 0], [1, 0, 0], [2, 0, 0]]
assert np.array_equal(m.mesh_size, [3, 1, 1])
assert m.array_order == mesh.Mesh.ZYX
assert np.array_equal(expected, indices)
m = mesh.Mesh((2, 2, 2), cellsize=(1, 1, 1))
indices = [r for r in m.iter_coords_int()]
expected = [[0, 0, 0], [1, 0, 0], [0, 1, 0], [1, 1, 0],
[0, 0, 1], [1, 0, 1], [0, 1, 1], [1, 1, 1]]
assert np.array_equal(m.mesh_size, [2, 2, 2])
assert m.array_order == mesh.Mesh.ZYX
assert np.array_equal(expected, indices)
def test_xyz_ordering(self):
m = mesh.Mesh((3, 1, 1), cellsize=(1, 1, 1), array_order=mesh.Mesh.XYZ)
indices = [r for r in m.iter_coords_int()]
expected = [[0, 0, 0], [1, 0, 0], [2, 0, 0]]
assert np.array_equal(m.mesh_size, [3, 1, 1])
assert m.array_order == mesh.Mesh.XYZ
assert np.array_equal(expected, indices)
m = mesh.Mesh((2, 2, 2), cellsize=(1, 1, 1), array_order=mesh.Mesh.XYZ)
indices = [r for r in m.iter_coords_int()]
expected = [[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1],
[1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1]]
assert np.array_equal(m.mesh_size, [2, 2, 2])
assert m.array_order == mesh.Mesh.XYZ
assert np.array_equal(expected, indices)
class TestIterCoords(unittest.TestCase):
def test_zyx_ordering(self):
m = mesh.Mesh((3, 1, 1), cellsize=(1, 1, 1))
coords = [r for r in m.iter_coords()]
expected = [[0.5, 0.5, 0.5], [1.5, 0.5, 0.5], [2.5, 0.5, 0.5]]
assert np.array_equal(expected, coords)
| 1,909 | 38.791667 | 79 |
py
|
finmag
|
finmag-master/src/finmag/util/oommf/lattice.py
|
# FinMag - a thin layer on top of FEniCS to enable micromagnetic multi-physics simulations
# Copyright (C) 2012 University of Southampton
# Do not distribute
#
# CONTACT: [email protected]
#
# AUTHOR(S) OF THIS FILE: Matteo Franchin
'''
This module provides the Lattice class to describe multi dimensional
rectangular grids.
'''
__all__ = ["first_difference", "parse_lattice_spec",
"Lattice", "FieldLattice"]
import numpy
import collections
def first_difference(la, lb, reverse=False):
"""Given two lists 'la' and 'lb', returns the index at which the two lists
differ or len(la) if the first entries in 'lb' match with all the entries
of 'la'. If reverse=True counts the digits from the last one.
In particular, first_difference(a, b, reverse=True) is equivalent to:
ra, rb = list(a), list(b)
ra.reverse(); rb.reverse()
first_difference(ra, rb)
"""
if reverse:
len_la = len(la)
for i in range(len_la):
ri = -1 - i
if la[ri] != lb[ri]:
return i
return len_la
else:
for i, a in enumerate(la):
if a != lb[i]:
return i
        return len(la)
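# Examples:
#     first_difference([1, 2, 3], [1, 2, 4]) == 2
#     first_difference([1, 2], [1, 2, 3]) == 2   # all of 'la' matches
#     first_difference([1, 2, 3], [9, 2, 3], reverse=True) == 2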
def parse_lattice_spec(s):
"""The lattice specification should be a string such as "-5,10,5/0.5,2,2",
which defines a two dimensional lattice where the x coordinate goes
from -5 to 10 in 5 steps, while the y coordinate goes from 0.5 to 2 in 2
steps. Another example is "-5,10,5/1.23,1.23,1", which defines a one
dimensional lattice (since the y component is fixed to 1.23).
The same can be expressed also as "-5,10,5/1.23".
"""
def parse_dim_spec(dim_spec):
try:
nums = dim_spec.split(',')
if len(nums) == 1:
x_min = x_max = nums[0]
num_steps = 1
else:
x_min, x_max, num_steps = nums
except:
raise ValueError('Error in lattice specification: '
+ parse_lattice_spec.__doc__)
return [float(x_min), float(x_max), int(num_steps)]
return [parse_dim_spec(spec) for spec in s.split('/')]
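# Examples:
#     parse_lattice_spec("-5,10,5/0.5,2,2") == [[-5.0, 10.0, 5], [0.5, 2.0, 2]]
#     parse_lattice_spec("-5,10,5/1.23") == [[-5.0, 10.0, 5], [1.23, 1.23, 1]]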
class Lattice(object):
"""This class allows to define a n-dimensional square lattice and perform
various operations on it. In particular it allows to iterate over the
points of the lattice. No storage is needed for this (a Lattice of one
millions of points doesn't require more memory that a Lattice of 1 point).
The points of the lattice can be referred univocally by index."""
def __init__(self, min_max_num_list, order="F", reduction=0.0):
"""Creates a lattice given a list containing, for each dimension,
the corresponding minimum and maximum coordinate and the number of
points in which it is discretised. Something like:
[(x_min, x_max, x_num), (y_min, y_max, y_num), ...]
Alternatively, a string is accepted following the same specification
accepted by the function 'parse_lattice_spec'.
"""
if type(min_max_num_list) == str:
min_max_num_list = parse_lattice_spec(min_max_num_list)
self.min_max_num_list = list(min_max_num_list)
self.dim = len(min_max_num_list)
self.order = order
self.reduction = reduction
if order not in ["C", "F"]:
raise ValueError("Array order should be either 'C' or 'F'.")
def __repr__(self):
return "Lattice(%s)" % self.min_max_num_list
def __add__(self, right):
reduction = max(self.reduction, right.reduction)
return Lattice(self.min_max_num_list + right.min_max_num_list,
reduction=reduction, order=self.order)
def _combine_idx(self, *slower_faster):
if self.order == "F":
return reduce(lambda x, y: y + x, slower_faster)
else:
return reduce(lambda x, y: x + y, slower_faster)
def get_shape(self):
"""Returns the shape of the lattice, i.e. a list containing the number
of points for each dimension of the lattice. Example: [10, 5, 20] for
a 3D lattice made of 10 by 5 by 20 points."""
return [i for _, _, i in self.min_max_num_list]
nodes = property(get_shape)
def get_positions(self, flat=False):
slices = []
for xstart, xend, num_xs in self.min_max_num_list:
if num_xs > 1:
dx = (xend - xstart) / float(num_xs - 1)
s = slice(xstart, xend + 0.5 * dx, dx)
else:
s = slice(xstart, xend + 1.0, xstart + 0.5)
slices.append(s)
ps = numpy.lib.index_tricks.mgrid.__getitem__(slices)
if flat:
ps.shape = (ps.shape[0], -1)
return ps.swapaxes(0, -1)
def _get_stepsizes(self, scale=1.0):
return [(scale * (mx - mn) / (ns - 1) if ns > 1 else (mx - mn))
for mn, mx, ns in self.min_max_num_list]
stepsizes = property(_get_stepsizes)
def _get_min_node_pos(self):
return [mn for mn, _, _ in self.min_max_num_list]
def _get_max_node_pos(self):
return [mx for _, mx, _ in self.min_max_num_list]
min_node_pos = property(_get_min_node_pos)
max_node_pos = property(_get_max_node_pos)
def get_num_points(self):
"""Returns the total number of points in the lattice."""
return reduce(lambda x, y: x * y, self.get_shape())
def get_closest(self, position):
"""Given a position in space, returns the point in the lattice which
is closer to it. What is returned is actually the index of the point
in the lattice."""
def get_closest_i(x, i, min_max_num_list):
x_min, x_max, x_num = min_max_num_list
if x_min < x_max:
return int(round((x_num - 1) * (x - x_min) / (x_max - x_min)))
else:
return 0
return [get_closest_i(position[i], i, min_max_num_list_i)
for i, min_max_num_list_i in enumerate(self.min_max_num_list)]
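    # Hedged example (sketch): on a lattice spanning 0..10 with 11 nodes per
    # axis (spacing 1), the node closest to (2.4, 7.6) has index [2, 8]:
    #
    #     Lattice("0,10,11/0,10,11").get_closest((2.4, 7.6)) == [2, 8]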
def get_pos_from_idx(self, idx):
"""Return the position of the point in the lattice which has the given
index."""
pos = []
for nr, i in enumerate(idx):
x_min, x_max, x_num = self.min_max_num_list[nr]
if x_num > 1:
delta_x = (x_max - x_min) / (x_num - 1)
pos.append(x_min + delta_x * i)
else:
pos.append(x_min)
return pos
def scale(self, factors):
"""Scale the Lattice object by the given factor. If factors is a list
than it is interpreted as a list of factors, one for each corresponding
dimensions. Otherwise, it is interpreted as a factor by which all the
dimensions should be scaled."""
if isinstance(factors, collections.Sequence):
for i, f in enumerate(factors):
mn, mx, nm = self.min_max_num_list[i]
self.min_max_num_list[i] = (mn * f, mx * f, nm)
else:
n = len(self.min_max_num_list)
f = factors
for i in range(n):
mn, mx, nm = self.min_max_num_list[i]
self.min_max_num_list[i] = (mn * f, mx * f, nm)
def _foreach(self, nr_idx, idx, pos, fn, fastest_idx, idx_order):
if nr_idx == fastest_idx:
fn(idx, pos)
else:
x_min, x_max, num_steps = self.min_max_num_list[nr_idx]
x_min += self.reduction
x_max -= self.reduction
xi = x_min
assert num_steps > 0, ("Number of steps is less than 1 for "
"dimension %d of the lattice!" % nr_idx)
if num_steps == 1:
delta_xi = 0.0
else:
delta_xi = (x_max - x_min) / (num_steps - 1)
for i in range(num_steps):
pos[nr_idx] = xi
idx[nr_idx] = i
xi += delta_xi
self._foreach(nr_idx + idx_order, idx, pos, fn,
fastest_idx, idx_order)
def foreach(self, fn):
"""Iterates over all the points in the lattice and, for each of those,
call 'fn(idx, pos)' where 'idx' is the index of the current point,
while 'pos' is its position as given by the method 'get_pos_from_idx'.
"""
idx = [0] * self.dim
pos = [0.0] * self.dim
if self.order == "C":
self._foreach(0, idx, pos, fn, self.dim, 1)
else:
self._foreach(self.dim - 1, idx, pos, fn, -1, -1)
class FieldLattice(object):
def __init__(self, lattice, dim=3, order="F",
data=None, reduction=0.0, scale=None):
if isinstance(lattice, Lattice):
self.lattice = lattice
else:
self.lattice = Lattice(lattice, order=order, reduction=reduction)
if scale != None:
self.lattice.scale(scale)
self.field_dim = dim
nodes = self.lattice.nodes
shape = self.lattice._combine_idx(nodes, [dim])
if data != None:
self.field_data = data
else:
self.field_data = \
numpy.ndarray(dtype=float, shape=shape, order=order)
def set(self, setter):
all_components = [slice(None)]
if self.lattice.order == 'C':
def fn(idx, pos):
self.field_data[idx + all_components] = setter(pos)
else:
def fn(idx, pos):
self.field_data[all_components + idx] = setter(pos)
self.lattice.foreach(fn)
| 9,631 | 36.046154 | 90 |
py
|
finmag
|
finmag-master/src/finmag/util/oommf/ovf.py
|
# FinMag - a thin layer on top of FEniCS to enable micromagnetic multi-physics simulations
# Copyright (C) 2012 University of Southampton
# Do not distribute
#
# CONTACT: [email protected]
#
# AUTHOR(S) OF THIS FILE: Matteo Franchin
"""
Generic library to read/write files using the OOMMF OVF file format.
We support:
- all major OVF versions (OVF 1.0 and 2.0)
- all data modes (binary8, binary4 and text)
- only rectangular mesh types (irregular mesh type is not supported, yet).
Here are few examples illustrating how to use the library:
EXAMPLE 1: reading an OVF file and retrieving the data
from ovf import OVFFile
ovf_file = OVFFile("filename.ovf")
fl = ovf_file.get_field()
# fl is a FieldLattice object, see module lattice.py
# fl.lattice is a Lattice object, describing the mesh (lattice.py)
# fl.field_data is the numpy array containing the data
EXAMPLE 2: creating a new OVF file
from ovf import OVFFile, OVF10, OVF20
# Create the data
fl = FieldLattice("2.5e-9,97.5e-9,20/2.5e-9,47.5e-9,10/2.5e-9,7.5e-9,1")
def setter_function(position):
return [1, 0, 0]
fl.set(setter_function)
# Save it to file
ovf = OVFFile()
ovf.new(fl, version=OVF20, data_type="binary8")
ovf.write("newfile.ovf")
Note that after the 'ovf.new' method has been called, you can customize some
of the fields of the OVF header by accessing it directly under
'ovf.content'. 'ovf.content' gives you access to the content of the header.
For example, if the header contains an entry 'xmin', then you can access it
as 'ovf.content.a_segment.a_header.a_xmin'. However, there are some header
entries which appear only in OVF version 1.0, but not in version 2.0.
One of such entries is 'valueunit', which is valid in v 1.0 but is not valid
in v 2.0 (where it is replaced by the entry 'valueunits'). In practice that
means that the OVF 1.0 files have the entry
ovf.content.a_segment.a_header.a_valueunit
but do not have the entry
ovf.content.a_segment.a_header.a_valueunits
while for OVF 2.0 it is exactly the other way round.
'ovf.content' then provides wrapper entries which work for both version
1.0 and version 2.0. You should then try to use the properties of
'ovf.content' when they are available, rather than using
'ovf.content.a_segment...'.
For example:
h = ovf.content.a_segment.a_header
h.a_title.value = "The title of my OVF file"
h.a_meshunit.value = "m"
ovf.content.valueunits = ["A/m", "A/m", "A/m"]
ovf.write("newfile.ovf")
Here we use:
ovf.content.valueunits = ["A/m", "A/m", "A/m"]
rather than:
h.a_valueunits.value.units = ["A/m", "A/m", "A/m"]
The last line is indeed valid only for OVF 2.0 but fails for OVF 1.0.
The former one, in contrast, works both for version 1.0 and 2.0.
NOTE: The OVF file defines fields over a grid of cubes while the FieldLattice
defines fields over points (the centers of the cubes, actually).
For example:
fl = FieldLattice("1,9,5/1,9,5/1,9,5")
The FieldLattice above defines a cubic lattice of 5x5x5 points: all the ones
obtained varying x, y and z among the values 1, 3, 5, 7, 9.
For example (1, 1, 1) is the first point of the lattice. It lies exactly
at the center of the first cube of the mesh, which occupies the space
between 0-2 in each dimension.
That should be enough to understand the relation between the OVF mesh and
the corresponding FieldLattice. Since the cubes are all equal and the points
are at the center of the cubes, the x-size of the cubes is equal to the
spacing between the nodes along the x-axis. Similarly for the other
directions of space y and z.
If your mesh has just one cubes along one or more directions, then you have
to put some care in specifying the FieldLattice. For example,
fl = FieldLattice("1,3,1/1,3,1/1,3,1")
defines a lattice with just one point (1, 1, 1) corresponding to a cube
with size (2, 2, 2) and that occupies the region of space 0-2 in each
dimension of space. The values (3, 3, 3) are given just to define what
the spacing is, but is not used as a point in the mesh.
"""
__all__ = ["OVF10", "OVF20", "OVFFile", "OVFValueUnits", "OVFValueLabels"]
import struct
from numpy import array, ndarray
from lattice import FieldLattice
# Abbreviations for OVF versions
OVF10 = (1, 0)
OVF20 = (2, 0)
ANY_OVF = [OVF10, OVF20]
def name_normalise(name):
for c in [" ", "\t", "\n"]:
name = name.replace(c, "")
return name.lower()
class OVFReadError(Exception):
pass
class OVFVersionError(Exception):
pass
class OVFNode(object):
def __init__(self, subnodes=[], data=None):
# print "Creating node %s:%s" % (type(self), data)
self._subnodes = list(subnodes)
self._data = data
def _to_str(self, indent=0):
s = " " * indent + "Node %s: data=%s\n" % (type(self), self._data)
for subnode in self._subnodes:
s += subnode._to_str(indent=indent + 2)
return s
def __str__(self):
return self._to_str()
def _get_name(self):
return self._data[0]
def _set_name(self, n):
self._data = (n, self._data[1])
def _get_identity(self):
return name_normalise(self._get_name())
def _get_value(self):
return self._data[1]
def _set_value(self, v):
self._data = (self._data[0], v)
name = property(_get_name, _set_name)
identity = property(_get_identity)
value = property(_get_value, _set_value)
def _add_as_attr(self, obj=None, prefix="a_"):
if obj != None:
assert obj != self
setattr(obj, prefix + self.identity, self)
for subnode in self._subnodes:
subnode._add_as_attr(self, prefix=prefix)
def read(self, stream, root=None):
raise NotImplementedError("OVFNode.read not implemented!")
def write(self, stream, root=None):
raise NotImplementedError("OVFNode.write not implemented!")
class OVFSectionNode(OVFNode):
required = None
def __init__(self, value=[], data=None):
OVFNode.__init__(self, value, data)
name, value = data
self.section_action = lvalue = value.lower()
assert lvalue in ["begin", "end"], "lvalue is %s" % lvalue
self.received = {}
def read(self, stream, root=None):
while True:
node = read_node(stream)
if node == None:
return
node_name = node.name
self._subnodes.append(node)
self.received[node_name] = node
setattr(self, "a_" + node.identity, node)
assert self != node
if isinstance(node, OVFSectionNode):
sa = node.section_action
if sa == "begin":
node.read(stream, root=root)
elif sa == "end":
self._end_section(node_name)
return
else:
raise OVFReadError("Expected section end, but got '%s'."
% node_name)
def _end_section(self, name):
expected = self.name
if name != expected:
raise OVFReadError("Expected end of section %s, but got end "
"of section %s." % (expected, name))
# We check wether we got all we needed
missing_value = []
if self.required != None:
for required_value in self.required:
if not self.received.has_key(required_value):
missing_value.append(required_value)
if missing_value:
raise OVFReadError("Missing entries from %s section: %s."
% (name, ", ".join(missing_value)))
def write(self, stream, root=None):
if self.section_action == "begin":
stream.write_line("# Begin: %s" % self.name)
else:
stream.write_line("# End: %s" % self.name)
for n in self._subnodes:
n.write(stream, root=root)
class OVFValueNode(OVFNode):
def write(self, stream, root=None):
v = self.value
if v != None:
stream.write_line("# %s: %s" % (self.name, self.value))
class OVFType:
def __init__(self, s):
pieces = s.lower().split()
assert pieces[0] in ["oommf", "oommf:"], \
"Unrecognised OVF version string (%s)" % s
mesh_type = None
if pieces[1] == "ovf":
version_str = pieces[2]
elif pieces[2] == "mesh":
mesh_type = pieces[1]
version_str = pieces[3]
if version_str.startswith("v"):
version_str = version_str[1:]
if version_str in ["0.0a0", "0.99", "1.0"]:
version = OVF10
elif version_str in ["2.0"]:
version = OVF20
else:
print ("Unknown OVF version '%s'. Assuming version 2.0.")
version = OVF20
version_str = "2.0"
self.version_str = version_str
self.version = version
self.mesh_type = mesh_type
def __str__(self):
if self.version == OVF10:
return "OOMMF: %s mesh v%s" % (self.mesh_type, self.version_str)
else:
return "OOMMF OVF %s" % self.version_str
def split_strings(s, delim='"'):
"""Similar to s.split() but do not split whatherver is included between
two delimiters."""
OUTSIDE, INSIDE, SPACE = range(3)
state = SPACE
n = len(s)
i = 0
begin = 0
pieces = []
while i < n:
c = s[i]
inc = 1
if state == SPACE:
if not c.isspace():
begin = i
                # A token may begin with the delimiter itself.
                state = INSIDE if c == delim else OUTSIDE
elif state == OUTSIDE:
if c.isspace():
pieces.append(s[begin:i])
state = SPACE
elif c == delim:
state = INSIDE
else: # state == INSIDE
if c == delim:
state = OUTSIDE
i += inc
if state != SPACE:
pieces.append(s[begin:])
return pieces
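# Example (note that the delimiters are kept in the returned pieces):
#     split_strings('x "a b" y') == ['x', '"a b"', 'y']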
class OVFValueUnits:
def __init__(self, s):
self.units = s.split() if type(s) == str else s
def __str__(self):
return " ".join(self.units)
class OVFValueLabels:
def __init__(self, s):
self.labels = split_strings(s) if type(s) == str else s
def __str__(self):
return " ".join(['"%s"' % l for l in self.labels])
# List of known values in OOMMF file. Each entry is a tuple specifying:
# (the name of the field, the type of the field, a description,
#  the supported OVF version, the context)
# The version and context are optional. If the version is omitted,
# ANY_OVF is assumed; if the context is omitted, the value is assumed to
# belong to the header.
known_values_list = [
("Segment count", int, "Number of segments in data file", ANY_OVF, "root"),
("Title", str, "Title/long filename of the file"),
("meshtype", str, "Mesh type"),
("meshunit", str, "Fundamental mesh measurement unit"),
("xmin", float, "Minimum x coordinate of the mesh"),
("ymin", float, "Minimum y coordinate of the mesh"),
("zmin", float, "Minimum z coordinate of the mesh"),
("xmax", float, "Maximum x coordinate of the mesh"),
("ymax", float, "Maximum y coordinate of the mesh"),
("zmax", float, "Maximum z coordinate of the mesh"),
("valuedim", int, "Dimension of the data", OVF20),
("valuelabels", OVFValueLabels,
"Labels for each dimension of the field", OVF20),
("valueunit", str, "Units for data values", OVF10),
("valueunits", OVFValueUnits,
"Units for each dimension of the field.", OVF20),
("valuemultiplier", float,
"Multiply data values by this to get true value in valueunit-s", OVF10),
("ValueRangeMaxMag", float, "Maximum value of data (used as hint)", OVF10),
("ValueRangeMinMag", float, "Minimum value of data (used as hint)", OVF10),
("Desc", str, "Extra lines used by postprocessing programs"),
("xbase", float, "x coordinate of first point in data section"),
("ybase", float, "y coordinate of first point in data section"),
("zbase", float, "z coordinate of first point in data section"),
("xnodes", int, "Number of cells along x dimension in the mesh"),
("ynodes", int, "Number of cells along y dimension in the mesh"),
("znodes", int, "Number of cells along z dimension in the mesh"),
("xstepsize", float, "Distance between adjacent grid points."),
("ystepsize", float, "Distance between adjacent grid points."),
("zstepsize", float, "Distance between adjacent grid points."),
("boundary", str, "List of (x, y, z) triples specifying the vertices of a "
"boundary frame. Optional.", OVF10)
# ^^^ I didn't find any examples of what this looks like. I then use str
# for the type.
]
# Build a dictionary corresponding to known_values_list
known_values = {}
for known_value_tuple in known_values_list:
value_name = name_normalise(known_value_tuple[0])
known_values[value_name] = known_value_tuple
class OVFVersionNode(OVFNode):
def write(self, stream, root=None):
v = self.value
if v != None:
stream.write_line("# %s" % self.value)
class OVFSegmentSectionNode(OVFSectionNode):
required = ["Header"]
class OVFHeaderSectionNode(OVFSectionNode):
pass
def _info_binary(oommf_version, data_size):
endianness = '!' if oommf_version == OVF10 else '<'
if data_size == 8:
float_type = 'd'
expected_tag = 123456789012345.0
else:
assert data_size == 4
float_type = 'f'
expected_tag = 1234567.0
return endianness, float_type, expected_tag
class OVFDataSectionNode(OVFSectionNode):
def __init__(self, value=[], data=None):
OVFSectionNode.__init__(self, value, data)
self.mesh_type = None
self.data_type = None
self.nodes = None
self.num_nodes = None
self.num_stored_nodes = None
self.floats_per_node = None
self.field = None
def _get_identity(self):
return "data"
identity = property(_get_identity)
def _retrieve_info_from_root(self, root):
h = root.a_segment.a_header
xn, yn, zn = self.nodes = \
(h.a_xnodes.value, h.a_ynodes.value, h.a_znodes.value)
self.num_nodes = xn * yn * zn
field_dim = root.field_dim
self.mesh_type = root.mesh_type
if self.mesh_type == "rectangular":
self.floats_per_node = field_dim
self.num_stored_nodes = self.num_nodes
else:
assert self.mesh_type == "irregular"
self.floats_per_node = 3 + field_dim
self.num_stored_nodes = h.a_pointcount
self.data_type = name_normalise(self.name)
def read(self, stream, root=None):
self._retrieve_info_from_root(root)
if self.data_type == 'databinary8':
self._read_binary(stream, root=root, data_size=8)
elif self.data_type == 'databinary4':
self._read_binary(stream, root=root, data_size=4)
elif self.data_type == 'datatext':
self._read_ascii(stream, root=root)
else:
raise OVFReadError("Unknown data type '%s' in OVF file."
% self.name)
# Get to end of section
while True:
l = stream.next_line()
if l.startswith("# End:"):
return
def _read_binary(self, stream, root=None, data_size=8):
endianness, float_type, expected_tag = \
_info_binary(root.a_oommf.value.version, data_size)
fmt = endianness + float_type
verification_tag, = struct.unpack(fmt, stream.read_bytes(data_size))
if verification_tag != expected_tag:
raise OVFReadError("Data carries wrong signature: got '%s' but "
"'%s' was expected. This usually means that "
"the file is corrupted or is not being red "
"correctly."
% (verification_tag, expected_tag))
num_floats = self.num_stored_nodes * self.floats_per_node
fmt = endianness + float_type * num_floats
data = stream.read_bytes(num_floats * data_size)
big_float_tuple = struct.unpack(fmt, data)
# Reshape the data
xn, yn, zn = self.nodes
fn = self.floats_per_node
self.field = array(big_float_tuple).reshape((fn, xn, yn, zn),
order="F")
def _read_ascii(self, stream, root=None):
semiflat_array = ndarray(dtype='float', order="F",
shape=(self.floats_per_node, self.num_nodes))
for i in range(self.num_nodes):
l = stream.next_line()
v = [float(vi) for vi in l.split()]
semiflat_array[:, i] = v
# Reshape the data
xn, yn, zn = self.nodes
fn = self.floats_per_node
self.field = semiflat_array.reshape((fn, xn, yn, zn))
def write(self, stream, root=None):
self._retrieve_info_from_root(root)
stream.write_line("# Begin: %s" % self.name)
if self.data_type == "databinary8":
self._write_binary(stream, root=root, data_size=8)
elif self.data_type == "databinary4":
self._write_binary(stream, root=root, data_size=4)
elif self.data_type == "datatext":
self._write_ascii(stream, root=root)
else:
raise ValueError("Unrecognised data type '%s'"
% self.data_type)
stream.write_line("# End: %s" % self.name)
def _write_binary(self, stream, root=None, data_size=8):
endianness, float_type, expected_tag = \
_info_binary(root.a_oommf.value.version, data_size)
fmt = endianness + float_type
out_data = struct.pack(fmt, expected_tag)
num_floats = self.num_stored_nodes * self.floats_per_node
fmt = endianness + float_type * num_floats
flat_array = self.field.ravel('F')
out_data += struct.pack(fmt, *flat_array) + "\n"
stream.write(out_data)
def _write_ascii(self, stream, root=None):
semiflat_array = \
self.field.reshape((self.floats_per_node, self.num_nodes))
for i in range(self.num_nodes):
v = semiflat_array[:, i]
stream.write_line(" ".join([repr(vi) for vi in v]))
def remove_comment(line, marker="##"):
"""Return the given line, without the part which follows the comment
marker ## (and without the marker itself)."""
i = line.find(marker)
if i < 0:
return line
else:
return line[:i]
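# Example (illustrative):
#   remove_comment("# xmin: 0.0 ## lower mesh bound")  ->  "# xmin: 0.0 "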
def version_node(ver_str):
return OVFVersionNode(data=("oommf", OVFType(ver_str)))
def known_value_node(name, value):
lname = name_normalise(name)
if known_values.has_key(lname):
val_type = known_values[lname][1]
value = val_type(value)
else:
print "Unknown value '%s' while reading OVF file." % name
return OVFValueNode(data=(name, value))
def known_section_node(action, name):
lname = name_normalise(name)
cls = None
if lname == "segment":
cls = OVFSegmentSectionNode
elif lname == "header":
cls = OVFHeaderSectionNode
elif lname.startswith("data"):
cls = OVFDataSectionNode
else:
print "Unknown section '%s' while reading OVF file." % name
cls = OVFSectionNode
return cls(data=(name, action))
def read_node(stream):
l = None
while l in ["", "#", None]:
l = stream.next_line()
if l is None:
return None
else:
l = remove_comment(l).lstrip()
if not l.startswith("#"):
raise OVFReadError("Error reading OVF header. "
"Expected #, but got '%s'" % l)
l = l[1:].lstrip()
ll = l.lower()
if ll.startswith("oommf"):
return version_node(ll)
else:
piece = l.split(":", 1)
name = piece[0].strip()
lname = name_normalise(name)
value = None
if len(piece) > 1:
value = piece[1].strip()
if lname in ["begin", "end"]:
return known_section_node(name, value)
else:
return known_value_node(name, value)
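# Sketch of the header lines handled above (illustrative):
#   "# OOMMF: rectangular mesh v1.0"  ->  version_node(...)
#   "# Begin: Segment"                ->  known_section_node("Begin", "Segment")
#   "# xnodes: 20"                    ->  known_value_node("xnodes", "20")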
class OVFRootNode(OVFSectionNode):
required = ["Segment count"]
def __init__(self):
OVFSectionNode.__init__(self, data=("main", "begin"))
def _get_version(self):
return self.a_oommf.value.version
ovf_version = property(_get_version, None, None, "Version of OVF file.")
def _get_mesh_type(self):
v = self.ovf_version
if v == OVF10:
return self.a_oommf.value.mesh_type
else:
return self.a_segment.a_header.a_meshtype.value
mesh_type = property(_get_mesh_type, None, None,
"Mesh type of the OVF file "
"(a string = rectangular/irregular)")
def _get_field_dim(self):
if self.ovf_version == OVF10:
return 3
else:
return self.a_segment.a_header.a_valuedim.value
field_dim = property(_get_field_dim, None, None, "The size of the field.")
def _get_valueunits(self):
v = self.ovf_version
if v == OVF10:
return self.a_segment.a_header.a_valueunit.value
else:
return self.a_segment.a_header.a_valueunits.value
def _set_valueunits(self, units):
units = [units] if type(units) == str else units
v = self.ovf_version
if v == OVF10:
units_are_all_the_same = units.count(units[0]) == len(units)
assert units_are_all_the_same, \
("OVF 1.0 does not support fields having components with "
"different units.")
self.a_segment.a_header.a_valueunit.value = str(units[0])
else:
assert v == OVF20
def unit_setter(idx):
return units[idx] if idx < len(units) else units[-1]
us = [unit_setter(idx) for idx in range(self.field_dim)]
self.a_segment.a_header.a_valueunits.value = OVFValueUnits(us)
valueunits = property(_get_valueunits, _set_valueunits, None,
"The units of the components of the field "
"(one string or a list of strings).")
def _get_valuelabels(self):
v = self.ovf_version
if v == OVF20:
return self.a_segment.a_header.a_valuelabels.value
else:
raise OVFVersionError("valuelabels is only available in OVF 2.0.")
def _set_valuelabels(self, labels):
v = self.ovf_version
if v == OVF20:
if type(labels) == str:
labels = ["%s_%d" % (labels, i)
for i in range(self.field_dim)]
self.a_segment.a_header.a_valuelabels.value = \
OVFValueLabels(labels)
valuelabels = property(_get_valuelabels, _set_valuelabels, None,
"The labels for the components of the field "
"(a list of names, one for each component. when "
"setting you can provide also a string, used as "
"the basename of the field: _0, _1, _2, ... are "
"appended to each component).")
def write(self, stream, root=None):
for n in self._subnodes:
n.write(stream, root=root)
class OVFStream(object):
def __init__(self, filename, mode="r"):
if type(filename) == str:
self.filename = filename
self.f = open(filename, mode)
else:
self.filename = None
self.f = filename
self.no_line = 0
self.lines = []
def __del__(self):
self.f.close()
def next_line(self):
if self.no_line < len(self.lines):
l = self.lines[self.no_line]
else:
n = self.no_line - len(self.lines)
for _ in range(n + 1):
l = self.f.readline()
if len(l) == 0:
return None
l = l[:-1]
self.lines.append(l)
self.no_line += 1
return l
def read_bytes(self, num_bytes):
l = self.f.read(num_bytes)
self.lines.append(l)
self.no_line = len(self.lines)
return l
def read_lines_ahead(self):
self.lines += self.f.readlines()
def write(self, data):
self.f.write(data)
def write_line(self, line):
self.f.write(line + "\n")
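# Note (illustrative): OVFStream keeps everything it has read in self.lines
# and tracks the read position in self.no_line; read_bytes() appends its raw
# chunk and fast-forwards no_line, so a subsequent next_line() continues
# from the file position just after the binary data.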
class OVFFile:
def __init__(self, filename=None):
self.content = OVFRootNode()
if filename is not None:
self.read(filename)
def new(self, fieldlattice, version=OVF10, mesh_type="rectangular",
data_type="binary8"):
available_data_types = {"text": "Data Text",
"binary4": "Data Binary 4",
"binary8": "Data Binary 8"}
if available_data_types.has_key(data_type):
data_type = available_data_types[data_type]
else:
available_choices = ", ".join(available_data_types.keys())
raise ValueError("Wrong choice of data_type. Available choices "
"are: %s." % available_choices)
assert mesh_type == "rectangular", "Irregular meshes are not " \
"supported, yet!"
assert fieldlattice.lattice.order == "F", \
"FieldLattice should have Fortran ordering!"
assert fieldlattice.lattice.dim == 3, \
"The FieldLattice should be defined over a 3D mesh."
# Generate the root node
root_node = OVFRootNode()
# Append version info
if version == OVF10:
t = OVFType("OOMMF: %s mesh v1.0" % mesh_type)
assert fieldlattice.field_dim == 3, \
("OVF 1.0 only supports fields with dimension 3 (such as "
"the magnetisation, for example)")
else:
t = OVFType("OOMMF OVF 2.0")
assert fieldlattice.field_dim >= 1, \
("You are trying to write a field with dimension 0.")
root_node._subnodes.append(OVFVersionNode(data=("OOMMF", t)))
# Append segment count and segment section
root_node._subnodes.append(OVFValueNode(data=("Segment count", "1")))
segment_node = OVFSectionNode(data=("Segment", "Begin"))
root_node._subnodes.append(segment_node)
# Generate the header
header_node = OVFSectionNode(data=("Header", "Begin"))
segment_node._subnodes.append(header_node)
for known_v in known_values_list:
v_name = known_v[0]
v_type = known_v[1]
v_ver = known_v[3] if len(known_v) > 3 else ANY_OVF
v_context = known_v[4] if len(known_v) > 4 else "header"
if v_context == "header" and \
(v_ver == ANY_OVF or v_ver == version):
v_node = OVFValueNode(data=(v_name, None))
header_node._subnodes.append(v_node)
header_node._subnodes.append(OVFSectionNode(data=("Header", "End")))
# Generate the data segment
fl = fieldlattice
l = fieldlattice.lattice
data_node = OVFDataSectionNode(data=(data_type, "Begin"))
segment_node._subnodes.append(data_node)
data_node._subnodes.append(OVFSectionNode(data=(data_type, "End")))
data_node.field = fl.field_data
segment_node._subnodes.append(OVFSectionNode(data=("Segment", "End")))
# Add subnodes as attributes for better accessibility
root_node._add_as_attr()
# Now write proper values in the header fields
h = root_node.a_segment.a_header
h.a_xnodes.value, h.a_ynodes.value, h.a_znodes.value = l.nodes
ss = l.stepsizes
hss = [0.5 * ssi for ssi in ss]
h.a_xstepsize.value, h.a_ystepsize.value, h.a_zstepsize.value = ss
h.a_xbase.value, h.a_ybase.value, h.a_zbase.value = hss
min_mesh_pos = [nmn - d for nmn, d in zip(l.min_node_pos, hss)]
max_mesh_pos = [nmx + d for nmx, d in zip(l.max_node_pos, hss)]
h.a_xmin.value, h.a_ymin.value, h.a_zmin.value = min_mesh_pos
h.a_xmax.value, h.a_ymax.value, h.a_zmax.value = max_mesh_pos
# Final "decorations"
h.a_title.value = "Title"
h.a_meshtype.value = mesh_type
h.a_meshunit.value = "1.0"
if version == OVF10:
h.a_valueunit.value = "1.0"
h.a_valuemultiplier.value = 1.0
h.a_valuerangeminmag.value = min(fl.field_data.flat)
h.a_valuerangemaxmag.value = max(fl.field_data.flat)
else:
h.a_valuedim.value = fl.field_dim
h.a_valueunits.value = OVFValueUnits(" 1.0" * fl.field_dim)
h.a_valuelabels.value = OVFValueLabels(' "1.0"' * fl.field_dim)
# Finally replace self.content
self.content = root_node
def get_field(self):
root_node = self.content
segment_node = root_node.a_segment
h = segment_node.a_header
ss = [h.a_xstepsize, h.a_ystepsize, h.a_zstepsize]
dx, dy, dz = [0.5 * ssi.value for ssi in ss]
min_max_ndim = \
[(h.a_xmin.value - dx, h.a_xmax.value + dx, h.a_xnodes.value),
(h.a_ymin.value - dy, h.a_ymax.value + dy, h.a_ynodes.value),
(h.a_zmin.value - dz, h.a_zmax.value + dz, h.a_znodes.value)]
field_data = segment_node.a_data.field
field_dim = root_node.field_dim
return FieldLattice(min_max_ndim, dim=field_dim,
data=field_data, order='F')
def read(self, stream):
if not isinstance(stream, OVFStream):
stream = OVFStream(stream)
self.content.read(stream, root=self.content)
self.content._end_section("main")
def write(self, stream):
if not isinstance(stream, OVFStream):
stream = OVFStream(stream, mode="w")
self.content.write(stream, root=self.content)
if __name__ == "__main__no":
import sys
print "Reading"
ovf = OVFFile(sys.argv[1])
print "Writing"
#ovf.content.a_segment.a_databinary8.name = "Data Binary 4"
ovf.write(sys.argv[2])
print "Done"
elif __name__ == "__main__":
# Here is how to create an OVF file from a FieldLattice object
fl = FieldLattice("2.5e-9,97.5e-9,20/2.5e-9,47.5e-9,10/2.5e-9,7.5e-9,1",
order="F")
fl.set(lambda pos: [1, 0, 0])
ovf = OVFFile()
ovf.new(fl, version=OVF20, data_type="binary8")
ovf.content.a_segment.a_header.a_title.value = "MyFile"
ovf.write("new-v1.ovf")
| 30,985 | 32.753813 | 90 |
py
|
finmag
|
finmag-master/src/finmag/util/oommf/init.py
|
from oommf_calculator import calculate_oommf_fields
import numpy as np
from mesh import MeshField
def mesh_spec(mesh):
return """
Specify Oxs_BoxAtlas:atlas {
xrange {%25.18e %25.18e}
yrange {%25.18e %25.18e}
zrange {%25.18e %25.18e}
}
Specify Oxs_RectangularMesh:mesh {
cellsize {%25.18e %25.18e %25.18e}
atlas Oxs_BoxAtlas:atlas
}
""" % (
mesh.origin[0], mesh.endpoint[0],
mesh.origin[1], mesh.endpoint[1],
mesh.origin[2], mesh.endpoint[2],
mesh.cell_size[0], mesh.cell_size[1], mesh.cell_size[2],
)
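# Illustrative output (hypothetical numbers): for a mesh with origin
# (0, 0, 0), endpoint (100e-9, 25e-9, 3e-9) and cell size (5e-9, 5e-9, 3e-9),
# mesh_spec() returns a MIF fragment of the form
#   Specify Oxs_BoxAtlas:atlas { xrange {0 1e-07} yrange {0 2.5e-08} zrange {0 3e-09} }
#   Specify Oxs_RectangularMesh:mesh { cellsize {5e-09 5e-09 3e-09} atlas Oxs_BoxAtlas:atlas }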
def oommf_demag(s0, Ms):
assert type(s0) is MeshField and s0.dims == (3,)
res = calculate_oommf_fields("demag", s0, Ms, mesh_spec(
s0.mesh) + "\nSpecify Oxs_Demag {}", fields=["Oxs_Demag::Field", "Oxs_TimeDriver::Spin"])
demag_field = res['Oxs_Demag-Field']
s_field = res['Oxs_TimeDriver-Spin']
assert demag_field.dims == (3,)
if not (np.max(np.abs(s_field.flat - s0.flat)) < 1e-14):
print s_field.flat
print s0.flat
assert np.max(np.abs(s_field.flat - s0.flat)) < 1e-14
return demag_field
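# Usage sketch (hypothetical values; Mesh lives in finmag.util.oommf.mesh):
# s0 must be a 3-component MeshField, e.g. a uniform magnetisation along z
# on a thin-film mesh:
#   mesh = Mesh((100, 25, 1), cellsize=(5e-9, 5e-9, 3e-9))
#   s0 = mesh.new_field(3)
#   s0.flat[2, :] = 1.0
#   H_demag = oommf_demag(s0, 8.6e5)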
def oommf_uniform_exchange(s0, Ms, A):
assert type(s0) is MeshField and s0.dims == (3,)
res = calculate_oommf_fields("uniform_exchange", s0, Ms, mesh_spec(s0.mesh) + "\nSpecify Oxs_UniformExchange { A %25.15e }" % A,
fields=["Oxs_UniformExchange::Field", "Oxs_TimeDriver::Spin"])
exchange_field = res['Oxs_UniformExchange-Field']
s_field = res['Oxs_TimeDriver-Spin']
assert exchange_field.dims == (3,)
if not (np.max(np.abs(s_field.flat - s0.flat)) < 1e-14):
print s_field.flat
print s0.flat
assert np.max(np.abs(s_field.flat - s0.flat)) < 1e-14
return exchange_field
def oommf_uniaxial_anisotropy(m0, Ms, K1, axis):
assert type(m0) is MeshField and m0.dims == (3,)
res = calculate_oommf_fields("uniaxial_anisotropy", m0, Ms, mesh_spec(m0.mesh) + "\nSpecify Oxs_UniaxialAnisotropy { K1 %25.15e axis { %25.15e %25.15e %25.15e } }" % (K1, axis[0], axis[1], axis[2]),
fields=["Oxs_UniaxialAnisotropy::Field", "Oxs_TimeDriver::Spin"])
uniaxial_anisotropy_field = res['Oxs_UniaxialAnisotropy-Field']
m_field = res['Oxs_TimeDriver-Spin']
assert uniaxial_anisotropy_field.dims == (3,)
if not (np.max(np.abs(m_field.flat - m0.flat)) < 1e-14):
print m_field.flat
print m0.flat
assert np.max(np.abs(m_field.flat - m0.flat)) < 1e-14
return uniaxial_anisotropy_field
def oommf_cubic_anisotropy(m0, Ms, u1, u2, K1, K2=0, K3=0):
assert type(m0) is MeshField and m0.dims == (3,)
res = calculate_oommf_fields("cubic_anisotropy", m0, Ms, mesh_spec(m0.mesh) + """\nSpecify Southampton_CubicAnisotropy8 {
K1 %25.15e K2 %25.15e K3 %25.15e axis1 { %25.15e %25.15e %25.15e }
axis2 { %25.15e %25.15e %25.15e } }""" % (K1, K2, K3, u1[0], u1[1], u1[2], u2[0], u2[1], u2[2]),
fields=["Southampton_CubicAnisotropy8::Field", "Oxs_TimeDriver::Spin"])
cubic_anisotropy_field = res['Southampton_CubicAnisotropy8-Field']
m_field = res['Oxs_TimeDriver-Spin']
assert cubic_anisotropy_field.dims == (3,)
if not (np.max(np.abs(m_field.flat - m0.flat)) < 1e-14):
print m_field.flat
print m0.flat
assert np.max(np.abs(m_field.flat - m0.flat)) < 1e-14
return cubic_anisotropy_field
def oommf_fixed_zeeman(s0, Ms, H):
assert type(s0) is MeshField and s0.dims == (3,)
res = calculate_oommf_fields("fixed_zeeman", s0, Ms, mesh_spec(s0.mesh) + "\nSpecify Oxs_FixedZeeman { field {%25.16e %25.16e %25.16e} }" % (H[0], H[1], H[2]),
fields=["Oxs_FixedZeeman::Field", "Oxs_TimeDriver::Spin"])
field = res['Oxs_FixedZeeman-Field']
s_field = res['Oxs_TimeDriver-Spin']
assert field.dims == (3,)
if not (np.max(np.abs(s_field.flat - s0.flat)) < 1e-14):
print s_field.flat
print s0.flat
assert np.max(np.abs(s_field.flat - s0.flat)) < 1e-14
return field
def oommf_dmdt(s0, Ms, A, H, alpha, gamma_G):
assert type(s0) is MeshField and s0.dims == (3,)
# disable everything besides the external field for better comparison.
res = calculate_oommf_fields("dmdt", s0, Ms, mesh_spec(s0.mesh) +
"\nSpecify Oxs_FixedZeeman { field {%25.16e %25.16e %25.16e} }" % (
H[0], H[1], H[2]),
alpha=alpha, gamma_G=gamma_G,
fields=["Oxs_RungeKuttaEvolve:evolver:dm/dt", "Oxs_TimeDriver::Spin"])
field = res['Oxs_RungeKuttaEvolve-evolver-dm_dt']
s_field = res['Oxs_TimeDriver-Spin']
assert field.dims == (3,)
if not (np.max(np.abs(s_field.flat - s0.flat)) < 1e-14):
print s_field.flat
print s0.flat
assert np.max(np.abs(s_field.flat - s0.flat)) < 1e-14
return field
| 5,002 | 36.901515 | 202 |
py
|
finmag
|
finmag-master/src/finmag/util/oommf/__init__.py
|
# FinMag - a thin layer on top of FEniCS to enable micromagnetic multi-physics simulations
# Copyright (C) 2012 University of Southampton
# Do not distribute
#
# CONTACT: [email protected]
#
# AUTHOR(S) OF THIS FILE: Dmitri Chernyshenko ([email protected])
from init import *
| 290 | 28.1 | 90 |
py
|
finmag
|
finmag-master/src/finmag/util/oommf/mesh.py
|
# FinMag - a thin layer on top of FEniCS to enable micromagnetic multi-physics simulations
# Copyright (C) 2012 University of Southampton
# Do not distribute
#
# CONTACT: [email protected]
#
# AUTHOR(S) OF THIS FILE: Dmitri Chernyshenko ([email protected])
import numpy as np
__all__ = ["Mesh", "MeshField"]
# A scalar/vector/tensor field on a mesh
class MeshField(object):
def __init__(self, mesh, arr, dims):
dims = tuple(dims)
assert mesh.is_compatible(arr, len(dims))
assert tuple(arr.shape[:len(dims)]) == dims
self.flat = arr.view()
self.nonflat = arr.view()
self.nonflat.shape = np.append(dims, mesh.mesh_size_ao)
self.flat.shape = np.append(dims, np.prod(mesh.mesh_size_ao))
self.dims = dims
self.mesh = mesh
def to_xyz_array(self):
n = len(self.dims)
return np.ascontiguousarray(np.transpose(self.nonflat, axes=np.append(np.arange(n), n + np.argsort(self.mesh.array_order))))
# Only implemented for vector fields, i.e. len(dims) == 1
def subfield(self, a, b):
assert len(self.dims) == 1
return MeshField(self.mesh, self.flat[a:b], (b - a,))
def copy(self):
return MeshField(self.mesh, self.flat.copy(), self.dims)
# A 3D rectangular mesh
class Mesh(object):
# The tuple is the indices for transposing from XYZ to the array order
XYZ = (0, 1, 2)
ZXY = (2, 0, 1)
ZYX = (2, 1, 0)
# meshsize and cellsize use XYZ order of coordinates
# array_order is the order used by the underlying data arrays
def __init__(self, meshsize, cellsize=None, origin=(0, 0, 0), array_order=ZYX, size=None):
if cellsize is None and size is not None:
cellsize = np.array(
size, dtype=float) / np.array(meshsize, dtype=float)
self.mesh_size = np.array(meshsize, dtype=int)
self.cell_size = np.array(cellsize, dtype=float)
self.origin = np.array(origin, dtype=float)
self.array_order = array_order
self.n = np.prod(self.mesh_size)
self.mesh_size_ao = self.mesh_size[list(array_order)]
self.cell_size_ao = self.cell_size[list(array_order)]
# Check validity
assert self.mesh_size.shape == (3,)
assert self.cell_size.shape == (3,)
assert self.origin.shape == (3,)
assert len(array_order) == 3
assert self.cell_size[0] > 0 and self.cell_size[
1] > 0 and self.cell_size[2] > 0
assert self.mesh_size[0] > 0 and self.mesh_size[
1] > 0 and self.mesh_size[2] > 0
assert all(np.isfinite(self.cell_size)) and all(
np.isfinite(self.origin))
assert sorted(array_order) == [0, 1, 2]
# An array is compatible with the specified mesh if
# a) it is C-contiguous
# b) it has more than one dimension
# and either
# c1) it has dim+1 dimensions and the last dimension is equal to the total number of mesh points
# or
# c2) it has dim+3 dimensions and the last 3 dimensions coincide with
# mesh_size
def is_compatible(self, arr, ndim=1):
if type(arr) is not np.ndarray:
return False
if not arr.flags.contiguous:
return False
if arr.ndim == ndim + 1:
return arr.shape[-1] == self.n
elif arr.ndim == ndim + 3:
return arr.shape[-3] == self.mesh_size_ao[0] and arr.shape[-2] == self.mesh_size_ao[1] and arr.shape[-1] == self.mesh_size_ao[2]
else:
return False
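# Illustrative example: for a mesh with mesh_size (100, 25, 1) and the
# default ZYX array order, mesh_size_ao is (1, 25, 100), so a vector field
# (ndim=1) is compatible with shape (3, 2500) or shape (3, 1, 25, 100).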
# Returns an array of the form [(x_min, x_max, x_num), (y_min, y_max,
# y_num), (z_min, z_max, z_num)]
def get_lattice_spec(self):
return [(self.origin[i] + self.cell_size[i] * 0.5, self.origin[i] + self.cell_size[i] * 0.5 + self.cell_size[i] * self.mesh_size[i], self.mesh_size[i]) for i in xrange(3)]
def field_from_xyz_array(self, arr):
assert arr.ndim == 4
assert tuple(arr.shape[1:]) == tuple(self.mesh_size)
res = arr.view()
res.shape = np.append(arr.shape[:1], self.mesh_size)
res = np.ascontiguousarray(
np.transpose(res, axes=np.append([0], 1 + np.array(self.array_order))))
return MeshField(self, res, arr.shape[:1])
def new_field(self, dims):
if type(dims) == int:
dims = [dims]
arr = np.zeros(np.append(dims, [self.n]))
return MeshField(self, arr, dims)
def field_from_array(self, arr, ndim=1):
return MeshField(self, arr, arr.shape[:ndim])
def field_from_flat_array(self, dims, arr):
a = arr.view()
a.shape = (dims, -1)
return MeshField(self, a, [dims])
def iter_coords_int(self):
l, m, n = self.array_order
for i in xrange(self.mesh_size[l]):
for j in xrange(self.mesh_size[m]):
for k in xrange(self.mesh_size[n]):
r = [-1] * 3
r[l] = i
r[m] = j
r[n] = k
yield tuple(r)
def iter_coords(self):
for i in self.iter_coords_int():
yield([self.origin[d] + (0.5 + i[d]) * self.cell_size[d] for d in xrange(3)])
endpoint = property(
lambda self: self.origin + self.cell_size * self.mesh_size)
is_zyx = property(lambda self: self.array_order == Mesh.ZYX)
| 5,347 | 35.882759 | 179 |
py
|
finmag
|
finmag-master/src/finmag/util/oommf/oommf_calculator.py
|
# FinMag - a thin layer on top of FEniCS to enable micromagnetic multi-physics simulations
# Copyright (C) 2012 University of Southampton
# Do not distribute
#
# CONTACT: [email protected]
#
# AUTHOR(S) OF THIS FILE: Dmitri Chernyshenko ([email protected])
import os
import re
import hashlib
import tempfile
import numpy as np
import cStringIO
import sys
import subprocess
import shutil
from finmag.util.helpers import ignored, get_source
from finmag.util.oommf import ovf, lattice
from finmag.util.oommf.mesh import MeshField, Mesh
from subprocess import check_output, CalledProcessError
dirname = os.path.dirname
FINMAG_PKG_DIR = dirname(dirname(dirname(os.path.abspath(__file__))))
TEST_COMPARISON_RESULTS = os.path.join(FINMAG_PKG_DIR, 'tests', 'oommf_results')
CACHE_DIR = os.environ['HOME'] + "/.oommf_calculator"
RUN_DIR = tempfile.mkdtemp(suffix='_oommf_calculator')
if os.environ.has_key('OOMMF_COMMAND'):
OOMMF_COMMAND = os.environ['OOMMF_COMMAND']
else:
OOMMF_COMMAND = 'oommf'
MIF_TEMPLATE = """# MIF 2.1
%(spec)s
Specify Oxs_RungeKuttaEvolve:evolver {
gamma_G %(gamma_G)s
alpha %(alpha)s
method rkf54
}
Specify Oxs_TimeDriver {
basename %(basename)s
evolver :evolver
mesh :mesh
total_iteration_limit 1
Ms %(Ms)s
m0 { Oxs_FileVectorField {
atlas :atlas
norm 1.0
file %(basename)s-start.omf
}}
}
Destination archive mmArchive:oommf_calculator
%(fields)s
"""
SOURCE = get_source(__file__)
def run_oommf(dir, args, **kwargs):
try:
cmd = [OOMMF_COMMAND]
cmd.extend(args)
check_output(cmd, cwd=dir, stderr=subprocess.STDOUT, **kwargs)
except CalledProcessError, ex:
sys.stderr.write(ex.output)
raise Exception("OOMMF invocation failed: " + " ".join(cmd))
except OSError, ex:
sys.stderr.write(ex.strerror + ".\n")
raise Exception(
"Command '{0}' failed. Parameters: '{1}'.".format(cmd[0], " ".join(cmd[1:])))
# Runs an OOMMF mif file contained in str
# Returns a hashtable of field names mapped to arrays compatible with the
# given mesh
def find_cached_result(basename):
test_cached = os.path.join(TEST_COMPARISON_RESULTS, basename)
if os.path.isdir(test_cached):
return test_cached
return os.path.join(CACHE_DIR, basename)
def calculate_oommf_fields(name, s0, Ms, spec=None, alpha=0., gamma_G=0., fields=[]):
assert type(Ms) is float
assert type(s0) is MeshField and s0.dims == (3,)
# Calculate the checksum corresponding to the parameters
m = hashlib.new('md5')
delim = "\n---\n"
m.update(SOURCE + delim)
m.update(name + delim)
m.update("%25.19e%s" % (Ms, delim))
m.update("%25.19e%s" % (alpha, delim))
m.update("%25.19e%s" % (gamma_G, delim))
m.update("%s%s" % (",".join(fields), delim))
m.update(spec + delim)
s = cStringIO.StringIO()
np.save(s, s0.flat)
m.update(s.getvalue())
checksum = m.hexdigest()
# Format the simulation script
basename = "%s_%s" % (name, checksum)
tag = basename.lower()
params = {
'spec': spec,
'basename': basename,
'Ms': "%25.19e" % Ms,
'gamma_G': "%25.19e" % gamma_G,
'alpha': "%25.19e" % alpha,
'tag': tag,
'fields': "\n".join("Schedule %s archive Step 1" % f for f in fields)
}
mif = MIF_TEMPLATE % params
# print mif
with ignored(OSError):
os.makedirs(CACHE_DIR)
# Check if the result is already known
cachedir = find_cached_result(basename)
if not os.path.exists(cachedir):
# Run the simulation
print "Running OOMMF simulation %s..." % basename,
sys.stdout.flush()
dir = os.path.join(RUN_DIR, basename)
with ignored(OSError):
os.makedirs(dir)
# Write the MIF file
mif_file_name = basename + ".mif"
mif_file = open(os.path.join(dir, mif_file_name), "w")
mif_file.write(mif)
mif_file.close()
# Write the starting OMF file
fl = lattice.FieldLattice(s0.mesh.get_lattice_spec())
fl.field_data = s0.flat
# Save it to file
m0_file = ovf.OVFFile()
m0_file.new(fl, version=ovf.OVF10, data_type="binary8")
m0_file.write(os.path.join(dir, basename + "-start.omf"))
# Run the OOMMF simulation
run_oommf(dir, ["boxsi", mif_file_name])
# Move the results to the cache directory
shutil.move(dir, cachedir)
print "success"
# Read the results
fields = {}
for fn in os.listdir(cachedir):
m = re.match("^(.*)_%s-(.*)-00-0000000.o[hvm]f$" % checksum, fn)
if m and m.group(1) == name:
fl = ovf.OVFFile(os.path.join(cachedir, fn)).get_field()
fields[m.group(2)] = s0.mesh.field_from_xyz_array(fl.field_data)
return fields
if __name__ == "__main__":
spec = """set pi [expr 4*atan(1.0)]
set mu0 [expr 4*$pi*1e-7]
Parameter cellsize 5e-9
set Hx -24.6 ;# Applied field in mT
set Hy 4.3
set Hz 0.0
Specify Oxs_BoxAtlas:atlas {
xrange {0 500e-9}
yrange {0 125e-9}
zrange {0 3e-9}
}
Specify Oxs_RectangularMesh:mesh [subst {
cellsize {$cellsize $cellsize 3e-9}
atlas Oxs_BoxAtlas:atlas
}]
Specify Oxs_Demag {}
"""
mesh = Mesh((100, 25, 1), cellsize=(5e-9, 5e-9, 3e-9))
calculate_oommf_fields("testpppp", mesh.new_field(3), 8e5, spec)
| 5,409 | 27.324607 | 90 |
py
|
finmag
|
finmag-master/src/finmag/util/tests/__init__.py
|
# FinMag - a thin layer on top of FEniCS to enable micromagnetic multi-physics simulations
# Copyright (C) 2012 University of Southampton
# Do not distribute
#
# CONTACT: [email protected]
#
# AUTHOR(S) OF THIS FILE: Gabriel Balaban [email protected]
| 256 | 31.125 | 90 |
py
|
finmag
|
finmag-master/src/finmag/util/ode/init.py
|
import finmag.native.sundials as native_ode
import numpy as np
class cvode(object):
def __init__(self, f):
# Keep references to the rhs and to the native integrator; the
# original code discarded the constructed native cvode object.
self.f = f
self.cvode = native_ode.cvode(native_ode.CV_ADAMS, native_ode.CV_FUNCTIONAL)
def scipy_to_cvode_rhs(f):
def cvode_rhs(t, y, ydot):
ydot[:] = f(t, y)
return 0
return cvode_rhs
def scipy_to_cvode_jtimes(jac):
def cvode_jtimes(v, Jv, t, y, fy, tmp):
Jv[:] = np.dot(jac(t, y), v)
return 0
return cvode_jtimes
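# Minimal usage sketch (mirrors the sundials tests elsewhere in this
# repository; f and jac are scipy-style callables, y0 a numpy array):
#   integrator = native_ode.cvode(native_ode.CV_BDF, native_ode.CV_NEWTON)
#   integrator.init(scipy_to_cvode_rhs(f), 0, y0)
#   integrator.set_linear_solver_sp_gmr(native_ode.PREC_NONE)
#   integrator.set_spils_jac_times_vec_fn(scipy_to_cvode_jtimes(jac))
#   integrator.set_scalar_tolerances(1e-8, 1e-8)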
| 469 | 17.076923 | 71 |
py
|
finmag
|
finmag-master/src/finmag/util/ode/__init__.py
|
# FinMag - a thin layer on top of FEniCS to enable micromagnetic multi-physics simulations
# Copyright (C) 2012 University of Southampton
# Do not distribute
#
# CONTACT: [email protected]
#
# AUTHOR(S) OF THIS FILE: Dmitri Chernyshenko ([email protected])
from init import *
| 289 | 31.222222 | 90 |
py
|
finmag
|
finmag-master/src/finmag/util/ode/tests/robertson_ode.py
|
# FinMag - a thin layer on top of FEniCS to enable micromagnetic multi-physics simulations
# Copyright (C) 2012 University of Southampton
# Do not distribute
#
# CONTACT: [email protected]
#
# AUTHOR(S) OF THIS FILE: Dmitri Chernyshenko ([email protected])
import numpy as np
import scipy.integrate
n_rhs_evals = 0
n_jac_evals = 0
def robertson_reset_n_evals():
global n_rhs_evals, n_jac_evals
n_rhs_evals = 0
n_jac_evals = 0
def robertson_rhs(t, y):
global n_rhs_evals
n_rhs_evals += 1
return np.array([
-0.04 * y[0] + 1e4 * y[1] * y[2],
0.04 * y[0] - 1e4 * y[1] * y[2] - 3e7 * y[1] * y[1],
3e7 * y[1] * y[1]
])
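# The right-hand side above encodes the classic Robertson stiff system:
#   y1' = -0.04*y1 + 1e4*y2*y3
#   y2' =  0.04*y1 - 1e4*y2*y3 - 3e7*y2**2
#   y3' =  3e7*y2**2
# with initial condition y(0) = (1, 0, 0), see ROBERTSON_Y0 below.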
def robertson_jacobean(t, y):
global n_jac_evals
n_jac_evals += 1
jac = np.zeros((3, 3))
# jac[i,j] = d f[i] / d y[j]
jac[0, 0] = -0.04
jac[0, 1] = 1e4 * y[2]
jac[0, 2] = 1e4 * y[1]
jac[1, 0] = 0.04
jac[1, 1] = -1e4 * y[2] - 6e7 * y[1]
jac[1, 2] = -1e4 * y[1]
jac[2, 0] = 0
jac[2, 1] = 6e7 * y[1]
jac[2, 2] = 0
return jac
ROBERTSON_Y0 = np.array([1., 0., 0])
if __name__ == "__main__":
import matplotlib.pyplot as plt
ts = np.logspace(-8, 8, base=10, num=200)
ys = np.zeros((3, ts.size))
for i, t in enumerate(ts):
integrator = scipy.integrate.ode(robertson_rhs, jac=robertson_jacobean)
integrator.set_initial_value(ROBERTSON_Y0)
integrator.set_integrator("vode", method="bdf", nsteps=10000)
ys[:, i] = integrator.integrate(t)
ys[1] *= 1e4
vals = []
for i in xrange(3):
vals.append(ts)
vals.append(ys[i])
vals.append('')
plt.semilogx(*vals)
plt.legend(["$y_1$", "$10^4 y_2$", "$y_3$"])
plt.show()
| 1,741 | 23.194444 | 90 |
py
|
finmag
|
finmag-master/src/finmag/util/ode/tests/test_sundials_stiff_ode.py
|
# FinMag - a thin layer on top of FEniCS to enable micromagnetic multi-physics simulations
# Copyright (C) 2012 University of Southampton
# Do not distribute
#
# CONTACT: [email protected]
#
# AUTHOR(S) OF THIS FILE: Dmitri Chernyshenko ([email protected])
import unittest
import numpy as np
import finmag.native.sundials as sundials
from finmag.util.ode import scipy_to_cvode_jtimes, scipy_to_cvode_rhs
import robertson_ode
from robertson_ode import robertson_jacobean, robertson_rhs, robertson_reset_n_evals
ROBERTSON_Y0 = np.array([1., 0., 0.])
class SundialsStiffOdeTests(unittest.TestCase):
def test_robertson_scipy(self):
import scipy.integrate
y_tmp = ROBERTSON_Y0.copy()
robertson_reset_n_evals()
integrator = scipy.integrate.ode(robertson_rhs, jac=robertson_jacobean)
integrator.set_initial_value(ROBERTSON_Y0)
integrator.set_integrator("vode", method="bdf", nsteps=5000)
integrator.integrate(1e8)
print "Integration of the Robertson ODE until t=1e8 with scipy VODE, BDF method: %d steps" % (robertson_ode.n_rhs_evals,)
self.assertLess(robertson_ode.n_rhs_evals, 5000)
def test_robertson_scipy_transposed(self):
import scipy.integrate
y_tmp = ROBERTSON_Y0.copy()
robertson_reset_n_evals()
integrator = scipy.integrate.ode(
robertson_rhs, jac=lambda t, y: robertson_jacobean(t, y).T)
integrator.set_initial_value(ROBERTSON_Y0)
integrator.set_integrator("vode", method="bdf", nsteps=5000)
integrator.integrate(1e8)
self.assertGreater(robertson_ode.n_rhs_evals, 5000)
def test_robertson_sundials(self):
robertson_reset_n_evals()
integrator = sundials.cvode(sundials.CV_BDF, sundials.CV_NEWTON)
integrator.init(
scipy_to_cvode_rhs(robertson_rhs), 0, ROBERTSON_Y0.copy())
integrator.set_linear_solver_sp_gmr(sundials.PREC_NONE)
integrator.set_spils_jac_times_vec_fn(
scipy_to_cvode_jtimes(robertson_jacobean))
integrator.set_scalar_tolerances(1e-8, 1e-8)
integrator.set_max_num_steps(5000)
yout = np.zeros(3)
integrator.advance_time(1e8, yout)
print "Integration of the Robertson ODE until t=1e8 with CVODE, BDF method: %d steps" % (robertson_ode.n_rhs_evals,)
def test_robertson_sundials_transposed(self):
robertson_reset_n_evals()
integrator = sundials.cvode(sundials.CV_BDF, sundials.CV_NEWTON)
integrator.init(
scipy_to_cvode_rhs(robertson_rhs), 0, ROBERTSON_Y0.copy())
integrator.set_linear_solver_sp_gmr(sundials.PREC_NONE)
integrator.set_spils_jac_times_vec_fn(
scipy_to_cvode_jtimes(lambda t, y: robertson_jacobean(t, y).T))
integrator.set_scalar_tolerances(1e-8, 1e-8)
integrator.set_max_num_steps(5000)
yout = np.zeros(3)
try:
integrator.advance_time(1e8, yout)
except RuntimeError, ex:
self.assertGreater(robertson_ode.n_rhs_evals, 5000)
assert ex.message.find("CV_TOO_MUCH_WORK") >= 0
if __name__ == '__main__':
unittest.main()
| 3,181 | 39.278481 | 129 |
py
|
finmag
|
finmag-master/src/finmag/util/ode/tests/__init__.py
| 0 | 0 | 0 |
py
|
|
finmag
|
finmag-master/src/finmag/util/ode/tests/test_sundials_ode.py
|
# FinMag - a thin layer on top of FEniCS to enable micromagnetic multi-physics simulations
# Copyright (C) 2012 University of Southampton
# Do not distribute
#
# CONTACT: [email protected]
#
# AUTHOR(S) OF THIS FILE: Dmitri Chernyshenko ([email protected])
import unittest
import math
import numpy as np
import finmag.native.sundials as sundials
from finmag.util.helpers import ignored
class OdeSundialsTests(unittest.TestCase):
def test_errors(self):
integrator = sundials.cvode(sundials.CV_ADAMS, sundials.CV_FUNCTIONAL)
y = np.zeros((5,))
try:
integrator.advance_time(1, y)
self.fail("Exception was not raised")
except RuntimeError, ex:
print ex
def test_simple_1d_scipy(self):
import scipy.integrate
integrator = scipy.integrate.ode(lambda t, y: 0.5 * y)
integrator.set_integrator('vode', rtol=1e-8, atol=1e-8)
integrator.set_initial_value(np.array([1.]), 0)
reference = lambda t: [math.exp(0.5 * t)]
ts = np.linspace(0.001, 3, 100)
ys = np.array([integrator.integrate(t) for t in ts])
ref_ys = np.array([reference(t) for t in ts])
assert np.max(np.abs(ys - ref_ys)) < 1e-6
def test_simple_1d(self):
integrator = sundials.cvode(sundials.CV_ADAMS, sundials.CV_FUNCTIONAL)
self.init_simple_test(integrator)
self.run_simple_test(integrator)
integrator = sundials.cvode(sundials.CV_BDF, sundials.CV_FUNCTIONAL)
self.init_simple_test(integrator)
self.run_simple_test(integrator)
def test_simple_1d_diag(self):
integrator = sundials.cvode(sundials.CV_BDF, sundials.CV_NEWTON)
self.init_simple_test(integrator)
integrator.set_linear_solver_diag()
self.run_simple_test(integrator)
def init_simple_test(self, integrator):
def rhs(t, y, ydot):
ydot[:] = 0.5 * y
return 0
integrator.init(rhs, 0, np.array([1.]))
integrator.set_scalar_tolerances(1e-9, 1e-9)
def run_simple_test(self, integrator):
reference = lambda t: [math.exp(0.5 * t)]
yout = np.zeros(1)
ts = np.linspace(0.001, 3, 100)
ys = np.zeros((100, 1))
for i, t in enumerate(ts):
integrator.advance_time(t, yout)
ys[i] = yout.copy()
ref_ys = np.array([reference(t) for t in ts])
assert np.max(np.abs(ys - ref_ys)) < 1e-6
def test_stiff_sp_gmr(self):
integrator = sundials.cvode(sundials.CV_BDF, sundials.CV_NEWTON)
self.init_simple_test(integrator)
def jtimes(v, Jv, t, y, fy, tmp):
return 0
integrator.set_linear_solver_sp_gmr(sundials.PREC_NONE)
integrator.set_spils_jac_times_vec_fn(jtimes)
self.run_simple_test(integrator)
def test_jtimes_ex(self):
class MyException(Exception):
pass
integrator = sundials.cvode(sundials.CV_BDF, sundials.CV_NEWTON)
self.init_simple_test(integrator)
def jtimes(v, Jv, t, y, fy, tmp):
raise MyException()
integrator.set_linear_solver_sp_gmr(sundials.PREC_NONE)
integrator.set_spils_jac_times_vec_fn(jtimes)
yout = np.zeros(1)
with ignored(MyException):
integrator.advance_time(1, yout)
self.fail("Exception was not raised")
if __name__ == '__main__':
unittest.main()
| 3,475 | 33.415842 | 90 |
py
|
finmag
|
finmag-master/src/finmag/tests/test_writing_data.py
|
import finmag
import os
import numpy as np
from finmag.util.fileio import Tablereader
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
reference_file = os.path.join(MODULE_DIR, "barmini_test.ndt.ref")
def test_write_ndt_file(tmpdir):
os.chdir(str(tmpdir))
sim = finmag.example.barmini(name="barmini_test")
for i, time in enumerate(np.linspace(0, 1e-10, 21)):
print("i={}, time={}, marker 1.".format(i, time))
sim.advance_time(time)
print("i={}, time={}, marker 2.".format(i, time))
sim.save_averages()
print("i={}, time={}, marker 3.".format(i, time))
print("Done.")
# We used to do a file comparison here, but we had to fall back on
# a numerical comparison since the integration times returned from
# Sundials are slightly different for each run (which might be
# worth investigating, since it means that our simulations runs
# are not 100% reproducible)
f_out = Tablereader("barmini_test.ndt")
f_ref = Tablereader(reference_file)
a_out = np.array(f_out['time', 'm_x', 'm_y', 'm_z'])
a_ref = np.array(f_ref['time', 'm_x', 'm_y', 'm_z'])
diff = np.abs(a_out - a_ref)
print "Maximum difference: {}.".format(np.max(diff))
assert np.allclose(a_out, a_ref, atol=5e-6, rtol=1e-8)
| 1,293 | 35.971429 | 70 |
py
|
finmag
|
finmag-master/src/finmag/tests/test_1d_domain_wall_profile_uniaxial_anisotropy.py
|
import numpy as np
import dolfin as df
from finmag import Simulation as Sim
from finmag.energies import Exchange, UniaxialAnisotropy
from aeon import timer
import pylab
K1 = 520e3 # J/m^3
A = 30e-12 # J/m
x0 = 252e-9 # m
Ms = 1400e3 # A/m
def Mz_exact(x, x0=x0, A=A, Ms=Ms):
"""Analytical solution.
"""
return Ms * np.cos(np.pi / 2 + np.arctan(np.sinh((x - x0) / np.sqrt(A / K1))))
def M0(r):
"""Return initial magnetisation (vectorized)."""
offset = 2e-9
length = 500e-9
x = r[:, 0]
relative_position = -2 * (x - offset) / length + 1
# The following two lines are the vectorized version of this:
# mz = min(1.0, max(-1.0, relative_position))
max1r = np.where(relative_position < -1.0, -1.0, relative_position)
mz = np.where(max1r > 1.0, 1.0, max1r)
return 0 * mz, np.sqrt(1.0 - mz * mz), mz
def test_domain_wall_profile(do_plot=False):
simplices = 500
L = 504e-9
dim = 3
mesh = df.IntervalMesh(simplices, 0, L)
V = df.VectorFunctionSpace(mesh, "CG", 1, dim=dim)
m0 = df.Function(V)
coor = mesh.coordinates()
n = len(m0.vector().array())
print "Double check that the length of the vectors are equal: %g and %g" \
% (n, len(coor) * dim)
assert n == len(coor) * dim
# Setup LLG
sim = Sim(mesh, Ms)
exchange = Exchange(A)
sim.add(exchange)
anisotropy = UniaxialAnisotropy(K1, (0, 0, 1))
sim.add(anisotropy)
# TODO: Find out how one is supposed to pin.
# llg.pins = [0,1,-2,-1] # This is not so good
# MA: because you pin by index, -2 and -1 won't work like you'd expect.
sim.alpha = 1.0
# set initial magnetization
x, y, z = M0(coor)
m0.vector()[:] = np.array([x, y, z]).reshape(n)
sim.set_m(np.array([x, y, z]).reshape(n))
# Time integration
# f=open('data.txt','w')
for t in np.arange(0.0, 2e-10, 1e-11):
sim.run_until(t)
#Eani = anisotropy.compute_energy()/L
#Eex = exchange.compute_energy()/L
#f.write('%g\t%g\t%g\n' % (r.t,Eani,Eex))
print "Integrating time: %g" % t
# f.close()
print timer
mz = []
x = np.linspace(0, L, simplices + 1)
for xpos in x:
mz.append(sim.m_field.probe(xpos)[2])
mz = np.array(mz) * Ms
if do_plot:
# Plot magnetisation in z-direction
pylab.plot(x, mz, 'o', label='finmag')
pylab.plot(x, Mz_exact(x), '-', label='analytic')
pylab.legend(("Finmag", "Analytical"))
pylab.title("Domain wall example - Finmag vs analytical solution")
pylab.xlabel("Length")
pylab.ylabel("M.z")
pylab.savefig('1d-domain-wall-profile.png')
try:
import scipy.optimize
except ImportError:
pass
else:
popt, pcov = scipy.optimize.curve_fit(
Mz_exact, x, mz, p0=(x0 * 1.1, A * 1.1, Ms * 1.1))
print "popt=", popt
fittedx0, fittedA, fittedMs = popt
print "Error in fitted x0: %9.7f%%" % ((fittedx0 - x0) / x0 * 100)
print "Error in fitted Ms: %9.7f%%" % ((fittedMs - Ms) / Ms * 100)
print "Error in fitted A : %9.7f%%" % ((fittedA - A) / A * 100)
print "fitted A : %9g" % (fittedA)
print "correct A : %9g" % (A)
print "difference A : %9g" % (fittedA - A)
print "rel difference A : %9g" % ((fittedA - A) / A)
print "quotient A/fittedA and fittedA/A : %9g %g" % (A / fittedA, fittedA / A)
assert abs(fittedA - A) / A < 0.004, "Fitted A too inaccurate"
# Maximum deviation:
maxdiff = max(abs(mz - Mz_exact(x)))
print "Absolute deviation in Mz", maxdiff
assert maxdiff < 1200
maxreldiff = maxdiff / max(Mz_exact(x))
print "Relative deviation in Mz", maxreldiff
assert maxreldiff < 0.0009
if __name__ == "__main__":
test_domain_wall_profile(do_plot=True)
| 3,852 | 29.579365 | 86 |
py
|
finmag
|
finmag-master/src/finmag/tests/test_cyclic_references_in_sim.py
|
def test_cyclic_refs_in_simulation_object_basic():
import finmag
import dolfin as df
mesh = df.UnitIntervalMesh(1)
s = finmag.Simulation(mesh, Ms=1, unit_length=1e-9, name='simple')
refcount = s.shutdown()
# The number 4 is empirical. If it increases, we
# have introduced an extra cyclic reference.
# Update: the cythonised code seems to have only 3 references at this
# point. Relaxed to "at most 4" to allow binary build tests to pass.
assert refcount <= 4
def test_cyclic_refs_in_simulation_object_barmini():
import finmag
import dolfin as df
mesh = df.UnitIntervalMesh(1)
s = finmag.example.barmini()
s.run_until(1e-12)
refcount = s.shutdown()
# The number 4 is empirical. If it increases, we
# have introduced an extra cyclic reference.
assert refcount <= 4
| 846 | 31.576923 | 87 |
py
|
finmag
|
finmag-master/src/finmag/tests/test_effective_field.py
|
# HF: April 2014: I am just creating this test file. I am not sure
# whether we have no other tests for the effictive fields - maybe
# they are in a different file.
# Testing new 'get_interaction_list' function:
import dolfin
import finmag
def test_get_interaction_list():
# has bar mini example Demag and Exchange?
s = finmag.example.barmini()
lst = s.get_interaction_list()
assert 'Exchange' in lst
assert 'Demag' in lst
assert len(lst) == 2
# Let's remove one and check again
s.remove_interaction('Exchange')
assert s.get_interaction_list() == ['Demag']
# test simulation with no interaction
s2 = finmag.sim_with(
mesh=dolfin.IntervalMesh(10, 0, 1),
m_init=(1, 0, 0), Ms=1,
demag_solver=None, unit_length=1e-8)
assert s2.get_interaction_list() == []
| 831 | 26.733333 | 66 |
py
|
finmag
|
finmag-master/src/finmag/tests/test_jacobian.py
|
import pytest
from dolfin import *
from finmag.physics.llg import LLG
from finmag.energies import Exchange
from math import log
@pytest.mark.xfail
def test_this_needs_fixing():
print("The content of the code in `setup()` above needs to be distributed into the other ")
print("test routines here. It used to be global code that was imported when py.test")
print("was collecting files. So as a quick fix to stop the py.test")
print("collection braking, we put it into the function.")
class MyLLG(LLG):
"""
Temporary extension of LLG because the current version
does its computations externally in C++, and doesn't
compute the variational forms, and thus makes it
impossible to compute the jacobian.
"""
def __init__(self, S1, S3):
LLG.__init__(self, S1, S3)
self.alpha = 0.5
self.p = Constant(self.gamma / (1 + self.alpha ** 2))
def H_eff(self):
"""Very temporary function to make things simple."""
H_app = project((Constant((0, 1e5, 0))), self.S3)
H_ex = Function(self.S3)
# Comment out the following lines if you don't want exchange.
exch = Exchange(1.3e-11)
print "About to call setup"
exch.setup(self._m_field, self.Ms)
H_ex.vector().array()[:] = exch.compute_field()
H_eff = H_ex + H_app
return H_eff
def compute_variational_forms(self):
M, H, Ms, p, c, alpha, V = self._m_field.f, self.H_eff(), \
self.Ms, self.p, self.c, self.alpha, self.S3
u = TrialFunction(V)
v = TestFunction(V)
a = inner(u, v) * dx
L = inner((-p * cross(M, H)
- p * alpha / Ms.f * cross(M, cross(M, H))
- c * (inner(M, M) - Ms.f ** 2) * M / Ms.f ** 2), v) * dx
self.a, self.L = a, L
def variational_forms(self):
self.compute_variational_forms()
return self.a, self.L
def compute_jacobian(self):
L, M = self.L, self._m_field.f
return derivative(L, M)
def derivative_test(L, M, x, hs, J=None):
"""
Taylor remainder test.

*Arguments*

L  - right-hand side of the equation
M  - magnetisation field vector around which we develop the Taylor series
x  - random vector
hs - sequence of step widths h
J  - Jacobian (optional); if given, it is used, otherwise it is not

We evaluate dm/dt (represented by L) at the state vector P = M + h*x.
Without J, the remainder ||L(P) - L(M)|| should decrease as O(h); with J,
the remainder ||L(P) - L(M) - J(M)[h*x]|| should decrease as O(h^2)
(and vanish identically when L is linear in M).
"""
L_M = assemble(L)
errors = []
for h in hs:
H = Function(V)
H.vector().set_local(h * x.vector().array())
P = Function(V)
P.vector().set_local(M.vector().array() + H.vector().array())
L_P = assemble(replace(L, {M: P})) # Compute exact result
# Without Jacobian information
if J is None:
errors.append(norm(L_P - L_M))
# With Jacobian information
else:
J_M_H = assemble(action(J, H))
errors.append(norm(L_P - L_M - J_M_H))
return errors
def convergence_rates(hs, ys):
assert(len(hs) == len(ys))
rates = [(log(ys[i]) - log(ys[i - 1])) / (log(hs[i]) - log(hs[i - 1]))
for i in range(1, len(hs))]
return rates
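# Illustrative reading: each rate is the slope of the error curve in a
# log-log plot between two successive data points,
#   rate = (log(y_i) - log(y_{i-1})) / (log(h_i) - log(h_{i-1})).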
def test_convergence_linear():
"""All convergence rates should be 1 as the differences
should convert as O(n)."""
errors = derivative_test(L, M, x, hs)
rates = convergence_rates(hs, errors)
for h, rate in zip(hs, rates):
print "h= %g, rate=%g, rate-1=%g " % (h, rate, rate - 1)
assert abs(rate - 1) < h * CONV_TOL
def test_derivative_linear():
"""This should be zero because the rhs of LLG is linear in M."""
J = llg.compute_jacobian()
errors = derivative_test(L, M, x, hs, J=J)
for h, err in zip(hs, errors):
print "h= %g, error=%g" % (h, err)
assert abs(err) < h ** 2 * DERIV_TOL
m = 1e-5
mesh = BoxMesh(Point(0, 0, 0), Point(m, m, m), 5, 5, 5)
S1 = FunctionSpace(mesh, "Lagrange", 1)
S3 = VectorFunctionSpace(mesh, "Lagrange", 1)
llg = MyLLG(S1, S3)
llg.set_m((1, 0, 0))
M, V = llg._m_field.f, llg.S3
a, L = llg.variational_forms()
x = Function(V)
s = 0.25 # some random number
x.vector()[:] = s
hs = [2.0 / n for n in (1, 2, 4, 8, 16, 32)]
CONV_TOL = 1.5e-12
DERIV_TOL = 1.3e-13
if __name__ == '__main__':
# L is linear
print "Testing linear functional."
print "This should convert as O(h):"
errors = derivative_test(L, M, x, hs)
print errors
print "This should be close to one:"
print convergence_rates(hs, errors)
J = llg.compute_jacobian()
errors = derivative_test(L, M, x, hs, J=J)
print "This should be close to zero since L is linear:"
print errors
test_derivative_linear()
test_convergence_linear()
print ''
'''
# L is nonlinear
print "Testing nonlinear functional."
print "This should convert as O(h):"
errors = derivative_test(L, M, x, hs)
print errors
print "This should be close to one:"
print convergence_rates(hs, errors)
J = llg.compute_jacobian()
print "This should converge as O(h^2):"
errors = derivative_test(L, M, x, hs, J=J)
print errors
print "This should be close to two:"
print convergence_rates(hs, errors)
'''
| 5,293 | 26.148718 | 95 |
py
|
finmag
|
finmag-master/src/finmag/tests/test_sim_parallel.py
|
import os
import dolfin as df
import numpy as np
import matplotlib as mpl
mpl.use("Agg")
import matplotlib.pyplot as plt
from finmag import Simulation as Sim
from finmag.energies import Zeeman
#df.parameters.reorder_dofs_serial = True
alpha = 0.1
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
def t_test_sim_ode_parallel(do_plot=False):
mesh = df.BoxMesh(df.Point(0, 0, 0), df.Point(2, 2, 2), 1, 1, 1)
sim = Sim(mesh, 8.6e5, unit_length=1e-9, pbc='2d', parallel=True)
sim.alpha = alpha
sim.set_m((1, 0, 0))
sim.set_tol(1e-10, 1e-14)
H0 = 1e5
sim.add(Zeeman((0, 0, H0)))
# sim.add(Exchange(1.3e-11))
dt = 1e-12
ts = np.linspace(0, 500 * dt, 100)
precession_coeff = sim.gamma / (1 + alpha ** 2)
mz_ref = np.tanh(precession_coeff * alpha * H0 * ts)
mzs = []
length_error = []
for t in ts:
sim.run_until(t)
mm = sim.m.copy()
# mm.shape=(3,-1)
# mx,my,mz = mm[:,0] # same as m_average for this macrospin problem
mzs.append(mm[-1])
#length = np.sqrt(mx**2+my**2+mz**2)
# length_error.append(abs(length-1.0))
if do_plot:
ts_ns = ts * 1e9
plt.plot(ts_ns, mzs, "b.", label="computed")
plt.plot(ts_ns, mz_ref, "r-", label="analytical")
plt.xlabel("time (ns)")
plt.ylabel("mz")
plt.title("integrating a macrospin")
plt.legend()
plt.savefig(os.path.join(MODULE_DIR, "test_sim_ode.png"))
print("Deviation = {}, total value={}".format(
np.max(np.abs(mzs - mz_ref)),
mz_ref))
assert np.max(np.abs(mzs - mz_ref)) < 1e-9
#assert np.max(length_error) < 1e-9
if __name__ == "__main__":
t_test_sim_ode_parallel(do_plot=True)
print "Saved plot in test_sim_ode.png."
| 1,778 | 25.954545 | 75 |
py
|
finmag
|
finmag-master/src/finmag/tests/test_solid_angle.py
|
import numpy as np
from finmag.util.solid_angle_magpar import return_csa_magpar
from finmag.native import llg as native_llg
# native_llg.compute_solid_angle returns a signed angle, magpar does not.
TOLERANCE = 1e-15
csa = native_llg.compute_solid_angle
csa_magpar = return_csa_magpar()
def test_solid_angle_first_octant():
"""
In spherical coordinates, the solid angle element is
:math:`d\\Omega = \\sin\\theta\\,d\\theta\\,d\\varphi`.
Seen from the point (0, 0, 0), the triangle defined by the vertices
(1, 0, 0), (0, 1, 0) and (0, 0, 1) in cartesian coordinates covers the
first octant, i.e. azimuth and zenith angles each range over [0, PI/2],
and the integral yields PI/2.
"""
origin = np.zeros((3, 1))
# first octant (+, +, +) front-right-top
triangle = np.array(
[[[2.], [0.], [0.]], [[0.], [1.], [0.]], [[0.], [0.], [1.]]])
assert triangle.shape == (3, 3, 1)
angle = csa(origin, triangle)
assert abs(angle[0] - np.pi / 2) < TOLERANCE, \
"The solid angle is {}, but should be PI/2={}.".format(
angle[0], np.pi / 2)
magpar = csa_magpar(
np.zeros(3), np.array([2, 0, 0]), np.array([0, 5, 0]), np.array([0, 0, 1]))
assert abs(angle - magpar) < TOLERANCE
def test_solid_angle_one_minus_sign():
origin = np.zeros((3, 1))
# (-, +, +) back-right-top
triangle = np.array(
[[[-1.], [0.], [0.]], [[0.], [1.], [0.]], [[0.], [0.], [1.]]])
assert triangle.shape == (3, 3, 1)
angle = csa(origin, triangle)[0]
magpar = csa_magpar(
np.zeros(3), np.array([-1, 0, 0]), np.array([0, 1, 0]), np.array([0, 0, 1]))
assert abs(angle + np.pi / 2) < TOLERANCE, \
"The solid angle is {}, but should be -PI/2={}.".format(
angle, -np.pi / 2)
assert abs(abs(angle) - magpar) < TOLERANCE
def test_solid_angle_two_minus_signs():
origin = np.zeros((3, 1))
# (-, -, +) back-left-top
triangle = np.array(
[[[-1.], [0.], [0.]], [[0.], [-1.], [0.]], [[0.], [0.], [1.]]])
assert triangle.shape == (3, 3, 1)
angle = csa(origin, triangle)[0]
assert abs(angle - np.pi / 2) < TOLERANCE, \
"The solid angle is {}, but should be PI/2={}.".format(
angle[0], np.pi / 2)
magpar = csa_magpar(
np.zeros(3), np.array([-1, 0, 0]), np.array([0, -1, 0]), np.array([0, 0, 1]))
assert abs(angle - magpar) < TOLERANCE
def test_octants_solid_angle():
"""
By the same reasoning as above, we get 4*PI for the solid angle of a
sphere as seen from a point inside it. Another way of getting the solid
angle of a single octant is therefore to divide the solid angle of the
sphere by the 8 octants of the euclidean coordinate system, which again
gives PI/2. Note, however, that compute_solid_angle returns signed
angles, so the eight contributions below cancel and their sum is zero.
"""
origin = np.zeros((3, 1))
TRIANGLES = 8
triangles = np.zeros((3, 3, TRIANGLES))
# (3, 3, 8) three components, three vertices per triangle, 8 triangles
triangles[0, 0, ] = np.array(
[1., -1., 1., -1., 1., -1., 1., -1.]) # X, V1, 1-8
triangles[1, 1, ] = np.array(
[1., 1., -1., -1., 1., 1., -1., -1.]) # Y, V2, 1-8
triangles[2, 2, ] = np.array(
[1., 1., 1., 1., -1., -1., -1., -1.]) # Z, V3, 1-8
angle = csa(origin, triangles)
assert abs(angle) < TOLERANCE, \
"The solid angle is {0}, but should be 0.".format(angle[0])
if __name__ == "__main__":
for name in globals().keys():
if name.startswith("test_"):
print "Running", name
globals()[name]()
| 3,572 | 32.392523 | 85 |
py
|
finmag
|
finmag-master/src/finmag/tests/test_meshes.py
|
import os
import time
import pytest
import shutil
import tempfile
import textwrap
from finmag.util.meshes import *
from dolfin import Mesh
from math import pi
from StringIO import StringIO
import logging
logger = logging.getLogger("finmag")
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
TOLERANCE = 0.05
# tolerance for the box() method, which should be much more precise
BOX_TOLERANCE = 1e-10
def test_from_geofile_and_from_csg():
radius = 1.0
maxh = 0.3
tmpdir = tempfile.mkdtemp()
tmpfile = tempfile.NamedTemporaryFile(
suffix='.geo', dir=tmpdir, delete=False)
csg_string = textwrap.dedent("""\
algebraic3d
solid main = sphere (0, 0, 0; {radius}) -maxh = {maxh};
tlo main;""").format(radius=radius, maxh=maxh)
try:
# Create a temporay .geo file which contains the geometric
# description of a sphere.
tmpfile.write(csg_string)
tmpfile.close()
geofile = tmpfile.name
xmlfile = os.path.splitext(geofile)[0] + ".xml.gz"
# Create a Dolfin mesh from the .geo file, first without saving the
# result
mesh1 = from_geofile(geofile, save_result=False)
assert(isinstance(mesh1, Mesh))
assert(not os.path.isfile(xmlfile))
# Now do the same, but save the result (and check that the .xml.gz file
# is there)
mesh2 = from_geofile(geofile, save_result=True)
assert(isinstance(mesh2, Mesh))
assert(os.path.isfile(xmlfile))
# Capture logging output in a string-stream
stream = StringIO()
handler = logging.StreamHandler(stream)
logger.addHandler(handler)
# Read the mesh form the .geo file again, but this time it
# should be read instantaneously.
mesh3 = from_geofile(geofile, save_result=True)
assert(isinstance(mesh3, Mesh))
assert(os.path.isfile(xmlfile))
handler.flush()
# assert(stream.getvalue() == "The mesh '{}' already exists and is "
# "automatically returned.\n".format(xmlfile))
# 'Touch' the .geo file so that it is newer than the .xml.gz
# file. Then check that upon reading the mesh the .xml.gz file
# is recreated. Note that most machines have sub-millisecond
# precision in their timestamps, but on some systems (such as
# osiris) only full seconds seem to be stored. So we wait for
# one second to make sure that the .geo file is picked up as
# being newer.
stream.truncate(0) # clear stream
time.sleep(1)
os.utime(geofile, None) # update the 'last modified' timestamp
mesh4 = from_geofile(geofile, save_result=True)
assert(isinstance(mesh4, Mesh))
assert(os.path.isfile(xmlfile))
handler.flush()
assert(stream.getvalue().startswith("The mesh file '{}' is outdated "
"(since it is older than the .geo file '{}') and will "
"be overwritten.\n".format(xmlfile, geofile)))
# Create a mesh from a CSG string directly
mesh5 = from_csg(csg_string, save_result=False)
# Check that the volume of the sphere is approximately correct
vol_exact = 4.0 / 3 * pi * radius ** 3
for mesh in [mesh1, mesh2, mesh3, mesh4, mesh5]:
vol_mesh = mesh_volume(mesh)
assert(abs(vol_mesh - vol_exact) / vol_exact < TOLERANCE)
finally:
tmpfile.close()
shutil.rmtree(tmpdir)
def test_box():
# We deliberately choose the two corners so that x1 > y1, to see
# whether the box() function can cope with this.
(x0, x1, x2) = (-0.2, 1.4, 3.0)
(y0, y1, y2) = (1.1, 0.7, 2.2)
maxh = 10.0 # large value so that we get as few vertices as possible
# Note: We use 'save_result=False' in this test so that we can also test 'anonymous'
# mesh creation. In the other tests, we use 'save_result=True' so that the mesh
# is loaded from a file for faster execution.
mesh = box(x0, x1, x2, y0, y1, y2, maxh=maxh, save_result=False)
vol_exact = abs((y0 - x0) * (y1 - x1) * (y2 - x2))
vol_mesh = mesh_volume(mesh)
assert(abs(vol_mesh - vol_exact) / vol_exact < BOX_TOLERANCE)
def test_sphere():
r = 1.0
maxh = 0.2
mesh = sphere(r=r, maxh=maxh, save_result=True, directory=MODULE_DIR)
vol_exact = 4.0 / 3 * pi * r ** 3
vol_mesh = mesh_volume(mesh)
assert(abs(vol_mesh - vol_exact) / vol_exact < TOLERANCE)
def test_cylinder():
r = 1.0
h = 2.0
maxh = 0.2
mesh = cylinder(
r=r, h=h, maxh=maxh, save_result=True, directory=MODULE_DIR)
vol_exact = pi * r * r * h
vol_mesh = mesh_volume(mesh)
assert(abs(vol_mesh - vol_exact) / vol_exact < TOLERANCE)
def test_elliptic_cylinder():
r1 = 2.0
r2 = 1.0
h = 2.5
maxh = 0.2
mesh = elliptic_cylinder(
r1=r1, r2=r2, h=h, maxh=maxh, save_result=True, directory=MODULE_DIR)
vol_exact = pi * r1 * r2 * h
vol_mesh = mesh_volume(mesh)
assert(abs(vol_mesh - vol_exact) / vol_exact < TOLERANCE)
def test_ellipsoid():
r1 = 2.0
r2 = 1.0
r3 = 0.5
maxh = 0.2
mesh = ellipsoid(
r1=r1, r2=r2, r3=r3, maxh=maxh, save_result=True, directory=MODULE_DIR)
vol_exact = 4.0 / 3 * pi * r1 * r2 * r3
vol_mesh = mesh_volume(mesh)
assert(abs(vol_mesh - vol_exact) / vol_exact < TOLERANCE)
@pytest.mark.requires_X_display
def test_plot_mesh_regions():
"""
This test simply calls the function
`finmag.util.meshes.plot_mesh_regions` to see if it can be called
with different arguments without error. No checking of the output
figure is done whatsoever.
"""
# Write csg string to a temporary file
mesh = from_geofile(os.path.join(MODULE_DIR, "sphere_in_cube.geo"))
mesh_regions = df.MeshFunction(
'size_t', mesh, os.path.join(MODULE_DIR, "sphere_in_cube_mat.xml"))
# Call plot_mesh_regions with a variety of different arguments
ax = plot_mesh_regions(mesh_regions, regions=1)
plot_mesh_regions(mesh_regions, regions=1, colors="green", ax=ax)
plot_mesh_regions(mesh_regions, regions=[1, 2], zoom_to_mesh_size=False)
plot_mesh_regions(mesh_regions, regions=[1, 2], colors=["green", "red"],
alphas=[1.0, 0.25])
plot_mesh_regions(mesh_regions, regions=[1, 2], colors=["green", "red"],
alphas=[1.0, 0.25], markers=['<', 'H'], marker_sizes=[20, 50])
# 'None' inside a list means: use matplotlib's default
plot_mesh_regions(mesh_regions, regions=[1, 2], colors=[None, "red"],
alphas=[0.3, None], marker_sizes=[None, 100])
# Sanity check (empty regions)
plot_mesh_regions(mesh_regions, regions=[])
| 6,829 | 34.572917 | 99 |
py
|
finmag
|
finmag-master/src/finmag/tests/test_exchange_static.py
|
import numpy as np
import dolfin as df
from finmag import Simulation as Sim
from finmag.energies import Exchange
from finmag.util.helpers import vectors, angle
TOLERANCE = 8e-7
# define the mesh
length = 20e-9 # m
simplexes = 10
mesh = df.IntervalMesh(simplexes, 0, length)
Ms = 8.6e5
A = 1.3e-11
# initial configuration of the magnetisation
left_right = '2*x[0]/L - 1'
up_down = 'sqrt(1 - (2*x[0]/L - 1)*(2*x[0]/L - 1))'
possible_orientations = [
(left_right, up_down, '0'), # (left_right, '0', up_down),
(up_down, '0', left_right)] # , (up_down, left_right, '0'),
#('0', left_right, up_down)] , ('0', up_down, left_right)]
def angles_after_a_nanosecond(initial_M, pins=[]):
sim = Sim(mesh, Ms)
sim.set_m(initial_M, L=length)
sim.add(Exchange(A))
sim.pins = pins
sim.run_until(1e-9)
m = vectors(sim.m)
angles = np.array([angle(m[i], m[i + 1]) for i in xrange(len(m) - 1)])
return angles
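# For reference, a minimal numpy sketch of what the `vectors` and `angle`
# helpers are assumed to do above: reshape the component-major flat array
# into one 3-vector per vertex and take the angle between neighbouring
# vectors (illustration only, not finmag's implementation):
def _neighbour_angles_sketch(m_flat):
    m = m_flat.reshape(3, -1).T  # one 3-vector per mesh vertex
    dots = np.sum(m[:-1] * m[1:], axis=1)
    norms = np.linalg.norm(m[:-1], axis=1) * np.linalg.norm(m[1:], axis=1)
    return np.arccos(np.clip(dots / norms, -1.0, 1.0))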
def test_all_orientations_without_pinning():
for m0 in possible_orientations:
angles = angles_after_a_nanosecond(m0)
print "no pinning, all angles: "
print angles
assert np.nanmax(angles) < TOLERANCE
def test_all_orientations_with_pinning():
for m0 in possible_orientations:
angles = angles_after_a_nanosecond(m0, [0, 10])
print "no pinning, all angles: "
print angles
assert np.abs(np.max(angles) - np.min(angles)) < TOLERANCE + 1e-7
def test_exchange_field_should_change_when_M_changes():
sim = Sim(mesh, Ms)
sim.set_m(df.Expression(('(2*x[0]-L)/L',
'sqrt(1 - ((2*x[0]-L)/L)*((2*x[0]-L)/L))',
'0'), L=length, degree=1))
exchange = Exchange(A)
sim.add(exchange)
# save the beginning value of M and the exchange field for comparison
# purposes
old_m = sim.m
old_H_ex = exchange.compute_field()
sim.run_until(1e-11)
# Capture the current value of the exchange field and m.
m = sim.m
H_ex = exchange.compute_field()
# We assert that the magnetisation has indeed changed since the beginning.
assert not np.array_equal(old_m, m)
assert not np.array_equal(old_H_ex, H_ex), "H_ex hasn't changed."
if __name__ == "__main__":
print "without pinning"
test_all_orientations_without_pinning()
print "with pinning"
test_all_orientations_with_pinning()
| 2,356 | 27.39759 | 78 |
py
|
finmag
|
finmag-master/src/finmag/tests/test_interactions_scale_linearly_with_m.py
|
#!/usr/bin/env python
import dolfin as df
import numpy as np
from finmag.field import Field
import pytest
from finmag.energies import Exchange, UniaxialAnisotropy, Zeeman, Demag, DMI
np.random.seed(0)
mesh = df.BoxMesh(df.Point(0, 0, 0), df.Point(40, 40, 5), 15, 15, 1)
N = mesh.num_vertices()
V = df.VectorFunctionSpace(mesh, 'CG', 1, dim=3)
randvec1 = np.random.random_sample(3 * N)
randvec2 = np.random.random_sample(3 * N)
randvec3 = np.random.random_sample(3 * N)
def compute_field_for_linear_combination(EnergyClass, init_args, a, b, c):
v = Field(V)
v.set_with_numpy_array_debug(a * randvec1 + b * randvec2 + c * randvec3)
e = EnergyClass(**init_args)
e.setup(v, Field(df.FunctionSpace(mesh, 'DG', 0), 8e5), unit_length=1e-9)
return e.compute_field()
def create_demag_params(atol, rtol, maxiter):
"""
Helper function to create a dictionary with the given
demag tolerances and maximum iterations. This can be
directly passed to the Demag class in order to set
these parameters.
"""
demag_params = {
'absolute_tolerance': atol,
'relative_tolerance': rtol,
'maximum_iterations': int(maxiter),
}
return {'phi_1': demag_params, 'phi_2': demag_params}
# All the interactions should be linear in the magnetisation. However,
# for the demag field, this is only true if we use a LU solver or a
# Krylov solver with sufficiently strict tolerances. All the values of
# 'TOL' used in the tests below are the strictest that still make the
# tests pass. It is interesting to see that the various interactions
# have different accuracies (e.g. UniaxialAnisotropy is essentially
# linear in m up to machine precision whereas the Exchange and the
# demag are much less accurate).
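# A standalone numpy sketch of the linearity property tested below, with a
# plain random matrix standing in for an interaction operator (illustration
# only, not finmag code):
def _linearity_sketch():
    op = np.random.random_sample((9, 9))
    v1, v2 = np.random.random_sample(9), np.random.random_sample(9)
    lhs = op.dot(0.37 * v1 + 7.47 * v2)
    rhs = 0.37 * op.dot(v1) + 7.47 * op.dot(v2)
    assert np.allclose(lhs, rhs)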
@pytest.mark.parametrize(("EnergyClass", "init_args", "TOL"), [
(Exchange, {'A': 13e-12}, 1e-11),
(DMI, {'D': 1.58e-3}, 1e-12),
(UniaxialAnisotropy, {'K1': 1e5, 'axis': (0, 0, 1)}, 1e-15),
# Demag with LU solver should be linear in m
(Demag, {'solver_type': 'LU'}, 1e-10),
# Demag with Krylov solver and strict tolerances should be linear in m
(Demag, {'solver_type': 'Krylov',
'parameters': create_demag_params(1e-15, 1e-15, 1e4)}, 1.2e-10),
# Demag with Krylov solver and weak tolerances is *not* linear in m
pytest.mark.xfail(
(Demag, {'solver_type': 'Krylov', 'parameters': create_demag_params(1e-6, 1e-6, 1e4)}, 1e-8)),
])
def test_interactions_scale_linearly_with_m(EnergyClass, init_args, TOL):
"""
For each energy class, compute the associated effective field
for three random configurations of m. Then compute it for various
    linear combinations of these and check that the result is the same
linear combination of the individual effective fields.
"""
fld_1 = compute_field_for_linear_combination(
EnergyClass, init_args, 1.0, 0.0, 0.0)
fld_2 = compute_field_for_linear_combination(
EnergyClass, init_args, 0.0, 1.0, 0.0)
fld_3 = compute_field_for_linear_combination(
EnergyClass, init_args, 0.0, 0.0, 1.0)
# Check a few linear combinations with different coefficients (a, b, c)
for (a, b, c) in [(0.37, 7.47, 0.68), (2.0, 0.65, 0.1), (4.76, 3.3, 0.028), (1.01, 2.04, 1.26), (1.111, 4.20, 2.3)]:
fld = compute_field_for_linear_combination(
EnergyClass, init_args, a, b, c)
print(" Testing linear combination with coefficients ({}, {}, {}): {}".format(
a, b, c, np.allclose(fld, a * fld_1 + b * fld_2 + c * fld_3, atol=TOL, rtol=TOL)))
assert np.allclose(
fld, a * fld_1 + b * fld_2 + c * fld_3, atol=TOL, rtol=TOL)
if __name__ == '__main__':
    test_interactions_scale_linearly_with_m(Exchange, {'A': 13e-12}, 1e-11)
| 3,760 | 39.880435 | 120 |
py
|
finmag
|
finmag-master/src/finmag/tests/test_restart_simulation.py
|
import finmag
import os
def test_restart_same_simulation(tmpdir):
os.chdir(str(tmpdir))
sim = finmag.example.barmini()
sim.run_until(10e-12)
    # To be able to restart the simulation from a particular point, we first
    # need to save the magnetisation at that point:
sim.save_restart_data()
# We can see from the message that the filename ``barmini-restart.npz`` has been chosen. This is the *canonical* filename, composed of
#
# - the simulation name, and
# - the ``-restart`` and
# - the default extension ``.npz`` for multiple numpy arrays saved as a zipped file
#
# For completeness the simulation name is:
print(sim.name)
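    # The canonical name is thus assumed to be composed as
    # sim.name + "-restart" + ".npz", i.e. "barmini-restart.npz" here.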
# Let us also save the magnetisation at this point in time.
m_10em12 = sim.m
# We can also choose any filename we like (although we need to stick to
# the ``.npz`` extension), for example
sim.save_restart_data(filename="my-special-state.npz")
# And show the average component values for future reference
print("t=%s, <m>=%s" % (sim.t, sim.m_average))
# Then carry on with the time integration:
sim.run_until(100e-12)
print("t=%s, <m>=%s" % (sim.t, sim.m_average))
assert sim.t == 100e-12
    # We now imagine that we need to restart this run, or create another
    # simulation that continues at the point of t=10e-12 where we have saved
    # our restart snapshot:
# Restart
# Imagine we need to go back to t=10e-12 and the corresponding
# magnetisation configuration. We can use:
sim.restart()
assert sim.t == 10e-12
# If the ``restart`` method is not given any filename, it will look for the canonical restart name of its simulation object.
#
# And just to convince us:
print("time = %s " % sim.t)
print("<m> = %s" % sim.m_average)
assert sim.t == 10e-12
    # check that this is identical to the state before saving
    assert (sim.m == m_10em12).all()
# integrate a little so that we change time and status
sim.run_until(20e-12)
    # If we want to restart from a different configuration (i.e. not from the
    # canonical filename), we need to provide a restart file name:
sim.restart('my-special-state.npz')
print("time = %s " % sim.t)
print("<m> = %s" % sim.m_average)
assert sim.t == 10e-12
    # check that this is identical to the state before saving
    assert (sim.m == m_10em12).all()
sim.run_until(24e-12)
assert sim.t == 24e-12
# If we want to use the same magnetisation, but change the point in time
# at which we start the integration, we can use the optional ``t0``
# parameter:
sim.restart('my-special-state.npz', t0=0.42e-12)
print("time = %s " % sim.t)
print("<m> = %s" % sim.m_average)
assert sim.t == 0.42e-12
    # check that this is identical to the state before saving
    assert (sim.m == m_10em12).all()
print("t=%s, <m>=%s" % (sim.t, sim.m_average))
# Creating a new simulation from saved restart file
    # To create a new simulation that starts from a saved configuration, we
# need to create the simulation object (and we have to use exactly the
# same mesh -- there is no check for this at the moment), and can then use
# the restart method as before:
sim2 = finmag.example.barmini()
sim2.restart('my-special-state.npz')
print("t=%s, <m>=%s" % (sim2.t, sim2.m_average))
assert sim2.t == 10e-12
| 3,377 | 29.990826 | 138 |
py
|
finmag
|
finmag-master/src/finmag/tests/test_dmi.py
|
import numpy as np
import dolfin as df
from finmag.field import Field
from finmag.energies import DMI
nm = 1e-9
simplexes = 10
length = 20 * nm
mesh = df.BoxMesh(df.Point(0, 0, 0), df.Point(length, 3 * nm, 3 * nm), simplexes, 1, 1)
V = df.VectorFunctionSpace(mesh, "Lagrange", 1)
def test_dmi_field():
"""
Simulation 1 is computing H_dmi=dE_dM via assemble.
Simulation 2 is computing H_dmi=g*M with a suitable pre-computed matrix g.
Simulation 3 is computing g using a petsc matrix.
We show that the three methods give equivalent results (this relies
on H_dmi being linear in M).
"""
m_initial = df.Expression((
'(2*x[0]-L)/L',
'sqrt(1 - ((2*x[0]-L)/L)*((2*x[0]-L)/L))',
'0'), L=length, degree=1)
m = Field(V)
m.set(m_initial)
dmi1 = DMI(D=5e-3, method="box-assemble")
dmi1.setup(m, Field(df.FunctionSpace(mesh, 'DG', 0), 8.6e5))
dmi2 = DMI(D=5e-3, method="box-matrix-numpy")
dmi2.setup(m, Field(df.FunctionSpace(mesh, 'DG', 0), 8.6e5))
dmi3 = DMI(D=5e-3, method="box-matrix-petsc")
dmi3.setup(m, Field(df.FunctionSpace(mesh, 'DG', 0), 8.6e5))
H_dmi1 = dmi1.compute_field()
H_dmi2 = dmi2.compute_field()
H_dmi3 = dmi3.compute_field()
diff12 = np.max(np.abs(H_dmi1 - H_dmi2))
diff13 = np.max(np.abs(H_dmi1 - H_dmi3))
print "Difference between H_dmi1 and H_dmi2: max(abs(H_dmi1-H_dmi2))=%g" % diff12
print "Max value = %g, relative error = %g " % (max(H_dmi1), diff12 / max(H_dmi1))
print "Difference between H_dmi1 and H_dmi3: max(abs(H_dmi1-H_dmi3))=%g" % diff13
print "Max value = %g, relative error = %g " % (max(H_dmi1), diff13 / max(H_dmi1))
assert diff12 < 5e-8
assert diff13 < 5e-8
assert diff12 / max(H_dmi1) < 1e-14
assert diff13 / max(H_dmi1) < 1e-14
if __name__ == "__main__":
test_dmi_field()
| 1,858 | 32.8 | 87 |
py
|
finmag
|
finmag-master/src/finmag/tests/test_anis.py
|
import numpy as np
import dolfin as df
from finmag.util.helpers import stats
from finmag.energies import UniaxialAnisotropy, CubicAnisotropy
from finmag import Simulation
Ms = 8.6e5
K1 = 520e3
mx = 0.8
my = 0.6
mz = 0
mu0 = 4 * np.pi * 1e-7
def test_anisotropy():
mesh = df.IntervalMesh(1, 0, 1)
sim = Simulation(mesh, Ms, unit_length=1e-9)
sim.set_m((mx, my, mz))
sim.add(UniaxialAnisotropy(K1, axis=[1, 0, 0]))
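    # Uniaxial anisotropy field: H_anis = (2*K1 / (mu0*Ms)) * (m . u) * u.
    # With easy axis u = (1, 0, 0) the x-component reduces to
    # 2*K1/(mu0*Ms) * mx, which is the reference value computed below.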
expected = 2 * K1 / (mu0 * Ms) * mx
field = sim.effective_field()
assert abs(field[0] - expected) / Ms < 1e-15
def m_init(pos):
    return (1, 1, 0)
def cubic_anisotropy(K1=520e3, K2=0, K3=0):
mesh = df.RectangleMesh(df.Point(0, 0), df.Point(50, 2), 20, 1)
sim = Simulation(mesh, Ms, unit_length=1e-9)
sim.set_m(m_init)
sim.add(CubicAnisotropy(
K1=K1, K2=K2, K3=K3, u1=(1, 0, 0), u2=(0, 1, 0), assemble=False))
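    # Smoke test only: we merely check that the effective field, including
    # the cubic anisotropy contribution, can be assembled without error.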
field1 = sim.effective_field()
if __name__ == "__main__":
test_anisotropy()
cubic_anisotropy()
| 1,012 | 20.553191 | 73 |
py
|
finmag
|
finmag-master/src/finmag/tests/test_sim_ode.py
|
import os
import dolfin as df
import numpy as np
import matplotlib as mpl
mpl.use("Agg")
import matplotlib.pyplot as plt
from finmag import Simulation as Sim
from finmag.energies import Zeeman
alpha = 0.1
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
def test_sim_ode(do_plot=False):
mesh = df.BoxMesh(df.Point(0, 0, 0), df.Point(2, 2, 2), 1, 1, 1)
sim = Sim(mesh, 8.6e5, unit_length=1e-9, pbc='2d')
sim.alpha = alpha
sim.set_m((1, 0, 0))
sim.set_tol(1e-12, 1e-14)
H0 = 1e5
sim.add(Zeeman((0, 0, H0)))
dt = 1e-12
ts = np.linspace(0, 500 * dt, 100)
precession_coeff = sim.gamma / (1 + alpha ** 2)
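    # Analytical macrospin solution: for a static field H0 along z, damped
    # precession gives m_z(t) = tanh(gamma*alpha*H0*t / (1 + alpha**2));
    # the precession about z leaves m_z unchanged, so this is the reference.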
mz_ref = np.tanh(precession_coeff * alpha * H0 * ts)
mzs = []
length_error = []
for t in ts:
sim.advance_time(t)
mm = sim.m.copy()
mm.shape = (3, -1)
mx, my, mz = mm[:, 0] # same as m_average for this macrospin problem
mzs.append(mz)
length = np.sqrt(mx ** 2 + my ** 2 + mz ** 2)
length_error.append(abs(length - 1.0))
if do_plot:
ts_ns = ts * 1e9
plt.plot(ts_ns, mzs, "b.", label="computed")
plt.plot(ts_ns, mz_ref, "r-", label="analytical")
plt.xlabel("time (ns)")
plt.ylabel("mz")
plt.title("integrating a macrospin")
plt.legend()
plt.savefig(os.path.join(MODULE_DIR, "test_sim_ode.png"))
print("Deviation = {}, total value={}".format(
np.max(np.abs(mzs - mz_ref)),
mz_ref))
assert np.max(np.abs(mzs - mz_ref)) < 1e-9
assert np.max(length_error) < 1e-9
if __name__ == "__main__":
test_sim_ode(do_plot=True)
print "Saved plit in test_sim_ode.png."
| 1,686 | 25.777778 | 77 |
py
|
finmag
|
finmag-master/src/finmag/tests/test_spatially_varying_anisotropy.py
|
import pytest
import os
import numpy as np
import dolfin as df
import matplotlib.pyplot as plt
from finmag.field import Field
from finmag import Simulation
from finmag.energies import UniaxialAnisotropy
from finmag.util.consts import bloch_parameter
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
#@pytest.mark.xfail(reason='1.5')
def test_spatially_varying_anisotropy_axis(tmpdir, debug=False):
Ms = 1e6
A = 1.3e-11
K1 = 6e5
lb = bloch_parameter(A, K1)
unit_length = 1e-9
nx = 20
Lx = nx * lb / unit_length
mesh = df.IntervalMesh(nx, 0, Lx)
# anisotropy axis goes from (0, 1, 0) at x=0 to (1, 0, 0) at x=Lx
expr_a = df.Expression(("x[0] / sqrt(pow(x[0], 2) + pow(Lx-x[0], 2))",
"(Lx-x[0]) / sqrt(pow(x[0], 2) + pow(Lx-x[0], 2))",
"0"), Lx=Lx, degree=1)
# in theory, a discontinuous Galerkin (constant over the cell) is a good
# choice to represent material parameters. In this case though, the
# parameter varies linearly, so we use the usual CG.
V = df.VectorFunctionSpace(mesh, "CG", 1, dim=3)
a = Field(V, expr_a)
sim = Simulation(mesh, Ms, unit_length)
sim.set_m((1, 1, 0))
sim.add(UniaxialAnisotropy(K1, a))
sim.relax()
# probe the easy axis and the magnetisation along the interval
points = 100
xs = np.linspace(0, Lx, points)
axis_xs = np.zeros((points, 3))
m_xs = np.zeros((points, 3))
for i, x in enumerate(xs):
axis_xs[i] = a(x)
m_xs[i] = sim.m_field(x)
    # we want the magnetisation to follow the easy axis;
    # it does so, except at x=0. What is happening there?
diff = np.abs(m_xs - axis_xs)
assert diff.max() < 0.02
if debug:
old = os.getcwd()
os.chdir(tmpdir)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(xs, axis_xs[:, 0], "b+", label="a_x")
ax.plot(xs, m_xs[:, 0], "r--", label="m_x")
ax.legend(loc="upper left")
ax.set_ylim((0, 1.05))
ax.set_xlabel("x (nm)")
plt.savefig('spatially_varying_easy_axis.png')
plt.close()
sim.m_field.save_pvd('spatially_varying_easy_axis.pvd')
os.chdir(old)
if __name__ == "__main__":
test_spatially_varying_anisotropy_axis(".", debug=True)
| 2,315 | 31.166667 | 79 |
py
|
finmag
|
finmag-master/src/finmag/tests/test_unit_length.py
|
import os
import numpy as np
import dolfin as df
from finmag.field import Field
from finmag.energies import Exchange
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
n = 20
Ms = 8.6e5
A = 1
REL_TOL = 1e-4
def exchange(mesh, unit_length):
S3 = df.VectorFunctionSpace(mesh, "Lagrange", 1)
m = Field(S3, value=df.Expression(("x[1]*u", "0", "sqrt(1-pow(x[1]*u, 2))"), u=unit_length, degree=1))
exch = Exchange(A)
exch.setup(m, Field(df.FunctionSpace(mesh, 'DG', 0), Ms), unit_length=unit_length)
H = exch.compute_field()
E = exch.compute_energy()
return m.get_numpy_array_debug(), H, E
def test_compare_exchange_for_two_dolfin_meshes():
"""
Check that a mesh expressed in nanometers gives the same results
as a mesh expressed in meters for the exchange interaction.
"""
mesh_nm = df.BoxMesh(df.Point(0, 0, 0), df.Point(1, 1, 1), n, n, n) # in nm
m_nm, H_nm, E_nm = exchange(mesh_nm, unit_length=1e-9)
mesh = df.BoxMesh(df.Point(0, 0, 0), df.Point(1e-9, 1e-9, 1e-9), n, n, n)
m, H, E = exchange(mesh, unit_length=1)
rel_diff_m = np.max(np.abs(m_nm - m)) # norm m = 1
print "Difference of magnetisation is {:.2f}%.".format(100 * rel_diff_m)
assert rel_diff_m < REL_TOL
rel_diff_E = abs((E_nm - E) / E)
print "Relative difference between E_nm = {:.5g} and E_m = {:.5g} is d = {:.2f}%.".format(E_nm, E, 100 * rel_diff_E)
assert rel_diff_E < REL_TOL
max_diff_H = np.max(np.abs(H_nm - H) / np.max(H))
print "Maximum of relative difference between the two fields is d = {:.2f}%.".format(100 * max_diff_H)
assert np.max(np.abs(H)) > 0
assert max_diff_H < REL_TOL
| 1,670 | 33.8125 | 120 |
py
|
finmag
|
finmag-master/src/finmag/tests/test_spatially_varying_alpha.py
|
import dolfin as df
import numpy as np
from finmag import Simulation
from finmag.physics.llg import LLG
def test_spatially_varying_alpha_using_Simulation_class():
"""
test that I can change the value of alpha through the property sim.alpha
and that I get an df.Function back.
"""
length = 20
simplices = 10
mesh = df.IntervalMesh(simplices, 0, length)
sim = Simulation(mesh, Ms=1, unit_length=1e-9)
sim.alpha = 1
expected_alpha = np.ones(simplices + 1)
assert np.array_equal(sim.alpha.vector().array(), expected_alpha)
def test_spatially_varying_alpha_using_LLG_class():
"""
no property magic here - llg.alpha is a df.Function at heart and can be
set with any type using llg.set_alpha()
"""
length = 20
simplices = 10
mesh = df.IntervalMesh(simplices, 0, length)
S1 = df.FunctionSpace(mesh, "Lagrange", 1)
S3 = df.VectorFunctionSpace(mesh, "Lagrange", 1, 3)
llg = LLG(S1, S3)
llg.set_alpha(1)
expected_alpha = np.ones(simplices + 1)
print "Got:\n", llg.alpha.vector().array()
print "Expected:\n", expected_alpha
assert np.array_equal(llg.alpha.vector().array(), expected_alpha)
| 1,191 | 27.380952 | 76 |
py
|
finmag
|
finmag-master/src/finmag/tests/__init__.py
| 0 | 0 | 0 |
py
|
|
finmag
|
finmag-master/src/finmag/tests/test_heun.py
|
import numpy as np
from finmag.native.llg import StochasticHeunIntegrator
def test_file_builds():
drift = lambda y, t: 2 * y
diffusion = lambda y, t: y + 0.1
integrator = StochasticHeunIntegrator(np.zeros(1), drift, diffusion, 1e-12)
integrator.helloWorld()
| 276 | 26.7 | 79 |
py
|
finmag
|
finmag-master/src/finmag/tests/test_applied_field.py
|
import numpy as np
import dolfin as df
from finmag import Simulation as Sim
from finmag.energies import Zeeman
Ms = 8.6e5
def test_uniform_external_field():
TOLERANCE = 3.5e-10
mesh = df.UnitCubeMesh(2, 2, 2)
sim = Sim(mesh, Ms)
sim.set_m((1, 0, 0))
sim.add(Zeeman((0, Ms, 0)))
sim.alpha = 1.0
sim.run_until(1e-9)
m = sim.m.reshape((3, -1)).mean(-1)
expected_m = np.array([0, 1, 0])
diff = np.abs(m - expected_m)
assert np.max(diff) < TOLERANCE
def test_negative_uniform_external_field():
    TOLERANCE = 1e-5
mesh = df.UnitCubeMesh(2, 2, 2)
sim = Sim(mesh, Ms)
sim.set_m((1, 0.1, 0)) # slightly misaligned
sim.add(Zeeman((-1.0 * Ms, 0, 0)))
sim.alpha = 1.0
sim.run_until(1e-9)
m = sim.m.reshape((3, -1)).mean(-1)
print "Average magnetisation ({:.2g}, {:.2g}, {:.2g}).".format(*m)
expected_m = np.array([-1, 0, 0])
diff = np.abs(m - expected_m)
assert np.max(diff) < TOLERANCE
def test_non_uniform_external_field():
TOLERANCE = 1e-9
length = 10e-9
vertices = 5
mesh = df.IntervalMesh(vertices, 0, length)
sim = Sim(mesh, Ms)
sim.set_m((1, 0, 0))
# applied field
# (0, -H, 0) for 0 <= x <= a
# (0, +H, 0) for a < x <= length
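    # (x[0]-a)/fabs(x[0]-a) is sign(x-a): -1 for x < a and +1 for x > a, so
    # the expression implements the piecewise field above (no mesh vertex
    # lies exactly at x = a for this discretisation).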
H_expr = df.Expression(("0", "H*(x[0]-a)/fabs(x[0]-a)", "0"), a=length / 2, H=Ms / 2, degree=1)
sim.add(Zeeman(H_expr))
sim.alpha = 1.0
sim.run_until(1e-9)
m = sim.m.reshape((3, -1)).mean(-1)
expected_m = np.array([0, 0, 0])
diff = np.abs(m - expected_m)
assert np.max(diff) < TOLERANCE
| 1,603 | 24.870968 | 99 |
py
|
finmag
|
finmag-master/src/finmag/tests/test_time.py
|
import dolfin as df
from finmag import Simulation as Sim
from finmag.energies import Exchange, Demag
epsilon = 1e-16
def test_current_time():
size = 20e-9
simplices = 4
mesh = df.BoxMesh(
df.Point(0, 0, 0),
df.Point(size, size, size),
simplices, simplices, simplices)
Ms = 860e3
A = 13.0e-12
sim = Sim(mesh, Ms)
sim.set_m((1, 0, 0))
sim.add(Exchange(A))
sim.add(Demag())
t = 0.0
t_max = 1e-10
dt = 1e-12
while t <= t_max:
t += dt
sim.run_until(t)
# cur_t is equal to whatever time the integrator decided to probe last
assert not sim.integrator.cur_t == 0.0
# t is equal to our current simulation time
assert abs(sim.t - t) < epsilon
| 764 | 20.857143 | 78 |
py
|
finmag
|
finmag-master/src/finmag/tests/test_dmi_terms.py
|
import pytest
import dolfin as df
mesh = df.BoxMesh(df.Point(0, 0, 0), df.Point(1, 1, 1), 10, 10, 10)
V1 = df.VectorFunctionSpace(mesh, "CG", 1)
VT = df.TensorFunctionSpace(mesh, "CG", 1)
Vs = df.FunctionSpace(mesh, "CG", 1)
tf = df.TestFunction(Vs)
#from finmag.energies.dmi import dmi_term3d, dmi_term2d, dmi_term3d_dolfin
@pytest.mark.skip(reason='Not sure if we even use dmi_term3d anymore')
def compare_dmi_term3d_with_dolfin(Mexp):
"""Expects string to feed into df.Expression for M"""
print "Working on Mexp=", Mexp
Mexp = df.Expression(Mexp, degree=1)
M = df.interpolate(Mexp, V1)
E = dmi_term3d(M, tf, 1)[0] * df.dx
E1 = df.assemble(E)
E_dolfin = dmi_term3d_dolfin(M, tf, 1)[0] * df.dx
dolfin_curl = df.project(df.curl(M), V1)
curlx, curly, curlz = dolfin_curl.split()
print "dolfin-curlx=", df.assemble(curlx * df.dx)
print "dolfin-curly=", df.assemble(curly * df.dx)
print "dolfin-curlz=", df.assemble(curlz * df.dx)
E2 = df.assemble(E_dolfin)
print E1, E2
print "Diff is %.18e" % (E1 - E2)
return abs(E1 - E2)
@pytest.mark.skip(reason='Not sure if we even use dmi_term3d anymore')
def compare_dmi_term2d_with_dolfin(Mexp):
"""Expects string to feed into df.Expression for M"""
print "Working on Mexp=", Mexp
Mexp = df.Expression(Mexp, degree=1)
V2d = df.VectorFunctionSpace(mesh, "CG", 1)
M2d = df.interpolate(Mexp, V2d)
M = df.interpolate(Mexp, V1)
E = dmi_term2d(M2d, tf, 1)[0] * df.dx
E1 = df.assemble(E)
E_dolfin = dmi_term3d_dolfin(M, tf, 1)[0] * df.dx
dolfin_curl = df.project(df.curl(M), V1)
curlx, curly, curlz = dolfin_curl.split()
print "dolfin-curlx=", df.assemble(curlx * df.dx)
print "dolfin-curly=", df.assemble(curly * df.dx)
print "dolfin-curlz=", df.assemble(curlz * df.dx)
E2 = df.assemble(E_dolfin)
print E1, E2
print "Diff is %.18e" % (E1 - E2)
return abs(E1 - E2)
@pytest.mark.skip(reason='Not sure if we even use dmi_term3d anymore')
def test_dmi_term2d():
mesh = df.BoxMesh(df.Point(0, 0, 0), df.Point(1, 1, 1), 10, 10, 10)
mesh2d = df.RectangleMesh(df.Point(0, 0), df.Point(1, 1), 10, 10)
eps = 1e-15
assert compare_dmi_term2d_with_dolfin(("x[0]", "0.", "0.")) < eps
assert compare_dmi_term2d_with_dolfin(("x[1]", "0.", "0.")) < eps
assert compare_dmi_term2d_with_dolfin(("x[2]", "0.", "0.")) < eps
assert compare_dmi_term2d_with_dolfin(("0", "x[0]", "0.")) < eps
assert compare_dmi_term2d_with_dolfin(("0", "x[1]", "0.")) < eps
assert compare_dmi_term2d_with_dolfin(("0", "x[2]", "0.")) < eps
#assert compare_dmi_term2d_with_dolfin(("0.","0","x[0]")) <eps
#assert compare_dmi_term2d_with_dolfin(("0.","0","x[1]")) <eps
#assert compare_dmi_term2d_with_dolfin(("0.","0","x[2]")) <eps
# and some more complicated expressions
assert compare_dmi_term2d_with_dolfin(("-0.5*x[1]", "0.5*x[0]", "1")) < eps
assert compare_dmi_term2d_with_dolfin(("-0.5*x[1]*x[1]",
"2*0.5*x[0]",
"0")) < eps
assert compare_dmi_term2d_with_dolfin(("-0.5*x[1]*x[0]",
"2*0.5*x[0]-x[1]",
"0")) < eps
@pytest.mark.skip(reason='Not sure if we even use dmi_term3d anymore')
def test_dmi_with_analytical_solution():
"""For a vector field a(x,y,z)=0.5 * (-y, x, c),
the curl is exactly 1.0."""
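    # Worked check: with a = 0.5*(-y, x, c) and constant c,
    #   curl a = (dAz/dy - dAy/dz, dAx/dz - dAz/dx, dAy/dx - dAx/dy)
    #          = (0, 0, 0.5 + 0.5) = (0, 0, 1),
    # so the z-component is exactly 1, matching Eexp below.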
eps = 1e-13
M = df.interpolate(df.Expression(("-0.5*x[1]", "0.5*x[0]", "1"), degree=1), V1)
c = 1.0
E1 = df.assemble(dmi_term3d(M, tf, c)[0] * df.dx)
Eexp = 1.0
print "Expect E=%e, computed E=%e" % (Eexp, E1)
diff = abs(E1 - Eexp)
print "deviation between analytical result and numerical is %e" % diff
assert diff < eps
"""For a vector field a(x,y,z)=0.5 * (-y, x, c),
the curl is exactly 1.0."""
eps = 1e-12
M = df.interpolate(df.Expression(("-0.5*x[1]*2", "0.5*x[0]*2", "1"), degree=1), V1)
c = 3.0
E1 = df.assemble(dmi_term3d(M, tf, c)[0] * df.dx)
Eexp = 6.0
print "Expect E=%e, computed E=%e" % (Eexp, E1)
diff = abs(E1 - Eexp)
print "deviation between analytical result and numerical is %e" % diff
assert diff < eps
@pytest.mark.skip(reason='Not sure if we even use dmi_term3d anymore')
def test_dmi_term3d():
eps = 1e-15
assert compare_dmi_term3d_with_dolfin(("x[0]", "0.", "0.")) < eps
assert compare_dmi_term3d_with_dolfin(("x[1]", "0.", "0.")) < eps
assert compare_dmi_term3d_with_dolfin(("x[2]", "0.", "0.")) < eps
assert compare_dmi_term3d_with_dolfin(("0", "x[0]", "0.")) < eps
assert compare_dmi_term3d_with_dolfin(("0", "x[1]", "0.")) < eps
assert compare_dmi_term3d_with_dolfin(("0", "x[2]", "0.")) < eps
assert compare_dmi_term3d_with_dolfin(("0.", "0", "x[0]")) < eps
assert compare_dmi_term3d_with_dolfin(("0.", "0", "x[1]")) < eps
assert compare_dmi_term3d_with_dolfin(("0.", "0", "x[2]")) < eps
# and some more complicated expressions
assert compare_dmi_term3d_with_dolfin(("-0.5*x[1]", "0.5*x[0]", "1")) < eps
assert compare_dmi_term3d_with_dolfin(("-0.5*x[1]*x[1]",
"2*0.5*x[0]",
"x[0]+x[1]+x[2]")) < eps
assert compare_dmi_term3d_with_dolfin(("-0.5*x[1]*x[0]",
"2*0.5*x[0]-x[2]",
"x[0]+x[1]+x[2]")) < eps
@pytest.mark.skip(reason='Not sure if we even use dmi_term3d anymore')
def test_can_post_process_form():
M = df.interpolate(df.Expression(("-0.5*x[1]", "0.5*x[0]", "1"), degree=1), V1)
c = 1.0
E = dmi_term3d(M, tf, c)[0] * df.dx
v = df.TestFunction(V1)
dE_dM = df.derivative(E, M, v)
#vol = df.assemble(df.dot(v, df.Constant([1,1,1]))*df.dx).array()
tmp = df.assemble(dE_dM)
g_form = df.derivative(dE_dM, M)
g_petsc = df.PETScMatrix()
df.assemble(g_form, tensor=g_petsc)
#H_dmi_petsc = df.PETScVector()
# if we got to this line, the required assembly to compute fields works.
assert True
if __name__ == "__main__":
# test_dmi_term3d()
# test_dmi_term2d()
# test_can_post_process_form()
test_dmi_with_analytical_solution()
| 6,324 | 40.071429 | 87 |
py
|
finmag
|
finmag-master/src/finmag/tests/test_solid_angle_invariance.py
|
import numpy as np
from finmag.native.llg import compute_solid_angle
import scipy.linalg
import scipy.stats
import math
import unittest
# Quaternion multiplication (Hamilton product)
def quaternion_product(a, b):
return np.array([
a[0] * b[0] - a[1] * b[1] - a[2] * b[2] - a[3] * b[3],
a[0] * b[1] + a[1] * b[0] + a[2] * b[3] - a[3] * b[2],
a[0] * b[2] - a[1] * b[3] + a[2] * b[0] + a[3] * b[1],
a[0] * b[3] + a[1] * b[2] - a[2] * b[1] + a[3] * b[0]
])
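# Sanity sketch (not used by the tests below): with the Hamilton product
# above, multiplying a quaternion by its conjugate yields (|q|^2, 0, 0, 0).
def _conjugate_product_sketch(q):
    q_conj = np.array([q[0], -q[1], -q[2], -q[3]])
    return quaternion_product(q_conj, q)  # expected: [|q|^2, 0, 0, 0]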
# Returns a nx3x3 array with n random 3x3 matrix uniformly distributed in SO(3)
def random_3d_rotation_matrix(n):
# Create a random unit quaternion
q = np.random.randn(4, n)
q /= np.sqrt(np.sum(q * q, axis=0))
qinv = np.array([q[0], -q[1], -q[2], -q[3]])
# Set up the SO(3) matrix defined by the quaternion
a = np.array([
quaternion_product(qinv, quaternion_product([0, 1, 0, 0], q))[1:],
quaternion_product(qinv, quaternion_product([0, 0, 1, 0], q))[1:],
quaternion_product(qinv, quaternion_product([0, 0, 0, 1], q))[1:]
])
# Disabled - return a^2 to check that the test fails
# return np.array([np.dot(x, x) for x in a.T])
return a.T
from finmag.util.solid_angle_magpar import return_csa_magpar
csa_magpar = return_csa_magpar()
def solid_angle_magpar(r, T):
assert r.shape == (3,)
assert T.shape == (3, 3)
# First index of T is node number, second spatial
return csa_magpar(r, T[0], T[1], T[2])
def solid_angle_llg(r, T):
assert r.shape == (3,)
assert T.shape == (3, 3)
# First index of T is node number, second spatial
return compute_solid_angle(r.reshape(3, 1), T.reshape(3, 3, 1))[0]
class SolidAngleInvarianceTests(unittest.TestCase):
def test_rotation_matrix(self):
np.random.seed(1)
matrices = random_3d_rotation_matrix(1000)
# Test that the determinant is 1
assert np.max(
np.abs([scipy.linalg.det(m) - 1 for m in matrices])) < 1e-12
# Test that the matrix is orthogonal
assert np.max(np.abs([np.dot(m, m.T) - np.eye(3)
for m in matrices])) < 1e-12
np.random.seed(1)
        # The uniform distribution in SO(3) is unchanged under arbitrary rotations.
        # Here, we compare the distributions of all nine matrix components.
n = 2000
m1 = random_3d_rotation_matrix(n)
m2 = random_3d_rotation_matrix(n)
def p_values():
for a in random_3d_rotation_matrix(10):
for i in xrange(3):
for j in xrange(3):
yield scipy.stats.ks_2samp(m1[:, i, j], np.dot(m2, a)[:, i, j])[1]
p = list(p_values())
assert np.min(p) > 0.0001
# The solid angle is invariant under 3d rotations that preserve orientation (SO(3))
# and changes sign for orthogonal transformations that change orientation
# (O(3) transformations not in SO(3))
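    # Example: the index permutation [1, 0, 2] used below swaps the x- and
    # y-coordinates; this is an orthogonal transformation with determinant
    # -1, so the solid angle is expected to change sign.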
def test_solid_angle(self):
np.random.seed(1)
for i in xrange(1000):
r = np.random.randn(3)
T = np.random.randn(3, 3)
q = random_3d_rotation_matrix(1)[0]
r_rotated = np.dot(q.T, r)
T_rotated = np.dot(T, q)
r_mirror = r[[1, 0, 2]].copy()
T_mirror = T[:, [1, 0, 2]].copy()
angle_llg = solid_angle_llg(r, T)
angle_magpar = solid_angle_magpar(r, T)
angle_llg_rotated = solid_angle_llg(r_rotated, T_rotated)
angle_llg_mirror = solid_angle_llg(r_mirror, T_mirror)
# Check the C++ solid angle vs magpar solid angle
self.assertAlmostEqual(math.fabs(angle_llg), angle_magpar)
# Check the LLG solid angle vs rotated LLG solid angle
self.assertAlmostEqual(angle_llg, angle_llg_rotated)
            # Check the LLG solid angle vs mirrored LLG solid angle
self.assertAlmostEqual(angle_llg, -angle_llg_mirror)
if __name__ == "__main__":
unittest.main()
| 3,961 | 36.028037 | 90 |
py
|
finmag
|
finmag-master/src/finmag/tests/test_energy_creation_with_variable_Ms.py
|
import pytest
import dolfin as df
from finmag.field import Field
from finmag.util.helpers import fnormalise
from finmag.energies import Exchange, UniaxialAnisotropy, Zeeman, Demag
Ms = 8.6e5
mesh = df.BoxMesh(df.Point(0, 0, 0), df.Point(10e-9, 10e-9, 10e-9), 5, 5, 5)
@pytest.fixture
def fixt(request):
fixt = request.cached_setup(setup=setup, scope="module")
return fixt
def setup():
"""
Create a cuboid mesh representing a magnetic material and two
dolfin.Functions defined on this mesh:
m -- unit magnetisation (linearly varying across the sample)
Ms_func -- constant function representing the saturation
magnetisation Ms
*Returns*
A triple (m_space, m, Ms_func), where m_space is the
VectorFunctionSpace (of type "continuous Lagrange") on which the
magnetisation m is defined and m, Ms_funct are as above.
"""
m_space = df.VectorFunctionSpace(mesh, "CG", 1)
m = Field(m_space, value=df.Expression(("1e-9", "x[0]/10", "0"), degree=1))
m.set_with_numpy_array_debug(fnormalise(m.get_numpy_array_debug()))
Ms_space = df.FunctionSpace(mesh, "DG", 0)
Ms_func = df.interpolate(df.Constant(Ms), Ms_space)
return m_space, m, Ms_func
@pytest.mark.parametrize(("EnergyClass", "init_args"), [
(Exchange, [1.3e-11]),
(UniaxialAnisotropy, [1e5, (0, 0, 1)]),
(Zeeman, [(0, 0, 1e6)]),
(Demag, []),
])
def test_can_create_energy_object(fixt, EnergyClass, init_args):
"""
Create two instances of the same energy class, once with a
constant number as Ms and once with a constant function.
Then check that the computed energies coincide.
"""
S3, m, Ms_func = fixt
E1 = EnergyClass(*init_args)
E1.setup(m, Field(df.FunctionSpace(mesh, "DG", 0), Ms))
E2 = EnergyClass(*init_args)
Ms_field = Field(df.FunctionSpace(mesh, "DG", 0))
Ms_field.f = Ms_func
E2.setup(m, Ms_field)
assert(abs(E1.compute_energy() - E2.compute_energy()) < 1e-12)
| 2,004 | 28.485294 | 79 |
py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.