query (stringlengths 9–9.05k) | document (stringlengths 10–222k) | metadata (dict) | negatives (listlengths 30–30) | negative_scores (listlengths 30–30) | document_score (stringlengths 4–10) | document_rank (stringclasses, 2 values)
---|---|---|---|---|---|---|
Testing M4 remeshing formula in 2D, 2 kernel, simple precision, o2 splitting. | def test_2D_m4_2k():
    scal, velo = setup_2D()
    advec = Advection(velo, scal, discretization=d2d,
                      method={TimeIntegrator: RK2,
                              Interpolation: Linear,
                              Remesh: L2_1,
                              Support: 'gpu_2k',
                              Splitting: 'o2'}
                      )
    advec_py = Advection(velo, scal, discretization=d2d,
                         method={TimeIntegrator: RK2,
                                 Interpolation: Linear,
                                 Remesh: L2_1,
                                 Support: '',
                                 Splitting: 'o2'},
                         )
    assertion_2D_withPython(scal, velo, advec, advec_py) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_multiple_case(self):\r\n\r\n shp = (3, 3)\r\n fx, fy, fz, fw = fmatrices('xyzw')\r\n dx, dy, dz, dw = dmatrices('xyzw')\r\n fv = fvector('r').dimshuffle('x', 0)\r\n dv = dvector('s').dimshuffle('x', 0)\r\n fxv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fyv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fzv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fwv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float32').reshape(1, shp[0])\r\n dxv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dyv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dzv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dwv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float64').reshape(1, shp[0])\r\n\r\n #We must be sure that the Canonizer is working, but that we don't have other\r\n # optimisation that could hide bug in the Canonizer as local_elemwise_fusion\r\n mode = compile.mode.get_default_mode()\r\n old_optimizer = mode._optimizer\r\n try:\r\n mode._optimizer = gof.Query([\"canonicalize\"])\r\n mode._optimizer = mode._optimizer.including('ShapeOpt')\r\n mode._optimizer = mode._optimizer.excluding(\r\n 'local_elemwise_fusion')\r\n\r\n #test x / x -> 1\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([(fx/fx,[fx],[fxv],'float32'),\r\n (dx/dx,[dx],[dxv],'float64'),\r\n (fv/fv,[fv],[fvv],'float32'),\r\n (dv/dv,[dv],[dvv],'float64'),\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert (out == numpy.ones(shp, dtype=out_dtype)).all()\r\n topo = f.maker.fgraph.toposort()\r\n if sym_inputs[0].broadcastable[0]:\r\n assert len(topo) == 2\r\n assert isinstance(topo[0].op, Shape_i)\r\n assert isinstance(topo[1].op, tensor.Alloc)\r\n else:\r\n assert len(topo) == 3\r\n assert isinstance(topo[0].op, Shape_i)\r\n assert isinstance(topo[1].op, Shape_i)\r\n assert isinstance(topo[2].op, tensor.Alloc)\r\n assert(out_dtype == out.dtype)\r\n\r\n #test (x * y) / x -> y\r\n for id,(g, sym_inputs, val_inputs, nb_elemwise, out_dtype) in enumerate([\r\n ((dx*dy)/dx,[dx,dy],[dxv,dyv],0,'float64'),\r\n ((fx*fy)/fx,[fx,fy],[fxv,fyv],0,'float32'),\r\n ((dv*dy)/dv,[dv,dy],[dvv,dyv],0,'float64'),\r\n ((fv*fy)/fv,[fv,fy],[fvv,fyv],0,'float32'),\r\n #must broadcast as their is a dimshuffle in the computation\r\n ((dx*dv)/dx,[dx,dv],[dxv,dvv],1,'float64'),\r\n #topo: [Elemwise{second,no_inplace}(x, <TensorType(float64, row)>)]\r\n ((fx*fv)/fx,[fx,fv],[fxv,fvv],1,'float32')\r\n #topo: [Elemwise{second,no_inplace}(x, <TensorType(float32, row)>)]\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert(out_dtype == out.dtype)\r\n assert numpy.allclose(out, val_inputs[1])\r\n topo = f.maker.fgraph.toposort()\r\n print \"ID TOPO\", id, topo, sym_inputs\r\n for r, t in f.maker.fgraph.shape_feature.shape_of.items():\r\n print ' ', r, t\r\n if topo and not(len(topo)==1 and topo[0].op==deep_copy_op):\r\n for node in topo[:-1]:\r\n assert isinstance(node.op, Shape_i)\r\n assert isinstance(topo[-1].op, tensor.Alloc)\r\n\r\n #test x / y / x -> 1 / y\r\n for id,(g, sym_inputs, val_inputs, nb_elemwise, out_dtype) in enumerate([\r\n ((dx/dy)/dx,[dx,dy],[dxv,dyv],1,'float64'),\r\n ((fx/fy)/fx,[fx,fy],[fxv,fyv],1,'float32'),\r\n ((dv/dy)/dv,[dv,dy],[dvv,dyv],1,'float64'),\r\n 
((fv/fy)/fv,[fv,fy],[fvv,fyv],1,'float32'),\r\n #must broadcast as their is a dimshuffle in the computation\r\n\r\n ((dx/dv)/dx,[dx,dv],[dxv,dvv],1,'float64'),\r\n #topo: [Shape_i, Shape_i, Elemwise{inv,no_inplace}(<TensorType(float64, row)>), Alloc]\r\n ((fx/fv)/fx,[fx,fv],[fxv,fvv],1,'float32'),\r\n #topo:[Shape_i, Shape_i, Elemwise{inv,no_inplace}(<TensorType(float32, row)>), Alloc]\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, (1 / val_inputs[1]))\r\n topo = f.maker.fgraph.toposort()\r\n print topo\r\n elem = [t for t in topo if isinstance(t.op, T.Elemwise)]\r\n assert len(elem) == nb_elemwise\r\n assert isinstance(elem[0].op, (T.Elemwise, ))\r\n assert isinstance(elem[0].op.scalar_op, (\r\n theano.scalar.basic.Inv, theano.scalar.basic.TrueDiv))\r\n assert(out_dtype == out.dtype)\r\n\r\n #test (a / b) * (b / c) * (c / d) -> a / d\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n ((dx / dy) * (dy / dz) * (dz / dw),[dx,dy,dz,dw],[dxv,dyv,dzv,dwv],'float64'),\r\n ((fx / fy) * (fy / fz) * (fz / fw),[fx,fy,fz,fw],[fxv,fyv,fzv,fwv],'float32'),\r\n ((dv / dy) * (dy / dz) * (dz / dw),[dv,dy,dz,dw],[dvv,dyv,dzv,dwv],'float64'),\r\n ((fv / fy) * (fy / fz) * (fz / fw),[fv,fy,fz,fw],[fvv,fyv,fzv,fwv],'float32'),\r\n ((dx / dv) * (dv / dz) * (dz / dw),[dx,dv,dz,dw],[dxv,dvv,dzv,dwv],'float64'),\r\n ((fx / fv) * (fv / fz) * (fz / fw),[fx,fv,fz,fw],[fxv,fvv,fzv,fwv],'float32'),\r\n ((dx / dy) * (dy / dv) * (dv / dw),[dx,dy,dv,dw],[dxv,dyv,dvv,dwv],'float64'),\r\n ((fx / fy) * (fy / fv) * (fv / fw),[fx,fy,fv,fw],[fxv,fyv,fvv,fwv],'float32'),\r\n ((dx / dy) * (dy / dz) * (dz / dv),[dx,dy,dz,dv],[dxv,dyv,dzv,dvv],'float64'),\r\n ((fx / fy) * (fy / fz) * (fz / fv),[fx,fy,fz,fv],[fxv,fyv,fzv,fvv],'float32'),\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, (val_inputs[0] / val_inputs[3]))\r\n topo = f.maker.fgraph.toposort()\r\n assert len(topo) == 1\r\n assert isinstance(topo[0].op, (T.Elemwise, ))\r\n assert isinstance(topo[0].op.scalar_op,\r\n theano.scalar.basic.TrueDiv)\r\n assert len(topo[0].inputs) == 2\r\n assert(out_dtype == out.dtype)\r\n\r\n #test (2.0 * x) / (4.0 * y) -> (0.5 * x) / y\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n (((2.0*dx)/(4.0*dy)),[dx,dy],[dxv,dyv],'float64'),\r\n (((2.0*fx)/(4.0*fy)),[fx,fy],[fxv,fyv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n (((2.0*dv)/(4.0*dy)),[dv,dy],[dvv,dyv],'float64'),\r\n (((2.0*fv)/(4.0*fy)),[fv,fy],[fvv,fyv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n (((2.0*dx)/(4.0*dv)),[dx,dv],[dxv,dvv],'float64'),\r\n (((2.0*fx)/(4.0*fv)),[fx,fv],[fxv,fvv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ]):\r\n\r\n if isinstance(out_dtype, dict):\r\n out_dtype = out_dtype[config.cast_policy]\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, (0.5 *\r\n val_inputs[0] / val_inputs[1]))\r\n topo = f.maker.fgraph.toposort()\r\n assert len(topo) == 2\r\n assert isinstance(topo[0].op, (T.Elemwise, ))\r\n assert isinstance(topo[0].op.scalar_op,\r\n theano.scalar.basic.Mul)\r\n assert len(topo[0].inputs) == 2\r\n assert isinstance(topo[1].op, (T.Elemwise, ))\r\n assert isinstance(topo[1].op.scalar_op,\r\n theano.scalar.basic.TrueDiv)\r\n assert len(topo[1].inputs) == 2\r\n assert(out_dtype == 
out.dtype)\r\n\r\n #test 2 * x / 2 -> x\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n ((2*dx)/2,[dx],[dxv],'float64'),\r\n ((2*fx)/2,[fx],[fxv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ((2*dv)/2,[dv],[dvv],'float64'),\r\n ((2*fv)/2,[fv],[fvv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ]):\r\n if isinstance(out_dtype, dict):\r\n out_dtype = out_dtype[config.cast_policy]\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, val_inputs[0])\r\n topo = f.maker.fgraph.toposort()\r\n assert len(topo) == 1\r\n topo[0].op == deep_copy_op\r\n assert(out_dtype == out.dtype)\r\n\r\n #test x / abs(x) -> sign(x)\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n (dx/abs(dx),[dx],[0.5-dxv],'float64'),\r\n (fx/abs(fx),[fx],[0.5-fxv], 'float32'),\r\n (dx/abs(dx),[dx],[0.1*dxv],'float64'),\r\n (fx/abs(fx),[fx],[0.1*fxv], 'float32'),\r\n (dv/abs(dv),[dv],[0.5-dvv],'float64'),\r\n (fv/abs(fv),[fv],[0.5-fvv], 'float32'),\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.all(numpy.isfinite(out))\r\n assert numpy.allclose(out, numpy.sign(val_inputs[0]))\r\n assert(out_dtype == out.dtype)\r\n assert len(f.maker.fgraph.toposort()) == 1\r\n\r\n #test (2*x) / (3*abs(x)) -> sign(x)\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n ((2 * dx) / (3 * abs(dx)), [dx], [0.5 - dxv], 'float64'),\r\n ((2 * fx) / (3 * abs(fx)), [fx], [0.5 - fxv],\r\n {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ((2 * dx) / (3 * abs(dx)), [dx], [0.1 * dxv], 'float64'),\r\n ((2 * fx) / (3 * abs(fx)), [fx], [0.1 * fxv],\r\n {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ((2 * dv) / (3 * abs(dv)), [dv], [0.5 - dvv], 'float64'),\r\n ((2 * fv) / (3 * abs(fv)), [fv], [0.5 - fvv],\r\n {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ]):\r\n\r\n if isinstance(out_dtype, dict):\r\n out_dtype = out_dtype[config.cast_policy]\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n topo = f.maker.fgraph.toposort()\r\n out = f(*val_inputs)\r\n assert numpy.all(numpy.isfinite(out))\r\n assert numpy.allclose(out, numpy.sign(val_inputs[0]) * 2 / 3)\r\n assert(out_dtype == out.dtype)\r\n finally:\r\n mode._optimizer = old_optimizer",
"def test_2D_m4_1k():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: 'gpu_1k',\n Splitting: 'o2'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: '',\n Splitting: 'o2'},\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def test_dmi_uses_unit_length_2dmesh():\n A = 8.78e-12 # J/m\n D = 1.58e-3 # J/m^2\n Ms = 3.84e5 # A/m\n\n energies = []\n\n # unit_lengths 1e-9 and 1 are common, let's throw in an intermediate length\n # just to challenge the system a little:\n for unit_length in (1, 1e-4, 1e-9):\n radius = 200e-9 / unit_length\n maxh = 5e-9 / unit_length\n helical_period = (4 * pi * A / D) / unit_length\n k = 2 * pi / helical_period\n # HF 27 April 2014: The next command fails in dolfin 1.3\n # mesh = df.CircleMesh(df.Point(0, 0), radius, maxh)\n # The actual shape of the domain shouldn't matter for the test,\n # so let's use a Rectangular mesh which should work the same:\n\n nx = ny = int(round(radius / maxh))\n mesh = df.RectangleMesh(df.Point(0, 0), df.Point(radius, radius), nx, ny)\n\n S3 = df.VectorFunctionSpace(mesh, \"CG\", 1, dim=3)\n m_expr = df.Expression((\"0\", \"cos(k * x[0])\", \"sin(k * x[0])\"), k=k, degree=1)\n m = Field(S3, m_expr, name='m')\n dmi = DMI(D)\n Ms_dg = Field(df.FunctionSpace(mesh, 'DG', 0), Ms)\n dmi.setup(m, Ms_dg, unit_length=unit_length)\n energies.append(dmi.compute_energy())\n\n H = df.Function(S3)\n H.vector()[:] = dmi.compute_field()\n print H(0.0, 0.0)\n\n print \"Using unit_length = {}.\".format(unit_length)\n print \"Helical period {}.\".format(helical_period)\n print \"Energy {}.\".format(dmi.compute_energy())\n\n rel_diff_energies = abs(energies[0] - energies[1]) / abs(energies[1])\n print \"Relative difference of energy {}.\".format(rel_diff_energies)\n assert rel_diff_energies < 1e-13\n\n rel_diff_energies2 = abs(energies[0] - energies[2]) / abs(energies[2])\n print \"Relative difference2 of energy {}.\".format(rel_diff_energies2)\n assert rel_diff_energies2 < 1e-13",
"def test_2D_m4_1k_sFH():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: 'gpu_1k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def test_MeshMat_1group(self):\n\n MS_grp = self.meshsol.get_group(\"stator\")\n cells_grp, nb_cell, indices = MS_grp.get_mesh().get_cell()\n solution = np.array([[0, 1, 2], [1, 2, 3]])\n result_tgl = cells_grp[\"triangle\"]\n testA = np.sum(abs(solution - result_tgl))\n msg = (\n \"Wrong output: returned \" + str(result_tgl) + \", expected: \" + str(solution)\n )\n self.assertAlmostEqual(testA, 0, msg=msg, delta=self.DELTA)\n\n MS_grp = self.meshsol.get_group(\"rotor\")\n cells_grp, nb_cell, indices = MS_grp.get_mesh().get_cell()\n solution = np.array([[3, 3], [1, 2], [2, 3]])\n results = cells_grp[\"triangle\"] # The point indices have changed !\n points = MS_grp.get_mesh().get_point(results)\n testA = np.sum(abs(solution - points))\n msg = \"Wrong output: returned \" + str(results) + \", expected: \" + str(solution)\n self.assertAlmostEqual(testA, 0, msg=msg, delta=self.DELTA)",
"def test_2D_m4_2k_sFH():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: 'gpu_2k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def test_1_2_2D_cube_splits(self):\n check = [(0, 0), (1, 1), (1, 0), (0, 1), (0.5, 0.5), (0.0, 0.5),\n (0.5, 0.0),\n (0.25, 0.25), (1.0, 0.5), (0.5, 1.0), (0.75, 0.75),\n (0.75, 0.25),\n (0.25, 0.75), (0.5, 0.25), (0.25, 0.5), (0.375, 0.375),\n (0.0, 0.25),\n (0.25, 0.0), (0.125, 0.125), (0.125, 0.375), (0.375, 0.125),\n (0.5, 0.75), (0.75, 0.5), (0.625, 0.625), (1.0, 0.75),\n (0.75, 1.0),\n (0.875, 0.875), (0.875, 0.625), (0.625, 0.875), (0.625, 0.375),\n (1.0, 0.25), (0.75, 0.0), (0.875, 0.125), (0.875, 0.375),\n (0.625, 0.125), (0.375, 0.625), (0.0, 0.75), (0.25, 1.0),\n (0.125, 0.875), (0.125, 0.625), (0.375, 0.875)]\n\n nn_checks = {(0, 0): [(0.25, 0.0), (0.0, 0.25), (0.125, 0.125)],\n (0.625, 0.375): [(0.5, 0.5), (0.75, 0.25), (0.75, 0.5),\n (0.5, 0.25)],\n (0, 1): [(0.25, 1.0), (0.125, 0.875),(0.0, 0.75)],\n (0.625, 0.125): [(0.5, 0.0), (0.75, 0.25), (0.75, 0.0),\n (0.5, 0.25)]}\n\n\n init_triangulation(2, 2, check, nn_checks)",
"def test_3_2_4D_cube_splits(self):\n check = [(0, 0, 0, 0), (1, 1, 1, 1), (1, 0, 0, 0), (1, 1, 0, 0),\n (1, 1, 1, 0),\n (1, 1, 0, 1), (1, 0, 1, 0), (1, 0, 1, 1), (1, 0, 0, 1),\n (0, 1, 0, 0),\n (0, 1, 1, 0), (0, 1, 1, 1), (0, 1, 0, 1), (0, 0, 1, 0),\n (0, 0, 1, 1),\n (0, 0, 0, 1), (0.5, 0.5, 0.5, 0.5), (0.0, 0.5, 0.5, 0.5),\n (0.0, 0.0, 0.5, 0.5), (0.0, 0.0, 0.0, 0.5),\n (0.0, 0.0, 0.5, 0.0),\n (0.0, 0.5, 0.0, 0.5), (0.0, 0.5, 0.0, 0.0),\n (0.0, 0.5, 0.5, 0.0),\n (0.5, 0.0, 0.5, 0.5), (0.5, 0.0, 0.0, 0.5),\n (0.5, 0.0, 0.0, 0.0),\n (0.5, 0.0, 0.5, 0.0), (0.5, 0.5, 0.0, 0.5),\n (0.5, 0.5, 0.0, 0.0),\n (0.5, 0.5, 0.5, 0.0), (0.25, 0.25, 0.25, 0.25),\n (1.0, 0.5, 0.5, 0.5),\n (1.0, 1.0, 0.5, 0.5), (1.0, 1.0, 1.0, 0.5),\n (1.0, 1.0, 0.5, 1.0),\n (1.0, 0.5, 1.0, 0.5), (1.0, 0.5, 1.0, 1.0),\n (1.0, 0.5, 0.5, 1.0),\n (0.5, 1.0, 0.5, 0.5), (0.5, 1.0, 1.0, 0.5),\n (0.5, 1.0, 1.0, 1.0),\n (0.5, 1.0, 0.5, 1.0), (0.5, 0.5, 1.0, 0.5),\n (0.5, 0.5, 1.0, 1.0),\n (0.5, 0.5, 0.5, 1.0), (0.75, 0.75, 0.75, 0.75),\n (1.0, 0.0, 0.5, 0.5),\n (1.0, 0.0, 0.0, 0.5), (1.0, 0.0, 0.5, 0.0),\n (1.0, 0.5, 0.0, 0.5),\n (1.0, 0.5, 0.0, 0.0), (1.0, 0.5, 0.5, 0.0),\n (0.75, 0.25, 0.25, 0.25),\n (1.0, 1.0, 0.0, 0.5), (1.0, 1.0, 0.5, 0.0),\n (0.5, 1.0, 0.0, 0.5),\n (0.5, 1.0, 0.0, 0.0), (0.5, 1.0, 0.5, 0.0),\n (0.75, 0.75, 0.25, 0.25),\n (1.0, 0.5, 1.0, 0.0), (0.5, 1.0, 1.0, 0.0),\n (0.5, 0.5, 1.0, 0.0),\n (0.75, 0.75, 0.75, 0.25), (1.0, 0.5, 0.0, 1.0),\n (0.5, 1.0, 0.0, 1.0),\n (0.5, 0.5, 0.0, 1.0), (0.75, 0.75, 0.25, 0.75),\n (1.0, 0.0, 1.0, 0.5),\n (0.5, 0.0, 1.0, 0.5), (0.5, 0.0, 1.0, 0.0),\n (0.75, 0.25, 0.75, 0.25),\n (1.0, 0.0, 0.5, 1.0), (0.5, 0.0, 1.0, 1.0),\n (0.5, 0.0, 0.5, 1.0),\n (0.75, 0.25, 0.75, 0.75), (0.5, 0.0, 0.0, 1.0),\n (0.75, 0.25, 0.25, 0.75), (0.0, 1.0, 0.5, 0.5),\n (0.0, 1.0, 0.0, 0.5),\n (0.0, 1.0, 0.5, 0.0), (0.25, 0.75, 0.25, 0.25),\n (0.0, 1.0, 1.0, 0.5),\n (0.0, 0.5, 1.0, 0.5), (0.0, 0.5, 1.0, 0.0),\n (0.25, 0.75, 0.75, 0.25),\n (0.0, 1.0, 0.5, 1.0), (0.0, 0.5, 1.0, 1.0),\n (0.0, 0.5, 0.5, 1.0),\n (0.25, 0.75, 0.75, 0.75), (0.0, 0.5, 0.0, 1.0),\n (0.25, 0.75, 0.25, 0.75), (0.0, 0.0, 1.0, 0.5),\n (0.25, 0.25, 0.75, 0.25), (0.0, 0.0, 0.5, 1.0),\n (0.25, 0.25, 0.75, 0.75), (0.25, 0.25, 0.25, 0.75)]\n\n nn_checks = {(0, 0, 0, 0): [(0.0, 0.5, 0.5, 0.0), (0.5, 0.0, 0.5, 0.5),\n (0.5, 0.5, 0.0, 0.5),\n (0.25, 0.25, 0.25, 0.25),\n (0.5, 0.0, 0.0, 0.5), (0.5, 0.0, 0.5, 0.0),\n (0.5, 0.5, 0.0, 0.0), (0.0, 0.0, 0.5, 0.0),\n (0.0, 0.5, 0.0, 0.0), (0.0, 0.0, 0.0, 0.5),\n (0.0, 0.5, 0.5, 0.5), (0.0, 0.0, 0.5, 0.5),\n (0.0, 0.5, 0.0, 0.5), (0.5, 0.0, 0.0, 0.0),\n (0.5, 0.5, 0.5, 0.0)],\n (1.0, 1.0, 0.5, 0.5): [(1.0, 1.0, 0.5, 1.0), (1, 1, 0, 1),\n (1.0, 1.0, 1.0, 0.5),\n (1.0, 0.5, 0.5, 0.5), (1, 1, 1, 0),\n (1.0, 1.0, 0.5, 0.0),\n (1.0, 1.0, 0.0, 0.5), (1, 1, 0, 0),\n (1, 1, 1, 1), (0.5, 1.0, 0.5, 0.5),\n (0.5, 0.5, 0.5, 0.5),\n (0.75, 0.75, 0.75, 0.75),\n (0.75, 0.75, 0.25, 0.25),\n (0.75, 0.75, 0.75, 0.25),\n (0.75, 0.75, 0.25, 0.75)],\n (0.25, 0.25, 0.25, 0.75): [(0.0, 0.0, 0.5, 1.0),\n (0.0, 0.5, 0.0, 1.0),\n (0.5, 0.0, 0.0, 1.0),\n (0.5, 0.5, 0.5, 1.0),\n (0, 0, 0, 1),\n (0.0, 0.5, 0.5, 1.0),\n (0.5, 0.0, 0.5, 1.0),\n (0.5, 0.5, 0.0, 1.0),\n (0.0, 0.0, 0.0, 0.5),\n (0.0, 0.0, 0.5, 0.5),\n (0.0, 0.5, 0.5, 0.5),\n (0.0, 0.5, 0.0, 0.5),\n (0.5, 0.0, 0.5, 0.5),\n (0.5, 0.5, 0.0, 0.5),\n (0.5, 0.5, 0.5, 0.5),\n (0.5, 0.0, 0.0, 0.5)]}\n\n init_triangulation(4, 1, check, nn_checks)",
"def test_filter_l2_1():\n box = Box(length=L, origin=O)\n f = Field(box, formula=func, name='f0')\n d_fine = Discretization([513, 513, 513])\n d_coarse = Discretization([257, 257, 257], ghosts=[2, 2, 2])\n op = MultiresolutionFilter(d_in=d_fine, d_out=d_coarse,\n variables={f: d_coarse},\n method={Remesh: L2_1, })\n op.discretize()\n op.setup()\n topo_coarse = op.discreteFields[f].topology\n topo_fine = [t for t in f.discreteFields.keys()\n if not t is topo_coarse][0]\n f.initialize(topo=topo_fine)\n f_out = f.discreteFields[topo_coarse]\n op.apply(simu)\n valid = [npw.zeros(f_out[0].shape), ]\n valid = func(valid, *topo_coarse.mesh.coords)\n assert np.allclose(valid[0][topo_coarse.mesh.iCompute],\n f_out[0][topo_coarse.mesh.iCompute]), \\\n np.max(np.abs(valid[0][topo_coarse.mesh.iCompute] -\n f_out[0][topo_coarse.mesh.iCompute]))",
"def test_power_spectral_density_from_spatially_resolved_magnetisation_confined_to_mesh_region(tmpdir, debug=False):\n os.chdir(str(tmpdir))\n RTOL = 1e-10\n\n H1 = 1e6 # external field in A/m\n alpha1 = 0.5 # some sort of damping constant\n omega1 = gamma * H1 # precession frequency\n\n H2 = 2.8e4 # external field in A/m\n alpha2 = 0.3 # some sort of damping constant\n omega2 = gamma * H2 # precession frequency\n\n ##\n # Step 1: Construct a time series of artificial magnetisation\n # data and save it to a bunch of .npy files.\n ##\n t_step = 1e-11\n t_ini = 0\n t_end = 10e-9\n\n N1 = 42 # in a real application this would be the number of mesh vertices\n N2 = 23 # in a real application this would be the number of mesh vertices\n fft_test_helpers.create_test_npy_files_with_two_regions(\n str(tmpdir), t_step, t_ini, t_end, omega1, alpha1, N1, omega2, alpha2, N2)\n\n ##\n # Step 2: compute the FFT of a resampled time series, both by\n # hand and using FFT_m.\n ##\n # XXX TODO: Resampling timesteps is not supported when using .npy\n # files. Either simplify the code below, or implement saving to\n # .h5 files so that it's easier to implement resampling for\n # spatially resolved data, too.\n ##\n t_step_res = t_step\n t_ini_res = t_ini\n t_end_res = t_end\n ts_resampled = np.arange(t_ini_res, t_end_res, t_step_res)\n\n # Compute time series based on resampled timesteps\n mx_res = exp(-ts_resampled * 1e8 / alpha1) * sin(omega1 * ts_resampled)\n my_res = exp(-ts_resampled * 1e8 / alpha1) * cos(omega1 * ts_resampled)\n mz_res = 1 - sqrt(mx_res ** 2 + my_res ** 2)\n\n # Compute 'analytical' Fourier transform of resampled time series and\n # determine the power of the spectrum for each component. We also need\n # to multiply by the number of mesh nodes because the numerical algorithm\n # sums up all contributions at the individual nodes (but we can just\n # multiply because they are all identical by construction).\n psd_mx_expected = N1 * np.absolute(np.fft.rfft(mx_res)) ** 2\n psd_my_expected = N1 * np.absolute(np.fft.rfft(my_res)) ** 2\n psd_mz_expected = N1 * np.absolute(np.fft.rfft(mz_res)) ** 2\n\n # Compute Fourier transform of resampled time series using FFT_m\n freqs_computed, psd_mx_computed, psd_my_computed, psd_mz_computed = \\\n compute_power_spectral_density('m_ringdown*.npy', t_step_res, t_ini=t_ini_res,\n t_end=t_end_res, subtract_values=None, restrict_to_vertices=xrange(N1))\n\n # Check that the analytically determined power spectra are the same as the\n # computed ones.\n assert(np.allclose(psd_mx_expected, psd_mx_computed, atol=0, rtol=RTOL))\n assert(np.allclose(psd_my_expected, psd_my_computed, atol=0, rtol=RTOL))\n assert(np.allclose(psd_mz_expected, psd_mz_computed, atol=0, rtol=RTOL))\n\n if debug:\n # Plot the spectra for debugging\n fig = plt.figure(figsize=(20, 5))\n ax = fig.gca()\n ax.plot(freqs_computed, psd_mx_expected, label='psd_mx_expected')\n ax.plot(freqs_computed, psd_my_expected, label='psd_my_expected')\n ax.plot(freqs_computed, psd_mz_expected, label='psd_mz_expected')\n ax.plot(freqs_computed, psd_mx_computed, label='psd_mx_computed')\n ax.plot(freqs_computed, psd_my_computed, label='psd_my_computed')\n ax.plot(freqs_computed, psd_mz_computed, label='psd_mz_computed')\n ax.legend(loc='best')\n fig.savefig('psd_m_McMichaelStiles.png')",
"def test_2_2_3D_rec_splits(self):\n check = [(-3.0, -2.0, 0.0), (4.0, 10.0, 1.0), (4.0, -2.0, 0.0),\n (4.0, 10.0, 0.0), (4.0, -2.0, 1.0), (-3.0, 10.0, 0.0),\n (-3.0, 10.0, 1.0), (-3.0, -2.0, 1.0), (0.5, 4.0, 0.5),\n (-3.0, 4.0, 0.5), (-3.0, -2.0, 0.5), (-3.0, 4.0, 0.0),\n (0.5, -2.0, 0.5), (0.5, -2.0, 0.0), (0.5, 4.0, 0.0),\n (-1.25, 1.0, 0.25), (4.0, 4.0, 0.5), (4.0, 10.0, 0.5),\n (4.0, 4.0, 1.0), (0.5, 10.0, 0.5), (0.5, 10.0, 1.0),\n (0.5, 4.0, 1.0), (2.25, 7.0, 0.75), (4.0, -2.0, 0.5),\n (4.0, 4.0, 0.0), (2.25, 1.0, 0.25), (0.5, 10.0, 0.0),\n (2.25, 7.0, 0.25), (0.5, -2.0, 1.0), (2.25, 1.0, 0.75),\n (-3.0, 10.0, 0.5), (-1.25, 7.0, 0.25), (-3.0, 4.0, 1.0),\n (-1.25, 7.0, 0.75), (-1.25, 1.0, 0.75), (0.5, 1.0, 0.25),\n (0.5, 4.0, 0.25), (0.5, 1.0, 0.5), (-1.25, 4.0, 0.25),\n (-1.25, 4.0, 0.5), (-1.25, 1.0, 0.5), (-0.375, 2.5, 0.375),\n (-3.0, 1.0, 0.25), (-3.0, -2.0, 0.25), (-3.0, 1.0, 0.0),\n (-1.25, -2.0, 0.25), (-1.25, -2.0, 0.0), (-1.25, 1.0, 0.0),\n (-2.125, -0.5, 0.125), (-3.0, 4.0, 0.25), (-3.0, 1.0, 0.5),\n (-2.125, 2.5, 0.375), (-1.25, -2.0, 0.5),\n (-2.125, -0.5, 0.375), (-1.25, 4.0, 0.0), (-2.125, 2.5, 0.125),\n (0.5, -2.0, 0.25), (-0.375, -0.5, 0.375), (0.5, 1.0, 0.0),\n (-0.375, -0.5, 0.125), (-0.375, 2.5, 0.125), (0.5, 7.0, 0.75),\n (0.5, 4.0, 0.75), (0.5, 7.0, 0.5), (2.25, 4.0, 0.75),\n (2.25, 4.0, 0.5), (2.25, 7.0, 0.5), (1.375, 5.5, 0.625),\n (4.0, 7.0, 0.75), (4.0, 10.0, 0.75), (4.0, 7.0, 1.0),\n (2.25, 10.0, 0.75), (2.25, 10.0, 1.0), (2.25, 7.0, 1.0),\n (3.125, 8.5, 0.875), (4.0, 4.0, 0.75), (4.0, 7.0, 0.5),\n (3.125, 5.5, 0.625), (2.25, 10.0, 0.5), (3.125, 8.5, 0.625),\n (2.25, 4.0, 1.0), (3.125, 5.5, 0.875), (0.5, 10.0, 0.75),\n (1.375, 8.5, 0.625), (0.5, 7.0, 1.0), (1.375, 8.5, 0.875),\n (1.375, 5.5, 0.875), (2.25, 4.0, 0.25), (2.25, 1.0, 0.5),\n (1.375, 2.5, 0.375), (4.0, 1.0, 0.25), (4.0, -2.0, 0.25),\n (4.0, 1.0, 0.0), (2.25, -2.0, 0.25), (2.25, -2.0, 0.0),\n (2.25, 1.0, 0.0), (3.125, -0.5, 0.125), (4.0, 4.0, 0.25),\n (4.0, 1.0, 0.5), (3.125, 2.5, 0.375), (2.25, -2.0, 0.5),\n (3.125, -0.5, 0.375), (2.25, 4.0, 0.0), (3.125, 2.5, 0.125),\n (1.375, -0.5, 0.375), (1.375, -0.5, 0.125),\n (1.375, 2.5, 0.125), (0.5, 7.0, 0.25), (1.375, 5.5, 0.375),\n (4.0, 7.0, 0.25), (4.0, 10.0, 0.25), (4.0, 7.0, 0.0),\n (2.25, 10.0, 0.25), (2.25, 10.0, 0.0), (2.25, 7.0, 0.0),\n (3.125, 8.5, 0.125), (3.125, 5.5, 0.375), (3.125, 8.5, 0.375),\n (3.125, 5.5, 0.125), (0.5, 10.0, 0.25), (1.375, 8.5, 0.375),\n (0.5, 7.0, 0.0), (1.375, 8.5, 0.125), (1.375, 5.5, 0.125),\n (0.5, 1.0, 0.75), (1.375, 2.5, 0.625), (4.0, 1.0, 0.75),\n (4.0, -2.0, 0.75), (4.0, 1.0, 1.0), (2.25, -2.0, 0.75),\n (2.25, -2.0, 1.0), (2.25, 1.0, 1.0), (3.125, -0.5, 0.875),\n (3.125, 2.5, 0.625), (3.125, -0.5, 0.625), (3.125, 2.5, 0.875),\n (0.5, -2.0, 0.75), (1.375, -0.5, 0.625), (0.5, 1.0, 1.0),\n (1.375, -0.5, 0.875), (1.375, 2.5, 0.875), (-1.25, 7.0, 0.5),\n (-0.375, 5.5, 0.375), (-3.0, 7.0, 0.25), (-3.0, 10.0, 0.25),\n (-3.0, 7.0, 0.0), (-1.25, 10.0, 0.25), (-1.25, 10.0, 0.0),\n (-1.25, 7.0, 0.0), (-2.125, 8.5, 0.125), (-3.0, 7.0, 0.5),\n (-2.125, 5.5, 0.375), (-1.25, 10.0, 0.5), (-2.125, 8.5, 0.375),\n (-2.125, 5.5, 0.125), (-0.375, 8.5, 0.375),\n (-0.375, 8.5, 0.125), (-0.375, 5.5, 0.125), (-1.25, 4.0, 0.75),\n (-0.375, 5.5, 0.625), (-3.0, 7.0, 0.75), (-3.0, 10.0, 0.75),\n (-3.0, 7.0, 1.0), (-1.25, 10.0, 0.75), (-1.25, 10.0, 1.0),\n (-1.25, 7.0, 1.0), (-2.125, 8.5, 0.875), (-3.0, 4.0, 0.75),\n (-2.125, 5.5, 0.625), (-2.125, 8.5, 0.625), (-1.25, 4.0, 1.0),\n (-2.125, 5.5, 0.875), (-0.375, 8.5, 0.625),\n (-0.375, 8.5, 
0.875), (-0.375, 5.5, 0.875),\n (-0.375, 2.5, 0.625), (-3.0, 1.0, 0.75), (-3.0, -2.0, 0.75),\n (-3.0, 1.0, 1.0), (-1.25, -2.0, 0.75), (-1.25, -2.0, 1.0),\n (-1.25, 1.0, 1.0), (-2.125, -0.5, 0.875), (-2.125, 2.5, 0.625),\n (-2.125, -0.5, 0.625), (-2.125, 2.5, 0.875),\n (-0.375, -0.5, 0.625), (-0.375, -0.5, 0.875),\n (-0.375, 2.5, 0.875)]\n nn_checks = {(2.25, 7.0, 0.75): [(4.0, 7.0, 0.75), (2.25, 7.0, 1.0),\n (4.0, 7.0, 0.5), (4.0, 7.0, 1.0),\n (4.0, 4.0, 0.75), (1.375, 5.5, 0.875),\n (2.25, 4.0, 1.0), (2.25, 4.0, 0.5),\n (2.25, 4.0, 0.75), (3.125, 8.5, 0.875),\n (3.125, 8.5, 0.625), (4.0, 10.0, 0.75),\n (2.25, 10.0, 1.0), (2.25, 10.0, 0.75),\n (2.25, 10.0, 0.5), (1.375, 8.5, 0.625),\n (1.375, 8.5, 0.875), (0.5, 7.0, 0.75),\n (0.5, 7.0, 0.5), (3.125, 5.5, 0.625),\n (3.125, 5.5, 0.875), (0.5, 10.0, 0.75),\n (0.5, 7.0, 1.0), (0.5, 4.0, 0.75),\n (2.25, 7.0, 0.5), (1.375, 5.5, 0.625)],\n (4.0, -2.0, 0.5): [(4.0, -2.0, 0.75), (4.0, -2.0, 0.25),\n (2.25, 1.0, 0.5), (2.25, -2.0, 0.75),\n (2.25, -2.0, 0.5), (2.25, -2.0, 0.25),\n (4.0, 1.0, 0.25), (4.0, 1.0, 0.75),\n (4.0, 1.0, 0.5), (3.125, -0.5, 0.375),\n (3.125, -0.5, 0.625)],\n (-2.125, -0.5, 0.875): [(-1.25, 1.0, 1.0),\n (-1.25, 1.0, 0.75),\n (-1.25, -2.0, 0.75),\n (-1.25, -2.0, 1.0),\n (-3.0, -2.0, 0.75),\n (-3.0, 1.0, 1.0), (-3, -2, 1),\n (-3.0, 1.0, 0.75)]}\n\n init_triangulation(3, 2, check, nn_checks, bounds=[(-3, 4), (-2, 10), (0, 1)])",
"def par_test_2(self):\n\n for i in range(4):\n self.XYZ_par_factor.setMaxDepth(i)\n self.XYZ_par_factor.setMaxDepth(i)\n\n res = [\n self.XYZ_factor.mult(self.scalar),\n self.XYZ_factor.mult(self.scalarf),\n self.scalarf.mult(self.XYZ_factor),\n ]\n\n par_res = [\n self.XYZ_par_factor.mult(self.scalar),\n self.XYZ_par_factor.mult(self.par_scalarf),\n self.par_scalarf.mult(self.XYZ_par_factor),\n ]\n\n for i, ele in enumerate(res):\n assert (\n ele.rand_vars == par_res[i].rand_vars\n and ele.values == par_res[i].values\n )",
"def test_linear_2d_merwe_column():\n\n\n def fx(x, dt):\n F = np.array([[1, dt, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, dt],\n [0, 0, 0, 1]], dtype=float)\n\n return np.dot(F, x)\n\n def hx(x):\n return np.array([x[0], x[2]])\n\n\n dt = 0.1\n points = MerweScaledSigmaPoints2(4, .1, 2., -1)\n kf = UKF2(dim_x=4, dim_z=2, dt=dt, fx=fx, hx=hx, points=points)\n\n\n kf.x = np.array([[-1., 1., -1., 1]]).T\n kf.P*=0.0001\n #kf.R *=0\n #kf.Q\n\n zs = []\n for i in range(20):\n z = np.array([[i+randn()*0.1],\n [i+randn()*0.1]])\n zs.append(z)\n\n Ms, Ps = kf.batch_filter(zs)\n smooth_x, _, _ = kf.rts_smoother(Ms, Ps, dt=dt)\n\n if DO_PLOT:\n plt.figure()\n zs = np.asarray(zs)\n plt.plot(zs[:,0], marker='+', c='b')\n plt.plot(Ms[:,0], c='b')\n plt.plot(smooth_x[:,0], smooth_x[:,2], c='r')\n print(smooth_x)",
"def test_linear_2d_merwe():\n\n\n def fx(x, dt):\n F = np.array([[1, dt, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, dt],\n [0, 0, 0, 1]], dtype=float)\n\n return np.dot(F, x)\n\n def hx(x):\n return np.array([x[0], x[2]])\n\n\n dt = 0.1\n points = MerweScaledSigmaPoints(4, .1, 2., -1)\n kf = UKF(dim_x=4, dim_z=2, dt=dt, fx=fx, hx=hx, points=points)\n\n\n kf.x = np.array([-1., 1., -1., 1])\n kf.P*=0.0001\n #kf.R *=0\n #kf.Q\n\n zs = []\n for i in range(20):\n z = np.array([i+randn()*0.1, i+randn()*0.1])\n zs.append(z)\n\n Ms, Ps = kf.batch_filter(zs)\n smooth_x, _, _ = kf.rts_smoother(Ms, Ps, dt=dt)\n\n if DO_PLOT:\n plt.figure()\n zs = np.asarray(zs)\n plt.plot(zs[:,0], marker='+')\n plt.plot(Ms[:,0], c='b')\n plt.plot(smooth_x[:,0], smooth_x[:,2], c='r')\n print(smooth_x)",
"def _vrms2(x, y, inc_deg,\n surf_lum, sigma_lum, qobs_lum,\n surf_pot, sigma_pot, qobs_pot,\n beta, tensor, sigmaPsf, normPsf,\n pixSize, pixAng, step, nrad, nang):\n # Axisymmetric deprojection of both luminous and total mass.\n # See equation (12)-(14) of Cappellari (2008)\n #\n inc = np.radians(inc_deg)\n\n qintr_lum = qobs_lum**2 - np.cos(inc)**2\n if np.any(qintr_lum <= 0):\n raise RuntimeError('Inclination too low q < 0')\n qintr_lum = np.sqrt(qintr_lum)/np.sin(inc)\n if np.any(qintr_lum < 0.05):\n raise RuntimeError('q < 0.05 components')\n dens_lum = surf_lum*qobs_lum / (sigma_lum*qintr_lum*np.sqrt(2*np.pi))\n\n qintr_pot = qobs_pot**2 - np.cos(inc)**2\n if np.any(qintr_pot <= 0):\n raise RuntimeError('Inclination too low q < 0')\n qintr_pot = np.sqrt(qintr_pot)/np.sin(inc)\n if np.any(qintr_pot < 0.05):\n raise RuntimeError('q < 0.05 components')\n dens_pot = surf_pot*qobs_pot / (sigma_pot*qintr_pot*np.sqrt(2*np.pi))\n\n # Define parameters of polar grid for interpolation\n #\n w = sigma_lum < np.max(np.abs(x)) # Characteristic MGE axial ratio in observed range\n\n if w.sum() < 3:\n qmed = np.median(qobs_lum)\n else:\n qmed = np.median(qobs_lum[w])\n\n rell = np.sqrt(x**2 + (y/qmed)**2) # Elliptical radius of input (x, y)\n\n psfConvolution = (np.max(sigmaPsf) > 0) and (pixSize > 0)\n\n # Kernel step is 1/4 of largest value between sigma(min) and 1/2 pixel side.\n # Kernel half size is the sum of 3*sigma(max) and 1/2 pixel diagonal.\n #\n if (nrad*nang > x.size) and (not psfConvolution): # Just calculate values\n\n xPol = x\n yPol = y\n\n else: # Interpolate values on polar grid\n\n if psfConvolution: # PSF convolution\n if step == 0:\n step = max(pixSize/2., np.min(sigmaPsf))/4.\n mx = 3*np.max(sigmaPsf) + pixSize/np.sqrt(2)\n else: # No convolution\n step = np.min(rell.clip(1)) # Minimum radius of 1pc\n mx = 0\n\n # Make linear grid in log of elliptical radius RAD and eccentric anomaly ANG\n # See Appendix A\n #\n rmax = np.max(rell) + mx # Major axis of ellipse containing all data + convolution\n logRad = np.linspace(np.log(step), np.log(rmax), nrad) # Linear grid in np.log(rell)\n ang = np.linspace(0, np.pi/2, nang) # Linear grid in eccentric anomaly\n radGrid, angGrid = map(np.ravel, np.meshgrid(np.exp(logRad), ang))\n xPol = radGrid*np.cos(angGrid)\n yPol = radGrid*np.sin(angGrid) * qmed\n\n # The model Vrms computation is only performed on the polar grid\n # which is then used to interpolate the values at any other location\n #\n wm2Pol = np.empty_like(xPol)\n mgePol = np.empty_like(xPol)\n for j in range(xPol.size):\n wm2Pol[j] = quadva(_integrand, [0., 1.],\n args=(dens_lum, sigma_lum, qintr_lum,\n dens_pot, sigma_pot, qintr_pot,\n xPol[j], yPol[j], inc, beta, tensor))[0]\n mgePol[j] = np.sum(surf_lum * np.exp(-0.5/sigma_lum**2 *\n (xPol[j]**2 + (yPol[j]/qobs_lum)**2)))\n\n\n if psfConvolution: # PSF convolution\n\n nx = np.ceil(rmax/step)\n ny = np.ceil(rmax*qmed/step)\n x1 = np.linspace(-nx, nx, 2*nx)*step\n y1 = np.linspace(-ny, ny, 2*ny)*step\n xCar, yCar = np.meshgrid(x1, y1) # Cartesian grid for convolution\n\n # Interpolate MGE model and Vrms over cartesian grid\n #\n r1 = 0.5*np.log(xCar**2 + (yCar/qmed)**2) # Log elliptical radius of cartesian grid\n e1 = np.arctan2(np.abs(yCar/qmed), np.abs(xCar)) # Eccentric anomaly of cartesian grid\n\n wm2Car = bilinear_interpolate(logRad, ang, wm2Pol.reshape(nang, nrad), r1, e1)\n mgeCar = bilinear_interpolate(logRad, ang, mgePol.reshape(nang, nrad), r1, e1)\n\n nk = np.ceil(mx/step)\n kgrid = np.linspace(-nk, nk, 
2*nk)*step\n xgrid, ygrid = np.meshgrid(kgrid, kgrid) # Kernel is square\n if pixAng != 0:\n xgrid, ygrid = rotate_points(xgrid, ygrid, pixAng)\n\n # Compute kernel with equation (A6) of Cappellari (2008).\n # Normaliztion is irrelevant here as it cancels out.\n #\n kernel = np.zeros_like(xgrid)\n dx = pixSize/2\n sp = np.sqrt(2)*sigmaPsf\n for j in range(len(sigmaPsf)):\n kernel += normPsf[j] \\\n * (special.erf((dx-xgrid)/sp[j]) + special.erf((dx+xgrid)/sp[j])) \\\n * (special.erf((dx-ygrid)/sp[j]) + special.erf((dx+ygrid)/sp[j]))\n kernel /= np.sum(kernel)\n\n # Seeing and aperture convolution with equation (A3)\n #\n muCar = signal.fftconvolve(wm2Car, kernel, mode='same') \\\n / signal.fftconvolve(mgeCar, kernel, mode='same')\n\n # Interpolate convolved image at observed apertures.\n # Aperture integration was already included in the kernel.\n #\n mu = bilinear_interpolate(x1, y1, muCar, x, y)\n\n else: # No PSF convolution\n\n muPol = wm2Pol/mgePol\n\n if nrad*nang > x.size: # Just returns values\n mu = muPol\n else: # Interpolate values\n r1 = 0.5*np.log(x**2 + (y/qmed)**2) # Log elliptical radius of input (x,y)\n e1 = np.arctan2(np.abs(y/qmed), np.abs(x)) # Eccentric anomaly of input (x,y)\n mu = bilinear_interpolate(logRad, ang, muPol.reshape(nang, nrad), r1, e1)\n\n return mu",
"def test_plot_mesh(self):\n plt.close('all')\n\n #\n # Initialize\n #\n fig, ax = plt.subplots(3,3)\n plot = Plot()\n #\n # Define mesh\n # \n mesh = Mesh.newmesh(grid_size=(2,2))\n mesh.refine() \n mesh.root_node().children[1,1].mark(1)\n mesh.refine(1)\n \n # Plot simple mesh\n ax[0,0] = plot.mesh(ax[0,0], mesh)\n \n #\n # Flag a few cells\n # \n mesh.unmark(nodes=True)\n mesh.root_node().children[0,0].mark(2)\n mesh.root_node().children[1,0].mark(1)\n mesh.root_node().children[1,1].children['SW'].mark(3)\n mesh.root_node().children[1,1].children['NE'].mark(3)\n \n # Color flagged cells\n ax[0,1] = plot.mesh(ax[0,1], mesh, color_marked=[1,2,3], nested=True)\n \n # Plot vertex numbers\n ax[0,2] = plot.mesh(ax[0,2], mesh, vertex_numbers=True)\n \n # Plot edge numbers\n ax[1,0] = plot.mesh(ax[1,0], mesh, edge_numbers=True)\n \n # Plot cell numbers nested off\n mesh.refine(2)\n ax[1,1] = plot.mesh(ax[1,1], mesh, cell_numbers=True)\n \n # Plot cell numbers nested on\n ax[1,2] = plot.mesh(ax[1,2], mesh, cell_numbers=True, nested=True)\n\n # Plot dofs\n element = QuadFE(2,'Q1')\n ax[2,0] = plot.mesh(ax[2,0], mesh, element=element, dofs=True)\n \n # Assign dofs in a nested way\n ax[2,1] = plot.mesh(ax[2,1], mesh, element=element, dofs=True, \\\n nested=True)\n \n # Display only dofs of flagged nodes \n ax[2,2] = plot.mesh(ax[2,2], mesh, element=element, dofs=True, \\\n node_flag=3, nested=True, show_axis=True)",
"def test_mueller_product(self, ):\n mdims = ('mueller_v', 'mueller_h')\n mm_1 = xr.DataArray(np.random.rand(4, 4, ), dims=mdims, )\n mm_2 = xr.DataArray(np.identity(4, ), dims=mdims, )\n sv_1 = xr.DataArray(np.random.rand(4, ), dims=('stokes', ), )\n\n assert_almost_equal(mm_1.values, mueller_product(mm_1, mm_2).values, )\n assert_almost_equal(mm_1.values, mueller_product(mm_2, mm_1).values, )\n assert_almost_equal(sv_1.values, mueller_product(mm_2, sv_1).data, )",
"def test_3_2_4D_rec_splits(self):\n check = [(-2.0, 3.0, -1.0, 3.0), (9.0, 10.0, 1.0, 5.0),\n (9.0, 3.0, -1.0, 3.0), (9.0, 10.0, -1.0, 3.0),\n (9.0, 10.0, 1.0, 3.0), (9.0, 10.0, -1.0, 5.0),\n (9.0, 3.0, 1.0, 3.0), (9.0, 3.0, 1.0, 5.0),\n (9.0, 3.0, -1.0, 5.0), (-2.0, 10.0, -1.0, 3.0),\n (-2.0, 10.0, 1.0, 3.0), (-2.0, 10.0, 1.0, 5.0),\n (-2.0, 10.0, -1.0, 5.0), (-2.0, 3.0, 1.0, 3.0),\n (-2.0, 3.0, 1.0, 5.0), (-2.0, 3.0, -1.0, 5.0),\n (3.5, 6.5, 0.0, 4.0), (-2.0, 6.5, 0.0, 4.0),\n (-2.0, 3.0, 0.0, 4.0), (-2.0, 3.0, -1.0, 4.0),\n (-2.0, 3.0, 0.0, 3.0), (-2.0, 6.5, -1.0, 4.0),\n (-2.0, 6.5, -1.0, 3.0), (-2.0, 6.5, 0.0, 3.0),\n (3.5, 3.0, 0.0, 4.0), (3.5, 3.0, -1.0, 4.0),\n (3.5, 3.0, -1.0, 3.0), (3.5, 3.0, 0.0, 3.0),\n (3.5, 6.5, -1.0, 4.0), (3.5, 6.5, -1.0, 3.0),\n (3.5, 6.5, 0.0, 3.0), (0.75, 4.75, -0.5, 3.5),\n (9.0, 6.5, 0.0, 4.0), (9.0, 10.0, 0.0, 4.0),\n (9.0, 10.0, 1.0, 4.0), (9.0, 10.0, 0.0, 5.0),\n (9.0, 6.5, 1.0, 4.0), (9.0, 6.5, 1.0, 5.0),\n (9.0, 6.5, 0.0, 5.0), (3.5, 10.0, 0.0, 4.0),\n (3.5, 10.0, 1.0, 4.0), (3.5, 10.0, 1.0, 5.0),\n (3.5, 10.0, 0.0, 5.0), (3.5, 6.5, 1.0, 4.0),\n (3.5, 6.5, 1.0, 5.0), (3.5, 6.5, 0.0, 5.0),\n (6.25, 8.25, 0.5, 4.5), (9.0, 3.0, 0.0, 4.0),\n (9.0, 3.0, -1.0, 4.0), (9.0, 3.0, 0.0, 3.0),\n (9.0, 6.5, -1.0, 4.0), (9.0, 6.5, -1.0, 3.0),\n (9.0, 6.5, 0.0, 3.0), (6.25, 4.75, -0.5, 3.5),\n (9.0, 10.0, -1.0, 4.0), (9.0, 10.0, 0.0, 3.0),\n (3.5, 10.0, -1.0, 4.0), (3.5, 10.0, -1.0, 3.0),\n (3.5, 10.0, 0.0, 3.0), (6.25, 8.25, -0.5, 3.5),\n (9.0, 6.5, 1.0, 3.0), (3.5, 10.0, 1.0, 3.0),\n (3.5, 6.5, 1.0, 3.0), (6.25, 8.25, 0.5, 3.5),\n (9.0, 6.5, -1.0, 5.0), (3.5, 10.0, -1.0, 5.0),\n (3.5, 6.5, -1.0, 5.0), (6.25, 8.25, -0.5, 4.5),\n (9.0, 3.0, 1.0, 4.0), (3.5, 3.0, 1.0, 4.0),\n (3.5, 3.0, 1.0, 3.0), (6.25, 4.75, 0.5, 3.5),\n (9.0, 3.0, 0.0, 5.0), (3.5, 3.0, 1.0, 5.0),\n (3.5, 3.0, 0.0, 5.0), (6.25, 4.75, 0.5, 4.5),\n (3.5, 3.0, -1.0, 5.0), (6.25, 4.75, -0.5, 4.5),\n (-2.0, 10.0, 0.0, 4.0), (-2.0, 10.0, -1.0, 4.0),\n (-2.0, 10.0, 0.0, 3.0), (0.75, 8.25, -0.5, 3.5),\n (-2.0, 10.0, 1.0, 4.0), (-2.0, 6.5, 1.0, 4.0),\n (-2.0, 6.5, 1.0, 3.0), (0.75, 8.25, 0.5, 3.5),\n (-2.0, 10.0, 0.0, 5.0), (-2.0, 6.5, 1.0, 5.0),\n (-2.0, 6.5, 0.0, 5.0), (0.75, 8.25, 0.5, 4.5),\n (-2.0, 6.5, -1.0, 5.0), (0.75, 8.25, -0.5, 4.5),\n (-2.0, 3.0, 1.0, 4.0), (0.75, 4.75, 0.5, 3.5),\n (-2.0, 3.0, 0.0, 5.0), (0.75, 4.75, 0.5, 4.5),\n (0.75, 4.75, -0.5, 4.5), (3.5, 4.75, -0.5, 3.5),\n (3.5, 6.5, -0.5, 3.5), (3.5, 6.5, 0.0, 3.5),\n (3.5, 6.5, -0.5, 4.0), (3.5, 4.75, 0.0, 3.5),\n (3.5, 4.75, 0.0, 4.0), (3.5, 4.75, -0.5, 4.0),\n (0.75, 6.5, -0.5, 3.5), (0.75, 6.5, 0.0, 3.5),\n (0.75, 6.5, 0.0, 4.0), (0.75, 6.5, -0.5, 4.0),\n (0.75, 4.75, 0.0, 3.5), (0.75, 4.75, 0.0, 4.0),\n (0.75, 4.75, -0.5, 4.0), (2.125, 5.625, -0.25, 3.75),\n (-2.0, 4.75, -0.5, 3.5), (-2.0, 3.0, -0.5, 3.5),\n (-2.0, 3.0, -1.0, 3.5), (-2.0, 3.0, -0.5, 3.0),\n (-2.0, 4.75, -1.0, 3.5), (-2.0, 4.75, -1.0, 3.0),\n (-2.0, 4.75, -0.5, 3.0), (0.75, 3.0, -0.5, 3.5),\n (0.75, 3.0, -1.0, 3.5), (0.75, 3.0, -1.0, 3.0),\n (0.75, 3.0, -0.5, 3.0), (0.75, 4.75, -1.0, 3.5),\n (0.75, 4.75, -1.0, 3.0), (0.75, 4.75, -0.5, 3.0),\n (-0.625, 3.875, -0.75, 3.25), (-2.0, 6.5, -0.5, 3.5),\n (-2.0, 6.5, 0.0, 3.5), (-2.0, 6.5, -0.5, 4.0),\n (-2.0, 4.75, 0.0, 3.5), (-2.0, 4.75, 0.0, 4.0),\n (-2.0, 4.75, -0.5, 4.0), (-0.625, 5.625, -0.25, 3.75),\n (-2.0, 3.0, 0.0, 3.5), (-2.0, 3.0, -0.5, 4.0),\n (0.75, 3.0, 0.0, 3.5), (0.75, 3.0, 0.0, 4.0),\n (0.75, 3.0, -0.5, 4.0), (-0.625, 3.875, -0.25, 3.75),\n (-2.0, 4.75, -1.0, 4.0), (0.75, 3.0, -1.0, 4.0),\n (0.75, 
4.75, -1.0, 4.0), (-0.625, 3.875, -0.75, 3.75),\n (-2.0, 4.75, 0.0, 3.0), (0.75, 3.0, 0.0, 3.0),\n (0.75, 4.75, 0.0, 3.0), (-0.625, 3.875, -0.25, 3.25),\n (-2.0, 6.5, -1.0, 3.5), (0.75, 6.5, -1.0, 3.5),\n (0.75, 6.5, -1.0, 4.0), (-0.625, 5.625, -0.75, 3.75),\n (-2.0, 6.5, -0.5, 3.0), (0.75, 6.5, -1.0, 3.0),\n (0.75, 6.5, -0.5, 3.0), (-0.625, 5.625, -0.75, 3.25),\n (0.75, 6.5, 0.0, 3.0), (-0.625, 5.625, -0.25, 3.25),\n (3.5, 3.0, -0.5, 3.5), (3.5, 3.0, 0.0, 3.5),\n (3.5, 3.0, -0.5, 4.0), (2.125, 3.875, -0.25, 3.75),\n (3.5, 3.0, -1.0, 3.5), (3.5, 4.75, -1.0, 3.5),\n (3.5, 4.75, -1.0, 4.0), (2.125, 3.875, -0.75, 3.75),\n (3.5, 3.0, -0.5, 3.0), (3.5, 4.75, -1.0, 3.0),\n (3.5, 4.75, -0.5, 3.0), (2.125, 3.875, -0.75, 3.25),\n (3.5, 4.75, 0.0, 3.0), (2.125, 3.875, -0.25, 3.25),\n (3.5, 6.5, -1.0, 3.5), (2.125, 5.625, -0.75, 3.75),\n (3.5, 6.5, -0.5, 3.0), (2.125, 5.625, -0.75, 3.25),\n (2.125, 5.625, -0.25, 3.25), (3.5, 8.25, 0.5, 4.5),\n (3.5, 6.5, 0.5, 4.5), (3.5, 6.5, 0.0, 4.5),\n (3.5, 6.5, 0.5, 4.0), (3.5, 8.25, 0.0, 4.5),\n (3.5, 8.25, 0.0, 4.0), (3.5, 8.25, 0.5, 4.0),\n (6.25, 6.5, 0.5, 4.5), (6.25, 6.5, 0.0, 4.5),\n (6.25, 6.5, 0.0, 4.0), (6.25, 6.5, 0.5, 4.0),\n (6.25, 8.25, 0.0, 4.5), (6.25, 8.25, 0.0, 4.0),\n (6.25, 8.25, 0.5, 4.0), (4.875, 7.375, 0.25, 4.25),\n (9.0, 8.25, 0.5, 4.5), (9.0, 10.0, 0.5, 4.5),\n (9.0, 10.0, 1.0, 4.5), (9.0, 10.0, 0.5, 5.0),\n (9.0, 8.25, 1.0, 4.5), (9.0, 8.25, 1.0, 5.0),\n (9.0, 8.25, 0.5, 5.0), (6.25, 10.0, 0.5, 4.5),\n (6.25, 10.0, 1.0, 4.5), (6.25, 10.0, 1.0, 5.0),\n (6.25, 10.0, 0.5, 5.0), (6.25, 8.25, 1.0, 4.5),\n (6.25, 8.25, 1.0, 5.0), (6.25, 8.25, 0.5, 5.0),\n (7.625, 9.125, 0.75, 4.75), (9.0, 6.5, 0.5, 4.5),\n (9.0, 6.5, 0.0, 4.5), (9.0, 6.5, 0.5, 4.0),\n (9.0, 8.25, 0.0, 4.5), (9.0, 8.25, 0.0, 4.0),\n (9.0, 8.25, 0.5, 4.0), (7.625, 7.375, 0.25, 4.25),\n (9.0, 10.0, 0.0, 4.5), (9.0, 10.0, 0.5, 4.0),\n (6.25, 10.0, 0.0, 4.5), (6.25, 10.0, 0.0, 4.0),\n (6.25, 10.0, 0.5, 4.0), (7.625, 9.125, 0.25, 4.25),\n (9.0, 8.25, 1.0, 4.0), (6.25, 10.0, 1.0, 4.0),\n (6.25, 8.25, 1.0, 4.0), (7.625, 9.125, 0.75, 4.25),\n (9.0, 8.25, 0.0, 5.0), (6.25, 10.0, 0.0, 5.0),\n (6.25, 8.25, 0.0, 5.0), (7.625, 9.125, 0.25, 4.75),\n (9.0, 6.5, 1.0, 4.5), (6.25, 6.5, 1.0, 4.5),\n (6.25, 6.5, 1.0, 4.0), (7.625, 7.375, 0.75, 4.25),\n (9.0, 6.5, 0.5, 5.0), (6.25, 6.5, 1.0, 5.0),\n (6.25, 6.5, 0.5, 5.0), (7.625, 7.375, 0.75, 4.75),\n (6.25, 6.5, 0.0, 5.0), (7.625, 7.375, 0.25, 4.75),\n (3.5, 10.0, 0.5, 4.5), (3.5, 10.0, 0.0, 4.5),\n (3.5, 10.0, 0.5, 4.0), (4.875, 9.125, 0.25, 4.25),\n (3.5, 10.0, 1.0, 4.5), (3.5, 8.25, 1.0, 4.5),\n (3.5, 8.25, 1.0, 4.0), (4.875, 9.125, 0.75, 4.25),\n (3.5, 10.0, 0.5, 5.0), (3.5, 8.25, 1.0, 5.0),\n (3.5, 8.25, 0.5, 5.0), (4.875, 9.125, 0.75, 4.75),\n (3.5, 8.25, 0.0, 5.0), (4.875, 9.125, 0.25, 4.75),\n (3.5, 6.5, 1.0, 4.5), (4.875, 7.375, 0.75, 4.25),\n (3.5, 6.5, 0.5, 5.0), (4.875, 7.375, 0.75, 4.75),\n (4.875, 7.375, 0.25, 4.75), (6.25, 6.5, -0.5, 3.5),\n (6.25, 6.5, 0.0, 3.5), (6.25, 6.5, -0.5, 4.0),\n (6.25, 4.75, 0.0, 3.5), (6.25, 4.75, 0.0, 4.0),\n (6.25, 4.75, -0.5, 4.0), (4.875, 5.625, -0.25, 3.75),\n (9.0, 4.75, -0.5, 3.5), (9.0, 3.0, -0.5, 3.5),\n (9.0, 3.0, -1.0, 3.5), (9.0, 3.0, -0.5, 3.0),\n (9.0, 4.75, -1.0, 3.5), (9.0, 4.75, -1.0, 3.0),\n (9.0, 4.75, -0.5, 3.0), (6.25, 3.0, -0.5, 3.5),\n (6.25, 3.0, -1.0, 3.5), (6.25, 3.0, -1.0, 3.0),\n (6.25, 3.0, -0.5, 3.0), (6.25, 4.75, -1.0, 3.5),\n (6.25, 4.75, -1.0, 3.0), (6.25, 4.75, -0.5, 3.0),\n (7.625, 3.875, -0.75, 3.25), (9.0, 6.5, -0.5, 3.5),\n (9.0, 6.5, 0.0, 3.5), (9.0, 6.5, -0.5, 
4.0),\n (9.0, 4.75, 0.0, 3.5), (9.0, 4.75, 0.0, 4.0),\n (9.0, 4.75, -0.5, 4.0), (7.625, 5.625, -0.25, 3.75),\n (9.0, 3.0, 0.0, 3.5), (9.0, 3.0, -0.5, 4.0),\n (6.25, 3.0, 0.0, 3.5), (6.25, 3.0, 0.0, 4.0),\n (6.25, 3.0, -0.5, 4.0), (7.625, 3.875, -0.25, 3.75),\n (9.0, 4.75, -1.0, 4.0), (6.25, 3.0, -1.0, 4.0),\n (6.25, 4.75, -1.0, 4.0), (7.625, 3.875, -0.75, 3.75),\n (9.0, 4.75, 0.0, 3.0), (6.25, 3.0, 0.0, 3.0),\n (6.25, 4.75, 0.0, 3.0), (7.625, 3.875, -0.25, 3.25),\n (9.0, 6.5, -1.0, 3.5), (6.25, 6.5, -1.0, 3.5),\n (6.25, 6.5, -1.0, 4.0), (7.625, 5.625, -0.75, 3.75),\n (9.0, 6.5, -0.5, 3.0), (6.25, 6.5, -1.0, 3.0),\n (6.25, 6.5, -0.5, 3.0), (7.625, 5.625, -0.75, 3.25),\n (6.25, 6.5, 0.0, 3.0), (7.625, 5.625, -0.25, 3.25),\n (4.875, 3.875, -0.25, 3.75), (4.875, 3.875, -0.75, 3.75),\n (4.875, 3.875, -0.75, 3.25), (4.875, 3.875, -0.25, 3.25),\n (4.875, 5.625, -0.75, 3.75), (4.875, 5.625, -0.75, 3.25),\n (4.875, 5.625, -0.25, 3.25), (3.5, 8.25, -0.5, 3.5),\n (3.5, 8.25, 0.0, 3.5), (3.5, 8.25, -0.5, 4.0),\n (6.25, 8.25, 0.0, 3.5), (6.25, 8.25, -0.5, 4.0),\n (4.875, 7.375, -0.25, 3.75), (9.0, 8.25, -0.5, 3.5),\n (9.0, 10.0, -0.5, 3.5), (9.0, 10.0, -1.0, 3.5),\n (9.0, 10.0, -0.5, 3.0), (9.0, 8.25, -1.0, 3.5),\n (9.0, 8.25, -1.0, 3.0), (9.0, 8.25, -0.5, 3.0),\n (6.25, 10.0, -0.5, 3.5), (6.25, 10.0, -1.0, 3.5),\n (6.25, 10.0, -1.0, 3.0), (6.25, 10.0, -0.5, 3.0),\n (6.25, 8.25, -1.0, 3.5), (6.25, 8.25, -1.0, 3.0),\n (6.25, 8.25, -0.5, 3.0), (7.625, 9.125, -0.75, 3.25),\n (9.0, 8.25, 0.0, 3.5), (9.0, 8.25, -0.5, 4.0),\n (7.625, 7.375, -0.25, 3.75), (9.0, 10.0, 0.0, 3.5),\n (9.0, 10.0, -0.5, 4.0), (6.25, 10.0, 0.0, 3.5),\n (6.25, 10.0, -0.5, 4.0), (7.625, 9.125, -0.25, 3.75),\n (9.0, 8.25, -1.0, 4.0), (6.25, 10.0, -1.0, 4.0),\n (6.25, 8.25, -1.0, 4.0), (7.625, 9.125, -0.75, 3.75),\n (9.0, 8.25, 0.0, 3.0), (6.25, 10.0, 0.0, 3.0),\n (6.25, 8.25, 0.0, 3.0), (7.625, 9.125, -0.25, 3.25),\n (7.625, 7.375, -0.75, 3.75), (7.625, 7.375, -0.75, 3.25),\n (7.625, 7.375, -0.25, 3.25), (3.5, 10.0, -0.5, 3.5),\n (3.5, 10.0, 0.0, 3.5), (3.5, 10.0, -0.5, 4.0),\n (4.875, 9.125, -0.25, 3.75), (3.5, 10.0, -1.0, 3.5),\n (3.5, 8.25, -1.0, 3.5), (3.5, 8.25, -1.0, 4.0),\n (4.875, 9.125, -0.75, 3.75), (3.5, 10.0, -0.5, 3.0),\n (3.5, 8.25, -1.0, 3.0), (3.5, 8.25, -0.5, 3.0),\n (4.875, 9.125, -0.75, 3.25), (3.5, 8.25, 0.0, 3.0),\n (4.875, 9.125, -0.25, 3.25), (4.875, 7.375, -0.75, 3.75),\n (4.875, 7.375, -0.75, 3.25), (4.875, 7.375, -0.25, 3.25),\n (3.5, 8.25, 0.5, 3.5), (3.5, 6.5, 0.5, 3.5),\n (6.25, 6.5, 0.5, 3.5), (4.875, 7.375, 0.25, 3.75),\n (9.0, 8.25, 0.5, 3.5), (9.0, 10.0, 0.5, 3.5),\n (9.0, 10.0, 1.0, 3.5), (9.0, 10.0, 0.5, 3.0),\n (9.0, 8.25, 1.0, 3.5), (9.0, 8.25, 1.0, 3.0),\n (9.0, 8.25, 0.5, 3.0), (6.25, 10.0, 0.5, 3.5),\n (6.25, 10.0, 1.0, 3.5), (6.25, 10.0, 1.0, 3.0),\n (6.25, 10.0, 0.5, 3.0), (6.25, 8.25, 1.0, 3.5),\n (6.25, 8.25, 1.0, 3.0), (6.25, 8.25, 0.5, 3.0),\n (7.625, 9.125, 0.75, 3.25), (9.0, 6.5, 0.5, 3.5),\n (7.625, 7.375, 0.25, 3.75), (7.625, 9.125, 0.25, 3.75),\n (7.625, 9.125, 0.75, 3.75), (7.625, 9.125, 0.25, 3.25),\n (9.0, 6.5, 1.0, 3.5), (6.25, 6.5, 1.0, 3.5),\n (7.625, 7.375, 0.75, 3.75), (9.0, 6.5, 0.5, 3.0),\n (6.25, 6.5, 1.0, 3.0), (6.25, 6.5, 0.5, 3.0),\n (7.625, 7.375, 0.75, 3.25), (7.625, 7.375, 0.25, 3.25),\n (3.5, 10.0, 0.5, 3.5), (4.875, 9.125, 0.25, 3.75),\n (3.5, 10.0, 1.0, 3.5), (3.5, 8.25, 1.0, 3.5),\n (4.875, 9.125, 0.75, 3.75), (3.5, 10.0, 0.5, 3.0),\n (3.5, 8.25, 1.0, 3.0), (3.5, 8.25, 0.5, 3.0),\n (4.875, 9.125, 0.75, 3.25), (4.875, 9.125, 0.25, 3.25),\n (3.5, 6.5, 1.0, 3.5), 
(4.875, 7.375, 0.75, 3.75),\n (3.5, 6.5, 0.5, 3.0), (4.875, 7.375, 0.75, 3.25),\n (4.875, 7.375, 0.25, 3.25), (3.5, 8.25, -0.5, 4.5),\n (3.5, 6.5, -0.5, 4.5), (6.25, 6.5, -0.5, 4.5),\n (4.875, 7.375, -0.25, 4.25), (9.0, 8.25, -0.5, 4.5),\n (9.0, 10.0, -0.5, 4.5), (9.0, 10.0, -1.0, 4.5),\n (9.0, 10.0, -0.5, 5.0), (9.0, 8.25, -1.0, 4.5),\n (9.0, 8.25, -1.0, 5.0), (9.0, 8.25, -0.5, 5.0),\n (6.25, 10.0, -0.5, 4.5), (6.25, 10.0, -1.0, 4.5),\n (6.25, 10.0, -1.0, 5.0), (6.25, 10.0, -0.5, 5.0),\n (6.25, 8.25, -1.0, 4.5), (6.25, 8.25, -1.0, 5.0),\n (6.25, 8.25, -0.5, 5.0), (7.625, 9.125, -0.75, 4.75),\n (9.0, 6.5, -0.5, 4.5), (7.625, 7.375, -0.25, 4.25),\n (7.625, 9.125, -0.25, 4.25), (7.625, 9.125, -0.75, 4.25),\n (7.625, 9.125, -0.25, 4.75), (9.0, 6.5, -1.0, 4.5),\n (6.25, 6.5, -1.0, 4.5), (7.625, 7.375, -0.75, 4.25),\n (9.0, 6.5, -0.5, 5.0), (6.25, 6.5, -1.0, 5.0),\n (6.25, 6.5, -0.5, 5.0), (7.625, 7.375, -0.75, 4.75),\n (7.625, 7.375, -0.25, 4.75), (3.5, 10.0, -0.5, 4.5),\n (4.875, 9.125, -0.25, 4.25), (3.5, 10.0, -1.0, 4.5),\n (3.5, 8.25, -1.0, 4.5), (4.875, 9.125, -0.75, 4.25),\n (3.5, 10.0, -0.5, 5.0), (3.5, 8.25, -1.0, 5.0),\n (3.5, 8.25, -0.5, 5.0), (4.875, 9.125, -0.75, 4.75),\n (4.875, 9.125, -0.25, 4.75), (3.5, 6.5, -1.0, 4.5),\n (4.875, 7.375, -0.75, 4.25), (3.5, 6.5, -0.5, 5.0),\n (4.875, 7.375, -0.75, 4.75), (4.875, 7.375, -0.25, 4.75),\n (3.5, 4.75, 0.5, 3.5), (3.5, 4.75, 0.5, 4.0),\n (6.25, 4.75, 0.5, 4.0), (4.875, 5.625, 0.25, 3.75),\n (9.0, 4.75, 0.5, 3.5), (9.0, 3.0, 0.5, 3.5),\n (9.0, 3.0, 1.0, 3.5), (9.0, 3.0, 0.5, 3.0),\n (9.0, 4.75, 1.0, 3.5), (9.0, 4.75, 1.0, 3.0),\n (9.0, 4.75, 0.5, 3.0), (6.25, 3.0, 0.5, 3.5),\n (6.25, 3.0, 1.0, 3.5), (6.25, 3.0, 1.0, 3.0),\n (6.25, 3.0, 0.5, 3.0), (6.25, 4.75, 1.0, 3.5),\n (6.25, 4.75, 1.0, 3.0), (6.25, 4.75, 0.5, 3.0),\n (7.625, 3.875, 0.75, 3.25), (9.0, 4.75, 0.5, 4.0),\n (7.625, 5.625, 0.25, 3.75), (9.0, 3.0, 0.5, 4.0),\n (6.25, 3.0, 0.5, 4.0), (7.625, 3.875, 0.25, 3.75),\n (9.0, 4.75, 1.0, 4.0), (6.25, 3.0, 1.0, 4.0),\n (6.25, 4.75, 1.0, 4.0), (7.625, 3.875, 0.75, 3.75),\n (7.625, 3.875, 0.25, 3.25), (7.625, 5.625, 0.75, 3.75),\n (7.625, 5.625, 0.75, 3.25), (7.625, 5.625, 0.25, 3.25),\n (3.5, 3.0, 0.5, 3.5), (3.5, 3.0, 0.5, 4.0),\n (4.875, 3.875, 0.25, 3.75), (3.5, 3.0, 1.0, 3.5),\n (3.5, 4.75, 1.0, 3.5), (3.5, 4.75, 1.0, 4.0),\n (4.875, 3.875, 0.75, 3.75), (3.5, 3.0, 0.5, 3.0),\n (3.5, 4.75, 1.0, 3.0), (3.5, 4.75, 0.5, 3.0),\n (4.875, 3.875, 0.75, 3.25), (4.875, 3.875, 0.25, 3.25),\n (4.875, 5.625, 0.75, 3.75), (4.875, 5.625, 0.75, 3.25),\n (4.875, 5.625, 0.25, 3.25), (3.5, 4.75, 0.5, 4.5),\n (3.5, 4.75, 0.0, 4.5), (6.25, 4.75, 0.0, 4.5),\n (4.875, 5.625, 0.25, 4.25), (9.0, 4.75, 0.5, 4.5),\n (9.0, 3.0, 0.5, 4.5), (9.0, 3.0, 1.0, 4.5),\n (9.0, 3.0, 0.5, 5.0), (9.0, 4.75, 1.0, 4.5),\n (9.0, 4.75, 1.0, 5.0), (9.0, 4.75, 0.5, 5.0),\n (6.25, 3.0, 0.5, 4.5), (6.25, 3.0, 1.0, 4.5),\n (6.25, 3.0, 1.0, 5.0), (6.25, 3.0, 0.5, 5.0),\n (6.25, 4.75, 1.0, 4.5), (6.25, 4.75, 1.0, 5.0),\n (6.25, 4.75, 0.5, 5.0), (7.625, 3.875, 0.75, 4.75),\n (9.0, 4.75, 0.0, 4.5), (7.625, 5.625, 0.25, 4.25),\n (9.0, 3.0, 0.0, 4.5), (6.25, 3.0, 0.0, 4.5),\n (7.625, 3.875, 0.25, 4.25), (7.625, 3.875, 0.75, 4.25),\n (9.0, 4.75, 0.0, 5.0), (6.25, 3.0, 0.0, 5.0),\n (6.25, 4.75, 0.0, 5.0), (7.625, 3.875, 0.25, 4.75),\n (7.625, 5.625, 0.75, 4.25), (7.625, 5.625, 0.75, 4.75),\n (7.625, 5.625, 0.25, 4.75), (3.5, 3.0, 0.5, 4.5),\n (3.5, 3.0, 0.0, 4.5), (4.875, 3.875, 0.25, 4.25),\n (3.5, 3.0, 1.0, 4.5), (3.5, 4.75, 1.0, 4.5),\n (4.875, 3.875, 0.75, 4.25), (3.5, 3.0, 0.5, 
5.0),\n (3.5, 4.75, 1.0, 5.0), (3.5, 4.75, 0.5, 5.0),\n (4.875, 3.875, 0.75, 4.75), (3.5, 4.75, 0.0, 5.0),\n (4.875, 3.875, 0.25, 4.75), (4.875, 5.625, 0.75, 4.25),\n (4.875, 5.625, 0.75, 4.75), (4.875, 5.625, 0.25, 4.75),\n (3.5, 4.75, -0.5, 4.5), (4.875, 5.625, -0.25, 4.25),\n (9.0, 4.75, -0.5, 4.5), (9.0, 3.0, -0.5, 4.5),\n (9.0, 3.0, -1.0, 4.5), (9.0, 3.0, -0.5, 5.0),\n (9.0, 4.75, -1.0, 4.5), (9.0, 4.75, -1.0, 5.0),\n (9.0, 4.75, -0.5, 5.0), (6.25, 3.0, -0.5, 4.5),\n (6.25, 3.0, -1.0, 4.5), (6.25, 3.0, -1.0, 5.0),\n (6.25, 3.0, -0.5, 5.0), (6.25, 4.75, -1.0, 4.5),\n (6.25, 4.75, -1.0, 5.0), (6.25, 4.75, -0.5, 5.0),\n (7.625, 3.875, -0.75, 4.75), (7.625, 5.625, -0.25, 4.25),\n (7.625, 3.875, -0.25, 4.25), (7.625, 3.875, -0.75, 4.25),\n (7.625, 3.875, -0.25, 4.75), (7.625, 5.625, -0.75, 4.25),\n (7.625, 5.625, -0.75, 4.75), (7.625, 5.625, -0.25, 4.75),\n (3.5, 3.0, -0.5, 4.5), (4.875, 3.875, -0.25, 4.25),\n (3.5, 3.0, -1.0, 4.5), (3.5, 4.75, -1.0, 4.5),\n (4.875, 3.875, -0.75, 4.25), (3.5, 3.0, -0.5, 5.0),\n (3.5, 4.75, -1.0, 5.0), (3.5, 4.75, -0.5, 5.0),\n (4.875, 3.875, -0.75, 4.75), (4.875, 3.875, -0.25, 4.75),\n (4.875, 5.625, -0.75, 4.25), (4.875, 5.625, -0.75, 4.75),\n (4.875, 5.625, -0.25, 4.75), (0.75, 8.25, 0.0, 3.5),\n (0.75, 8.25, 0.0, 4.0), (0.75, 8.25, -0.5, 4.0),\n (2.125, 7.375, -0.25, 3.75), (-2.0, 8.25, -0.5, 3.5),\n (-2.0, 10.0, -0.5, 3.5), (-2.0, 10.0, -1.0, 3.5),\n (-2.0, 10.0, -0.5, 3.0), (-2.0, 8.25, -1.0, 3.5),\n (-2.0, 8.25, -1.0, 3.0), (-2.0, 8.25, -0.5, 3.0),\n (0.75, 10.0, -0.5, 3.5), (0.75, 10.0, -1.0, 3.5),\n (0.75, 10.0, -1.0, 3.0), (0.75, 10.0, -0.5, 3.0),\n (0.75, 8.25, -1.0, 3.5), (0.75, 8.25, -1.0, 3.0),\n (0.75, 8.25, -0.5, 3.0), (-0.625, 9.125, -0.75, 3.25),\n (-2.0, 8.25, 0.0, 3.5), (-2.0, 8.25, 0.0, 4.0),\n (-2.0, 8.25, -0.5, 4.0), (-0.625, 7.375, -0.25, 3.75),\n (-2.0, 10.0, 0.0, 3.5), (-2.0, 10.0, -0.5, 4.0),\n (0.75, 10.0, 0.0, 3.5), (0.75, 10.0, 0.0, 4.0),\n (0.75, 10.0, -0.5, 4.0), (-0.625, 9.125, -0.25, 3.75),\n (-2.0, 8.25, -1.0, 4.0), (0.75, 10.0, -1.0, 4.0),\n (0.75, 8.25, -1.0, 4.0), (-0.625, 9.125, -0.75, 3.75),\n (-2.0, 8.25, 0.0, 3.0), (0.75, 10.0, 0.0, 3.0),\n (0.75, 8.25, 0.0, 3.0), (-0.625, 9.125, -0.25, 3.25),\n (-0.625, 7.375, -0.75, 3.75), (-0.625, 7.375, -0.75, 3.25),\n (-0.625, 7.375, -0.25, 3.25), (2.125, 9.125, -0.25, 3.75),\n (2.125, 9.125, -0.75, 3.75), (2.125, 9.125, -0.75, 3.25),\n (2.125, 9.125, -0.25, 3.25), (2.125, 7.375, -0.75, 3.75),\n (2.125, 7.375, -0.75, 3.25), (2.125, 7.375, -0.25, 3.25),\n (0.75, 6.5, 0.5, 3.5), (0.75, 6.5, 0.5, 4.0),\n (0.75, 8.25, 0.5, 4.0), (2.125, 7.375, 0.25, 3.75),\n (-2.0, 8.25, 0.5, 3.5), (-2.0, 10.0, 0.5, 3.5),\n (-2.0, 10.0, 1.0, 3.5), (-2.0, 10.0, 0.5, 3.0),\n (-2.0, 8.25, 1.0, 3.5), (-2.0, 8.25, 1.0, 3.0),\n (-2.0, 8.25, 0.5, 3.0), (0.75, 10.0, 0.5, 3.5),\n (0.75, 10.0, 1.0, 3.5), (0.75, 10.0, 1.0, 3.0),\n (0.75, 10.0, 0.5, 3.0), (0.75, 8.25, 1.0, 3.5),\n (0.75, 8.25, 1.0, 3.0), (0.75, 8.25, 0.5, 3.0),\n (-0.625, 9.125, 0.75, 3.25), (-2.0, 6.5, 0.5, 3.5),\n (-2.0, 6.5, 0.5, 4.0), (-2.0, 8.25, 0.5, 4.0),\n (-0.625, 7.375, 0.25, 3.75), (-2.0, 10.0, 0.5, 4.0),\n (0.75, 10.0, 0.5, 4.0), (-0.625, 9.125, 0.25, 3.75),\n (-2.0, 8.25, 1.0, 4.0), (0.75, 10.0, 1.0, 4.0),\n (0.75, 8.25, 1.0, 4.0), (-0.625, 9.125, 0.75, 3.75),\n (-0.625, 9.125, 0.25, 3.25), (-2.0, 6.5, 1.0, 3.5),\n (0.75, 6.5, 1.0, 3.5), (0.75, 6.5, 1.0, 4.0),\n (-0.625, 7.375, 0.75, 3.75), (-2.0, 6.5, 0.5, 3.0),\n (0.75, 6.5, 1.0, 3.0), (0.75, 6.5, 0.5, 3.0),\n (-0.625, 7.375, 0.75, 3.25), (-0.625, 7.375, 0.25, 3.25),\n (2.125, 
9.125, 0.25, 3.75), (2.125, 9.125, 0.75, 3.75),\n (2.125, 9.125, 0.75, 3.25), (2.125, 9.125, 0.25, 3.25),\n (2.125, 7.375, 0.75, 3.75), (2.125, 7.375, 0.75, 3.25),\n (2.125, 7.375, 0.25, 3.25), (0.75, 6.5, 0.5, 4.5),\n (0.75, 6.5, 0.0, 4.5), (0.75, 8.25, 0.0, 4.5),\n (2.125, 7.375, 0.25, 4.25), (-2.0, 8.25, 0.5, 4.5),\n (-2.0, 10.0, 0.5, 4.5), (-2.0, 10.0, 1.0, 4.5),\n (-2.0, 10.0, 0.5, 5.0), (-2.0, 8.25, 1.0, 4.5),\n (-2.0, 8.25, 1.0, 5.0), (-2.0, 8.25, 0.5, 5.0),\n (0.75, 10.0, 0.5, 4.5), (0.75, 10.0, 1.0, 4.5),\n (0.75, 10.0, 1.0, 5.0), (0.75, 10.0, 0.5, 5.0),\n (0.75, 8.25, 1.0, 4.5), (0.75, 8.25, 1.0, 5.0),\n (0.75, 8.25, 0.5, 5.0), (-0.625, 9.125, 0.75, 4.75),\n (-2.0, 6.5, 0.5, 4.5), (-2.0, 6.5, 0.0, 4.5),\n (-2.0, 8.25, 0.0, 4.5), (-0.625, 7.375, 0.25, 4.25),\n (-2.0, 10.0, 0.0, 4.5), (0.75, 10.0, 0.0, 4.5),\n (-0.625, 9.125, 0.25, 4.25), (-0.625, 9.125, 0.75, 4.25),\n (-2.0, 8.25, 0.0, 5.0), (0.75, 10.0, 0.0, 5.0),\n (0.75, 8.25, 0.0, 5.0), (-0.625, 9.125, 0.25, 4.75),\n (-2.0, 6.5, 1.0, 4.5), (0.75, 6.5, 1.0, 4.5),\n (-0.625, 7.375, 0.75, 4.25), (-2.0, 6.5, 0.5, 5.0),\n (0.75, 6.5, 1.0, 5.0), (0.75, 6.5, 0.5, 5.0),\n (-0.625, 7.375, 0.75, 4.75), (0.75, 6.5, 0.0, 5.0),\n (-0.625, 7.375, 0.25, 4.75), (2.125, 9.125, 0.25, 4.25),\n (2.125, 9.125, 0.75, 4.25), (2.125, 9.125, 0.75, 4.75),\n (2.125, 9.125, 0.25, 4.75), (2.125, 7.375, 0.75, 4.25),\n (2.125, 7.375, 0.75, 4.75), (2.125, 7.375, 0.25, 4.75),\n (0.75, 6.5, -0.5, 4.5), (2.125, 7.375, -0.25, 4.25),\n (-2.0, 8.25, -0.5, 4.5), (-2.0, 10.0, -0.5, 4.5),\n (-2.0, 10.0, -1.0, 4.5), (-2.0, 10.0, -0.5, 5.0),\n (-2.0, 8.25, -1.0, 4.5), (-2.0, 8.25, -1.0, 5.0),\n (-2.0, 8.25, -0.5, 5.0), (0.75, 10.0, -0.5, 4.5),\n (0.75, 10.0, -1.0, 4.5), (0.75, 10.0, -1.0, 5.0),\n (0.75, 10.0, -0.5, 5.0), (0.75, 8.25, -1.0, 4.5),\n (0.75, 8.25, -1.0, 5.0), (0.75, 8.25, -0.5, 5.0),\n (-0.625, 9.125, -0.75, 4.75), (-2.0, 6.5, -0.5, 4.5),\n (-0.625, 7.375, -0.25, 4.25), (-0.625, 9.125, -0.25, 4.25),\n (-0.625, 9.125, -0.75, 4.25), (-0.625, 9.125, -0.25, 4.75),\n (-2.0, 6.5, -1.0, 4.5), (0.75, 6.5, -1.0, 4.5),\n (-0.625, 7.375, -0.75, 4.25), (-2.0, 6.5, -0.5, 5.0),\n (0.75, 6.5, -1.0, 5.0), (0.75, 6.5, -0.5, 5.0),\n (-0.625, 7.375, -0.75, 4.75), (-0.625, 7.375, -0.25, 4.75),\n (2.125, 9.125, -0.25, 4.25), (2.125, 9.125, -0.75, 4.25),\n (2.125, 9.125, -0.75, 4.75), (2.125, 9.125, -0.25, 4.75),\n (2.125, 7.375, -0.75, 4.25), (2.125, 7.375, -0.75, 4.75),\n (2.125, 7.375, -0.25, 4.75), (0.75, 4.75, 0.5, 4.0),\n (2.125, 5.625, 0.25, 3.75), (-2.0, 4.75, 0.5, 3.5),\n (-2.0, 3.0, 0.5, 3.5), (-2.0, 3.0, 1.0, 3.5),\n (-2.0, 3.0, 0.5, 3.0), (-2.0, 4.75, 1.0, 3.5),\n (-2.0, 4.75, 1.0, 3.0), (-2.0, 4.75, 0.5, 3.0),\n (0.75, 3.0, 0.5, 3.5), (0.75, 3.0, 1.0, 3.5),\n (0.75, 3.0, 1.0, 3.0), (0.75, 3.0, 0.5, 3.0),\n (0.75, 4.75, 1.0, 3.5), (0.75, 4.75, 1.0, 3.0),\n (0.75, 4.75, 0.5, 3.0), (-0.625, 3.875, 0.75, 3.25),\n (-2.0, 4.75, 0.5, 4.0), (-0.625, 5.625, 0.25, 3.75),\n (-2.0, 3.0, 0.5, 4.0), (0.75, 3.0, 0.5, 4.0),\n (-0.625, 3.875, 0.25, 3.75), (-2.0, 4.75, 1.0, 4.0),\n (0.75, 3.0, 1.0, 4.0), (0.75, 4.75, 1.0, 4.0),\n (-0.625, 3.875, 0.75, 3.75), (-0.625, 3.875, 0.25, 3.25),\n (-0.625, 5.625, 0.75, 3.75), (-0.625, 5.625, 0.75, 3.25),\n (-0.625, 5.625, 0.25, 3.25), (2.125, 3.875, 0.25, 3.75),\n (2.125, 3.875, 0.75, 3.75), (2.125, 3.875, 0.75, 3.25),\n (2.125, 3.875, 0.25, 3.25), (2.125, 5.625, 0.75, 3.75),\n (2.125, 5.625, 0.75, 3.25), (2.125, 5.625, 0.25, 3.25),\n (0.75, 4.75, 0.0, 4.5), (2.125, 5.625, 0.25, 4.25),\n (-2.0, 4.75, 0.5, 4.5), (-2.0, 3.0, 0.5, 4.5),\n 
(-2.0, 3.0, 1.0, 4.5), (-2.0, 3.0, 0.5, 5.0),\n (-2.0, 4.75, 1.0, 4.5), (-2.0, 4.75, 1.0, 5.0),\n (-2.0, 4.75, 0.5, 5.0), (0.75, 3.0, 0.5, 4.5),\n (0.75, 3.0, 1.0, 4.5), (0.75, 3.0, 1.0, 5.0),\n (0.75, 3.0, 0.5, 5.0), (0.75, 4.75, 1.0, 4.5),\n (0.75, 4.75, 1.0, 5.0), (0.75, 4.75, 0.5, 5.0),\n (-0.625, 3.875, 0.75, 4.75), (-2.0, 4.75, 0.0, 4.5),\n (-0.625, 5.625, 0.25, 4.25), (-2.0, 3.0, 0.0, 4.5),\n (0.75, 3.0, 0.0, 4.5), (-0.625, 3.875, 0.25, 4.25),\n (-0.625, 3.875, 0.75, 4.25), (-2.0, 4.75, 0.0, 5.0),\n (0.75, 3.0, 0.0, 5.0), (0.75, 4.75, 0.0, 5.0),\n (-0.625, 3.875, 0.25, 4.75), (-0.625, 5.625, 0.75, 4.25),\n (-0.625, 5.625, 0.75, 4.75), (-0.625, 5.625, 0.25, 4.75),\n (2.125, 3.875, 0.25, 4.25), (2.125, 3.875, 0.75, 4.25),\n (2.125, 3.875, 0.75, 4.75), (2.125, 3.875, 0.25, 4.75),\n (2.125, 5.625, 0.75, 4.25), (2.125, 5.625, 0.75, 4.75),\n (2.125, 5.625, 0.25, 4.75), (2.125, 5.625, -0.25, 4.25),\n (-2.0, 4.75, -0.5, 4.5), (-2.0, 3.0, -0.5, 4.5),\n (-2.0, 3.0, -1.0, 4.5), (-2.0, 3.0, -0.5, 5.0),\n (-2.0, 4.75, -1.0, 4.5), (-2.0, 4.75, -1.0, 5.0),\n (-2.0, 4.75, -0.5, 5.0), (0.75, 3.0, -0.5, 4.5),\n (0.75, 3.0, -1.0, 4.5), (0.75, 3.0, -1.0, 5.0),\n (0.75, 3.0, -0.5, 5.0), (0.75, 4.75, -1.0, 4.5),\n (0.75, 4.75, -1.0, 5.0), (0.75, 4.75, -0.5, 5.0),\n (-0.625, 3.875, -0.75, 4.75), (-0.625, 5.625, -0.25, 4.25),\n (-0.625, 3.875, -0.25, 4.25), (-0.625, 3.875, -0.75, 4.25),\n (-0.625, 3.875, -0.25, 4.75), (-0.625, 5.625, -0.75, 4.25),\n (-0.625, 5.625, -0.75, 4.75), (-0.625, 5.625, -0.25, 4.75),\n (2.125, 3.875, -0.25, 4.25), (2.125, 3.875, -0.75, 4.25),\n (2.125, 3.875, -0.75, 4.75), (2.125, 3.875, -0.25, 4.75),\n (2.125, 5.625, -0.75, 4.25), (2.125, 5.625, -0.75, 4.75),\n (2.125, 5.625, -0.25, 4.75)]\n nn_checks = {(0.75, 3.0, -0.5, 4.0): [(0.75, 4.75, -0.5, 4.0),\n (0.75, 4.75, -0.5, 4.5),\n (2.125, 3.875, -0.25, 3.75),\n (2.125, 3.875, -0.75, 3.75),\n (2.125, 3.875, -0.25, 4.25),\n (2.125, 3.875, -0.75, 4.25),\n (3.5, 3.0, 0.0, 4.0),\n (3.5, 3.0, -0.5, 4.0),\n (0.75, 3.0, 0.0, 4.0),\n (0.75, 3.0, -0.5, 4.5),\n (-2.0, 3.0, -0.5, 4.0),\n (-2.0, 3.0, 0.0, 4.0),\n (-0.625, 3.875, -0.25, 4.25),\n (-0.625, 3.875, -0.75, 4.25),\n (-0.625, 3.875, -0.75, 3.75),\n (0.75, 3.0, -0.5, 3.5),\n (-2.0, 3.0, -1.0, 4.0),\n (0.75, 3.0, -1.0, 4.0),\n (3.5, 3.0, -1.0, 4.0),\n (0.75, 4.75, -0.5, 3.5),\n (-0.625, 3.875, -0.25, 3.75)],\n (-2.0, 3.0, -1.0, 3.0): [(-0.625, 3.875, -0.75, 3.25),\n (0.75, 3.0, -0.5, 3.0),\n (-2.0, 4.75, -1.0, 3.0),\n (-2.0, 4.75, -1.0, 3.5),\n (0.75, 3.0, -1.0, 3.0),\n (0.75, 4.75, -1.0, 3.5),\n (0.75, 3.0, -1.0, 3.5),\n (0.75, 4.75, -1.0, 3.0),\n (-2.0, 3.0, -0.5, 3.0),\n (-2.0, 3.0, -0.5, 3.5),\n (0.75, 4.75, -0.5, 3.0),\n (-2.0, 3.0, -1.0, 3.5),\n (-2.0, 4.75, -0.5, 3.5),\n (-2.0, 4.75, -0.5, 3.0),\n (0.75, 3.0, -0.5, 3.5)],\n (-0.625, 5.625, -0.75, 4.25): [(0.75, 4.75, -0.5, 4.0),\n (0.75, 4.75, -0.5, 4.5),\n (-2.0, 6.5, -1.0, 4.5),\n (-2.0, 6.5, -1.0, 4.0),\n (0.75, 6.5, -1.0, 4.0),\n (0.75, 6.5, -1.0, 4.5),\n (0.75, 4.75, -1.0, 4.5),\n (0.75, 4.75, -1.0, 4.0),\n (-2.0, 4.75, -1.0, 4.0),\n (-2.0, 4.75, -1.0, 4.5),\n (-2.0, 4.75, -0.5, 4.5),\n (-2.0, 4.75, -0.5, 4.0),\n (0.75, 6.5, -0.5, 4.0),\n (0.75, 6.5, -0.5, 4.5),\n (-2.0, 6.5, -0.5, 4.5),\n (-2.0, 6.5, -0.5, 4.0)]}\n\n init_triangulation(4, 2, check, nn_checks,\n bounds=[(-2, 9), (3, 10), (-1, 1), (3, 5)])",
"def test_1_2_2D_rec_splits(self):\n check = [(3.0, -2.0), (7.0, -1.0), (7.0, -2.0), (3.0, -1.0),\n (5.0, -1.5), (3.0, -1.5), (5.0, -2.0), (4.0, -1.75),\n (7.0, -1.5), (5.0, -1.0), (6.0, -1.25), (6.0, -1.75),\n (4.0, -1.25), (5.0, -1.75), (4.0, -1.5), (4.5, -1.625),\n (3.0, -1.75), (4.0, -2.0), (3.5, -1.875), (3.5, -1.625),\n (4.5, -1.875), (5.0, -1.25), (6.0, -1.5), (5.5, -1.375),\n (7.0, -1.25), (6.0, -1.0), (6.5, -1.125), (6.5, -1.375),\n (5.5, -1.125), (5.5, -1.625), (7.0, -1.75), (6.0, -2.0),\n (6.5, -1.875), (6.5, -1.625), (5.5, -1.875), (4.5, -1.375),\n (3.0, -1.25), (4.0, -1.0), (3.5, -1.125), (3.5, -1.375),\n (4.5, -1.125)]\n nn_checks = {(3.0, -2.0): [(3.0, -1.75), (3.5, -1.875), (4.0, -2.0)],\n (5.0, -1.75): [(5.0, -2.0), (5.0, -1.5), (5.5, -1.625),\n (5.5, -1.875), (4.5, -1.625), (6.0, -1.75),\n (4.5, -1.875), (4.0, -1.75)],\n (6.0, -2.0): [(5.0, -2.0), (5.5, -1.875), (6.0, -1.75),\n (6.5, -1.875), (7, -2)],\n (4.5, -1.125): [(5.0, -1.0), (4.0, -1.25), (5.0, -1.25),\n (4.0, -1.0)]}\n\n init_triangulation(2, 2, check, nn_checks, bounds=[(3, 7), (-2, -1)])",
"def __init__(self, M, rat):\n self.M = M\n xc0, _ = np.polynomial.chebyshev.chebgauss(M-0)\n xc1, _ = np.polynomial.chebyshev.chebgauss(M-1)\n xc2, _ = np.polynomial.chebyshev.chebgauss(M-2)\n # vandermonde and inverse vandermonde matrices\n self.V0 = np.polynomial.chebyshev.chebvander(xc0, M-1)\n self.V1 = np.polynomial.chebyshev.chebvander(xc1, M-2)\n self.V2 = np.polynomial.chebyshev.chebvander(xc2, M-3)\n self.VI0 = np.linalg.inv(self.V0)\n self.VI1 = np.linalg.inv(self.V1)\n self.VI2 = np.linalg.inv(self.V2)\n # differentiation matrices\n DC01 = np.polynomial.chebyshev.chebder(np.eye(M-0)) / rat\n DC12 = np.polynomial.chebyshev.chebder(np.eye(M-1)) / rat\n DC00 = np.row_stack([DC01, np.zeros(M)])\n self.D00 = self.V0.dot(DC00.dot(self.VI0))\n self.D01 = self.V1.dot(DC01.dot(self.VI0))\n self.D12 = self.V2.dot(DC12.dot(self.VI1))\n # boundary condition operators\n self.ibc_dirichlet = np.polynomial.chebyshev.chebvander(1, M-1).dot(self.VI0)\n self.obc_dirichlet = np.polynomial.chebyshev.chebvander(-1, M-1).dot(self.VI0)\n self.ibc_neumann = self.ibc_dirichlet.dot(self.D00)\n self.obc_neumann = self.obc_dirichlet.dot(self.D00)\n # rank reduction operators\n temp = np.zeros([M-1, M-0], dtype=float)\n np.fill_diagonal(temp, 1.0)\n self.R01 = self.V1.dot(temp.dot(self.VI0))\n temp = np.zeros([M-2, M-1], dtype=float)\n np.fill_diagonal(temp, 1.0)\n self.R12 = self.V2.dot(temp.dot(self.VI1))\n self.R02 = self.R12.dot(self.R01)\n # get poof operator from M-1 --> M\n temp = np.zeros([M, M-1], dtype=float)\n np.fill_diagonal(temp, 1.0)\n self.P10 = self.V0.dot(temp.dot(self.VI1))",
"def kernel_test(slabs, data, backend):\n Q = data[:, 0]\n\n layers = []\n for thickness, rsld, isld, sigma in slabs:\n layers.append(\n model.Layer(\n b=(rsld - 1j * isld), dens=0.1, d=thickness, sigma=sigma\n )\n )\n layers.reverse()\n stack = model.Stack(Layers=list(layers[1:-1]), Repetitions=1)\n sample = model.Sample(\n Stacks=[stack], Ambient=layers[-1], Substrate=layers[0]\n )\n # print(sample)\n\n inst = model.Instrument(\n probe=backend,\n wavelength=1.54,\n coords=\"q\",\n I0=1,\n res=0,\n restype=\"no conv\",\n respoints=5,\n resintrange=2,\n beamw=0.1,\n footype=\"no corr\",\n samplelen=10,\n pol=\"uu\",\n )\n if data.shape[1] == 4:\n dQ = data[:, 3]\n inst.restype = \"full conv and varying res.\"\n inst.res = dQ\n if backend == \"neutron pol spin flip\":\n # memory issues in matrix formalism if too many data points\n inst.respoints = 101\n else:\n inst.respoints = (\n 10001 # try to use same convolution as ref1d when generating\n )\n inst.resintrange = 3.5\n\n # print(inst)\n R = sample.SimSpecular(Q, inst)\n\n assert R.shape == data[:, 1].shape\n if data.shape[1] == 4:\n # validation accuracy is reduced for resolution runs, as strongly\n # depends on numerical convolution scheme\n if backend == \"neutron pol spin flip\":\n np.testing.assert_allclose(R, data[:, 1], rtol=0.005)\n else:\n np.testing.assert_allclose(R, data[:, 1], rtol=0.001)\n else:\n np.testing.assert_allclose(R, data[:, 1], rtol=0.001)",
"def test_elemwise_multiple_inputs_optimisation2(self):\r\n raise SkipTest(\"Current implementation of Canonizer does not \"\r\n \"implement all cases. Skip the corresponding test.\")\r\n\r\n shp = (5, 5)\r\n fx, fy, fz = fmatrices('xyz')\r\n dx, dy, dz = dmatrices('xyz')\r\n fv = fvector('r').dimshuffle('x', 0)\r\n dv = dvector('s').dimshuffle('x', 0)\r\n fxv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fyv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fzv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float32').reshape(1, shp[0])\r\n dxv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dyv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dzv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float64').reshape(1, shp[0])\r\n cases = [\r\n (fx + fy, (fx, fy), (fxv, fyv), 1, 'float32'),\r\n (fx * fy, (fx, fy), (fxv, fyv), 1, 'float32'),\r\n (fx + fy + fz, (fx, fy, fz), (fxv, fyv, fzv), 1, 'float32'),\r\n (dx + dy + dz, (dx, dy, dz), (dxv, dyv, dzv), 1, 'float64'),\r\n (fx * fy * fz, (fx, fy, fz), (fxv, fyv, fzv), 1, 'float32'),\r\n (dx * dy * dz, (dx, dy, dz), (dxv, dyv, dzv), 1, 'float64'),\r\n (fx * fy * (fx + fy + fz), (fx, fy, fz), (fxv, fyv,\r\n fzv), 2, 'float32'),\r\n (dx * dy * (dx + dy + dz), (dx, dy, dz), (dxv, dyv,\r\n dzv), 2, 'float64'),\r\n (fx * fy * (fx + fy + dz), (fx, fy, dz), (dxv, dyv, dzv), 2,\r\n 'float64'), # check mixed type add\r\n (dz * fy * (fx + fy), (fx, fy, dz), (dxv, dyv, dzv), 2,\r\n 'float64'), # check mixed type mul\r\n #check with dimshuffle of constant\r\n (fx + fy + fz + 2, (fx, fy, fz), (fxv, fyv, fzv), 1, 'float32'),\r\n (fx * fy * fz * 2, (fx, fy, fz), (fxv, fyv, fzv), 1, 'float32'),\r\n (2 + fx + fy + fz, (fx, fy, fz), (fxv, fyv, fzv), 1, 'float32'),\r\n (2 * fx * fy * fz, (fx, fy, fz), (fxv, fyv, fzv), 1, 'float32'),\r\n (2 + fx + fy + fz + 2, (fx, fy, fz), (fxv, fyv,\r\n fzv), 1, 'float32'),\r\n (2 * fx * fy * fz * 2, (fx, fy, fz), (fxv, fyv,\r\n fzv), 1, 'float32'),\r\n (fx * fy * 2 * (fx+fy+fz), (fx, fy, fz), (fxv, fyv,\r\n fzv), 2, 'float32'),\r\n (fx*fy*(2+fx+fy+fz), (fx, fy, fz), (fxv, fyv, fzv), 2, 'float32'),\r\n (fx*fy*2*(fx+fy+fz+2), (fx, fy, fz), (fxv, fyv,\r\n fzv), 2, 'float32'),\r\n\r\n #check with broadcast of row\r\n (fx+fy+fz+fv, (fx, fy, fz, fv), (fxv, fyv, fzv,\r\n fvv), 1, 'float32'),\r\n (fx*fy*fz*fv, (fx, fy, fz, fv), (fxv, fyv, fzv,\r\n fvv), 1, 'float32'),\r\n (fv+fx+fy+fz, (fx, fy, fz, fv), (fxv, fyv, fzv,\r\n fvv), 1, 'float32'),\r\n (fv*fx*fy*fz, (fx, fy, fz, fv), (fxv, fyv, fzv,\r\n fvv), 1, 'float32'),\r\n (fx*fy*fv*(fx+fy+fz), (fx, fy, fz, fv), (fxv, fyv,\r\n fzv, fvv), 2, 'float32'),\r\n (fx*fy*(fv+fx+fy+fz), (fx, fy, fz, fv), (fxv, fyv,\r\n fzv, fvv), 2, 'float32'),\r\n (fx*fy*fv*(fv+fx+fy+fz), (fx, fy, fz, fv), (fxv, fyv, fzv,\r\n fvv), 2, 'float32'),\r\n (dx+dy+dz+dv, (dx, dy, dz, dv), (dxv, dyv, dzv,\r\n dvv), 1, 'float64'),\r\n (dx*dy*dz*dv, (dx, dy, dz, dv), (dxv, dyv, dzv,\r\n dvv), 1, 'float64'),\r\n (dv+dx+dy+dz, (dx, dy, dz, dv), (dxv, dyv, dzv,\r\n dvv), 1, 'float64'),\r\n (dv*dx*dy*dz, (dx, dy, dz, dv), (dxv, dyv, dzv,\r\n dvv), 1, 'float64'),\r\n (dx*dy*dv*(dx+dy+dz), (dx, dy, dz, dv), (dxv, dyv,\r\n dzv, dvv), 2, 'float64'),\r\n (dx*dy*(dv+dx+dy+dz), (dx, dy, dz, dv), (dxv, dyv,\r\n dzv, dvv), 2, 'float64'),\r\n (dx*dy*dv*(dv+dx+dy+dz), (dx, dy, dz, dv), (dxv, dyv, dzv,\r\n dvv), 2, 
'float64'),\r\n\r\n ] # [10:11]\r\n# print cases\r\n\r\n #We must be sure that the Canonizer is working, but that we don't have other\r\n # optimisation that could hide bug in the Canonizer as local_elemwise_fusion\r\n mode = compile.mode.get_default_mode()\r\n mode._optimizer = gof.Query([\"canonicalize\"])\r\n mode._optimizer = mode._optimizer.excluding('local_elemwise_fusion')\r\n for id, [g, sym_inputs, val_inputs, nb_elemwise, out_dtype] in enumerate(cases):\r\n f = compile.function(list(sym_inputs), g,\r\n #we need the optimisation enabled, debug do this.\r\n mode=mode)\r\n\r\n out = f(*val_inputs)\r\n assert(len(f.maker.fgraph.toposort()) == nb_elemwise)\r\n assert(out_dtype == out.dtype)",
"def test_uv_degrid():\n\n layout = read_layout(layout_path=f\"{test_data}/test_mwa.txt\")\n xyz = enh_xyz(layout=layout, latitude=mwa_geo.latitude.radians)\n uvw = xyz_uvw(xyz=xyz, freq=freq, dec0=mwa_geo.latitude.radians, ha0=0)\n uv = uv_degrid(max_lambda=1400, nside=20, uvw=uvw, sigma=3, kersize=21, kernel=None)\n\n assert uv.shape == (20, 20)\n assert uv[0, 0] == 0.0",
"def run_2dtest(dim=3):\n\n traces = []\n\n for smoothing in range(10, 101, 10):\n pencilbeams = []\n num_sight_lines = 100\n\n # Construct our pencilbeams\n xlin = np.linspace(0., 1., num_sight_lines+1) * smoothing\n ylin = np.linspace(0., 1., num_sight_lines+1) * smoothing\n X,Y = np.meshgrid(xlin, ylin)\n\n # Store resulting LoS integrations in results\n results = X\n for i in range(0,num_sight_lines+1):\n for j in range(0,num_sight_lines+1): \n results[i,j] = testsph(X[i,j],Y[i,j],smoothing,dim=dim)\n\n # Integrate the pencilbeam weightings to find the full SPH weighting\n # This is the plane x-z from origin along +ve x-axis (sitting at y=0)\n\n # Have to integrate across x for every y\n Int_step = np.zeros( num_sight_lines+1 )\n for iy in range(0, num_sight_lines+1):\n isfin = np.isfinite(results[iy,:])\n Int_step[iy] = integrate.trapz(results[iy,isfin], xlin[isfin])\n # Now integrate across y\n isfin = np.isfinite(Int_step)\n particle_integral = integrate.trapz(Int_step[isfin], ylin[isfin])\n # \"All smoothing lengths should integrate to the same value of unity \"\n # We've sampled a quadrant in x-y and integrated entirely along z, so mulitply by 4\n print particle_integral * 4.\n\n isfin = np.isfinite(results[0,:])\n traces.append(go.Scatter(y=results[0,isfin], x=xlin[isfin]))\n\n # The integral of the entire particle should be unity, the trace of axis will not be however\n plot(traces)",
"def reg2():\n data2 = np.load(\"./data/measure4_1.npy\")[2:]\n\n x2 = np.arange(0,len(data2),1)\n\n fit = True \n redistribute = True \n\n #x2 = 1.3149710372035508*x2 -22.617788714272098\n c2 = np.where(x2 < 135)\n\n data = data2[c2] \n x = x2[c2]\n print(\"datapoints:\",len(data))\n\n mass = 79/251/6080*52658\n if redistribute == True:\n\n # conserving the mass\n total_mass = mass * len(data)\n remaining = (data > 0)\n\n while True:\n print(\"new redistributing ...\")\n print(\"total mass:\",total_mass)\n # find those which are smaller\n q = (data[remaining] <= mass)\n remaining = ~q\n if len(np.nonzero(q)[0]) == 0:\n data[remaining] -= mass\n break\n print(\"number of smaller values:\",len(np.nonzero(q)[0]),\"\\n\")\n # subtract the mass of this data\n total_mass -= np.sum(data[q])\n mass = total_mass / len(np.nonzero(~remaining)[0]) \n data[q] = 0\n\n # redistribute total remaining mass to single channels\n print(\"number of nonzero:\",len(np.nonzero(data)[0]))\n\n c = np.nonzero(data) \n data = data[c]\n x = x[c]\n\n #scaling to time units\n x = 6.3149710372035508*x -22.617788714272098\n c = (x>0)\n x = x[c]\n data = data[c]\n\n x = x[::-1] - min(x)\n\n\n error = np.sqrt(data) \n # only fit for x < 135\n fig = plt.figure()\n ax = plt.subplot(111)\n plt.grid(True)\n\n if fit==True:\n\n def func(x, *p):\n a,b,c = p\n return a + b * c**x\n\n # p0 is the initial guess for the fitting coefficients \n p0 = [1., 1., 1.]\n\n p, cov = curve_fit(func, x, data, p0=p0, sigma = error)\n p_uc = uc.correlated_values(p, cov)\n c = p_uc[2]\n\n T12_lit = 98 \n lamb_lit = -(np.log(2)/T12_lit)\n print(\"lit\",lamb_lit)\n \n\n lamb = umath.log(c)\n print(lamb)\n T12 = -np.log(2) /lamb \n print(\"t12=\",T12)\n\n x_fit = np.linspace(min(x),max(x))\n\n data_fit = func(x_fit,*p) \n pmin = (p - np.sqrt(np.diag(cov)))\n pmax = (p + np.sqrt(np.diag(cov)))\n\n data_fit_min = func(x_fit, *pmin)\n data_fit_max = func(x_fit, *pmax)\n\n plt.plot(x_fit,data_fit)\n plt.plot(x_fit,90*np.exp(x_fit * lamb_lit))\n plt.fill_between(x_fit, data_fit_min , data_fit_max,facecolor=\"r\", color=\"b\", alpha=0.3 )\n\n # place a text box in upper left in axes coords\n props = dict(boxstyle='round', facecolor='white', alpha=0.5)\n textstr = '$a + b \\cdot c^x$ with\\n$a=%.2f$\\n$b=%.2f$\\n$c=%.2f$'%(p[0], p[1],p[2])\n ax.text(0.6, 0.85, textstr, transform=ax.transAxes, fontsize=18, va='top', bbox=props)\n\n ax.xaxis.set_tick_params(labelsize = 14)\n ax.yaxis.set_tick_params(labelsize = 14)\n\n ax.add_patch(plt.Rectangle((0,0.1),155,100,alpha = 0.2))\n\n plt.errorbar(x,data, yerr=error,fmt=\"x\")\n #plt.scatter(x,data,c=\"blue\",alpha = 0.9,s=100, marker=\"x\")\n plt.ylim(min(data)*0.8,max(data))\n #plt.yscale(\"log\")\n plt.xlim(min(x)*0.8,max(x))\n plt.xlabel(\"time in $ns$\", fontsize = 14)\n plt.ylabel(\"counts\", fontsize = 14)\n make_fig(fig,1,1,name=\"plot4_1_reg\")",
"def test_2_2_3D_cube_splits(self):\n check = [(0, 0, 0), (1, 1, 1), (1, 0, 0), (1, 1, 0), (1, 0, 1),\n (0, 1, 0),\n (0, 1, 1), (0, 0, 1), (0.5, 0.5, 0.5), (0.0, 0.5, 0.5),\n (0.0, 0.0, 0.5), (0.0, 0.5, 0.0), (0.5, 0.0, 0.5),\n (0.5, 0.0, 0.0),\n (0.5, 0.5, 0.0), (0.25, 0.25, 0.25), (1.0, 0.5, 0.5),\n (1.0, 1.0, 0.5),\n (1.0, 0.5, 1.0), (0.5, 1.0, 0.5), (0.5, 1.0, 1.0),\n (0.5, 0.5, 1.0),\n (0.75, 0.75, 0.75), (1.0, 0.0, 0.5), (1.0, 0.5, 0.0),\n (0.75, 0.25, 0.25), (0.5, 1.0, 0.0), (0.75, 0.75, 0.25),\n (0.5, 0.0, 1.0), (0.75, 0.25, 0.75), (0.0, 1.0, 0.5),\n (0.25, 0.75, 0.25), (0.0, 0.5, 1.0), (0.25, 0.75, 0.75),\n (0.25, 0.25, 0.75), (0.5, 0.25, 0.25), (0.5, 0.5, 0.25),\n (0.5, 0.25, 0.5), (0.25, 0.5, 0.25), (0.25, 0.5, 0.5),\n (0.25, 0.25, 0.5), (0.375, 0.375, 0.375), (0.0, 0.25, 0.25),\n (0.0, 0.0, 0.25), (0.0, 0.25, 0.0), (0.25, 0.0, 0.25),\n (0.25, 0.0, 0.0), (0.25, 0.25, 0.0), (0.125, 0.125, 0.125),\n (0.0, 0.5, 0.25), (0.0, 0.25, 0.5), (0.125, 0.375, 0.375),\n (0.25, 0.0, 0.5), (0.125, 0.125, 0.375), (0.25, 0.5, 0.0),\n (0.125, 0.375, 0.125), (0.5, 0.0, 0.25), (0.375, 0.125, 0.375),\n (0.5, 0.25, 0.0), (0.375, 0.125, 0.125), (0.375, 0.375, 0.125),\n (0.5, 0.75, 0.75), (0.5, 0.5, 0.75), (0.5, 0.75, 0.5),\n (0.75, 0.5, 0.75), (0.75, 0.5, 0.5), (0.75, 0.75, 0.5),\n (0.625, 0.625, 0.625), (1.0, 0.75, 0.75), (1.0, 1.0, 0.75),\n (1.0, 0.75, 1.0), (0.75, 1.0, 0.75), (0.75, 1.0, 1.0),\n (0.75, 0.75, 1.0), (0.875, 0.875, 0.875), (1.0, 0.5, 0.75),\n (1.0, 0.75, 0.5), (0.875, 0.625, 0.625), (0.75, 1.0, 0.5),\n (0.875, 0.875, 0.625), (0.75, 0.5, 1.0), (0.875, 0.625, 0.875),\n (0.5, 1.0, 0.75), (0.625, 0.875, 0.625), (0.5, 0.75, 1.0),\n (0.625, 0.875, 0.875), (0.625, 0.625, 0.875),\n (0.75, 0.5, 0.25),\n (0.75, 0.25, 0.5), (0.625, 0.375, 0.375), (1.0, 0.25, 0.25),\n (1.0, 0.0, 0.25), (1.0, 0.25, 0.0), (0.75, 0.0, 0.25),\n (0.75, 0.0, 0.0), (0.75, 0.25, 0.0), (0.875, 0.125, 0.125),\n (1.0, 0.5, 0.25), (1.0, 0.25, 0.5), (0.875, 0.375, 0.375),\n (0.75, 0.0, 0.5), (0.875, 0.125, 0.375), (0.75, 0.5, 0.0),\n (0.875, 0.375, 0.125), (0.625, 0.125, 0.375),\n (0.625, 0.125, 0.125),\n (0.625, 0.375, 0.125), (0.5, 0.75, 0.25),\n (0.625, 0.625, 0.375),\n (1.0, 0.75, 0.25), (1.0, 1.0, 0.25), (1.0, 0.75, 0.0),\n (0.75, 1.0, 0.25), (0.75, 1.0, 0.0), (0.75, 0.75, 0.0),\n (0.875, 0.875, 0.125), (0.875, 0.625, 0.375),\n (0.875, 0.875, 0.375),\n (0.875, 0.625, 0.125), (0.5, 1.0, 0.25), (0.625, 0.875, 0.375),\n (0.5, 0.75, 0.0), (0.625, 0.875, 0.125), (0.625, 0.625, 0.125),\n (0.5, 0.25, 0.75), (0.625, 0.375, 0.625), (1.0, 0.25, 0.75),\n (1.0, 0.0, 0.75), (1.0, 0.25, 1.0), (0.75, 0.0, 0.75),\n (0.75, 0.0, 1.0), (0.75, 0.25, 1.0), (0.875, 0.125, 0.875),\n (0.875, 0.375, 0.625), (0.875, 0.125, 0.625),\n (0.875, 0.375, 0.875),\n (0.5, 0.0, 0.75), (0.625, 0.125, 0.625), (0.5, 0.25, 1.0),\n (0.625, 0.125, 0.875), (0.625, 0.375, 0.875),\n (0.25, 0.75, 0.5),\n (0.375, 0.625, 0.375), (0.0, 0.75, 0.25), (0.0, 1.0, 0.25),\n (0.0, 0.75, 0.0), (0.25, 1.0, 0.25), (0.25, 1.0, 0.0),\n (0.25, 0.75, 0.0), (0.125, 0.875, 0.125), (0.0, 0.75, 0.5),\n (0.125, 0.625, 0.375), (0.25, 1.0, 0.5), (0.125, 0.875, 0.375),\n (0.125, 0.625, 0.125), (0.375, 0.875, 0.375),\n (0.375, 0.875, 0.125),\n (0.375, 0.625, 0.125), (0.25, 0.5, 0.75),\n (0.375, 0.625, 0.625),\n (0.0, 0.75, 0.75), (0.0, 1.0, 0.75), (0.0, 0.75, 1.0),\n (0.25, 1.0, 0.75), (0.25, 1.0, 1.0), (0.25, 0.75, 1.0),\n (0.125, 0.875, 0.875), (0.0, 0.5, 0.75), (0.125, 0.625, 0.625),\n (0.125, 0.875, 0.625), (0.25, 0.5, 1.0), (0.125, 0.625, 0.875),\n (0.375, 0.875, 0.625), (0.375, 0.875, 
0.875),\n (0.375, 0.625, 0.875),\n (0.375, 0.375, 0.625), (0.0, 0.25, 0.75), (0.0, 0.0, 0.75),\n (0.0, 0.25, 1.0), (0.25, 0.0, 0.75), (0.25, 0.0, 1.0),\n (0.25, 0.25, 1.0), (0.125, 0.125, 0.875),\n (0.125, 0.375, 0.625),\n (0.125, 0.125, 0.625), (0.125, 0.375, 0.875),\n (0.375, 0.125, 0.625),\n (0.375, 0.125, 0.875), (0.375, 0.375, 0.875)]\n\n nn_checks = {(0.5, 0.25, 0.25): [(0.375, 0.375, 0.125), (0.5, 0.5, 0.0),\n (0.75, 0.25, 0.25),\n (0.625, 0.375, 0.375),\n (0.625, 0.125, 0.375),\n (0.625, 0.125, 0.125),\n (0.5, 0.5, 0.25), (0.25, 0.25, 0.25),\n (0.375, 0.375, 0.375),\n (0.5, 0.25, 0.5), (0.5, 0.5, 0.5),\n (0.5, 0.0, 0.25),\n (0.375, 0.125, 0.375), (0.5, 0.0, 0.5),\n (0.5, 0.25, 0.0),\n (0.375, 0.125, 0.125), (0.5, 0.0, 0.0),\n (0.625, 0.375, 0.125)],\n (0.625, 0.625, 0.875): [(0.75, 0.5, 1.0),\n (0.75, 0.75, 1.0),\n (0.5, 0.75, 1.0), (0.5, 0.5, 1.0),\n (0.5, 0.5, 0.75),\n (0.5, 0.75, 0.75),\n (0.75, 0.5, 0.75),\n (0.75, 0.75, 0.75)],\n (0, 0, 0): [(0.0, 0.25, 0.0), (0.125, 0.125, 0.125),\n (0.0, 0.0, 0.25), (0.25, 0.0, 0.0),\n (0.0, 0.25, 0.25), (0.25, 0.25, 0.0),\n (0.25, 0.0, 0.25)]}\n\n init_triangulation(3, 2, check, nn_checks)",
"def _like4(init_par, alpha, delta, plx_obs, mualpha_obs, mudelta_obs, vrad_obs, sigma_obs, sigma_vrad, ccoef, i):\r\n\t\r\n\tplx_mod, v, sigma_v = init_par[i], init_par[-4:-1], init_par[-1] \r\n\tp, q, r = normalTriad(alpha, delta)\r\n\tmualpha_mod = np.dot(np.transpose(p),v)*plx_mod/_A ### [mas/yr]\r\n\tmudelta_mod = np.dot(np.transpose(q),v)*plx_mod/_A ### [mas/yr]\r\n\t### Add the model vector for the radial velocities:\r\n\tvrad_mod = np.dot(np.transpose(r),v) ### [km/s]\r\n \t\r\n\tsigma_plx, sigma_mualpha, sigma_mudelta = np.transpose(sigma_obs)\r\n\tC = np.zeros((4,4),dtype=np.float64) ### This is a 4x4 matrix \r\n\t### Diagonal terms:\r\n\tC[0,0],C[1,1],C[2,2] = sigma_plx**2.,sigma_mualpha**2., sigma_mudelta**2.\r\n\tC[3,3] = sigma_vrad**2.\r\n\t\r\n\tr_plx_muRa, r_plx_muDec, r_muRa_muDec = ccoef[0], ccoef[1], ccoef[2] \r\n \r\n\t### Correlation terms:\r\n\tC[0,1], C[0,2] = r_plx_muRa*sigma_plx*sigma_mualpha, r_plx_muDec*sigma_plx*sigma_mudelta\r\n\tC[1,0], C[1,2] = r_plx_muRa*sigma_plx*sigma_mualpha, r_muRa_muDec*sigma_mualpha*sigma_mudelta\r\n\tC[2,0], C[2,1] = r_plx_muDec*sigma_plx*sigma_mudelta, r_muRa_muDec*sigma_mualpha*sigma_mudelta\r\n\r\n\tE = np.zeros((4,4),dtype=np.float64) ### 4x4 matrix \r\n\tE[1,1],E[2,2] = (sigma_v**2.)*(plx_mod/_A)**2., (sigma_v**2.)*(plx_mod/_A)**2. ### [mas/yr]\r\n\tE[3,3] = sigma_v**2.\t\t\t\t\t\t\t\t ### [km/s]\r\n\r\n\t\r\n\tD = np.add(E,C)\r\n\tdetD = det(D) \r\n\tinvD = inv(D)\r\n\t\t\r\n\ta_c = np.array([plx_obs - plx_mod, mualpha_obs - mualpha_mod, mudelta_obs-mudelta_mod, vrad_obs - vrad_mod])\r\n\tg_func = row_matrix_col_4d(a_c, a_c, invD) \r\n\t\r\n\t\r\n\treturn detD, g_func",
"def test_matrix22(gridsize=50):\n\n v1 = vec2(3,0)\n v2 = vec2(0,3)\n\n #rotate 45 degrees \n m22 = matrix22()\n m22.from_euler(45)\n\n # make a second matrix, also 45 degrees, should give us 90 total \n m22_2 = matrix22()\n m22_2.from_euler(45)\n m22 = m22_2 * m22\n\n # mutliply a vector by the matrix \n v3 = m22 * v2 \n\n fb = pixel_op() \n fb.create_buffer(800, 800)\n fb.graticule(gridsize)\n \n pts = [ (0,0), (0,1), (2,1), (0,2) ]\n #bloody_simple_2drender('2d_rotation.png', pts=pts, gridsize=50, pfb=fb)\n\n vecs = [v2,v3]\n bloody_simple_2drender('2d_rotation.png', vecs=vecs, gridsize=50, pfb=fb)\n\n #rotate the points by matrix multiplication \n pts = m22.batch_mult_pts(pts) \n bloody_simple_2drender('2d_rotation.png', pts=pts, gridsize=50, pfb=fb)\n fb.save('2d_rotation.png')",
"def test_linear_interp_to_densepred_is_similar_for_two_levels(mock_amg):\n\n # split all the cells so that we have two tiers\n mock_amg.split_all_cells()\n coarse_grid, fine_grid = mock_amg.grids[0], mock_amg.grids[1]\n\n # obtain the reference solution for the whole domain\n nyf, nxf = fine_grid.ny, fine_grid.nx\n u_ref, v_ref = np.random.rand(nyf, nxf), np.random.rand(nyf, nxf)\n\n # set all the window values\n for ii in range(nyf):\n # determine if the value should go onto the coarse or fine tier\n # coarse tiers are 0, 2, 4, etc and so will return 0 (False) when\n # modded with 2\n coarse_i = False if ii % 2 else True\n for jj in range(nxf):\n # determine if the value should go onto the coarse or fine tier\n coarse_j = False if jj % 2 else True\n # if both coarse_i, coarse_j are True, then we are on the coarse\n # grid, else we are on the fine grid\n if coarse_i and coarse_j:\n mock_amg.grids[0]._array[ii//2][jj//2].u = u_ref[ii][jj]\n mock_amg.grids[0]._array[ii//2][jj//2].v = v_ref[ii][jj]\n else:\n mock_amg.grids[1]._array[ii][jj].u = u_ref[ii][jj]\n mock_amg.grids[1]._array[ii][jj].v = v_ref[ii][jj]\n\n # get the expected interpolations\n xe, ye = np.arange(mock_amg.img_dim[1]), np.arange(mock_amg.img_dim[0])\n f_u = interp.interp2d(fine_grid.x_vec, fine_grid.y_vec,\n u_ref, kind='linear')\n u_exp = f_u(xe, ye)\n f_v = interp.interp2d(fine_grid.x_vec, fine_grid.y_vec,\n v_ref, kind='linear')\n v_exp = f_v(xe, ye)\n\n dp_soln = mock_amg.interp_to_densepred(method='linear')\n\n assert np.allclose(u_exp, dp_soln.u)\n assert np.allclose(v_exp, dp_soln.v)",
"def test_3(self):\n for _ in range(10):\n\n # Draw random requests for testing purposes.\n num_draws_emax = np.random.randint(2, 1000)\n dim = np.random.randint(1, 6)\n\n matrix = np.random.uniform(size=dim ** 2).reshape(dim, dim)\n cov = np.dot(matrix, matrix.T)\n\n # PDF of normal distribution\n args = np.random.normal(size=3)\n args[-1] **= 2\n\n f90 = fort_debug.wrapper_normal_pdf(*args)\n py = norm.pdf(*args)\n\n assert_almost_equal(py, f90)\n\n # Singular Value Decomposition\n py = scipy.linalg.svd(matrix)\n f90 = fort_debug.wrapper_svd(matrix, dim)\n\n for i in range(3):\n assert_allclose(py[i], f90[i])\n\n # Pseudo-Inverse\n py = np.linalg.pinv(matrix)\n f90 = fort_debug.wrapper_pinv(matrix, dim)\n\n assert_allclose(py, f90)\n\n # Inverse\n py = np.linalg.inv(cov)\n f90 = fort_debug.wrapper_inverse(cov, dim)\n assert_allclose(py, f90)\n\n # Determinant\n py = np.linalg.det(cov)\n f90 = fort_debug.wrapper_determinant(cov)\n\n assert_allclose(py, f90)\n\n # Trace\n py = np.trace(cov)\n f90 = fort_debug.wrapper_trace(cov)\n\n assert_allclose(py, f90)\n\n # Random normal deviates. This only tests the interface, requires\n # visual inspection in IPYTHON notebook as well.\n fort_debug.wrapper_standard_normal(num_draws_emax)\n\n # Clipping values below and above bounds.\n num_values = np.random.randint(1, 10000)\n lower_bound = np.random.randn()\n upper_bound = lower_bound + np.random.ranf()\n values = np.random.normal(size=num_values)\n\n f90 = fort_debug.wrapper_clip_value(\n values, lower_bound, upper_bound, num_values\n )\n py = np.clip(values, lower_bound, upper_bound)\n\n assert_almost_equal(py, f90)\n\n # Spectral condition number\n py = _spectral_condition_number(cov)\n fort = fort_debug.wrapper_spectral_condition_number(cov)\n assert_almost_equal(py, fort)"
]
| [
"0.6149543",
"0.6069213",
"0.59663194",
"0.5921558",
"0.59111285",
"0.58696795",
"0.5868889",
"0.58214706",
"0.5758329",
"0.569662",
"0.5670989",
"0.5660078",
"0.56426615",
"0.56170434",
"0.5614147",
"0.55748886",
"0.5574285",
"0.5569398",
"0.5566175",
"0.5548728",
"0.5532132",
"0.5527762",
"0.5525535",
"0.5520251",
"0.5510821",
"0.54991853",
"0.5493978",
"0.5488766",
"0.54742163",
"0.54452693"
]
| 0.61538 | 0 |
Testing M4 remeshing formula in 2D, 1 kernel, simple precision, o2_FullHalf splitting. | def test_2D_m4_1k_sFH():
scal, velo = setup_2D()
advec = Advection(velo, scal, discretization=d2d,
method={TimeIntegrator: RK2,
Interpolation: Linear,
Remesh: L2_1,
Support: 'gpu_1k',
Splitting: 'o2_FullHalf'}
)
advec_py = Advection(velo, scal, discretization=d2d,
method={TimeIntegrator: RK2,
Interpolation: Linear,
Remesh: L2_1,
Support: '',
Splitting: 'o2_FullHalf'}
)
assertion_2D_withPython(scal, velo, advec, advec_py) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_2D_m4_2k_sFH():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: 'gpu_2k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def test_multiple_case(self):\r\n\r\n shp = (3, 3)\r\n fx, fy, fz, fw = fmatrices('xyzw')\r\n dx, dy, dz, dw = dmatrices('xyzw')\r\n fv = fvector('r').dimshuffle('x', 0)\r\n dv = dvector('s').dimshuffle('x', 0)\r\n fxv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fyv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fzv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fwv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float32').reshape(1, shp[0])\r\n dxv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dyv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dzv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dwv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float64').reshape(1, shp[0])\r\n\r\n #We must be sure that the Canonizer is working, but that we don't have other\r\n # optimisation that could hide bug in the Canonizer as local_elemwise_fusion\r\n mode = compile.mode.get_default_mode()\r\n old_optimizer = mode._optimizer\r\n try:\r\n mode._optimizer = gof.Query([\"canonicalize\"])\r\n mode._optimizer = mode._optimizer.including('ShapeOpt')\r\n mode._optimizer = mode._optimizer.excluding(\r\n 'local_elemwise_fusion')\r\n\r\n #test x / x -> 1\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([(fx/fx,[fx],[fxv],'float32'),\r\n (dx/dx,[dx],[dxv],'float64'),\r\n (fv/fv,[fv],[fvv],'float32'),\r\n (dv/dv,[dv],[dvv],'float64'),\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert (out == numpy.ones(shp, dtype=out_dtype)).all()\r\n topo = f.maker.fgraph.toposort()\r\n if sym_inputs[0].broadcastable[0]:\r\n assert len(topo) == 2\r\n assert isinstance(topo[0].op, Shape_i)\r\n assert isinstance(topo[1].op, tensor.Alloc)\r\n else:\r\n assert len(topo) == 3\r\n assert isinstance(topo[0].op, Shape_i)\r\n assert isinstance(topo[1].op, Shape_i)\r\n assert isinstance(topo[2].op, tensor.Alloc)\r\n assert(out_dtype == out.dtype)\r\n\r\n #test (x * y) / x -> y\r\n for id,(g, sym_inputs, val_inputs, nb_elemwise, out_dtype) in enumerate([\r\n ((dx*dy)/dx,[dx,dy],[dxv,dyv],0,'float64'),\r\n ((fx*fy)/fx,[fx,fy],[fxv,fyv],0,'float32'),\r\n ((dv*dy)/dv,[dv,dy],[dvv,dyv],0,'float64'),\r\n ((fv*fy)/fv,[fv,fy],[fvv,fyv],0,'float32'),\r\n #must broadcast as their is a dimshuffle in the computation\r\n ((dx*dv)/dx,[dx,dv],[dxv,dvv],1,'float64'),\r\n #topo: [Elemwise{second,no_inplace}(x, <TensorType(float64, row)>)]\r\n ((fx*fv)/fx,[fx,fv],[fxv,fvv],1,'float32')\r\n #topo: [Elemwise{second,no_inplace}(x, <TensorType(float32, row)>)]\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert(out_dtype == out.dtype)\r\n assert numpy.allclose(out, val_inputs[1])\r\n topo = f.maker.fgraph.toposort()\r\n print \"ID TOPO\", id, topo, sym_inputs\r\n for r, t in f.maker.fgraph.shape_feature.shape_of.items():\r\n print ' ', r, t\r\n if topo and not(len(topo)==1 and topo[0].op==deep_copy_op):\r\n for node in topo[:-1]:\r\n assert isinstance(node.op, Shape_i)\r\n assert isinstance(topo[-1].op, tensor.Alloc)\r\n\r\n #test x / y / x -> 1 / y\r\n for id,(g, sym_inputs, val_inputs, nb_elemwise, out_dtype) in enumerate([\r\n ((dx/dy)/dx,[dx,dy],[dxv,dyv],1,'float64'),\r\n ((fx/fy)/fx,[fx,fy],[fxv,fyv],1,'float32'),\r\n ((dv/dy)/dv,[dv,dy],[dvv,dyv],1,'float64'),\r\n 
((fv/fy)/fv,[fv,fy],[fvv,fyv],1,'float32'),\r\n #must broadcast as their is a dimshuffle in the computation\r\n\r\n ((dx/dv)/dx,[dx,dv],[dxv,dvv],1,'float64'),\r\n #topo: [Shape_i, Shape_i, Elemwise{inv,no_inplace}(<TensorType(float64, row)>), Alloc]\r\n ((fx/fv)/fx,[fx,fv],[fxv,fvv],1,'float32'),\r\n #topo:[Shape_i, Shape_i, Elemwise{inv,no_inplace}(<TensorType(float32, row)>), Alloc]\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, (1 / val_inputs[1]))\r\n topo = f.maker.fgraph.toposort()\r\n print topo\r\n elem = [t for t in topo if isinstance(t.op, T.Elemwise)]\r\n assert len(elem) == nb_elemwise\r\n assert isinstance(elem[0].op, (T.Elemwise, ))\r\n assert isinstance(elem[0].op.scalar_op, (\r\n theano.scalar.basic.Inv, theano.scalar.basic.TrueDiv))\r\n assert(out_dtype == out.dtype)\r\n\r\n #test (a / b) * (b / c) * (c / d) -> a / d\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n ((dx / dy) * (dy / dz) * (dz / dw),[dx,dy,dz,dw],[dxv,dyv,dzv,dwv],'float64'),\r\n ((fx / fy) * (fy / fz) * (fz / fw),[fx,fy,fz,fw],[fxv,fyv,fzv,fwv],'float32'),\r\n ((dv / dy) * (dy / dz) * (dz / dw),[dv,dy,dz,dw],[dvv,dyv,dzv,dwv],'float64'),\r\n ((fv / fy) * (fy / fz) * (fz / fw),[fv,fy,fz,fw],[fvv,fyv,fzv,fwv],'float32'),\r\n ((dx / dv) * (dv / dz) * (dz / dw),[dx,dv,dz,dw],[dxv,dvv,dzv,dwv],'float64'),\r\n ((fx / fv) * (fv / fz) * (fz / fw),[fx,fv,fz,fw],[fxv,fvv,fzv,fwv],'float32'),\r\n ((dx / dy) * (dy / dv) * (dv / dw),[dx,dy,dv,dw],[dxv,dyv,dvv,dwv],'float64'),\r\n ((fx / fy) * (fy / fv) * (fv / fw),[fx,fy,fv,fw],[fxv,fyv,fvv,fwv],'float32'),\r\n ((dx / dy) * (dy / dz) * (dz / dv),[dx,dy,dz,dv],[dxv,dyv,dzv,dvv],'float64'),\r\n ((fx / fy) * (fy / fz) * (fz / fv),[fx,fy,fz,fv],[fxv,fyv,fzv,fvv],'float32'),\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, (val_inputs[0] / val_inputs[3]))\r\n topo = f.maker.fgraph.toposort()\r\n assert len(topo) == 1\r\n assert isinstance(topo[0].op, (T.Elemwise, ))\r\n assert isinstance(topo[0].op.scalar_op,\r\n theano.scalar.basic.TrueDiv)\r\n assert len(topo[0].inputs) == 2\r\n assert(out_dtype == out.dtype)\r\n\r\n #test (2.0 * x) / (4.0 * y) -> (0.5 * x) / y\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n (((2.0*dx)/(4.0*dy)),[dx,dy],[dxv,dyv],'float64'),\r\n (((2.0*fx)/(4.0*fy)),[fx,fy],[fxv,fyv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n (((2.0*dv)/(4.0*dy)),[dv,dy],[dvv,dyv],'float64'),\r\n (((2.0*fv)/(4.0*fy)),[fv,fy],[fvv,fyv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n (((2.0*dx)/(4.0*dv)),[dx,dv],[dxv,dvv],'float64'),\r\n (((2.0*fx)/(4.0*fv)),[fx,fv],[fxv,fvv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ]):\r\n\r\n if isinstance(out_dtype, dict):\r\n out_dtype = out_dtype[config.cast_policy]\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, (0.5 *\r\n val_inputs[0] / val_inputs[1]))\r\n topo = f.maker.fgraph.toposort()\r\n assert len(topo) == 2\r\n assert isinstance(topo[0].op, (T.Elemwise, ))\r\n assert isinstance(topo[0].op.scalar_op,\r\n theano.scalar.basic.Mul)\r\n assert len(topo[0].inputs) == 2\r\n assert isinstance(topo[1].op, (T.Elemwise, ))\r\n assert isinstance(topo[1].op.scalar_op,\r\n theano.scalar.basic.TrueDiv)\r\n assert len(topo[1].inputs) == 2\r\n assert(out_dtype == 
out.dtype)\r\n\r\n #test 2 * x / 2 -> x\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n ((2*dx)/2,[dx],[dxv],'float64'),\r\n ((2*fx)/2,[fx],[fxv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ((2*dv)/2,[dv],[dvv],'float64'),\r\n ((2*fv)/2,[fv],[fvv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ]):\r\n if isinstance(out_dtype, dict):\r\n out_dtype = out_dtype[config.cast_policy]\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, val_inputs[0])\r\n topo = f.maker.fgraph.toposort()\r\n assert len(topo) == 1\r\n topo[0].op == deep_copy_op\r\n assert(out_dtype == out.dtype)\r\n\r\n #test x / abs(x) -> sign(x)\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n (dx/abs(dx),[dx],[0.5-dxv],'float64'),\r\n (fx/abs(fx),[fx],[0.5-fxv], 'float32'),\r\n (dx/abs(dx),[dx],[0.1*dxv],'float64'),\r\n (fx/abs(fx),[fx],[0.1*fxv], 'float32'),\r\n (dv/abs(dv),[dv],[0.5-dvv],'float64'),\r\n (fv/abs(fv),[fv],[0.5-fvv], 'float32'),\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.all(numpy.isfinite(out))\r\n assert numpy.allclose(out, numpy.sign(val_inputs[0]))\r\n assert(out_dtype == out.dtype)\r\n assert len(f.maker.fgraph.toposort()) == 1\r\n\r\n #test (2*x) / (3*abs(x)) -> sign(x)\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n ((2 * dx) / (3 * abs(dx)), [dx], [0.5 - dxv], 'float64'),\r\n ((2 * fx) / (3 * abs(fx)), [fx], [0.5 - fxv],\r\n {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ((2 * dx) / (3 * abs(dx)), [dx], [0.1 * dxv], 'float64'),\r\n ((2 * fx) / (3 * abs(fx)), [fx], [0.1 * fxv],\r\n {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ((2 * dv) / (3 * abs(dv)), [dv], [0.5 - dvv], 'float64'),\r\n ((2 * fv) / (3 * abs(fv)), [fv], [0.5 - fvv],\r\n {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ]):\r\n\r\n if isinstance(out_dtype, dict):\r\n out_dtype = out_dtype[config.cast_policy]\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n topo = f.maker.fgraph.toposort()\r\n out = f(*val_inputs)\r\n assert numpy.all(numpy.isfinite(out))\r\n assert numpy.allclose(out, numpy.sign(val_inputs[0]) * 2 / 3)\r\n assert(out_dtype == out.dtype)\r\n finally:\r\n mode._optimizer = old_optimizer",
"def test_2D_m4_1k():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: 'gpu_1k',\n Splitting: 'o2'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: '',\n Splitting: 'o2'},\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def test_2D_m4_2k():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: 'gpu_2k',\n Splitting: 'o2'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: '',\n Splitting: 'o2'},\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def test_MeshMat_1group(self):\n\n MS_grp = self.meshsol.get_group(\"stator\")\n cells_grp, nb_cell, indices = MS_grp.get_mesh().get_cell()\n solution = np.array([[0, 1, 2], [1, 2, 3]])\n result_tgl = cells_grp[\"triangle\"]\n testA = np.sum(abs(solution - result_tgl))\n msg = (\n \"Wrong output: returned \" + str(result_tgl) + \", expected: \" + str(solution)\n )\n self.assertAlmostEqual(testA, 0, msg=msg, delta=self.DELTA)\n\n MS_grp = self.meshsol.get_group(\"rotor\")\n cells_grp, nb_cell, indices = MS_grp.get_mesh().get_cell()\n solution = np.array([[3, 3], [1, 2], [2, 3]])\n results = cells_grp[\"triangle\"] # The point indices have changed !\n points = MS_grp.get_mesh().get_point(results)\n testA = np.sum(abs(solution - points))\n msg = \"Wrong output: returned \" + str(results) + \", expected: \" + str(solution)\n self.assertAlmostEqual(testA, 0, msg=msg, delta=self.DELTA)",
"def test_dmi_uses_unit_length_2dmesh():\n A = 8.78e-12 # J/m\n D = 1.58e-3 # J/m^2\n Ms = 3.84e5 # A/m\n\n energies = []\n\n # unit_lengths 1e-9 and 1 are common, let's throw in an intermediate length\n # just to challenge the system a little:\n for unit_length in (1, 1e-4, 1e-9):\n radius = 200e-9 / unit_length\n maxh = 5e-9 / unit_length\n helical_period = (4 * pi * A / D) / unit_length\n k = 2 * pi / helical_period\n # HF 27 April 2014: The next command fails in dolfin 1.3\n # mesh = df.CircleMesh(df.Point(0, 0), radius, maxh)\n # The actual shape of the domain shouldn't matter for the test,\n # so let's use a Rectangular mesh which should work the same:\n\n nx = ny = int(round(radius / maxh))\n mesh = df.RectangleMesh(df.Point(0, 0), df.Point(radius, radius), nx, ny)\n\n S3 = df.VectorFunctionSpace(mesh, \"CG\", 1, dim=3)\n m_expr = df.Expression((\"0\", \"cos(k * x[0])\", \"sin(k * x[0])\"), k=k, degree=1)\n m = Field(S3, m_expr, name='m')\n dmi = DMI(D)\n Ms_dg = Field(df.FunctionSpace(mesh, 'DG', 0), Ms)\n dmi.setup(m, Ms_dg, unit_length=unit_length)\n energies.append(dmi.compute_energy())\n\n H = df.Function(S3)\n H.vector()[:] = dmi.compute_field()\n print H(0.0, 0.0)\n\n print \"Using unit_length = {}.\".format(unit_length)\n print \"Helical period {}.\".format(helical_period)\n print \"Energy {}.\".format(dmi.compute_energy())\n\n rel_diff_energies = abs(energies[0] - energies[1]) / abs(energies[1])\n print \"Relative difference of energy {}.\".format(rel_diff_energies)\n assert rel_diff_energies < 1e-13\n\n rel_diff_energies2 = abs(energies[0] - energies[2]) / abs(energies[2])\n print \"Relative difference2 of energy {}.\".format(rel_diff_energies2)\n assert rel_diff_energies2 < 1e-13",
"def test_3_2_4D_cube_splits(self):\n check = [(0, 0, 0, 0), (1, 1, 1, 1), (1, 0, 0, 0), (1, 1, 0, 0),\n (1, 1, 1, 0),\n (1, 1, 0, 1), (1, 0, 1, 0), (1, 0, 1, 1), (1, 0, 0, 1),\n (0, 1, 0, 0),\n (0, 1, 1, 0), (0, 1, 1, 1), (0, 1, 0, 1), (0, 0, 1, 0),\n (0, 0, 1, 1),\n (0, 0, 0, 1), (0.5, 0.5, 0.5, 0.5), (0.0, 0.5, 0.5, 0.5),\n (0.0, 0.0, 0.5, 0.5), (0.0, 0.0, 0.0, 0.5),\n (0.0, 0.0, 0.5, 0.0),\n (0.0, 0.5, 0.0, 0.5), (0.0, 0.5, 0.0, 0.0),\n (0.0, 0.5, 0.5, 0.0),\n (0.5, 0.0, 0.5, 0.5), (0.5, 0.0, 0.0, 0.5),\n (0.5, 0.0, 0.0, 0.0),\n (0.5, 0.0, 0.5, 0.0), (0.5, 0.5, 0.0, 0.5),\n (0.5, 0.5, 0.0, 0.0),\n (0.5, 0.5, 0.5, 0.0), (0.25, 0.25, 0.25, 0.25),\n (1.0, 0.5, 0.5, 0.5),\n (1.0, 1.0, 0.5, 0.5), (1.0, 1.0, 1.0, 0.5),\n (1.0, 1.0, 0.5, 1.0),\n (1.0, 0.5, 1.0, 0.5), (1.0, 0.5, 1.0, 1.0),\n (1.0, 0.5, 0.5, 1.0),\n (0.5, 1.0, 0.5, 0.5), (0.5, 1.0, 1.0, 0.5),\n (0.5, 1.0, 1.0, 1.0),\n (0.5, 1.0, 0.5, 1.0), (0.5, 0.5, 1.0, 0.5),\n (0.5, 0.5, 1.0, 1.0),\n (0.5, 0.5, 0.5, 1.0), (0.75, 0.75, 0.75, 0.75),\n (1.0, 0.0, 0.5, 0.5),\n (1.0, 0.0, 0.0, 0.5), (1.0, 0.0, 0.5, 0.0),\n (1.0, 0.5, 0.0, 0.5),\n (1.0, 0.5, 0.0, 0.0), (1.0, 0.5, 0.5, 0.0),\n (0.75, 0.25, 0.25, 0.25),\n (1.0, 1.0, 0.0, 0.5), (1.0, 1.0, 0.5, 0.0),\n (0.5, 1.0, 0.0, 0.5),\n (0.5, 1.0, 0.0, 0.0), (0.5, 1.0, 0.5, 0.0),\n (0.75, 0.75, 0.25, 0.25),\n (1.0, 0.5, 1.0, 0.0), (0.5, 1.0, 1.0, 0.0),\n (0.5, 0.5, 1.0, 0.0),\n (0.75, 0.75, 0.75, 0.25), (1.0, 0.5, 0.0, 1.0),\n (0.5, 1.0, 0.0, 1.0),\n (0.5, 0.5, 0.0, 1.0), (0.75, 0.75, 0.25, 0.75),\n (1.0, 0.0, 1.0, 0.5),\n (0.5, 0.0, 1.0, 0.5), (0.5, 0.0, 1.0, 0.0),\n (0.75, 0.25, 0.75, 0.25),\n (1.0, 0.0, 0.5, 1.0), (0.5, 0.0, 1.0, 1.0),\n (0.5, 0.0, 0.5, 1.0),\n (0.75, 0.25, 0.75, 0.75), (0.5, 0.0, 0.0, 1.0),\n (0.75, 0.25, 0.25, 0.75), (0.0, 1.0, 0.5, 0.5),\n (0.0, 1.0, 0.0, 0.5),\n (0.0, 1.0, 0.5, 0.0), (0.25, 0.75, 0.25, 0.25),\n (0.0, 1.0, 1.0, 0.5),\n (0.0, 0.5, 1.0, 0.5), (0.0, 0.5, 1.0, 0.0),\n (0.25, 0.75, 0.75, 0.25),\n (0.0, 1.0, 0.5, 1.0), (0.0, 0.5, 1.0, 1.0),\n (0.0, 0.5, 0.5, 1.0),\n (0.25, 0.75, 0.75, 0.75), (0.0, 0.5, 0.0, 1.0),\n (0.25, 0.75, 0.25, 0.75), (0.0, 0.0, 1.0, 0.5),\n (0.25, 0.25, 0.75, 0.25), (0.0, 0.0, 0.5, 1.0),\n (0.25, 0.25, 0.75, 0.75), (0.25, 0.25, 0.25, 0.75)]\n\n nn_checks = {(0, 0, 0, 0): [(0.0, 0.5, 0.5, 0.0), (0.5, 0.0, 0.5, 0.5),\n (0.5, 0.5, 0.0, 0.5),\n (0.25, 0.25, 0.25, 0.25),\n (0.5, 0.0, 0.0, 0.5), (0.5, 0.0, 0.5, 0.0),\n (0.5, 0.5, 0.0, 0.0), (0.0, 0.0, 0.5, 0.0),\n (0.0, 0.5, 0.0, 0.0), (0.0, 0.0, 0.0, 0.5),\n (0.0, 0.5, 0.5, 0.5), (0.0, 0.0, 0.5, 0.5),\n (0.0, 0.5, 0.0, 0.5), (0.5, 0.0, 0.0, 0.0),\n (0.5, 0.5, 0.5, 0.0)],\n (1.0, 1.0, 0.5, 0.5): [(1.0, 1.0, 0.5, 1.0), (1, 1, 0, 1),\n (1.0, 1.0, 1.0, 0.5),\n (1.0, 0.5, 0.5, 0.5), (1, 1, 1, 0),\n (1.0, 1.0, 0.5, 0.0),\n (1.0, 1.0, 0.0, 0.5), (1, 1, 0, 0),\n (1, 1, 1, 1), (0.5, 1.0, 0.5, 0.5),\n (0.5, 0.5, 0.5, 0.5),\n (0.75, 0.75, 0.75, 0.75),\n (0.75, 0.75, 0.25, 0.25),\n (0.75, 0.75, 0.75, 0.25),\n (0.75, 0.75, 0.25, 0.75)],\n (0.25, 0.25, 0.25, 0.75): [(0.0, 0.0, 0.5, 1.0),\n (0.0, 0.5, 0.0, 1.0),\n (0.5, 0.0, 0.0, 1.0),\n (0.5, 0.5, 0.5, 1.0),\n (0, 0, 0, 1),\n (0.0, 0.5, 0.5, 1.0),\n (0.5, 0.0, 0.5, 1.0),\n (0.5, 0.5, 0.0, 1.0),\n (0.0, 0.0, 0.0, 0.5),\n (0.0, 0.0, 0.5, 0.5),\n (0.0, 0.5, 0.5, 0.5),\n (0.0, 0.5, 0.0, 0.5),\n (0.5, 0.0, 0.5, 0.5),\n (0.5, 0.5, 0.0, 0.5),\n (0.5, 0.5, 0.5, 0.5),\n (0.5, 0.0, 0.0, 0.5)]}\n\n init_triangulation(4, 1, check, nn_checks)",
"def test_power_spectral_density_from_spatially_resolved_magnetisation_confined_to_mesh_region(tmpdir, debug=False):\n os.chdir(str(tmpdir))\n RTOL = 1e-10\n\n H1 = 1e6 # external field in A/m\n alpha1 = 0.5 # some sort of damping constant\n omega1 = gamma * H1 # precession frequency\n\n H2 = 2.8e4 # external field in A/m\n alpha2 = 0.3 # some sort of damping constant\n omega2 = gamma * H2 # precession frequency\n\n ##\n # Step 1: Construct a time series of artificial magnetisation\n # data and save it to a bunch of .npy files.\n ##\n t_step = 1e-11\n t_ini = 0\n t_end = 10e-9\n\n N1 = 42 # in a real application this would be the number of mesh vertices\n N2 = 23 # in a real application this would be the number of mesh vertices\n fft_test_helpers.create_test_npy_files_with_two_regions(\n str(tmpdir), t_step, t_ini, t_end, omega1, alpha1, N1, omega2, alpha2, N2)\n\n ##\n # Step 2: compute the FFT of a resampled time series, both by\n # hand and using FFT_m.\n ##\n # XXX TODO: Resampling timesteps is not supported when using .npy\n # files. Either simplify the code below, or implement saving to\n # .h5 files so that it's easier to implement resampling for\n # spatially resolved data, too.\n ##\n t_step_res = t_step\n t_ini_res = t_ini\n t_end_res = t_end\n ts_resampled = np.arange(t_ini_res, t_end_res, t_step_res)\n\n # Compute time series based on resampled timesteps\n mx_res = exp(-ts_resampled * 1e8 / alpha1) * sin(omega1 * ts_resampled)\n my_res = exp(-ts_resampled * 1e8 / alpha1) * cos(omega1 * ts_resampled)\n mz_res = 1 - sqrt(mx_res ** 2 + my_res ** 2)\n\n # Compute 'analytical' Fourier transform of resampled time series and\n # determine the power of the spectrum for each component. We also need\n # to multiply by the number of mesh nodes because the numerical algorithm\n # sums up all contributions at the individual nodes (but we can just\n # multiply because they are all identical by construction).\n psd_mx_expected = N1 * np.absolute(np.fft.rfft(mx_res)) ** 2\n psd_my_expected = N1 * np.absolute(np.fft.rfft(my_res)) ** 2\n psd_mz_expected = N1 * np.absolute(np.fft.rfft(mz_res)) ** 2\n\n # Compute Fourier transform of resampled time series using FFT_m\n freqs_computed, psd_mx_computed, psd_my_computed, psd_mz_computed = \\\n compute_power_spectral_density('m_ringdown*.npy', t_step_res, t_ini=t_ini_res,\n t_end=t_end_res, subtract_values=None, restrict_to_vertices=xrange(N1))\n\n # Check that the analytically determined power spectra are the same as the\n # computed ones.\n assert(np.allclose(psd_mx_expected, psd_mx_computed, atol=0, rtol=RTOL))\n assert(np.allclose(psd_my_expected, psd_my_computed, atol=0, rtol=RTOL))\n assert(np.allclose(psd_mz_expected, psd_mz_computed, atol=0, rtol=RTOL))\n\n if debug:\n # Plot the spectra for debugging\n fig = plt.figure(figsize=(20, 5))\n ax = fig.gca()\n ax.plot(freqs_computed, psd_mx_expected, label='psd_mx_expected')\n ax.plot(freqs_computed, psd_my_expected, label='psd_my_expected')\n ax.plot(freqs_computed, psd_mz_expected, label='psd_mz_expected')\n ax.plot(freqs_computed, psd_mx_computed, label='psd_mx_computed')\n ax.plot(freqs_computed, psd_my_computed, label='psd_my_computed')\n ax.plot(freqs_computed, psd_mz_computed, label='psd_mz_computed')\n ax.legend(loc='best')\n fig.savefig('psd_m_McMichaelStiles.png')",
"def test_1_2_2D_cube_splits(self):\n check = [(0, 0), (1, 1), (1, 0), (0, 1), (0.5, 0.5), (0.0, 0.5),\n (0.5, 0.0),\n (0.25, 0.25), (1.0, 0.5), (0.5, 1.0), (0.75, 0.75),\n (0.75, 0.25),\n (0.25, 0.75), (0.5, 0.25), (0.25, 0.5), (0.375, 0.375),\n (0.0, 0.25),\n (0.25, 0.0), (0.125, 0.125), (0.125, 0.375), (0.375, 0.125),\n (0.5, 0.75), (0.75, 0.5), (0.625, 0.625), (1.0, 0.75),\n (0.75, 1.0),\n (0.875, 0.875), (0.875, 0.625), (0.625, 0.875), (0.625, 0.375),\n (1.0, 0.25), (0.75, 0.0), (0.875, 0.125), (0.875, 0.375),\n (0.625, 0.125), (0.375, 0.625), (0.0, 0.75), (0.25, 1.0),\n (0.125, 0.875), (0.125, 0.625), (0.375, 0.875)]\n\n nn_checks = {(0, 0): [(0.25, 0.0), (0.0, 0.25), (0.125, 0.125)],\n (0.625, 0.375): [(0.5, 0.5), (0.75, 0.25), (0.75, 0.5),\n (0.5, 0.25)],\n (0, 1): [(0.25, 1.0), (0.125, 0.875),(0.0, 0.75)],\n (0.625, 0.125): [(0.5, 0.0), (0.75, 0.25), (0.75, 0.0),\n (0.5, 0.25)]}\n\n\n init_triangulation(2, 2, check, nn_checks)",
"def test_2_2_3D_rec_splits(self):\n check = [(-3.0, -2.0, 0.0), (4.0, 10.0, 1.0), (4.0, -2.0, 0.0),\n (4.0, 10.0, 0.0), (4.0, -2.0, 1.0), (-3.0, 10.0, 0.0),\n (-3.0, 10.0, 1.0), (-3.0, -2.0, 1.0), (0.5, 4.0, 0.5),\n (-3.0, 4.0, 0.5), (-3.0, -2.0, 0.5), (-3.0, 4.0, 0.0),\n (0.5, -2.0, 0.5), (0.5, -2.0, 0.0), (0.5, 4.0, 0.0),\n (-1.25, 1.0, 0.25), (4.0, 4.0, 0.5), (4.0, 10.0, 0.5),\n (4.0, 4.0, 1.0), (0.5, 10.0, 0.5), (0.5, 10.0, 1.0),\n (0.5, 4.0, 1.0), (2.25, 7.0, 0.75), (4.0, -2.0, 0.5),\n (4.0, 4.0, 0.0), (2.25, 1.0, 0.25), (0.5, 10.0, 0.0),\n (2.25, 7.0, 0.25), (0.5, -2.0, 1.0), (2.25, 1.0, 0.75),\n (-3.0, 10.0, 0.5), (-1.25, 7.0, 0.25), (-3.0, 4.0, 1.0),\n (-1.25, 7.0, 0.75), (-1.25, 1.0, 0.75), (0.5, 1.0, 0.25),\n (0.5, 4.0, 0.25), (0.5, 1.0, 0.5), (-1.25, 4.0, 0.25),\n (-1.25, 4.0, 0.5), (-1.25, 1.0, 0.5), (-0.375, 2.5, 0.375),\n (-3.0, 1.0, 0.25), (-3.0, -2.0, 0.25), (-3.0, 1.0, 0.0),\n (-1.25, -2.0, 0.25), (-1.25, -2.0, 0.0), (-1.25, 1.0, 0.0),\n (-2.125, -0.5, 0.125), (-3.0, 4.0, 0.25), (-3.0, 1.0, 0.5),\n (-2.125, 2.5, 0.375), (-1.25, -2.0, 0.5),\n (-2.125, -0.5, 0.375), (-1.25, 4.0, 0.0), (-2.125, 2.5, 0.125),\n (0.5, -2.0, 0.25), (-0.375, -0.5, 0.375), (0.5, 1.0, 0.0),\n (-0.375, -0.5, 0.125), (-0.375, 2.5, 0.125), (0.5, 7.0, 0.75),\n (0.5, 4.0, 0.75), (0.5, 7.0, 0.5), (2.25, 4.0, 0.75),\n (2.25, 4.0, 0.5), (2.25, 7.0, 0.5), (1.375, 5.5, 0.625),\n (4.0, 7.0, 0.75), (4.0, 10.0, 0.75), (4.0, 7.0, 1.0),\n (2.25, 10.0, 0.75), (2.25, 10.0, 1.0), (2.25, 7.0, 1.0),\n (3.125, 8.5, 0.875), (4.0, 4.0, 0.75), (4.0, 7.0, 0.5),\n (3.125, 5.5, 0.625), (2.25, 10.0, 0.5), (3.125, 8.5, 0.625),\n (2.25, 4.0, 1.0), (3.125, 5.5, 0.875), (0.5, 10.0, 0.75),\n (1.375, 8.5, 0.625), (0.5, 7.0, 1.0), (1.375, 8.5, 0.875),\n (1.375, 5.5, 0.875), (2.25, 4.0, 0.25), (2.25, 1.0, 0.5),\n (1.375, 2.5, 0.375), (4.0, 1.0, 0.25), (4.0, -2.0, 0.25),\n (4.0, 1.0, 0.0), (2.25, -2.0, 0.25), (2.25, -2.0, 0.0),\n (2.25, 1.0, 0.0), (3.125, -0.5, 0.125), (4.0, 4.0, 0.25),\n (4.0, 1.0, 0.5), (3.125, 2.5, 0.375), (2.25, -2.0, 0.5),\n (3.125, -0.5, 0.375), (2.25, 4.0, 0.0), (3.125, 2.5, 0.125),\n (1.375, -0.5, 0.375), (1.375, -0.5, 0.125),\n (1.375, 2.5, 0.125), (0.5, 7.0, 0.25), (1.375, 5.5, 0.375),\n (4.0, 7.0, 0.25), (4.0, 10.0, 0.25), (4.0, 7.0, 0.0),\n (2.25, 10.0, 0.25), (2.25, 10.0, 0.0), (2.25, 7.0, 0.0),\n (3.125, 8.5, 0.125), (3.125, 5.5, 0.375), (3.125, 8.5, 0.375),\n (3.125, 5.5, 0.125), (0.5, 10.0, 0.25), (1.375, 8.5, 0.375),\n (0.5, 7.0, 0.0), (1.375, 8.5, 0.125), (1.375, 5.5, 0.125),\n (0.5, 1.0, 0.75), (1.375, 2.5, 0.625), (4.0, 1.0, 0.75),\n (4.0, -2.0, 0.75), (4.0, 1.0, 1.0), (2.25, -2.0, 0.75),\n (2.25, -2.0, 1.0), (2.25, 1.0, 1.0), (3.125, -0.5, 0.875),\n (3.125, 2.5, 0.625), (3.125, -0.5, 0.625), (3.125, 2.5, 0.875),\n (0.5, -2.0, 0.75), (1.375, -0.5, 0.625), (0.5, 1.0, 1.0),\n (1.375, -0.5, 0.875), (1.375, 2.5, 0.875), (-1.25, 7.0, 0.5),\n (-0.375, 5.5, 0.375), (-3.0, 7.0, 0.25), (-3.0, 10.0, 0.25),\n (-3.0, 7.0, 0.0), (-1.25, 10.0, 0.25), (-1.25, 10.0, 0.0),\n (-1.25, 7.0, 0.0), (-2.125, 8.5, 0.125), (-3.0, 7.0, 0.5),\n (-2.125, 5.5, 0.375), (-1.25, 10.0, 0.5), (-2.125, 8.5, 0.375),\n (-2.125, 5.5, 0.125), (-0.375, 8.5, 0.375),\n (-0.375, 8.5, 0.125), (-0.375, 5.5, 0.125), (-1.25, 4.0, 0.75),\n (-0.375, 5.5, 0.625), (-3.0, 7.0, 0.75), (-3.0, 10.0, 0.75),\n (-3.0, 7.0, 1.0), (-1.25, 10.0, 0.75), (-1.25, 10.0, 1.0),\n (-1.25, 7.0, 1.0), (-2.125, 8.5, 0.875), (-3.0, 4.0, 0.75),\n (-2.125, 5.5, 0.625), (-2.125, 8.5, 0.625), (-1.25, 4.0, 1.0),\n (-2.125, 5.5, 0.875), (-0.375, 8.5, 0.625),\n (-0.375, 8.5, 
0.875), (-0.375, 5.5, 0.875),\n (-0.375, 2.5, 0.625), (-3.0, 1.0, 0.75), (-3.0, -2.0, 0.75),\n (-3.0, 1.0, 1.0), (-1.25, -2.0, 0.75), (-1.25, -2.0, 1.0),\n (-1.25, 1.0, 1.0), (-2.125, -0.5, 0.875), (-2.125, 2.5, 0.625),\n (-2.125, -0.5, 0.625), (-2.125, 2.5, 0.875),\n (-0.375, -0.5, 0.625), (-0.375, -0.5, 0.875),\n (-0.375, 2.5, 0.875)]\n nn_checks = {(2.25, 7.0, 0.75): [(4.0, 7.0, 0.75), (2.25, 7.0, 1.0),\n (4.0, 7.0, 0.5), (4.0, 7.0, 1.0),\n (4.0, 4.0, 0.75), (1.375, 5.5, 0.875),\n (2.25, 4.0, 1.0), (2.25, 4.0, 0.5),\n (2.25, 4.0, 0.75), (3.125, 8.5, 0.875),\n (3.125, 8.5, 0.625), (4.0, 10.0, 0.75),\n (2.25, 10.0, 1.0), (2.25, 10.0, 0.75),\n (2.25, 10.0, 0.5), (1.375, 8.5, 0.625),\n (1.375, 8.5, 0.875), (0.5, 7.0, 0.75),\n (0.5, 7.0, 0.5), (3.125, 5.5, 0.625),\n (3.125, 5.5, 0.875), (0.5, 10.0, 0.75),\n (0.5, 7.0, 1.0), (0.5, 4.0, 0.75),\n (2.25, 7.0, 0.5), (1.375, 5.5, 0.625)],\n (4.0, -2.0, 0.5): [(4.0, -2.0, 0.75), (4.0, -2.0, 0.25),\n (2.25, 1.0, 0.5), (2.25, -2.0, 0.75),\n (2.25, -2.0, 0.5), (2.25, -2.0, 0.25),\n (4.0, 1.0, 0.25), (4.0, 1.0, 0.75),\n (4.0, 1.0, 0.5), (3.125, -0.5, 0.375),\n (3.125, -0.5, 0.625)],\n (-2.125, -0.5, 0.875): [(-1.25, 1.0, 1.0),\n (-1.25, 1.0, 0.75),\n (-1.25, -2.0, 0.75),\n (-1.25, -2.0, 1.0),\n (-3.0, -2.0, 0.75),\n (-3.0, 1.0, 1.0), (-3, -2, 1),\n (-3.0, 1.0, 0.75)]}\n\n init_triangulation(3, 2, check, nn_checks, bounds=[(-3, 4), (-2, 10), (0, 1)])",
"def test_3_2_4D_rec_splits(self):\n check = [(-2.0, 3.0, -1.0, 3.0), (9.0, 10.0, 1.0, 5.0),\n (9.0, 3.0, -1.0, 3.0), (9.0, 10.0, -1.0, 3.0),\n (9.0, 10.0, 1.0, 3.0), (9.0, 10.0, -1.0, 5.0),\n (9.0, 3.0, 1.0, 3.0), (9.0, 3.0, 1.0, 5.0),\n (9.0, 3.0, -1.0, 5.0), (-2.0, 10.0, -1.0, 3.0),\n (-2.0, 10.0, 1.0, 3.0), (-2.0, 10.0, 1.0, 5.0),\n (-2.0, 10.0, -1.0, 5.0), (-2.0, 3.0, 1.0, 3.0),\n (-2.0, 3.0, 1.0, 5.0), (-2.0, 3.0, -1.0, 5.0),\n (3.5, 6.5, 0.0, 4.0), (-2.0, 6.5, 0.0, 4.0),\n (-2.0, 3.0, 0.0, 4.0), (-2.0, 3.0, -1.0, 4.0),\n (-2.0, 3.0, 0.0, 3.0), (-2.0, 6.5, -1.0, 4.0),\n (-2.0, 6.5, -1.0, 3.0), (-2.0, 6.5, 0.0, 3.0),\n (3.5, 3.0, 0.0, 4.0), (3.5, 3.0, -1.0, 4.0),\n (3.5, 3.0, -1.0, 3.0), (3.5, 3.0, 0.0, 3.0),\n (3.5, 6.5, -1.0, 4.0), (3.5, 6.5, -1.0, 3.0),\n (3.5, 6.5, 0.0, 3.0), (0.75, 4.75, -0.5, 3.5),\n (9.0, 6.5, 0.0, 4.0), (9.0, 10.0, 0.0, 4.0),\n (9.0, 10.0, 1.0, 4.0), (9.0, 10.0, 0.0, 5.0),\n (9.0, 6.5, 1.0, 4.0), (9.0, 6.5, 1.0, 5.0),\n (9.0, 6.5, 0.0, 5.0), (3.5, 10.0, 0.0, 4.0),\n (3.5, 10.0, 1.0, 4.0), (3.5, 10.0, 1.0, 5.0),\n (3.5, 10.0, 0.0, 5.0), (3.5, 6.5, 1.0, 4.0),\n (3.5, 6.5, 1.0, 5.0), (3.5, 6.5, 0.0, 5.0),\n (6.25, 8.25, 0.5, 4.5), (9.0, 3.0, 0.0, 4.0),\n (9.0, 3.0, -1.0, 4.0), (9.0, 3.0, 0.0, 3.0),\n (9.0, 6.5, -1.0, 4.0), (9.0, 6.5, -1.0, 3.0),\n (9.0, 6.5, 0.0, 3.0), (6.25, 4.75, -0.5, 3.5),\n (9.0, 10.0, -1.0, 4.0), (9.0, 10.0, 0.0, 3.0),\n (3.5, 10.0, -1.0, 4.0), (3.5, 10.0, -1.0, 3.0),\n (3.5, 10.0, 0.0, 3.0), (6.25, 8.25, -0.5, 3.5),\n (9.0, 6.5, 1.0, 3.0), (3.5, 10.0, 1.0, 3.0),\n (3.5, 6.5, 1.0, 3.0), (6.25, 8.25, 0.5, 3.5),\n (9.0, 6.5, -1.0, 5.0), (3.5, 10.0, -1.0, 5.0),\n (3.5, 6.5, -1.0, 5.0), (6.25, 8.25, -0.5, 4.5),\n (9.0, 3.0, 1.0, 4.0), (3.5, 3.0, 1.0, 4.0),\n (3.5, 3.0, 1.0, 3.0), (6.25, 4.75, 0.5, 3.5),\n (9.0, 3.0, 0.0, 5.0), (3.5, 3.0, 1.0, 5.0),\n (3.5, 3.0, 0.0, 5.0), (6.25, 4.75, 0.5, 4.5),\n (3.5, 3.0, -1.0, 5.0), (6.25, 4.75, -0.5, 4.5),\n (-2.0, 10.0, 0.0, 4.0), (-2.0, 10.0, -1.0, 4.0),\n (-2.0, 10.0, 0.0, 3.0), (0.75, 8.25, -0.5, 3.5),\n (-2.0, 10.0, 1.0, 4.0), (-2.0, 6.5, 1.0, 4.0),\n (-2.0, 6.5, 1.0, 3.0), (0.75, 8.25, 0.5, 3.5),\n (-2.0, 10.0, 0.0, 5.0), (-2.0, 6.5, 1.0, 5.0),\n (-2.0, 6.5, 0.0, 5.0), (0.75, 8.25, 0.5, 4.5),\n (-2.0, 6.5, -1.0, 5.0), (0.75, 8.25, -0.5, 4.5),\n (-2.0, 3.0, 1.0, 4.0), (0.75, 4.75, 0.5, 3.5),\n (-2.0, 3.0, 0.0, 5.0), (0.75, 4.75, 0.5, 4.5),\n (0.75, 4.75, -0.5, 4.5), (3.5, 4.75, -0.5, 3.5),\n (3.5, 6.5, -0.5, 3.5), (3.5, 6.5, 0.0, 3.5),\n (3.5, 6.5, -0.5, 4.0), (3.5, 4.75, 0.0, 3.5),\n (3.5, 4.75, 0.0, 4.0), (3.5, 4.75, -0.5, 4.0),\n (0.75, 6.5, -0.5, 3.5), (0.75, 6.5, 0.0, 3.5),\n (0.75, 6.5, 0.0, 4.0), (0.75, 6.5, -0.5, 4.0),\n (0.75, 4.75, 0.0, 3.5), (0.75, 4.75, 0.0, 4.0),\n (0.75, 4.75, -0.5, 4.0), (2.125, 5.625, -0.25, 3.75),\n (-2.0, 4.75, -0.5, 3.5), (-2.0, 3.0, -0.5, 3.5),\n (-2.0, 3.0, -1.0, 3.5), (-2.0, 3.0, -0.5, 3.0),\n (-2.0, 4.75, -1.0, 3.5), (-2.0, 4.75, -1.0, 3.0),\n (-2.0, 4.75, -0.5, 3.0), (0.75, 3.0, -0.5, 3.5),\n (0.75, 3.0, -1.0, 3.5), (0.75, 3.0, -1.0, 3.0),\n (0.75, 3.0, -0.5, 3.0), (0.75, 4.75, -1.0, 3.5),\n (0.75, 4.75, -1.0, 3.0), (0.75, 4.75, -0.5, 3.0),\n (-0.625, 3.875, -0.75, 3.25), (-2.0, 6.5, -0.5, 3.5),\n (-2.0, 6.5, 0.0, 3.5), (-2.0, 6.5, -0.5, 4.0),\n (-2.0, 4.75, 0.0, 3.5), (-2.0, 4.75, 0.0, 4.0),\n (-2.0, 4.75, -0.5, 4.0), (-0.625, 5.625, -0.25, 3.75),\n (-2.0, 3.0, 0.0, 3.5), (-2.0, 3.0, -0.5, 4.0),\n (0.75, 3.0, 0.0, 3.5), (0.75, 3.0, 0.0, 4.0),\n (0.75, 3.0, -0.5, 4.0), (-0.625, 3.875, -0.25, 3.75),\n (-2.0, 4.75, -1.0, 4.0), (0.75, 3.0, -1.0, 4.0),\n (0.75, 
4.75, -1.0, 4.0), (-0.625, 3.875, -0.75, 3.75),\n (-2.0, 4.75, 0.0, 3.0), (0.75, 3.0, 0.0, 3.0),\n (0.75, 4.75, 0.0, 3.0), (-0.625, 3.875, -0.25, 3.25),\n (-2.0, 6.5, -1.0, 3.5), (0.75, 6.5, -1.0, 3.5),\n (0.75, 6.5, -1.0, 4.0), (-0.625, 5.625, -0.75, 3.75),\n (-2.0, 6.5, -0.5, 3.0), (0.75, 6.5, -1.0, 3.0),\n (0.75, 6.5, -0.5, 3.0), (-0.625, 5.625, -0.75, 3.25),\n (0.75, 6.5, 0.0, 3.0), (-0.625, 5.625, -0.25, 3.25),\n (3.5, 3.0, -0.5, 3.5), (3.5, 3.0, 0.0, 3.5),\n (3.5, 3.0, -0.5, 4.0), (2.125, 3.875, -0.25, 3.75),\n (3.5, 3.0, -1.0, 3.5), (3.5, 4.75, -1.0, 3.5),\n (3.5, 4.75, -1.0, 4.0), (2.125, 3.875, -0.75, 3.75),\n (3.5, 3.0, -0.5, 3.0), (3.5, 4.75, -1.0, 3.0),\n (3.5, 4.75, -0.5, 3.0), (2.125, 3.875, -0.75, 3.25),\n (3.5, 4.75, 0.0, 3.0), (2.125, 3.875, -0.25, 3.25),\n (3.5, 6.5, -1.0, 3.5), (2.125, 5.625, -0.75, 3.75),\n (3.5, 6.5, -0.5, 3.0), (2.125, 5.625, -0.75, 3.25),\n (2.125, 5.625, -0.25, 3.25), (3.5, 8.25, 0.5, 4.5),\n (3.5, 6.5, 0.5, 4.5), (3.5, 6.5, 0.0, 4.5),\n (3.5, 6.5, 0.5, 4.0), (3.5, 8.25, 0.0, 4.5),\n (3.5, 8.25, 0.0, 4.0), (3.5, 8.25, 0.5, 4.0),\n (6.25, 6.5, 0.5, 4.5), (6.25, 6.5, 0.0, 4.5),\n (6.25, 6.5, 0.0, 4.0), (6.25, 6.5, 0.5, 4.0),\n (6.25, 8.25, 0.0, 4.5), (6.25, 8.25, 0.0, 4.0),\n (6.25, 8.25, 0.5, 4.0), (4.875, 7.375, 0.25, 4.25),\n (9.0, 8.25, 0.5, 4.5), (9.0, 10.0, 0.5, 4.5),\n (9.0, 10.0, 1.0, 4.5), (9.0, 10.0, 0.5, 5.0),\n (9.0, 8.25, 1.0, 4.5), (9.0, 8.25, 1.0, 5.0),\n (9.0, 8.25, 0.5, 5.0), (6.25, 10.0, 0.5, 4.5),\n (6.25, 10.0, 1.0, 4.5), (6.25, 10.0, 1.0, 5.0),\n (6.25, 10.0, 0.5, 5.0), (6.25, 8.25, 1.0, 4.5),\n (6.25, 8.25, 1.0, 5.0), (6.25, 8.25, 0.5, 5.0),\n (7.625, 9.125, 0.75, 4.75), (9.0, 6.5, 0.5, 4.5),\n (9.0, 6.5, 0.0, 4.5), (9.0, 6.5, 0.5, 4.0),\n (9.0, 8.25, 0.0, 4.5), (9.0, 8.25, 0.0, 4.0),\n (9.0, 8.25, 0.5, 4.0), (7.625, 7.375, 0.25, 4.25),\n (9.0, 10.0, 0.0, 4.5), (9.0, 10.0, 0.5, 4.0),\n (6.25, 10.0, 0.0, 4.5), (6.25, 10.0, 0.0, 4.0),\n (6.25, 10.0, 0.5, 4.0), (7.625, 9.125, 0.25, 4.25),\n (9.0, 8.25, 1.0, 4.0), (6.25, 10.0, 1.0, 4.0),\n (6.25, 8.25, 1.0, 4.0), (7.625, 9.125, 0.75, 4.25),\n (9.0, 8.25, 0.0, 5.0), (6.25, 10.0, 0.0, 5.0),\n (6.25, 8.25, 0.0, 5.0), (7.625, 9.125, 0.25, 4.75),\n (9.0, 6.5, 1.0, 4.5), (6.25, 6.5, 1.0, 4.5),\n (6.25, 6.5, 1.0, 4.0), (7.625, 7.375, 0.75, 4.25),\n (9.0, 6.5, 0.5, 5.0), (6.25, 6.5, 1.0, 5.0),\n (6.25, 6.5, 0.5, 5.0), (7.625, 7.375, 0.75, 4.75),\n (6.25, 6.5, 0.0, 5.0), (7.625, 7.375, 0.25, 4.75),\n (3.5, 10.0, 0.5, 4.5), (3.5, 10.0, 0.0, 4.5),\n (3.5, 10.0, 0.5, 4.0), (4.875, 9.125, 0.25, 4.25),\n (3.5, 10.0, 1.0, 4.5), (3.5, 8.25, 1.0, 4.5),\n (3.5, 8.25, 1.0, 4.0), (4.875, 9.125, 0.75, 4.25),\n (3.5, 10.0, 0.5, 5.0), (3.5, 8.25, 1.0, 5.0),\n (3.5, 8.25, 0.5, 5.0), (4.875, 9.125, 0.75, 4.75),\n (3.5, 8.25, 0.0, 5.0), (4.875, 9.125, 0.25, 4.75),\n (3.5, 6.5, 1.0, 4.5), (4.875, 7.375, 0.75, 4.25),\n (3.5, 6.5, 0.5, 5.0), (4.875, 7.375, 0.75, 4.75),\n (4.875, 7.375, 0.25, 4.75), (6.25, 6.5, -0.5, 3.5),\n (6.25, 6.5, 0.0, 3.5), (6.25, 6.5, -0.5, 4.0),\n (6.25, 4.75, 0.0, 3.5), (6.25, 4.75, 0.0, 4.0),\n (6.25, 4.75, -0.5, 4.0), (4.875, 5.625, -0.25, 3.75),\n (9.0, 4.75, -0.5, 3.5), (9.0, 3.0, -0.5, 3.5),\n (9.0, 3.0, -1.0, 3.5), (9.0, 3.0, -0.5, 3.0),\n (9.0, 4.75, -1.0, 3.5), (9.0, 4.75, -1.0, 3.0),\n (9.0, 4.75, -0.5, 3.0), (6.25, 3.0, -0.5, 3.5),\n (6.25, 3.0, -1.0, 3.5), (6.25, 3.0, -1.0, 3.0),\n (6.25, 3.0, -0.5, 3.0), (6.25, 4.75, -1.0, 3.5),\n (6.25, 4.75, -1.0, 3.0), (6.25, 4.75, -0.5, 3.0),\n (7.625, 3.875, -0.75, 3.25), (9.0, 6.5, -0.5, 3.5),\n (9.0, 6.5, 0.0, 3.5), (9.0, 6.5, -0.5, 
4.0),\n (9.0, 4.75, 0.0, 3.5), (9.0, 4.75, 0.0, 4.0),\n (9.0, 4.75, -0.5, 4.0), (7.625, 5.625, -0.25, 3.75),\n (9.0, 3.0, 0.0, 3.5), (9.0, 3.0, -0.5, 4.0),\n (6.25, 3.0, 0.0, 3.5), (6.25, 3.0, 0.0, 4.0),\n (6.25, 3.0, -0.5, 4.0), (7.625, 3.875, -0.25, 3.75),\n (9.0, 4.75, -1.0, 4.0), (6.25, 3.0, -1.0, 4.0),\n (6.25, 4.75, -1.0, 4.0), (7.625, 3.875, -0.75, 3.75),\n (9.0, 4.75, 0.0, 3.0), (6.25, 3.0, 0.0, 3.0),\n (6.25, 4.75, 0.0, 3.0), (7.625, 3.875, -0.25, 3.25),\n (9.0, 6.5, -1.0, 3.5), (6.25, 6.5, -1.0, 3.5),\n (6.25, 6.5, -1.0, 4.0), (7.625, 5.625, -0.75, 3.75),\n (9.0, 6.5, -0.5, 3.0), (6.25, 6.5, -1.0, 3.0),\n (6.25, 6.5, -0.5, 3.0), (7.625, 5.625, -0.75, 3.25),\n (6.25, 6.5, 0.0, 3.0), (7.625, 5.625, -0.25, 3.25),\n (4.875, 3.875, -0.25, 3.75), (4.875, 3.875, -0.75, 3.75),\n (4.875, 3.875, -0.75, 3.25), (4.875, 3.875, -0.25, 3.25),\n (4.875, 5.625, -0.75, 3.75), (4.875, 5.625, -0.75, 3.25),\n (4.875, 5.625, -0.25, 3.25), (3.5, 8.25, -0.5, 3.5),\n (3.5, 8.25, 0.0, 3.5), (3.5, 8.25, -0.5, 4.0),\n (6.25, 8.25, 0.0, 3.5), (6.25, 8.25, -0.5, 4.0),\n (4.875, 7.375, -0.25, 3.75), (9.0, 8.25, -0.5, 3.5),\n (9.0, 10.0, -0.5, 3.5), (9.0, 10.0, -1.0, 3.5),\n (9.0, 10.0, -0.5, 3.0), (9.0, 8.25, -1.0, 3.5),\n (9.0, 8.25, -1.0, 3.0), (9.0, 8.25, -0.5, 3.0),\n (6.25, 10.0, -0.5, 3.5), (6.25, 10.0, -1.0, 3.5),\n (6.25, 10.0, -1.0, 3.0), (6.25, 10.0, -0.5, 3.0),\n (6.25, 8.25, -1.0, 3.5), (6.25, 8.25, -1.0, 3.0),\n (6.25, 8.25, -0.5, 3.0), (7.625, 9.125, -0.75, 3.25),\n (9.0, 8.25, 0.0, 3.5), (9.0, 8.25, -0.5, 4.0),\n (7.625, 7.375, -0.25, 3.75), (9.0, 10.0, 0.0, 3.5),\n (9.0, 10.0, -0.5, 4.0), (6.25, 10.0, 0.0, 3.5),\n (6.25, 10.0, -0.5, 4.0), (7.625, 9.125, -0.25, 3.75),\n (9.0, 8.25, -1.0, 4.0), (6.25, 10.0, -1.0, 4.0),\n (6.25, 8.25, -1.0, 4.0), (7.625, 9.125, -0.75, 3.75),\n (9.0, 8.25, 0.0, 3.0), (6.25, 10.0, 0.0, 3.0),\n (6.25, 8.25, 0.0, 3.0), (7.625, 9.125, -0.25, 3.25),\n (7.625, 7.375, -0.75, 3.75), (7.625, 7.375, -0.75, 3.25),\n (7.625, 7.375, -0.25, 3.25), (3.5, 10.0, -0.5, 3.5),\n (3.5, 10.0, 0.0, 3.5), (3.5, 10.0, -0.5, 4.0),\n (4.875, 9.125, -0.25, 3.75), (3.5, 10.0, -1.0, 3.5),\n (3.5, 8.25, -1.0, 3.5), (3.5, 8.25, -1.0, 4.0),\n (4.875, 9.125, -0.75, 3.75), (3.5, 10.0, -0.5, 3.0),\n (3.5, 8.25, -1.0, 3.0), (3.5, 8.25, -0.5, 3.0),\n (4.875, 9.125, -0.75, 3.25), (3.5, 8.25, 0.0, 3.0),\n (4.875, 9.125, -0.25, 3.25), (4.875, 7.375, -0.75, 3.75),\n (4.875, 7.375, -0.75, 3.25), (4.875, 7.375, -0.25, 3.25),\n (3.5, 8.25, 0.5, 3.5), (3.5, 6.5, 0.5, 3.5),\n (6.25, 6.5, 0.5, 3.5), (4.875, 7.375, 0.25, 3.75),\n (9.0, 8.25, 0.5, 3.5), (9.0, 10.0, 0.5, 3.5),\n (9.0, 10.0, 1.0, 3.5), (9.0, 10.0, 0.5, 3.0),\n (9.0, 8.25, 1.0, 3.5), (9.0, 8.25, 1.0, 3.0),\n (9.0, 8.25, 0.5, 3.0), (6.25, 10.0, 0.5, 3.5),\n (6.25, 10.0, 1.0, 3.5), (6.25, 10.0, 1.0, 3.0),\n (6.25, 10.0, 0.5, 3.0), (6.25, 8.25, 1.0, 3.5),\n (6.25, 8.25, 1.0, 3.0), (6.25, 8.25, 0.5, 3.0),\n (7.625, 9.125, 0.75, 3.25), (9.0, 6.5, 0.5, 3.5),\n (7.625, 7.375, 0.25, 3.75), (7.625, 9.125, 0.25, 3.75),\n (7.625, 9.125, 0.75, 3.75), (7.625, 9.125, 0.25, 3.25),\n (9.0, 6.5, 1.0, 3.5), (6.25, 6.5, 1.0, 3.5),\n (7.625, 7.375, 0.75, 3.75), (9.0, 6.5, 0.5, 3.0),\n (6.25, 6.5, 1.0, 3.0), (6.25, 6.5, 0.5, 3.0),\n (7.625, 7.375, 0.75, 3.25), (7.625, 7.375, 0.25, 3.25),\n (3.5, 10.0, 0.5, 3.5), (4.875, 9.125, 0.25, 3.75),\n (3.5, 10.0, 1.0, 3.5), (3.5, 8.25, 1.0, 3.5),\n (4.875, 9.125, 0.75, 3.75), (3.5, 10.0, 0.5, 3.0),\n (3.5, 8.25, 1.0, 3.0), (3.5, 8.25, 0.5, 3.0),\n (4.875, 9.125, 0.75, 3.25), (4.875, 9.125, 0.25, 3.25),\n (3.5, 6.5, 1.0, 3.5), 
(4.875, 7.375, 0.75, 3.75),\n (3.5, 6.5, 0.5, 3.0), (4.875, 7.375, 0.75, 3.25),\n (4.875, 7.375, 0.25, 3.25), (3.5, 8.25, -0.5, 4.5),\n (3.5, 6.5, -0.5, 4.5), (6.25, 6.5, -0.5, 4.5),\n (4.875, 7.375, -0.25, 4.25), (9.0, 8.25, -0.5, 4.5),\n (9.0, 10.0, -0.5, 4.5), (9.0, 10.0, -1.0, 4.5),\n (9.0, 10.0, -0.5, 5.0), (9.0, 8.25, -1.0, 4.5),\n (9.0, 8.25, -1.0, 5.0), (9.0, 8.25, -0.5, 5.0),\n (6.25, 10.0, -0.5, 4.5), (6.25, 10.0, -1.0, 4.5),\n (6.25, 10.0, -1.0, 5.0), (6.25, 10.0, -0.5, 5.0),\n (6.25, 8.25, -1.0, 4.5), (6.25, 8.25, -1.0, 5.0),\n (6.25, 8.25, -0.5, 5.0), (7.625, 9.125, -0.75, 4.75),\n (9.0, 6.5, -0.5, 4.5), (7.625, 7.375, -0.25, 4.25),\n (7.625, 9.125, -0.25, 4.25), (7.625, 9.125, -0.75, 4.25),\n (7.625, 9.125, -0.25, 4.75), (9.0, 6.5, -1.0, 4.5),\n (6.25, 6.5, -1.0, 4.5), (7.625, 7.375, -0.75, 4.25),\n (9.0, 6.5, -0.5, 5.0), (6.25, 6.5, -1.0, 5.0),\n (6.25, 6.5, -0.5, 5.0), (7.625, 7.375, -0.75, 4.75),\n (7.625, 7.375, -0.25, 4.75), (3.5, 10.0, -0.5, 4.5),\n (4.875, 9.125, -0.25, 4.25), (3.5, 10.0, -1.0, 4.5),\n (3.5, 8.25, -1.0, 4.5), (4.875, 9.125, -0.75, 4.25),\n (3.5, 10.0, -0.5, 5.0), (3.5, 8.25, -1.0, 5.0),\n (3.5, 8.25, -0.5, 5.0), (4.875, 9.125, -0.75, 4.75),\n (4.875, 9.125, -0.25, 4.75), (3.5, 6.5, -1.0, 4.5),\n (4.875, 7.375, -0.75, 4.25), (3.5, 6.5, -0.5, 5.0),\n (4.875, 7.375, -0.75, 4.75), (4.875, 7.375, -0.25, 4.75),\n (3.5, 4.75, 0.5, 3.5), (3.5, 4.75, 0.5, 4.0),\n (6.25, 4.75, 0.5, 4.0), (4.875, 5.625, 0.25, 3.75),\n (9.0, 4.75, 0.5, 3.5), (9.0, 3.0, 0.5, 3.5),\n (9.0, 3.0, 1.0, 3.5), (9.0, 3.0, 0.5, 3.0),\n (9.0, 4.75, 1.0, 3.5), (9.0, 4.75, 1.0, 3.0),\n (9.0, 4.75, 0.5, 3.0), (6.25, 3.0, 0.5, 3.5),\n (6.25, 3.0, 1.0, 3.5), (6.25, 3.0, 1.0, 3.0),\n (6.25, 3.0, 0.5, 3.0), (6.25, 4.75, 1.0, 3.5),\n (6.25, 4.75, 1.0, 3.0), (6.25, 4.75, 0.5, 3.0),\n (7.625, 3.875, 0.75, 3.25), (9.0, 4.75, 0.5, 4.0),\n (7.625, 5.625, 0.25, 3.75), (9.0, 3.0, 0.5, 4.0),\n (6.25, 3.0, 0.5, 4.0), (7.625, 3.875, 0.25, 3.75),\n (9.0, 4.75, 1.0, 4.0), (6.25, 3.0, 1.0, 4.0),\n (6.25, 4.75, 1.0, 4.0), (7.625, 3.875, 0.75, 3.75),\n (7.625, 3.875, 0.25, 3.25), (7.625, 5.625, 0.75, 3.75),\n (7.625, 5.625, 0.75, 3.25), (7.625, 5.625, 0.25, 3.25),\n (3.5, 3.0, 0.5, 3.5), (3.5, 3.0, 0.5, 4.0),\n (4.875, 3.875, 0.25, 3.75), (3.5, 3.0, 1.0, 3.5),\n (3.5, 4.75, 1.0, 3.5), (3.5, 4.75, 1.0, 4.0),\n (4.875, 3.875, 0.75, 3.75), (3.5, 3.0, 0.5, 3.0),\n (3.5, 4.75, 1.0, 3.0), (3.5, 4.75, 0.5, 3.0),\n (4.875, 3.875, 0.75, 3.25), (4.875, 3.875, 0.25, 3.25),\n (4.875, 5.625, 0.75, 3.75), (4.875, 5.625, 0.75, 3.25),\n (4.875, 5.625, 0.25, 3.25), (3.5, 4.75, 0.5, 4.5),\n (3.5, 4.75, 0.0, 4.5), (6.25, 4.75, 0.0, 4.5),\n (4.875, 5.625, 0.25, 4.25), (9.0, 4.75, 0.5, 4.5),\n (9.0, 3.0, 0.5, 4.5), (9.0, 3.0, 1.0, 4.5),\n (9.0, 3.0, 0.5, 5.0), (9.0, 4.75, 1.0, 4.5),\n (9.0, 4.75, 1.0, 5.0), (9.0, 4.75, 0.5, 5.0),\n (6.25, 3.0, 0.5, 4.5), (6.25, 3.0, 1.0, 4.5),\n (6.25, 3.0, 1.0, 5.0), (6.25, 3.0, 0.5, 5.0),\n (6.25, 4.75, 1.0, 4.5), (6.25, 4.75, 1.0, 5.0),\n (6.25, 4.75, 0.5, 5.0), (7.625, 3.875, 0.75, 4.75),\n (9.0, 4.75, 0.0, 4.5), (7.625, 5.625, 0.25, 4.25),\n (9.0, 3.0, 0.0, 4.5), (6.25, 3.0, 0.0, 4.5),\n (7.625, 3.875, 0.25, 4.25), (7.625, 3.875, 0.75, 4.25),\n (9.0, 4.75, 0.0, 5.0), (6.25, 3.0, 0.0, 5.0),\n (6.25, 4.75, 0.0, 5.0), (7.625, 3.875, 0.25, 4.75),\n (7.625, 5.625, 0.75, 4.25), (7.625, 5.625, 0.75, 4.75),\n (7.625, 5.625, 0.25, 4.75), (3.5, 3.0, 0.5, 4.5),\n (3.5, 3.0, 0.0, 4.5), (4.875, 3.875, 0.25, 4.25),\n (3.5, 3.0, 1.0, 4.5), (3.5, 4.75, 1.0, 4.5),\n (4.875, 3.875, 0.75, 4.25), (3.5, 3.0, 0.5, 
5.0),\n (3.5, 4.75, 1.0, 5.0), (3.5, 4.75, 0.5, 5.0),\n (4.875, 3.875, 0.75, 4.75), (3.5, 4.75, 0.0, 5.0),\n (4.875, 3.875, 0.25, 4.75), (4.875, 5.625, 0.75, 4.25),\n (4.875, 5.625, 0.75, 4.75), (4.875, 5.625, 0.25, 4.75),\n (3.5, 4.75, -0.5, 4.5), (4.875, 5.625, -0.25, 4.25),\n (9.0, 4.75, -0.5, 4.5), (9.0, 3.0, -0.5, 4.5),\n (9.0, 3.0, -1.0, 4.5), (9.0, 3.0, -0.5, 5.0),\n (9.0, 4.75, -1.0, 4.5), (9.0, 4.75, -1.0, 5.0),\n (9.0, 4.75, -0.5, 5.0), (6.25, 3.0, -0.5, 4.5),\n (6.25, 3.0, -1.0, 4.5), (6.25, 3.0, -1.0, 5.0),\n (6.25, 3.0, -0.5, 5.0), (6.25, 4.75, -1.0, 4.5),\n (6.25, 4.75, -1.0, 5.0), (6.25, 4.75, -0.5, 5.0),\n (7.625, 3.875, -0.75, 4.75), (7.625, 5.625, -0.25, 4.25),\n (7.625, 3.875, -0.25, 4.25), (7.625, 3.875, -0.75, 4.25),\n (7.625, 3.875, -0.25, 4.75), (7.625, 5.625, -0.75, 4.25),\n (7.625, 5.625, -0.75, 4.75), (7.625, 5.625, -0.25, 4.75),\n (3.5, 3.0, -0.5, 4.5), (4.875, 3.875, -0.25, 4.25),\n (3.5, 3.0, -1.0, 4.5), (3.5, 4.75, -1.0, 4.5),\n (4.875, 3.875, -0.75, 4.25), (3.5, 3.0, -0.5, 5.0),\n (3.5, 4.75, -1.0, 5.0), (3.5, 4.75, -0.5, 5.0),\n (4.875, 3.875, -0.75, 4.75), (4.875, 3.875, -0.25, 4.75),\n (4.875, 5.625, -0.75, 4.25), (4.875, 5.625, -0.75, 4.75),\n (4.875, 5.625, -0.25, 4.75), (0.75, 8.25, 0.0, 3.5),\n (0.75, 8.25, 0.0, 4.0), (0.75, 8.25, -0.5, 4.0),\n (2.125, 7.375, -0.25, 3.75), (-2.0, 8.25, -0.5, 3.5),\n (-2.0, 10.0, -0.5, 3.5), (-2.0, 10.0, -1.0, 3.5),\n (-2.0, 10.0, -0.5, 3.0), (-2.0, 8.25, -1.0, 3.5),\n (-2.0, 8.25, -1.0, 3.0), (-2.0, 8.25, -0.5, 3.0),\n (0.75, 10.0, -0.5, 3.5), (0.75, 10.0, -1.0, 3.5),\n (0.75, 10.0, -1.0, 3.0), (0.75, 10.0, -0.5, 3.0),\n (0.75, 8.25, -1.0, 3.5), (0.75, 8.25, -1.0, 3.0),\n (0.75, 8.25, -0.5, 3.0), (-0.625, 9.125, -0.75, 3.25),\n (-2.0, 8.25, 0.0, 3.5), (-2.0, 8.25, 0.0, 4.0),\n (-2.0, 8.25, -0.5, 4.0), (-0.625, 7.375, -0.25, 3.75),\n (-2.0, 10.0, 0.0, 3.5), (-2.0, 10.0, -0.5, 4.0),\n (0.75, 10.0, 0.0, 3.5), (0.75, 10.0, 0.0, 4.0),\n (0.75, 10.0, -0.5, 4.0), (-0.625, 9.125, -0.25, 3.75),\n (-2.0, 8.25, -1.0, 4.0), (0.75, 10.0, -1.0, 4.0),\n (0.75, 8.25, -1.0, 4.0), (-0.625, 9.125, -0.75, 3.75),\n (-2.0, 8.25, 0.0, 3.0), (0.75, 10.0, 0.0, 3.0),\n (0.75, 8.25, 0.0, 3.0), (-0.625, 9.125, -0.25, 3.25),\n (-0.625, 7.375, -0.75, 3.75), (-0.625, 7.375, -0.75, 3.25),\n (-0.625, 7.375, -0.25, 3.25), (2.125, 9.125, -0.25, 3.75),\n (2.125, 9.125, -0.75, 3.75), (2.125, 9.125, -0.75, 3.25),\n (2.125, 9.125, -0.25, 3.25), (2.125, 7.375, -0.75, 3.75),\n (2.125, 7.375, -0.75, 3.25), (2.125, 7.375, -0.25, 3.25),\n (0.75, 6.5, 0.5, 3.5), (0.75, 6.5, 0.5, 4.0),\n (0.75, 8.25, 0.5, 4.0), (2.125, 7.375, 0.25, 3.75),\n (-2.0, 8.25, 0.5, 3.5), (-2.0, 10.0, 0.5, 3.5),\n (-2.0, 10.0, 1.0, 3.5), (-2.0, 10.0, 0.5, 3.0),\n (-2.0, 8.25, 1.0, 3.5), (-2.0, 8.25, 1.0, 3.0),\n (-2.0, 8.25, 0.5, 3.0), (0.75, 10.0, 0.5, 3.5),\n (0.75, 10.0, 1.0, 3.5), (0.75, 10.0, 1.0, 3.0),\n (0.75, 10.0, 0.5, 3.0), (0.75, 8.25, 1.0, 3.5),\n (0.75, 8.25, 1.0, 3.0), (0.75, 8.25, 0.5, 3.0),\n (-0.625, 9.125, 0.75, 3.25), (-2.0, 6.5, 0.5, 3.5),\n (-2.0, 6.5, 0.5, 4.0), (-2.0, 8.25, 0.5, 4.0),\n (-0.625, 7.375, 0.25, 3.75), (-2.0, 10.0, 0.5, 4.0),\n (0.75, 10.0, 0.5, 4.0), (-0.625, 9.125, 0.25, 3.75),\n (-2.0, 8.25, 1.0, 4.0), (0.75, 10.0, 1.0, 4.0),\n (0.75, 8.25, 1.0, 4.0), (-0.625, 9.125, 0.75, 3.75),\n (-0.625, 9.125, 0.25, 3.25), (-2.0, 6.5, 1.0, 3.5),\n (0.75, 6.5, 1.0, 3.5), (0.75, 6.5, 1.0, 4.0),\n (-0.625, 7.375, 0.75, 3.75), (-2.0, 6.5, 0.5, 3.0),\n (0.75, 6.5, 1.0, 3.0), (0.75, 6.5, 0.5, 3.0),\n (-0.625, 7.375, 0.75, 3.25), (-0.625, 7.375, 0.25, 3.25),\n (2.125, 
9.125, 0.25, 3.75), (2.125, 9.125, 0.75, 3.75),\n (2.125, 9.125, 0.75, 3.25), (2.125, 9.125, 0.25, 3.25),\n (2.125, 7.375, 0.75, 3.75), (2.125, 7.375, 0.75, 3.25),\n (2.125, 7.375, 0.25, 3.25), (0.75, 6.5, 0.5, 4.5),\n (0.75, 6.5, 0.0, 4.5), (0.75, 8.25, 0.0, 4.5),\n (2.125, 7.375, 0.25, 4.25), (-2.0, 8.25, 0.5, 4.5),\n (-2.0, 10.0, 0.5, 4.5), (-2.0, 10.0, 1.0, 4.5),\n (-2.0, 10.0, 0.5, 5.0), (-2.0, 8.25, 1.0, 4.5),\n (-2.0, 8.25, 1.0, 5.0), (-2.0, 8.25, 0.5, 5.0),\n (0.75, 10.0, 0.5, 4.5), (0.75, 10.0, 1.0, 4.5),\n (0.75, 10.0, 1.0, 5.0), (0.75, 10.0, 0.5, 5.0),\n (0.75, 8.25, 1.0, 4.5), (0.75, 8.25, 1.0, 5.0),\n (0.75, 8.25, 0.5, 5.0), (-0.625, 9.125, 0.75, 4.75),\n (-2.0, 6.5, 0.5, 4.5), (-2.0, 6.5, 0.0, 4.5),\n (-2.0, 8.25, 0.0, 4.5), (-0.625, 7.375, 0.25, 4.25),\n (-2.0, 10.0, 0.0, 4.5), (0.75, 10.0, 0.0, 4.5),\n (-0.625, 9.125, 0.25, 4.25), (-0.625, 9.125, 0.75, 4.25),\n (-2.0, 8.25, 0.0, 5.0), (0.75, 10.0, 0.0, 5.0),\n (0.75, 8.25, 0.0, 5.0), (-0.625, 9.125, 0.25, 4.75),\n (-2.0, 6.5, 1.0, 4.5), (0.75, 6.5, 1.0, 4.5),\n (-0.625, 7.375, 0.75, 4.25), (-2.0, 6.5, 0.5, 5.0),\n (0.75, 6.5, 1.0, 5.0), (0.75, 6.5, 0.5, 5.0),\n (-0.625, 7.375, 0.75, 4.75), (0.75, 6.5, 0.0, 5.0),\n (-0.625, 7.375, 0.25, 4.75), (2.125, 9.125, 0.25, 4.25),\n (2.125, 9.125, 0.75, 4.25), (2.125, 9.125, 0.75, 4.75),\n (2.125, 9.125, 0.25, 4.75), (2.125, 7.375, 0.75, 4.25),\n (2.125, 7.375, 0.75, 4.75), (2.125, 7.375, 0.25, 4.75),\n (0.75, 6.5, -0.5, 4.5), (2.125, 7.375, -0.25, 4.25),\n (-2.0, 8.25, -0.5, 4.5), (-2.0, 10.0, -0.5, 4.5),\n (-2.0, 10.0, -1.0, 4.5), (-2.0, 10.0, -0.5, 5.0),\n (-2.0, 8.25, -1.0, 4.5), (-2.0, 8.25, -1.0, 5.0),\n (-2.0, 8.25, -0.5, 5.0), (0.75, 10.0, -0.5, 4.5),\n (0.75, 10.0, -1.0, 4.5), (0.75, 10.0, -1.0, 5.0),\n (0.75, 10.0, -0.5, 5.0), (0.75, 8.25, -1.0, 4.5),\n (0.75, 8.25, -1.0, 5.0), (0.75, 8.25, -0.5, 5.0),\n (-0.625, 9.125, -0.75, 4.75), (-2.0, 6.5, -0.5, 4.5),\n (-0.625, 7.375, -0.25, 4.25), (-0.625, 9.125, -0.25, 4.25),\n (-0.625, 9.125, -0.75, 4.25), (-0.625, 9.125, -0.25, 4.75),\n (-2.0, 6.5, -1.0, 4.5), (0.75, 6.5, -1.0, 4.5),\n (-0.625, 7.375, -0.75, 4.25), (-2.0, 6.5, -0.5, 5.0),\n (0.75, 6.5, -1.0, 5.0), (0.75, 6.5, -0.5, 5.0),\n (-0.625, 7.375, -0.75, 4.75), (-0.625, 7.375, -0.25, 4.75),\n (2.125, 9.125, -0.25, 4.25), (2.125, 9.125, -0.75, 4.25),\n (2.125, 9.125, -0.75, 4.75), (2.125, 9.125, -0.25, 4.75),\n (2.125, 7.375, -0.75, 4.25), (2.125, 7.375, -0.75, 4.75),\n (2.125, 7.375, -0.25, 4.75), (0.75, 4.75, 0.5, 4.0),\n (2.125, 5.625, 0.25, 3.75), (-2.0, 4.75, 0.5, 3.5),\n (-2.0, 3.0, 0.5, 3.5), (-2.0, 3.0, 1.0, 3.5),\n (-2.0, 3.0, 0.5, 3.0), (-2.0, 4.75, 1.0, 3.5),\n (-2.0, 4.75, 1.0, 3.0), (-2.0, 4.75, 0.5, 3.0),\n (0.75, 3.0, 0.5, 3.5), (0.75, 3.0, 1.0, 3.5),\n (0.75, 3.0, 1.0, 3.0), (0.75, 3.0, 0.5, 3.0),\n (0.75, 4.75, 1.0, 3.5), (0.75, 4.75, 1.0, 3.0),\n (0.75, 4.75, 0.5, 3.0), (-0.625, 3.875, 0.75, 3.25),\n (-2.0, 4.75, 0.5, 4.0), (-0.625, 5.625, 0.25, 3.75),\n (-2.0, 3.0, 0.5, 4.0), (0.75, 3.0, 0.5, 4.0),\n (-0.625, 3.875, 0.25, 3.75), (-2.0, 4.75, 1.0, 4.0),\n (0.75, 3.0, 1.0, 4.0), (0.75, 4.75, 1.0, 4.0),\n (-0.625, 3.875, 0.75, 3.75), (-0.625, 3.875, 0.25, 3.25),\n (-0.625, 5.625, 0.75, 3.75), (-0.625, 5.625, 0.75, 3.25),\n (-0.625, 5.625, 0.25, 3.25), (2.125, 3.875, 0.25, 3.75),\n (2.125, 3.875, 0.75, 3.75), (2.125, 3.875, 0.75, 3.25),\n (2.125, 3.875, 0.25, 3.25), (2.125, 5.625, 0.75, 3.75),\n (2.125, 5.625, 0.75, 3.25), (2.125, 5.625, 0.25, 3.25),\n (0.75, 4.75, 0.0, 4.5), (2.125, 5.625, 0.25, 4.25),\n (-2.0, 4.75, 0.5, 4.5), (-2.0, 3.0, 0.5, 4.5),\n 
(-2.0, 3.0, 1.0, 4.5), (-2.0, 3.0, 0.5, 5.0),\n (-2.0, 4.75, 1.0, 4.5), (-2.0, 4.75, 1.0, 5.0),\n (-2.0, 4.75, 0.5, 5.0), (0.75, 3.0, 0.5, 4.5),\n (0.75, 3.0, 1.0, 4.5), (0.75, 3.0, 1.0, 5.0),\n (0.75, 3.0, 0.5, 5.0), (0.75, 4.75, 1.0, 4.5),\n (0.75, 4.75, 1.0, 5.0), (0.75, 4.75, 0.5, 5.0),\n (-0.625, 3.875, 0.75, 4.75), (-2.0, 4.75, 0.0, 4.5),\n (-0.625, 5.625, 0.25, 4.25), (-2.0, 3.0, 0.0, 4.5),\n (0.75, 3.0, 0.0, 4.5), (-0.625, 3.875, 0.25, 4.25),\n (-0.625, 3.875, 0.75, 4.25), (-2.0, 4.75, 0.0, 5.0),\n (0.75, 3.0, 0.0, 5.0), (0.75, 4.75, 0.0, 5.0),\n (-0.625, 3.875, 0.25, 4.75), (-0.625, 5.625, 0.75, 4.25),\n (-0.625, 5.625, 0.75, 4.75), (-0.625, 5.625, 0.25, 4.75),\n (2.125, 3.875, 0.25, 4.25), (2.125, 3.875, 0.75, 4.25),\n (2.125, 3.875, 0.75, 4.75), (2.125, 3.875, 0.25, 4.75),\n (2.125, 5.625, 0.75, 4.25), (2.125, 5.625, 0.75, 4.75),\n (2.125, 5.625, 0.25, 4.75), (2.125, 5.625, -0.25, 4.25),\n (-2.0, 4.75, -0.5, 4.5), (-2.0, 3.0, -0.5, 4.5),\n (-2.0, 3.0, -1.0, 4.5), (-2.0, 3.0, -0.5, 5.0),\n (-2.0, 4.75, -1.0, 4.5), (-2.0, 4.75, -1.0, 5.0),\n (-2.0, 4.75, -0.5, 5.0), (0.75, 3.0, -0.5, 4.5),\n (0.75, 3.0, -1.0, 4.5), (0.75, 3.0, -1.0, 5.0),\n (0.75, 3.0, -0.5, 5.0), (0.75, 4.75, -1.0, 4.5),\n (0.75, 4.75, -1.0, 5.0), (0.75, 4.75, -0.5, 5.0),\n (-0.625, 3.875, -0.75, 4.75), (-0.625, 5.625, -0.25, 4.25),\n (-0.625, 3.875, -0.25, 4.25), (-0.625, 3.875, -0.75, 4.25),\n (-0.625, 3.875, -0.25, 4.75), (-0.625, 5.625, -0.75, 4.25),\n (-0.625, 5.625, -0.75, 4.75), (-0.625, 5.625, -0.25, 4.75),\n (2.125, 3.875, -0.25, 4.25), (2.125, 3.875, -0.75, 4.25),\n (2.125, 3.875, -0.75, 4.75), (2.125, 3.875, -0.25, 4.75),\n (2.125, 5.625, -0.75, 4.25), (2.125, 5.625, -0.75, 4.75),\n (2.125, 5.625, -0.25, 4.75)]\n nn_checks = {(0.75, 3.0, -0.5, 4.0): [(0.75, 4.75, -0.5, 4.0),\n (0.75, 4.75, -0.5, 4.5),\n (2.125, 3.875, -0.25, 3.75),\n (2.125, 3.875, -0.75, 3.75),\n (2.125, 3.875, -0.25, 4.25),\n (2.125, 3.875, -0.75, 4.25),\n (3.5, 3.0, 0.0, 4.0),\n (3.5, 3.0, -0.5, 4.0),\n (0.75, 3.0, 0.0, 4.0),\n (0.75, 3.0, -0.5, 4.5),\n (-2.0, 3.0, -0.5, 4.0),\n (-2.0, 3.0, 0.0, 4.0),\n (-0.625, 3.875, -0.25, 4.25),\n (-0.625, 3.875, -0.75, 4.25),\n (-0.625, 3.875, -0.75, 3.75),\n (0.75, 3.0, -0.5, 3.5),\n (-2.0, 3.0, -1.0, 4.0),\n (0.75, 3.0, -1.0, 4.0),\n (3.5, 3.0, -1.0, 4.0),\n (0.75, 4.75, -0.5, 3.5),\n (-0.625, 3.875, -0.25, 3.75)],\n (-2.0, 3.0, -1.0, 3.0): [(-0.625, 3.875, -0.75, 3.25),\n (0.75, 3.0, -0.5, 3.0),\n (-2.0, 4.75, -1.0, 3.0),\n (-2.0, 4.75, -1.0, 3.5),\n (0.75, 3.0, -1.0, 3.0),\n (0.75, 4.75, -1.0, 3.5),\n (0.75, 3.0, -1.0, 3.5),\n (0.75, 4.75, -1.0, 3.0),\n (-2.0, 3.0, -0.5, 3.0),\n (-2.0, 3.0, -0.5, 3.5),\n (0.75, 4.75, -0.5, 3.0),\n (-2.0, 3.0, -1.0, 3.5),\n (-2.0, 4.75, -0.5, 3.5),\n (-2.0, 4.75, -0.5, 3.0),\n (0.75, 3.0, -0.5, 3.5)],\n (-0.625, 5.625, -0.75, 4.25): [(0.75, 4.75, -0.5, 4.0),\n (0.75, 4.75, -0.5, 4.5),\n (-2.0, 6.5, -1.0, 4.5),\n (-2.0, 6.5, -1.0, 4.0),\n (0.75, 6.5, -1.0, 4.0),\n (0.75, 6.5, -1.0, 4.5),\n (0.75, 4.75, -1.0, 4.5),\n (0.75, 4.75, -1.0, 4.0),\n (-2.0, 4.75, -1.0, 4.0),\n (-2.0, 4.75, -1.0, 4.5),\n (-2.0, 4.75, -0.5, 4.5),\n (-2.0, 4.75, -0.5, 4.0),\n (0.75, 6.5, -0.5, 4.0),\n (0.75, 6.5, -0.5, 4.5),\n (-2.0, 6.5, -0.5, 4.5),\n (-2.0, 6.5, -0.5, 4.0)]}\n\n init_triangulation(4, 2, check, nn_checks,\n bounds=[(-2, 9), (3, 10), (-1, 1), (3, 5)])",
"def test_filter_l2_1():\n box = Box(length=L, origin=O)\n f = Field(box, formula=func, name='f0')\n d_fine = Discretization([513, 513, 513])\n d_coarse = Discretization([257, 257, 257], ghosts=[2, 2, 2])\n op = MultiresolutionFilter(d_in=d_fine, d_out=d_coarse,\n variables={f: d_coarse},\n method={Remesh: L2_1, })\n op.discretize()\n op.setup()\n topo_coarse = op.discreteFields[f].topology\n topo_fine = [t for t in f.discreteFields.keys()\n if not t is topo_coarse][0]\n f.initialize(topo=topo_fine)\n f_out = f.discreteFields[topo_coarse]\n op.apply(simu)\n valid = [npw.zeros(f_out[0].shape), ]\n valid = func(valid, *topo_coarse.mesh.coords)\n assert np.allclose(valid[0][topo_coarse.mesh.iCompute],\n f_out[0][topo_coarse.mesh.iCompute]), \\\n np.max(np.abs(valid[0][topo_coarse.mesh.iCompute] -\n f_out[0][topo_coarse.mesh.iCompute]))",
"def test_1_2_2D_rec_splits(self):\n check = [(3.0, -2.0), (7.0, -1.0), (7.0, -2.0), (3.0, -1.0),\n (5.0, -1.5), (3.0, -1.5), (5.0, -2.0), (4.0, -1.75),\n (7.0, -1.5), (5.0, -1.0), (6.0, -1.25), (6.0, -1.75),\n (4.0, -1.25), (5.0, -1.75), (4.0, -1.5), (4.5, -1.625),\n (3.0, -1.75), (4.0, -2.0), (3.5, -1.875), (3.5, -1.625),\n (4.5, -1.875), (5.0, -1.25), (6.0, -1.5), (5.5, -1.375),\n (7.0, -1.25), (6.0, -1.0), (6.5, -1.125), (6.5, -1.375),\n (5.5, -1.125), (5.5, -1.625), (7.0, -1.75), (6.0, -2.0),\n (6.5, -1.875), (6.5, -1.625), (5.5, -1.875), (4.5, -1.375),\n (3.0, -1.25), (4.0, -1.0), (3.5, -1.125), (3.5, -1.375),\n (4.5, -1.125)]\n nn_checks = {(3.0, -2.0): [(3.0, -1.75), (3.5, -1.875), (4.0, -2.0)],\n (5.0, -1.75): [(5.0, -2.0), (5.0, -1.5), (5.5, -1.625),\n (5.5, -1.875), (4.5, -1.625), (6.0, -1.75),\n (4.5, -1.875), (4.0, -1.75)],\n (6.0, -2.0): [(5.0, -2.0), (5.5, -1.875), (6.0, -1.75),\n (6.5, -1.875), (7, -2)],\n (4.5, -1.125): [(5.0, -1.0), (4.0, -1.25), (5.0, -1.25),\n (4.0, -1.0)]}\n\n init_triangulation(2, 2, check, nn_checks, bounds=[(3, 7), (-2, -1)])",
"def _apply_array_spatial12_halffilling(self, h1e: 'Nparray',\n h2e: 'Nparray') -> 'Nparray':\n if fqe.settings.use_accelerated_code:\n return self._apply_array_spatial12_lm(h1e, h2e)\n else:\n h1e = copy.deepcopy(h1e)\n h2e = numpy.moveaxis(copy.deepcopy(h2e), 1, 2) * (-1.0)\n norb = self.norb()\n for k in range(norb):\n h1e[:, :] -= h2e[:, k, k, :]\n\n if numpy.iscomplex(h1e).any() or numpy.iscomplex(h2e).any():\n dvec = self.calculate_dvec_spatial()\n out = numpy.einsum(\"ij,ijkl->kl\", h1e, dvec)\n dvec = numpy.einsum(\"ijkl,klmn->ijmn\", h2e, dvec)\n out += self._calculate_coeff_spatial_with_dvec(dvec)\n else:\n nij = norb * (norb + 1) // 2\n h1ec = numpy.zeros((nij), dtype=self._dtype)\n h2ec = numpy.zeros((nij, nij), dtype=self._dtype)\n for i in range(norb):\n for j in range(i + 1):\n ijn = j + i * (i + 1) // 2\n h1ec[ijn] = h1e[i, j]\n for k in range(norb):\n for l in range(k + 1):\n kln = l + k * (k + 1) // 2\n h2ec[ijn, kln] = h2e[i, j, k, l]\n dvec = self._calculate_dvec_spatial_compressed()\n out = numpy.einsum(\"i,ikl->kl\", h1ec, dvec)\n dvec = numpy.einsum(\"ik,kmn->imn\", h2ec, dvec)\n for i in range(self.norb()):\n for j in range(self.norb()):\n ijn = min(i, j) + max(i, j) * (max(i, j) + 1) // 2\n work = self._core.alpha_map(j, i)\n for source, target, parity in work:\n out[source, :] += dvec[ijn, target, :] * parity\n work = self._core.beta_map(j, i)\n for source, target, parity in work:\n out[:, source] += dvec[ijn, :, target] * parity\n\n return out",
"def subdivision(mesh):\n\t\n\t\n\t# 1. generate new nodes in the centre of quad\n\t# 1/4 o-------o 1/4 o: existing vertices\n\t# | | *: newly-generated vertices\n\t# | * |\n\t# | |\n\t# 1/4 o-------o 1/4\n\n\tnew_coor = mesh.give_nodes().give_coor()\n\t\n\tfor face_index in range(mesh.give_model_inf()[2]): \n\t\tnew_x, new_y, new_z = (0, 0, 0)\n\t\tfor vertex_index in range(4):\n\t\t\tmesh.give_faces()\n\t\t\tnode_index = mesh.give_faces().give_node_list(face_index)[vertex_index]\n\n\t\t\tnew_x += 0.25*mesh.give_nodes().give_coor(node_index)[0]\n\t\t\tnew_y += 0.25*mesh.give_nodes().give_coor(node_index)[1]\n\t\t\tnew_z += 0.25*mesh.give_nodes().give_coor(node_index)[2]\n\t\t\t\n\t\tnew_coor.append((new_x, new_y, new_z))\n\t\t\n\t# generating new nodes on the edge\n\t# figure out one edge is shared by how many surfaces\n\tedge_shared_by_faces_list = helper.find_edge_shared_by_which_faces(mesh.give_edges(), mesh.give_faces())\n\t\n\tfor edge_index in range(mesh.give_model_inf()[1]):\n\n\t\tnew_x, new_y, new_z = (0., 0., 0.)\n\t\t\n\t# 2. generate new node on boundary edge\n\t# o: existing vertices\n\t# 1/2 o---*---o 1/2 *: newly-generated vertices\n\t# \n\n\t\tnew_coor = mesh.give_nodes().give_coor()\n\t\tif len(edge_shared_by_faces_list[edge_index]) == 1:\t\n\t\t\tnew_x, new_y, new_z = (0., 0., 0.)\n\t\t\tfor vertex_index in range(2):\n\t\t\t\tthis_node = mesh.give_edges().give_node(edge_index)[vertex_index]\n\t\t\t\tnew_x += 0.5*mesh.give_nodes().give_coor()[this_node][0]\n\t\t\t\tnew_y += 0.5*mesh.give_nodes().give_coor()[this_node][1]\n\t\t\t\tnew_z += 0.5*mesh.give_nodes().give_coor()[this_node][2]\n\t\t\t\t\n\t\t\tnew_coor.append((new_x, new_y, new_z))\n\t\t\t\t\n\t# 3. generate new node on interior edge\n\t# 1/16 o-------o 1/16 o: existing vertices\n\t# | | *: newly-generated vertices\n\t# 3/8 o---*---o 3/8\n\t# | |\n\t# 1/16 o-------o 1/16\n\n\t\telse:\n\t\t\tnew_x, new_y, new_z = (0., 0., 0.)\n\t\t\tconsidered_node = []\n\t\t\tfor vertex_index in range(2):\n\t\t\t\tthis_node = mesh.give_edges().give_node(edge_index)[vertex_index]\n\t\t\t\tconsidered_node.append(this_node)\n\t\t\t\tnew_x += 3./8.*mesh.give_nodes().give_coor()[this_node][0]\n\t\t\t\tnew_y += 3./8.*mesh.give_nodes().give_coor()[this_node][1]\n\t\t\t\tnew_z += 3./8.*mesh.give_nodes().give_coor()[this_node][2]\n\t\t\t\n\t\t\t# faces contain this node\n\t\t\tpotential_node = []\n\t\t\tfor face_index in edge_shared_by_faces_list[edge_index]:\t\t\n\t\t\t\tfor vertex_index in range(4):\n\t\t\t\t\t\tpotential_node.append(mesh.give_faces().give_node_list(face_index)[vertex_index])\n\t\t\t\n\t\t\touter_node = []\n\t\t\tfor node in potential_node:\n\t\t\t\tif (node not in considered_node) & (node not in outer_node):\n\t\t\t\t\touter_node.append(node)\n\t\t\t\t\t\n\t\t\tfor vertex_index in outer_node:\n\t\t\t\tnew_x += 1./16.*mesh.give_nodes().give_coor()[vertex_index][0]\n\t\t\t\tnew_y += 1./16.*mesh.give_nodes().give_coor()[vertex_index][1]\n\t\t\t\tnew_z += 1./16.*mesh.give_nodes().give_coor()[vertex_index][2]\n\t\t\t\n\t\t\tnew_coor.append((new_x, new_y, new_z))\n\n\t# update the links of edges and surfaces\n\tnew_edge_list = []\n\tnew_face_list = []\n\tfor face_index in range(mesh.give_model_inf()[2]):\n\t\told_node0 = mesh.give_faces().give_node_list(face_index)[0]\n\t\told_node1 = mesh.give_faces().give_node_list(face_index)[1]\n\t\told_node2 = mesh.give_faces().give_node_list(face_index)[2]\n\t\told_node3 = mesh.give_faces().give_node_list(face_index)[3]\n\t\t\n\t\told_edge0 = 
mesh.give_faces().give_edge_list(face_index)[0]\n\t\told_edge1 = mesh.give_faces().give_edge_list(face_index)[1]\n\t\told_edge2 = mesh.give_faces().give_edge_list(face_index)[2]\n\t\told_edge3 = mesh.give_faces().give_edge_list(face_index)[3]\n\t\t\n\t\tnew_node4 = old_edge0 + mesh.give_model_inf()[0] + mesh.give_model_inf()[2] \n\t\tnew_node5 = old_edge1 + mesh.give_model_inf()[0] + mesh.give_model_inf()[2]\n\t\tnew_node6 = old_edge2 + mesh.give_model_inf()[0] + mesh.give_model_inf()[2]\n\t\tnew_node7 = old_edge3 + mesh.give_model_inf()[0] + mesh.give_model_inf()[2]\t\n\t\tnew_node8 = mesh.give_model_inf()[0] + face_index\n\t\t\n\t\tif helper.in_list((old_node0, new_node4), new_edge_list) == False: \n\t\t\tnew_edge_list.append((old_node0, new_node4))\n\t\tif helper.in_list((new_node4, new_node8), new_edge_list) == False: \n\t\t\tnew_edge_list.append((new_node4, new_node8))\n\t\tif helper.in_list((new_node8, new_node7), new_edge_list) == False: \n\t\t\tnew_edge_list.append((new_node8, new_node7))\n\t\tif helper.in_list((new_node7, old_node0), new_edge_list) == False: \n\t\t\tnew_edge_list.append((new_node7, old_node0))\n\t\tif helper.in_list((new_node4, old_node1), new_edge_list) == False: \n\t\t\tnew_edge_list.append((new_node4, old_node1))\n\t\tif helper.in_list((old_node1, new_node5), new_edge_list) == False: \n\t\t\tnew_edge_list.append((old_node1, new_node5))\n\t\tif helper.in_list((new_node5, new_node8), new_edge_list) == False: \n\t\t\tnew_edge_list.append((new_node5, new_node8))\n\t\tif helper.in_list((new_node7, old_node3), new_edge_list) == False: \n\t\t\tnew_edge_list.append((new_node7, old_node3))\n\t\tif helper.in_list((old_node3, new_node6), new_edge_list) == False: \n\t\t\tnew_edge_list.append((old_node3, new_node6))\n\t\tif helper.in_list((new_node6, new_node8), new_edge_list) == False: \n\t\t\tnew_edge_list.append((new_node6, new_node8))\n\t\tif helper.in_list((new_node6, old_node2), new_edge_list) == False: \n\t\t\tnew_edge_list.append((new_node6, old_node2))\n\t\tif helper.in_list((old_node2, new_node5), new_edge_list) == False: \n\t\t\tnew_edge_list.append((old_node2, new_node5))\n\t\n\t\tnew_face_list.append((old_node0, new_node4, new_node8, new_node7))\n\t\tnew_face_list.append((new_node4, old_node1, new_node5, new_node8))\n\t\tnew_face_list.append((new_node7, new_node8, new_node6, old_node3))\n\t\tnew_face_list.append((new_node8, new_node5, old_node2, new_node6))\n\t\t\n\tnew_edges = geo.Edge(new_edge_list)\n\t\n\tnew_faces = geo.Face(new_face_list, new_edges)\n\t\t\n\t# update existing nodes\t\n\tfor node_index in range(mesh.give_model_inf()[0]):\n\t\t\n\t\tring1, ring2 = helper.find_neighbour_node(new_edges, new_faces, node_index)\n\t\tvalence = helper.find_valence(node_index, new_faces) \n\t\t#: valence: the number of faces sharing on specific edge\n\n\t# 4. 
update existing corner vertex\n\t# 2/4 @---* 1/4 *: newly-generated vertices\n\t# | | @: existing vertices to be updated\n\t# 1/4 *---* 0 The higher mask values on neighbouring vertices, \n\t# the more likely a square mesh will be refined into a sphere.\n\t \n\t\tif valence == 1:\n\n\t\t\tnew_x, new_y, new_z = (0, 0, 0)\n\t\t\tprint\n\t\t\tfor node_in_ring1 in ring1:\n\t\t\t\tnew_x += 1./4.*mesh.give_nodes().give_coor()[node_in_ring1][0]\n\t\t\t\tnew_y += 1./4.*mesh.give_nodes().give_coor()[node_in_ring1][1]\n\t\t\t\tnew_z += 1./4.*mesh.give_nodes().give_coor()[node_in_ring1][2]\n\n\t\t\tfor node_in_ring2 in ring2:\n\t\t\t\tnew_x += 0.*mesh.give_nodes().give_coor()[node_in_ring2][0]\n\t\t\t\tnew_y += 0.*mesh.give_nodes().give_coor()[node_in_ring2][1]\n\t\t\t\tnew_z += 0.*mesh.give_nodes().give_coor()[node_in_ring2][2]\n\t\t\t\t\n\t\t\tnew_x += 2./4.*mesh.give_nodes().give_coor()[node_index][0]\n\t\t\tnew_y += 2./4.*mesh.give_nodes().give_coor()[node_index][1]\n\t\t\tnew_z += 2./4.*mesh.give_nodes().give_coor()[node_index][2]\n\n\t# 5. update existing boundary joint vertex\n\t# 3/4\n\t# 1/8 *---*---* 1/8 *: newly-generated vertices\n\t# | | | @: existing vertices to be updated\n\t# 0 *---*---* 0\n\n\t\telif valence == 2:\n\t\t\t\n\t\t\tnew_x, new_y, new_z = (0, 0, 0)\n\t\t\tfor node_in_ring1 in ring1:\n\t\t\t\tif helper.find_valence(node_in_ring1, new_faces) <= 2: \n\t\t\t\t\tnew_x += 1./8.*mesh.give_nodes().give_coor()[node_in_ring1][0]\n\t\t\t\t\tnew_y += 1./8.*mesh.give_nodes().give_coor()[node_in_ring1][1]\n\t\t\t\t\tnew_z += 1./8.*mesh.give_nodes().give_coor()[node_in_ring1][2]\n\t\t\t\t\t\n\t\t\tnew_x += 3./4.*mesh.give_nodes().give_coor()[node_index][0]\n\t\t\tnew_y += 3./4.*mesh.give_nodes().give_coor()[node_index][1]\n\t\t\tnew_z += 3./4.*mesh.give_nodes().give_coor()[node_index][2]\n\t\n\t# 6. update new node on interior edge\n\t# * r/k\n\t# /\\ b/k*\n\t# *__/ \\___ r/k\n\t# \\ \\ /¬¬/ *: newly-generated vertices: \n\t# \\ \\/ / b = 3/2/valence, r = 1/4/valence\n\t# *--@--* b/k\t @: existing vertices to be updated: 1-b-r\t\t\n\t# / /\\ \\\n\t# /__/ \\__\\\n\t# * \\ / * r/k\n\t# \\/\n\t\t\n\t\telse:\n\t\t\tnew_x, new_y, new_z = (0, 0, 0)\n\t\t\tbeta = 3./2./valence\n\t\t\tgamma = 1./4./valence\n\t\t\tfor node_in_ring1 in ring1:\n\t\t\t\tnew_x += beta/valence*mesh.give_nodes().give_coor()[node_in_ring1][0]\n\t\t\t\tnew_y += beta/valence*mesh.give_nodes().give_coor()[node_in_ring1][1]\n\t\t\t\tnew_z += beta/valence*mesh.give_nodes().give_coor()[node_in_ring1][2]\n\t\t\t\n\t\t\tfor node_in_ring2 in ring2:\n\t\t\t\tnew_x += gamma/valence*mesh.give_nodes().give_coor()[node_in_ring2][0]\n\t\t\t\tnew_y += gamma/valence*mesh.give_nodes().give_coor()[node_in_ring2][1]\n\t\t\t\tnew_z += gamma/valence*mesh.give_nodes().give_coor()[node_in_ring2][2]\n\t\t\t\n\t\t\tnew_x += (1. - beta - gamma)*mesh.give_nodes().give_coor()[node_index][0]\n\t\t\tnew_y += (1. - beta - gamma)*mesh.give_nodes().give_coor()[node_index][1]\n\t\t\tnew_z += (1. - beta - gamma)*mesh.give_nodes().give_coor()[node_index][2]\n\t\t\n\t\tnew_coor[node_index] = (new_x, new_y, new_z)\n\t\n\tnew_nodes = geo.Node(new_coor)\n\t\n\tmesh.update(new_nodes, new_edges, new_faces)\n\t\n\t# return new_mesh\n\treturn mesh",
"def test_3D_m4_2k_sFH():\n scal, velo = setup_3D()\n\n advec = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: 'gpu_2k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_3D_withPython(scal, velo, advec, advec_py)",
"def MeshMachine(main):\n\n # oDesign definition\n oDesign = main['ANSYS']['oDesign']\n\n # Data for the rotor mesh\n RotorName = main['ANSYS']['Rotor&Magnets']['Name'][0]\n RotorNumMaxElem = main['ANSYS']['Mesh']['Rotor']['NumMaxElem']\n RotorMaxLength = main['ANSYS']['Mesh']['Rotor']['MaxLength']\n\n # Data for the magnets mesh\n PMNames = main['ANSYS']['Rotor&Magnets']['PMNames']\n PMNumMaxElem = main['ANSYS']['Mesh']['Magnets']['NumMaxElem']\n PMMaxLength = main['ANSYS']['Mesh']['Magnets']['MaxLength']\n\n # Data for the Stator mesh\n StatorName = main['ANSYS']['Stator']['Name']\n StatorNormalDev = main['ANSYS']['Mesh']['Stator']['NormalDev']\n StatorAspectRatio = main['ANSYS']['Mesh']['Stator']['AspectRatio']\n\n # Data for the Stator mesh\n CoilNames = main['ANSYS']['Winding']['CoilNames']\n WindingNumMaxElem = main['ANSYS']['Mesh']['Winding']['NumMaxElem']\n WindingMaxLength = main['ANSYS']['Mesh']['Winding']['MaxLength']\n\n WindingName = []\n for phase in CoilNames:\n for direction in phase:\n WindingName += direction\n\n # Creating meshes\n oModule = oDesign.GetModule(\"MeshSetup\")\n\n # Rotor meshes\n oModule.AssignLengthOp(\n [\n \"NAME:Rotor\",\n \"RefineInside:=\", True,\n \"Enabled:=\", True,\n \"Objects:=\", [RotorName],\n \"RestrictElem:=\", False,\n \"NumMaxElem:=\", str(RotorNumMaxElem),\n \"RestrictLength:=\", True,\n \"MaxLength:=\", str(RotorMaxLength)+\"mm\"\n ]\n )\n # Magnet meshes\n oModule.AssignLengthOp(\n [\n \"NAME:Magnets\",\n \"RefineInside:=\", True,\n \"Enabled:=\", True,\n \"Objects:=\", PMNames,\n \"RestrictElem:=\", False,\n \"NumMaxElem:=\", str(PMNumMaxElem),\n \"RestrictLength:=\", True,\n \"MaxLength:=\", str(PMMaxLength)+\"mm\"\n ]\n )\n # Stator meshes\n oModule.AssignTrueSurfOp(\n [\n \"NAME:Stator\",\n \"Objects:=\", [StatorName],\n \"CurvedSurfaceApproxChoice:=\", \"ManualSettings\",\n \"SurfDevChoice:=\", 0,\n \"NormalDevChoice:=\", 2,\n \"NormalDev:=\", str(StatorNormalDev) + \"deg\",\n \"AspectRatioChoice:=\", 2,\n \"AspectRatio:=\", str(StatorAspectRatio)\n ]\n )\n\n # Coil meshes\n oModule.AssignLengthOp(\n [\n \"NAME:Coils\",\n \"RefineInside:=\"\t, True,\n \"Enabled:=\"\t\t, True,\n \"Objects:=\"\t\t, WindingName,\n \"RestrictElem:=\"\t, False,\n \"NumMaxElem:=\"\t\t, str(WindingNumMaxElem),\n \"RestrictLength:=\"\t, True,\n \"MaxLength:=\"\t\t, str(WindingMaxLength) +\"mm\"\n ]\n )\n\n return main",
"def test_assembly_inner_product_2_forms(self):\n func_space_lob = FunctionSpace(self.mesh, '2-lobatto', self.p)\n func_space_gauss = FunctionSpace(self.mesh, '2-gauss', self.p)\n func_space_extgauss = FunctionSpace(self.mesh, '2-ext_gauss', self.p)\n\n basis_lob = BasisForm(func_space_lob)\n basis_lob.quad_grid = 'gauss'\n M_lob = inner(basis_lob, basis_lob)\n\n basis_gauss = BasisForm(func_space_gauss)\n basis_gauss.quad_grid = 'lobatto'\n M_gauss = inner(basis_gauss, basis_gauss)\n\n basis_ext_gauss = BasisForm(func_space_extgauss)\n print(basis_ext_gauss.num_basis)\n basis_ext_gauss.quad_grid = 'lobatto'\n M_extgauss = inner(basis_ext_gauss, basis_ext_gauss)\n\n M_lob_ass_ref = assemble_slow(self.mesh, M_lob, func_space_lob.dof_map.dof_map,\n func_space_lob.dof_map.dof_map)\n M_gauss_ass_ref = assemble_slow(self.mesh, M_gauss, func_space_gauss.dof_map.dof_map,\n func_space_gauss.dof_map.dof_map)\n M_extgauss_ass_ref = assemble_slow(\n self.mesh, M_extgauss, func_space_extgauss.dof_map.dof_map_internal, func_space_extgauss.dof_map.dof_map_internal)\n\n M_lob_ass = assemble(M_lob, func_space_lob, func_space_lob).toarray()\n M_gauss_ass = assemble(M_gauss, func_space_gauss, func_space_gauss).toarray()\n M_extgauss_ass = assemble(M_extgauss, func_space_extgauss,\n func_space_extgauss).toarray()\n\n npt.assert_array_almost_equal(M_lob_ass_ref, M_lob_ass)\n npt.assert_array_almost_equal(M_gauss_ass_ref, M_gauss_ass)\n npt.assert_array_almost_equal(M_extgauss_ass_ref, M_extgauss_ass)",
"def test_2_2_3D_cube_splits(self):\n check = [(0, 0, 0), (1, 1, 1), (1, 0, 0), (1, 1, 0), (1, 0, 1),\n (0, 1, 0),\n (0, 1, 1), (0, 0, 1), (0.5, 0.5, 0.5), (0.0, 0.5, 0.5),\n (0.0, 0.0, 0.5), (0.0, 0.5, 0.0), (0.5, 0.0, 0.5),\n (0.5, 0.0, 0.0),\n (0.5, 0.5, 0.0), (0.25, 0.25, 0.25), (1.0, 0.5, 0.5),\n (1.0, 1.0, 0.5),\n (1.0, 0.5, 1.0), (0.5, 1.0, 0.5), (0.5, 1.0, 1.0),\n (0.5, 0.5, 1.0),\n (0.75, 0.75, 0.75), (1.0, 0.0, 0.5), (1.0, 0.5, 0.0),\n (0.75, 0.25, 0.25), (0.5, 1.0, 0.0), (0.75, 0.75, 0.25),\n (0.5, 0.0, 1.0), (0.75, 0.25, 0.75), (0.0, 1.0, 0.5),\n (0.25, 0.75, 0.25), (0.0, 0.5, 1.0), (0.25, 0.75, 0.75),\n (0.25, 0.25, 0.75), (0.5, 0.25, 0.25), (0.5, 0.5, 0.25),\n (0.5, 0.25, 0.5), (0.25, 0.5, 0.25), (0.25, 0.5, 0.5),\n (0.25, 0.25, 0.5), (0.375, 0.375, 0.375), (0.0, 0.25, 0.25),\n (0.0, 0.0, 0.25), (0.0, 0.25, 0.0), (0.25, 0.0, 0.25),\n (0.25, 0.0, 0.0), (0.25, 0.25, 0.0), (0.125, 0.125, 0.125),\n (0.0, 0.5, 0.25), (0.0, 0.25, 0.5), (0.125, 0.375, 0.375),\n (0.25, 0.0, 0.5), (0.125, 0.125, 0.375), (0.25, 0.5, 0.0),\n (0.125, 0.375, 0.125), (0.5, 0.0, 0.25), (0.375, 0.125, 0.375),\n (0.5, 0.25, 0.0), (0.375, 0.125, 0.125), (0.375, 0.375, 0.125),\n (0.5, 0.75, 0.75), (0.5, 0.5, 0.75), (0.5, 0.75, 0.5),\n (0.75, 0.5, 0.75), (0.75, 0.5, 0.5), (0.75, 0.75, 0.5),\n (0.625, 0.625, 0.625), (1.0, 0.75, 0.75), (1.0, 1.0, 0.75),\n (1.0, 0.75, 1.0), (0.75, 1.0, 0.75), (0.75, 1.0, 1.0),\n (0.75, 0.75, 1.0), (0.875, 0.875, 0.875), (1.0, 0.5, 0.75),\n (1.0, 0.75, 0.5), (0.875, 0.625, 0.625), (0.75, 1.0, 0.5),\n (0.875, 0.875, 0.625), (0.75, 0.5, 1.0), (0.875, 0.625, 0.875),\n (0.5, 1.0, 0.75), (0.625, 0.875, 0.625), (0.5, 0.75, 1.0),\n (0.625, 0.875, 0.875), (0.625, 0.625, 0.875),\n (0.75, 0.5, 0.25),\n (0.75, 0.25, 0.5), (0.625, 0.375, 0.375), (1.0, 0.25, 0.25),\n (1.0, 0.0, 0.25), (1.0, 0.25, 0.0), (0.75, 0.0, 0.25),\n (0.75, 0.0, 0.0), (0.75, 0.25, 0.0), (0.875, 0.125, 0.125),\n (1.0, 0.5, 0.25), (1.0, 0.25, 0.5), (0.875, 0.375, 0.375),\n (0.75, 0.0, 0.5), (0.875, 0.125, 0.375), (0.75, 0.5, 0.0),\n (0.875, 0.375, 0.125), (0.625, 0.125, 0.375),\n (0.625, 0.125, 0.125),\n (0.625, 0.375, 0.125), (0.5, 0.75, 0.25),\n (0.625, 0.625, 0.375),\n (1.0, 0.75, 0.25), (1.0, 1.0, 0.25), (1.0, 0.75, 0.0),\n (0.75, 1.0, 0.25), (0.75, 1.0, 0.0), (0.75, 0.75, 0.0),\n (0.875, 0.875, 0.125), (0.875, 0.625, 0.375),\n (0.875, 0.875, 0.375),\n (0.875, 0.625, 0.125), (0.5, 1.0, 0.25), (0.625, 0.875, 0.375),\n (0.5, 0.75, 0.0), (0.625, 0.875, 0.125), (0.625, 0.625, 0.125),\n (0.5, 0.25, 0.75), (0.625, 0.375, 0.625), (1.0, 0.25, 0.75),\n (1.0, 0.0, 0.75), (1.0, 0.25, 1.0), (0.75, 0.0, 0.75),\n (0.75, 0.0, 1.0), (0.75, 0.25, 1.0), (0.875, 0.125, 0.875),\n (0.875, 0.375, 0.625), (0.875, 0.125, 0.625),\n (0.875, 0.375, 0.875),\n (0.5, 0.0, 0.75), (0.625, 0.125, 0.625), (0.5, 0.25, 1.0),\n (0.625, 0.125, 0.875), (0.625, 0.375, 0.875),\n (0.25, 0.75, 0.5),\n (0.375, 0.625, 0.375), (0.0, 0.75, 0.25), (0.0, 1.0, 0.25),\n (0.0, 0.75, 0.0), (0.25, 1.0, 0.25), (0.25, 1.0, 0.0),\n (0.25, 0.75, 0.0), (0.125, 0.875, 0.125), (0.0, 0.75, 0.5),\n (0.125, 0.625, 0.375), (0.25, 1.0, 0.5), (0.125, 0.875, 0.375),\n (0.125, 0.625, 0.125), (0.375, 0.875, 0.375),\n (0.375, 0.875, 0.125),\n (0.375, 0.625, 0.125), (0.25, 0.5, 0.75),\n (0.375, 0.625, 0.625),\n (0.0, 0.75, 0.75), (0.0, 1.0, 0.75), (0.0, 0.75, 1.0),\n (0.25, 1.0, 0.75), (0.25, 1.0, 1.0), (0.25, 0.75, 1.0),\n (0.125, 0.875, 0.875), (0.0, 0.5, 0.75), (0.125, 0.625, 0.625),\n (0.125, 0.875, 0.625), (0.25, 0.5, 1.0), (0.125, 0.625, 0.875),\n (0.375, 0.875, 0.625), (0.375, 0.875, 
0.875),\n (0.375, 0.625, 0.875),\n (0.375, 0.375, 0.625), (0.0, 0.25, 0.75), (0.0, 0.0, 0.75),\n (0.0, 0.25, 1.0), (0.25, 0.0, 0.75), (0.25, 0.0, 1.0),\n (0.25, 0.25, 1.0), (0.125, 0.125, 0.875),\n (0.125, 0.375, 0.625),\n (0.125, 0.125, 0.625), (0.125, 0.375, 0.875),\n (0.375, 0.125, 0.625),\n (0.375, 0.125, 0.875), (0.375, 0.375, 0.875)]\n\n nn_checks = {(0.5, 0.25, 0.25): [(0.375, 0.375, 0.125), (0.5, 0.5, 0.0),\n (0.75, 0.25, 0.25),\n (0.625, 0.375, 0.375),\n (0.625, 0.125, 0.375),\n (0.625, 0.125, 0.125),\n (0.5, 0.5, 0.25), (0.25, 0.25, 0.25),\n (0.375, 0.375, 0.375),\n (0.5, 0.25, 0.5), (0.5, 0.5, 0.5),\n (0.5, 0.0, 0.25),\n (0.375, 0.125, 0.375), (0.5, 0.0, 0.5),\n (0.5, 0.25, 0.0),\n (0.375, 0.125, 0.125), (0.5, 0.0, 0.0),\n (0.625, 0.375, 0.125)],\n (0.625, 0.625, 0.875): [(0.75, 0.5, 1.0),\n (0.75, 0.75, 1.0),\n (0.5, 0.75, 1.0), (0.5, 0.5, 1.0),\n (0.5, 0.5, 0.75),\n (0.5, 0.75, 0.75),\n (0.75, 0.5, 0.75),\n (0.75, 0.75, 0.75)],\n (0, 0, 0): [(0.0, 0.25, 0.0), (0.125, 0.125, 0.125),\n (0.0, 0.0, 0.25), (0.25, 0.0, 0.0),\n (0.0, 0.25, 0.25), (0.25, 0.25, 0.0),\n (0.25, 0.0, 0.25)]}\n\n init_triangulation(3, 2, check, nn_checks)",
"def test_plot_mesh(self):\n plt.close('all')\n\n #\n # Initialize\n #\n fig, ax = plt.subplots(3,3)\n plot = Plot()\n #\n # Define mesh\n # \n mesh = Mesh.newmesh(grid_size=(2,2))\n mesh.refine() \n mesh.root_node().children[1,1].mark(1)\n mesh.refine(1)\n \n # Plot simple mesh\n ax[0,0] = plot.mesh(ax[0,0], mesh)\n \n #\n # Flag a few cells\n # \n mesh.unmark(nodes=True)\n mesh.root_node().children[0,0].mark(2)\n mesh.root_node().children[1,0].mark(1)\n mesh.root_node().children[1,1].children['SW'].mark(3)\n mesh.root_node().children[1,1].children['NE'].mark(3)\n \n # Color flagged cells\n ax[0,1] = plot.mesh(ax[0,1], mesh, color_marked=[1,2,3], nested=True)\n \n # Plot vertex numbers\n ax[0,2] = plot.mesh(ax[0,2], mesh, vertex_numbers=True)\n \n # Plot edge numbers\n ax[1,0] = plot.mesh(ax[1,0], mesh, edge_numbers=True)\n \n # Plot cell numbers nested off\n mesh.refine(2)\n ax[1,1] = plot.mesh(ax[1,1], mesh, cell_numbers=True)\n \n # Plot cell numbers nested on\n ax[1,2] = plot.mesh(ax[1,2], mesh, cell_numbers=True, nested=True)\n\n # Plot dofs\n element = QuadFE(2,'Q1')\n ax[2,0] = plot.mesh(ax[2,0], mesh, element=element, dofs=True)\n \n # Assign dofs in a nested way\n ax[2,1] = plot.mesh(ax[2,1], mesh, element=element, dofs=True, \\\n nested=True)\n \n # Display only dofs of flagged nodes \n ax[2,2] = plot.mesh(ax[2,2], mesh, element=element, dofs=True, \\\n node_flag=3, nested=True, show_axis=True)",
"def test_3D_m4_1k_sFH():\n scal, velo = setup_3D()\n\n advec = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: 'gpu_1k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_3D_withPython(scal, velo, advec, advec_py)",
"def _apply_array_spin12_halffilling(self, h1e: 'Nparray',\n h2e: 'Nparray') -> 'Nparray':\n if fqe.settings.use_accelerated_code:\n #return self._apply_array_spin12_blocked(h1e, h2e)\n return self._apply_array_spin12_lm(h1e, h2e)\n else:\n h1e = copy.deepcopy(h1e)\n h2e = numpy.moveaxis(copy.deepcopy(h2e), 1, 2) * (-1.0)\n norb = self.norb()\n for k in range(norb * 2):\n h1e[:, :] -= h2e[:, k, k, :]\n\n (dveca, dvecb) = self.calculate_dvec_spin()\n out = numpy.einsum(\"ij,ijkl->kl\", h1e[:norb, :norb], dveca) \\\n + numpy.einsum(\"ij,ijkl->kl\", h1e[norb:, norb:], dvecb)\n ndveca = numpy.einsum(\"ijkl,klmn->ijmn\",\n h2e[:norb, :norb, :norb, :norb], dveca) \\\n + numpy.einsum(\"ijkl,klmn->ijmn\",\n h2e[:norb, :norb, norb:, norb:], dvecb)\n ndvecb = numpy.einsum(\"ijkl,klmn->ijmn\",\n h2e[norb:, norb:, :norb, :norb], dveca) \\\n + numpy.einsum(\"ijkl,klmn->ijmn\",\n h2e[norb:, norb:, norb:, norb:], dvecb)\n out += self._calculate_coeff_spin_with_dvec((ndveca, ndvecb))\n return out",
"def test_4_2_5D_rec_splits(self):\n check = [(0.3, -3.9, -1.5, -3.0, -9.5), (1.0, 11.8, -1.1, 5.0, 11000.5),\n (1.0, -3.9, -1.5, -3.0, -9.5), (1.0, 11.8, -1.5, -3.0, -9.5),\n (1.0, 11.8, -1.1, -3.0, -9.5), (1.0, 11.8, -1.1, 5.0, -9.5),\n (1.0, 11.8, -1.1, -3.0, 11000.5), (1.0, 11.8, -1.5, 5.0, -9.5),\n (1.0, 11.8, -1.5, 5.0, 11000.5),\n (1.0, 11.8, -1.5, -3.0, 11000.5),\n (1.0, -3.9, -1.1, -3.0, -9.5), (1.0, -3.9, -1.1, 5.0, -9.5),\n (1.0, -3.9, -1.1, 5.0, 11000.5),\n (1.0, -3.9, -1.1, -3.0, 11000.5), (1.0, -3.9, -1.5, 5.0, -9.5),\n (1.0, -3.9, -1.5, 5.0, 11000.5),\n (1.0, -3.9, -1.5, -3.0, 11000.5),\n (0.3, 11.8, -1.5, -3.0, -9.5), (0.3, 11.8, -1.1, -3.0, -9.5),\n (0.3, 11.8, -1.1, 5.0, -9.5), (0.3, 11.8, -1.1, 5.0, 11000.5),\n (0.3, 11.8, -1.1, -3.0, 11000.5), (0.3, 11.8, -1.5, 5.0, -9.5),\n (0.3, 11.8, -1.5, 5.0, 11000.5),\n (0.3, 11.8, -1.5, -3.0, 11000.5),\n (0.3, -3.9, -1.1, -3.0, -9.5), (0.3, -3.9, -1.1, 5.0, -9.5),\n (0.3, -3.9, -1.1, 5.0, 11000.5),\n (0.3, -3.9, -1.1, -3.0, 11000.5), (0.3, -3.9, -1.5, 5.0, -9.5),\n (0.3, -3.9, -1.5, 5.0, 11000.5),\n (0.3, -3.9, -1.5, -3.0, 11000.5),\n (0.65, 3.95, -1.3, 1.0, 5495.5),\n (0.3, 3.95, -1.3, 1.0, 5495.5),\n (0.3, -3.9, -1.3, 1.0, 5495.5), (0.3, -3.9, -1.5, 1.0, 5495.5),\n (0.3, -3.9, -1.5, -3.0, 5495.5), (0.3, -3.9, -1.5, 1.0, -9.5),\n (0.3, -3.9, -1.3, -3.0, 5495.5), (0.3, -3.9, -1.3, -3.0, -9.5),\n (0.3, -3.9, -1.3, 1.0, -9.5),\n (0.3, 3.95, -1.5, 1.0, 5495.5),\n (0.3, 3.95, -1.5, -3.0, 5495.5),\n (0.3, 3.95, -1.5, -3.0, -9.5),\n (0.3, 3.95, -1.5, 1.0, -9.5),\n (0.3, 3.95, -1.3, -3.0, 5495.5),\n (0.3, 3.95, -1.3, -3.0, -9.5),\n (0.3, 3.95, -1.3, 1.0, -9.5),\n (0.65, -3.9, -1.3, 1.0, 5495.5),\n (0.65, -3.9, -1.5, 1.0, 5495.5),\n (0.65, -3.9, -1.5, -3.0, 5495.5),\n (0.65, -3.9, -1.5, -3.0, -9.5),\n (0.65, -3.9, -1.5, 1.0, -9.5),\n (0.65, -3.9, -1.3, -3.0, 5495.5),\n (0.65, -3.9, -1.3, -3.0, -9.5),\n (0.65, -3.9, -1.3, 1.0, -9.5),\n (0.65, 3.95, -1.5, 1.0, 5495.5),\n (0.65, 3.95, -1.5, -3.0, 5495.5),\n (0.65, 3.95, -1.5, -3.0, -9.5),\n (0.65, 3.95, -1.5, 1.0, -9.5),\n (0.65, 3.95, -1.3, -3.0, 5495.5),\n (0.65, 3.95, -1.3, -3.0, -9.5),\n (0.65, 3.95, -1.3, 1.0, -9.5),\n (0.475, 0.025000000000000133, -1.4, -1.0, 2743.0),\n (1.0, 3.95, -1.3, 1.0, 5495.5),\n (1.0, 11.8, -1.3, 1.0, 5495.5), (1.0, 11.8, -1.1, 1.0, 5495.5),\n (1.0, 11.8, -1.1, 5.0, 5495.5),\n (1.0, 11.8, -1.1, 1.0, 11000.5),\n (1.0, 11.8, -1.3, 5.0, 5495.5),\n (1.0, 11.8, -1.3, 5.0, 11000.5),\n (1.0, 11.8, -1.3, 1.0, 11000.5),\n (1.0, 3.95, -1.1, 1.0, 5495.5),\n (1.0, 3.95, -1.1, 5.0, 5495.5),\n (1.0, 3.95, -1.1, 5.0, 11000.5),\n (1.0, 3.95, -1.1, 1.0, 11000.5),\n (1.0, 3.95, -1.3, 5.0, 5495.5),\n (1.0, 3.95, -1.3, 5.0, 11000.5),\n (1.0, 3.95, -1.3, 1.0, 11000.5),\n (0.65, 11.8, -1.3, 1.0, 5495.5),\n (0.65, 11.8, -1.1, 1.0, 5495.5),\n (0.65, 11.8, -1.1, 5.0, 5495.5),\n (0.65, 11.8, -1.1, 5.0, 11000.5),\n (0.65, 11.8, -1.1, 1.0, 11000.5),\n (0.65, 11.8, -1.3, 5.0, 5495.5),\n (0.65, 11.8, -1.3, 5.0, 11000.5),\n (0.65, 11.8, -1.3, 1.0, 11000.5),\n (0.65, 3.95, -1.1, 1.0, 5495.5),\n (0.65, 3.95, -1.1, 5.0, 5495.5),\n (0.65, 3.95, -1.1, 5.0, 11000.5),\n (0.65, 3.95, -1.1, 1.0, 11000.5),\n (0.65, 3.95, -1.3, 5.0, 5495.5),\n (0.65, 3.95, -1.3, 5.0, 11000.5),\n (0.65, 3.95, -1.3, 1.0, 11000.5),\n (0.825, 7.875, -1.2000000000000002, 3.0, 8248.0),\n (1.0, -3.9, -1.3, 1.0, 5495.5), (1.0, -3.9, -1.5, 1.0, 5495.5),\n (1.0, -3.9, -1.5, -3.0, 5495.5), (1.0, -3.9, -1.5, 1.0, -9.5),\n (1.0, -3.9, -1.3, -3.0, 5495.5), (1.0, -3.9, -1.3, -3.0, -9.5),\n (1.0, -3.9, -1.3, 1.0, -9.5),\n (1.0, 3.95, 
-1.5, 1.0, 5495.5),\n (1.0, 3.95, -1.5, -3.0, 5495.5),\n (1.0, 3.95, -1.5, -3.0, -9.5),\n (1.0, 3.95, -1.5, 1.0, -9.5),\n (1.0, 3.95, -1.3, -3.0, 5495.5),\n (1.0, 3.95, -1.3, -3.0, -9.5),\n (1.0, 3.95, -1.3, 1.0, -9.5),\n (0.825, 0.025000000000000133, -1.4, -1.0, 2743.0),\n (1.0, 11.8, -1.5, 1.0, 5495.5),\n (1.0, 11.8, -1.5, -3.0, 5495.5), (1.0, 11.8, -1.5, 1.0, -9.5),\n (1.0, 11.8, -1.3, -3.0, 5495.5), (1.0, 11.8, -1.3, -3.0, -9.5),\n (1.0, 11.8, -1.3, 1.0, -9.5),\n (0.65, 11.8, -1.5, 1.0, 5495.5),\n (0.65, 11.8, -1.5, -3.0, 5495.5),\n (0.65, 11.8, -1.5, -3.0, -9.5),\n (0.65, 11.8, -1.5, 1.0, -9.5),\n (0.65, 11.8, -1.3, -3.0, 5495.5),\n (0.65, 11.8, -1.3, -3.0, -9.5),\n (0.65, 11.8, -1.3, 1.0, -9.5),\n (0.825, 7.875, -1.4, -1.0, 2743.0),\n (1.0, 11.8, -1.1, -3.0, 5495.5), (1.0, 11.8, -1.1, 1.0, -9.5),\n (1.0, 3.95, -1.1, -3.0, 5495.5),\n (1.0, 3.95, -1.1, -3.0, -9.5),\n (1.0, 3.95, -1.1, 1.0, -9.5),\n (0.65, 11.8, -1.1, -3.0, 5495.5),\n (0.65, 11.8, -1.1, -3.0, -9.5),\n (0.65, 11.8, -1.1, 1.0, -9.5),\n (0.65, 3.95, -1.1, -3.0, 5495.5),\n (0.65, 3.95, -1.1, -3.0, -9.5),\n (0.65, 3.95, -1.1, 1.0, -9.5),\n (0.825, 7.875, -1.2000000000000002, -1.0, 2743.0),\n (1.0, 11.8, -1.3, 5.0, -9.5),\n (1.0, 3.95, -1.1, 5.0, -9.5),\n (1.0, 3.95, -1.3, 5.0, -9.5),\n (0.65, 11.8, -1.1, 5.0, -9.5),\n (0.65, 11.8, -1.3, 5.0, -9.5),\n (0.65, 3.95, -1.1, 5.0, -9.5),\n (0.65, 3.95, -1.3, 5.0, -9.5),\n (0.825, 7.875, -1.2000000000000002, 3.0, 2743.0),\n (1.0, 11.8, -1.3, -3.0, 11000.5),\n (1.0, 3.95, -1.1, -3.0, 11000.5),\n (1.0, 3.95, -1.3, -3.0, 11000.5),\n (0.65, 11.8, -1.1, -3.0, 11000.5),\n (0.65, 11.8, -1.3, -3.0, 11000.5),\n (0.65, 3.95, -1.1, -3.0, 11000.5),\n (0.65, 3.95, -1.3, -3.0, 11000.5),\n (0.825, 7.875, -1.2000000000000002, -1.0, 8248.0),\n (1.0, 11.8, -1.5, 5.0, 5495.5),\n (1.0, 3.95, -1.5, 5.0, 5495.5),\n (1.0, 3.95, -1.5, 5.0, -9.5),\n (0.65, 11.8, -1.5, 5.0, 5495.5),\n (0.65, 11.8, -1.5, 5.0, -9.5),\n (0.65, 3.95, -1.5, 5.0, 5495.5),\n (0.65, 3.95, -1.5, 5.0, -9.5),\n (0.825, 7.875, -1.4, 3.0, 2743.0),\n (1.0, 11.8, -1.5, 1.0, 11000.5),\n (1.0, 3.95, -1.5, 5.0, 11000.5),\n (1.0, 3.95, -1.5, 1.0, 11000.5),\n (0.65, 11.8, -1.5, 5.0, 11000.5),\n (0.65, 11.8, -1.5, 1.0, 11000.5),\n (0.65, 3.95, -1.5, 5.0, 11000.5),\n (0.65, 3.95, -1.5, 1.0, 11000.5),\n (0.825, 7.875, -1.4, 3.0, 8248.0),\n (1.0, 3.95, -1.5, -3.0, 11000.5),\n (0.65, 11.8, -1.5, -3.0, 11000.5),\n (0.65, 3.95, -1.5, -3.0, 11000.5),\n (0.825, 7.875, -1.4, -1.0, 8248.0),\n (1.0, -3.9, -1.1, 1.0, 5495.5),\n (1.0, -3.9, -1.1, -3.0, 5495.5), (1.0, -3.9, -1.1, 1.0, -9.5),\n (0.65, -3.9, -1.1, 1.0, 5495.5),\n (0.65, -3.9, -1.1, -3.0, 5495.5),\n (0.65, -3.9, -1.1, -3.0, -9.5),\n (0.65, -3.9, -1.1, 1.0, -9.5), (\n 0.825, 0.025000000000000133, -1.2000000000000002, -1.0,\n 2743.0), (1.0, -3.9, -1.1, 5.0, 5495.5),\n (1.0, -3.9, -1.3, 5.0, 5495.5), (1.0, -3.9, -1.3, 5.0, -9.5),\n (0.65, -3.9, -1.1, 5.0, 5495.5),\n (0.65, -3.9, -1.1, 5.0, -9.5),\n (0.65, -3.9, -1.3, 5.0, 5495.5),\n (0.65, -3.9, -1.3, 5.0, -9.5), (\n 0.825, 0.025000000000000133, -1.2000000000000002, 3.0, 2743.0),\n (1.0, -3.9, -1.1, 1.0, 11000.5),\n (1.0, -3.9, -1.3, 5.0, 11000.5),\n (1.0, -3.9, -1.3, 1.0, 11000.5),\n (0.65, -3.9, -1.1, 5.0, 11000.5),\n (0.65, -3.9, -1.1, 1.0, 11000.5),\n (0.65, -3.9, -1.3, 5.0, 11000.5),\n (0.65, -3.9, -1.3, 1.0, 11000.5), (\n 0.825, 0.025000000000000133, -1.2000000000000002, 3.0, 8248.0),\n (1.0, -3.9, -1.3, -3.0, 11000.5),\n (0.65, -3.9, -1.1, -3.0, 11000.5),\n (0.65, -3.9, -1.3, -3.0, 11000.5), (\n 0.825, 0.025000000000000133, 
-1.2000000000000002, -1.0,\n 8248.0), (1.0, -3.9, -1.5, 5.0, 5495.5),\n (0.65, -3.9, -1.5, 5.0, 5495.5),\n (0.65, -3.9, -1.5, 5.0, -9.5),\n (0.825, 0.025000000000000133, -1.4, 3.0, 2743.0),\n (1.0, -3.9, -1.5, 1.0, 11000.5),\n (0.65, -3.9, -1.5, 5.0, 11000.5),\n (0.65, -3.9, -1.5, 1.0, 11000.5),\n (0.825, 0.025000000000000133, -1.4, 3.0, 8248.0),\n (0.65, -3.9, -1.5, -3.0, 11000.5),\n (0.825, 0.025000000000000133, -1.4, -1.0, 8248.0),\n (0.3, 11.8, -1.3, 1.0, 5495.5), (0.3, 11.8, -1.5, 1.0, 5495.5),\n (0.3, 11.8, -1.5, -3.0, 5495.5), (0.3, 11.8, -1.5, 1.0, -9.5),\n (0.3, 11.8, -1.3, -3.0, 5495.5), (0.3, 11.8, -1.3, -3.0, -9.5),\n (0.3, 11.8, -1.3, 1.0, -9.5),\n (0.475, 7.875, -1.4, -1.0, 2743.0),\n (0.3, 11.8, -1.1, 1.0, 5495.5),\n (0.3, 11.8, -1.1, -3.0, 5495.5), (0.3, 11.8, -1.1, 1.0, -9.5),\n (0.3, 3.95, -1.1, 1.0, 5495.5),\n (0.3, 3.95, -1.1, -3.0, 5495.5),\n (0.3, 3.95, -1.1, -3.0, -9.5),\n (0.3, 3.95, -1.1, 1.0, -9.5),\n (0.475, 7.875, -1.2000000000000002, -1.0, 2743.0),\n (0.3, 11.8, -1.1, 5.0, 5495.5), (0.3, 11.8, -1.3, 5.0, 5495.5),\n (0.3, 11.8, -1.3, 5.0, -9.5),\n (0.3, 3.95, -1.1, 5.0, 5495.5),\n (0.3, 3.95, -1.1, 5.0, -9.5),\n (0.3, 3.95, -1.3, 5.0, 5495.5),\n (0.3, 3.95, -1.3, 5.0, -9.5),\n (0.475, 7.875, -1.2000000000000002, 3.0, 2743.0),\n (0.3, 11.8, -1.1, 1.0, 11000.5),\n (0.3, 11.8, -1.3, 5.0, 11000.5),\n (0.3, 11.8, -1.3, 1.0, 11000.5),\n (0.3, 3.95, -1.1, 5.0, 11000.5),\n (0.3, 3.95, -1.1, 1.0, 11000.5),\n (0.3, 3.95, -1.3, 5.0, 11000.5),\n (0.3, 3.95, -1.3, 1.0, 11000.5),\n (0.475, 7.875, -1.2000000000000002, 3.0, 8248.0),\n (0.3, 11.8, -1.3, -3.0, 11000.5),\n (0.3, 3.95, -1.1, -3.0, 11000.5),\n (0.3, 3.95, -1.3, -3.0, 11000.5),\n (0.475, 7.875, -1.2000000000000002, -1.0, 8248.0),\n (0.3, 11.8, -1.5, 5.0, 5495.5),\n (0.3, 3.95, -1.5, 5.0, 5495.5),\n (0.3, 3.95, -1.5, 5.0, -9.5),\n (0.475, 7.875, -1.4, 3.0, 2743.0),\n (0.3, 11.8, -1.5, 1.0, 11000.5),\n (0.3, 3.95, -1.5, 5.0, 11000.5),\n (0.3, 3.95, -1.5, 1.0, 11000.5),\n (0.475, 7.875, -1.4, 3.0, 8248.0),\n (0.3, 3.95, -1.5, -3.0, 11000.5),\n (0.475, 7.875, -1.4, -1.0, 8248.0),\n (0.3, -3.9, -1.1, 1.0, 5495.5),\n (0.3, -3.9, -1.1, -3.0, 5495.5), (0.3, -3.9, -1.1, 1.0, -9.5),\n (0.475, 0.025000000000000133, -1.2000000000000002, -1.0,\n 2743.0), (0.3, -3.9, -1.1, 5.0, 5495.5),\n (0.3, -3.9, -1.3, 5.0, 5495.5), (0.3, -3.9, -1.3, 5.0, -9.5), (\n 0.475, 0.025000000000000133, -1.2000000000000002, 3.0, 2743.0),\n (0.3, -3.9, -1.1, 1.0, 11000.5),\n (0.3, -3.9, -1.3, 5.0, 11000.5),\n (0.3, -3.9, -1.3, 1.0, 11000.5), (\n 0.475, 0.025000000000000133, -1.2000000000000002, 3.0, 8248.0),\n (0.3, -3.9, -1.3, -3.0, 11000.5), (\n 0.475, 0.025000000000000133, -1.2000000000000002, -1.0,\n 8248.0), (0.3, -3.9, -1.5, 5.0, 5495.5),\n (0.475, 0.025000000000000133, -1.4, 3.0, 2743.0),\n (0.3, -3.9, -1.5, 1.0, 11000.5),\n (0.475, 0.025000000000000133, -1.4, 3.0, 8248.0),\n (0.475, 0.025000000000000133, -1.4, -1.0, 8248.0)]\n nn_checks = {\n (0.3, -3.9, -1.5, -3.0, -9.5): [(0.3, -3.9, -1.5, 1.0, -9.5),\n (0.65, -3.9, -1.3, 1.0, 5495.5),\n (0.65, -3.9, -1.5, 1.0, 5495.5),\n (0.3, -3.9, -1.5, 1.0, 5495.5),\n (0.3, -3.9, -1.3, 1.0, 5495.5),\n (0.65, -3.9, -1.3, 1.0, -9.5),\n (0.65, -3.9, -1.5, 1.0, -9.5),\n (0.3, -3.9, -1.3, 1.0, -9.5),\n (0.3, 3.95, -1.5, -3.0, 5495.5),\n (0.65, 3.95, -1.5, -3.0, -9.5),\n (0.3, 3.95, -1.3, -3.0, 5495.5),\n (0.65, 3.95, -1.3, -3.0, -9.5),\n (0.3, 3.95, -1.3, -3.0, -9.5),\n (0.3, 3.95, -1.5, -3.0, -9.5),\n (0.65, 3.95, -1.3, -3.0, 5495.5),\n (0.65, 3.95, -1.5, -3.0, 5495.5),\n (0.3, -3.9, -1.3, -3.0, -9.5),\n 
(0.65, -3.9, -1.5, -3.0, 5495.5),\n (0.65, -3.9, -1.3, -3.0, 5495.5),\n (0.3, -3.9, -1.3, -3.0, 5495.5),\n (0.3, -3.9, -1.5, -3.0, 5495.5),\n (0.65, -3.9, -1.3, -3.0, -9.5),\n (0.65, -3.9, -1.5, -3.0, -9.5),\n (0.3, 3.95, -1.3, 1.0, 5495.5),\n (0.3, 3.95, -1.5, 1.0, 5495.5),\n (0.65, 3.95, -1.3, 1.0, -9.5),\n (0.65, 3.95, -1.5, 1.0, -9.5),\n (0.3, 3.95, -1.5, 1.0, -9.5),\n (0.65, 3.95, -1.5, 1.0, 5495.5), (\n 0.475, 0.025000000000000133, -1.4,\n -1.0, 2743.0),\n (0.3, 3.95, -1.3, 1.0, -9.5)],\n (0.3, -3.9, -1.5, 1.0, 11000.5): [(0.3, -3.9, -1.5, -3.0, 11000.5),\n (0.3, -3.9, -1.3, 1.0, 11000.5), (\n 0.475, 0.025000000000000133, -1.4,\n 3.0, 8248.0),\n (0.65, -3.9, -1.5, 1.0, 11000.5),\n (0.65, -3.9, -1.3, 1.0, 11000.5),\n (0.3, -3.9, -1.5, 1.0, 5495.5),\n (0.3, -3.9, -1.3, 1.0, 5495.5),\n (0.65, -3.9, -1.3, 1.0, 5495.5),\n (0.65, -3.9, -1.5, 1.0, 5495.5), (\n 0.475, 0.025000000000000133, -1.4,\n -1.0, 8248.0),\n (0.65, 3.95, -1.5, 1.0, 5495.5),\n (0.65, 3.95, -1.3, 1.0, 5495.5),\n (0.65, 3.95, -1.5, 1.0, 11000.5),\n (0.3, 3.95, -1.5, 1.0, 11000.5),\n (0.3, 3.95, -1.3, 1.0, 5495.5),\n (0.65, 3.95, -1.3, 1.0, 11000.5),\n (0.3, 3.95, -1.5, 1.0, 5495.5),\n (0.3, -3.9, -1.5, 5.0, 11000.5),\n (0.3, 3.95, -1.3, 1.0, 11000.5)],\n (0.475, 0.025000000000000133, -1.4, 3.0, 8248.0): [\n (0.65, 3.95, -1.5, 5.0, 11000.5),\n (0.65, 3.95, -1.3, 5.0, 11000.5),\n (0.3, -3.9, -1.5, 1.0, 11000.5),\n (0.3, 3.95, -1.3, 5.0, 11000.5),\n (0.65, -3.9, -1.3, 1.0, 11000.5),\n (0.3, 3.95, -1.5, 5.0, 11000.5),\n (0.65, -3.9, -1.5, 1.0, 11000.5),\n (0.3, -3.9, -1.3, 1.0, 11000.5),\n (0.65, -3.9, -1.3, 1.0, 5495.5),\n (0.65, -3.9, -1.5, 1.0, 5495.5), (0.3, -3.9, -1.5, 1.0, 5495.5),\n (0.3, -3.9, -1.3, 1.0, 5495.5), (0.3, 3.95, -1.5, 5.0, 5495.5),\n (0.3, 3.95, -1.3, 5.0, 5495.5), (0.65, -3.9, -1.5, 5.0, 5495.5),\n (0.65, -3.9, -1.3, 5.0, 5495.5), (0.3, -3.9, -1.5, 5.0, 5495.5),\n (0.3, -3.9, -1.3, 5.0, 5495.5), (0.65, 3.95, -1.3, 5.0, 5495.5),\n (0.65, 3.95, -1.5, 5.0, 5495.5), (0.3, 3.95, -1.3, 1.0, 5495.5),\n (0.3, 3.95, -1.5, 1.0, 5495.5),\n (0.65, 3.95, -1.3, 1.0, 11000.5),\n (0.3, -3.9, -1.3, 5.0, 11000.5),\n (0.65, 3.95, -1.5, 1.0, 11000.5),\n (0.3, -3.9, -1.5, 5.0, 11000.5),\n (0.3, 3.95, -1.5, 1.0, 11000.5),\n (0.65, 3.95, -1.5, 1.0, 5495.5),\n (0.3, 3.95, -1.3, 1.0, 11000.5),\n (0.65, 3.95, -1.3, 1.0, 5495.5),\n (0.65, -3.9, -1.3, 5.0, 11000.5),\n (0.65, -3.9, -1.5, 5.0, 11000.5)]}\n\n init_triangulation(5, 1, check, nn_checks,\n bounds=[(0.3, 1), (-3.9, 11.8), (-1.5, -1.1),\n (-3, 5), (-9.5, 11000.5)])",
"def par_test_2(self):\n\n for i in range(4):\n self.XYZ_par_factor.setMaxDepth(i)\n self.XYZ_par_factor.setMaxDepth(i)\n\n res = [\n self.XYZ_factor.mult(self.scalar),\n self.XYZ_factor.mult(self.scalarf),\n self.scalarf.mult(self.XYZ_factor),\n ]\n\n par_res = [\n self.XYZ_par_factor.mult(self.scalar),\n self.XYZ_par_factor.mult(self.par_scalarf),\n self.par_scalarf.mult(self.XYZ_par_factor),\n ]\n\n for i, ele in enumerate(res):\n assert (\n ele.rand_vars == par_res[i].rand_vars\n and ele.values == par_res[i].values\n )",
"def test_other_side_mesh(self):\n layered_volume = np.array(\n [\n [\n [0, 0, 0, 0, 0],\n [0, 0, 1, 1, 1],\n [0, 1, 1, 0, 0],\n [0, 1, 1, 1, 0],\n [0, 0, 0, 0, 0],\n ]\n ]\n )\n\n def quad(v1, v2, v3, v4):\n \"\"\"counterclockwise winding faces to make quad\"\"\"\n return [[v3, v2, v1], [v4, v3, v2]]\n\n top_mesh = trimesh.Trimesh(\n vertices=np.array(\n [\n [0, 1, 5],\n [1, 1, 5],\n [0, 1, 2],\n [1, 1, 2],\n [0, 3.5, 1.5],\n [1, 3.5, 1.5],\n ]\n ),\n faces=np.concatenate([quad(0, 1, 3, 2), quad(2, 3, 5, 4)], axis=0),\n )\n\n bot_mesh = trimesh.Trimesh(\n vertices=np.array([[0, 2, 5], [1, 2, 5], [0, 4, 2], [1, 4, 2]]),\n faces=quad(0, 1, 3, 2),\n )\n\n up = [0, -1, 0]\n dup = [0, -np.sqrt(0.5), -np.sqrt(0.5)]\n nanvec = [np.nan, np.nan, np.nan]\n vectors = np.array(\n [\n [\n [nanvec, nanvec, nanvec, nanvec, nanvec],\n [nanvec, nanvec, dup, up, up],\n [nanvec, dup, dup, nanvec, nanvec],\n [nanvec, dup, up, up, nanvec],\n [nanvec, nanvec, nanvec, nanvec, nanvec],\n ]\n ]\n )\n\n distances, something_wrong = tested.distances_from_voxels_to_meshes_wrt_dir(\n layered_volume, [top_mesh, bot_mesh], vectors\n )\n\n npt.assert_array_almost_equal(distances, get_expected_distances_to_meshes())\n assert not np.any(something_wrong)",
"def test_4_2_5D_cube_splits(self):\n check = [(0, 0, 0, 0, 0), (1, 1, 1, 1, 1), (1, 0, 0, 0, 0),\n (1, 1, 0, 0, 0), (1, 1, 1, 0, 0), (1, 1, 1, 1, 0),\n (1, 1, 1, 0, 1), (1, 1, 0, 1, 0), (1, 1, 0, 1, 1),\n (1, 1, 0, 0, 1), (1, 0, 1, 0, 0), (1, 0, 1, 1, 0),\n (1, 0, 1, 1, 1), (1, 0, 1, 0, 1), (1, 0, 0, 1, 0),\n (1, 0, 0, 1, 1), (1, 0, 0, 0, 1), (0, 1, 0, 0, 0),\n (0, 1, 1, 0, 0), (0, 1, 1, 1, 0), (0, 1, 1, 1, 1),\n (0, 1, 1, 0, 1), (0, 1, 0, 1, 0), (0, 1, 0, 1, 1),\n (0, 1, 0, 0, 1), (0, 0, 1, 0, 0), (0, 0, 1, 1, 0),\n (0, 0, 1, 1, 1), (0, 0, 1, 0, 1), (0, 0, 0, 1, 0),\n (0, 0, 0, 1, 1), (0, 0, 0, 0, 1), (0.5, 0.5, 0.5, 0.5, 0.5),\n (0.0, 0.5, 0.5, 0.5, 0.5), (0.0, 0.0, 0.5, 0.5, 0.5),\n (0.0, 0.0, 0.0, 0.5, 0.5), (0.0, 0.0, 0.0, 0.0, 0.5),\n (0.0, 0.0, 0.0, 0.5, 0.0), (0.0, 0.0, 0.5, 0.0, 0.5),\n (0.0, 0.0, 0.5, 0.0, 0.0), (0.0, 0.0, 0.5, 0.5, 0.0),\n (0.0, 0.5, 0.0, 0.5, 0.5), (0.0, 0.5, 0.0, 0.0, 0.5),\n (0.0, 0.5, 0.0, 0.0, 0.0), (0.0, 0.5, 0.0, 0.5, 0.0),\n (0.0, 0.5, 0.5, 0.0, 0.5), (0.0, 0.5, 0.5, 0.0, 0.0),\n (0.0, 0.5, 0.5, 0.5, 0.0), (0.5, 0.0, 0.5, 0.5, 0.5),\n (0.5, 0.0, 0.0, 0.5, 0.5), (0.5, 0.0, 0.0, 0.0, 0.5),\n (0.5, 0.0, 0.0, 0.0, 0.0), (0.5, 0.0, 0.0, 0.5, 0.0),\n (0.5, 0.0, 0.5, 0.0, 0.5), (0.5, 0.0, 0.5, 0.0, 0.0),\n (0.5, 0.0, 0.5, 0.5, 0.0), (0.5, 0.5, 0.0, 0.5, 0.5),\n (0.5, 0.5, 0.0, 0.0, 0.5), (0.5, 0.5, 0.0, 0.0, 0.0),\n (0.5, 0.5, 0.0, 0.5, 0.0), (0.5, 0.5, 0.5, 0.0, 0.5),\n (0.5, 0.5, 0.5, 0.0, 0.0), (0.5, 0.5, 0.5, 0.5, 0.0),\n (0.25, 0.25, 0.25, 0.25, 0.25), (1.0, 0.5, 0.5, 0.5, 0.5),\n (1.0, 1.0, 0.5, 0.5, 0.5), (1.0, 1.0, 1.0, 0.5, 0.5),\n (1.0, 1.0, 1.0, 1.0, 0.5), (1.0, 1.0, 1.0, 0.5, 1.0),\n (1.0, 1.0, 0.5, 1.0, 0.5), (1.0, 1.0, 0.5, 1.0, 1.0),\n (1.0, 1.0, 0.5, 0.5, 1.0), (1.0, 0.5, 1.0, 0.5, 0.5),\n (1.0, 0.5, 1.0, 1.0, 0.5), (1.0, 0.5, 1.0, 1.0, 1.0),\n (1.0, 0.5, 1.0, 0.5, 1.0), (1.0, 0.5, 0.5, 1.0, 0.5),\n (1.0, 0.5, 0.5, 1.0, 1.0), (1.0, 0.5, 0.5, 0.5, 1.0),\n (0.5, 1.0, 0.5, 0.5, 0.5), (0.5, 1.0, 1.0, 0.5, 0.5),\n (0.5, 1.0, 1.0, 1.0, 0.5), (0.5, 1.0, 1.0, 1.0, 1.0),\n (0.5, 1.0, 1.0, 0.5, 1.0), (0.5, 1.0, 0.5, 1.0, 0.5),\n (0.5, 1.0, 0.5, 1.0, 1.0), (0.5, 1.0, 0.5, 0.5, 1.0),\n (0.5, 0.5, 1.0, 0.5, 0.5), (0.5, 0.5, 1.0, 1.0, 0.5),\n (0.5, 0.5, 1.0, 1.0, 1.0), (0.5, 0.5, 1.0, 0.5, 1.0),\n (0.5, 0.5, 0.5, 1.0, 0.5), (0.5, 0.5, 0.5, 1.0, 1.0),\n (0.5, 0.5, 0.5, 0.5, 1.0), (0.75, 0.75, 0.75, 0.75, 0.75),\n (1.0, 0.0, 0.5, 0.5, 0.5), (1.0, 0.0, 0.0, 0.5, 0.5),\n (1.0, 0.0, 0.0, 0.0, 0.5), (1.0, 0.0, 0.0, 0.5, 0.0),\n (1.0, 0.0, 0.5, 0.0, 0.5), (1.0, 0.0, 0.5, 0.0, 0.0),\n (1.0, 0.0, 0.5, 0.5, 0.0), (1.0, 0.5, 0.0, 0.5, 0.5),\n (1.0, 0.5, 0.0, 0.0, 0.5), (1.0, 0.5, 0.0, 0.0, 0.0),\n (1.0, 0.5, 0.0, 0.5, 0.0), (1.0, 0.5, 0.5, 0.0, 0.5),\n (1.0, 0.5, 0.5, 0.0, 0.0), (1.0, 0.5, 0.5, 0.5, 0.0),\n (0.75, 0.25, 0.25, 0.25, 0.25), (1.0, 1.0, 0.0, 0.5, 0.5),\n (1.0, 1.0, 0.0, 0.0, 0.5), (1.0, 1.0, 0.0, 0.5, 0.0),\n (1.0, 1.0, 0.5, 0.0, 0.5), (1.0, 1.0, 0.5, 0.0, 0.0),\n (1.0, 1.0, 0.5, 0.5, 0.0), (0.5, 1.0, 0.0, 0.5, 0.5),\n (0.5, 1.0, 0.0, 0.0, 0.5), (0.5, 1.0, 0.0, 0.0, 0.0),\n (0.5, 1.0, 0.0, 0.5, 0.0), (0.5, 1.0, 0.5, 0.0, 0.5),\n (0.5, 1.0, 0.5, 0.0, 0.0), (0.5, 1.0, 0.5, 0.5, 0.0),\n (0.75, 0.75, 0.25, 0.25, 0.25), (1.0, 1.0, 1.0, 0.0, 0.5),\n (1.0, 1.0, 1.0, 0.5, 0.0), (1.0, 0.5, 1.0, 0.0, 0.5),\n (1.0, 0.5, 1.0, 0.0, 0.0), (1.0, 0.5, 1.0, 0.5, 0.0),\n (0.5, 1.0, 1.0, 0.0, 0.5), (0.5, 1.0, 1.0, 0.0, 0.0),\n (0.5, 1.0, 1.0, 0.5, 0.0), (0.5, 0.5, 1.0, 0.0, 0.5),\n (0.5, 0.5, 1.0, 0.0, 0.0), (0.5, 0.5, 1.0, 0.5, 0.0),\n (0.75, 0.75, 0.75, 0.25, 0.25), (1.0, 
1.0, 0.5, 1.0, 0.0),\n (1.0, 0.5, 1.0, 1.0, 0.0), (1.0, 0.5, 0.5, 1.0, 0.0),\n (0.5, 1.0, 1.0, 1.0, 0.0), (0.5, 1.0, 0.5, 1.0, 0.0),\n (0.5, 0.5, 1.0, 1.0, 0.0), (0.5, 0.5, 0.5, 1.0, 0.0),\n (0.75, 0.75, 0.75, 0.75, 0.25), (1.0, 1.0, 0.5, 0.0, 1.0),\n (1.0, 0.5, 1.0, 0.0, 1.0), (1.0, 0.5, 0.5, 0.0, 1.0),\n (0.5, 1.0, 1.0, 0.0, 1.0), (0.5, 1.0, 0.5, 0.0, 1.0),\n (0.5, 0.5, 1.0, 0.0, 1.0), (0.5, 0.5, 0.5, 0.0, 1.0),\n (0.75, 0.75, 0.75, 0.25, 0.75), (1.0, 1.0, 0.0, 1.0, 0.5),\n (1.0, 0.5, 0.0, 1.0, 0.5), (1.0, 0.5, 0.0, 1.0, 0.0),\n (0.5, 1.0, 0.0, 1.0, 0.5), (0.5, 1.0, 0.0, 1.0, 0.0),\n (0.5, 0.5, 0.0, 1.0, 0.5), (0.5, 0.5, 0.0, 1.0, 0.0),\n (0.75, 0.75, 0.25, 0.75, 0.25), (1.0, 1.0, 0.0, 0.5, 1.0),\n (1.0, 0.5, 0.0, 1.0, 1.0), (1.0, 0.5, 0.0, 0.5, 1.0),\n (0.5, 1.0, 0.0, 1.0, 1.0), (0.5, 1.0, 0.0, 0.5, 1.0),\n (0.5, 0.5, 0.0, 1.0, 1.0), (0.5, 0.5, 0.0, 0.5, 1.0),\n (0.75, 0.75, 0.25, 0.75, 0.75), (1.0, 0.5, 0.0, 0.0, 1.0),\n (0.5, 1.0, 0.0, 0.0, 1.0), (0.5, 0.5, 0.0, 0.0, 1.0),\n (0.75, 0.75, 0.25, 0.25, 0.75), (1.0, 0.0, 1.0, 0.5, 0.5),\n (1.0, 0.0, 1.0, 0.0, 0.5), (1.0, 0.0, 1.0, 0.5, 0.0),\n (0.5, 0.0, 1.0, 0.5, 0.5), (0.5, 0.0, 1.0, 0.0, 0.5),\n (0.5, 0.0, 1.0, 0.0, 0.0), (0.5, 0.0, 1.0, 0.5, 0.0),\n (0.75, 0.25, 0.75, 0.25, 0.25), (1.0, 0.0, 1.0, 1.0, 0.5),\n (1.0, 0.0, 0.5, 1.0, 0.5), (1.0, 0.0, 0.5, 1.0, 0.0),\n (0.5, 0.0, 1.0, 1.0, 0.5), (0.5, 0.0, 1.0, 1.0, 0.0),\n (0.5, 0.0, 0.5, 1.0, 0.5), (0.5, 0.0, 0.5, 1.0, 0.0),\n (0.75, 0.25, 0.75, 0.75, 0.25), (1.0, 0.0, 1.0, 0.5, 1.0),\n (1.0, 0.0, 0.5, 1.0, 1.0), (1.0, 0.0, 0.5, 0.5, 1.0),\n (0.5, 0.0, 1.0, 1.0, 1.0), (0.5, 0.0, 1.0, 0.5, 1.0),\n (0.5, 0.0, 0.5, 1.0, 1.0), (0.5, 0.0, 0.5, 0.5, 1.0),\n (0.75, 0.25, 0.75, 0.75, 0.75), (1.0, 0.0, 0.5, 0.0, 1.0),\n (0.5, 0.0, 1.0, 0.0, 1.0), (0.5, 0.0, 0.5, 0.0, 1.0),\n (0.75, 0.25, 0.75, 0.25, 0.75), (1.0, 0.0, 0.0, 1.0, 0.5),\n (0.5, 0.0, 0.0, 1.0, 0.5), (0.5, 0.0, 0.0, 1.0, 0.0),\n (0.75, 0.25, 0.25, 0.75, 0.25), (1.0, 0.0, 0.0, 0.5, 1.0),\n (0.5, 0.0, 0.0, 1.0, 1.0), (0.5, 0.0, 0.0, 0.5, 1.0),\n (0.75, 0.25, 0.25, 0.75, 0.75), (0.5, 0.0, 0.0, 0.0, 1.0),\n (0.75, 0.25, 0.25, 0.25, 0.75), (0.0, 1.0, 0.5, 0.5, 0.5),\n (0.0, 1.0, 0.0, 0.5, 0.5), (0.0, 1.0, 0.0, 0.0, 0.5),\n (0.0, 1.0, 0.0, 0.5, 0.0), (0.0, 1.0, 0.5, 0.0, 0.5),\n (0.0, 1.0, 0.5, 0.0, 0.0), (0.0, 1.0, 0.5, 0.5, 0.0),\n (0.25, 0.75, 0.25, 0.25, 0.25), (0.0, 1.0, 1.0, 0.5, 0.5),\n (0.0, 1.0, 1.0, 0.0, 0.5), (0.0, 1.0, 1.0, 0.5, 0.0),\n (0.0, 0.5, 1.0, 0.5, 0.5), (0.0, 0.5, 1.0, 0.0, 0.5),\n (0.0, 0.5, 1.0, 0.0, 0.0), (0.0, 0.5, 1.0, 0.5, 0.0),\n (0.25, 0.75, 0.75, 0.25, 0.25), (0.0, 1.0, 1.0, 1.0, 0.5),\n (0.0, 1.0, 0.5, 1.0, 0.5), (0.0, 1.0, 0.5, 1.0, 0.0),\n (0.0, 0.5, 1.0, 1.0, 0.5), (0.0, 0.5, 1.0, 1.0, 0.0),\n (0.0, 0.5, 0.5, 1.0, 0.5), (0.0, 0.5, 0.5, 1.0, 0.0),\n (0.25, 0.75, 0.75, 0.75, 0.25), (0.0, 1.0, 1.0, 0.5, 1.0),\n (0.0, 1.0, 0.5, 1.0, 1.0), (0.0, 1.0, 0.5, 0.5, 1.0),\n (0.0, 0.5, 1.0, 1.0, 1.0), (0.0, 0.5, 1.0, 0.5, 1.0),\n (0.0, 0.5, 0.5, 1.0, 1.0), (0.0, 0.5, 0.5, 0.5, 1.0),\n (0.25, 0.75, 0.75, 0.75, 0.75), (0.0, 1.0, 0.5, 0.0, 1.0),\n (0.0, 0.5, 1.0, 0.0, 1.0), (0.0, 0.5, 0.5, 0.0, 1.0),\n (0.25, 0.75, 0.75, 0.25, 0.75), (0.0, 1.0, 0.0, 1.0, 0.5),\n (0.0, 0.5, 0.0, 1.0, 0.5), (0.0, 0.5, 0.0, 1.0, 0.0),\n (0.25, 0.75, 0.25, 0.75, 0.25), (0.0, 1.0, 0.0, 0.5, 1.0),\n (0.0, 0.5, 0.0, 1.0, 1.0), (0.0, 0.5, 0.0, 0.5, 1.0),\n (0.25, 0.75, 0.25, 0.75, 0.75), (0.0, 0.5, 0.0, 0.0, 1.0),\n (0.25, 0.75, 0.25, 0.25, 0.75), (0.0, 0.0, 1.0, 0.5, 0.5),\n (0.0, 0.0, 1.0, 0.0, 0.5), (0.0, 0.0, 1.0, 0.5, 0.0),\n (0.25, 0.25, 
0.75, 0.25, 0.25), (0.0, 0.0, 1.0, 1.0, 0.5),\n (0.0, 0.0, 0.5, 1.0, 0.5), (0.0, 0.0, 0.5, 1.0, 0.0),\n (0.25, 0.25, 0.75, 0.75, 0.25), (0.0, 0.0, 1.0, 0.5, 1.0),\n (0.0, 0.0, 0.5, 1.0, 1.0), (0.0, 0.0, 0.5, 0.5, 1.0),\n (0.25, 0.25, 0.75, 0.75, 0.75), (0.0, 0.0, 0.5, 0.0, 1.0),\n (0.25, 0.25, 0.75, 0.25, 0.75), (0.0, 0.0, 0.0, 1.0, 0.5),\n (0.25, 0.25, 0.25, 0.75, 0.25), (0.0, 0.0, 0.0, 0.5, 1.0),\n (0.25, 0.25, 0.25, 0.75, 0.75), (0.25, 0.25, 0.25, 0.25, 0.75)]\n\n nn_checks = {(1, 1, 1, 1, 1): [(1.0, 1.0, 1.0, 0.5, 1.0),\n (1.0, 1.0, 0.5, 1.0, 1.0),\n (1.0, 0.5, 0.5, 0.5, 0.5),\n (1.0, 0.5, 1.0, 1.0, 0.5),\n (0.5, 0.5, 1.0, 1.0, 1.0),\n (1.0, 0.5, 0.5, 1.0, 0.5),\n (1.0, 0.5, 1.0, 0.5, 0.5),\n (0.5, 0.5, 0.5, 0.5, 1.0),\n (0.5, 0.5, 1.0, 0.5, 1.0),\n (0.5, 0.5, 0.5, 1.0, 1.0),\n (0.5, 1.0, 0.5, 0.5, 0.5),\n (0.5, 1.0, 1.0, 1.0, 0.5),\n (0.5, 1.0, 1.0, 0.5, 0.5),\n (0.5, 1.0, 0.5, 1.0, 0.5),\n (1.0, 1.0, 1.0, 0.5, 0.5),\n (1.0, 1.0, 0.5, 1.0, 0.5),\n (1.0, 1.0, 0.5, 0.5, 0.5),\n (1.0, 1.0, 1.0, 1.0, 0.5),\n (1.0, 1.0, 0.5, 0.5, 1.0),\n (1.0, 0.5, 1.0, 0.5, 1.0),\n (1.0, 0.5, 0.5, 1.0, 1.0),\n (0.5, 1.0, 0.5, 0.5, 1.0),\n (0.5, 0.5, 1.0, 0.5, 0.5),\n (1.0, 0.5, 0.5, 0.5, 1.0),\n (1.0, 0.5, 1.0, 1.0, 1.0),\n (0.5, 0.5, 0.5, 1.0, 0.5),\n (0.75, 0.75, 0.75, 0.75, 0.75),\n (0.5, 0.5, 1.0, 1.0, 0.5),\n (0.5, 1.0, 1.0, 1.0, 1.0),\n (0.5, 1.0, 1.0, 0.5, 1.0),\n (0.5, 1.0, 0.5, 1.0, 1.0)],\n (0.25, 0.75, 0.75, 0.75, 0.25): [(0.5, 1.0, 1.0, 1.0, 0.0),\n (0.5, 0.5, 0.5, 1.0, 0.5),\n (0, 1, 1, 1, 0),\n (0.5, 1.0, 0.5, 0.5, 0.5),\n (0.5, 1.0, 1.0, 1.0, 0.5),\n (0.0, 1.0, 0.5, 0.5, 0.5),\n (0.0, 1.0, 1.0, 1.0, 0.5),\n (0.5, 1.0, 0.5, 1.0, 0.5),\n (0.0, 1.0, 0.5, 1.0, 0.5),\n (0.5, 1.0, 1.0, 0.5, 0.5),\n (0.0, 1.0, 1.0, 0.5, 0.5),\n (0.0, 1.0, 1.0, 0.5, 0.0),\n (0.0, 1.0, 0.5, 1.0, 0.0),\n (0.5, 1.0, 1.0, 0.5, 0.0),\n (0.5, 1.0, 0.5, 1.0, 0.0),\n (0.5, 1.0, 0.5, 0.5, 0.0),\n (0.0, 1.0, 0.5, 0.5, 0.0),\n (0.5, 0.5, 1.0, 0.5, 0.0),\n (0.5, 0.5, 0.5, 1.0, 0.0),\n (0.0, 0.5, 1.0, 0.5, 0.5),\n (0.0, 0.5, 0.5, 1.0, 0.5),\n (0.0, 0.5, 1.0, 0.5, 0.0),\n (0.0, 0.5, 1.0, 1.0, 0.5),\n (0.5, 0.5, 1.0, 1.0, 0.0),\n (0.0, 0.5, 0.5, 0.5, 0.5),\n (0.5, 0.5, 0.5, 0.5, 0.0),\n (0.0, 0.5, 0.5, 1.0, 0.0),\n (0.0, 0.5, 0.5, 0.5, 0.0),\n (0.0, 0.5, 1.0, 1.0, 0.0),\n (0.5, 0.5, 0.5, 0.5, 0.5),\n (0.5, 0.5, 1.0, 1.0, 0.5),\n (\n 0.5, 0.5, 1.0, 0.5, 0.5)],\n (0.0, 0.0, 1.0, 0.5, 1.0): [(0.5, 0.0, 0.5, 0.5, 1.0),\n (0.0, 0.5, 0.5, 0.5, 1.0),\n (0.5, 0.5, 0.5, 0.5, 1.0),\n (0.0, 0.0, 0.5, 0.5, 1.0),\n (0, 0, 1, 1, 1),\n (0.5, 0.5, 1.0, 0.5, 1.0),\n (0.5, 0.0, 1.0, 0.5, 1.0),\n (0.0, 0.5, 1.0, 0.5, 1.0),\n (0, 0, 1, 0, 1),\n (0.5, 0.0, 1.0, 0.5, 0.5),\n (0.0, 0.5, 1.0, 0.5, 0.5),\n (0.5, 0.5, 1.0, 0.5, 0.5),\n (0.0, 0.0, 1.0, 0.5, 0.5),\n (0.5, 0.5, 0.5, 0.5, 0.5),\n (0.0, 0.0, 0.5, 0.5, 0.5),\n (0.25, 0.25, 0.75, 0.75, 0.75),\n (0.5, 0.0, 0.5, 0.5, 0.5),\n (0.0, 0.5, 0.5, 0.5, 0.5), (\n 0.25, 0.25, 0.75, 0.25, 0.75)]}\n\n init_triangulation(5, 1, check, nn_checks)",
"def test_2D_m8_1k_sFH():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: 'gpu_1k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def test_assembly_inner_product_1_forms(self):\n func_space_lob = FunctionSpace(self.mesh, '1-lobatto', self.p)\n func_space_gauss = FunctionSpace(self.mesh, '1-gauss', self.p)\n func_space_extgauss = FunctionSpace(self.mesh, '1-ext_gauss', self.p)\n\n basis_lob = BasisForm(func_space_lob)\n basis_lob.quad_grid = 'gauss'\n M_lob = inner(basis_lob, basis_lob)\n\n basis_gauss = BasisForm(func_space_gauss)\n basis_gauss.quad_grid = 'lobatto'\n M_gauss = inner(basis_gauss, basis_gauss)\n\n basis_ext_gauss = BasisForm(func_space_extgauss)\n basis_ext_gauss.quad_grid = 'lobatto'\n M_extgauss = inner(basis_ext_gauss, basis_ext_gauss)\n\n M_lob_ass_ref = assemble_slow(self.mesh, M_lob, func_space_lob.dof_map.dof_map,\n func_space_lob.dof_map.dof_map)\n M_gauss_ass_ref = assemble_slow(self.mesh, M_gauss, func_space_gauss.dof_map.dof_map,\n func_space_gauss.dof_map.dof_map)\n M_extgauss_ass_ref = assemble_slow(\n self.mesh, M_extgauss, func_space_extgauss.dof_map.dof_map_internal, func_space_extgauss.dof_map.dof_map_internal)\n\n M_lob_ass = assemble(M_lob, func_space_lob, func_space_lob).toarray()\n M_gauss_ass = assemble(M_gauss, func_space_gauss, func_space_gauss).toarray()\n M_extgauss_ass = assemble(M_extgauss, func_space_extgauss,\n func_space_extgauss).toarray()\n\n npt.assert_array_almost_equal(M_lob_ass_ref, M_lob_ass)\n npt.assert_array_almost_equal(M_gauss_ass_ref, M_gauss_ass)\n npt.assert_array_almost_equal(M_extgauss_ass_ref, M_extgauss_ass)",
"def subdivideMesh(IKLE,MESHX,MESHY): \n # ~~> Singling out edges\n from matplotlib.tri import Triangulation\n edges = Triangulation(MESHX,MESHY,IKLE).get_cpp_triangulation().get_edges()\n \n # ~~> Memory allocation for new MESH\n IELEM = len(IKLE); IPOIN = len(MESHX); IEDGE = len(edges)\n JKLE = np.zeros((IELEM*4,3),dtype=np.int) # you subdivide every elements by 4\n MESHJ = np.zeros((IEDGE,2),dtype=np.int) # you add one point on every edges\n \n # ~~> Lookup tables for node numbering on common edges\n pa,pb = edges.T\n k1b,k1a = np.sort(np.take(IKLE,[0,1],axis=1)).T\n indx1 = np.searchsorted(pa,k1a)\n jndx1 = np.searchsorted(pa,k1a,side='right')\n k2b,k2a = np.sort(np.take(IKLE,[1,2],axis=1)).T\n indx2 = np.searchsorted(pa,k2a)\n jndx2 = np.searchsorted(pa,k2a,side='right')\n k3b,k3a = np.sort(np.take(IKLE,[2,0],axis=1)).T\n indx3 = np.searchsorted(pa,k3a)\n jndx3 = np.searchsorted(pa,k3a,side='right')\n \n # ~~> Building one triangle at a time /!\\ Please get this loop parallelised\n j = 0\n for i in range(IELEM):\n k1 = indx1[i]+np.searchsorted(pb[indx1[i]:jndx1[i]],k1b[i])\n k2 = indx2[i]+np.searchsorted(pb[indx2[i]:jndx2[i]],k2b[i])\n k3 = indx3[i]+np.searchsorted(pb[indx3[i]:jndx3[i]],k3b[i])\n # ~~> New connectivity JKLE\n JKLE[j] = [IKLE[i][0],IPOIN+k1,IPOIN+k3]\n JKLE[j+1] = [IKLE[i][1],IPOIN+k2,IPOIN+k1]\n JKLE[j+2] = [IKLE[i][2],IPOIN+k3,IPOIN+k2]\n JKLE[j+3] = [IPOIN+k1,IPOIN+k2,IPOIN+k3]\n # ~~> New interpolation references for values and coordinates\n MESHJ[k1] = [IKLE[i][0],IKLE[i][1]]\n MESHJ[k2] = [IKLE[i][1],IKLE[i][2]]\n MESHJ[k3] = [IKLE[i][2],IKLE[i][0]]\n j += 4\n\n # ~~> Reset IPOBO while you are at it\n MESHX = np.resize(MESHX,IPOIN+IEDGE)\n MESHY = np.resize(MESHY,IPOIN+IEDGE)\n MESHX[IPOIN:] = np.sum(MESHX[MESHJ],axis=1)/2.\n MESHY[IPOIN:] = np.sum(MESHY[MESHJ],axis=1)/2.\n neighbours = Triangulation(MESHX,MESHY,JKLE).get_cpp_triangulation().get_neighbors()\n JPOBO = np.zeros(IPOIN+IEDGE,np.int)\n for n in range(IELEM*4):\n s1,s2,s3 = neighbours[n]\n e1,e2,e3 = JKLE[n]\n if s1 < 0:\n JPOBO[e1] = e1+1\n JPOBO[e2] = e2+1\n if s2 < 0:\n JPOBO[e2] = e2+1\n JPOBO[e3] = e3+1\n if s3 < 0:\n JPOBO[e3] = e3+1\n JPOBO[e1] = e1+1\n\n return JKLE,MESHX,MESHY,JPOBO,MESHJ",
"def test_half_case(self):\n steps = save_divide(np.ones(2), 2 * np.ones(2))\n np.testing.assert_equal(steps, 0.5 * np.ones(2))"
]
| [
"0.6167299",
"0.6115459",
"0.60603625",
"0.5985592",
"0.5906559",
"0.5905191",
"0.58676517",
"0.5816498",
"0.5809439",
"0.58079594",
"0.5807654",
"0.57482004",
"0.5687916",
"0.5656078",
"0.56546664",
"0.557186",
"0.5525966",
"0.5501036",
"0.54868674",
"0.5477152",
"0.5476477",
"0.54738337",
"0.5465427",
"0.5459868",
"0.5457826",
"0.5450082",
"0.54498714",
"0.5425154",
"0.5419777",
"0.5402831"
]
| 0.63332564 | 0 |
Testing M4 remeshing formula in 2D, 2 kernels, single precision, o2_FullHalf splitting. | def test_2D_m4_2k_sFH():
scal, velo = setup_2D()
advec = Advection(velo, scal, discretization=d2d,
method={TimeIntegrator: RK2,
Interpolation: Linear,
Remesh: L2_1,
Support: 'gpu_2k',
Splitting: 'o2_FullHalf'}
)
advec_py = Advection(velo, scal, discretization=d2d,
method={TimeIntegrator: RK2,
Interpolation: Linear,
Remesh: L2_1,
Support: '',
Splitting: 'o2_FullHalf'}
)
assertion_2D_withPython(scal, velo, advec, advec_py) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_2D_m4_1k_sFH():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: 'gpu_1k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def test_2D_m4_2k():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: 'gpu_2k',\n Splitting: 'o2'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: '',\n Splitting: 'o2'},\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def test_2D_m4_1k():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: 'gpu_1k',\n Splitting: 'o2'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: '',\n Splitting: 'o2'},\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def test_multiple_case(self):\r\n\r\n shp = (3, 3)\r\n fx, fy, fz, fw = fmatrices('xyzw')\r\n dx, dy, dz, dw = dmatrices('xyzw')\r\n fv = fvector('r').dimshuffle('x', 0)\r\n dv = dvector('s').dimshuffle('x', 0)\r\n fxv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fyv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fzv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fwv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float32').reshape(1, shp[0])\r\n dxv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dyv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dzv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dwv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float64').reshape(1, shp[0])\r\n\r\n #We must be sure that the Canonizer is working, but that we don't have other\r\n # optimisation that could hide bug in the Canonizer as local_elemwise_fusion\r\n mode = compile.mode.get_default_mode()\r\n old_optimizer = mode._optimizer\r\n try:\r\n mode._optimizer = gof.Query([\"canonicalize\"])\r\n mode._optimizer = mode._optimizer.including('ShapeOpt')\r\n mode._optimizer = mode._optimizer.excluding(\r\n 'local_elemwise_fusion')\r\n\r\n #test x / x -> 1\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([(fx/fx,[fx],[fxv],'float32'),\r\n (dx/dx,[dx],[dxv],'float64'),\r\n (fv/fv,[fv],[fvv],'float32'),\r\n (dv/dv,[dv],[dvv],'float64'),\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert (out == numpy.ones(shp, dtype=out_dtype)).all()\r\n topo = f.maker.fgraph.toposort()\r\n if sym_inputs[0].broadcastable[0]:\r\n assert len(topo) == 2\r\n assert isinstance(topo[0].op, Shape_i)\r\n assert isinstance(topo[1].op, tensor.Alloc)\r\n else:\r\n assert len(topo) == 3\r\n assert isinstance(topo[0].op, Shape_i)\r\n assert isinstance(topo[1].op, Shape_i)\r\n assert isinstance(topo[2].op, tensor.Alloc)\r\n assert(out_dtype == out.dtype)\r\n\r\n #test (x * y) / x -> y\r\n for id,(g, sym_inputs, val_inputs, nb_elemwise, out_dtype) in enumerate([\r\n ((dx*dy)/dx,[dx,dy],[dxv,dyv],0,'float64'),\r\n ((fx*fy)/fx,[fx,fy],[fxv,fyv],0,'float32'),\r\n ((dv*dy)/dv,[dv,dy],[dvv,dyv],0,'float64'),\r\n ((fv*fy)/fv,[fv,fy],[fvv,fyv],0,'float32'),\r\n #must broadcast as their is a dimshuffle in the computation\r\n ((dx*dv)/dx,[dx,dv],[dxv,dvv],1,'float64'),\r\n #topo: [Elemwise{second,no_inplace}(x, <TensorType(float64, row)>)]\r\n ((fx*fv)/fx,[fx,fv],[fxv,fvv],1,'float32')\r\n #topo: [Elemwise{second,no_inplace}(x, <TensorType(float32, row)>)]\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert(out_dtype == out.dtype)\r\n assert numpy.allclose(out, val_inputs[1])\r\n topo = f.maker.fgraph.toposort()\r\n print \"ID TOPO\", id, topo, sym_inputs\r\n for r, t in f.maker.fgraph.shape_feature.shape_of.items():\r\n print ' ', r, t\r\n if topo and not(len(topo)==1 and topo[0].op==deep_copy_op):\r\n for node in topo[:-1]:\r\n assert isinstance(node.op, Shape_i)\r\n assert isinstance(topo[-1].op, tensor.Alloc)\r\n\r\n #test x / y / x -> 1 / y\r\n for id,(g, sym_inputs, val_inputs, nb_elemwise, out_dtype) in enumerate([\r\n ((dx/dy)/dx,[dx,dy],[dxv,dyv],1,'float64'),\r\n ((fx/fy)/fx,[fx,fy],[fxv,fyv],1,'float32'),\r\n ((dv/dy)/dv,[dv,dy],[dvv,dyv],1,'float64'),\r\n 
((fv/fy)/fv,[fv,fy],[fvv,fyv],1,'float32'),\r\n #must broadcast as their is a dimshuffle in the computation\r\n\r\n ((dx/dv)/dx,[dx,dv],[dxv,dvv],1,'float64'),\r\n #topo: [Shape_i, Shape_i, Elemwise{inv,no_inplace}(<TensorType(float64, row)>), Alloc]\r\n ((fx/fv)/fx,[fx,fv],[fxv,fvv],1,'float32'),\r\n #topo:[Shape_i, Shape_i, Elemwise{inv,no_inplace}(<TensorType(float32, row)>), Alloc]\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, (1 / val_inputs[1]))\r\n topo = f.maker.fgraph.toposort()\r\n print topo\r\n elem = [t for t in topo if isinstance(t.op, T.Elemwise)]\r\n assert len(elem) == nb_elemwise\r\n assert isinstance(elem[0].op, (T.Elemwise, ))\r\n assert isinstance(elem[0].op.scalar_op, (\r\n theano.scalar.basic.Inv, theano.scalar.basic.TrueDiv))\r\n assert(out_dtype == out.dtype)\r\n\r\n #test (a / b) * (b / c) * (c / d) -> a / d\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n ((dx / dy) * (dy / dz) * (dz / dw),[dx,dy,dz,dw],[dxv,dyv,dzv,dwv],'float64'),\r\n ((fx / fy) * (fy / fz) * (fz / fw),[fx,fy,fz,fw],[fxv,fyv,fzv,fwv],'float32'),\r\n ((dv / dy) * (dy / dz) * (dz / dw),[dv,dy,dz,dw],[dvv,dyv,dzv,dwv],'float64'),\r\n ((fv / fy) * (fy / fz) * (fz / fw),[fv,fy,fz,fw],[fvv,fyv,fzv,fwv],'float32'),\r\n ((dx / dv) * (dv / dz) * (dz / dw),[dx,dv,dz,dw],[dxv,dvv,dzv,dwv],'float64'),\r\n ((fx / fv) * (fv / fz) * (fz / fw),[fx,fv,fz,fw],[fxv,fvv,fzv,fwv],'float32'),\r\n ((dx / dy) * (dy / dv) * (dv / dw),[dx,dy,dv,dw],[dxv,dyv,dvv,dwv],'float64'),\r\n ((fx / fy) * (fy / fv) * (fv / fw),[fx,fy,fv,fw],[fxv,fyv,fvv,fwv],'float32'),\r\n ((dx / dy) * (dy / dz) * (dz / dv),[dx,dy,dz,dv],[dxv,dyv,dzv,dvv],'float64'),\r\n ((fx / fy) * (fy / fz) * (fz / fv),[fx,fy,fz,fv],[fxv,fyv,fzv,fvv],'float32'),\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, (val_inputs[0] / val_inputs[3]))\r\n topo = f.maker.fgraph.toposort()\r\n assert len(topo) == 1\r\n assert isinstance(topo[0].op, (T.Elemwise, ))\r\n assert isinstance(topo[0].op.scalar_op,\r\n theano.scalar.basic.TrueDiv)\r\n assert len(topo[0].inputs) == 2\r\n assert(out_dtype == out.dtype)\r\n\r\n #test (2.0 * x) / (4.0 * y) -> (0.5 * x) / y\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n (((2.0*dx)/(4.0*dy)),[dx,dy],[dxv,dyv],'float64'),\r\n (((2.0*fx)/(4.0*fy)),[fx,fy],[fxv,fyv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n (((2.0*dv)/(4.0*dy)),[dv,dy],[dvv,dyv],'float64'),\r\n (((2.0*fv)/(4.0*fy)),[fv,fy],[fvv,fyv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n (((2.0*dx)/(4.0*dv)),[dx,dv],[dxv,dvv],'float64'),\r\n (((2.0*fx)/(4.0*fv)),[fx,fv],[fxv,fvv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ]):\r\n\r\n if isinstance(out_dtype, dict):\r\n out_dtype = out_dtype[config.cast_policy]\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, (0.5 *\r\n val_inputs[0] / val_inputs[1]))\r\n topo = f.maker.fgraph.toposort()\r\n assert len(topo) == 2\r\n assert isinstance(topo[0].op, (T.Elemwise, ))\r\n assert isinstance(topo[0].op.scalar_op,\r\n theano.scalar.basic.Mul)\r\n assert len(topo[0].inputs) == 2\r\n assert isinstance(topo[1].op, (T.Elemwise, ))\r\n assert isinstance(topo[1].op.scalar_op,\r\n theano.scalar.basic.TrueDiv)\r\n assert len(topo[1].inputs) == 2\r\n assert(out_dtype == 
out.dtype)\r\n\r\n #test 2 * x / 2 -> x\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n ((2*dx)/2,[dx],[dxv],'float64'),\r\n ((2*fx)/2,[fx],[fxv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ((2*dv)/2,[dv],[dvv],'float64'),\r\n ((2*fv)/2,[fv],[fvv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ]):\r\n if isinstance(out_dtype, dict):\r\n out_dtype = out_dtype[config.cast_policy]\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, val_inputs[0])\r\n topo = f.maker.fgraph.toposort()\r\n assert len(topo) == 1\r\n topo[0].op == deep_copy_op\r\n assert(out_dtype == out.dtype)\r\n\r\n #test x / abs(x) -> sign(x)\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n (dx/abs(dx),[dx],[0.5-dxv],'float64'),\r\n (fx/abs(fx),[fx],[0.5-fxv], 'float32'),\r\n (dx/abs(dx),[dx],[0.1*dxv],'float64'),\r\n (fx/abs(fx),[fx],[0.1*fxv], 'float32'),\r\n (dv/abs(dv),[dv],[0.5-dvv],'float64'),\r\n (fv/abs(fv),[fv],[0.5-fvv], 'float32'),\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.all(numpy.isfinite(out))\r\n assert numpy.allclose(out, numpy.sign(val_inputs[0]))\r\n assert(out_dtype == out.dtype)\r\n assert len(f.maker.fgraph.toposort()) == 1\r\n\r\n #test (2*x) / (3*abs(x)) -> sign(x)\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n ((2 * dx) / (3 * abs(dx)), [dx], [0.5 - dxv], 'float64'),\r\n ((2 * fx) / (3 * abs(fx)), [fx], [0.5 - fxv],\r\n {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ((2 * dx) / (3 * abs(dx)), [dx], [0.1 * dxv], 'float64'),\r\n ((2 * fx) / (3 * abs(fx)), [fx], [0.1 * fxv],\r\n {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ((2 * dv) / (3 * abs(dv)), [dv], [0.5 - dvv], 'float64'),\r\n ((2 * fv) / (3 * abs(fv)), [fv], [0.5 - fvv],\r\n {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ]):\r\n\r\n if isinstance(out_dtype, dict):\r\n out_dtype = out_dtype[config.cast_policy]\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n topo = f.maker.fgraph.toposort()\r\n out = f(*val_inputs)\r\n assert numpy.all(numpy.isfinite(out))\r\n assert numpy.allclose(out, numpy.sign(val_inputs[0]) * 2 / 3)\r\n assert(out_dtype == out.dtype)\r\n finally:\r\n mode._optimizer = old_optimizer",
"def test_dmi_uses_unit_length_2dmesh():\n A = 8.78e-12 # J/m\n D = 1.58e-3 # J/m^2\n Ms = 3.84e5 # A/m\n\n energies = []\n\n # unit_lengths 1e-9 and 1 are common, let's throw in an intermediate length\n # just to challenge the system a little:\n for unit_length in (1, 1e-4, 1e-9):\n radius = 200e-9 / unit_length\n maxh = 5e-9 / unit_length\n helical_period = (4 * pi * A / D) / unit_length\n k = 2 * pi / helical_period\n # HF 27 April 2014: The next command fails in dolfin 1.3\n # mesh = df.CircleMesh(df.Point(0, 0), radius, maxh)\n # The actual shape of the domain shouldn't matter for the test,\n # so let's use a Rectangular mesh which should work the same:\n\n nx = ny = int(round(radius / maxh))\n mesh = df.RectangleMesh(df.Point(0, 0), df.Point(radius, radius), nx, ny)\n\n S3 = df.VectorFunctionSpace(mesh, \"CG\", 1, dim=3)\n m_expr = df.Expression((\"0\", \"cos(k * x[0])\", \"sin(k * x[0])\"), k=k, degree=1)\n m = Field(S3, m_expr, name='m')\n dmi = DMI(D)\n Ms_dg = Field(df.FunctionSpace(mesh, 'DG', 0), Ms)\n dmi.setup(m, Ms_dg, unit_length=unit_length)\n energies.append(dmi.compute_energy())\n\n H = df.Function(S3)\n H.vector()[:] = dmi.compute_field()\n print H(0.0, 0.0)\n\n print \"Using unit_length = {}.\".format(unit_length)\n print \"Helical period {}.\".format(helical_period)\n print \"Energy {}.\".format(dmi.compute_energy())\n\n rel_diff_energies = abs(energies[0] - energies[1]) / abs(energies[1])\n print \"Relative difference of energy {}.\".format(rel_diff_energies)\n assert rel_diff_energies < 1e-13\n\n rel_diff_energies2 = abs(energies[0] - energies[2]) / abs(energies[2])\n print \"Relative difference2 of energy {}.\".format(rel_diff_energies2)\n assert rel_diff_energies2 < 1e-13",
"def test_1_2_2D_cube_splits(self):\n check = [(0, 0), (1, 1), (1, 0), (0, 1), (0.5, 0.5), (0.0, 0.5),\n (0.5, 0.0),\n (0.25, 0.25), (1.0, 0.5), (0.5, 1.0), (0.75, 0.75),\n (0.75, 0.25),\n (0.25, 0.75), (0.5, 0.25), (0.25, 0.5), (0.375, 0.375),\n (0.0, 0.25),\n (0.25, 0.0), (0.125, 0.125), (0.125, 0.375), (0.375, 0.125),\n (0.5, 0.75), (0.75, 0.5), (0.625, 0.625), (1.0, 0.75),\n (0.75, 1.0),\n (0.875, 0.875), (0.875, 0.625), (0.625, 0.875), (0.625, 0.375),\n (1.0, 0.25), (0.75, 0.0), (0.875, 0.125), (0.875, 0.375),\n (0.625, 0.125), (0.375, 0.625), (0.0, 0.75), (0.25, 1.0),\n (0.125, 0.875), (0.125, 0.625), (0.375, 0.875)]\n\n nn_checks = {(0, 0): [(0.25, 0.0), (0.0, 0.25), (0.125, 0.125)],\n (0.625, 0.375): [(0.5, 0.5), (0.75, 0.25), (0.75, 0.5),\n (0.5, 0.25)],\n (0, 1): [(0.25, 1.0), (0.125, 0.875),(0.0, 0.75)],\n (0.625, 0.125): [(0.5, 0.0), (0.75, 0.25), (0.75, 0.0),\n (0.5, 0.25)]}\n\n\n init_triangulation(2, 2, check, nn_checks)",
"def test_filter_l2_1():\n box = Box(length=L, origin=O)\n f = Field(box, formula=func, name='f0')\n d_fine = Discretization([513, 513, 513])\n d_coarse = Discretization([257, 257, 257], ghosts=[2, 2, 2])\n op = MultiresolutionFilter(d_in=d_fine, d_out=d_coarse,\n variables={f: d_coarse},\n method={Remesh: L2_1, })\n op.discretize()\n op.setup()\n topo_coarse = op.discreteFields[f].topology\n topo_fine = [t for t in f.discreteFields.keys()\n if not t is topo_coarse][0]\n f.initialize(topo=topo_fine)\n f_out = f.discreteFields[topo_coarse]\n op.apply(simu)\n valid = [npw.zeros(f_out[0].shape), ]\n valid = func(valid, *topo_coarse.mesh.coords)\n assert np.allclose(valid[0][topo_coarse.mesh.iCompute],\n f_out[0][topo_coarse.mesh.iCompute]), \\\n np.max(np.abs(valid[0][topo_coarse.mesh.iCompute] -\n f_out[0][topo_coarse.mesh.iCompute]))",
"def test_3_2_4D_cube_splits(self):\n check = [(0, 0, 0, 0), (1, 1, 1, 1), (1, 0, 0, 0), (1, 1, 0, 0),\n (1, 1, 1, 0),\n (1, 1, 0, 1), (1, 0, 1, 0), (1, 0, 1, 1), (1, 0, 0, 1),\n (0, 1, 0, 0),\n (0, 1, 1, 0), (0, 1, 1, 1), (0, 1, 0, 1), (0, 0, 1, 0),\n (0, 0, 1, 1),\n (0, 0, 0, 1), (0.5, 0.5, 0.5, 0.5), (0.0, 0.5, 0.5, 0.5),\n (0.0, 0.0, 0.5, 0.5), (0.0, 0.0, 0.0, 0.5),\n (0.0, 0.0, 0.5, 0.0),\n (0.0, 0.5, 0.0, 0.5), (0.0, 0.5, 0.0, 0.0),\n (0.0, 0.5, 0.5, 0.0),\n (0.5, 0.0, 0.5, 0.5), (0.5, 0.0, 0.0, 0.5),\n (0.5, 0.0, 0.0, 0.0),\n (0.5, 0.0, 0.5, 0.0), (0.5, 0.5, 0.0, 0.5),\n (0.5, 0.5, 0.0, 0.0),\n (0.5, 0.5, 0.5, 0.0), (0.25, 0.25, 0.25, 0.25),\n (1.0, 0.5, 0.5, 0.5),\n (1.0, 1.0, 0.5, 0.5), (1.0, 1.0, 1.0, 0.5),\n (1.0, 1.0, 0.5, 1.0),\n (1.0, 0.5, 1.0, 0.5), (1.0, 0.5, 1.0, 1.0),\n (1.0, 0.5, 0.5, 1.0),\n (0.5, 1.0, 0.5, 0.5), (0.5, 1.0, 1.0, 0.5),\n (0.5, 1.0, 1.0, 1.0),\n (0.5, 1.0, 0.5, 1.0), (0.5, 0.5, 1.0, 0.5),\n (0.5, 0.5, 1.0, 1.0),\n (0.5, 0.5, 0.5, 1.0), (0.75, 0.75, 0.75, 0.75),\n (1.0, 0.0, 0.5, 0.5),\n (1.0, 0.0, 0.0, 0.5), (1.0, 0.0, 0.5, 0.0),\n (1.0, 0.5, 0.0, 0.5),\n (1.0, 0.5, 0.0, 0.0), (1.0, 0.5, 0.5, 0.0),\n (0.75, 0.25, 0.25, 0.25),\n (1.0, 1.0, 0.0, 0.5), (1.0, 1.0, 0.5, 0.0),\n (0.5, 1.0, 0.0, 0.5),\n (0.5, 1.0, 0.0, 0.0), (0.5, 1.0, 0.5, 0.0),\n (0.75, 0.75, 0.25, 0.25),\n (1.0, 0.5, 1.0, 0.0), (0.5, 1.0, 1.0, 0.0),\n (0.5, 0.5, 1.0, 0.0),\n (0.75, 0.75, 0.75, 0.25), (1.0, 0.5, 0.0, 1.0),\n (0.5, 1.0, 0.0, 1.0),\n (0.5, 0.5, 0.0, 1.0), (0.75, 0.75, 0.25, 0.75),\n (1.0, 0.0, 1.0, 0.5),\n (0.5, 0.0, 1.0, 0.5), (0.5, 0.0, 1.0, 0.0),\n (0.75, 0.25, 0.75, 0.25),\n (1.0, 0.0, 0.5, 1.0), (0.5, 0.0, 1.0, 1.0),\n (0.5, 0.0, 0.5, 1.0),\n (0.75, 0.25, 0.75, 0.75), (0.5, 0.0, 0.0, 1.0),\n (0.75, 0.25, 0.25, 0.75), (0.0, 1.0, 0.5, 0.5),\n (0.0, 1.0, 0.0, 0.5),\n (0.0, 1.0, 0.5, 0.0), (0.25, 0.75, 0.25, 0.25),\n (0.0, 1.0, 1.0, 0.5),\n (0.0, 0.5, 1.0, 0.5), (0.0, 0.5, 1.0, 0.0),\n (0.25, 0.75, 0.75, 0.25),\n (0.0, 1.0, 0.5, 1.0), (0.0, 0.5, 1.0, 1.0),\n (0.0, 0.5, 0.5, 1.0),\n (0.25, 0.75, 0.75, 0.75), (0.0, 0.5, 0.0, 1.0),\n (0.25, 0.75, 0.25, 0.75), (0.0, 0.0, 1.0, 0.5),\n (0.25, 0.25, 0.75, 0.25), (0.0, 0.0, 0.5, 1.0),\n (0.25, 0.25, 0.75, 0.75), (0.25, 0.25, 0.25, 0.75)]\n\n nn_checks = {(0, 0, 0, 0): [(0.0, 0.5, 0.5, 0.0), (0.5, 0.0, 0.5, 0.5),\n (0.5, 0.5, 0.0, 0.5),\n (0.25, 0.25, 0.25, 0.25),\n (0.5, 0.0, 0.0, 0.5), (0.5, 0.0, 0.5, 0.0),\n (0.5, 0.5, 0.0, 0.0), (0.0, 0.0, 0.5, 0.0),\n (0.0, 0.5, 0.0, 0.0), (0.0, 0.0, 0.0, 0.5),\n (0.0, 0.5, 0.5, 0.5), (0.0, 0.0, 0.5, 0.5),\n (0.0, 0.5, 0.0, 0.5), (0.5, 0.0, 0.0, 0.0),\n (0.5, 0.5, 0.5, 0.0)],\n (1.0, 1.0, 0.5, 0.5): [(1.0, 1.0, 0.5, 1.0), (1, 1, 0, 1),\n (1.0, 1.0, 1.0, 0.5),\n (1.0, 0.5, 0.5, 0.5), (1, 1, 1, 0),\n (1.0, 1.0, 0.5, 0.0),\n (1.0, 1.0, 0.0, 0.5), (1, 1, 0, 0),\n (1, 1, 1, 1), (0.5, 1.0, 0.5, 0.5),\n (0.5, 0.5, 0.5, 0.5),\n (0.75, 0.75, 0.75, 0.75),\n (0.75, 0.75, 0.25, 0.25),\n (0.75, 0.75, 0.75, 0.25),\n (0.75, 0.75, 0.25, 0.75)],\n (0.25, 0.25, 0.25, 0.75): [(0.0, 0.0, 0.5, 1.0),\n (0.0, 0.5, 0.0, 1.0),\n (0.5, 0.0, 0.0, 1.0),\n (0.5, 0.5, 0.5, 1.0),\n (0, 0, 0, 1),\n (0.0, 0.5, 0.5, 1.0),\n (0.5, 0.0, 0.5, 1.0),\n (0.5, 0.5, 0.0, 1.0),\n (0.0, 0.0, 0.0, 0.5),\n (0.0, 0.0, 0.5, 0.5),\n (0.0, 0.5, 0.5, 0.5),\n (0.0, 0.5, 0.0, 0.5),\n (0.5, 0.0, 0.5, 0.5),\n (0.5, 0.5, 0.0, 0.5),\n (0.5, 0.5, 0.5, 0.5),\n (0.5, 0.0, 0.0, 0.5)]}\n\n init_triangulation(4, 1, check, nn_checks)",
"def test_2_2_3D_rec_splits(self):\n check = [(-3.0, -2.0, 0.0), (4.0, 10.0, 1.0), (4.0, -2.0, 0.0),\n (4.0, 10.0, 0.0), (4.0, -2.0, 1.0), (-3.0, 10.0, 0.0),\n (-3.0, 10.0, 1.0), (-3.0, -2.0, 1.0), (0.5, 4.0, 0.5),\n (-3.0, 4.0, 0.5), (-3.0, -2.0, 0.5), (-3.0, 4.0, 0.0),\n (0.5, -2.0, 0.5), (0.5, -2.0, 0.0), (0.5, 4.0, 0.0),\n (-1.25, 1.0, 0.25), (4.0, 4.0, 0.5), (4.0, 10.0, 0.5),\n (4.0, 4.0, 1.0), (0.5, 10.0, 0.5), (0.5, 10.0, 1.0),\n (0.5, 4.0, 1.0), (2.25, 7.0, 0.75), (4.0, -2.0, 0.5),\n (4.0, 4.0, 0.0), (2.25, 1.0, 0.25), (0.5, 10.0, 0.0),\n (2.25, 7.0, 0.25), (0.5, -2.0, 1.0), (2.25, 1.0, 0.75),\n (-3.0, 10.0, 0.5), (-1.25, 7.0, 0.25), (-3.0, 4.0, 1.0),\n (-1.25, 7.0, 0.75), (-1.25, 1.0, 0.75), (0.5, 1.0, 0.25),\n (0.5, 4.0, 0.25), (0.5, 1.0, 0.5), (-1.25, 4.0, 0.25),\n (-1.25, 4.0, 0.5), (-1.25, 1.0, 0.5), (-0.375, 2.5, 0.375),\n (-3.0, 1.0, 0.25), (-3.0, -2.0, 0.25), (-3.0, 1.0, 0.0),\n (-1.25, -2.0, 0.25), (-1.25, -2.0, 0.0), (-1.25, 1.0, 0.0),\n (-2.125, -0.5, 0.125), (-3.0, 4.0, 0.25), (-3.0, 1.0, 0.5),\n (-2.125, 2.5, 0.375), (-1.25, -2.0, 0.5),\n (-2.125, -0.5, 0.375), (-1.25, 4.0, 0.0), (-2.125, 2.5, 0.125),\n (0.5, -2.0, 0.25), (-0.375, -0.5, 0.375), (0.5, 1.0, 0.0),\n (-0.375, -0.5, 0.125), (-0.375, 2.5, 0.125), (0.5, 7.0, 0.75),\n (0.5, 4.0, 0.75), (0.5, 7.0, 0.5), (2.25, 4.0, 0.75),\n (2.25, 4.0, 0.5), (2.25, 7.0, 0.5), (1.375, 5.5, 0.625),\n (4.0, 7.0, 0.75), (4.0, 10.0, 0.75), (4.0, 7.0, 1.0),\n (2.25, 10.0, 0.75), (2.25, 10.0, 1.0), (2.25, 7.0, 1.0),\n (3.125, 8.5, 0.875), (4.0, 4.0, 0.75), (4.0, 7.0, 0.5),\n (3.125, 5.5, 0.625), (2.25, 10.0, 0.5), (3.125, 8.5, 0.625),\n (2.25, 4.0, 1.0), (3.125, 5.5, 0.875), (0.5, 10.0, 0.75),\n (1.375, 8.5, 0.625), (0.5, 7.0, 1.0), (1.375, 8.5, 0.875),\n (1.375, 5.5, 0.875), (2.25, 4.0, 0.25), (2.25, 1.0, 0.5),\n (1.375, 2.5, 0.375), (4.0, 1.0, 0.25), (4.0, -2.0, 0.25),\n (4.0, 1.0, 0.0), (2.25, -2.0, 0.25), (2.25, -2.0, 0.0),\n (2.25, 1.0, 0.0), (3.125, -0.5, 0.125), (4.0, 4.0, 0.25),\n (4.0, 1.0, 0.5), (3.125, 2.5, 0.375), (2.25, -2.0, 0.5),\n (3.125, -0.5, 0.375), (2.25, 4.0, 0.0), (3.125, 2.5, 0.125),\n (1.375, -0.5, 0.375), (1.375, -0.5, 0.125),\n (1.375, 2.5, 0.125), (0.5, 7.0, 0.25), (1.375, 5.5, 0.375),\n (4.0, 7.0, 0.25), (4.0, 10.0, 0.25), (4.0, 7.0, 0.0),\n (2.25, 10.0, 0.25), (2.25, 10.0, 0.0), (2.25, 7.0, 0.0),\n (3.125, 8.5, 0.125), (3.125, 5.5, 0.375), (3.125, 8.5, 0.375),\n (3.125, 5.5, 0.125), (0.5, 10.0, 0.25), (1.375, 8.5, 0.375),\n (0.5, 7.0, 0.0), (1.375, 8.5, 0.125), (1.375, 5.5, 0.125),\n (0.5, 1.0, 0.75), (1.375, 2.5, 0.625), (4.0, 1.0, 0.75),\n (4.0, -2.0, 0.75), (4.0, 1.0, 1.0), (2.25, -2.0, 0.75),\n (2.25, -2.0, 1.0), (2.25, 1.0, 1.0), (3.125, -0.5, 0.875),\n (3.125, 2.5, 0.625), (3.125, -0.5, 0.625), (3.125, 2.5, 0.875),\n (0.5, -2.0, 0.75), (1.375, -0.5, 0.625), (0.5, 1.0, 1.0),\n (1.375, -0.5, 0.875), (1.375, 2.5, 0.875), (-1.25, 7.0, 0.5),\n (-0.375, 5.5, 0.375), (-3.0, 7.0, 0.25), (-3.0, 10.0, 0.25),\n (-3.0, 7.0, 0.0), (-1.25, 10.0, 0.25), (-1.25, 10.0, 0.0),\n (-1.25, 7.0, 0.0), (-2.125, 8.5, 0.125), (-3.0, 7.0, 0.5),\n (-2.125, 5.5, 0.375), (-1.25, 10.0, 0.5), (-2.125, 8.5, 0.375),\n (-2.125, 5.5, 0.125), (-0.375, 8.5, 0.375),\n (-0.375, 8.5, 0.125), (-0.375, 5.5, 0.125), (-1.25, 4.0, 0.75),\n (-0.375, 5.5, 0.625), (-3.0, 7.0, 0.75), (-3.0, 10.0, 0.75),\n (-3.0, 7.0, 1.0), (-1.25, 10.0, 0.75), (-1.25, 10.0, 1.0),\n (-1.25, 7.0, 1.0), (-2.125, 8.5, 0.875), (-3.0, 4.0, 0.75),\n (-2.125, 5.5, 0.625), (-2.125, 8.5, 0.625), (-1.25, 4.0, 1.0),\n (-2.125, 5.5, 0.875), (-0.375, 8.5, 0.625),\n (-0.375, 8.5, 
0.875), (-0.375, 5.5, 0.875),\n (-0.375, 2.5, 0.625), (-3.0, 1.0, 0.75), (-3.0, -2.0, 0.75),\n (-3.0, 1.0, 1.0), (-1.25, -2.0, 0.75), (-1.25, -2.0, 1.0),\n (-1.25, 1.0, 1.0), (-2.125, -0.5, 0.875), (-2.125, 2.5, 0.625),\n (-2.125, -0.5, 0.625), (-2.125, 2.5, 0.875),\n (-0.375, -0.5, 0.625), (-0.375, -0.5, 0.875),\n (-0.375, 2.5, 0.875)]\n nn_checks = {(2.25, 7.0, 0.75): [(4.0, 7.0, 0.75), (2.25, 7.0, 1.0),\n (4.0, 7.0, 0.5), (4.0, 7.0, 1.0),\n (4.0, 4.0, 0.75), (1.375, 5.5, 0.875),\n (2.25, 4.0, 1.0), (2.25, 4.0, 0.5),\n (2.25, 4.0, 0.75), (3.125, 8.5, 0.875),\n (3.125, 8.5, 0.625), (4.0, 10.0, 0.75),\n (2.25, 10.0, 1.0), (2.25, 10.0, 0.75),\n (2.25, 10.0, 0.5), (1.375, 8.5, 0.625),\n (1.375, 8.5, 0.875), (0.5, 7.0, 0.75),\n (0.5, 7.0, 0.5), (3.125, 5.5, 0.625),\n (3.125, 5.5, 0.875), (0.5, 10.0, 0.75),\n (0.5, 7.0, 1.0), (0.5, 4.0, 0.75),\n (2.25, 7.0, 0.5), (1.375, 5.5, 0.625)],\n (4.0, -2.0, 0.5): [(4.0, -2.0, 0.75), (4.0, -2.0, 0.25),\n (2.25, 1.0, 0.5), (2.25, -2.0, 0.75),\n (2.25, -2.0, 0.5), (2.25, -2.0, 0.25),\n (4.0, 1.0, 0.25), (4.0, 1.0, 0.75),\n (4.0, 1.0, 0.5), (3.125, -0.5, 0.375),\n (3.125, -0.5, 0.625)],\n (-2.125, -0.5, 0.875): [(-1.25, 1.0, 1.0),\n (-1.25, 1.0, 0.75),\n (-1.25, -2.0, 0.75),\n (-1.25, -2.0, 1.0),\n (-3.0, -2.0, 0.75),\n (-3.0, 1.0, 1.0), (-3, -2, 1),\n (-3.0, 1.0, 0.75)]}\n\n init_triangulation(3, 2, check, nn_checks, bounds=[(-3, 4), (-2, 10), (0, 1)])",
"def test_1_2_2D_rec_splits(self):\n check = [(3.0, -2.0), (7.0, -1.0), (7.0, -2.0), (3.0, -1.0),\n (5.0, -1.5), (3.0, -1.5), (5.0, -2.0), (4.0, -1.75),\n (7.0, -1.5), (5.0, -1.0), (6.0, -1.25), (6.0, -1.75),\n (4.0, -1.25), (5.0, -1.75), (4.0, -1.5), (4.5, -1.625),\n (3.0, -1.75), (4.0, -2.0), (3.5, -1.875), (3.5, -1.625),\n (4.5, -1.875), (5.0, -1.25), (6.0, -1.5), (5.5, -1.375),\n (7.0, -1.25), (6.0, -1.0), (6.5, -1.125), (6.5, -1.375),\n (5.5, -1.125), (5.5, -1.625), (7.0, -1.75), (6.0, -2.0),\n (6.5, -1.875), (6.5, -1.625), (5.5, -1.875), (4.5, -1.375),\n (3.0, -1.25), (4.0, -1.0), (3.5, -1.125), (3.5, -1.375),\n (4.5, -1.125)]\n nn_checks = {(3.0, -2.0): [(3.0, -1.75), (3.5, -1.875), (4.0, -2.0)],\n (5.0, -1.75): [(5.0, -2.0), (5.0, -1.5), (5.5, -1.625),\n (5.5, -1.875), (4.5, -1.625), (6.0, -1.75),\n (4.5, -1.875), (4.0, -1.75)],\n (6.0, -2.0): [(5.0, -2.0), (5.5, -1.875), (6.0, -1.75),\n (6.5, -1.875), (7, -2)],\n (4.5, -1.125): [(5.0, -1.0), (4.0, -1.25), (5.0, -1.25),\n (4.0, -1.0)]}\n\n init_triangulation(2, 2, check, nn_checks, bounds=[(3, 7), (-2, -1)])",
"def test_3_2_4D_rec_splits(self):\n check = [(-2.0, 3.0, -1.0, 3.0), (9.0, 10.0, 1.0, 5.0),\n (9.0, 3.0, -1.0, 3.0), (9.0, 10.0, -1.0, 3.0),\n (9.0, 10.0, 1.0, 3.0), (9.0, 10.0, -1.0, 5.0),\n (9.0, 3.0, 1.0, 3.0), (9.0, 3.0, 1.0, 5.0),\n (9.0, 3.0, -1.0, 5.0), (-2.0, 10.0, -1.0, 3.0),\n (-2.0, 10.0, 1.0, 3.0), (-2.0, 10.0, 1.0, 5.0),\n (-2.0, 10.0, -1.0, 5.0), (-2.0, 3.0, 1.0, 3.0),\n (-2.0, 3.0, 1.0, 5.0), (-2.0, 3.0, -1.0, 5.0),\n (3.5, 6.5, 0.0, 4.0), (-2.0, 6.5, 0.0, 4.0),\n (-2.0, 3.0, 0.0, 4.0), (-2.0, 3.0, -1.0, 4.0),\n (-2.0, 3.0, 0.0, 3.0), (-2.0, 6.5, -1.0, 4.0),\n (-2.0, 6.5, -1.0, 3.0), (-2.0, 6.5, 0.0, 3.0),\n (3.5, 3.0, 0.0, 4.0), (3.5, 3.0, -1.0, 4.0),\n (3.5, 3.0, -1.0, 3.0), (3.5, 3.0, 0.0, 3.0),\n (3.5, 6.5, -1.0, 4.0), (3.5, 6.5, -1.0, 3.0),\n (3.5, 6.5, 0.0, 3.0), (0.75, 4.75, -0.5, 3.5),\n (9.0, 6.5, 0.0, 4.0), (9.0, 10.0, 0.0, 4.0),\n (9.0, 10.0, 1.0, 4.0), (9.0, 10.0, 0.0, 5.0),\n (9.0, 6.5, 1.0, 4.0), (9.0, 6.5, 1.0, 5.0),\n (9.0, 6.5, 0.0, 5.0), (3.5, 10.0, 0.0, 4.0),\n (3.5, 10.0, 1.0, 4.0), (3.5, 10.0, 1.0, 5.0),\n (3.5, 10.0, 0.0, 5.0), (3.5, 6.5, 1.0, 4.0),\n (3.5, 6.5, 1.0, 5.0), (3.5, 6.5, 0.0, 5.0),\n (6.25, 8.25, 0.5, 4.5), (9.0, 3.0, 0.0, 4.0),\n (9.0, 3.0, -1.0, 4.0), (9.0, 3.0, 0.0, 3.0),\n (9.0, 6.5, -1.0, 4.0), (9.0, 6.5, -1.0, 3.0),\n (9.0, 6.5, 0.0, 3.0), (6.25, 4.75, -0.5, 3.5),\n (9.0, 10.0, -1.0, 4.0), (9.0, 10.0, 0.0, 3.0),\n (3.5, 10.0, -1.0, 4.0), (3.5, 10.0, -1.0, 3.0),\n (3.5, 10.0, 0.0, 3.0), (6.25, 8.25, -0.5, 3.5),\n (9.0, 6.5, 1.0, 3.0), (3.5, 10.0, 1.0, 3.0),\n (3.5, 6.5, 1.0, 3.0), (6.25, 8.25, 0.5, 3.5),\n (9.0, 6.5, -1.0, 5.0), (3.5, 10.0, -1.0, 5.0),\n (3.5, 6.5, -1.0, 5.0), (6.25, 8.25, -0.5, 4.5),\n (9.0, 3.0, 1.0, 4.0), (3.5, 3.0, 1.0, 4.0),\n (3.5, 3.0, 1.0, 3.0), (6.25, 4.75, 0.5, 3.5),\n (9.0, 3.0, 0.0, 5.0), (3.5, 3.0, 1.0, 5.0),\n (3.5, 3.0, 0.0, 5.0), (6.25, 4.75, 0.5, 4.5),\n (3.5, 3.0, -1.0, 5.0), (6.25, 4.75, -0.5, 4.5),\n (-2.0, 10.0, 0.0, 4.0), (-2.0, 10.0, -1.0, 4.0),\n (-2.0, 10.0, 0.0, 3.0), (0.75, 8.25, -0.5, 3.5),\n (-2.0, 10.0, 1.0, 4.0), (-2.0, 6.5, 1.0, 4.0),\n (-2.0, 6.5, 1.0, 3.0), (0.75, 8.25, 0.5, 3.5),\n (-2.0, 10.0, 0.0, 5.0), (-2.0, 6.5, 1.0, 5.0),\n (-2.0, 6.5, 0.0, 5.0), (0.75, 8.25, 0.5, 4.5),\n (-2.0, 6.5, -1.0, 5.0), (0.75, 8.25, -0.5, 4.5),\n (-2.0, 3.0, 1.0, 4.0), (0.75, 4.75, 0.5, 3.5),\n (-2.0, 3.0, 0.0, 5.0), (0.75, 4.75, 0.5, 4.5),\n (0.75, 4.75, -0.5, 4.5), (3.5, 4.75, -0.5, 3.5),\n (3.5, 6.5, -0.5, 3.5), (3.5, 6.5, 0.0, 3.5),\n (3.5, 6.5, -0.5, 4.0), (3.5, 4.75, 0.0, 3.5),\n (3.5, 4.75, 0.0, 4.0), (3.5, 4.75, -0.5, 4.0),\n (0.75, 6.5, -0.5, 3.5), (0.75, 6.5, 0.0, 3.5),\n (0.75, 6.5, 0.0, 4.0), (0.75, 6.5, -0.5, 4.0),\n (0.75, 4.75, 0.0, 3.5), (0.75, 4.75, 0.0, 4.0),\n (0.75, 4.75, -0.5, 4.0), (2.125, 5.625, -0.25, 3.75),\n (-2.0, 4.75, -0.5, 3.5), (-2.0, 3.0, -0.5, 3.5),\n (-2.0, 3.0, -1.0, 3.5), (-2.0, 3.0, -0.5, 3.0),\n (-2.0, 4.75, -1.0, 3.5), (-2.0, 4.75, -1.0, 3.0),\n (-2.0, 4.75, -0.5, 3.0), (0.75, 3.0, -0.5, 3.5),\n (0.75, 3.0, -1.0, 3.5), (0.75, 3.0, -1.0, 3.0),\n (0.75, 3.0, -0.5, 3.0), (0.75, 4.75, -1.0, 3.5),\n (0.75, 4.75, -1.0, 3.0), (0.75, 4.75, -0.5, 3.0),\n (-0.625, 3.875, -0.75, 3.25), (-2.0, 6.5, -0.5, 3.5),\n (-2.0, 6.5, 0.0, 3.5), (-2.0, 6.5, -0.5, 4.0),\n (-2.0, 4.75, 0.0, 3.5), (-2.0, 4.75, 0.0, 4.0),\n (-2.0, 4.75, -0.5, 4.0), (-0.625, 5.625, -0.25, 3.75),\n (-2.0, 3.0, 0.0, 3.5), (-2.0, 3.0, -0.5, 4.0),\n (0.75, 3.0, 0.0, 3.5), (0.75, 3.0, 0.0, 4.0),\n (0.75, 3.0, -0.5, 4.0), (-0.625, 3.875, -0.25, 3.75),\n (-2.0, 4.75, -1.0, 4.0), (0.75, 3.0, -1.0, 4.0),\n (0.75, 
4.75, -1.0, 4.0), (-0.625, 3.875, -0.75, 3.75),\n (-2.0, 4.75, 0.0, 3.0), (0.75, 3.0, 0.0, 3.0),\n (0.75, 4.75, 0.0, 3.0), (-0.625, 3.875, -0.25, 3.25),\n (-2.0, 6.5, -1.0, 3.5), (0.75, 6.5, -1.0, 3.5),\n (0.75, 6.5, -1.0, 4.0), (-0.625, 5.625, -0.75, 3.75),\n (-2.0, 6.5, -0.5, 3.0), (0.75, 6.5, -1.0, 3.0),\n (0.75, 6.5, -0.5, 3.0), (-0.625, 5.625, -0.75, 3.25),\n (0.75, 6.5, 0.0, 3.0), (-0.625, 5.625, -0.25, 3.25),\n (3.5, 3.0, -0.5, 3.5), (3.5, 3.0, 0.0, 3.5),\n (3.5, 3.0, -0.5, 4.0), (2.125, 3.875, -0.25, 3.75),\n (3.5, 3.0, -1.0, 3.5), (3.5, 4.75, -1.0, 3.5),\n (3.5, 4.75, -1.0, 4.0), (2.125, 3.875, -0.75, 3.75),\n (3.5, 3.0, -0.5, 3.0), (3.5, 4.75, -1.0, 3.0),\n (3.5, 4.75, -0.5, 3.0), (2.125, 3.875, -0.75, 3.25),\n (3.5, 4.75, 0.0, 3.0), (2.125, 3.875, -0.25, 3.25),\n (3.5, 6.5, -1.0, 3.5), (2.125, 5.625, -0.75, 3.75),\n (3.5, 6.5, -0.5, 3.0), (2.125, 5.625, -0.75, 3.25),\n (2.125, 5.625, -0.25, 3.25), (3.5, 8.25, 0.5, 4.5),\n (3.5, 6.5, 0.5, 4.5), (3.5, 6.5, 0.0, 4.5),\n (3.5, 6.5, 0.5, 4.0), (3.5, 8.25, 0.0, 4.5),\n (3.5, 8.25, 0.0, 4.0), (3.5, 8.25, 0.5, 4.0),\n (6.25, 6.5, 0.5, 4.5), (6.25, 6.5, 0.0, 4.5),\n (6.25, 6.5, 0.0, 4.0), (6.25, 6.5, 0.5, 4.0),\n (6.25, 8.25, 0.0, 4.5), (6.25, 8.25, 0.0, 4.0),\n (6.25, 8.25, 0.5, 4.0), (4.875, 7.375, 0.25, 4.25),\n (9.0, 8.25, 0.5, 4.5), (9.0, 10.0, 0.5, 4.5),\n (9.0, 10.0, 1.0, 4.5), (9.0, 10.0, 0.5, 5.0),\n (9.0, 8.25, 1.0, 4.5), (9.0, 8.25, 1.0, 5.0),\n (9.0, 8.25, 0.5, 5.0), (6.25, 10.0, 0.5, 4.5),\n (6.25, 10.0, 1.0, 4.5), (6.25, 10.0, 1.0, 5.0),\n (6.25, 10.0, 0.5, 5.0), (6.25, 8.25, 1.0, 4.5),\n (6.25, 8.25, 1.0, 5.0), (6.25, 8.25, 0.5, 5.0),\n (7.625, 9.125, 0.75, 4.75), (9.0, 6.5, 0.5, 4.5),\n (9.0, 6.5, 0.0, 4.5), (9.0, 6.5, 0.5, 4.0),\n (9.0, 8.25, 0.0, 4.5), (9.0, 8.25, 0.0, 4.0),\n (9.0, 8.25, 0.5, 4.0), (7.625, 7.375, 0.25, 4.25),\n (9.0, 10.0, 0.0, 4.5), (9.0, 10.0, 0.5, 4.0),\n (6.25, 10.0, 0.0, 4.5), (6.25, 10.0, 0.0, 4.0),\n (6.25, 10.0, 0.5, 4.0), (7.625, 9.125, 0.25, 4.25),\n (9.0, 8.25, 1.0, 4.0), (6.25, 10.0, 1.0, 4.0),\n (6.25, 8.25, 1.0, 4.0), (7.625, 9.125, 0.75, 4.25),\n (9.0, 8.25, 0.0, 5.0), (6.25, 10.0, 0.0, 5.0),\n (6.25, 8.25, 0.0, 5.0), (7.625, 9.125, 0.25, 4.75),\n (9.0, 6.5, 1.0, 4.5), (6.25, 6.5, 1.0, 4.5),\n (6.25, 6.5, 1.0, 4.0), (7.625, 7.375, 0.75, 4.25),\n (9.0, 6.5, 0.5, 5.0), (6.25, 6.5, 1.0, 5.0),\n (6.25, 6.5, 0.5, 5.0), (7.625, 7.375, 0.75, 4.75),\n (6.25, 6.5, 0.0, 5.0), (7.625, 7.375, 0.25, 4.75),\n (3.5, 10.0, 0.5, 4.5), (3.5, 10.0, 0.0, 4.5),\n (3.5, 10.0, 0.5, 4.0), (4.875, 9.125, 0.25, 4.25),\n (3.5, 10.0, 1.0, 4.5), (3.5, 8.25, 1.0, 4.5),\n (3.5, 8.25, 1.0, 4.0), (4.875, 9.125, 0.75, 4.25),\n (3.5, 10.0, 0.5, 5.0), (3.5, 8.25, 1.0, 5.0),\n (3.5, 8.25, 0.5, 5.0), (4.875, 9.125, 0.75, 4.75),\n (3.5, 8.25, 0.0, 5.0), (4.875, 9.125, 0.25, 4.75),\n (3.5, 6.5, 1.0, 4.5), (4.875, 7.375, 0.75, 4.25),\n (3.5, 6.5, 0.5, 5.0), (4.875, 7.375, 0.75, 4.75),\n (4.875, 7.375, 0.25, 4.75), (6.25, 6.5, -0.5, 3.5),\n (6.25, 6.5, 0.0, 3.5), (6.25, 6.5, -0.5, 4.0),\n (6.25, 4.75, 0.0, 3.5), (6.25, 4.75, 0.0, 4.0),\n (6.25, 4.75, -0.5, 4.0), (4.875, 5.625, -0.25, 3.75),\n (9.0, 4.75, -0.5, 3.5), (9.0, 3.0, -0.5, 3.5),\n (9.0, 3.0, -1.0, 3.5), (9.0, 3.0, -0.5, 3.0),\n (9.0, 4.75, -1.0, 3.5), (9.0, 4.75, -1.0, 3.0),\n (9.0, 4.75, -0.5, 3.0), (6.25, 3.0, -0.5, 3.5),\n (6.25, 3.0, -1.0, 3.5), (6.25, 3.0, -1.0, 3.0),\n (6.25, 3.0, -0.5, 3.0), (6.25, 4.75, -1.0, 3.5),\n (6.25, 4.75, -1.0, 3.0), (6.25, 4.75, -0.5, 3.0),\n (7.625, 3.875, -0.75, 3.25), (9.0, 6.5, -0.5, 3.5),\n (9.0, 6.5, 0.0, 3.5), (9.0, 6.5, -0.5, 
4.0),\n (9.0, 4.75, 0.0, 3.5), (9.0, 4.75, 0.0, 4.0),\n (9.0, 4.75, -0.5, 4.0), (7.625, 5.625, -0.25, 3.75),\n (9.0, 3.0, 0.0, 3.5), (9.0, 3.0, -0.5, 4.0),\n (6.25, 3.0, 0.0, 3.5), (6.25, 3.0, 0.0, 4.0),\n (6.25, 3.0, -0.5, 4.0), (7.625, 3.875, -0.25, 3.75),\n (9.0, 4.75, -1.0, 4.0), (6.25, 3.0, -1.0, 4.0),\n (6.25, 4.75, -1.0, 4.0), (7.625, 3.875, -0.75, 3.75),\n (9.0, 4.75, 0.0, 3.0), (6.25, 3.0, 0.0, 3.0),\n (6.25, 4.75, 0.0, 3.0), (7.625, 3.875, -0.25, 3.25),\n (9.0, 6.5, -1.0, 3.5), (6.25, 6.5, -1.0, 3.5),\n (6.25, 6.5, -1.0, 4.0), (7.625, 5.625, -0.75, 3.75),\n (9.0, 6.5, -0.5, 3.0), (6.25, 6.5, -1.0, 3.0),\n (6.25, 6.5, -0.5, 3.0), (7.625, 5.625, -0.75, 3.25),\n (6.25, 6.5, 0.0, 3.0), (7.625, 5.625, -0.25, 3.25),\n (4.875, 3.875, -0.25, 3.75), (4.875, 3.875, -0.75, 3.75),\n (4.875, 3.875, -0.75, 3.25), (4.875, 3.875, -0.25, 3.25),\n (4.875, 5.625, -0.75, 3.75), (4.875, 5.625, -0.75, 3.25),\n (4.875, 5.625, -0.25, 3.25), (3.5, 8.25, -0.5, 3.5),\n (3.5, 8.25, 0.0, 3.5), (3.5, 8.25, -0.5, 4.0),\n (6.25, 8.25, 0.0, 3.5), (6.25, 8.25, -0.5, 4.0),\n (4.875, 7.375, -0.25, 3.75), (9.0, 8.25, -0.5, 3.5),\n (9.0, 10.0, -0.5, 3.5), (9.0, 10.0, -1.0, 3.5),\n (9.0, 10.0, -0.5, 3.0), (9.0, 8.25, -1.0, 3.5),\n (9.0, 8.25, -1.0, 3.0), (9.0, 8.25, -0.5, 3.0),\n (6.25, 10.0, -0.5, 3.5), (6.25, 10.0, -1.0, 3.5),\n (6.25, 10.0, -1.0, 3.0), (6.25, 10.0, -0.5, 3.0),\n (6.25, 8.25, -1.0, 3.5), (6.25, 8.25, -1.0, 3.0),\n (6.25, 8.25, -0.5, 3.0), (7.625, 9.125, -0.75, 3.25),\n (9.0, 8.25, 0.0, 3.5), (9.0, 8.25, -0.5, 4.0),\n (7.625, 7.375, -0.25, 3.75), (9.0, 10.0, 0.0, 3.5),\n (9.0, 10.0, -0.5, 4.0), (6.25, 10.0, 0.0, 3.5),\n (6.25, 10.0, -0.5, 4.0), (7.625, 9.125, -0.25, 3.75),\n (9.0, 8.25, -1.0, 4.0), (6.25, 10.0, -1.0, 4.0),\n (6.25, 8.25, -1.0, 4.0), (7.625, 9.125, -0.75, 3.75),\n (9.0, 8.25, 0.0, 3.0), (6.25, 10.0, 0.0, 3.0),\n (6.25, 8.25, 0.0, 3.0), (7.625, 9.125, -0.25, 3.25),\n (7.625, 7.375, -0.75, 3.75), (7.625, 7.375, -0.75, 3.25),\n (7.625, 7.375, -0.25, 3.25), (3.5, 10.0, -0.5, 3.5),\n (3.5, 10.0, 0.0, 3.5), (3.5, 10.0, -0.5, 4.0),\n (4.875, 9.125, -0.25, 3.75), (3.5, 10.0, -1.0, 3.5),\n (3.5, 8.25, -1.0, 3.5), (3.5, 8.25, -1.0, 4.0),\n (4.875, 9.125, -0.75, 3.75), (3.5, 10.0, -0.5, 3.0),\n (3.5, 8.25, -1.0, 3.0), (3.5, 8.25, -0.5, 3.0),\n (4.875, 9.125, -0.75, 3.25), (3.5, 8.25, 0.0, 3.0),\n (4.875, 9.125, -0.25, 3.25), (4.875, 7.375, -0.75, 3.75),\n (4.875, 7.375, -0.75, 3.25), (4.875, 7.375, -0.25, 3.25),\n (3.5, 8.25, 0.5, 3.5), (3.5, 6.5, 0.5, 3.5),\n (6.25, 6.5, 0.5, 3.5), (4.875, 7.375, 0.25, 3.75),\n (9.0, 8.25, 0.5, 3.5), (9.0, 10.0, 0.5, 3.5),\n (9.0, 10.0, 1.0, 3.5), (9.0, 10.0, 0.5, 3.0),\n (9.0, 8.25, 1.0, 3.5), (9.0, 8.25, 1.0, 3.0),\n (9.0, 8.25, 0.5, 3.0), (6.25, 10.0, 0.5, 3.5),\n (6.25, 10.0, 1.0, 3.5), (6.25, 10.0, 1.0, 3.0),\n (6.25, 10.0, 0.5, 3.0), (6.25, 8.25, 1.0, 3.5),\n (6.25, 8.25, 1.0, 3.0), (6.25, 8.25, 0.5, 3.0),\n (7.625, 9.125, 0.75, 3.25), (9.0, 6.5, 0.5, 3.5),\n (7.625, 7.375, 0.25, 3.75), (7.625, 9.125, 0.25, 3.75),\n (7.625, 9.125, 0.75, 3.75), (7.625, 9.125, 0.25, 3.25),\n (9.0, 6.5, 1.0, 3.5), (6.25, 6.5, 1.0, 3.5),\n (7.625, 7.375, 0.75, 3.75), (9.0, 6.5, 0.5, 3.0),\n (6.25, 6.5, 1.0, 3.0), (6.25, 6.5, 0.5, 3.0),\n (7.625, 7.375, 0.75, 3.25), (7.625, 7.375, 0.25, 3.25),\n (3.5, 10.0, 0.5, 3.5), (4.875, 9.125, 0.25, 3.75),\n (3.5, 10.0, 1.0, 3.5), (3.5, 8.25, 1.0, 3.5),\n (4.875, 9.125, 0.75, 3.75), (3.5, 10.0, 0.5, 3.0),\n (3.5, 8.25, 1.0, 3.0), (3.5, 8.25, 0.5, 3.0),\n (4.875, 9.125, 0.75, 3.25), (4.875, 9.125, 0.25, 3.25),\n (3.5, 6.5, 1.0, 3.5), 
(4.875, 7.375, 0.75, 3.75),\n (3.5, 6.5, 0.5, 3.0), (4.875, 7.375, 0.75, 3.25),\n (4.875, 7.375, 0.25, 3.25), (3.5, 8.25, -0.5, 4.5),\n (3.5, 6.5, -0.5, 4.5), (6.25, 6.5, -0.5, 4.5),\n (4.875, 7.375, -0.25, 4.25), (9.0, 8.25, -0.5, 4.5),\n (9.0, 10.0, -0.5, 4.5), (9.0, 10.0, -1.0, 4.5),\n (9.0, 10.0, -0.5, 5.0), (9.0, 8.25, -1.0, 4.5),\n (9.0, 8.25, -1.0, 5.0), (9.0, 8.25, -0.5, 5.0),\n (6.25, 10.0, -0.5, 4.5), (6.25, 10.0, -1.0, 4.5),\n (6.25, 10.0, -1.0, 5.0), (6.25, 10.0, -0.5, 5.0),\n (6.25, 8.25, -1.0, 4.5), (6.25, 8.25, -1.0, 5.0),\n (6.25, 8.25, -0.5, 5.0), (7.625, 9.125, -0.75, 4.75),\n (9.0, 6.5, -0.5, 4.5), (7.625, 7.375, -0.25, 4.25),\n (7.625, 9.125, -0.25, 4.25), (7.625, 9.125, -0.75, 4.25),\n (7.625, 9.125, -0.25, 4.75), (9.0, 6.5, -1.0, 4.5),\n (6.25, 6.5, -1.0, 4.5), (7.625, 7.375, -0.75, 4.25),\n (9.0, 6.5, -0.5, 5.0), (6.25, 6.5, -1.0, 5.0),\n (6.25, 6.5, -0.5, 5.0), (7.625, 7.375, -0.75, 4.75),\n (7.625, 7.375, -0.25, 4.75), (3.5, 10.0, -0.5, 4.5),\n (4.875, 9.125, -0.25, 4.25), (3.5, 10.0, -1.0, 4.5),\n (3.5, 8.25, -1.0, 4.5), (4.875, 9.125, -0.75, 4.25),\n (3.5, 10.0, -0.5, 5.0), (3.5, 8.25, -1.0, 5.0),\n (3.5, 8.25, -0.5, 5.0), (4.875, 9.125, -0.75, 4.75),\n (4.875, 9.125, -0.25, 4.75), (3.5, 6.5, -1.0, 4.5),\n (4.875, 7.375, -0.75, 4.25), (3.5, 6.5, -0.5, 5.0),\n (4.875, 7.375, -0.75, 4.75), (4.875, 7.375, -0.25, 4.75),\n (3.5, 4.75, 0.5, 3.5), (3.5, 4.75, 0.5, 4.0),\n (6.25, 4.75, 0.5, 4.0), (4.875, 5.625, 0.25, 3.75),\n (9.0, 4.75, 0.5, 3.5), (9.0, 3.0, 0.5, 3.5),\n (9.0, 3.0, 1.0, 3.5), (9.0, 3.0, 0.5, 3.0),\n (9.0, 4.75, 1.0, 3.5), (9.0, 4.75, 1.0, 3.0),\n (9.0, 4.75, 0.5, 3.0), (6.25, 3.0, 0.5, 3.5),\n (6.25, 3.0, 1.0, 3.5), (6.25, 3.0, 1.0, 3.0),\n (6.25, 3.0, 0.5, 3.0), (6.25, 4.75, 1.0, 3.5),\n (6.25, 4.75, 1.0, 3.0), (6.25, 4.75, 0.5, 3.0),\n (7.625, 3.875, 0.75, 3.25), (9.0, 4.75, 0.5, 4.0),\n (7.625, 5.625, 0.25, 3.75), (9.0, 3.0, 0.5, 4.0),\n (6.25, 3.0, 0.5, 4.0), (7.625, 3.875, 0.25, 3.75),\n (9.0, 4.75, 1.0, 4.0), (6.25, 3.0, 1.0, 4.0),\n (6.25, 4.75, 1.0, 4.0), (7.625, 3.875, 0.75, 3.75),\n (7.625, 3.875, 0.25, 3.25), (7.625, 5.625, 0.75, 3.75),\n (7.625, 5.625, 0.75, 3.25), (7.625, 5.625, 0.25, 3.25),\n (3.5, 3.0, 0.5, 3.5), (3.5, 3.0, 0.5, 4.0),\n (4.875, 3.875, 0.25, 3.75), (3.5, 3.0, 1.0, 3.5),\n (3.5, 4.75, 1.0, 3.5), (3.5, 4.75, 1.0, 4.0),\n (4.875, 3.875, 0.75, 3.75), (3.5, 3.0, 0.5, 3.0),\n (3.5, 4.75, 1.0, 3.0), (3.5, 4.75, 0.5, 3.0),\n (4.875, 3.875, 0.75, 3.25), (4.875, 3.875, 0.25, 3.25),\n (4.875, 5.625, 0.75, 3.75), (4.875, 5.625, 0.75, 3.25),\n (4.875, 5.625, 0.25, 3.25), (3.5, 4.75, 0.5, 4.5),\n (3.5, 4.75, 0.0, 4.5), (6.25, 4.75, 0.0, 4.5),\n (4.875, 5.625, 0.25, 4.25), (9.0, 4.75, 0.5, 4.5),\n (9.0, 3.0, 0.5, 4.5), (9.0, 3.0, 1.0, 4.5),\n (9.0, 3.0, 0.5, 5.0), (9.0, 4.75, 1.0, 4.5),\n (9.0, 4.75, 1.0, 5.0), (9.0, 4.75, 0.5, 5.0),\n (6.25, 3.0, 0.5, 4.5), (6.25, 3.0, 1.0, 4.5),\n (6.25, 3.0, 1.0, 5.0), (6.25, 3.0, 0.5, 5.0),\n (6.25, 4.75, 1.0, 4.5), (6.25, 4.75, 1.0, 5.0),\n (6.25, 4.75, 0.5, 5.0), (7.625, 3.875, 0.75, 4.75),\n (9.0, 4.75, 0.0, 4.5), (7.625, 5.625, 0.25, 4.25),\n (9.0, 3.0, 0.0, 4.5), (6.25, 3.0, 0.0, 4.5),\n (7.625, 3.875, 0.25, 4.25), (7.625, 3.875, 0.75, 4.25),\n (9.0, 4.75, 0.0, 5.0), (6.25, 3.0, 0.0, 5.0),\n (6.25, 4.75, 0.0, 5.0), (7.625, 3.875, 0.25, 4.75),\n (7.625, 5.625, 0.75, 4.25), (7.625, 5.625, 0.75, 4.75),\n (7.625, 5.625, 0.25, 4.75), (3.5, 3.0, 0.5, 4.5),\n (3.5, 3.0, 0.0, 4.5), (4.875, 3.875, 0.25, 4.25),\n (3.5, 3.0, 1.0, 4.5), (3.5, 4.75, 1.0, 4.5),\n (4.875, 3.875, 0.75, 4.25), (3.5, 3.0, 0.5, 
5.0),\n (3.5, 4.75, 1.0, 5.0), (3.5, 4.75, 0.5, 5.0),\n (4.875, 3.875, 0.75, 4.75), (3.5, 4.75, 0.0, 5.0),\n (4.875, 3.875, 0.25, 4.75), (4.875, 5.625, 0.75, 4.25),\n (4.875, 5.625, 0.75, 4.75), (4.875, 5.625, 0.25, 4.75),\n (3.5, 4.75, -0.5, 4.5), (4.875, 5.625, -0.25, 4.25),\n (9.0, 4.75, -0.5, 4.5), (9.0, 3.0, -0.5, 4.5),\n (9.0, 3.0, -1.0, 4.5), (9.0, 3.0, -0.5, 5.0),\n (9.0, 4.75, -1.0, 4.5), (9.0, 4.75, -1.0, 5.0),\n (9.0, 4.75, -0.5, 5.0), (6.25, 3.0, -0.5, 4.5),\n (6.25, 3.0, -1.0, 4.5), (6.25, 3.0, -1.0, 5.0),\n (6.25, 3.0, -0.5, 5.0), (6.25, 4.75, -1.0, 4.5),\n (6.25, 4.75, -1.0, 5.0), (6.25, 4.75, -0.5, 5.0),\n (7.625, 3.875, -0.75, 4.75), (7.625, 5.625, -0.25, 4.25),\n (7.625, 3.875, -0.25, 4.25), (7.625, 3.875, -0.75, 4.25),\n (7.625, 3.875, -0.25, 4.75), (7.625, 5.625, -0.75, 4.25),\n (7.625, 5.625, -0.75, 4.75), (7.625, 5.625, -0.25, 4.75),\n (3.5, 3.0, -0.5, 4.5), (4.875, 3.875, -0.25, 4.25),\n (3.5, 3.0, -1.0, 4.5), (3.5, 4.75, -1.0, 4.5),\n (4.875, 3.875, -0.75, 4.25), (3.5, 3.0, -0.5, 5.0),\n (3.5, 4.75, -1.0, 5.0), (3.5, 4.75, -0.5, 5.0),\n (4.875, 3.875, -0.75, 4.75), (4.875, 3.875, -0.25, 4.75),\n (4.875, 5.625, -0.75, 4.25), (4.875, 5.625, -0.75, 4.75),\n (4.875, 5.625, -0.25, 4.75), (0.75, 8.25, 0.0, 3.5),\n (0.75, 8.25, 0.0, 4.0), (0.75, 8.25, -0.5, 4.0),\n (2.125, 7.375, -0.25, 3.75), (-2.0, 8.25, -0.5, 3.5),\n (-2.0, 10.0, -0.5, 3.5), (-2.0, 10.0, -1.0, 3.5),\n (-2.0, 10.0, -0.5, 3.0), (-2.0, 8.25, -1.0, 3.5),\n (-2.0, 8.25, -1.0, 3.0), (-2.0, 8.25, -0.5, 3.0),\n (0.75, 10.0, -0.5, 3.5), (0.75, 10.0, -1.0, 3.5),\n (0.75, 10.0, -1.0, 3.0), (0.75, 10.0, -0.5, 3.0),\n (0.75, 8.25, -1.0, 3.5), (0.75, 8.25, -1.0, 3.0),\n (0.75, 8.25, -0.5, 3.0), (-0.625, 9.125, -0.75, 3.25),\n (-2.0, 8.25, 0.0, 3.5), (-2.0, 8.25, 0.0, 4.0),\n (-2.0, 8.25, -0.5, 4.0), (-0.625, 7.375, -0.25, 3.75),\n (-2.0, 10.0, 0.0, 3.5), (-2.0, 10.0, -0.5, 4.0),\n (0.75, 10.0, 0.0, 3.5), (0.75, 10.0, 0.0, 4.0),\n (0.75, 10.0, -0.5, 4.0), (-0.625, 9.125, -0.25, 3.75),\n (-2.0, 8.25, -1.0, 4.0), (0.75, 10.0, -1.0, 4.0),\n (0.75, 8.25, -1.0, 4.0), (-0.625, 9.125, -0.75, 3.75),\n (-2.0, 8.25, 0.0, 3.0), (0.75, 10.0, 0.0, 3.0),\n (0.75, 8.25, 0.0, 3.0), (-0.625, 9.125, -0.25, 3.25),\n (-0.625, 7.375, -0.75, 3.75), (-0.625, 7.375, -0.75, 3.25),\n (-0.625, 7.375, -0.25, 3.25), (2.125, 9.125, -0.25, 3.75),\n (2.125, 9.125, -0.75, 3.75), (2.125, 9.125, -0.75, 3.25),\n (2.125, 9.125, -0.25, 3.25), (2.125, 7.375, -0.75, 3.75),\n (2.125, 7.375, -0.75, 3.25), (2.125, 7.375, -0.25, 3.25),\n (0.75, 6.5, 0.5, 3.5), (0.75, 6.5, 0.5, 4.0),\n (0.75, 8.25, 0.5, 4.0), (2.125, 7.375, 0.25, 3.75),\n (-2.0, 8.25, 0.5, 3.5), (-2.0, 10.0, 0.5, 3.5),\n (-2.0, 10.0, 1.0, 3.5), (-2.0, 10.0, 0.5, 3.0),\n (-2.0, 8.25, 1.0, 3.5), (-2.0, 8.25, 1.0, 3.0),\n (-2.0, 8.25, 0.5, 3.0), (0.75, 10.0, 0.5, 3.5),\n (0.75, 10.0, 1.0, 3.5), (0.75, 10.0, 1.0, 3.0),\n (0.75, 10.0, 0.5, 3.0), (0.75, 8.25, 1.0, 3.5),\n (0.75, 8.25, 1.0, 3.0), (0.75, 8.25, 0.5, 3.0),\n (-0.625, 9.125, 0.75, 3.25), (-2.0, 6.5, 0.5, 3.5),\n (-2.0, 6.5, 0.5, 4.0), (-2.0, 8.25, 0.5, 4.0),\n (-0.625, 7.375, 0.25, 3.75), (-2.0, 10.0, 0.5, 4.0),\n (0.75, 10.0, 0.5, 4.0), (-0.625, 9.125, 0.25, 3.75),\n (-2.0, 8.25, 1.0, 4.0), (0.75, 10.0, 1.0, 4.0),\n (0.75, 8.25, 1.0, 4.0), (-0.625, 9.125, 0.75, 3.75),\n (-0.625, 9.125, 0.25, 3.25), (-2.0, 6.5, 1.0, 3.5),\n (0.75, 6.5, 1.0, 3.5), (0.75, 6.5, 1.0, 4.0),\n (-0.625, 7.375, 0.75, 3.75), (-2.0, 6.5, 0.5, 3.0),\n (0.75, 6.5, 1.0, 3.0), (0.75, 6.5, 0.5, 3.0),\n (-0.625, 7.375, 0.75, 3.25), (-0.625, 7.375, 0.25, 3.25),\n (2.125, 
9.125, 0.25, 3.75), (2.125, 9.125, 0.75, 3.75),\n (2.125, 9.125, 0.75, 3.25), (2.125, 9.125, 0.25, 3.25),\n (2.125, 7.375, 0.75, 3.75), (2.125, 7.375, 0.75, 3.25),\n (2.125, 7.375, 0.25, 3.25), (0.75, 6.5, 0.5, 4.5),\n (0.75, 6.5, 0.0, 4.5), (0.75, 8.25, 0.0, 4.5),\n (2.125, 7.375, 0.25, 4.25), (-2.0, 8.25, 0.5, 4.5),\n (-2.0, 10.0, 0.5, 4.5), (-2.0, 10.0, 1.0, 4.5),\n (-2.0, 10.0, 0.5, 5.0), (-2.0, 8.25, 1.0, 4.5),\n (-2.0, 8.25, 1.0, 5.0), (-2.0, 8.25, 0.5, 5.0),\n (0.75, 10.0, 0.5, 4.5), (0.75, 10.0, 1.0, 4.5),\n (0.75, 10.0, 1.0, 5.0), (0.75, 10.0, 0.5, 5.0),\n (0.75, 8.25, 1.0, 4.5), (0.75, 8.25, 1.0, 5.0),\n (0.75, 8.25, 0.5, 5.0), (-0.625, 9.125, 0.75, 4.75),\n (-2.0, 6.5, 0.5, 4.5), (-2.0, 6.5, 0.0, 4.5),\n (-2.0, 8.25, 0.0, 4.5), (-0.625, 7.375, 0.25, 4.25),\n (-2.0, 10.0, 0.0, 4.5), (0.75, 10.0, 0.0, 4.5),\n (-0.625, 9.125, 0.25, 4.25), (-0.625, 9.125, 0.75, 4.25),\n (-2.0, 8.25, 0.0, 5.0), (0.75, 10.0, 0.0, 5.0),\n (0.75, 8.25, 0.0, 5.0), (-0.625, 9.125, 0.25, 4.75),\n (-2.0, 6.5, 1.0, 4.5), (0.75, 6.5, 1.0, 4.5),\n (-0.625, 7.375, 0.75, 4.25), (-2.0, 6.5, 0.5, 5.0),\n (0.75, 6.5, 1.0, 5.0), (0.75, 6.5, 0.5, 5.0),\n (-0.625, 7.375, 0.75, 4.75), (0.75, 6.5, 0.0, 5.0),\n (-0.625, 7.375, 0.25, 4.75), (2.125, 9.125, 0.25, 4.25),\n (2.125, 9.125, 0.75, 4.25), (2.125, 9.125, 0.75, 4.75),\n (2.125, 9.125, 0.25, 4.75), (2.125, 7.375, 0.75, 4.25),\n (2.125, 7.375, 0.75, 4.75), (2.125, 7.375, 0.25, 4.75),\n (0.75, 6.5, -0.5, 4.5), (2.125, 7.375, -0.25, 4.25),\n (-2.0, 8.25, -0.5, 4.5), (-2.0, 10.0, -0.5, 4.5),\n (-2.0, 10.0, -1.0, 4.5), (-2.0, 10.0, -0.5, 5.0),\n (-2.0, 8.25, -1.0, 4.5), (-2.0, 8.25, -1.0, 5.0),\n (-2.0, 8.25, -0.5, 5.0), (0.75, 10.0, -0.5, 4.5),\n (0.75, 10.0, -1.0, 4.5), (0.75, 10.0, -1.0, 5.0),\n (0.75, 10.0, -0.5, 5.0), (0.75, 8.25, -1.0, 4.5),\n (0.75, 8.25, -1.0, 5.0), (0.75, 8.25, -0.5, 5.0),\n (-0.625, 9.125, -0.75, 4.75), (-2.0, 6.5, -0.5, 4.5),\n (-0.625, 7.375, -0.25, 4.25), (-0.625, 9.125, -0.25, 4.25),\n (-0.625, 9.125, -0.75, 4.25), (-0.625, 9.125, -0.25, 4.75),\n (-2.0, 6.5, -1.0, 4.5), (0.75, 6.5, -1.0, 4.5),\n (-0.625, 7.375, -0.75, 4.25), (-2.0, 6.5, -0.5, 5.0),\n (0.75, 6.5, -1.0, 5.0), (0.75, 6.5, -0.5, 5.0),\n (-0.625, 7.375, -0.75, 4.75), (-0.625, 7.375, -0.25, 4.75),\n (2.125, 9.125, -0.25, 4.25), (2.125, 9.125, -0.75, 4.25),\n (2.125, 9.125, -0.75, 4.75), (2.125, 9.125, -0.25, 4.75),\n (2.125, 7.375, -0.75, 4.25), (2.125, 7.375, -0.75, 4.75),\n (2.125, 7.375, -0.25, 4.75), (0.75, 4.75, 0.5, 4.0),\n (2.125, 5.625, 0.25, 3.75), (-2.0, 4.75, 0.5, 3.5),\n (-2.0, 3.0, 0.5, 3.5), (-2.0, 3.0, 1.0, 3.5),\n (-2.0, 3.0, 0.5, 3.0), (-2.0, 4.75, 1.0, 3.5),\n (-2.0, 4.75, 1.0, 3.0), (-2.0, 4.75, 0.5, 3.0),\n (0.75, 3.0, 0.5, 3.5), (0.75, 3.0, 1.0, 3.5),\n (0.75, 3.0, 1.0, 3.0), (0.75, 3.0, 0.5, 3.0),\n (0.75, 4.75, 1.0, 3.5), (0.75, 4.75, 1.0, 3.0),\n (0.75, 4.75, 0.5, 3.0), (-0.625, 3.875, 0.75, 3.25),\n (-2.0, 4.75, 0.5, 4.0), (-0.625, 5.625, 0.25, 3.75),\n (-2.0, 3.0, 0.5, 4.0), (0.75, 3.0, 0.5, 4.0),\n (-0.625, 3.875, 0.25, 3.75), (-2.0, 4.75, 1.0, 4.0),\n (0.75, 3.0, 1.0, 4.0), (0.75, 4.75, 1.0, 4.0),\n (-0.625, 3.875, 0.75, 3.75), (-0.625, 3.875, 0.25, 3.25),\n (-0.625, 5.625, 0.75, 3.75), (-0.625, 5.625, 0.75, 3.25),\n (-0.625, 5.625, 0.25, 3.25), (2.125, 3.875, 0.25, 3.75),\n (2.125, 3.875, 0.75, 3.75), (2.125, 3.875, 0.75, 3.25),\n (2.125, 3.875, 0.25, 3.25), (2.125, 5.625, 0.75, 3.75),\n (2.125, 5.625, 0.75, 3.25), (2.125, 5.625, 0.25, 3.25),\n (0.75, 4.75, 0.0, 4.5), (2.125, 5.625, 0.25, 4.25),\n (-2.0, 4.75, 0.5, 4.5), (-2.0, 3.0, 0.5, 4.5),\n 
(-2.0, 3.0, 1.0, 4.5), (-2.0, 3.0, 0.5, 5.0),\n (-2.0, 4.75, 1.0, 4.5), (-2.0, 4.75, 1.0, 5.0),\n (-2.0, 4.75, 0.5, 5.0), (0.75, 3.0, 0.5, 4.5),\n (0.75, 3.0, 1.0, 4.5), (0.75, 3.0, 1.0, 5.0),\n (0.75, 3.0, 0.5, 5.0), (0.75, 4.75, 1.0, 4.5),\n (0.75, 4.75, 1.0, 5.0), (0.75, 4.75, 0.5, 5.0),\n (-0.625, 3.875, 0.75, 4.75), (-2.0, 4.75, 0.0, 4.5),\n (-0.625, 5.625, 0.25, 4.25), (-2.0, 3.0, 0.0, 4.5),\n (0.75, 3.0, 0.0, 4.5), (-0.625, 3.875, 0.25, 4.25),\n (-0.625, 3.875, 0.75, 4.25), (-2.0, 4.75, 0.0, 5.0),\n (0.75, 3.0, 0.0, 5.0), (0.75, 4.75, 0.0, 5.0),\n (-0.625, 3.875, 0.25, 4.75), (-0.625, 5.625, 0.75, 4.25),\n (-0.625, 5.625, 0.75, 4.75), (-0.625, 5.625, 0.25, 4.75),\n (2.125, 3.875, 0.25, 4.25), (2.125, 3.875, 0.75, 4.25),\n (2.125, 3.875, 0.75, 4.75), (2.125, 3.875, 0.25, 4.75),\n (2.125, 5.625, 0.75, 4.25), (2.125, 5.625, 0.75, 4.75),\n (2.125, 5.625, 0.25, 4.75), (2.125, 5.625, -0.25, 4.25),\n (-2.0, 4.75, -0.5, 4.5), (-2.0, 3.0, -0.5, 4.5),\n (-2.0, 3.0, -1.0, 4.5), (-2.0, 3.0, -0.5, 5.0),\n (-2.0, 4.75, -1.0, 4.5), (-2.0, 4.75, -1.0, 5.0),\n (-2.0, 4.75, -0.5, 5.0), (0.75, 3.0, -0.5, 4.5),\n (0.75, 3.0, -1.0, 4.5), (0.75, 3.0, -1.0, 5.0),\n (0.75, 3.0, -0.5, 5.0), (0.75, 4.75, -1.0, 4.5),\n (0.75, 4.75, -1.0, 5.0), (0.75, 4.75, -0.5, 5.0),\n (-0.625, 3.875, -0.75, 4.75), (-0.625, 5.625, -0.25, 4.25),\n (-0.625, 3.875, -0.25, 4.25), (-0.625, 3.875, -0.75, 4.25),\n (-0.625, 3.875, -0.25, 4.75), (-0.625, 5.625, -0.75, 4.25),\n (-0.625, 5.625, -0.75, 4.75), (-0.625, 5.625, -0.25, 4.75),\n (2.125, 3.875, -0.25, 4.25), (2.125, 3.875, -0.75, 4.25),\n (2.125, 3.875, -0.75, 4.75), (2.125, 3.875, -0.25, 4.75),\n (2.125, 5.625, -0.75, 4.25), (2.125, 5.625, -0.75, 4.75),\n (2.125, 5.625, -0.25, 4.75)]\n nn_checks = {(0.75, 3.0, -0.5, 4.0): [(0.75, 4.75, -0.5, 4.0),\n (0.75, 4.75, -0.5, 4.5),\n (2.125, 3.875, -0.25, 3.75),\n (2.125, 3.875, -0.75, 3.75),\n (2.125, 3.875, -0.25, 4.25),\n (2.125, 3.875, -0.75, 4.25),\n (3.5, 3.0, 0.0, 4.0),\n (3.5, 3.0, -0.5, 4.0),\n (0.75, 3.0, 0.0, 4.0),\n (0.75, 3.0, -0.5, 4.5),\n (-2.0, 3.0, -0.5, 4.0),\n (-2.0, 3.0, 0.0, 4.0),\n (-0.625, 3.875, -0.25, 4.25),\n (-0.625, 3.875, -0.75, 4.25),\n (-0.625, 3.875, -0.75, 3.75),\n (0.75, 3.0, -0.5, 3.5),\n (-2.0, 3.0, -1.0, 4.0),\n (0.75, 3.0, -1.0, 4.0),\n (3.5, 3.0, -1.0, 4.0),\n (0.75, 4.75, -0.5, 3.5),\n (-0.625, 3.875, -0.25, 3.75)],\n (-2.0, 3.0, -1.0, 3.0): [(-0.625, 3.875, -0.75, 3.25),\n (0.75, 3.0, -0.5, 3.0),\n (-2.0, 4.75, -1.0, 3.0),\n (-2.0, 4.75, -1.0, 3.5),\n (0.75, 3.0, -1.0, 3.0),\n (0.75, 4.75, -1.0, 3.5),\n (0.75, 3.0, -1.0, 3.5),\n (0.75, 4.75, -1.0, 3.0),\n (-2.0, 3.0, -0.5, 3.0),\n (-2.0, 3.0, -0.5, 3.5),\n (0.75, 4.75, -0.5, 3.0),\n (-2.0, 3.0, -1.0, 3.5),\n (-2.0, 4.75, -0.5, 3.5),\n (-2.0, 4.75, -0.5, 3.0),\n (0.75, 3.0, -0.5, 3.5)],\n (-0.625, 5.625, -0.75, 4.25): [(0.75, 4.75, -0.5, 4.0),\n (0.75, 4.75, -0.5, 4.5),\n (-2.0, 6.5, -1.0, 4.5),\n (-2.0, 6.5, -1.0, 4.0),\n (0.75, 6.5, -1.0, 4.0),\n (0.75, 6.5, -1.0, 4.5),\n (0.75, 4.75, -1.0, 4.5),\n (0.75, 4.75, -1.0, 4.0),\n (-2.0, 4.75, -1.0, 4.0),\n (-2.0, 4.75, -1.0, 4.5),\n (-2.0, 4.75, -0.5, 4.5),\n (-2.0, 4.75, -0.5, 4.0),\n (0.75, 6.5, -0.5, 4.0),\n (0.75, 6.5, -0.5, 4.5),\n (-2.0, 6.5, -0.5, 4.5),\n (-2.0, 6.5, -0.5, 4.0)]}\n\n init_triangulation(4, 2, check, nn_checks,\n bounds=[(-2, 9), (3, 10), (-1, 1), (3, 5)])",
"def test_power_spectral_density_from_spatially_resolved_magnetisation_confined_to_mesh_region(tmpdir, debug=False):\n os.chdir(str(tmpdir))\n RTOL = 1e-10\n\n H1 = 1e6 # external field in A/m\n alpha1 = 0.5 # some sort of damping constant\n omega1 = gamma * H1 # precession frequency\n\n H2 = 2.8e4 # external field in A/m\n alpha2 = 0.3 # some sort of damping constant\n omega2 = gamma * H2 # precession frequency\n\n ##\n # Step 1: Construct a time series of artificial magnetisation\n # data and save it to a bunch of .npy files.\n ##\n t_step = 1e-11\n t_ini = 0\n t_end = 10e-9\n\n N1 = 42 # in a real application this would be the number of mesh vertices\n N2 = 23 # in a real application this would be the number of mesh vertices\n fft_test_helpers.create_test_npy_files_with_two_regions(\n str(tmpdir), t_step, t_ini, t_end, omega1, alpha1, N1, omega2, alpha2, N2)\n\n ##\n # Step 2: compute the FFT of a resampled time series, both by\n # hand and using FFT_m.\n ##\n # XXX TODO: Resampling timesteps is not supported when using .npy\n # files. Either simplify the code below, or implement saving to\n # .h5 files so that it's easier to implement resampling for\n # spatially resolved data, too.\n ##\n t_step_res = t_step\n t_ini_res = t_ini\n t_end_res = t_end\n ts_resampled = np.arange(t_ini_res, t_end_res, t_step_res)\n\n # Compute time series based on resampled timesteps\n mx_res = exp(-ts_resampled * 1e8 / alpha1) * sin(omega1 * ts_resampled)\n my_res = exp(-ts_resampled * 1e8 / alpha1) * cos(omega1 * ts_resampled)\n mz_res = 1 - sqrt(mx_res ** 2 + my_res ** 2)\n\n # Compute 'analytical' Fourier transform of resampled time series and\n # determine the power of the spectrum for each component. We also need\n # to multiply by the number of mesh nodes because the numerical algorithm\n # sums up all contributions at the individual nodes (but we can just\n # multiply because they are all identical by construction).\n psd_mx_expected = N1 * np.absolute(np.fft.rfft(mx_res)) ** 2\n psd_my_expected = N1 * np.absolute(np.fft.rfft(my_res)) ** 2\n psd_mz_expected = N1 * np.absolute(np.fft.rfft(mz_res)) ** 2\n\n # Compute Fourier transform of resampled time series using FFT_m\n freqs_computed, psd_mx_computed, psd_my_computed, psd_mz_computed = \\\n compute_power_spectral_density('m_ringdown*.npy', t_step_res, t_ini=t_ini_res,\n t_end=t_end_res, subtract_values=None, restrict_to_vertices=xrange(N1))\n\n # Check that the analytically determined power spectra are the same as the\n # computed ones.\n assert(np.allclose(psd_mx_expected, psd_mx_computed, atol=0, rtol=RTOL))\n assert(np.allclose(psd_my_expected, psd_my_computed, atol=0, rtol=RTOL))\n assert(np.allclose(psd_mz_expected, psd_mz_computed, atol=0, rtol=RTOL))\n\n if debug:\n # Plot the spectra for debugging\n fig = plt.figure(figsize=(20, 5))\n ax = fig.gca()\n ax.plot(freqs_computed, psd_mx_expected, label='psd_mx_expected')\n ax.plot(freqs_computed, psd_my_expected, label='psd_my_expected')\n ax.plot(freqs_computed, psd_mz_expected, label='psd_mz_expected')\n ax.plot(freqs_computed, psd_mx_computed, label='psd_mx_computed')\n ax.plot(freqs_computed, psd_my_computed, label='psd_my_computed')\n ax.plot(freqs_computed, psd_mz_computed, label='psd_mz_computed')\n ax.legend(loc='best')\n fig.savefig('psd_m_McMichaelStiles.png')",
"def test_MeshMat_1group(self):\n\n MS_grp = self.meshsol.get_group(\"stator\")\n cells_grp, nb_cell, indices = MS_grp.get_mesh().get_cell()\n solution = np.array([[0, 1, 2], [1, 2, 3]])\n result_tgl = cells_grp[\"triangle\"]\n testA = np.sum(abs(solution - result_tgl))\n msg = (\n \"Wrong output: returned \" + str(result_tgl) + \", expected: \" + str(solution)\n )\n self.assertAlmostEqual(testA, 0, msg=msg, delta=self.DELTA)\n\n MS_grp = self.meshsol.get_group(\"rotor\")\n cells_grp, nb_cell, indices = MS_grp.get_mesh().get_cell()\n solution = np.array([[3, 3], [1, 2], [2, 3]])\n results = cells_grp[\"triangle\"] # The point indices have changed !\n points = MS_grp.get_mesh().get_point(results)\n testA = np.sum(abs(solution - points))\n msg = \"Wrong output: returned \" + str(results) + \", expected: \" + str(solution)\n self.assertAlmostEqual(testA, 0, msg=msg, delta=self.DELTA)",
"def _apply_array_spatial12_halffilling(self, h1e: 'Nparray',\n h2e: 'Nparray') -> 'Nparray':\n if fqe.settings.use_accelerated_code:\n return self._apply_array_spatial12_lm(h1e, h2e)\n else:\n h1e = copy.deepcopy(h1e)\n h2e = numpy.moveaxis(copy.deepcopy(h2e), 1, 2) * (-1.0)\n norb = self.norb()\n for k in range(norb):\n h1e[:, :] -= h2e[:, k, k, :]\n\n if numpy.iscomplex(h1e).any() or numpy.iscomplex(h2e).any():\n dvec = self.calculate_dvec_spatial()\n out = numpy.einsum(\"ij,ijkl->kl\", h1e, dvec)\n dvec = numpy.einsum(\"ijkl,klmn->ijmn\", h2e, dvec)\n out += self._calculate_coeff_spatial_with_dvec(dvec)\n else:\n nij = norb * (norb + 1) // 2\n h1ec = numpy.zeros((nij), dtype=self._dtype)\n h2ec = numpy.zeros((nij, nij), dtype=self._dtype)\n for i in range(norb):\n for j in range(i + 1):\n ijn = j + i * (i + 1) // 2\n h1ec[ijn] = h1e[i, j]\n for k in range(norb):\n for l in range(k + 1):\n kln = l + k * (k + 1) // 2\n h2ec[ijn, kln] = h2e[i, j, k, l]\n dvec = self._calculate_dvec_spatial_compressed()\n out = numpy.einsum(\"i,ikl->kl\", h1ec, dvec)\n dvec = numpy.einsum(\"ik,kmn->imn\", h2ec, dvec)\n for i in range(self.norb()):\n for j in range(self.norb()):\n ijn = min(i, j) + max(i, j) * (max(i, j) + 1) // 2\n work = self._core.alpha_map(j, i)\n for source, target, parity in work:\n out[source, :] += dvec[ijn, target, :] * parity\n work = self._core.beta_map(j, i)\n for source, target, parity in work:\n out[:, source] += dvec[ijn, :, target] * parity\n\n return out",
"def test_3D_m4_2k_sFH():\n scal, velo = setup_3D()\n\n advec = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: 'gpu_2k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_3D_withPython(scal, velo, advec, advec_py)",
"def subdivision(mesh):\n\t\n\t\n\t# 1. generate new nodes in the centre of quad\n\t# 1/4 o-------o 1/4 o: existing vertices\n\t# | | *: newly-generated vertices\n\t# | * |\n\t# | |\n\t# 1/4 o-------o 1/4\n\n\tnew_coor = mesh.give_nodes().give_coor()\n\t\n\tfor face_index in range(mesh.give_model_inf()[2]): \n\t\tnew_x, new_y, new_z = (0, 0, 0)\n\t\tfor vertex_index in range(4):\n\t\t\tmesh.give_faces()\n\t\t\tnode_index = mesh.give_faces().give_node_list(face_index)[vertex_index]\n\n\t\t\tnew_x += 0.25*mesh.give_nodes().give_coor(node_index)[0]\n\t\t\tnew_y += 0.25*mesh.give_nodes().give_coor(node_index)[1]\n\t\t\tnew_z += 0.25*mesh.give_nodes().give_coor(node_index)[2]\n\t\t\t\n\t\tnew_coor.append((new_x, new_y, new_z))\n\t\t\n\t# generating new nodes on the edge\n\t# figure out one edge is shared by how many surfaces\n\tedge_shared_by_faces_list = helper.find_edge_shared_by_which_faces(mesh.give_edges(), mesh.give_faces())\n\t\n\tfor edge_index in range(mesh.give_model_inf()[1]):\n\n\t\tnew_x, new_y, new_z = (0., 0., 0.)\n\t\t\n\t# 2. generate new node on boundary edge\n\t# o: existing vertices\n\t# 1/2 o---*---o 1/2 *: newly-generated vertices\n\t# \n\n\t\tnew_coor = mesh.give_nodes().give_coor()\n\t\tif len(edge_shared_by_faces_list[edge_index]) == 1:\t\n\t\t\tnew_x, new_y, new_z = (0., 0., 0.)\n\t\t\tfor vertex_index in range(2):\n\t\t\t\tthis_node = mesh.give_edges().give_node(edge_index)[vertex_index]\n\t\t\t\tnew_x += 0.5*mesh.give_nodes().give_coor()[this_node][0]\n\t\t\t\tnew_y += 0.5*mesh.give_nodes().give_coor()[this_node][1]\n\t\t\t\tnew_z += 0.5*mesh.give_nodes().give_coor()[this_node][2]\n\t\t\t\t\n\t\t\tnew_coor.append((new_x, new_y, new_z))\n\t\t\t\t\n\t# 3. generate new node on interior edge\n\t# 1/16 o-------o 1/16 o: existing vertices\n\t# | | *: newly-generated vertices\n\t# 3/8 o---*---o 3/8\n\t# | |\n\t# 1/16 o-------o 1/16\n\n\t\telse:\n\t\t\tnew_x, new_y, new_z = (0., 0., 0.)\n\t\t\tconsidered_node = []\n\t\t\tfor vertex_index in range(2):\n\t\t\t\tthis_node = mesh.give_edges().give_node(edge_index)[vertex_index]\n\t\t\t\tconsidered_node.append(this_node)\n\t\t\t\tnew_x += 3./8.*mesh.give_nodes().give_coor()[this_node][0]\n\t\t\t\tnew_y += 3./8.*mesh.give_nodes().give_coor()[this_node][1]\n\t\t\t\tnew_z += 3./8.*mesh.give_nodes().give_coor()[this_node][2]\n\t\t\t\n\t\t\t# faces contain this node\n\t\t\tpotential_node = []\n\t\t\tfor face_index in edge_shared_by_faces_list[edge_index]:\t\t\n\t\t\t\tfor vertex_index in range(4):\n\t\t\t\t\t\tpotential_node.append(mesh.give_faces().give_node_list(face_index)[vertex_index])\n\t\t\t\n\t\t\touter_node = []\n\t\t\tfor node in potential_node:\n\t\t\t\tif (node not in considered_node) & (node not in outer_node):\n\t\t\t\t\touter_node.append(node)\n\t\t\t\t\t\n\t\t\tfor vertex_index in outer_node:\n\t\t\t\tnew_x += 1./16.*mesh.give_nodes().give_coor()[vertex_index][0]\n\t\t\t\tnew_y += 1./16.*mesh.give_nodes().give_coor()[vertex_index][1]\n\t\t\t\tnew_z += 1./16.*mesh.give_nodes().give_coor()[vertex_index][2]\n\t\t\t\n\t\t\tnew_coor.append((new_x, new_y, new_z))\n\n\t# update the links of edges and surfaces\n\tnew_edge_list = []\n\tnew_face_list = []\n\tfor face_index in range(mesh.give_model_inf()[2]):\n\t\told_node0 = mesh.give_faces().give_node_list(face_index)[0]\n\t\told_node1 = mesh.give_faces().give_node_list(face_index)[1]\n\t\told_node2 = mesh.give_faces().give_node_list(face_index)[2]\n\t\told_node3 = mesh.give_faces().give_node_list(face_index)[3]\n\t\t\n\t\told_edge0 = 
mesh.give_faces().give_edge_list(face_index)[0]\n\t\told_edge1 = mesh.give_faces().give_edge_list(face_index)[1]\n\t\told_edge2 = mesh.give_faces().give_edge_list(face_index)[2]\n\t\told_edge3 = mesh.give_faces().give_edge_list(face_index)[3]\n\t\t\n\t\tnew_node4 = old_edge0 + mesh.give_model_inf()[0] + mesh.give_model_inf()[2] \n\t\tnew_node5 = old_edge1 + mesh.give_model_inf()[0] + mesh.give_model_inf()[2]\n\t\tnew_node6 = old_edge2 + mesh.give_model_inf()[0] + mesh.give_model_inf()[2]\n\t\tnew_node7 = old_edge3 + mesh.give_model_inf()[0] + mesh.give_model_inf()[2]\t\n\t\tnew_node8 = mesh.give_model_inf()[0] + face_index\n\t\t\n\t\tif helper.in_list((old_node0, new_node4), new_edge_list) == False: \n\t\t\tnew_edge_list.append((old_node0, new_node4))\n\t\tif helper.in_list((new_node4, new_node8), new_edge_list) == False: \n\t\t\tnew_edge_list.append((new_node4, new_node8))\n\t\tif helper.in_list((new_node8, new_node7), new_edge_list) == False: \n\t\t\tnew_edge_list.append((new_node8, new_node7))\n\t\tif helper.in_list((new_node7, old_node0), new_edge_list) == False: \n\t\t\tnew_edge_list.append((new_node7, old_node0))\n\t\tif helper.in_list((new_node4, old_node1), new_edge_list) == False: \n\t\t\tnew_edge_list.append((new_node4, old_node1))\n\t\tif helper.in_list((old_node1, new_node5), new_edge_list) == False: \n\t\t\tnew_edge_list.append((old_node1, new_node5))\n\t\tif helper.in_list((new_node5, new_node8), new_edge_list) == False: \n\t\t\tnew_edge_list.append((new_node5, new_node8))\n\t\tif helper.in_list((new_node7, old_node3), new_edge_list) == False: \n\t\t\tnew_edge_list.append((new_node7, old_node3))\n\t\tif helper.in_list((old_node3, new_node6), new_edge_list) == False: \n\t\t\tnew_edge_list.append((old_node3, new_node6))\n\t\tif helper.in_list((new_node6, new_node8), new_edge_list) == False: \n\t\t\tnew_edge_list.append((new_node6, new_node8))\n\t\tif helper.in_list((new_node6, old_node2), new_edge_list) == False: \n\t\t\tnew_edge_list.append((new_node6, old_node2))\n\t\tif helper.in_list((old_node2, new_node5), new_edge_list) == False: \n\t\t\tnew_edge_list.append((old_node2, new_node5))\n\t\n\t\tnew_face_list.append((old_node0, new_node4, new_node8, new_node7))\n\t\tnew_face_list.append((new_node4, old_node1, new_node5, new_node8))\n\t\tnew_face_list.append((new_node7, new_node8, new_node6, old_node3))\n\t\tnew_face_list.append((new_node8, new_node5, old_node2, new_node6))\n\t\t\n\tnew_edges = geo.Edge(new_edge_list)\n\t\n\tnew_faces = geo.Face(new_face_list, new_edges)\n\t\t\n\t# update existing nodes\t\n\tfor node_index in range(mesh.give_model_inf()[0]):\n\t\t\n\t\tring1, ring2 = helper.find_neighbour_node(new_edges, new_faces, node_index)\n\t\tvalence = helper.find_valence(node_index, new_faces) \n\t\t#: valence: the number of faces sharing on specific edge\n\n\t# 4. 
update existing corner vertex\n\t# 2/4 @---* 1/4 *: newly-generated vertices\n\t# | | @: existing vertices to be updated\n\t# 1/4 *---* 0 The higher mask values on neighbouring vertices, \n\t# the more likely a square mesh will be refined into a sphere.\n\t \n\t\tif valence == 1:\n\n\t\t\tnew_x, new_y, new_z = (0, 0, 0)\n\t\t\tprint\n\t\t\tfor node_in_ring1 in ring1:\n\t\t\t\tnew_x += 1./4.*mesh.give_nodes().give_coor()[node_in_ring1][0]\n\t\t\t\tnew_y += 1./4.*mesh.give_nodes().give_coor()[node_in_ring1][1]\n\t\t\t\tnew_z += 1./4.*mesh.give_nodes().give_coor()[node_in_ring1][2]\n\n\t\t\tfor node_in_ring2 in ring2:\n\t\t\t\tnew_x += 0.*mesh.give_nodes().give_coor()[node_in_ring2][0]\n\t\t\t\tnew_y += 0.*mesh.give_nodes().give_coor()[node_in_ring2][1]\n\t\t\t\tnew_z += 0.*mesh.give_nodes().give_coor()[node_in_ring2][2]\n\t\t\t\t\n\t\t\tnew_x += 2./4.*mesh.give_nodes().give_coor()[node_index][0]\n\t\t\tnew_y += 2./4.*mesh.give_nodes().give_coor()[node_index][1]\n\t\t\tnew_z += 2./4.*mesh.give_nodes().give_coor()[node_index][2]\n\n\t# 5. update existing boundary joint vertex\n\t# 3/4\n\t# 1/8 *---*---* 1/8 *: newly-generated vertices\n\t# | | | @: existing vertices to be updated\n\t# 0 *---*---* 0\n\n\t\telif valence == 2:\n\t\t\t\n\t\t\tnew_x, new_y, new_z = (0, 0, 0)\n\t\t\tfor node_in_ring1 in ring1:\n\t\t\t\tif helper.find_valence(node_in_ring1, new_faces) <= 2: \n\t\t\t\t\tnew_x += 1./8.*mesh.give_nodes().give_coor()[node_in_ring1][0]\n\t\t\t\t\tnew_y += 1./8.*mesh.give_nodes().give_coor()[node_in_ring1][1]\n\t\t\t\t\tnew_z += 1./8.*mesh.give_nodes().give_coor()[node_in_ring1][2]\n\t\t\t\t\t\n\t\t\tnew_x += 3./4.*mesh.give_nodes().give_coor()[node_index][0]\n\t\t\tnew_y += 3./4.*mesh.give_nodes().give_coor()[node_index][1]\n\t\t\tnew_z += 3./4.*mesh.give_nodes().give_coor()[node_index][2]\n\t\n\t# 6. update new node on interior edge\n\t# * r/k\n\t# /\\ b/k*\n\t# *__/ \\___ r/k\n\t# \\ \\ /¬¬/ *: newly-generated vertices: \n\t# \\ \\/ / b = 3/2/valence, r = 1/4/valence\n\t# *--@--* b/k\t @: existing vertices to be updated: 1-b-r\t\t\n\t# / /\\ \\\n\t# /__/ \\__\\\n\t# * \\ / * r/k\n\t# \\/\n\t\t\n\t\telse:\n\t\t\tnew_x, new_y, new_z = (0, 0, 0)\n\t\t\tbeta = 3./2./valence\n\t\t\tgamma = 1./4./valence\n\t\t\tfor node_in_ring1 in ring1:\n\t\t\t\tnew_x += beta/valence*mesh.give_nodes().give_coor()[node_in_ring1][0]\n\t\t\t\tnew_y += beta/valence*mesh.give_nodes().give_coor()[node_in_ring1][1]\n\t\t\t\tnew_z += beta/valence*mesh.give_nodes().give_coor()[node_in_ring1][2]\n\t\t\t\n\t\t\tfor node_in_ring2 in ring2:\n\t\t\t\tnew_x += gamma/valence*mesh.give_nodes().give_coor()[node_in_ring2][0]\n\t\t\t\tnew_y += gamma/valence*mesh.give_nodes().give_coor()[node_in_ring2][1]\n\t\t\t\tnew_z += gamma/valence*mesh.give_nodes().give_coor()[node_in_ring2][2]\n\t\t\t\n\t\t\tnew_x += (1. - beta - gamma)*mesh.give_nodes().give_coor()[node_index][0]\n\t\t\tnew_y += (1. - beta - gamma)*mesh.give_nodes().give_coor()[node_index][1]\n\t\t\tnew_z += (1. - beta - gamma)*mesh.give_nodes().give_coor()[node_index][2]\n\t\t\n\t\tnew_coor[node_index] = (new_x, new_y, new_z)\n\t\n\tnew_nodes = geo.Node(new_coor)\n\t\n\tmesh.update(new_nodes, new_edges, new_faces)\n\t\n\t# return new_mesh\n\treturn mesh",
"def test_assembly_inner_product_2_forms(self):\n func_space_lob = FunctionSpace(self.mesh, '2-lobatto', self.p)\n func_space_gauss = FunctionSpace(self.mesh, '2-gauss', self.p)\n func_space_extgauss = FunctionSpace(self.mesh, '2-ext_gauss', self.p)\n\n basis_lob = BasisForm(func_space_lob)\n basis_lob.quad_grid = 'gauss'\n M_lob = inner(basis_lob, basis_lob)\n\n basis_gauss = BasisForm(func_space_gauss)\n basis_gauss.quad_grid = 'lobatto'\n M_gauss = inner(basis_gauss, basis_gauss)\n\n basis_ext_gauss = BasisForm(func_space_extgauss)\n print(basis_ext_gauss.num_basis)\n basis_ext_gauss.quad_grid = 'lobatto'\n M_extgauss = inner(basis_ext_gauss, basis_ext_gauss)\n\n M_lob_ass_ref = assemble_slow(self.mesh, M_lob, func_space_lob.dof_map.dof_map,\n func_space_lob.dof_map.dof_map)\n M_gauss_ass_ref = assemble_slow(self.mesh, M_gauss, func_space_gauss.dof_map.dof_map,\n func_space_gauss.dof_map.dof_map)\n M_extgauss_ass_ref = assemble_slow(\n self.mesh, M_extgauss, func_space_extgauss.dof_map.dof_map_internal, func_space_extgauss.dof_map.dof_map_internal)\n\n M_lob_ass = assemble(M_lob, func_space_lob, func_space_lob).toarray()\n M_gauss_ass = assemble(M_gauss, func_space_gauss, func_space_gauss).toarray()\n M_extgauss_ass = assemble(M_extgauss, func_space_extgauss,\n func_space_extgauss).toarray()\n\n npt.assert_array_almost_equal(M_lob_ass_ref, M_lob_ass)\n npt.assert_array_almost_equal(M_gauss_ass_ref, M_gauss_ass)\n npt.assert_array_almost_equal(M_extgauss_ass_ref, M_extgauss_ass)",
"def test_2D_m6_2k_sFH():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L4_2,\n Support: 'gpu_2k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L4_2,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def test_2D_m8_2k_sFH():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: 'gpu_2k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def _apply_array_spin12_halffilling(self, h1e: 'Nparray',\n h2e: 'Nparray') -> 'Nparray':\n if fqe.settings.use_accelerated_code:\n #return self._apply_array_spin12_blocked(h1e, h2e)\n return self._apply_array_spin12_lm(h1e, h2e)\n else:\n h1e = copy.deepcopy(h1e)\n h2e = numpy.moveaxis(copy.deepcopy(h2e), 1, 2) * (-1.0)\n norb = self.norb()\n for k in range(norb * 2):\n h1e[:, :] -= h2e[:, k, k, :]\n\n (dveca, dvecb) = self.calculate_dvec_spin()\n out = numpy.einsum(\"ij,ijkl->kl\", h1e[:norb, :norb], dveca) \\\n + numpy.einsum(\"ij,ijkl->kl\", h1e[norb:, norb:], dvecb)\n ndveca = numpy.einsum(\"ijkl,klmn->ijmn\",\n h2e[:norb, :norb, :norb, :norb], dveca) \\\n + numpy.einsum(\"ijkl,klmn->ijmn\",\n h2e[:norb, :norb, norb:, norb:], dvecb)\n ndvecb = numpy.einsum(\"ijkl,klmn->ijmn\",\n h2e[norb:, norb:, :norb, :norb], dveca) \\\n + numpy.einsum(\"ijkl,klmn->ijmn\",\n h2e[norb:, norb:, norb:, norb:], dvecb)\n out += self._calculate_coeff_spin_with_dvec((ndveca, ndvecb))\n return out",
"def test_2D_m8_1k_sFH():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: 'gpu_1k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def test_other_side_mesh(self):\n layered_volume = np.array(\n [\n [\n [0, 0, 0, 0, 0],\n [0, 0, 1, 1, 1],\n [0, 1, 1, 0, 0],\n [0, 1, 1, 1, 0],\n [0, 0, 0, 0, 0],\n ]\n ]\n )\n\n def quad(v1, v2, v3, v4):\n \"\"\"counterclockwise winding faces to make quad\"\"\"\n return [[v3, v2, v1], [v4, v3, v2]]\n\n top_mesh = trimesh.Trimesh(\n vertices=np.array(\n [\n [0, 1, 5],\n [1, 1, 5],\n [0, 1, 2],\n [1, 1, 2],\n [0, 3.5, 1.5],\n [1, 3.5, 1.5],\n ]\n ),\n faces=np.concatenate([quad(0, 1, 3, 2), quad(2, 3, 5, 4)], axis=0),\n )\n\n bot_mesh = trimesh.Trimesh(\n vertices=np.array([[0, 2, 5], [1, 2, 5], [0, 4, 2], [1, 4, 2]]),\n faces=quad(0, 1, 3, 2),\n )\n\n up = [0, -1, 0]\n dup = [0, -np.sqrt(0.5), -np.sqrt(0.5)]\n nanvec = [np.nan, np.nan, np.nan]\n vectors = np.array(\n [\n [\n [nanvec, nanvec, nanvec, nanvec, nanvec],\n [nanvec, nanvec, dup, up, up],\n [nanvec, dup, dup, nanvec, nanvec],\n [nanvec, dup, up, up, nanvec],\n [nanvec, nanvec, nanvec, nanvec, nanvec],\n ]\n ]\n )\n\n distances, something_wrong = tested.distances_from_voxels_to_meshes_wrt_dir(\n layered_volume, [top_mesh, bot_mesh], vectors\n )\n\n npt.assert_array_almost_equal(distances, get_expected_distances_to_meshes())\n assert not np.any(something_wrong)",
"def test_2D_m6_1k_sFH():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L4_2,\n Support: 'gpu_2k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L4_2,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def test_2_2_3D_cube_splits(self):\n check = [(0, 0, 0), (1, 1, 1), (1, 0, 0), (1, 1, 0), (1, 0, 1),\n (0, 1, 0),\n (0, 1, 1), (0, 0, 1), (0.5, 0.5, 0.5), (0.0, 0.5, 0.5),\n (0.0, 0.0, 0.5), (0.0, 0.5, 0.0), (0.5, 0.0, 0.5),\n (0.5, 0.0, 0.0),\n (0.5, 0.5, 0.0), (0.25, 0.25, 0.25), (1.0, 0.5, 0.5),\n (1.0, 1.0, 0.5),\n (1.0, 0.5, 1.0), (0.5, 1.0, 0.5), (0.5, 1.0, 1.0),\n (0.5, 0.5, 1.0),\n (0.75, 0.75, 0.75), (1.0, 0.0, 0.5), (1.0, 0.5, 0.0),\n (0.75, 0.25, 0.25), (0.5, 1.0, 0.0), (0.75, 0.75, 0.25),\n (0.5, 0.0, 1.0), (0.75, 0.25, 0.75), (0.0, 1.0, 0.5),\n (0.25, 0.75, 0.25), (0.0, 0.5, 1.0), (0.25, 0.75, 0.75),\n (0.25, 0.25, 0.75), (0.5, 0.25, 0.25), (0.5, 0.5, 0.25),\n (0.5, 0.25, 0.5), (0.25, 0.5, 0.25), (0.25, 0.5, 0.5),\n (0.25, 0.25, 0.5), (0.375, 0.375, 0.375), (0.0, 0.25, 0.25),\n (0.0, 0.0, 0.25), (0.0, 0.25, 0.0), (0.25, 0.0, 0.25),\n (0.25, 0.0, 0.0), (0.25, 0.25, 0.0), (0.125, 0.125, 0.125),\n (0.0, 0.5, 0.25), (0.0, 0.25, 0.5), (0.125, 0.375, 0.375),\n (0.25, 0.0, 0.5), (0.125, 0.125, 0.375), (0.25, 0.5, 0.0),\n (0.125, 0.375, 0.125), (0.5, 0.0, 0.25), (0.375, 0.125, 0.375),\n (0.5, 0.25, 0.0), (0.375, 0.125, 0.125), (0.375, 0.375, 0.125),\n (0.5, 0.75, 0.75), (0.5, 0.5, 0.75), (0.5, 0.75, 0.5),\n (0.75, 0.5, 0.75), (0.75, 0.5, 0.5), (0.75, 0.75, 0.5),\n (0.625, 0.625, 0.625), (1.0, 0.75, 0.75), (1.0, 1.0, 0.75),\n (1.0, 0.75, 1.0), (0.75, 1.0, 0.75), (0.75, 1.0, 1.0),\n (0.75, 0.75, 1.0), (0.875, 0.875, 0.875), (1.0, 0.5, 0.75),\n (1.0, 0.75, 0.5), (0.875, 0.625, 0.625), (0.75, 1.0, 0.5),\n (0.875, 0.875, 0.625), (0.75, 0.5, 1.0), (0.875, 0.625, 0.875),\n (0.5, 1.0, 0.75), (0.625, 0.875, 0.625), (0.5, 0.75, 1.0),\n (0.625, 0.875, 0.875), (0.625, 0.625, 0.875),\n (0.75, 0.5, 0.25),\n (0.75, 0.25, 0.5), (0.625, 0.375, 0.375), (1.0, 0.25, 0.25),\n (1.0, 0.0, 0.25), (1.0, 0.25, 0.0), (0.75, 0.0, 0.25),\n (0.75, 0.0, 0.0), (0.75, 0.25, 0.0), (0.875, 0.125, 0.125),\n (1.0, 0.5, 0.25), (1.0, 0.25, 0.5), (0.875, 0.375, 0.375),\n (0.75, 0.0, 0.5), (0.875, 0.125, 0.375), (0.75, 0.5, 0.0),\n (0.875, 0.375, 0.125), (0.625, 0.125, 0.375),\n (0.625, 0.125, 0.125),\n (0.625, 0.375, 0.125), (0.5, 0.75, 0.25),\n (0.625, 0.625, 0.375),\n (1.0, 0.75, 0.25), (1.0, 1.0, 0.25), (1.0, 0.75, 0.0),\n (0.75, 1.0, 0.25), (0.75, 1.0, 0.0), (0.75, 0.75, 0.0),\n (0.875, 0.875, 0.125), (0.875, 0.625, 0.375),\n (0.875, 0.875, 0.375),\n (0.875, 0.625, 0.125), (0.5, 1.0, 0.25), (0.625, 0.875, 0.375),\n (0.5, 0.75, 0.0), (0.625, 0.875, 0.125), (0.625, 0.625, 0.125),\n (0.5, 0.25, 0.75), (0.625, 0.375, 0.625), (1.0, 0.25, 0.75),\n (1.0, 0.0, 0.75), (1.0, 0.25, 1.0), (0.75, 0.0, 0.75),\n (0.75, 0.0, 1.0), (0.75, 0.25, 1.0), (0.875, 0.125, 0.875),\n (0.875, 0.375, 0.625), (0.875, 0.125, 0.625),\n (0.875, 0.375, 0.875),\n (0.5, 0.0, 0.75), (0.625, 0.125, 0.625), (0.5, 0.25, 1.0),\n (0.625, 0.125, 0.875), (0.625, 0.375, 0.875),\n (0.25, 0.75, 0.5),\n (0.375, 0.625, 0.375), (0.0, 0.75, 0.25), (0.0, 1.0, 0.25),\n (0.0, 0.75, 0.0), (0.25, 1.0, 0.25), (0.25, 1.0, 0.0),\n (0.25, 0.75, 0.0), (0.125, 0.875, 0.125), (0.0, 0.75, 0.5),\n (0.125, 0.625, 0.375), (0.25, 1.0, 0.5), (0.125, 0.875, 0.375),\n (0.125, 0.625, 0.125), (0.375, 0.875, 0.375),\n (0.375, 0.875, 0.125),\n (0.375, 0.625, 0.125), (0.25, 0.5, 0.75),\n (0.375, 0.625, 0.625),\n (0.0, 0.75, 0.75), (0.0, 1.0, 0.75), (0.0, 0.75, 1.0),\n (0.25, 1.0, 0.75), (0.25, 1.0, 1.0), (0.25, 0.75, 1.0),\n (0.125, 0.875, 0.875), (0.0, 0.5, 0.75), (0.125, 0.625, 0.625),\n (0.125, 0.875, 0.625), (0.25, 0.5, 1.0), (0.125, 0.625, 0.875),\n (0.375, 0.875, 0.625), (0.375, 0.875, 
0.875),\n (0.375, 0.625, 0.875),\n (0.375, 0.375, 0.625), (0.0, 0.25, 0.75), (0.0, 0.0, 0.75),\n (0.0, 0.25, 1.0), (0.25, 0.0, 0.75), (0.25, 0.0, 1.0),\n (0.25, 0.25, 1.0), (0.125, 0.125, 0.875),\n (0.125, 0.375, 0.625),\n (0.125, 0.125, 0.625), (0.125, 0.375, 0.875),\n (0.375, 0.125, 0.625),\n (0.375, 0.125, 0.875), (0.375, 0.375, 0.875)]\n\n nn_checks = {(0.5, 0.25, 0.25): [(0.375, 0.375, 0.125), (0.5, 0.5, 0.0),\n (0.75, 0.25, 0.25),\n (0.625, 0.375, 0.375),\n (0.625, 0.125, 0.375),\n (0.625, 0.125, 0.125),\n (0.5, 0.5, 0.25), (0.25, 0.25, 0.25),\n (0.375, 0.375, 0.375),\n (0.5, 0.25, 0.5), (0.5, 0.5, 0.5),\n (0.5, 0.0, 0.25),\n (0.375, 0.125, 0.375), (0.5, 0.0, 0.5),\n (0.5, 0.25, 0.0),\n (0.375, 0.125, 0.125), (0.5, 0.0, 0.0),\n (0.625, 0.375, 0.125)],\n (0.625, 0.625, 0.875): [(0.75, 0.5, 1.0),\n (0.75, 0.75, 1.0),\n (0.5, 0.75, 1.0), (0.5, 0.5, 1.0),\n (0.5, 0.5, 0.75),\n (0.5, 0.75, 0.75),\n (0.75, 0.5, 0.75),\n (0.75, 0.75, 0.75)],\n (0, 0, 0): [(0.0, 0.25, 0.0), (0.125, 0.125, 0.125),\n (0.0, 0.0, 0.25), (0.25, 0.0, 0.0),\n (0.0, 0.25, 0.25), (0.25, 0.25, 0.0),\n (0.25, 0.0, 0.25)]}\n\n init_triangulation(3, 2, check, nn_checks)",
"def MeshMachine(main):\n\n # oDesign definition\n oDesign = main['ANSYS']['oDesign']\n\n # Data for the rotor mesh\n RotorName = main['ANSYS']['Rotor&Magnets']['Name'][0]\n RotorNumMaxElem = main['ANSYS']['Mesh']['Rotor']['NumMaxElem']\n RotorMaxLength = main['ANSYS']['Mesh']['Rotor']['MaxLength']\n\n # Data for the magnets mesh\n PMNames = main['ANSYS']['Rotor&Magnets']['PMNames']\n PMNumMaxElem = main['ANSYS']['Mesh']['Magnets']['NumMaxElem']\n PMMaxLength = main['ANSYS']['Mesh']['Magnets']['MaxLength']\n\n # Data for the Stator mesh\n StatorName = main['ANSYS']['Stator']['Name']\n StatorNormalDev = main['ANSYS']['Mesh']['Stator']['NormalDev']\n StatorAspectRatio = main['ANSYS']['Mesh']['Stator']['AspectRatio']\n\n # Data for the Stator mesh\n CoilNames = main['ANSYS']['Winding']['CoilNames']\n WindingNumMaxElem = main['ANSYS']['Mesh']['Winding']['NumMaxElem']\n WindingMaxLength = main['ANSYS']['Mesh']['Winding']['MaxLength']\n\n WindingName = []\n for phase in CoilNames:\n for direction in phase:\n WindingName += direction\n\n # Creating meshes\n oModule = oDesign.GetModule(\"MeshSetup\")\n\n # Rotor meshes\n oModule.AssignLengthOp(\n [\n \"NAME:Rotor\",\n \"RefineInside:=\", True,\n \"Enabled:=\", True,\n \"Objects:=\", [RotorName],\n \"RestrictElem:=\", False,\n \"NumMaxElem:=\", str(RotorNumMaxElem),\n \"RestrictLength:=\", True,\n \"MaxLength:=\", str(RotorMaxLength)+\"mm\"\n ]\n )\n # Magnet meshes\n oModule.AssignLengthOp(\n [\n \"NAME:Magnets\",\n \"RefineInside:=\", True,\n \"Enabled:=\", True,\n \"Objects:=\", PMNames,\n \"RestrictElem:=\", False,\n \"NumMaxElem:=\", str(PMNumMaxElem),\n \"RestrictLength:=\", True,\n \"MaxLength:=\", str(PMMaxLength)+\"mm\"\n ]\n )\n # Stator meshes\n oModule.AssignTrueSurfOp(\n [\n \"NAME:Stator\",\n \"Objects:=\", [StatorName],\n \"CurvedSurfaceApproxChoice:=\", \"ManualSettings\",\n \"SurfDevChoice:=\", 0,\n \"NormalDevChoice:=\", 2,\n \"NormalDev:=\", str(StatorNormalDev) + \"deg\",\n \"AspectRatioChoice:=\", 2,\n \"AspectRatio:=\", str(StatorAspectRatio)\n ]\n )\n\n # Coil meshes\n oModule.AssignLengthOp(\n [\n \"NAME:Coils\",\n \"RefineInside:=\"\t, True,\n \"Enabled:=\"\t\t, True,\n \"Objects:=\"\t\t, WindingName,\n \"RestrictElem:=\"\t, False,\n \"NumMaxElem:=\"\t\t, str(WindingNumMaxElem),\n \"RestrictLength:=\"\t, True,\n \"MaxLength:=\"\t\t, str(WindingMaxLength) +\"mm\"\n ]\n )\n\n return main",
"def test_linear_2d_merwe_column():\n\n\n def fx(x, dt):\n F = np.array([[1, dt, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, dt],\n [0, 0, 0, 1]], dtype=float)\n\n return np.dot(F, x)\n\n def hx(x):\n return np.array([x[0], x[2]])\n\n\n dt = 0.1\n points = MerweScaledSigmaPoints2(4, .1, 2., -1)\n kf = UKF2(dim_x=4, dim_z=2, dt=dt, fx=fx, hx=hx, points=points)\n\n\n kf.x = np.array([[-1., 1., -1., 1]]).T\n kf.P*=0.0001\n #kf.R *=0\n #kf.Q\n\n zs = []\n for i in range(20):\n z = np.array([[i+randn()*0.1],\n [i+randn()*0.1]])\n zs.append(z)\n\n Ms, Ps = kf.batch_filter(zs)\n smooth_x, _, _ = kf.rts_smoother(Ms, Ps, dt=dt)\n\n if DO_PLOT:\n plt.figure()\n zs = np.asarray(zs)\n plt.plot(zs[:,0], marker='+', c='b')\n plt.plot(Ms[:,0], c='b')\n plt.plot(smooth_x[:,0], smooth_x[:,2], c='r')\n print(smooth_x)",
"def test_4_2_5D_rec_splits(self):\n check = [(0.3, -3.9, -1.5, -3.0, -9.5), (1.0, 11.8, -1.1, 5.0, 11000.5),\n (1.0, -3.9, -1.5, -3.0, -9.5), (1.0, 11.8, -1.5, -3.0, -9.5),\n (1.0, 11.8, -1.1, -3.0, -9.5), (1.0, 11.8, -1.1, 5.0, -9.5),\n (1.0, 11.8, -1.1, -3.0, 11000.5), (1.0, 11.8, -1.5, 5.0, -9.5),\n (1.0, 11.8, -1.5, 5.0, 11000.5),\n (1.0, 11.8, -1.5, -3.0, 11000.5),\n (1.0, -3.9, -1.1, -3.0, -9.5), (1.0, -3.9, -1.1, 5.0, -9.5),\n (1.0, -3.9, -1.1, 5.0, 11000.5),\n (1.0, -3.9, -1.1, -3.0, 11000.5), (1.0, -3.9, -1.5, 5.0, -9.5),\n (1.0, -3.9, -1.5, 5.0, 11000.5),\n (1.0, -3.9, -1.5, -3.0, 11000.5),\n (0.3, 11.8, -1.5, -3.0, -9.5), (0.3, 11.8, -1.1, -3.0, -9.5),\n (0.3, 11.8, -1.1, 5.0, -9.5), (0.3, 11.8, -1.1, 5.0, 11000.5),\n (0.3, 11.8, -1.1, -3.0, 11000.5), (0.3, 11.8, -1.5, 5.0, -9.5),\n (0.3, 11.8, -1.5, 5.0, 11000.5),\n (0.3, 11.8, -1.5, -3.0, 11000.5),\n (0.3, -3.9, -1.1, -3.0, -9.5), (0.3, -3.9, -1.1, 5.0, -9.5),\n (0.3, -3.9, -1.1, 5.0, 11000.5),\n (0.3, -3.9, -1.1, -3.0, 11000.5), (0.3, -3.9, -1.5, 5.0, -9.5),\n (0.3, -3.9, -1.5, 5.0, 11000.5),\n (0.3, -3.9, -1.5, -3.0, 11000.5),\n (0.65, 3.95, -1.3, 1.0, 5495.5),\n (0.3, 3.95, -1.3, 1.0, 5495.5),\n (0.3, -3.9, -1.3, 1.0, 5495.5), (0.3, -3.9, -1.5, 1.0, 5495.5),\n (0.3, -3.9, -1.5, -3.0, 5495.5), (0.3, -3.9, -1.5, 1.0, -9.5),\n (0.3, -3.9, -1.3, -3.0, 5495.5), (0.3, -3.9, -1.3, -3.0, -9.5),\n (0.3, -3.9, -1.3, 1.0, -9.5),\n (0.3, 3.95, -1.5, 1.0, 5495.5),\n (0.3, 3.95, -1.5, -3.0, 5495.5),\n (0.3, 3.95, -1.5, -3.0, -9.5),\n (0.3, 3.95, -1.5, 1.0, -9.5),\n (0.3, 3.95, -1.3, -3.0, 5495.5),\n (0.3, 3.95, -1.3, -3.0, -9.5),\n (0.3, 3.95, -1.3, 1.0, -9.5),\n (0.65, -3.9, -1.3, 1.0, 5495.5),\n (0.65, -3.9, -1.5, 1.0, 5495.5),\n (0.65, -3.9, -1.5, -3.0, 5495.5),\n (0.65, -3.9, -1.5, -3.0, -9.5),\n (0.65, -3.9, -1.5, 1.0, -9.5),\n (0.65, -3.9, -1.3, -3.0, 5495.5),\n (0.65, -3.9, -1.3, -3.0, -9.5),\n (0.65, -3.9, -1.3, 1.0, -9.5),\n (0.65, 3.95, -1.5, 1.0, 5495.5),\n (0.65, 3.95, -1.5, -3.0, 5495.5),\n (0.65, 3.95, -1.5, -3.0, -9.5),\n (0.65, 3.95, -1.5, 1.0, -9.5),\n (0.65, 3.95, -1.3, -3.0, 5495.5),\n (0.65, 3.95, -1.3, -3.0, -9.5),\n (0.65, 3.95, -1.3, 1.0, -9.5),\n (0.475, 0.025000000000000133, -1.4, -1.0, 2743.0),\n (1.0, 3.95, -1.3, 1.0, 5495.5),\n (1.0, 11.8, -1.3, 1.0, 5495.5), (1.0, 11.8, -1.1, 1.0, 5495.5),\n (1.0, 11.8, -1.1, 5.0, 5495.5),\n (1.0, 11.8, -1.1, 1.0, 11000.5),\n (1.0, 11.8, -1.3, 5.0, 5495.5),\n (1.0, 11.8, -1.3, 5.0, 11000.5),\n (1.0, 11.8, -1.3, 1.0, 11000.5),\n (1.0, 3.95, -1.1, 1.0, 5495.5),\n (1.0, 3.95, -1.1, 5.0, 5495.5),\n (1.0, 3.95, -1.1, 5.0, 11000.5),\n (1.0, 3.95, -1.1, 1.0, 11000.5),\n (1.0, 3.95, -1.3, 5.0, 5495.5),\n (1.0, 3.95, -1.3, 5.0, 11000.5),\n (1.0, 3.95, -1.3, 1.0, 11000.5),\n (0.65, 11.8, -1.3, 1.0, 5495.5),\n (0.65, 11.8, -1.1, 1.0, 5495.5),\n (0.65, 11.8, -1.1, 5.0, 5495.5),\n (0.65, 11.8, -1.1, 5.0, 11000.5),\n (0.65, 11.8, -1.1, 1.0, 11000.5),\n (0.65, 11.8, -1.3, 5.0, 5495.5),\n (0.65, 11.8, -1.3, 5.0, 11000.5),\n (0.65, 11.8, -1.3, 1.0, 11000.5),\n (0.65, 3.95, -1.1, 1.0, 5495.5),\n (0.65, 3.95, -1.1, 5.0, 5495.5),\n (0.65, 3.95, -1.1, 5.0, 11000.5),\n (0.65, 3.95, -1.1, 1.0, 11000.5),\n (0.65, 3.95, -1.3, 5.0, 5495.5),\n (0.65, 3.95, -1.3, 5.0, 11000.5),\n (0.65, 3.95, -1.3, 1.0, 11000.5),\n (0.825, 7.875, -1.2000000000000002, 3.0, 8248.0),\n (1.0, -3.9, -1.3, 1.0, 5495.5), (1.0, -3.9, -1.5, 1.0, 5495.5),\n (1.0, -3.9, -1.5, -3.0, 5495.5), (1.0, -3.9, -1.5, 1.0, -9.5),\n (1.0, -3.9, -1.3, -3.0, 5495.5), (1.0, -3.9, -1.3, -3.0, -9.5),\n (1.0, -3.9, -1.3, 1.0, -9.5),\n (1.0, 3.95, 
-1.5, 1.0, 5495.5),\n (1.0, 3.95, -1.5, -3.0, 5495.5),\n (1.0, 3.95, -1.5, -3.0, -9.5),\n (1.0, 3.95, -1.5, 1.0, -9.5),\n (1.0, 3.95, -1.3, -3.0, 5495.5),\n (1.0, 3.95, -1.3, -3.0, -9.5),\n (1.0, 3.95, -1.3, 1.0, -9.5),\n (0.825, 0.025000000000000133, -1.4, -1.0, 2743.0),\n (1.0, 11.8, -1.5, 1.0, 5495.5),\n (1.0, 11.8, -1.5, -3.0, 5495.5), (1.0, 11.8, -1.5, 1.0, -9.5),\n (1.0, 11.8, -1.3, -3.0, 5495.5), (1.0, 11.8, -1.3, -3.0, -9.5),\n (1.0, 11.8, -1.3, 1.0, -9.5),\n (0.65, 11.8, -1.5, 1.0, 5495.5),\n (0.65, 11.8, -1.5, -3.0, 5495.5),\n (0.65, 11.8, -1.5, -3.0, -9.5),\n (0.65, 11.8, -1.5, 1.0, -9.5),\n (0.65, 11.8, -1.3, -3.0, 5495.5),\n (0.65, 11.8, -1.3, -3.0, -9.5),\n (0.65, 11.8, -1.3, 1.0, -9.5),\n (0.825, 7.875, -1.4, -1.0, 2743.0),\n (1.0, 11.8, -1.1, -3.0, 5495.5), (1.0, 11.8, -1.1, 1.0, -9.5),\n (1.0, 3.95, -1.1, -3.0, 5495.5),\n (1.0, 3.95, -1.1, -3.0, -9.5),\n (1.0, 3.95, -1.1, 1.0, -9.5),\n (0.65, 11.8, -1.1, -3.0, 5495.5),\n (0.65, 11.8, -1.1, -3.0, -9.5),\n (0.65, 11.8, -1.1, 1.0, -9.5),\n (0.65, 3.95, -1.1, -3.0, 5495.5),\n (0.65, 3.95, -1.1, -3.0, -9.5),\n (0.65, 3.95, -1.1, 1.0, -9.5),\n (0.825, 7.875, -1.2000000000000002, -1.0, 2743.0),\n (1.0, 11.8, -1.3, 5.0, -9.5),\n (1.0, 3.95, -1.1, 5.0, -9.5),\n (1.0, 3.95, -1.3, 5.0, -9.5),\n (0.65, 11.8, -1.1, 5.0, -9.5),\n (0.65, 11.8, -1.3, 5.0, -9.5),\n (0.65, 3.95, -1.1, 5.0, -9.5),\n (0.65, 3.95, -1.3, 5.0, -9.5),\n (0.825, 7.875, -1.2000000000000002, 3.0, 2743.0),\n (1.0, 11.8, -1.3, -3.0, 11000.5),\n (1.0, 3.95, -1.1, -3.0, 11000.5),\n (1.0, 3.95, -1.3, -3.0, 11000.5),\n (0.65, 11.8, -1.1, -3.0, 11000.5),\n (0.65, 11.8, -1.3, -3.0, 11000.5),\n (0.65, 3.95, -1.1, -3.0, 11000.5),\n (0.65, 3.95, -1.3, -3.0, 11000.5),\n (0.825, 7.875, -1.2000000000000002, -1.0, 8248.0),\n (1.0, 11.8, -1.5, 5.0, 5495.5),\n (1.0, 3.95, -1.5, 5.0, 5495.5),\n (1.0, 3.95, -1.5, 5.0, -9.5),\n (0.65, 11.8, -1.5, 5.0, 5495.5),\n (0.65, 11.8, -1.5, 5.0, -9.5),\n (0.65, 3.95, -1.5, 5.0, 5495.5),\n (0.65, 3.95, -1.5, 5.0, -9.5),\n (0.825, 7.875, -1.4, 3.0, 2743.0),\n (1.0, 11.8, -1.5, 1.0, 11000.5),\n (1.0, 3.95, -1.5, 5.0, 11000.5),\n (1.0, 3.95, -1.5, 1.0, 11000.5),\n (0.65, 11.8, -1.5, 5.0, 11000.5),\n (0.65, 11.8, -1.5, 1.0, 11000.5),\n (0.65, 3.95, -1.5, 5.0, 11000.5),\n (0.65, 3.95, -1.5, 1.0, 11000.5),\n (0.825, 7.875, -1.4, 3.0, 8248.0),\n (1.0, 3.95, -1.5, -3.0, 11000.5),\n (0.65, 11.8, -1.5, -3.0, 11000.5),\n (0.65, 3.95, -1.5, -3.0, 11000.5),\n (0.825, 7.875, -1.4, -1.0, 8248.0),\n (1.0, -3.9, -1.1, 1.0, 5495.5),\n (1.0, -3.9, -1.1, -3.0, 5495.5), (1.0, -3.9, -1.1, 1.0, -9.5),\n (0.65, -3.9, -1.1, 1.0, 5495.5),\n (0.65, -3.9, -1.1, -3.0, 5495.5),\n (0.65, -3.9, -1.1, -3.0, -9.5),\n (0.65, -3.9, -1.1, 1.0, -9.5), (\n 0.825, 0.025000000000000133, -1.2000000000000002, -1.0,\n 2743.0), (1.0, -3.9, -1.1, 5.0, 5495.5),\n (1.0, -3.9, -1.3, 5.0, 5495.5), (1.0, -3.9, -1.3, 5.0, -9.5),\n (0.65, -3.9, -1.1, 5.0, 5495.5),\n (0.65, -3.9, -1.1, 5.0, -9.5),\n (0.65, -3.9, -1.3, 5.0, 5495.5),\n (0.65, -3.9, -1.3, 5.0, -9.5), (\n 0.825, 0.025000000000000133, -1.2000000000000002, 3.0, 2743.0),\n (1.0, -3.9, -1.1, 1.0, 11000.5),\n (1.0, -3.9, -1.3, 5.0, 11000.5),\n (1.0, -3.9, -1.3, 1.0, 11000.5),\n (0.65, -3.9, -1.1, 5.0, 11000.5),\n (0.65, -3.9, -1.1, 1.0, 11000.5),\n (0.65, -3.9, -1.3, 5.0, 11000.5),\n (0.65, -3.9, -1.3, 1.0, 11000.5), (\n 0.825, 0.025000000000000133, -1.2000000000000002, 3.0, 8248.0),\n (1.0, -3.9, -1.3, -3.0, 11000.5),\n (0.65, -3.9, -1.1, -3.0, 11000.5),\n (0.65, -3.9, -1.3, -3.0, 11000.5), (\n 0.825, 0.025000000000000133, 
-1.2000000000000002, -1.0,\n 8248.0), (1.0, -3.9, -1.5, 5.0, 5495.5),\n (0.65, -3.9, -1.5, 5.0, 5495.5),\n (0.65, -3.9, -1.5, 5.0, -9.5),\n (0.825, 0.025000000000000133, -1.4, 3.0, 2743.0),\n (1.0, -3.9, -1.5, 1.0, 11000.5),\n (0.65, -3.9, -1.5, 5.0, 11000.5),\n (0.65, -3.9, -1.5, 1.0, 11000.5),\n (0.825, 0.025000000000000133, -1.4, 3.0, 8248.0),\n (0.65, -3.9, -1.5, -3.0, 11000.5),\n (0.825, 0.025000000000000133, -1.4, -1.0, 8248.0),\n (0.3, 11.8, -1.3, 1.0, 5495.5), (0.3, 11.8, -1.5, 1.0, 5495.5),\n (0.3, 11.8, -1.5, -3.0, 5495.5), (0.3, 11.8, -1.5, 1.0, -9.5),\n (0.3, 11.8, -1.3, -3.0, 5495.5), (0.3, 11.8, -1.3, -3.0, -9.5),\n (0.3, 11.8, -1.3, 1.0, -9.5),\n (0.475, 7.875, -1.4, -1.0, 2743.0),\n (0.3, 11.8, -1.1, 1.0, 5495.5),\n (0.3, 11.8, -1.1, -3.0, 5495.5), (0.3, 11.8, -1.1, 1.0, -9.5),\n (0.3, 3.95, -1.1, 1.0, 5495.5),\n (0.3, 3.95, -1.1, -3.0, 5495.5),\n (0.3, 3.95, -1.1, -3.0, -9.5),\n (0.3, 3.95, -1.1, 1.0, -9.5),\n (0.475, 7.875, -1.2000000000000002, -1.0, 2743.0),\n (0.3, 11.8, -1.1, 5.0, 5495.5), (0.3, 11.8, -1.3, 5.0, 5495.5),\n (0.3, 11.8, -1.3, 5.0, -9.5),\n (0.3, 3.95, -1.1, 5.0, 5495.5),\n (0.3, 3.95, -1.1, 5.0, -9.5),\n (0.3, 3.95, -1.3, 5.0, 5495.5),\n (0.3, 3.95, -1.3, 5.0, -9.5),\n (0.475, 7.875, -1.2000000000000002, 3.0, 2743.0),\n (0.3, 11.8, -1.1, 1.0, 11000.5),\n (0.3, 11.8, -1.3, 5.0, 11000.5),\n (0.3, 11.8, -1.3, 1.0, 11000.5),\n (0.3, 3.95, -1.1, 5.0, 11000.5),\n (0.3, 3.95, -1.1, 1.0, 11000.5),\n (0.3, 3.95, -1.3, 5.0, 11000.5),\n (0.3, 3.95, -1.3, 1.0, 11000.5),\n (0.475, 7.875, -1.2000000000000002, 3.0, 8248.0),\n (0.3, 11.8, -1.3, -3.0, 11000.5),\n (0.3, 3.95, -1.1, -3.0, 11000.5),\n (0.3, 3.95, -1.3, -3.0, 11000.5),\n (0.475, 7.875, -1.2000000000000002, -1.0, 8248.0),\n (0.3, 11.8, -1.5, 5.0, 5495.5),\n (0.3, 3.95, -1.5, 5.0, 5495.5),\n (0.3, 3.95, -1.5, 5.0, -9.5),\n (0.475, 7.875, -1.4, 3.0, 2743.0),\n (0.3, 11.8, -1.5, 1.0, 11000.5),\n (0.3, 3.95, -1.5, 5.0, 11000.5),\n (0.3, 3.95, -1.5, 1.0, 11000.5),\n (0.475, 7.875, -1.4, 3.0, 8248.0),\n (0.3, 3.95, -1.5, -3.0, 11000.5),\n (0.475, 7.875, -1.4, -1.0, 8248.0),\n (0.3, -3.9, -1.1, 1.0, 5495.5),\n (0.3, -3.9, -1.1, -3.0, 5495.5), (0.3, -3.9, -1.1, 1.0, -9.5),\n (0.475, 0.025000000000000133, -1.2000000000000002, -1.0,\n 2743.0), (0.3, -3.9, -1.1, 5.0, 5495.5),\n (0.3, -3.9, -1.3, 5.0, 5495.5), (0.3, -3.9, -1.3, 5.0, -9.5), (\n 0.475, 0.025000000000000133, -1.2000000000000002, 3.0, 2743.0),\n (0.3, -3.9, -1.1, 1.0, 11000.5),\n (0.3, -3.9, -1.3, 5.0, 11000.5),\n (0.3, -3.9, -1.3, 1.0, 11000.5), (\n 0.475, 0.025000000000000133, -1.2000000000000002, 3.0, 8248.0),\n (0.3, -3.9, -1.3, -3.0, 11000.5), (\n 0.475, 0.025000000000000133, -1.2000000000000002, -1.0,\n 8248.0), (0.3, -3.9, -1.5, 5.0, 5495.5),\n (0.475, 0.025000000000000133, -1.4, 3.0, 2743.0),\n (0.3, -3.9, -1.5, 1.0, 11000.5),\n (0.475, 0.025000000000000133, -1.4, 3.0, 8248.0),\n (0.475, 0.025000000000000133, -1.4, -1.0, 8248.0)]\n nn_checks = {\n (0.3, -3.9, -1.5, -3.0, -9.5): [(0.3, -3.9, -1.5, 1.0, -9.5),\n (0.65, -3.9, -1.3, 1.0, 5495.5),\n (0.65, -3.9, -1.5, 1.0, 5495.5),\n (0.3, -3.9, -1.5, 1.0, 5495.5),\n (0.3, -3.9, -1.3, 1.0, 5495.5),\n (0.65, -3.9, -1.3, 1.0, -9.5),\n (0.65, -3.9, -1.5, 1.0, -9.5),\n (0.3, -3.9, -1.3, 1.0, -9.5),\n (0.3, 3.95, -1.5, -3.0, 5495.5),\n (0.65, 3.95, -1.5, -3.0, -9.5),\n (0.3, 3.95, -1.3, -3.0, 5495.5),\n (0.65, 3.95, -1.3, -3.0, -9.5),\n (0.3, 3.95, -1.3, -3.0, -9.5),\n (0.3, 3.95, -1.5, -3.0, -9.5),\n (0.65, 3.95, -1.3, -3.0, 5495.5),\n (0.65, 3.95, -1.5, -3.0, 5495.5),\n (0.3, -3.9, -1.3, -3.0, -9.5),\n 
(0.65, -3.9, -1.5, -3.0, 5495.5),\n (0.65, -3.9, -1.3, -3.0, 5495.5),\n (0.3, -3.9, -1.3, -3.0, 5495.5),\n (0.3, -3.9, -1.5, -3.0, 5495.5),\n (0.65, -3.9, -1.3, -3.0, -9.5),\n (0.65, -3.9, -1.5, -3.0, -9.5),\n (0.3, 3.95, -1.3, 1.0, 5495.5),\n (0.3, 3.95, -1.5, 1.0, 5495.5),\n (0.65, 3.95, -1.3, 1.0, -9.5),\n (0.65, 3.95, -1.5, 1.0, -9.5),\n (0.3, 3.95, -1.5, 1.0, -9.5),\n (0.65, 3.95, -1.5, 1.0, 5495.5), (\n 0.475, 0.025000000000000133, -1.4,\n -1.0, 2743.0),\n (0.3, 3.95, -1.3, 1.0, -9.5)],\n (0.3, -3.9, -1.5, 1.0, 11000.5): [(0.3, -3.9, -1.5, -3.0, 11000.5),\n (0.3, -3.9, -1.3, 1.0, 11000.5), (\n 0.475, 0.025000000000000133, -1.4,\n 3.0, 8248.0),\n (0.65, -3.9, -1.5, 1.0, 11000.5),\n (0.65, -3.9, -1.3, 1.0, 11000.5),\n (0.3, -3.9, -1.5, 1.0, 5495.5),\n (0.3, -3.9, -1.3, 1.0, 5495.5),\n (0.65, -3.9, -1.3, 1.0, 5495.5),\n (0.65, -3.9, -1.5, 1.0, 5495.5), (\n 0.475, 0.025000000000000133, -1.4,\n -1.0, 8248.0),\n (0.65, 3.95, -1.5, 1.0, 5495.5),\n (0.65, 3.95, -1.3, 1.0, 5495.5),\n (0.65, 3.95, -1.5, 1.0, 11000.5),\n (0.3, 3.95, -1.5, 1.0, 11000.5),\n (0.3, 3.95, -1.3, 1.0, 5495.5),\n (0.65, 3.95, -1.3, 1.0, 11000.5),\n (0.3, 3.95, -1.5, 1.0, 5495.5),\n (0.3, -3.9, -1.5, 5.0, 11000.5),\n (0.3, 3.95, -1.3, 1.0, 11000.5)],\n (0.475, 0.025000000000000133, -1.4, 3.0, 8248.0): [\n (0.65, 3.95, -1.5, 5.0, 11000.5),\n (0.65, 3.95, -1.3, 5.0, 11000.5),\n (0.3, -3.9, -1.5, 1.0, 11000.5),\n (0.3, 3.95, -1.3, 5.0, 11000.5),\n (0.65, -3.9, -1.3, 1.0, 11000.5),\n (0.3, 3.95, -1.5, 5.0, 11000.5),\n (0.65, -3.9, -1.5, 1.0, 11000.5),\n (0.3, -3.9, -1.3, 1.0, 11000.5),\n (0.65, -3.9, -1.3, 1.0, 5495.5),\n (0.65, -3.9, -1.5, 1.0, 5495.5), (0.3, -3.9, -1.5, 1.0, 5495.5),\n (0.3, -3.9, -1.3, 1.0, 5495.5), (0.3, 3.95, -1.5, 5.0, 5495.5),\n (0.3, 3.95, -1.3, 5.0, 5495.5), (0.65, -3.9, -1.5, 5.0, 5495.5),\n (0.65, -3.9, -1.3, 5.0, 5495.5), (0.3, -3.9, -1.5, 5.0, 5495.5),\n (0.3, -3.9, -1.3, 5.0, 5495.5), (0.65, 3.95, -1.3, 5.0, 5495.5),\n (0.65, 3.95, -1.5, 5.0, 5495.5), (0.3, 3.95, -1.3, 1.0, 5495.5),\n (0.3, 3.95, -1.5, 1.0, 5495.5),\n (0.65, 3.95, -1.3, 1.0, 11000.5),\n (0.3, -3.9, -1.3, 5.0, 11000.5),\n (0.65, 3.95, -1.5, 1.0, 11000.5),\n (0.3, -3.9, -1.5, 5.0, 11000.5),\n (0.3, 3.95, -1.5, 1.0, 11000.5),\n (0.65, 3.95, -1.5, 1.0, 5495.5),\n (0.3, 3.95, -1.3, 1.0, 11000.5),\n (0.65, 3.95, -1.3, 1.0, 5495.5),\n (0.65, -3.9, -1.3, 5.0, 11000.5),\n (0.65, -3.9, -1.5, 5.0, 11000.5)]}\n\n init_triangulation(5, 1, check, nn_checks,\n bounds=[(0.3, 1), (-3.9, 11.8), (-1.5, -1.1),\n (-3, 5), (-9.5, 11000.5)])",
"def par_test_2(self):\n\n for i in range(4):\n self.XYZ_par_factor.setMaxDepth(i)\n self.XYZ_par_factor.setMaxDepth(i)\n\n res = [\n self.XYZ_factor.mult(self.scalar),\n self.XYZ_factor.mult(self.scalarf),\n self.scalarf.mult(self.XYZ_factor),\n ]\n\n par_res = [\n self.XYZ_par_factor.mult(self.scalar),\n self.XYZ_par_factor.mult(self.par_scalarf),\n self.par_scalarf.mult(self.XYZ_par_factor),\n ]\n\n for i, ele in enumerate(res):\n assert (\n ele.rand_vars == par_res[i].rand_vars\n and ele.values == par_res[i].values\n )",
"def test_matrix22(gridsize=50):\n\n v1 = vec2(3,0)\n v2 = vec2(0,3)\n\n #rotate 45 degrees \n m22 = matrix22()\n m22.from_euler(45)\n\n # make a second matrix, also 45 degrees, should give us 90 total \n m22_2 = matrix22()\n m22_2.from_euler(45)\n m22 = m22_2 * m22\n\n # mutliply a vector by the matrix \n v3 = m22 * v2 \n\n fb = pixel_op() \n fb.create_buffer(800, 800)\n fb.graticule(gridsize)\n \n pts = [ (0,0), (0,1), (2,1), (0,2) ]\n #bloody_simple_2drender('2d_rotation.png', pts=pts, gridsize=50, pfb=fb)\n\n vecs = [v2,v3]\n bloody_simple_2drender('2d_rotation.png', vecs=vecs, gridsize=50, pfb=fb)\n\n #rotate the points by matrix multiplication \n pts = m22.batch_mult_pts(pts) \n bloody_simple_2drender('2d_rotation.png', pts=pts, gridsize=50, pfb=fb)\n fb.save('2d_rotation.png')",
"def test_half_case(self):\n steps = save_divide(np.ones(2), 2 * np.ones(2))\n np.testing.assert_equal(steps, 0.5 * np.ones(2))"
]
| [
"0.63592356",
"0.61472815",
"0.6060959",
"0.60506004",
"0.59646344",
"0.58725816",
"0.58380145",
"0.5816965",
"0.580119",
"0.5774057",
"0.5769687",
"0.5768955",
"0.5735332",
"0.5679965",
"0.55848306",
"0.55845535",
"0.5560804",
"0.5544674",
"0.5541364",
"0.55350125",
"0.5520684",
"0.5481955",
"0.5473932",
"0.54642123",
"0.5461019",
"0.54467535",
"0.54443765",
"0.54406154",
"0.543972",
"0.543475"
]
| 0.63131726 | 1 |
Testing M8 remeshing formula in 2D, 1 kernel, single precision, o2 splitting. | def test_2D_m8_1k():
scal, velo = setup_2D()
advec = Advection(velo, scal, discretization=d2d,
method={TimeIntegrator: RK2,
Interpolation: Linear,
Remesh: M8Prime,
Support: 'gpu_1k',
Splitting: 'o2'}
)
advec_py = Advection(velo, scal, discretization=d2d,
method={TimeIntegrator: RK2,
Interpolation: Linear,
Remesh: M8Prime,
Support: '',
Splitting: 'o2'},
)
assertion_2D_withPython(scal, velo, advec, advec_py) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_2D_m8_1k_sFH():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: 'gpu_1k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def test_2D_m8_2k():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: 'gpu_2k',\n Splitting: 'o2'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: '',\n Splitting: 'o2'},\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def test_2D_m8_2k_sFH():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: 'gpu_2k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def kernel_test(slabs, data, backend):\n Q = data[:, 0]\n\n layers = []\n for thickness, rsld, isld, sigma in slabs:\n layers.append(\n model.Layer(\n b=(rsld - 1j * isld), dens=0.1, d=thickness, sigma=sigma\n )\n )\n layers.reverse()\n stack = model.Stack(Layers=list(layers[1:-1]), Repetitions=1)\n sample = model.Sample(\n Stacks=[stack], Ambient=layers[-1], Substrate=layers[0]\n )\n # print(sample)\n\n inst = model.Instrument(\n probe=backend,\n wavelength=1.54,\n coords=\"q\",\n I0=1,\n res=0,\n restype=\"no conv\",\n respoints=5,\n resintrange=2,\n beamw=0.1,\n footype=\"no corr\",\n samplelen=10,\n pol=\"uu\",\n )\n if data.shape[1] == 4:\n dQ = data[:, 3]\n inst.restype = \"full conv and varying res.\"\n inst.res = dQ\n if backend == \"neutron pol spin flip\":\n # memory issues in matrix formalism if too many data points\n inst.respoints = 101\n else:\n inst.respoints = (\n 10001 # try to use same convolution as ref1d when generating\n )\n inst.resintrange = 3.5\n\n # print(inst)\n R = sample.SimSpecular(Q, inst)\n\n assert R.shape == data[:, 1].shape\n if data.shape[1] == 4:\n # validation accuracy is reduced for resolution runs, as strongly\n # depends on numerical convolution scheme\n if backend == \"neutron pol spin flip\":\n np.testing.assert_allclose(R, data[:, 1], rtol=0.005)\n else:\n np.testing.assert_allclose(R, data[:, 1], rtol=0.001)\n else:\n np.testing.assert_allclose(R, data[:, 1], rtol=0.001)",
"def test_multiple_case(self):\r\n\r\n shp = (3, 3)\r\n fx, fy, fz, fw = fmatrices('xyzw')\r\n dx, dy, dz, dw = dmatrices('xyzw')\r\n fv = fvector('r').dimshuffle('x', 0)\r\n dv = dvector('s').dimshuffle('x', 0)\r\n fxv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fyv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fzv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fwv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float32').reshape(1, shp[0])\r\n dxv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dyv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dzv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dwv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float64').reshape(1, shp[0])\r\n\r\n #We must be sure that the Canonizer is working, but that we don't have other\r\n # optimisation that could hide bug in the Canonizer as local_elemwise_fusion\r\n mode = compile.mode.get_default_mode()\r\n old_optimizer = mode._optimizer\r\n try:\r\n mode._optimizer = gof.Query([\"canonicalize\"])\r\n mode._optimizer = mode._optimizer.including('ShapeOpt')\r\n mode._optimizer = mode._optimizer.excluding(\r\n 'local_elemwise_fusion')\r\n\r\n #test x / x -> 1\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([(fx/fx,[fx],[fxv],'float32'),\r\n (dx/dx,[dx],[dxv],'float64'),\r\n (fv/fv,[fv],[fvv],'float32'),\r\n (dv/dv,[dv],[dvv],'float64'),\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert (out == numpy.ones(shp, dtype=out_dtype)).all()\r\n topo = f.maker.fgraph.toposort()\r\n if sym_inputs[0].broadcastable[0]:\r\n assert len(topo) == 2\r\n assert isinstance(topo[0].op, Shape_i)\r\n assert isinstance(topo[1].op, tensor.Alloc)\r\n else:\r\n assert len(topo) == 3\r\n assert isinstance(topo[0].op, Shape_i)\r\n assert isinstance(topo[1].op, Shape_i)\r\n assert isinstance(topo[2].op, tensor.Alloc)\r\n assert(out_dtype == out.dtype)\r\n\r\n #test (x * y) / x -> y\r\n for id,(g, sym_inputs, val_inputs, nb_elemwise, out_dtype) in enumerate([\r\n ((dx*dy)/dx,[dx,dy],[dxv,dyv],0,'float64'),\r\n ((fx*fy)/fx,[fx,fy],[fxv,fyv],0,'float32'),\r\n ((dv*dy)/dv,[dv,dy],[dvv,dyv],0,'float64'),\r\n ((fv*fy)/fv,[fv,fy],[fvv,fyv],0,'float32'),\r\n #must broadcast as their is a dimshuffle in the computation\r\n ((dx*dv)/dx,[dx,dv],[dxv,dvv],1,'float64'),\r\n #topo: [Elemwise{second,no_inplace}(x, <TensorType(float64, row)>)]\r\n ((fx*fv)/fx,[fx,fv],[fxv,fvv],1,'float32')\r\n #topo: [Elemwise{second,no_inplace}(x, <TensorType(float32, row)>)]\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert(out_dtype == out.dtype)\r\n assert numpy.allclose(out, val_inputs[1])\r\n topo = f.maker.fgraph.toposort()\r\n print \"ID TOPO\", id, topo, sym_inputs\r\n for r, t in f.maker.fgraph.shape_feature.shape_of.items():\r\n print ' ', r, t\r\n if topo and not(len(topo)==1 and topo[0].op==deep_copy_op):\r\n for node in topo[:-1]:\r\n assert isinstance(node.op, Shape_i)\r\n assert isinstance(topo[-1].op, tensor.Alloc)\r\n\r\n #test x / y / x -> 1 / y\r\n for id,(g, sym_inputs, val_inputs, nb_elemwise, out_dtype) in enumerate([\r\n ((dx/dy)/dx,[dx,dy],[dxv,dyv],1,'float64'),\r\n ((fx/fy)/fx,[fx,fy],[fxv,fyv],1,'float32'),\r\n ((dv/dy)/dv,[dv,dy],[dvv,dyv],1,'float64'),\r\n 
((fv/fy)/fv,[fv,fy],[fvv,fyv],1,'float32'),\r\n #must broadcast as their is a dimshuffle in the computation\r\n\r\n ((dx/dv)/dx,[dx,dv],[dxv,dvv],1,'float64'),\r\n #topo: [Shape_i, Shape_i, Elemwise{inv,no_inplace}(<TensorType(float64, row)>), Alloc]\r\n ((fx/fv)/fx,[fx,fv],[fxv,fvv],1,'float32'),\r\n #topo:[Shape_i, Shape_i, Elemwise{inv,no_inplace}(<TensorType(float32, row)>), Alloc]\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, (1 / val_inputs[1]))\r\n topo = f.maker.fgraph.toposort()\r\n print topo\r\n elem = [t for t in topo if isinstance(t.op, T.Elemwise)]\r\n assert len(elem) == nb_elemwise\r\n assert isinstance(elem[0].op, (T.Elemwise, ))\r\n assert isinstance(elem[0].op.scalar_op, (\r\n theano.scalar.basic.Inv, theano.scalar.basic.TrueDiv))\r\n assert(out_dtype == out.dtype)\r\n\r\n #test (a / b) * (b / c) * (c / d) -> a / d\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n ((dx / dy) * (dy / dz) * (dz / dw),[dx,dy,dz,dw],[dxv,dyv,dzv,dwv],'float64'),\r\n ((fx / fy) * (fy / fz) * (fz / fw),[fx,fy,fz,fw],[fxv,fyv,fzv,fwv],'float32'),\r\n ((dv / dy) * (dy / dz) * (dz / dw),[dv,dy,dz,dw],[dvv,dyv,dzv,dwv],'float64'),\r\n ((fv / fy) * (fy / fz) * (fz / fw),[fv,fy,fz,fw],[fvv,fyv,fzv,fwv],'float32'),\r\n ((dx / dv) * (dv / dz) * (dz / dw),[dx,dv,dz,dw],[dxv,dvv,dzv,dwv],'float64'),\r\n ((fx / fv) * (fv / fz) * (fz / fw),[fx,fv,fz,fw],[fxv,fvv,fzv,fwv],'float32'),\r\n ((dx / dy) * (dy / dv) * (dv / dw),[dx,dy,dv,dw],[dxv,dyv,dvv,dwv],'float64'),\r\n ((fx / fy) * (fy / fv) * (fv / fw),[fx,fy,fv,fw],[fxv,fyv,fvv,fwv],'float32'),\r\n ((dx / dy) * (dy / dz) * (dz / dv),[dx,dy,dz,dv],[dxv,dyv,dzv,dvv],'float64'),\r\n ((fx / fy) * (fy / fz) * (fz / fv),[fx,fy,fz,fv],[fxv,fyv,fzv,fvv],'float32'),\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, (val_inputs[0] / val_inputs[3]))\r\n topo = f.maker.fgraph.toposort()\r\n assert len(topo) == 1\r\n assert isinstance(topo[0].op, (T.Elemwise, ))\r\n assert isinstance(topo[0].op.scalar_op,\r\n theano.scalar.basic.TrueDiv)\r\n assert len(topo[0].inputs) == 2\r\n assert(out_dtype == out.dtype)\r\n\r\n #test (2.0 * x) / (4.0 * y) -> (0.5 * x) / y\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n (((2.0*dx)/(4.0*dy)),[dx,dy],[dxv,dyv],'float64'),\r\n (((2.0*fx)/(4.0*fy)),[fx,fy],[fxv,fyv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n (((2.0*dv)/(4.0*dy)),[dv,dy],[dvv,dyv],'float64'),\r\n (((2.0*fv)/(4.0*fy)),[fv,fy],[fvv,fyv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n (((2.0*dx)/(4.0*dv)),[dx,dv],[dxv,dvv],'float64'),\r\n (((2.0*fx)/(4.0*fv)),[fx,fv],[fxv,fvv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ]):\r\n\r\n if isinstance(out_dtype, dict):\r\n out_dtype = out_dtype[config.cast_policy]\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, (0.5 *\r\n val_inputs[0] / val_inputs[1]))\r\n topo = f.maker.fgraph.toposort()\r\n assert len(topo) == 2\r\n assert isinstance(topo[0].op, (T.Elemwise, ))\r\n assert isinstance(topo[0].op.scalar_op,\r\n theano.scalar.basic.Mul)\r\n assert len(topo[0].inputs) == 2\r\n assert isinstance(topo[1].op, (T.Elemwise, ))\r\n assert isinstance(topo[1].op.scalar_op,\r\n theano.scalar.basic.TrueDiv)\r\n assert len(topo[1].inputs) == 2\r\n assert(out_dtype == 
out.dtype)\r\n\r\n #test 2 * x / 2 -> x\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n ((2*dx)/2,[dx],[dxv],'float64'),\r\n ((2*fx)/2,[fx],[fxv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ((2*dv)/2,[dv],[dvv],'float64'),\r\n ((2*fv)/2,[fv],[fvv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ]):\r\n if isinstance(out_dtype, dict):\r\n out_dtype = out_dtype[config.cast_policy]\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, val_inputs[0])\r\n topo = f.maker.fgraph.toposort()\r\n assert len(topo) == 1\r\n topo[0].op == deep_copy_op\r\n assert(out_dtype == out.dtype)\r\n\r\n #test x / abs(x) -> sign(x)\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n (dx/abs(dx),[dx],[0.5-dxv],'float64'),\r\n (fx/abs(fx),[fx],[0.5-fxv], 'float32'),\r\n (dx/abs(dx),[dx],[0.1*dxv],'float64'),\r\n (fx/abs(fx),[fx],[0.1*fxv], 'float32'),\r\n (dv/abs(dv),[dv],[0.5-dvv],'float64'),\r\n (fv/abs(fv),[fv],[0.5-fvv], 'float32'),\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.all(numpy.isfinite(out))\r\n assert numpy.allclose(out, numpy.sign(val_inputs[0]))\r\n assert(out_dtype == out.dtype)\r\n assert len(f.maker.fgraph.toposort()) == 1\r\n\r\n #test (2*x) / (3*abs(x)) -> sign(x)\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n ((2 * dx) / (3 * abs(dx)), [dx], [0.5 - dxv], 'float64'),\r\n ((2 * fx) / (3 * abs(fx)), [fx], [0.5 - fxv],\r\n {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ((2 * dx) / (3 * abs(dx)), [dx], [0.1 * dxv], 'float64'),\r\n ((2 * fx) / (3 * abs(fx)), [fx], [0.1 * fxv],\r\n {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ((2 * dv) / (3 * abs(dv)), [dv], [0.5 - dvv], 'float64'),\r\n ((2 * fv) / (3 * abs(fv)), [fv], [0.5 - fvv],\r\n {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ]):\r\n\r\n if isinstance(out_dtype, dict):\r\n out_dtype = out_dtype[config.cast_policy]\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n topo = f.maker.fgraph.toposort()\r\n out = f(*val_inputs)\r\n assert numpy.all(numpy.isfinite(out))\r\n assert numpy.allclose(out, numpy.sign(val_inputs[0]) * 2 / 3)\r\n assert(out_dtype == out.dtype)\r\n finally:\r\n mode._optimizer = old_optimizer",
"def test_filter_l2_1():\n box = Box(length=L, origin=O)\n f = Field(box, formula=func, name='f0')\n d_fine = Discretization([513, 513, 513])\n d_coarse = Discretization([257, 257, 257], ghosts=[2, 2, 2])\n op = MultiresolutionFilter(d_in=d_fine, d_out=d_coarse,\n variables={f: d_coarse},\n method={Remesh: L2_1, })\n op.discretize()\n op.setup()\n topo_coarse = op.discreteFields[f].topology\n topo_fine = [t for t in f.discreteFields.keys()\n if not t is topo_coarse][0]\n f.initialize(topo=topo_fine)\n f_out = f.discreteFields[topo_coarse]\n op.apply(simu)\n valid = [npw.zeros(f_out[0].shape), ]\n valid = func(valid, *topo_coarse.mesh.coords)\n assert np.allclose(valid[0][topo_coarse.mesh.iCompute],\n f_out[0][topo_coarse.mesh.iCompute]), \\\n np.max(np.abs(valid[0][topo_coarse.mesh.iCompute] -\n f_out[0][topo_coarse.mesh.iCompute]))",
"def test_1_2_2D_cube_splits(self):\n check = [(0, 0), (1, 1), (1, 0), (0, 1), (0.5, 0.5), (0.0, 0.5),\n (0.5, 0.0),\n (0.25, 0.25), (1.0, 0.5), (0.5, 1.0), (0.75, 0.75),\n (0.75, 0.25),\n (0.25, 0.75), (0.5, 0.25), (0.25, 0.5), (0.375, 0.375),\n (0.0, 0.25),\n (0.25, 0.0), (0.125, 0.125), (0.125, 0.375), (0.375, 0.125),\n (0.5, 0.75), (0.75, 0.5), (0.625, 0.625), (1.0, 0.75),\n (0.75, 1.0),\n (0.875, 0.875), (0.875, 0.625), (0.625, 0.875), (0.625, 0.375),\n (1.0, 0.25), (0.75, 0.0), (0.875, 0.125), (0.875, 0.375),\n (0.625, 0.125), (0.375, 0.625), (0.0, 0.75), (0.25, 1.0),\n (0.125, 0.875), (0.125, 0.625), (0.375, 0.875)]\n\n nn_checks = {(0, 0): [(0.25, 0.0), (0.0, 0.25), (0.125, 0.125)],\n (0.625, 0.375): [(0.5, 0.5), (0.75, 0.25), (0.75, 0.5),\n (0.5, 0.25)],\n (0, 1): [(0.25, 1.0), (0.125, 0.875),(0.0, 0.75)],\n (0.625, 0.125): [(0.5, 0.0), (0.75, 0.25), (0.75, 0.0),\n (0.5, 0.25)]}\n\n\n init_triangulation(2, 2, check, nn_checks)",
"def reg2():\n data2 = np.load(\"./data/measure4_1.npy\")[2:]\n\n x2 = np.arange(0,len(data2),1)\n\n fit = True \n redistribute = True \n\n #x2 = 1.3149710372035508*x2 -22.617788714272098\n c2 = np.where(x2 < 135)\n\n data = data2[c2] \n x = x2[c2]\n print(\"datapoints:\",len(data))\n\n mass = 79/251/6080*52658\n if redistribute == True:\n\n # conserving the mass\n total_mass = mass * len(data)\n remaining = (data > 0)\n\n while True:\n print(\"new redistributing ...\")\n print(\"total mass:\",total_mass)\n # find those which are smaller\n q = (data[remaining] <= mass)\n remaining = ~q\n if len(np.nonzero(q)[0]) == 0:\n data[remaining] -= mass\n break\n print(\"number of smaller values:\",len(np.nonzero(q)[0]),\"\\n\")\n # subtract the mass of this data\n total_mass -= np.sum(data[q])\n mass = total_mass / len(np.nonzero(~remaining)[0]) \n data[q] = 0\n\n # redistribute total remaining mass to single channels\n print(\"number of nonzero:\",len(np.nonzero(data)[0]))\n\n c = np.nonzero(data) \n data = data[c]\n x = x[c]\n\n #scaling to time units\n x = 6.3149710372035508*x -22.617788714272098\n c = (x>0)\n x = x[c]\n data = data[c]\n\n x = x[::-1] - min(x)\n\n\n error = np.sqrt(data) \n # only fit for x < 135\n fig = plt.figure()\n ax = plt.subplot(111)\n plt.grid(True)\n\n if fit==True:\n\n def func(x, *p):\n a,b,c = p\n return a + b * c**x\n\n # p0 is the initial guess for the fitting coefficients \n p0 = [1., 1., 1.]\n\n p, cov = curve_fit(func, x, data, p0=p0, sigma = error)\n p_uc = uc.correlated_values(p, cov)\n c = p_uc[2]\n\n T12_lit = 98 \n lamb_lit = -(np.log(2)/T12_lit)\n print(\"lit\",lamb_lit)\n \n\n lamb = umath.log(c)\n print(lamb)\n T12 = -np.log(2) /lamb \n print(\"t12=\",T12)\n\n x_fit = np.linspace(min(x),max(x))\n\n data_fit = func(x_fit,*p) \n pmin = (p - np.sqrt(np.diag(cov)))\n pmax = (p + np.sqrt(np.diag(cov)))\n\n data_fit_min = func(x_fit, *pmin)\n data_fit_max = func(x_fit, *pmax)\n\n plt.plot(x_fit,data_fit)\n plt.plot(x_fit,90*np.exp(x_fit * lamb_lit))\n plt.fill_between(x_fit, data_fit_min , data_fit_max,facecolor=\"r\", color=\"b\", alpha=0.3 )\n\n # place a text box in upper left in axes coords\n props = dict(boxstyle='round', facecolor='white', alpha=0.5)\n textstr = '$a + b \\cdot c^x$ with\\n$a=%.2f$\\n$b=%.2f$\\n$c=%.2f$'%(p[0], p[1],p[2])\n ax.text(0.6, 0.85, textstr, transform=ax.transAxes, fontsize=18, va='top', bbox=props)\n\n ax.xaxis.set_tick_params(labelsize = 14)\n ax.yaxis.set_tick_params(labelsize = 14)\n\n ax.add_patch(plt.Rectangle((0,0.1),155,100,alpha = 0.2))\n\n plt.errorbar(x,data, yerr=error,fmt=\"x\")\n #plt.scatter(x,data,c=\"blue\",alpha = 0.9,s=100, marker=\"x\")\n plt.ylim(min(data)*0.8,max(data))\n #plt.yscale(\"log\")\n plt.xlim(min(x)*0.8,max(x))\n plt.xlabel(\"time in $ns$\", fontsize = 14)\n plt.ylabel(\"counts\", fontsize = 14)\n make_fig(fig,1,1,name=\"plot4_1_reg\")",
"def test_dmi_uses_unit_length_2dmesh():\n A = 8.78e-12 # J/m\n D = 1.58e-3 # J/m^2\n Ms = 3.84e5 # A/m\n\n energies = []\n\n # unit_lengths 1e-9 and 1 are common, let's throw in an intermediate length\n # just to challenge the system a little:\n for unit_length in (1, 1e-4, 1e-9):\n radius = 200e-9 / unit_length\n maxh = 5e-9 / unit_length\n helical_period = (4 * pi * A / D) / unit_length\n k = 2 * pi / helical_period\n # HF 27 April 2014: The next command fails in dolfin 1.3\n # mesh = df.CircleMesh(df.Point(0, 0), radius, maxh)\n # The actual shape of the domain shouldn't matter for the test,\n # so let's use a Rectangular mesh which should work the same:\n\n nx = ny = int(round(radius / maxh))\n mesh = df.RectangleMesh(df.Point(0, 0), df.Point(radius, radius), nx, ny)\n\n S3 = df.VectorFunctionSpace(mesh, \"CG\", 1, dim=3)\n m_expr = df.Expression((\"0\", \"cos(k * x[0])\", \"sin(k * x[0])\"), k=k, degree=1)\n m = Field(S3, m_expr, name='m')\n dmi = DMI(D)\n Ms_dg = Field(df.FunctionSpace(mesh, 'DG', 0), Ms)\n dmi.setup(m, Ms_dg, unit_length=unit_length)\n energies.append(dmi.compute_energy())\n\n H = df.Function(S3)\n H.vector()[:] = dmi.compute_field()\n print H(0.0, 0.0)\n\n print \"Using unit_length = {}.\".format(unit_length)\n print \"Helical period {}.\".format(helical_period)\n print \"Energy {}.\".format(dmi.compute_energy())\n\n rel_diff_energies = abs(energies[0] - energies[1]) / abs(energies[1])\n print \"Relative difference of energy {}.\".format(rel_diff_energies)\n assert rel_diff_energies < 1e-13\n\n rel_diff_energies2 = abs(energies[0] - energies[2]) / abs(energies[2])\n print \"Relative difference2 of energy {}.\".format(rel_diff_energies2)\n assert rel_diff_energies2 < 1e-13",
"def regularize_fwd(X, y, mu0, mu1, v1, nz, K, verbose=False, showpath=False, fignum=1):\n \n if verbose: sss=0#print '\\ncompute path between mu=%.4f and mu=%.4f'%(mu0, mu1)\n \n n, m = X.shape\n X_nz = np.atleast_2d(X[:, nz])\n b = np.dot(X.T, y)\n G = np.dot(X.T, X)\n \n nbr = 0\n mu = mu0\n trans_type = -1\n trans_sign = 0\n trans_ind = -1\n if verbose: sss=0#print 'initial active features =', nz\n if showpath:\n import matplotlib.pyplot as plt\n pth = np.linspace(mu0, mu1, 100)\n thetapth = np.zeros((m, 100))\n fig = plt.figure(fignum)\n plt.clf()\n allbr = []\n \n while mu < mu1:\n \n # find the breakpoints where coefficients become zero\n b_nz = b[nz]\n Kv1 = np.dot(K, v1)\n Kb_nz = np.dot(K, b_nz)\n mu_0 = Kb_nz / Kv1\n \n # find the breakpoints where new coefficients become active\n z = np.setdiff1d(np.arange(m), nz)\n X_z = np.atleast_2d(X[:, z])\n b_z = b[z]\n M = G[np.ix_(z, nz)]\n MKb_nz = np.dot(M, Kb_nz)\n MKv1 = np.dot(M, Kv1)\n mu_1 = (b_z - MKb_nz) / (1 - MKv1)\n mu_m1 = (b_z - MKb_nz) / (-1 - MKv1)\n \n if trans_type > 0: mu_0[-1] = mu1\n mu_0[mu_0 <= mu] = mu1\n if len(mu_0) > 0: \n mu_0_argmin = mu_0.argmin()\n mu_0_min = mu_0[mu_0_argmin][0]\n else:\n mu_0_min = mu1\n if trans_type == 0:\n if trans_sign == 1: mu_1[np.where(z == trans_ind)[0]] = mu1 + 1\n else: mu_m1[np.where(z == trans_ind)[0]] = mu1 + 1\n mu_1[mu_1 <= mu] = mu1\n if len(mu_1) > 0: \n mu_1_argmin = mu_1.argmin()\n mu_1_min = mu_1[mu_1_argmin][0]\n else:\n mu_1_min = mu1\n mu_m1[mu_m1 <= mu] = mu1\n if len(mu_m1) > 0: \n mu_m1_argmin = mu_m1.argmin()\n mu_m1_min = mu_m1[mu_m1_argmin][0]\n else:\n mu_m1_min = mu1\n \n # compute the breakpoint\n mu_br_all = np.array([mu_0_min, mu_1_min, mu_m1_min])\n trans_type = mu_br_all.argmin()\n mu_br = mu_br_all[trans_type]\n \n if mu_br < mu1:\n \n if showpath:\n if len(nz) > 0:\n inds = np.intersect1d(np.where(pth >= mu)[0], np.where(pth < mu_br)[0])\n thetapth[np.ix_(nz, inds)] = np.tile(Kb_nz, (1, len(inds))) - np.tile(Kv1, (1, len(inds))) * \\\n np.tile(pth[inds], (len(nz), 1))\n allbr.append(mu_br)\n \n nbr += 1\n mu = mu_br\n \n if trans_type == 0: # an element of theta(t) goes to zero\n trans_ind = nz[mu_0_argmin]\n trans_sign = v1[mu_0_argmin]\n if verbose: nbr=nbr#print 'transition point :: mu = %.4f :: feature %d is inactive'%(mu, trans_ind)\n nzind = range(len(nz))\n ####################################\n index=np.where(nz==trans_ind)[0][0]\n ####################################\n #print '1)', nzind, nz, trans_ind, index\n nzind=np.delete(nzind,np.where(nzind==index))#nzind.remove(index)\n #print '2)', nzind\n #nzind.remove(nz.index(trans_ind))\n v1 = v1[nzind]\n nz=np.delete(nz,np.where(nz==trans_ind))\n #print '3)', nz\n #nz.remove(trans_ind)\n X_nz = X[:, nz]\n K = invupdatered(K, mu_0_argmin)\n else: # new active element\n if trans_type == 1: # it is positive\n trans_ind = z[mu_1_argmin]\n if verbose: K=K#print 'transition point :: mu = %.4f :: feature %d is positive'%(mu, trans_ind)\n nz=np.append(nz,trans_ind)\n #nz.append(trans_ind)\n v1 = np.vstack([v1, 1])\n else: # it is negative\n trans_ind = z[mu_m1_argmin]\n if verbose: K=K#print 'transition point :: mu = %.4f :: feature %d is negative'%(mu, trans_ind)\n nz=np.append(nz,trans_ind)\n #nz.append(trans_ind)\n v1 = np.vstack([v1, -1])\n X_new = np.atleast_2d(X[:, trans_ind]).T\n K = invupdateapp(K, np.dot(X_nz.T, X_new), np.dot(X_new.T, X_nz), \n np.dot(X_new.T, X_new))\n X_nz = X[:, nz]\n \n else: # compute solution at mu1\n \n if verbose: sss=0#print 'compute solution at mu =', mu1\n if 
showpath and len(nz) > 0:\n inds = np.intersect1d(np.where(pth >= mu)[0], np.where(pth <= mu1)[0])\n thetapth[np.ix_(nz, inds)] = np.tile(Kb_nz, (1, len(inds))) - np.tile(Kv1, (1, len(inds))) * \\\n np.tile(pth[inds], (len(nz), 1))\n \n theta_nz = Kb_nz - mu1*Kv1\n mu = mu1\n \n if showpath:\n fig = plt.figure(fignum)\n leg = []\n for i in range(m):\n plt.plot(pth, thetapth[i, :])\n leg.append(r'$\\theta_%d(\\mu)$'%(i+1))\n plt.plot(pth, np.zeros(len(pth),), 'k')\n plt.xlabel(r'$\\mu$', fontsize=16)\n plt.title(r'Step 1: homotopy in $\\mu$', fontsize=16)\n plt.legend(leg, loc='best')\n plt.plot(allbr, np.zeros(nbr), 'ko')\n plt.xlim(mu0, mu1)\n plt.show()\n \n return theta_nz, nz, K, nbr",
"def test_2D_m6_1k():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L4_2,\n Support: 'gpu_2k',\n Splitting: 'o2'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L4_2,\n Support: '',\n Splitting: 'o2'},\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def test_linear_2d_merwe_column():\n\n\n def fx(x, dt):\n F = np.array([[1, dt, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, dt],\n [0, 0, 0, 1]], dtype=float)\n\n return np.dot(F, x)\n\n def hx(x):\n return np.array([x[0], x[2]])\n\n\n dt = 0.1\n points = MerweScaledSigmaPoints2(4, .1, 2., -1)\n kf = UKF2(dim_x=4, dim_z=2, dt=dt, fx=fx, hx=hx, points=points)\n\n\n kf.x = np.array([[-1., 1., -1., 1]]).T\n kf.P*=0.0001\n #kf.R *=0\n #kf.Q\n\n zs = []\n for i in range(20):\n z = np.array([[i+randn()*0.1],\n [i+randn()*0.1]])\n zs.append(z)\n\n Ms, Ps = kf.batch_filter(zs)\n smooth_x, _, _ = kf.rts_smoother(Ms, Ps, dt=dt)\n\n if DO_PLOT:\n plt.figure()\n zs = np.asarray(zs)\n plt.plot(zs[:,0], marker='+', c='b')\n plt.plot(Ms[:,0], c='b')\n plt.plot(smooth_x[:,0], smooth_x[:,2], c='r')\n print(smooth_x)",
"def test_2D_m4_1k_sFH():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: 'gpu_1k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def test_2_layer():\r\n # angular frequency in radians * THz\r\n w = 100 * nu.THz\r\n # Relative permittivity of metal and dielectric\r\n em = -4.56 + 0.12j\r\n ed = 1.23 + 0.01j\r\n ex_list = ez_list = [ed, em]\r\n # Relative permeabilities\r\n mu_list = [1,1]\r\n # Dictionary of input parameters\r\n input_params = {'w': w, 'd_list': [inf,inf], 'ex_list': ex_list,\r\n 'ez_list': ez_list, 'mu_list': mu_list}\r\n \r\n # Calculate the theoretical kx\r\n theo_kx = (w / nu.c0) * cmath.sqrt((em * ed) / (em + ed))\r\n if theo_kx.imag < 0:\r\n theo_kx *= -1\r\n print('Theoretical kx:',\r\n '(%.7g+%.7gj) rad/um' % (theo_kx.real / nu.um**-1, theo_kx.imag / nu.um**-1))\r\n \r\n # If I use the theoretical kx value, the mode should be correct and\r\n # all my tests should pass.\r\n params = deepcopy(input_params)\r\n params['kx'] = theo_kx\r\n params = find_all_params_from_kx(params)\r\n kzd, kzm = params['kz_list']\r\n # check that kz_list is correct\r\n assert_floats_are_equal(kzd**2, (w**2 / nu.c0**2) * ed**2 / (em + ed))\r\n assert_floats_are_equal(kzm**2, (w**2 / nu.c0**2) * em**2 / (em + ed))\r\n # check that layer_bottom_list is correct\r\n assert params['layer_bottom_list'][0] == -inf\r\n assert params['layer_bottom_list'][1] == 0\r\n # Check that the boundary condition matrix agrees with hand-calculation\r\n bc_mat = bc_matrix(params)\r\n # ...top-left is Ex0down / H0down\r\n assert_floats_are_equal(bc_mat[0,0], -kzd / (w * ed * nu.eps0))\r\n # ...top-right is -Ex1up / H1up\r\n assert_floats_are_equal(bc_mat[0,1], -kzm / (w * em * nu.eps0))\r\n # ...bottom-left is eps0 * Ez0down / H0down\r\n assert_floats_are_equal(bc_mat[1,0], ed * -theo_kx / (w * ed * nu.eps0))\r\n # ...bottom-right is -eps1 * Ez1up / H1up\r\n assert_floats_are_equal(bc_mat[1,1], -em * -theo_kx / (w * em * nu.eps0))\r\n # Check that one of the eigenvalues is almost zero (compared to the size\r\n # of the matrix elements).\r\n eigenvalues = np.linalg.eig(bc_mat)[0]\r\n assert abs(eigenvalues).min() / abs(bc_mat).max() < 1e-6\r\n # Check that the mode passes all tests.\r\n assert check_mode(params, thorough=True) is True\r\n # Check that I can scale the fields and it still passes all tests.\r\n params_scaled = rescale_fields(1.23+4.56j, params)\r\n assert check_mode(params_scaled, thorough=True) is True\r\n \r\n # Now try my kx-finding algorithm, to see if it finds the right value.\r\n kx_list = find_kx(input_params)\r\n print('kx_list:',\r\n ['(%.7g+%.7gj) rad/um' % (kx.real / nu.um**-1, kx.imag / nu.um**-1)\r\n for kx in kx_list])\r\n kx = kx_list[0]\r\n assert_floats_are_equal(theo_kx, kx)\r\n \r\n plot_mode(params)\r\n \r\n print('If you see this message, all the tests succeeded!!')",
"def test():\n # test getCl\n ISWoutFile = 'ISWout_scalCls.fits'\n ISWinFile = 'ISWin_scalCls.fits'\n ell,temps = getCl(ISWoutFile)\n\n \"\"\"\n # test showCl\n showCl(ell,temps)\n\n # test makeLegendreTable\n # this works fine for small lmax values, but ell=86 and higher have problems\n # possibly due to exceeding the maximum size of a float64 dtype\n makeLegendreTable(9,'testTable.npy')\n table = symLoad('testTable.npy')\n print table\n\n # test powerArray\n powers = powerArray(2,9)\n print powers\n \"\"\"\n\n # test makeCmatrix\n # measured time: 4.25 hrs for 6110 point mask\n startTime = time.time()\n\n # old files no longer used\n #saveMatrixFile = 'covar6110_R010_lowl.npy'\n #saveMatrixFile = 'covar6110_R010.npy'\n #maskFile = '/shared/Data/PSG/hundred_point/ISWmask2_din1_R160.fits'\n #saveMatrixFile = 'covar9875_R160b.npy'\n\n # huge mask\n #maskFile = 'ISWmask9875_RING.fits' #19917 pixels\n #saveMatrixFile = 'covar19917_ISWout_bws_hp12_RING.npy'\n #covMat = makeCmatrix(maskFile, ISWoutFile, highpass=12, beamSmooth=True, pixWin=True, nested=False)\n # took 24.83 hours\n\n # use ISWin to model expected signal\n #maskFile = 'ISWmask6110_RING.fits'\n #saveMatrixFile = 'covar6110_ISWin_bws_hp12_RING.npy'\n #covMat = makeCmatrix(maskFile, ISWinFile, highpass=12, nested=True)\n maskFile = 'ISWmask9875_RING.fits' #9875 pixels\n saveMatrixFile = 'covar9875_ISWin_bws_hp12_RING.npy'\n covMat = makeCmatrix(maskFile, ISWinFile, highpass=12, beamSmooth=True, pixWin=True, nested=False)\n\n # no beam nor window smoothing, high lmax\n #saveMatrixFile = 'covar6110_ISWout_nBW_hp12_RING.npy'\n #covMat = makeCmatrix(maskFile, ISWoutFile, highpass=12, beamSmooth=False, pixWin=False, lmax=2200, nested=False)\n\n print 'time elapsed: ',int((time.time()-startTime)/60),' minutes'\n symSave(covMat,saveMatrixFile)\n \"\"\"\n\n # test subMatrix\n subMask = '/shared/Data/PSG/small_masks/ISWmask_din1_R010_trunc0500.fits'\n subCmat = subMatrix(subMask,maskFile,saveMatrixFile)\n print 'time elapsed: ',int((time.time()-startTime)/60),' minutes'\n \"\"\"",
"def lpt_prototype(mesh,\n nc=FLAGS.nc,\n bs=FLAGS.box_size,\n batch_size=FLAGS.batch_size,\n a0=FLAGS.a0,\n a=FLAGS.af,\n nsteps=FLAGS.nsteps):\n\n stages = np.linspace(a0, a, nsteps, endpoint=True)\n klin = np.loadtxt('../flowpm/data/Planck15_a1p00.txt').T[0]\n plin = np.loadtxt('../flowpm/data/Planck15_a1p00.txt').T[1]\n ipklin = iuspline(klin, plin)\n\n # Define the named dimensions\n # Parameters of the small scales decomposition\n n_block_x = FLAGS.nx\n n_block_y = FLAGS.ny\n n_block_z = 1\n halo_size = FLAGS.hsize\n\n if halo_size >= 0.5 * min(nc // n_block_x, nc // n_block_y, nc // n_block_z):\n new_size = int(0.5 *\n min(nc // n_block_x, nc // n_block_y, nc // n_block_z))\n print('WARNING: REDUCING HALO SIZE from %d to %d' % (halo_size, new_size))\n halo_size = new_size\n\n # Parameters of the large scales decomposition\n downsampling_factor = FLAGS.dsample\n lnc = nc // 2**downsampling_factor\n\n #\n\n fx_dim = mtf.Dimension(\"nx\", nc)\n fy_dim = mtf.Dimension(\"ny\", nc)\n fz_dim = mtf.Dimension(\"nz\", nc)\n\n tfx_dim = mtf.Dimension(\"tx\", nc)\n tfy_dim = mtf.Dimension(\"ty\", nc)\n tfz_dim = mtf.Dimension(\"tz\", nc)\n\n # Dimensions of the low resolution grid\n x_dim = mtf.Dimension(\"nx_lr\", lnc)\n y_dim = mtf.Dimension(\"ny_lr\", lnc)\n z_dim = mtf.Dimension(\"nz_lr\", lnc)\n\n tx_dim = mtf.Dimension(\"tx_lr\", lnc)\n ty_dim = mtf.Dimension(\"ty_lr\", lnc)\n tz_dim = mtf.Dimension(\"tz_lr\", lnc)\n\n nx_dim = mtf.Dimension('nx_block', n_block_x)\n ny_dim = mtf.Dimension('ny_block', n_block_y)\n nz_dim = mtf.Dimension('nz_block', n_block_z)\n\n sx_dim = mtf.Dimension('sx_block', nc // n_block_x)\n sy_dim = mtf.Dimension('sy_block', nc // n_block_y)\n sz_dim = mtf.Dimension('sz_block', nc // n_block_z)\n\n k_dims = [tx_dim, ty_dim, tz_dim]\n\n batch_dim = mtf.Dimension(\"batch\", batch_size)\n pk_dim = mtf.Dimension(\"npk\", len(plin))\n pk = mtf.import_tf_tensor(mesh, plin.astype('float32'), shape=[pk_dim])\n\n # Compute necessary Fourier kernels\n kvec = flowpm.kernels.fftk((nc, nc, nc), symmetric=False)\n kx = mtf.import_tf_tensor(mesh,\n kvec[0].squeeze().astype('float32'),\n shape=[tfx_dim])\n ky = mtf.import_tf_tensor(mesh,\n kvec[1].squeeze().astype('float32'),\n shape=[tfy_dim])\n kz = mtf.import_tf_tensor(mesh,\n kvec[2].squeeze().astype('float32'),\n shape=[tfz_dim])\n kv = [ky, kz, kx]\n\n # kvec for low resolution grid\n kvec_lr = flowpm.kernels.fftk([lnc, lnc, lnc], symmetric=False)\n\n kx_lr = mtf.import_tf_tensor(mesh,\n kvec_lr[0].squeeze().astype('float32') /\n 2**downsampling_factor,\n shape=[tx_dim])\n ky_lr = mtf.import_tf_tensor(mesh,\n kvec_lr[1].squeeze().astype('float32') /\n 2**downsampling_factor,\n shape=[ty_dim])\n kz_lr = mtf.import_tf_tensor(mesh,\n kvec_lr[2].squeeze().astype('float32') /\n 2**downsampling_factor,\n shape=[tz_dim])\n kv_lr = [ky_lr, kz_lr, kx_lr]\n\n # kvec for high resolution blocks\n padded_sx_dim = mtf.Dimension('padded_sx_block',\n nc // n_block_x + 2 * halo_size)\n padded_sy_dim = mtf.Dimension('padded_sy_block',\n nc // n_block_y + 2 * halo_size)\n padded_sz_dim = mtf.Dimension('padded_sz_block',\n nc // n_block_z + 2 * halo_size)\n kvec_hr = flowpm.kernels.fftk([\n nc // n_block_x + 2 * halo_size, nc // n_block_y + 2 * halo_size,\n nc // n_block_z + 2 * halo_size\n ],\n symmetric=False)\n\n kx_hr = mtf.import_tf_tensor(mesh,\n kvec_hr[0].squeeze().astype('float32'),\n shape=[padded_sx_dim])\n ky_hr = mtf.import_tf_tensor(mesh,\n kvec_hr[1].squeeze().astype('float32'),\n shape=[padded_sy_dim])\n kz_hr = 
mtf.import_tf_tensor(mesh,\n kvec_hr[2].squeeze().astype('float32'),\n shape=[padded_sz_dim])\n kv_hr = [ky_hr, kz_hr, kx_hr]\n\n shape = [batch_dim, fx_dim, fy_dim, fz_dim]\n lr_shape = [batch_dim, x_dim, y_dim, z_dim]\n hr_shape = [batch_dim, nx_dim, ny_dim, nz_dim, sx_dim, sy_dim, sz_dim]\n part_shape = [batch_dim, fx_dim, fy_dim, fz_dim]\n\n # Begin simulation\n\n initc = mtfpm.linear_field(mesh, shape, bs, nc, pk, kv)\n\n # Reshaping array into high resolution mesh\n field = mtf.slicewise(lambda x: tf.expand_dims(\n tf.expand_dims(tf.expand_dims(x, axis=1), axis=1), axis=1), [initc],\n output_dtype=tf.float32,\n output_shape=hr_shape,\n name='my_reshape',\n splittable_dims=lr_shape[:-1] + hr_shape[1:4] +\n part_shape[1:3])\n\n for block_size_dim in hr_shape[-3:]:\n field = mtf.pad(field, [halo_size, halo_size], block_size_dim.name)\n\n for blocks_dim, block_size_dim in zip(hr_shape[1:4], field.shape[-3:]):\n field = mpm.halo_reduce(field, blocks_dim, block_size_dim, halo_size)\n\n field = mtf.reshape(field, field.shape + [mtf.Dimension('h_dim', 1)])\n high = field\n low = mesh_utils.downsample(field, downsampling_factor, antialias=True)\n\n low = mtf.reshape(low, low.shape[:-1])\n high = mtf.reshape(high, high.shape[:-1])\n\n for block_size_dim in hr_shape[-3:]:\n low = mtf.slice(low, halo_size // 2**downsampling_factor,\n block_size_dim.size // 2**downsampling_factor,\n block_size_dim.name)\n # Hack usisng custom reshape because mesh is pretty dumb\n low = mtf.slicewise(lambda x: x[:, 0, 0, 0], [low],\n output_dtype=tf.float32,\n output_shape=lr_shape,\n name='my_dumb_reshape',\n splittable_dims=lr_shape[:-1] + hr_shape[:4])\n\n state = mtfpm.lpt_init(\n low,\n high,\n 0.1,\n kv_lr,\n kv_hr,\n halo_size,\n hr_shape,\n lr_shape,\n part_shape[1:],\n downsampling_factor=downsampling_factor,\n antialias=True,\n )\n\n # Here we can run our nbody\n final_state = state #mtfpm.nbody(state, stages, lr_shape, hr_shape, k_dims, kv_lr, kv_hr, halo_size, downsampling_factor=downsampling_factor)\n\n # paint the field\n final_field = mtf.zeros(mesh, shape=hr_shape)\n for block_size_dim in hr_shape[-3:]:\n final_field = mtf.pad(final_field, [halo_size, halo_size],\n block_size_dim.name)\n final_field = mesh_utils.cic_paint(final_field, final_state[0], halo_size)\n # Halo exchange\n for blocks_dim, block_size_dim in zip(hr_shape[1:4], final_field.shape[-3:]):\n final_field = mpm.halo_reduce(final_field, blocks_dim, block_size_dim,\n halo_size)\n # Remove borders\n for block_size_dim in hr_shape[-3:]:\n final_field = mtf.slice(final_field, halo_size, block_size_dim.size,\n block_size_dim.name)\n\n #final_field = mtf.reshape(final_field, [batch_dim, fx_dim, fy_dim, fz_dim])\n # Hack usisng custom reshape because mesh is pretty dumb\n final_field = mtf.slicewise(lambda x: x[:, 0, 0, 0], [final_field],\n output_dtype=tf.float32,\n output_shape=[batch_dim, fx_dim, fy_dim, fz_dim],\n name='my_dumb_reshape',\n splittable_dims=part_shape[:-1] + hr_shape[:4])\n\n return initc, final_field\n\n ##",
"def hexapodZernikeMultiLinearModel_hexapodcoordinate():\n Tfile='/home/jghao/research/decamFocus/psf_withseeing/finerGrid_coeff_matrix/zernike_coeff_finerGrid_training.cp'\n Vfile = '/home/jghao/research/decamFocus/psf_withseeing/finerGrid_coeff_matrix/zernike_coeff_finerGrid_validate.cp'\n b=p.load(open(Tfile))\n vb=p.load(open(Vfile))\n nobs = len(b)\n x = b[:,0]\n y = b[:,1]\n z = b[:,2]\n theta = b[:,3]\n phi = b[:,4]\n fwhm = b[:,5]\n e1 = b[:,6]\n e2 = b[:,7]\n thetax = theta*np.cos(np.deg2rad(phi))\n thetay = theta*np.sin(np.deg2rad(phi))\n xh = x*1000 # convert to hexapod coordinate\n yh = -y*1000\n zh = -z*1000\n xtilth = - thetay\n ytilth = - thetax\n dataX = b[:,8:68]\n coeff_xh = sm.WLS(xh,dataX).fit().params\n coeff_yh = sm.WLS(yh,dataX).fit().params\n coeff_zh = sm.WLS(zh,dataX).fit().params\n coeff_xtilth = sm.WLS(xtilth,dataX).fit().params\n coeff_ytilth = sm.WLS(ytilth,dataX).fit().params\n coeff = np.array([coeff_xh,coeff_yh,coeff_zh,coeff_xtilth,coeff_ytilth])\n vx = vb[:,0]\n vy = vb[:,1]\n vz = vb[:,2]\n vtheta = vb[:,3]\n vphi = vb[:,4]\n vfwhm = vb[:,5]\n ve1 = vb[:,6]\n ve2 = vb[:,7]\n vthetax = vtheta*np.cos(np.deg2rad(vphi))\n vthetay = vtheta*np.sin(np.deg2rad(vphi))\n vxh = vx*1000 # convert to hexapod coordinate\n vyh = -vy*1000\n vzh = -vz*1000\n vxtilth = - vthetay\n vytilth = - vthetax\n vdataX = vb[:,8:68]\n fit = np.dot(vdataX,coeff.T)\n bp.bin_scatter(vxh,fit[:,0],nbins=20,fmt='bo',scatter=True)\n bp.bin_scatter(vyh,fit[:,1],nbins=20,fmt='bo',scatter=True)\n bp.bin_scatter(vzh,fit[:,2],nbins=20,fmt='bo',scatter=True)\n bp.bin_scatter(vxtilth,fit[:,3],nbins=20,fmt='bo',scatter=True)\n bp.bin_scatter(vytilth,fit[:,4],nbins=20,fmt='bo',scatter=True)",
"def test_linear_2d_merwe():\n\n\n def fx(x, dt):\n F = np.array([[1, dt, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, dt],\n [0, 0, 0, 1]], dtype=float)\n\n return np.dot(F, x)\n\n def hx(x):\n return np.array([x[0], x[2]])\n\n\n dt = 0.1\n points = MerweScaledSigmaPoints(4, .1, 2., -1)\n kf = UKF(dim_x=4, dim_z=2, dt=dt, fx=fx, hx=hx, points=points)\n\n\n kf.x = np.array([-1., 1., -1., 1])\n kf.P*=0.0001\n #kf.R *=0\n #kf.Q\n\n zs = []\n for i in range(20):\n z = np.array([i+randn()*0.1, i+randn()*0.1])\n zs.append(z)\n\n Ms, Ps = kf.batch_filter(zs)\n smooth_x, _, _ = kf.rts_smoother(Ms, Ps, dt=dt)\n\n if DO_PLOT:\n plt.figure()\n zs = np.asarray(zs)\n plt.plot(zs[:,0], marker='+')\n plt.plot(Ms[:,0], c='b')\n plt.plot(smooth_x[:,0], smooth_x[:,2], c='r')\n print(smooth_x)",
"def test_power_spectral_density_from_spatially_resolved_magnetisation_confined_to_mesh_region(tmpdir, debug=False):\n os.chdir(str(tmpdir))\n RTOL = 1e-10\n\n H1 = 1e6 # external field in A/m\n alpha1 = 0.5 # some sort of damping constant\n omega1 = gamma * H1 # precession frequency\n\n H2 = 2.8e4 # external field in A/m\n alpha2 = 0.3 # some sort of damping constant\n omega2 = gamma * H2 # precession frequency\n\n ##\n # Step 1: Construct a time series of artificial magnetisation\n # data and save it to a bunch of .npy files.\n ##\n t_step = 1e-11\n t_ini = 0\n t_end = 10e-9\n\n N1 = 42 # in a real application this would be the number of mesh vertices\n N2 = 23 # in a real application this would be the number of mesh vertices\n fft_test_helpers.create_test_npy_files_with_two_regions(\n str(tmpdir), t_step, t_ini, t_end, omega1, alpha1, N1, omega2, alpha2, N2)\n\n ##\n # Step 2: compute the FFT of a resampled time series, both by\n # hand and using FFT_m.\n ##\n # XXX TODO: Resampling timesteps is not supported when using .npy\n # files. Either simplify the code below, or implement saving to\n # .h5 files so that it's easier to implement resampling for\n # spatially resolved data, too.\n ##\n t_step_res = t_step\n t_ini_res = t_ini\n t_end_res = t_end\n ts_resampled = np.arange(t_ini_res, t_end_res, t_step_res)\n\n # Compute time series based on resampled timesteps\n mx_res = exp(-ts_resampled * 1e8 / alpha1) * sin(omega1 * ts_resampled)\n my_res = exp(-ts_resampled * 1e8 / alpha1) * cos(omega1 * ts_resampled)\n mz_res = 1 - sqrt(mx_res ** 2 + my_res ** 2)\n\n # Compute 'analytical' Fourier transform of resampled time series and\n # determine the power of the spectrum for each component. We also need\n # to multiply by the number of mesh nodes because the numerical algorithm\n # sums up all contributions at the individual nodes (but we can just\n # multiply because they are all identical by construction).\n psd_mx_expected = N1 * np.absolute(np.fft.rfft(mx_res)) ** 2\n psd_my_expected = N1 * np.absolute(np.fft.rfft(my_res)) ** 2\n psd_mz_expected = N1 * np.absolute(np.fft.rfft(mz_res)) ** 2\n\n # Compute Fourier transform of resampled time series using FFT_m\n freqs_computed, psd_mx_computed, psd_my_computed, psd_mz_computed = \\\n compute_power_spectral_density('m_ringdown*.npy', t_step_res, t_ini=t_ini_res,\n t_end=t_end_res, subtract_values=None, restrict_to_vertices=xrange(N1))\n\n # Check that the analytically determined power spectra are the same as the\n # computed ones.\n assert(np.allclose(psd_mx_expected, psd_mx_computed, atol=0, rtol=RTOL))\n assert(np.allclose(psd_my_expected, psd_my_computed, atol=0, rtol=RTOL))\n assert(np.allclose(psd_mz_expected, psd_mz_computed, atol=0, rtol=RTOL))\n\n if debug:\n # Plot the spectra for debugging\n fig = plt.figure(figsize=(20, 5))\n ax = fig.gca()\n ax.plot(freqs_computed, psd_mx_expected, label='psd_mx_expected')\n ax.plot(freqs_computed, psd_my_expected, label='psd_my_expected')\n ax.plot(freqs_computed, psd_mz_expected, label='psd_mz_expected')\n ax.plot(freqs_computed, psd_mx_computed, label='psd_mx_computed')\n ax.plot(freqs_computed, psd_my_computed, label='psd_my_computed')\n ax.plot(freqs_computed, psd_mz_computed, label='psd_mz_computed')\n ax.legend(loc='best')\n fig.savefig('psd_m_McMichaelStiles.png')",
"def test_2D_m4_1k():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: 'gpu_1k',\n Splitting: 'o2'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: '',\n Splitting: 'o2'},\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def test_3D_m8_2k_sFH():\n scal, velo = setup_3D()\n\n advec = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: 'gpu_2k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_3D_withPython(scal, velo, advec, advec_py)",
"def test_2D_m6_2k():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L4_2,\n Support: 'gpu_2k',\n Splitting: 'o2'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L4_2,\n Support: '',\n Splitting: 'o2'},\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def test_1_2_2D_rec_splits(self):\n check = [(3.0, -2.0), (7.0, -1.0), (7.0, -2.0), (3.0, -1.0),\n (5.0, -1.5), (3.0, -1.5), (5.0, -2.0), (4.0, -1.75),\n (7.0, -1.5), (5.0, -1.0), (6.0, -1.25), (6.0, -1.75),\n (4.0, -1.25), (5.0, -1.75), (4.0, -1.5), (4.5, -1.625),\n (3.0, -1.75), (4.0, -2.0), (3.5, -1.875), (3.5, -1.625),\n (4.5, -1.875), (5.0, -1.25), (6.0, -1.5), (5.5, -1.375),\n (7.0, -1.25), (6.0, -1.0), (6.5, -1.125), (6.5, -1.375),\n (5.5, -1.125), (5.5, -1.625), (7.0, -1.75), (6.0, -2.0),\n (6.5, -1.875), (6.5, -1.625), (5.5, -1.875), (4.5, -1.375),\n (3.0, -1.25), (4.0, -1.0), (3.5, -1.125), (3.5, -1.375),\n (4.5, -1.125)]\n nn_checks = {(3.0, -2.0): [(3.0, -1.75), (3.5, -1.875), (4.0, -2.0)],\n (5.0, -1.75): [(5.0, -2.0), (5.0, -1.5), (5.5, -1.625),\n (5.5, -1.875), (4.5, -1.625), (6.0, -1.75),\n (4.5, -1.875), (4.0, -1.75)],\n (6.0, -2.0): [(5.0, -2.0), (5.5, -1.875), (6.0, -1.75),\n (6.5, -1.875), (7, -2)],\n (4.5, -1.125): [(5.0, -1.0), (4.0, -1.25), (5.0, -1.25),\n (4.0, -1.0)]}\n\n init_triangulation(2, 2, check, nn_checks, bounds=[(3, 7), (-2, -1)])",
"def test_fixedkernel(self):\r\n X = np.random.rand(30, 4)\r\n K = np.dot(X, X.T)\r\n kernel = GPy.kern.fixed(4, K)\r\n kern = GPy.kern.poly(5, degree=4)\r\n self.assertTrue(GPy.kern.kern_test(kern, verbose=verbose))",
"def init_stitch(N):\n\tif N is None:\n\t\tN = np.int32(HII_DIM) #prepare for stitching\n\tMETA_GRID_SIZE = DIM/N\n\tM = np.int32(HII_DIM/META_GRID_SIZE)\n\t#HII_DIM = np.int32(HII_DIM)\n\tf_pixel_factor = DIM/HII_DIM;\n\tscale = np.float32(BOX_LEN/DIM)\n\tprint 'scale', scale\n\tHII_scale = np.float32(BOX_LEN/HII_DIM)\n\tshape = (DIM,DIM,N)\n\tstitch_grid_size = (DIM/(block_size[0]),\n\t\t\t\t\t\tDIM/(block_size[0]),\n\t\t\t\t\t\tN/(block_size[0]))\n\tHII_stitch_grid_size = (HII_DIM/(block_size[0]),\n\t\t\t\t\t\tHII_DIM/(block_size[0]),\n\t\t\t\t\t\tM/(block_size[0]))\n\t#ratio of large box to small size\n\tkernel_source = open(cmd_folder+\"/initialize_stitch.cu\").read()\n\tkernel_code = kernel_source % {\n\n\t\t'DELTAK': DELTA_K,\n\t\t'DIM': DIM, \n\t\t'VOLUME': VOLUME,\n\t\t'META_BLOCKDIM': N\n\t}\n\tmain_module = nvcc.SourceModule(kernel_code)\n\tinit_stitch = main_module.get_function(\"init_kernel\")\n\tHII_filter = main_module.get_function(\"HII_filter\")\n\tsubsample_kernel = main_module.get_function(\"subsample\")\n\tvelocity_kernel = main_module.get_function(\"set_velocity\")\n\tpspec_texture = main_module.get_texref(\"pspec\")\n\tMRGgen = MRG32k3aRandomNumberGenerator(seed_getter=seed_getter_uniform, offset=0)\n\tplan2d = Plan((np.int64(DIM), np.int64(DIM)), dtype=np.complex64)\n\tplan1d = Plan((np.int64(DIM)), dtype=np.complex64)\n\tprint \"init pspec\"\n\tinterpPspec, interpSize = init_pspec() #interpPspec contains both k array and P array\n\tinterp_cu = cuda.matrix_to_array(interpPspec, order='F')\n\tcuda.bind_array_to_texref(interp_cu, pspec_texture)\n\t#hbox_large = pyfftw.empty_aligned((DIM, DIM, DIM), dtype='complex64')\n\thbox_large = np.zeros((DIM, DIM, DIM), dtype=np.complex64)\n\t#hbox_small = np.zeros(HII_shape, dtype=np.float32)\n\t#hbox_large = n\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\n\t# Set up pinned memory for transfer\n\t#largebox_hs = cuda.aligned_empty(shape=shape, dtype=np.float32, alignment=resource.getpagesize())\n\tlargebox_pin = cuda.pagelocked_empty(shape=shape, dtype=np.float32)\n\tlargecbox_pin = cuda.pagelocked_empty(shape=shape, dtype=np.complex64)\n\n\tlargebox_d = gpuarray.zeros(shape, dtype=np.float32)\n\tlargebox_d_imag = gpuarray.zeros(shape, dtype=np.float32)\n\tprint \"init boxes\"\n\tfor meta_z in xrange(META_GRID_SIZE):\n\t\t# MRGgen = MRG32k3aRandomNumberGenerator(seed_getter=seed_getter_uniform, offset=meta_x*N**3)\n\t\tinit_stitch(largebox_d, DIM, np.int32(meta_z),block=block_size, grid=stitch_grid_size)\n\t\tinit_stitch(largebox_d_imag, DIM, np.int32(meta_z),block=block_size, grid=stitch_grid_size)\n\t\tlargebox_d *= MRGgen.gen_normal(shape, dtype=np.float32)\n\t\tlargebox_d_imag *= MRGgen.gen_normal(shape, dtype=np.float32)\n\t\tlargebox_d = largebox_d + np.complex64(1.j) * largebox_d_imag\n\t\tcuda.memcpy_dtoh_async(largecbox_pin, largebox_d)\n\t\thbox_large[:, :, meta_z*N:(meta_z+1)*N] = largecbox_pin.copy()\n\t#if want to get velocity need to use this\n\tif True:\n\t\tprint \"saving kbox\"\n\t\tnp.save(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN), hbox_large)\n\n\tprint \"Executing FFT on device\"\n\t#hbox_large = pyfftw.interfaces.numpy_fft.ifftn(hbox_large).real\n\thbox_large = fft_stitch(N, plan2d, plan1d, hbox_large, largebox_d).real\n\tprint hbox_large.dtype\n\tprint \"Finished FFT on device\"\n\tnp.save(parent_folder+\"/Boxes/deltax_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN), hbox_large)\n\t\n\tif True:\n\t\tprint \"loading kbox\"\n\t\thbox_large = 
np.load(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN))\n\tfor meta_z in xrange(META_GRID_SIZE):\n\t\tlargebox_pin = hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy()\n\t\t#cuda.memcpy_htod_async(largebox_d, largebox_pin)\n\t\tlargebox_d = gpuarray.to_gpu_async(hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\tHII_filter(largebox_d, DIM, np.int32(meta_z), ZERO, smoothR, block=block_size, grid=stitch_grid_size);\n\t\thbox_large[:, :, meta_z*N:(meta_z+1)*N] = largebox_d.get_async()\n\t#import IPython; IPython.embed()\n\tprint \"Executing FFT on host\"\n\t#hbox_large = hifft(hbox_large).astype(np.complex64).real\n\t#hbox_large = pyfftw.interfaces.numpy_fft.ifftn(hbox_large).real\n\thbox_large = fft_stitch(N, plan2d, plan1d, hbox_large, largebox_d).real\n\tprint \"Finished FFT on host\"\n\t#import IPython; IPython.embed()\n\n\t# for meta_x in xrange(META_GRID_SIZE):\n\t# \tfor meta_y in xrange(META_GRID_SIZE):\n\t# \t\tfor meta_z in xrange(META_GRID_SIZE):\n\t# \t\t\tlargebox_d = gpuarray.to_gpu(hbox_large[meta_x*N:(meta_x+1)*N, meta_y*N:(meta_y+1)*N, meta_z*N:(meta_z+1)*N])\n\t# \t\t\tHII_filter(largebox_d, N, np.int32(meta_x), np.int32(meta_y), np.int32(meta_z), ZERO, smoothR, block=block_size, grid=grid_size);\n\t# \t\t\thbox_large[meta_x*N:(meta_x+1)*N, meta_y*N:(meta_y+1)*N, meta_z*N:(meta_z+1)*N] = largebox_d.get()\n\t#plan = Plan(shape, dtype=np.complex64)\n\t#plan.execute(largebox_d, inverse=True) #FFT to real space of smoothed box\n\t#largebox_d /= VOLUME #divide by VOLUME if using fft (vs ifft)\n\n\n\t# This saves a large resolution deltax\n\n\t\n\tprint \"downsampling\"\n\tsmallbox_d = gpuarray.zeros((HII_DIM,HII_DIM,M), dtype=np.float32)\n\tfor meta_z in xrange(META_GRID_SIZE):\n\t\tlargebox_pin = hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy()\n\t\tcuda.memcpy_dtoh_async(largecbox_pin, largebox_d)\n\t\t#largebox_d = gpuarray.to_gpu_async(hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\tlargebox_d /= scale**3 #\n\t\tsubsample_kernel(largebox_d, smallbox_d, DIM, HII_DIM, PIXEL_FACTOR, block=block_size, grid=HII_stitch_grid_size) #subsample in real space\n\t\thbox_small[:, :, meta_z*M:(meta_z+1)*M] = smallbox_d.get_async()\n\tnp.save(parent_folder+\"/Boxes/smoothed_deltax_z0.00_{0:d}_{1:.0f}Mpc\".format(HII_DIM, BOX_LEN), hbox_small)\n\t#import IPython; IPython.embed()\n\n\n\t# To get velocities: reload the k-space box\n\thbox_large = np.load(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN))\n\thvbox_large = np.zeros((DIM, DIM, DIM), dtype=np.float32)\n\thvbox_small = np.zeros(HII_shape, dtype=np.float32)\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\tlargevbox_d = gpuarray.zeros((DIM,DIM,N), dtype=np.complex64)\n\tsmallvbox_d = gpuarray.zeros((HII_DIM, HII_DIM, M), dtype=np.float32)\n\tfor num, mode in enumerate(['x', 'y', 'z']):\n\t\tfor meta_z in xrange(META_GRID_SIZE):\n\t\t\tlargebox_d = gpuarray.to_gpu_async(hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\t\t#largebox_d /= VOLUME #divide by VOLUME if using fft (vs ifft)\n\t\t\tvelocity_kernel(largebox_d, largevbox_d, DIM, np.int32(meta_z), np.int32(num), block=block_size, grid=stitch_grid_size)\n\t\t\tHII_filter(largevbox_d, DIM, ZERO, smoothR, block=block_size, grid=stitch_grid_size)\n\t\t\tprint hvbox_large.shape, largevbox_d.shape\n\t\t\thvbox_large[:, :, meta_z*N:(meta_z+1)*N] = largevbox_d.get_async()\n\t\thvbox_large = fft_stitch(N, plan2d, plan1d, hvbox_large, largevbox_d).real\n\t\tfor meta_z in xrange(META_GRID_SIZE):\n\t\t\tlargevbox_d = 
gpuarray.to_gpu_async(hvbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\t\tsubsample_kernel(largevbox_d.real, smallvbox_d, DIM, HII_DIM,PIXEL_FACTOR, block=block_size, grid=HII_stitch_grid_size)\n\t\t\thvbox_small[:, :, meta_z*M:(meta_z+1)*M] = smallvbox_d.get_async()\n\t\tnp.save(parent_folder+\"/Boxes/v{0}overddot_{1:d}_{2:.0f}Mpc\".format(mode, HII_DIM, BOX_LEN), smallvbox_d.get())\n\n\treturn",
"def test_reconstruction_against_simulation(subarray_and_event_gamma_off_axis_500_gev):\n\n # 4-LST bright event already calibrated\n # we'll clean it and parametrize it again in the TelescopeFrame\n subarray, event = subarray_and_event_gamma_off_axis_500_gev\n\n # define reconstructor\n reconstructor = HillasReconstructor(subarray)\n\n hillas_dict = {}\n telescope_pointings = {}\n\n for tel_id, dl1 in event.dl1.tel.items():\n\n telescope_pointings[tel_id] = SkyCoord(\n alt=event.pointing.tel[tel_id].altitude,\n az=event.pointing.tel[tel_id].azimuth,\n frame=AltAz(),\n )\n\n geom_CameraFrame = subarray.tel[tel_id].camera.geometry\n\n # this could be done also out of this loop,\n # but in case of real data each telescope would have a\n # different telescope_pointing\n geom_TelescopeFrame = geom_CameraFrame.transform_to(\n TelescopeFrame(telescope_pointing=telescope_pointings[tel_id])\n )\n\n mask = tailcuts_clean(\n geom_TelescopeFrame,\n dl1.image,\n picture_thresh=5.0,\n boundary_thresh=2.5,\n keep_isolated_pixels=False,\n min_number_picture_neighbors=2,\n )\n\n try:\n hillas_dict[tel_id] = hillas_parameters(\n geom_TelescopeFrame[mask], dl1.image[mask]\n )\n\n # the original event is created from a\n # pytest fixture with \"session\" scope, so it's always the same\n # and if we used the same event we would overwrite the image\n # parameters for the next tests, thus causing their failure\n test_event = deepcopy(event)\n test_event.dl1.tel[tel_id].parameters = ImageParametersContainer()\n test_event.dl1.tel[tel_id].parameters.hillas = hillas_dict[tel_id]\n\n except HillasParameterizationError as e:\n print(e)\n continue\n\n # Get shower geometry\n reconstructor(event)\n # get the result from the correct DL2 container\n result = event.dl2.stereo.geometry[\"HillasReconstructor\"]\n\n # get the reconstructed coordinates in the sky\n reco_coord = SkyCoord(alt=result.alt, az=result.az, frame=AltAz())\n # get the simulated coordinates in the sky\n true_coord = SkyCoord(\n alt=event.simulation.shower.alt, az=event.simulation.shower.az, frame=AltAz()\n )\n\n # check that we are not more far than 0.1 degrees\n assert reco_coord.separation(true_coord) < 0.1 * u.deg",
"def setup(self):\n igd = self.options['input_grid_data']\n ogd = self.options['output_grid_data']\n output_subset = self.options['output_subset']\n\n if ogd is None:\n ogd = igd\n\n # Build the interpolation matrix which maps from the input grid to the output grid.\n # Rather than a single phase-wide interpolating polynomial, map each segment.\n # To do this, find the nodes in the output grid which fall in each segment of the input\n # grid. Then build a Lagrange interpolating polynomial for that segment\n L_blocks = []\n output_nodes_ptau = list(ogd.node_ptau[ogd.subset_node_indices[output_subset]])\n\n for iseg in range(igd.num_segments):\n i1, i2 = igd.segment_indices[iseg]\n iptau_segi = np.take(igd.node_ptau, (i1, i2-1))\n istau_segi = np.take(igd.node_stau, (i1, i2-1))\n\n # The indices of the output grid that fall within this segment of the input grid\n if ogd is igd:\n optau_segi = iptau_segi\n else:\n ptau_hi = igd.segment_ends[iseg+1]\n if iseg < igd.num_segments - 1:\n idxs_in_iseg = np.where(output_nodes_ptau <= ptau_hi)[0]\n else:\n idxs_in_iseg = np.arange(len(output_nodes_ptau))\n optau_segi = np.asarray(output_nodes_ptau)[idxs_in_iseg]\n # Remove the captured nodes so we don't accidentally include them again\n output_nodes_ptau = output_nodes_ptau[len(idxs_in_iseg):]\n\n # Now get the output nodes which fall in iseg in iseg's segment tau space.\n ostau_segi = 2.0 * (optau_segi - iptau_segi[0]) / (iptau_segi[-1] - iptau_segi[0]) - 1\n\n # Create the interpolation matrix and add it to the blocks\n L, _ = lagrange_matrices(istau_segi, ostau_segi)\n L_blocks.append(L)\n\n self.interpolation_matrix = block_diag(*L_blocks)\n r, c = np.nonzero(self.interpolation_matrix)\n\n output_num_nodes, input_num_nodes = self.interpolation_matrix.shape\n\n for (name, kwargs) in self._timeseries_outputs:\n\n input_kwargs = {k: kwargs[k] for k in ('units', 'desc')}\n input_name = 'input_values:{0}'.format(name)\n self.add_input(input_name,\n shape=(input_num_nodes,) + kwargs['shape'],\n **input_kwargs)\n\n output_name = name\n output_kwargs = {k: kwargs[k] for k in ('units', 'desc')}\n output_kwargs['shape'] = (output_num_nodes,) + kwargs['shape']\n self.add_output(output_name, **output_kwargs)\n\n self._vars.append((input_name, output_name, kwargs['shape']))\n\n size = np.prod(kwargs['shape'])\n val_jac = np.zeros((output_num_nodes, size, input_num_nodes, size))\n\n for i in range(size):\n val_jac[:, i, :, i] = self.interpolation_matrix\n\n val_jac = val_jac.reshape((output_num_nodes * size, input_num_nodes * size),\n order='C')\n\n val_jac_rows, val_jac_cols = np.where(val_jac != 0)\n\n rs, cs = val_jac_rows, val_jac_cols\n self.declare_partials(of=output_name,\n wrt=input_name,\n rows=rs, cols=cs, val=val_jac[rs, cs])",
"def test_3D_m8_2k():\n scal, velo = setup_3D()\n\n advec = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: 'gpu_2k',\n Splitting: 'o2'}\n )\n advec_py = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: '',\n Splitting: 'o2'},\n )\n assertion_3D_withPython(scal, velo, advec, advec_py)",
"def test_matrix22(gridsize=50):\n\n v1 = vec2(3,0)\n v2 = vec2(0,3)\n\n #rotate 45 degrees \n m22 = matrix22()\n m22.from_euler(45)\n\n # make a second matrix, also 45 degrees, should give us 90 total \n m22_2 = matrix22()\n m22_2.from_euler(45)\n m22 = m22_2 * m22\n\n # mutliply a vector by the matrix \n v3 = m22 * v2 \n\n fb = pixel_op() \n fb.create_buffer(800, 800)\n fb.graticule(gridsize)\n \n pts = [ (0,0), (0,1), (2,1), (0,2) ]\n #bloody_simple_2drender('2d_rotation.png', pts=pts, gridsize=50, pfb=fb)\n\n vecs = [v2,v3]\n bloody_simple_2drender('2d_rotation.png', vecs=vecs, gridsize=50, pfb=fb)\n\n #rotate the points by matrix multiplication \n pts = m22.batch_mult_pts(pts) \n bloody_simple_2drender('2d_rotation.png', pts=pts, gridsize=50, pfb=fb)\n fb.save('2d_rotation.png')",
"def prepare_CvD16_for_M2L_calc(templates_lam_range, verbose=False):\n import glob\n import os\n template_glob=os.path.expanduser('~/z/Data/stellarpops/CvD2/vcj_models/VCJ_*.s100')\n\n vcj_models=sorted(glob.glob(template_glob))\n temp_lamdas, x35, x3, x23, kroupa, flat=np.genfromtxt(vcj_models[-1], unpack=True)\n\n n_ages=7\n n_zs=5\n n_imfs=5\n\n \n\n\n Zs=['m1.5', 'm1.0', 'm0.5', 'p0.0', 'p0.2']\n ages=[1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.5]\n model_imfs_order=['x35', 'x3', 'x23', 'kroupa', 'flat']\n\n t_mask = ((temp_lamdas > templates_lam_range[0]) & (temp_lamdas <templates_lam_range[1]))\n\n\n\n y=x35[t_mask]\n x=temp_lamdas[t_mask]\n\n #Make a new lamda array, carrying on the delta lamdas of high resolution bit\n new_x=temp_lamdas[t_mask][0]+0.9*(np.arange(np.ceil((temp_lamdas[t_mask][-1]-temp_lamdas[t_mask][0])/0.9))+1)\n interp=si.interp1d(x, y, fill_value='extrapolate')\n out=interp(new_x)\n\n templates=np.empty((len(out), n_ages, n_zs, n_imfs))\n\n\n\n for a, Z in enumerate(Zs): \n for b, age in enumerate(ages):\n model=glob.glob(os.path.expanduser('~/z/Data/stellarpops/CvD2/vcj_models/VCJ_*{}*{}.ssp.s100'.format(Z, age)))[0]\n if verbose:\n print 'Loading {}'.format(model)\n data=np.genfromtxt(model)\n\n for c, counter in enumerate(reversed(range(1, data.shape[-1]))):\n \n #Interpolate templates onto a uniform wavelength grid and then log-rebin\n y=data[:, counter][t_mask] \n x=temp_lamdas[t_mask]\n #Make a new lamda array, carrying on the delta lamdas of high resolution bit\n new_x=temp_lamdas[t_mask][0]+0.9*(np.arange(np.ceil((temp_lamdas[t_mask][-1]-temp_lamdas[t_mask][0])/0.9))+1)\n\n interp=si.interp1d(x, y, fill_value='extrapolate')\n out=interp(new_x) \n\n templates[:, b, a, c]=out\n\n return templates, new_x"
]
| [
"0.61626244",
"0.61440176",
"0.6001109",
"0.5942162",
"0.5766749",
"0.57562554",
"0.57494867",
"0.57129544",
"0.55944943",
"0.5574273",
"0.5554185",
"0.55528677",
"0.55476743",
"0.55270815",
"0.5513868",
"0.551193",
"0.55054796",
"0.54848176",
"0.54846114",
"0.5481975",
"0.5481606",
"0.5477275",
"0.546959",
"0.5460875",
"0.54555595",
"0.54513603",
"0.54503065",
"0.5442254",
"0.5440174",
"0.5420874"
]
| 0.62266886 | 0 |
Testing M8 remeshing formula in 2D, 2 kernel, simple precision, o2 splitting. | def test_2D_m8_2k():
scal, velo = setup_2D()
advec = Advection(velo, scal, discretization=d2d,
method={TimeIntegrator: RK2,
Interpolation: Linear,
Remesh: M8Prime,
Support: 'gpu_2k',
Splitting: 'o2'}
)
advec_py = Advection(velo, scal, discretization=d2d,
method={TimeIntegrator: RK2,
Interpolation: Linear,
Remesh: M8Prime,
Support: '',
Splitting: 'o2'},
)
assertion_2D_withPython(scal, velo, advec, advec_py) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_2D_m8_1k():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: 'gpu_1k',\n Splitting: 'o2'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: '',\n Splitting: 'o2'},\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def test_2D_m8_1k_sFH():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: 'gpu_1k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def test_2D_m8_2k_sFH():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: 'gpu_2k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def kernel_test(slabs, data, backend):\n Q = data[:, 0]\n\n layers = []\n for thickness, rsld, isld, sigma in slabs:\n layers.append(\n model.Layer(\n b=(rsld - 1j * isld), dens=0.1, d=thickness, sigma=sigma\n )\n )\n layers.reverse()\n stack = model.Stack(Layers=list(layers[1:-1]), Repetitions=1)\n sample = model.Sample(\n Stacks=[stack], Ambient=layers[-1], Substrate=layers[0]\n )\n # print(sample)\n\n inst = model.Instrument(\n probe=backend,\n wavelength=1.54,\n coords=\"q\",\n I0=1,\n res=0,\n restype=\"no conv\",\n respoints=5,\n resintrange=2,\n beamw=0.1,\n footype=\"no corr\",\n samplelen=10,\n pol=\"uu\",\n )\n if data.shape[1] == 4:\n dQ = data[:, 3]\n inst.restype = \"full conv and varying res.\"\n inst.res = dQ\n if backend == \"neutron pol spin flip\":\n # memory issues in matrix formalism if too many data points\n inst.respoints = 101\n else:\n inst.respoints = (\n 10001 # try to use same convolution as ref1d when generating\n )\n inst.resintrange = 3.5\n\n # print(inst)\n R = sample.SimSpecular(Q, inst)\n\n assert R.shape == data[:, 1].shape\n if data.shape[1] == 4:\n # validation accuracy is reduced for resolution runs, as strongly\n # depends on numerical convolution scheme\n if backend == \"neutron pol spin flip\":\n np.testing.assert_allclose(R, data[:, 1], rtol=0.005)\n else:\n np.testing.assert_allclose(R, data[:, 1], rtol=0.001)\n else:\n np.testing.assert_allclose(R, data[:, 1], rtol=0.001)",
"def test_filter_l2_1():\n box = Box(length=L, origin=O)\n f = Field(box, formula=func, name='f0')\n d_fine = Discretization([513, 513, 513])\n d_coarse = Discretization([257, 257, 257], ghosts=[2, 2, 2])\n op = MultiresolutionFilter(d_in=d_fine, d_out=d_coarse,\n variables={f: d_coarse},\n method={Remesh: L2_1, })\n op.discretize()\n op.setup()\n topo_coarse = op.discreteFields[f].topology\n topo_fine = [t for t in f.discreteFields.keys()\n if not t is topo_coarse][0]\n f.initialize(topo=topo_fine)\n f_out = f.discreteFields[topo_coarse]\n op.apply(simu)\n valid = [npw.zeros(f_out[0].shape), ]\n valid = func(valid, *topo_coarse.mesh.coords)\n assert np.allclose(valid[0][topo_coarse.mesh.iCompute],\n f_out[0][topo_coarse.mesh.iCompute]), \\\n np.max(np.abs(valid[0][topo_coarse.mesh.iCompute] -\n f_out[0][topo_coarse.mesh.iCompute]))",
"def test_1_2_2D_cube_splits(self):\n check = [(0, 0), (1, 1), (1, 0), (0, 1), (0.5, 0.5), (0.0, 0.5),\n (0.5, 0.0),\n (0.25, 0.25), (1.0, 0.5), (0.5, 1.0), (0.75, 0.75),\n (0.75, 0.25),\n (0.25, 0.75), (0.5, 0.25), (0.25, 0.5), (0.375, 0.375),\n (0.0, 0.25),\n (0.25, 0.0), (0.125, 0.125), (0.125, 0.375), (0.375, 0.125),\n (0.5, 0.75), (0.75, 0.5), (0.625, 0.625), (1.0, 0.75),\n (0.75, 1.0),\n (0.875, 0.875), (0.875, 0.625), (0.625, 0.875), (0.625, 0.375),\n (1.0, 0.25), (0.75, 0.0), (0.875, 0.125), (0.875, 0.375),\n (0.625, 0.125), (0.375, 0.625), (0.0, 0.75), (0.25, 1.0),\n (0.125, 0.875), (0.125, 0.625), (0.375, 0.875)]\n\n nn_checks = {(0, 0): [(0.25, 0.0), (0.0, 0.25), (0.125, 0.125)],\n (0.625, 0.375): [(0.5, 0.5), (0.75, 0.25), (0.75, 0.5),\n (0.5, 0.25)],\n (0, 1): [(0.25, 1.0), (0.125, 0.875),(0.0, 0.75)],\n (0.625, 0.125): [(0.5, 0.0), (0.75, 0.25), (0.75, 0.0),\n (0.5, 0.25)]}\n\n\n init_triangulation(2, 2, check, nn_checks)",
"def test_multiple_case(self):\r\n\r\n shp = (3, 3)\r\n fx, fy, fz, fw = fmatrices('xyzw')\r\n dx, dy, dz, dw = dmatrices('xyzw')\r\n fv = fvector('r').dimshuffle('x', 0)\r\n dv = dvector('s').dimshuffle('x', 0)\r\n fxv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fyv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fzv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fwv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float32').reshape(1, shp[0])\r\n dxv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dyv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dzv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dwv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float64').reshape(1, shp[0])\r\n\r\n #We must be sure that the Canonizer is working, but that we don't have other\r\n # optimisation that could hide bug in the Canonizer as local_elemwise_fusion\r\n mode = compile.mode.get_default_mode()\r\n old_optimizer = mode._optimizer\r\n try:\r\n mode._optimizer = gof.Query([\"canonicalize\"])\r\n mode._optimizer = mode._optimizer.including('ShapeOpt')\r\n mode._optimizer = mode._optimizer.excluding(\r\n 'local_elemwise_fusion')\r\n\r\n #test x / x -> 1\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([(fx/fx,[fx],[fxv],'float32'),\r\n (dx/dx,[dx],[dxv],'float64'),\r\n (fv/fv,[fv],[fvv],'float32'),\r\n (dv/dv,[dv],[dvv],'float64'),\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert (out == numpy.ones(shp, dtype=out_dtype)).all()\r\n topo = f.maker.fgraph.toposort()\r\n if sym_inputs[0].broadcastable[0]:\r\n assert len(topo) == 2\r\n assert isinstance(topo[0].op, Shape_i)\r\n assert isinstance(topo[1].op, tensor.Alloc)\r\n else:\r\n assert len(topo) == 3\r\n assert isinstance(topo[0].op, Shape_i)\r\n assert isinstance(topo[1].op, Shape_i)\r\n assert isinstance(topo[2].op, tensor.Alloc)\r\n assert(out_dtype == out.dtype)\r\n\r\n #test (x * y) / x -> y\r\n for id,(g, sym_inputs, val_inputs, nb_elemwise, out_dtype) in enumerate([\r\n ((dx*dy)/dx,[dx,dy],[dxv,dyv],0,'float64'),\r\n ((fx*fy)/fx,[fx,fy],[fxv,fyv],0,'float32'),\r\n ((dv*dy)/dv,[dv,dy],[dvv,dyv],0,'float64'),\r\n ((fv*fy)/fv,[fv,fy],[fvv,fyv],0,'float32'),\r\n #must broadcast as their is a dimshuffle in the computation\r\n ((dx*dv)/dx,[dx,dv],[dxv,dvv],1,'float64'),\r\n #topo: [Elemwise{second,no_inplace}(x, <TensorType(float64, row)>)]\r\n ((fx*fv)/fx,[fx,fv],[fxv,fvv],1,'float32')\r\n #topo: [Elemwise{second,no_inplace}(x, <TensorType(float32, row)>)]\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert(out_dtype == out.dtype)\r\n assert numpy.allclose(out, val_inputs[1])\r\n topo = f.maker.fgraph.toposort()\r\n print \"ID TOPO\", id, topo, sym_inputs\r\n for r, t in f.maker.fgraph.shape_feature.shape_of.items():\r\n print ' ', r, t\r\n if topo and not(len(topo)==1 and topo[0].op==deep_copy_op):\r\n for node in topo[:-1]:\r\n assert isinstance(node.op, Shape_i)\r\n assert isinstance(topo[-1].op, tensor.Alloc)\r\n\r\n #test x / y / x -> 1 / y\r\n for id,(g, sym_inputs, val_inputs, nb_elemwise, out_dtype) in enumerate([\r\n ((dx/dy)/dx,[dx,dy],[dxv,dyv],1,'float64'),\r\n ((fx/fy)/fx,[fx,fy],[fxv,fyv],1,'float32'),\r\n ((dv/dy)/dv,[dv,dy],[dvv,dyv],1,'float64'),\r\n 
((fv/fy)/fv,[fv,fy],[fvv,fyv],1,'float32'),\r\n #must broadcast as their is a dimshuffle in the computation\r\n\r\n ((dx/dv)/dx,[dx,dv],[dxv,dvv],1,'float64'),\r\n #topo: [Shape_i, Shape_i, Elemwise{inv,no_inplace}(<TensorType(float64, row)>), Alloc]\r\n ((fx/fv)/fx,[fx,fv],[fxv,fvv],1,'float32'),\r\n #topo:[Shape_i, Shape_i, Elemwise{inv,no_inplace}(<TensorType(float32, row)>), Alloc]\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, (1 / val_inputs[1]))\r\n topo = f.maker.fgraph.toposort()\r\n print topo\r\n elem = [t for t in topo if isinstance(t.op, T.Elemwise)]\r\n assert len(elem) == nb_elemwise\r\n assert isinstance(elem[0].op, (T.Elemwise, ))\r\n assert isinstance(elem[0].op.scalar_op, (\r\n theano.scalar.basic.Inv, theano.scalar.basic.TrueDiv))\r\n assert(out_dtype == out.dtype)\r\n\r\n #test (a / b) * (b / c) * (c / d) -> a / d\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n ((dx / dy) * (dy / dz) * (dz / dw),[dx,dy,dz,dw],[dxv,dyv,dzv,dwv],'float64'),\r\n ((fx / fy) * (fy / fz) * (fz / fw),[fx,fy,fz,fw],[fxv,fyv,fzv,fwv],'float32'),\r\n ((dv / dy) * (dy / dz) * (dz / dw),[dv,dy,dz,dw],[dvv,dyv,dzv,dwv],'float64'),\r\n ((fv / fy) * (fy / fz) * (fz / fw),[fv,fy,fz,fw],[fvv,fyv,fzv,fwv],'float32'),\r\n ((dx / dv) * (dv / dz) * (dz / dw),[dx,dv,dz,dw],[dxv,dvv,dzv,dwv],'float64'),\r\n ((fx / fv) * (fv / fz) * (fz / fw),[fx,fv,fz,fw],[fxv,fvv,fzv,fwv],'float32'),\r\n ((dx / dy) * (dy / dv) * (dv / dw),[dx,dy,dv,dw],[dxv,dyv,dvv,dwv],'float64'),\r\n ((fx / fy) * (fy / fv) * (fv / fw),[fx,fy,fv,fw],[fxv,fyv,fvv,fwv],'float32'),\r\n ((dx / dy) * (dy / dz) * (dz / dv),[dx,dy,dz,dv],[dxv,dyv,dzv,dvv],'float64'),\r\n ((fx / fy) * (fy / fz) * (fz / fv),[fx,fy,fz,fv],[fxv,fyv,fzv,fvv],'float32'),\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, (val_inputs[0] / val_inputs[3]))\r\n topo = f.maker.fgraph.toposort()\r\n assert len(topo) == 1\r\n assert isinstance(topo[0].op, (T.Elemwise, ))\r\n assert isinstance(topo[0].op.scalar_op,\r\n theano.scalar.basic.TrueDiv)\r\n assert len(topo[0].inputs) == 2\r\n assert(out_dtype == out.dtype)\r\n\r\n #test (2.0 * x) / (4.0 * y) -> (0.5 * x) / y\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n (((2.0*dx)/(4.0*dy)),[dx,dy],[dxv,dyv],'float64'),\r\n (((2.0*fx)/(4.0*fy)),[fx,fy],[fxv,fyv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n (((2.0*dv)/(4.0*dy)),[dv,dy],[dvv,dyv],'float64'),\r\n (((2.0*fv)/(4.0*fy)),[fv,fy],[fvv,fyv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n (((2.0*dx)/(4.0*dv)),[dx,dv],[dxv,dvv],'float64'),\r\n (((2.0*fx)/(4.0*fv)),[fx,fv],[fxv,fvv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ]):\r\n\r\n if isinstance(out_dtype, dict):\r\n out_dtype = out_dtype[config.cast_policy]\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, (0.5 *\r\n val_inputs[0] / val_inputs[1]))\r\n topo = f.maker.fgraph.toposort()\r\n assert len(topo) == 2\r\n assert isinstance(topo[0].op, (T.Elemwise, ))\r\n assert isinstance(topo[0].op.scalar_op,\r\n theano.scalar.basic.Mul)\r\n assert len(topo[0].inputs) == 2\r\n assert isinstance(topo[1].op, (T.Elemwise, ))\r\n assert isinstance(topo[1].op.scalar_op,\r\n theano.scalar.basic.TrueDiv)\r\n assert len(topo[1].inputs) == 2\r\n assert(out_dtype == 
out.dtype)\r\n\r\n #test 2 * x / 2 -> x\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n ((2*dx)/2,[dx],[dxv],'float64'),\r\n ((2*fx)/2,[fx],[fxv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ((2*dv)/2,[dv],[dvv],'float64'),\r\n ((2*fv)/2,[fv],[fvv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ]):\r\n if isinstance(out_dtype, dict):\r\n out_dtype = out_dtype[config.cast_policy]\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, val_inputs[0])\r\n topo = f.maker.fgraph.toposort()\r\n assert len(topo) == 1\r\n topo[0].op == deep_copy_op\r\n assert(out_dtype == out.dtype)\r\n\r\n #test x / abs(x) -> sign(x)\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n (dx/abs(dx),[dx],[0.5-dxv],'float64'),\r\n (fx/abs(fx),[fx],[0.5-fxv], 'float32'),\r\n (dx/abs(dx),[dx],[0.1*dxv],'float64'),\r\n (fx/abs(fx),[fx],[0.1*fxv], 'float32'),\r\n (dv/abs(dv),[dv],[0.5-dvv],'float64'),\r\n (fv/abs(fv),[fv],[0.5-fvv], 'float32'),\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.all(numpy.isfinite(out))\r\n assert numpy.allclose(out, numpy.sign(val_inputs[0]))\r\n assert(out_dtype == out.dtype)\r\n assert len(f.maker.fgraph.toposort()) == 1\r\n\r\n #test (2*x) / (3*abs(x)) -> sign(x)\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n ((2 * dx) / (3 * abs(dx)), [dx], [0.5 - dxv], 'float64'),\r\n ((2 * fx) / (3 * abs(fx)), [fx], [0.5 - fxv],\r\n {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ((2 * dx) / (3 * abs(dx)), [dx], [0.1 * dxv], 'float64'),\r\n ((2 * fx) / (3 * abs(fx)), [fx], [0.1 * fxv],\r\n {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ((2 * dv) / (3 * abs(dv)), [dv], [0.5 - dvv], 'float64'),\r\n ((2 * fv) / (3 * abs(fv)), [fv], [0.5 - fvv],\r\n {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ]):\r\n\r\n if isinstance(out_dtype, dict):\r\n out_dtype = out_dtype[config.cast_policy]\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n topo = f.maker.fgraph.toposort()\r\n out = f(*val_inputs)\r\n assert numpy.all(numpy.isfinite(out))\r\n assert numpy.allclose(out, numpy.sign(val_inputs[0]) * 2 / 3)\r\n assert(out_dtype == out.dtype)\r\n finally:\r\n mode._optimizer = old_optimizer",
"def test_2D_m6_2k():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L4_2,\n Support: 'gpu_2k',\n Splitting: 'o2'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L4_2,\n Support: '',\n Splitting: 'o2'},\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def test_dmi_uses_unit_length_2dmesh():\n A = 8.78e-12 # J/m\n D = 1.58e-3 # J/m^2\n Ms = 3.84e5 # A/m\n\n energies = []\n\n # unit_lengths 1e-9 and 1 are common, let's throw in an intermediate length\n # just to challenge the system a little:\n for unit_length in (1, 1e-4, 1e-9):\n radius = 200e-9 / unit_length\n maxh = 5e-9 / unit_length\n helical_period = (4 * pi * A / D) / unit_length\n k = 2 * pi / helical_period\n # HF 27 April 2014: The next command fails in dolfin 1.3\n # mesh = df.CircleMesh(df.Point(0, 0), radius, maxh)\n # The actual shape of the domain shouldn't matter for the test,\n # so let's use a Rectangular mesh which should work the same:\n\n nx = ny = int(round(radius / maxh))\n mesh = df.RectangleMesh(df.Point(0, 0), df.Point(radius, radius), nx, ny)\n\n S3 = df.VectorFunctionSpace(mesh, \"CG\", 1, dim=3)\n m_expr = df.Expression((\"0\", \"cos(k * x[0])\", \"sin(k * x[0])\"), k=k, degree=1)\n m = Field(S3, m_expr, name='m')\n dmi = DMI(D)\n Ms_dg = Field(df.FunctionSpace(mesh, 'DG', 0), Ms)\n dmi.setup(m, Ms_dg, unit_length=unit_length)\n energies.append(dmi.compute_energy())\n\n H = df.Function(S3)\n H.vector()[:] = dmi.compute_field()\n print H(0.0, 0.0)\n\n print \"Using unit_length = {}.\".format(unit_length)\n print \"Helical period {}.\".format(helical_period)\n print \"Energy {}.\".format(dmi.compute_energy())\n\n rel_diff_energies = abs(energies[0] - energies[1]) / abs(energies[1])\n print \"Relative difference of energy {}.\".format(rel_diff_energies)\n assert rel_diff_energies < 1e-13\n\n rel_diff_energies2 = abs(energies[0] - energies[2]) / abs(energies[2])\n print \"Relative difference2 of energy {}.\".format(rel_diff_energies2)\n assert rel_diff_energies2 < 1e-13",
"def reg2():\n data2 = np.load(\"./data/measure4_1.npy\")[2:]\n\n x2 = np.arange(0,len(data2),1)\n\n fit = True \n redistribute = True \n\n #x2 = 1.3149710372035508*x2 -22.617788714272098\n c2 = np.where(x2 < 135)\n\n data = data2[c2] \n x = x2[c2]\n print(\"datapoints:\",len(data))\n\n mass = 79/251/6080*52658\n if redistribute == True:\n\n # conserving the mass\n total_mass = mass * len(data)\n remaining = (data > 0)\n\n while True:\n print(\"new redistributing ...\")\n print(\"total mass:\",total_mass)\n # find those which are smaller\n q = (data[remaining] <= mass)\n remaining = ~q\n if len(np.nonzero(q)[0]) == 0:\n data[remaining] -= mass\n break\n print(\"number of smaller values:\",len(np.nonzero(q)[0]),\"\\n\")\n # subtract the mass of this data\n total_mass -= np.sum(data[q])\n mass = total_mass / len(np.nonzero(~remaining)[0]) \n data[q] = 0\n\n # redistribute total remaining mass to single channels\n print(\"number of nonzero:\",len(np.nonzero(data)[0]))\n\n c = np.nonzero(data) \n data = data[c]\n x = x[c]\n\n #scaling to time units\n x = 6.3149710372035508*x -22.617788714272098\n c = (x>0)\n x = x[c]\n data = data[c]\n\n x = x[::-1] - min(x)\n\n\n error = np.sqrt(data) \n # only fit for x < 135\n fig = plt.figure()\n ax = plt.subplot(111)\n plt.grid(True)\n\n if fit==True:\n\n def func(x, *p):\n a,b,c = p\n return a + b * c**x\n\n # p0 is the initial guess for the fitting coefficients \n p0 = [1., 1., 1.]\n\n p, cov = curve_fit(func, x, data, p0=p0, sigma = error)\n p_uc = uc.correlated_values(p, cov)\n c = p_uc[2]\n\n T12_lit = 98 \n lamb_lit = -(np.log(2)/T12_lit)\n print(\"lit\",lamb_lit)\n \n\n lamb = umath.log(c)\n print(lamb)\n T12 = -np.log(2) /lamb \n print(\"t12=\",T12)\n\n x_fit = np.linspace(min(x),max(x))\n\n data_fit = func(x_fit,*p) \n pmin = (p - np.sqrt(np.diag(cov)))\n pmax = (p + np.sqrt(np.diag(cov)))\n\n data_fit_min = func(x_fit, *pmin)\n data_fit_max = func(x_fit, *pmax)\n\n plt.plot(x_fit,data_fit)\n plt.plot(x_fit,90*np.exp(x_fit * lamb_lit))\n plt.fill_between(x_fit, data_fit_min , data_fit_max,facecolor=\"r\", color=\"b\", alpha=0.3 )\n\n # place a text box in upper left in axes coords\n props = dict(boxstyle='round', facecolor='white', alpha=0.5)\n textstr = '$a + b \\cdot c^x$ with\\n$a=%.2f$\\n$b=%.2f$\\n$c=%.2f$'%(p[0], p[1],p[2])\n ax.text(0.6, 0.85, textstr, transform=ax.transAxes, fontsize=18, va='top', bbox=props)\n\n ax.xaxis.set_tick_params(labelsize = 14)\n ax.yaxis.set_tick_params(labelsize = 14)\n\n ax.add_patch(plt.Rectangle((0,0.1),155,100,alpha = 0.2))\n\n plt.errorbar(x,data, yerr=error,fmt=\"x\")\n #plt.scatter(x,data,c=\"blue\",alpha = 0.9,s=100, marker=\"x\")\n plt.ylim(min(data)*0.8,max(data))\n #plt.yscale(\"log\")\n plt.xlim(min(x)*0.8,max(x))\n plt.xlabel(\"time in $ns$\", fontsize = 14)\n plt.ylabel(\"counts\", fontsize = 14)\n make_fig(fig,1,1,name=\"plot4_1_reg\")",
"def test_2D_m6_1k():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L4_2,\n Support: 'gpu_2k',\n Splitting: 'o2'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L4_2,\n Support: '',\n Splitting: 'o2'},\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def test_linear_2d_merwe_column():\n\n\n def fx(x, dt):\n F = np.array([[1, dt, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, dt],\n [0, 0, 0, 1]], dtype=float)\n\n return np.dot(F, x)\n\n def hx(x):\n return np.array([x[0], x[2]])\n\n\n dt = 0.1\n points = MerweScaledSigmaPoints2(4, .1, 2., -1)\n kf = UKF2(dim_x=4, dim_z=2, dt=dt, fx=fx, hx=hx, points=points)\n\n\n kf.x = np.array([[-1., 1., -1., 1]]).T\n kf.P*=0.0001\n #kf.R *=0\n #kf.Q\n\n zs = []\n for i in range(20):\n z = np.array([[i+randn()*0.1],\n [i+randn()*0.1]])\n zs.append(z)\n\n Ms, Ps = kf.batch_filter(zs)\n smooth_x, _, _ = kf.rts_smoother(Ms, Ps, dt=dt)\n\n if DO_PLOT:\n plt.figure()\n zs = np.asarray(zs)\n plt.plot(zs[:,0], marker='+', c='b')\n plt.plot(Ms[:,0], c='b')\n plt.plot(smooth_x[:,0], smooth_x[:,2], c='r')\n print(smooth_x)",
"def test_matrix22(gridsize=50):\n\n v1 = vec2(3,0)\n v2 = vec2(0,3)\n\n #rotate 45 degrees \n m22 = matrix22()\n m22.from_euler(45)\n\n # make a second matrix, also 45 degrees, should give us 90 total \n m22_2 = matrix22()\n m22_2.from_euler(45)\n m22 = m22_2 * m22\n\n # mutliply a vector by the matrix \n v3 = m22 * v2 \n\n fb = pixel_op() \n fb.create_buffer(800, 800)\n fb.graticule(gridsize)\n \n pts = [ (0,0), (0,1), (2,1), (0,2) ]\n #bloody_simple_2drender('2d_rotation.png', pts=pts, gridsize=50, pfb=fb)\n\n vecs = [v2,v3]\n bloody_simple_2drender('2d_rotation.png', vecs=vecs, gridsize=50, pfb=fb)\n\n #rotate the points by matrix multiplication \n pts = m22.batch_mult_pts(pts) \n bloody_simple_2drender('2d_rotation.png', pts=pts, gridsize=50, pfb=fb)\n fb.save('2d_rotation.png')",
"def test_2D_m4_1k_sFH():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: 'gpu_1k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def test_2D_m4_2k():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: 'gpu_2k',\n Splitting: 'o2'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: '',\n Splitting: 'o2'},\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def test_2_layer():\r\n # angular frequency in radians * THz\r\n w = 100 * nu.THz\r\n # Relative permittivity of metal and dielectric\r\n em = -4.56 + 0.12j\r\n ed = 1.23 + 0.01j\r\n ex_list = ez_list = [ed, em]\r\n # Relative permeabilities\r\n mu_list = [1,1]\r\n # Dictionary of input parameters\r\n input_params = {'w': w, 'd_list': [inf,inf], 'ex_list': ex_list,\r\n 'ez_list': ez_list, 'mu_list': mu_list}\r\n \r\n # Calculate the theoretical kx\r\n theo_kx = (w / nu.c0) * cmath.sqrt((em * ed) / (em + ed))\r\n if theo_kx.imag < 0:\r\n theo_kx *= -1\r\n print('Theoretical kx:',\r\n '(%.7g+%.7gj) rad/um' % (theo_kx.real / nu.um**-1, theo_kx.imag / nu.um**-1))\r\n \r\n # If I use the theoretical kx value, the mode should be correct and\r\n # all my tests should pass.\r\n params = deepcopy(input_params)\r\n params['kx'] = theo_kx\r\n params = find_all_params_from_kx(params)\r\n kzd, kzm = params['kz_list']\r\n # check that kz_list is correct\r\n assert_floats_are_equal(kzd**2, (w**2 / nu.c0**2) * ed**2 / (em + ed))\r\n assert_floats_are_equal(kzm**2, (w**2 / nu.c0**2) * em**2 / (em + ed))\r\n # check that layer_bottom_list is correct\r\n assert params['layer_bottom_list'][0] == -inf\r\n assert params['layer_bottom_list'][1] == 0\r\n # Check that the boundary condition matrix agrees with hand-calculation\r\n bc_mat = bc_matrix(params)\r\n # ...top-left is Ex0down / H0down\r\n assert_floats_are_equal(bc_mat[0,0], -kzd / (w * ed * nu.eps0))\r\n # ...top-right is -Ex1up / H1up\r\n assert_floats_are_equal(bc_mat[0,1], -kzm / (w * em * nu.eps0))\r\n # ...bottom-left is eps0 * Ez0down / H0down\r\n assert_floats_are_equal(bc_mat[1,0], ed * -theo_kx / (w * ed * nu.eps0))\r\n # ...bottom-right is -eps1 * Ez1up / H1up\r\n assert_floats_are_equal(bc_mat[1,1], -em * -theo_kx / (w * em * nu.eps0))\r\n # Check that one of the eigenvalues is almost zero (compared to the size\r\n # of the matrix elements).\r\n eigenvalues = np.linalg.eig(bc_mat)[0]\r\n assert abs(eigenvalues).min() / abs(bc_mat).max() < 1e-6\r\n # Check that the mode passes all tests.\r\n assert check_mode(params, thorough=True) is True\r\n # Check that I can scale the fields and it still passes all tests.\r\n params_scaled = rescale_fields(1.23+4.56j, params)\r\n assert check_mode(params_scaled, thorough=True) is True\r\n \r\n # Now try my kx-finding algorithm, to see if it finds the right value.\r\n kx_list = find_kx(input_params)\r\n print('kx_list:',\r\n ['(%.7g+%.7gj) rad/um' % (kx.real / nu.um**-1, kx.imag / nu.um**-1)\r\n for kx in kx_list])\r\n kx = kx_list[0]\r\n assert_floats_are_equal(theo_kx, kx)\r\n \r\n plot_mode(params)\r\n \r\n print('If you see this message, all the tests succeeded!!')",
"def test_2D_m4_2k_sFH():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: 'gpu_2k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def test_1_2_2D_rec_splits(self):\n check = [(3.0, -2.0), (7.0, -1.0), (7.0, -2.0), (3.0, -1.0),\n (5.0, -1.5), (3.0, -1.5), (5.0, -2.0), (4.0, -1.75),\n (7.0, -1.5), (5.0, -1.0), (6.0, -1.25), (6.0, -1.75),\n (4.0, -1.25), (5.0, -1.75), (4.0, -1.5), (4.5, -1.625),\n (3.0, -1.75), (4.0, -2.0), (3.5, -1.875), (3.5, -1.625),\n (4.5, -1.875), (5.0, -1.25), (6.0, -1.5), (5.5, -1.375),\n (7.0, -1.25), (6.0, -1.0), (6.5, -1.125), (6.5, -1.375),\n (5.5, -1.125), (5.5, -1.625), (7.0, -1.75), (6.0, -2.0),\n (6.5, -1.875), (6.5, -1.625), (5.5, -1.875), (4.5, -1.375),\n (3.0, -1.25), (4.0, -1.0), (3.5, -1.125), (3.5, -1.375),\n (4.5, -1.125)]\n nn_checks = {(3.0, -2.0): [(3.0, -1.75), (3.5, -1.875), (4.0, -2.0)],\n (5.0, -1.75): [(5.0, -2.0), (5.0, -1.5), (5.5, -1.625),\n (5.5, -1.875), (4.5, -1.625), (6.0, -1.75),\n (4.5, -1.875), (4.0, -1.75)],\n (6.0, -2.0): [(5.0, -2.0), (5.5, -1.875), (6.0, -1.75),\n (6.5, -1.875), (7, -2)],\n (4.5, -1.125): [(5.0, -1.0), (4.0, -1.25), (5.0, -1.25),\n (4.0, -1.0)]}\n\n init_triangulation(2, 2, check, nn_checks, bounds=[(3, 7), (-2, -1)])",
"def run_2dtest(dim=3):\n\n traces = []\n\n for smoothing in range(10, 101, 10):\n pencilbeams = []\n num_sight_lines = 100\n\n # Construct our pencilbeams\n xlin = np.linspace(0., 1., num_sight_lines+1) * smoothing\n ylin = np.linspace(0., 1., num_sight_lines+1) * smoothing\n X,Y = np.meshgrid(xlin, ylin)\n\n # Store resulting LoS integrations in results\n results = X\n for i in range(0,num_sight_lines+1):\n for j in range(0,num_sight_lines+1): \n results[i,j] = testsph(X[i,j],Y[i,j],smoothing,dim=dim)\n\n # Integrate the pencilbeam weightings to find the full SPH weighting\n # This is the plane x-z from origin along +ve x-axis (sitting at y=0)\n\n # Have to integrate across x for every y\n Int_step = np.zeros( num_sight_lines+1 )\n for iy in range(0, num_sight_lines+1):\n isfin = np.isfinite(results[iy,:])\n Int_step[iy] = integrate.trapz(results[iy,isfin], xlin[isfin])\n # Now integrate across y\n isfin = np.isfinite(Int_step)\n particle_integral = integrate.trapz(Int_step[isfin], ylin[isfin])\n # \"All smoothing lengths should integrate to the same value of unity \"\n # We've sampled a quadrant in x-y and integrated entirely along z, so mulitply by 4\n print particle_integral * 4.\n\n isfin = np.isfinite(results[0,:])\n traces.append(go.Scatter(y=results[0,isfin], x=xlin[isfin]))\n\n # The integral of the entire particle should be unity, the trace of axis will not be however\n plot(traces)",
"def test_sw2():\n B1 = 100\n B2 = 200\n h = 18\n t = 1\n H = h + 2 * t\n E1 = 20000\n E2 = 10000\n sections = ((B1, t, 0, E1), (B2, t, h + t, E2))\n EI, top, bot = bm.EI(sections, E1)\n EIc = E1 * B1 * (H ** 3 - h ** 3) / 12\n assert 0.99 < EI / EIc < 1.01",
"def test():\n # test getCl\n ISWoutFile = 'ISWout_scalCls.fits'\n ISWinFile = 'ISWin_scalCls.fits'\n ell,temps = getCl(ISWoutFile)\n\n \"\"\"\n # test showCl\n showCl(ell,temps)\n\n # test makeLegendreTable\n # this works fine for small lmax values, but ell=86 and higher have problems\n # possibly due to exceeding the maximum size of a float64 dtype\n makeLegendreTable(9,'testTable.npy')\n table = symLoad('testTable.npy')\n print table\n\n # test powerArray\n powers = powerArray(2,9)\n print powers\n \"\"\"\n\n # test makeCmatrix\n # measured time: 4.25 hrs for 6110 point mask\n startTime = time.time()\n\n # old files no longer used\n #saveMatrixFile = 'covar6110_R010_lowl.npy'\n #saveMatrixFile = 'covar6110_R010.npy'\n #maskFile = '/shared/Data/PSG/hundred_point/ISWmask2_din1_R160.fits'\n #saveMatrixFile = 'covar9875_R160b.npy'\n\n # huge mask\n #maskFile = 'ISWmask9875_RING.fits' #19917 pixels\n #saveMatrixFile = 'covar19917_ISWout_bws_hp12_RING.npy'\n #covMat = makeCmatrix(maskFile, ISWoutFile, highpass=12, beamSmooth=True, pixWin=True, nested=False)\n # took 24.83 hours\n\n # use ISWin to model expected signal\n #maskFile = 'ISWmask6110_RING.fits'\n #saveMatrixFile = 'covar6110_ISWin_bws_hp12_RING.npy'\n #covMat = makeCmatrix(maskFile, ISWinFile, highpass=12, nested=True)\n maskFile = 'ISWmask9875_RING.fits' #9875 pixels\n saveMatrixFile = 'covar9875_ISWin_bws_hp12_RING.npy'\n covMat = makeCmatrix(maskFile, ISWinFile, highpass=12, beamSmooth=True, pixWin=True, nested=False)\n\n # no beam nor window smoothing, high lmax\n #saveMatrixFile = 'covar6110_ISWout_nBW_hp12_RING.npy'\n #covMat = makeCmatrix(maskFile, ISWoutFile, highpass=12, beamSmooth=False, pixWin=False, lmax=2200, nested=False)\n\n print 'time elapsed: ',int((time.time()-startTime)/60),' minutes'\n symSave(covMat,saveMatrixFile)\n \"\"\"\n\n # test subMatrix\n subMask = '/shared/Data/PSG/small_masks/ISWmask_din1_R010_trunc0500.fits'\n subCmat = subMatrix(subMask,maskFile,saveMatrixFile)\n print 'time elapsed: ',int((time.time()-startTime)/60),' minutes'\n \"\"\"",
"def test_3D_m8_2k_sFH():\n scal, velo = setup_3D()\n\n advec = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: 'gpu_2k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_3D_withPython(scal, velo, advec, advec_py)",
"def test_2D_m6_2k_sFH():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L4_2,\n Support: 'gpu_2k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L4_2,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def test_2D_m4_1k():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: 'gpu_1k',\n Splitting: 'o2'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: '',\n Splitting: 'o2'},\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def init_stitch(N):\n\tif N is None:\n\t\tN = np.int32(HII_DIM) #prepare for stitching\n\tMETA_GRID_SIZE = DIM/N\n\tM = np.int32(HII_DIM/META_GRID_SIZE)\n\t#HII_DIM = np.int32(HII_DIM)\n\tf_pixel_factor = DIM/HII_DIM;\n\tscale = np.float32(BOX_LEN/DIM)\n\tprint 'scale', scale\n\tHII_scale = np.float32(BOX_LEN/HII_DIM)\n\tshape = (DIM,DIM,N)\n\tstitch_grid_size = (DIM/(block_size[0]),\n\t\t\t\t\t\tDIM/(block_size[0]),\n\t\t\t\t\t\tN/(block_size[0]))\n\tHII_stitch_grid_size = (HII_DIM/(block_size[0]),\n\t\t\t\t\t\tHII_DIM/(block_size[0]),\n\t\t\t\t\t\tM/(block_size[0]))\n\t#ratio of large box to small size\n\tkernel_source = open(cmd_folder+\"/initialize_stitch.cu\").read()\n\tkernel_code = kernel_source % {\n\n\t\t'DELTAK': DELTA_K,\n\t\t'DIM': DIM, \n\t\t'VOLUME': VOLUME,\n\t\t'META_BLOCKDIM': N\n\t}\n\tmain_module = nvcc.SourceModule(kernel_code)\n\tinit_stitch = main_module.get_function(\"init_kernel\")\n\tHII_filter = main_module.get_function(\"HII_filter\")\n\tsubsample_kernel = main_module.get_function(\"subsample\")\n\tvelocity_kernel = main_module.get_function(\"set_velocity\")\n\tpspec_texture = main_module.get_texref(\"pspec\")\n\tMRGgen = MRG32k3aRandomNumberGenerator(seed_getter=seed_getter_uniform, offset=0)\n\tplan2d = Plan((np.int64(DIM), np.int64(DIM)), dtype=np.complex64)\n\tplan1d = Plan((np.int64(DIM)), dtype=np.complex64)\n\tprint \"init pspec\"\n\tinterpPspec, interpSize = init_pspec() #interpPspec contains both k array and P array\n\tinterp_cu = cuda.matrix_to_array(interpPspec, order='F')\n\tcuda.bind_array_to_texref(interp_cu, pspec_texture)\n\t#hbox_large = pyfftw.empty_aligned((DIM, DIM, DIM), dtype='complex64')\n\thbox_large = np.zeros((DIM, DIM, DIM), dtype=np.complex64)\n\t#hbox_small = np.zeros(HII_shape, dtype=np.float32)\n\t#hbox_large = n\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\n\t# Set up pinned memory for transfer\n\t#largebox_hs = cuda.aligned_empty(shape=shape, dtype=np.float32, alignment=resource.getpagesize())\n\tlargebox_pin = cuda.pagelocked_empty(shape=shape, dtype=np.float32)\n\tlargecbox_pin = cuda.pagelocked_empty(shape=shape, dtype=np.complex64)\n\n\tlargebox_d = gpuarray.zeros(shape, dtype=np.float32)\n\tlargebox_d_imag = gpuarray.zeros(shape, dtype=np.float32)\n\tprint \"init boxes\"\n\tfor meta_z in xrange(META_GRID_SIZE):\n\t\t# MRGgen = MRG32k3aRandomNumberGenerator(seed_getter=seed_getter_uniform, offset=meta_x*N**3)\n\t\tinit_stitch(largebox_d, DIM, np.int32(meta_z),block=block_size, grid=stitch_grid_size)\n\t\tinit_stitch(largebox_d_imag, DIM, np.int32(meta_z),block=block_size, grid=stitch_grid_size)\n\t\tlargebox_d *= MRGgen.gen_normal(shape, dtype=np.float32)\n\t\tlargebox_d_imag *= MRGgen.gen_normal(shape, dtype=np.float32)\n\t\tlargebox_d = largebox_d + np.complex64(1.j) * largebox_d_imag\n\t\tcuda.memcpy_dtoh_async(largecbox_pin, largebox_d)\n\t\thbox_large[:, :, meta_z*N:(meta_z+1)*N] = largecbox_pin.copy()\n\t#if want to get velocity need to use this\n\tif True:\n\t\tprint \"saving kbox\"\n\t\tnp.save(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN), hbox_large)\n\n\tprint \"Executing FFT on device\"\n\t#hbox_large = pyfftw.interfaces.numpy_fft.ifftn(hbox_large).real\n\thbox_large = fft_stitch(N, plan2d, plan1d, hbox_large, largebox_d).real\n\tprint hbox_large.dtype\n\tprint \"Finished FFT on device\"\n\tnp.save(parent_folder+\"/Boxes/deltax_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN), hbox_large)\n\t\n\tif True:\n\t\tprint \"loading kbox\"\n\t\thbox_large = 
np.load(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN))\n\tfor meta_z in xrange(META_GRID_SIZE):\n\t\tlargebox_pin = hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy()\n\t\t#cuda.memcpy_htod_async(largebox_d, largebox_pin)\n\t\tlargebox_d = gpuarray.to_gpu_async(hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\tHII_filter(largebox_d, DIM, np.int32(meta_z), ZERO, smoothR, block=block_size, grid=stitch_grid_size);\n\t\thbox_large[:, :, meta_z*N:(meta_z+1)*N] = largebox_d.get_async()\n\t#import IPython; IPython.embed()\n\tprint \"Executing FFT on host\"\n\t#hbox_large = hifft(hbox_large).astype(np.complex64).real\n\t#hbox_large = pyfftw.interfaces.numpy_fft.ifftn(hbox_large).real\n\thbox_large = fft_stitch(N, plan2d, plan1d, hbox_large, largebox_d).real\n\tprint \"Finished FFT on host\"\n\t#import IPython; IPython.embed()\n\n\t# for meta_x in xrange(META_GRID_SIZE):\n\t# \tfor meta_y in xrange(META_GRID_SIZE):\n\t# \t\tfor meta_z in xrange(META_GRID_SIZE):\n\t# \t\t\tlargebox_d = gpuarray.to_gpu(hbox_large[meta_x*N:(meta_x+1)*N, meta_y*N:(meta_y+1)*N, meta_z*N:(meta_z+1)*N])\n\t# \t\t\tHII_filter(largebox_d, N, np.int32(meta_x), np.int32(meta_y), np.int32(meta_z), ZERO, smoothR, block=block_size, grid=grid_size);\n\t# \t\t\thbox_large[meta_x*N:(meta_x+1)*N, meta_y*N:(meta_y+1)*N, meta_z*N:(meta_z+1)*N] = largebox_d.get()\n\t#plan = Plan(shape, dtype=np.complex64)\n\t#plan.execute(largebox_d, inverse=True) #FFT to real space of smoothed box\n\t#largebox_d /= VOLUME #divide by VOLUME if using fft (vs ifft)\n\n\n\t# This saves a large resolution deltax\n\n\t\n\tprint \"downsampling\"\n\tsmallbox_d = gpuarray.zeros((HII_DIM,HII_DIM,M), dtype=np.float32)\n\tfor meta_z in xrange(META_GRID_SIZE):\n\t\tlargebox_pin = hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy()\n\t\tcuda.memcpy_dtoh_async(largecbox_pin, largebox_d)\n\t\t#largebox_d = gpuarray.to_gpu_async(hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\tlargebox_d /= scale**3 #\n\t\tsubsample_kernel(largebox_d, smallbox_d, DIM, HII_DIM, PIXEL_FACTOR, block=block_size, grid=HII_stitch_grid_size) #subsample in real space\n\t\thbox_small[:, :, meta_z*M:(meta_z+1)*M] = smallbox_d.get_async()\n\tnp.save(parent_folder+\"/Boxes/smoothed_deltax_z0.00_{0:d}_{1:.0f}Mpc\".format(HII_DIM, BOX_LEN), hbox_small)\n\t#import IPython; IPython.embed()\n\n\n\t# To get velocities: reload the k-space box\n\thbox_large = np.load(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN))\n\thvbox_large = np.zeros((DIM, DIM, DIM), dtype=np.float32)\n\thvbox_small = np.zeros(HII_shape, dtype=np.float32)\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\tlargevbox_d = gpuarray.zeros((DIM,DIM,N), dtype=np.complex64)\n\tsmallvbox_d = gpuarray.zeros((HII_DIM, HII_DIM, M), dtype=np.float32)\n\tfor num, mode in enumerate(['x', 'y', 'z']):\n\t\tfor meta_z in xrange(META_GRID_SIZE):\n\t\t\tlargebox_d = gpuarray.to_gpu_async(hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\t\t#largebox_d /= VOLUME #divide by VOLUME if using fft (vs ifft)\n\t\t\tvelocity_kernel(largebox_d, largevbox_d, DIM, np.int32(meta_z), np.int32(num), block=block_size, grid=stitch_grid_size)\n\t\t\tHII_filter(largevbox_d, DIM, ZERO, smoothR, block=block_size, grid=stitch_grid_size)\n\t\t\tprint hvbox_large.shape, largevbox_d.shape\n\t\t\thvbox_large[:, :, meta_z*N:(meta_z+1)*N] = largevbox_d.get_async()\n\t\thvbox_large = fft_stitch(N, plan2d, plan1d, hvbox_large, largevbox_d).real\n\t\tfor meta_z in xrange(META_GRID_SIZE):\n\t\t\tlargevbox_d = 
gpuarray.to_gpu_async(hvbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\t\tsubsample_kernel(largevbox_d.real, smallvbox_d, DIM, HII_DIM,PIXEL_FACTOR, block=block_size, grid=HII_stitch_grid_size)\n\t\t\thvbox_small[:, :, meta_z*M:(meta_z+1)*M] = smallvbox_d.get_async()\n\t\tnp.save(parent_folder+\"/Boxes/v{0}overddot_{1:d}_{2:.0f}Mpc\".format(mode, HII_DIM, BOX_LEN), smallvbox_d.get())\n\n\treturn",
"def test_fixedkernel(self):\r\n X = np.random.rand(30, 4)\r\n K = np.dot(X, X.T)\r\n kernel = GPy.kern.fixed(4, K)\r\n kern = GPy.kern.poly(5, degree=4)\r\n self.assertTrue(GPy.kern.kern_test(kern, verbose=verbose))",
"def test_3D_m8_2k():\n scal, velo = setup_3D()\n\n advec = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: 'gpu_2k',\n Splitting: 'o2'}\n )\n advec_py = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: '',\n Splitting: 'o2'},\n )\n assertion_3D_withPython(scal, velo, advec, advec_py)",
"def test_linear_2d_merwe():\n\n\n def fx(x, dt):\n F = np.array([[1, dt, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, dt],\n [0, 0, 0, 1]], dtype=float)\n\n return np.dot(F, x)\n\n def hx(x):\n return np.array([x[0], x[2]])\n\n\n dt = 0.1\n points = MerweScaledSigmaPoints(4, .1, 2., -1)\n kf = UKF(dim_x=4, dim_z=2, dt=dt, fx=fx, hx=hx, points=points)\n\n\n kf.x = np.array([-1., 1., -1., 1])\n kf.P*=0.0001\n #kf.R *=0\n #kf.Q\n\n zs = []\n for i in range(20):\n z = np.array([i+randn()*0.1, i+randn()*0.1])\n zs.append(z)\n\n Ms, Ps = kf.batch_filter(zs)\n smooth_x, _, _ = kf.rts_smoother(Ms, Ps, dt=dt)\n\n if DO_PLOT:\n plt.figure()\n zs = np.asarray(zs)\n plt.plot(zs[:,0], marker='+')\n plt.plot(Ms[:,0], c='b')\n plt.plot(smooth_x[:,0], smooth_x[:,2], c='r')\n print(smooth_x)",
"def init():\n\tN = np.int32(DIM) #prepare for stitching\n\t#HII_DIM = np.int32(HII_DIM)\n\tf_pixel_factor = DIM/HII_DIM;\n\tscale = np.float32(BOX_LEN)/DIM\n\tHII_scale = np.float32(BOX_LEN)/HII_DIM\n\tshape = (N,N,N)\n\t\n\tMRGgen = MRG32k3aRandomNumberGenerator(seed_getter=seed_getter_uniform, offset=0)\n\n\tkernel_source = open(cmd_folder+\"/initialize.cu\").read()\n\tkernel_code = kernel_source % {\n\n\t\t'DELTAK': DELTA_K,\n\t\t'VOLUME': VOLUME,\n\t\t'DIM': DIM\n\t}\n\tmain_module = nvcc.SourceModule(kernel_code)\n\tinit_kernel = main_module.get_function(\"init_kernel\")\n\tHII_filter = main_module.get_function(\"HII_filter\")\n\tadj_complex_conj = main_module.get_function(\"adj_complex_conj\")\n\tsubsample_kernel = main_module.get_function(\"subsample\")\n\tvelocity_kernel = main_module.get_function(\"set_velocity\")\n\tpspec_texture = main_module.get_texref(\"pspec\")\n\n\tinterpPspec, interpSize = init_pspec() #interpPspec contains both k array and P array\n\tinterp_cu = cuda.matrix_to_array(interpPspec, order='F')\n\tcuda.bind_array_to_texref(interp_cu, pspec_texture)\n\n\tlargebox_d = gpuarray.zeros(shape, dtype=np.float32)\n\tinit_kernel(largebox_d, np.int32(DIM), block=block_size, grid=grid_size)\n\n\t#import IPython; IPython.embed()\n\tlargebox_d_imag = gpuarray.zeros(shape, dtype=np.float32)\n\tinit_kernel(largebox_d_imag, np.int32(DIM), block=block_size, grid=grid_size)\n\n\tlargebox_d *= MRGgen.gen_normal(shape, dtype=np.float32)\n\tlargebox_d_imag *= MRGgen.gen_normal(shape, dtype=np.float32)\n\tlargebox_d = largebox_d + np.complex64(1.j) * largebox_d_imag\n\n\t#adj_complex_conj(largebox_d, DIM, block=block_size, grid=grid_size)\n\tlargebox = largebox_d.get()\n\t#np.save(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc\".format(DIM, BOX_LEN), largebox)\n\n\t#save real space box before smoothing\n\tplan = Plan(shape, dtype=np.complex64)\n\tplan.execute(largebox_d, inverse=True) #FFT to real space of smoothed box\n\tlargebox_d /= scale**3\n\tnp.save(parent_folder+\"/Boxes/deltax_z0.00_{0:d}_{1:.0f}Mpc\".format(DIM, BOX_LEN), largebox_d.real.get_async())\n\n\t#save real space box after smoothing and subsampling\n\t# host largebox is still in k space, no need to reload from disk\n\tlargebox_d = gpuarray.to_gpu(largebox)\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\tHII_filter(largebox_d, N, ZERO, smoothR, block=block_size, grid=grid_size);\n\tplan.execute(largebox_d, inverse=True) #FFT to real space of smoothed box\n\tlargebox_d /= scale**3\n\tsmallbox_d = gpuarray.zeros(HII_shape, dtype=np.float32)\n\tsubsample_kernel(largebox_d.real, smallbox_d, N, HII_DIM, PIXEL_FACTOR, block=block_size, grid=HII_grid_size) #subsample in real space\n\tnp.save(parent_folder+\"/Boxes/smoothed_deltax_z0.00_{0:d}_{1:.0f}Mpc\".format(HII_DIM, BOX_LEN), smallbox_d.get_async())\n\n\t# reload the k-space box for velocity boxes\n\tlargebox_d = gpuarray.to_gpu(largebox)\n\t\n\t#largebox_d /= VOLUME #divide by VOLUME if using fft (vs ifft)\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\tlargevbox_d = gpuarray.zeros((DIM,DIM,DIM), dtype=np.complex64)\n\tsmallbox_d = gpuarray.zeros(HII_shape, dtype=np.float32)\n\tfor num, mode in enumerate(['x', 'y', 'z']):\n\t\tvelocity_kernel(largebox_d, largevbox_d, DIM, np.int32(num), block=block_size, grid=grid_size)\n\t\tHII_filter(largevbox_d, DIM, ZERO, smoothR, block=block_size, grid=grid_size)\n\t\tplan.execute(largevbox_d, inverse=True)\n\t\tlargevbox_d /= scale**3\n\t\t#import IPython; IPython.embed()\n\t\tsubsample_kernel(largevbox_d.real, 
smallbox_d, DIM, HII_DIM,PIXEL_FACTOR, block=block_size, grid=HII_grid_size)\n\t\tnp.save(parent_folder+\"/Boxes/v{0}overddot_{1:d}_{2:.0f}Mpc\".format(mode, HII_DIM, BOX_LEN), smallbox_d.get())\n\n\treturn",
"def test_2D_m6_1k_sFH():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L4_2,\n Support: 'gpu_2k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L4_2,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)"
]
| [
"0.6273544",
"0.6218895",
"0.61845493",
"0.5922564",
"0.5862043",
"0.58117807",
"0.5725113",
"0.5718566",
"0.56848884",
"0.56645614",
"0.5646553",
"0.5626228",
"0.56077373",
"0.5603067",
"0.5589761",
"0.5585725",
"0.5563543",
"0.55627114",
"0.5533993",
"0.5531129",
"0.5524585",
"0.5522957",
"0.5520537",
"0.55193454",
"0.5515849",
"0.5499219",
"0.5494122",
"0.5493724",
"0.5478254",
"0.547688"
]
| 0.63639283 | 0 |
Testing M8 remeshing formula in 2D, 1 kernel, single precision, o2_FullHalf splitting. | def test_2D_m8_1k_sFH():
scal, velo = setup_2D()
advec = Advection(velo, scal, discretization=d2d,
method={TimeIntegrator: RK2,
Interpolation: Linear,
Remesh: M8Prime,
Support: 'gpu_1k',
Splitting: 'o2_FullHalf'}
)
advec_py = Advection(velo, scal, discretization=d2d,
method={TimeIntegrator: RK2,
Interpolation: Linear,
Remesh: M8Prime,
Support: '',
Splitting: 'o2_FullHalf'}
)
assertion_2D_withPython(scal, velo, advec, advec_py) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_2D_m8_2k_sFH():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: 'gpu_2k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def test_2D_m8_1k():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: 'gpu_1k',\n Splitting: 'o2'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: '',\n Splitting: 'o2'},\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def test_2D_m8_2k():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: 'gpu_2k',\n Splitting: 'o2'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: '',\n Splitting: 'o2'},\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def test_2D_m4_1k_sFH():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: 'gpu_1k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def test_2D_m4_2k_sFH():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: 'gpu_2k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def _apply_array_spatial12_halffilling(self, h1e: 'Nparray',\n h2e: 'Nparray') -> 'Nparray':\n if fqe.settings.use_accelerated_code:\n return self._apply_array_spatial12_lm(h1e, h2e)\n else:\n h1e = copy.deepcopy(h1e)\n h2e = numpy.moveaxis(copy.deepcopy(h2e), 1, 2) * (-1.0)\n norb = self.norb()\n for k in range(norb):\n h1e[:, :] -= h2e[:, k, k, :]\n\n if numpy.iscomplex(h1e).any() or numpy.iscomplex(h2e).any():\n dvec = self.calculate_dvec_spatial()\n out = numpy.einsum(\"ij,ijkl->kl\", h1e, dvec)\n dvec = numpy.einsum(\"ijkl,klmn->ijmn\", h2e, dvec)\n out += self._calculate_coeff_spatial_with_dvec(dvec)\n else:\n nij = norb * (norb + 1) // 2\n h1ec = numpy.zeros((nij), dtype=self._dtype)\n h2ec = numpy.zeros((nij, nij), dtype=self._dtype)\n for i in range(norb):\n for j in range(i + 1):\n ijn = j + i * (i + 1) // 2\n h1ec[ijn] = h1e[i, j]\n for k in range(norb):\n for l in range(k + 1):\n kln = l + k * (k + 1) // 2\n h2ec[ijn, kln] = h2e[i, j, k, l]\n dvec = self._calculate_dvec_spatial_compressed()\n out = numpy.einsum(\"i,ikl->kl\", h1ec, dvec)\n dvec = numpy.einsum(\"ik,kmn->imn\", h2ec, dvec)\n for i in range(self.norb()):\n for j in range(self.norb()):\n ijn = min(i, j) + max(i, j) * (max(i, j) + 1) // 2\n work = self._core.alpha_map(j, i)\n for source, target, parity in work:\n out[source, :] += dvec[ijn, target, :] * parity\n work = self._core.beta_map(j, i)\n for source, target, parity in work:\n out[:, source] += dvec[ijn, :, target] * parity\n\n return out",
"def test_filter_l2_1():\n box = Box(length=L, origin=O)\n f = Field(box, formula=func, name='f0')\n d_fine = Discretization([513, 513, 513])\n d_coarse = Discretization([257, 257, 257], ghosts=[2, 2, 2])\n op = MultiresolutionFilter(d_in=d_fine, d_out=d_coarse,\n variables={f: d_coarse},\n method={Remesh: L2_1, })\n op.discretize()\n op.setup()\n topo_coarse = op.discreteFields[f].topology\n topo_fine = [t for t in f.discreteFields.keys()\n if not t is topo_coarse][0]\n f.initialize(topo=topo_fine)\n f_out = f.discreteFields[topo_coarse]\n op.apply(simu)\n valid = [npw.zeros(f_out[0].shape), ]\n valid = func(valid, *topo_coarse.mesh.coords)\n assert np.allclose(valid[0][topo_coarse.mesh.iCompute],\n f_out[0][topo_coarse.mesh.iCompute]), \\\n np.max(np.abs(valid[0][topo_coarse.mesh.iCompute] -\n f_out[0][topo_coarse.mesh.iCompute]))",
"def test_2D_m6_1k_sFH():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L4_2,\n Support: 'gpu_2k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L4_2,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def test_3D_m8_2k_sFH():\n scal, velo = setup_3D()\n\n advec = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: 'gpu_2k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_3D_withPython(scal, velo, advec, advec_py)",
"def test_1_2_2D_cube_splits(self):\n check = [(0, 0), (1, 1), (1, 0), (0, 1), (0.5, 0.5), (0.0, 0.5),\n (0.5, 0.0),\n (0.25, 0.25), (1.0, 0.5), (0.5, 1.0), (0.75, 0.75),\n (0.75, 0.25),\n (0.25, 0.75), (0.5, 0.25), (0.25, 0.5), (0.375, 0.375),\n (0.0, 0.25),\n (0.25, 0.0), (0.125, 0.125), (0.125, 0.375), (0.375, 0.125),\n (0.5, 0.75), (0.75, 0.5), (0.625, 0.625), (1.0, 0.75),\n (0.75, 1.0),\n (0.875, 0.875), (0.875, 0.625), (0.625, 0.875), (0.625, 0.375),\n (1.0, 0.25), (0.75, 0.0), (0.875, 0.125), (0.875, 0.375),\n (0.625, 0.125), (0.375, 0.625), (0.0, 0.75), (0.25, 1.0),\n (0.125, 0.875), (0.125, 0.625), (0.375, 0.875)]\n\n nn_checks = {(0, 0): [(0.25, 0.0), (0.0, 0.25), (0.125, 0.125)],\n (0.625, 0.375): [(0.5, 0.5), (0.75, 0.25), (0.75, 0.5),\n (0.5, 0.25)],\n (0, 1): [(0.25, 1.0), (0.125, 0.875),(0.0, 0.75)],\n (0.625, 0.125): [(0.5, 0.0), (0.75, 0.25), (0.75, 0.0),\n (0.5, 0.25)]}\n\n\n init_triangulation(2, 2, check, nn_checks)",
"def _apply_array_spin12_halffilling(self, h1e: 'Nparray',\n h2e: 'Nparray') -> 'Nparray':\n if fqe.settings.use_accelerated_code:\n #return self._apply_array_spin12_blocked(h1e, h2e)\n return self._apply_array_spin12_lm(h1e, h2e)\n else:\n h1e = copy.deepcopy(h1e)\n h2e = numpy.moveaxis(copy.deepcopy(h2e), 1, 2) * (-1.0)\n norb = self.norb()\n for k in range(norb * 2):\n h1e[:, :] -= h2e[:, k, k, :]\n\n (dveca, dvecb) = self.calculate_dvec_spin()\n out = numpy.einsum(\"ij,ijkl->kl\", h1e[:norb, :norb], dveca) \\\n + numpy.einsum(\"ij,ijkl->kl\", h1e[norb:, norb:], dvecb)\n ndveca = numpy.einsum(\"ijkl,klmn->ijmn\",\n h2e[:norb, :norb, :norb, :norb], dveca) \\\n + numpy.einsum(\"ijkl,klmn->ijmn\",\n h2e[:norb, :norb, norb:, norb:], dvecb)\n ndvecb = numpy.einsum(\"ijkl,klmn->ijmn\",\n h2e[norb:, norb:, :norb, :norb], dveca) \\\n + numpy.einsum(\"ijkl,klmn->ijmn\",\n h2e[norb:, norb:, norb:, norb:], dvecb)\n out += self._calculate_coeff_spin_with_dvec((ndveca, ndvecb))\n return out",
"def test_2D_m6_2k_sFH():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L4_2,\n Support: 'gpu_2k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L4_2,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def test_half_case(self):\n steps = save_divide(np.ones(2), 2 * np.ones(2))\n np.testing.assert_equal(steps, 0.5 * np.ones(2))",
"def test_multiple_case(self):\r\n\r\n shp = (3, 3)\r\n fx, fy, fz, fw = fmatrices('xyzw')\r\n dx, dy, dz, dw = dmatrices('xyzw')\r\n fv = fvector('r').dimshuffle('x', 0)\r\n dv = dvector('s').dimshuffle('x', 0)\r\n fxv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fyv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fzv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fwv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float32').reshape(1, shp[0])\r\n dxv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dyv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dzv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dwv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float64').reshape(1, shp[0])\r\n\r\n #We must be sure that the Canonizer is working, but that we don't have other\r\n # optimisation that could hide bug in the Canonizer as local_elemwise_fusion\r\n mode = compile.mode.get_default_mode()\r\n old_optimizer = mode._optimizer\r\n try:\r\n mode._optimizer = gof.Query([\"canonicalize\"])\r\n mode._optimizer = mode._optimizer.including('ShapeOpt')\r\n mode._optimizer = mode._optimizer.excluding(\r\n 'local_elemwise_fusion')\r\n\r\n #test x / x -> 1\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([(fx/fx,[fx],[fxv],'float32'),\r\n (dx/dx,[dx],[dxv],'float64'),\r\n (fv/fv,[fv],[fvv],'float32'),\r\n (dv/dv,[dv],[dvv],'float64'),\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert (out == numpy.ones(shp, dtype=out_dtype)).all()\r\n topo = f.maker.fgraph.toposort()\r\n if sym_inputs[0].broadcastable[0]:\r\n assert len(topo) == 2\r\n assert isinstance(topo[0].op, Shape_i)\r\n assert isinstance(topo[1].op, tensor.Alloc)\r\n else:\r\n assert len(topo) == 3\r\n assert isinstance(topo[0].op, Shape_i)\r\n assert isinstance(topo[1].op, Shape_i)\r\n assert isinstance(topo[2].op, tensor.Alloc)\r\n assert(out_dtype == out.dtype)\r\n\r\n #test (x * y) / x -> y\r\n for id,(g, sym_inputs, val_inputs, nb_elemwise, out_dtype) in enumerate([\r\n ((dx*dy)/dx,[dx,dy],[dxv,dyv],0,'float64'),\r\n ((fx*fy)/fx,[fx,fy],[fxv,fyv],0,'float32'),\r\n ((dv*dy)/dv,[dv,dy],[dvv,dyv],0,'float64'),\r\n ((fv*fy)/fv,[fv,fy],[fvv,fyv],0,'float32'),\r\n #must broadcast as their is a dimshuffle in the computation\r\n ((dx*dv)/dx,[dx,dv],[dxv,dvv],1,'float64'),\r\n #topo: [Elemwise{second,no_inplace}(x, <TensorType(float64, row)>)]\r\n ((fx*fv)/fx,[fx,fv],[fxv,fvv],1,'float32')\r\n #topo: [Elemwise{second,no_inplace}(x, <TensorType(float32, row)>)]\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert(out_dtype == out.dtype)\r\n assert numpy.allclose(out, val_inputs[1])\r\n topo = f.maker.fgraph.toposort()\r\n print \"ID TOPO\", id, topo, sym_inputs\r\n for r, t in f.maker.fgraph.shape_feature.shape_of.items():\r\n print ' ', r, t\r\n if topo and not(len(topo)==1 and topo[0].op==deep_copy_op):\r\n for node in topo[:-1]:\r\n assert isinstance(node.op, Shape_i)\r\n assert isinstance(topo[-1].op, tensor.Alloc)\r\n\r\n #test x / y / x -> 1 / y\r\n for id,(g, sym_inputs, val_inputs, nb_elemwise, out_dtype) in enumerate([\r\n ((dx/dy)/dx,[dx,dy],[dxv,dyv],1,'float64'),\r\n ((fx/fy)/fx,[fx,fy],[fxv,fyv],1,'float32'),\r\n ((dv/dy)/dv,[dv,dy],[dvv,dyv],1,'float64'),\r\n 
((fv/fy)/fv,[fv,fy],[fvv,fyv],1,'float32'),\r\n #must broadcast as their is a dimshuffle in the computation\r\n\r\n ((dx/dv)/dx,[dx,dv],[dxv,dvv],1,'float64'),\r\n #topo: [Shape_i, Shape_i, Elemwise{inv,no_inplace}(<TensorType(float64, row)>), Alloc]\r\n ((fx/fv)/fx,[fx,fv],[fxv,fvv],1,'float32'),\r\n #topo:[Shape_i, Shape_i, Elemwise{inv,no_inplace}(<TensorType(float32, row)>), Alloc]\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, (1 / val_inputs[1]))\r\n topo = f.maker.fgraph.toposort()\r\n print topo\r\n elem = [t for t in topo if isinstance(t.op, T.Elemwise)]\r\n assert len(elem) == nb_elemwise\r\n assert isinstance(elem[0].op, (T.Elemwise, ))\r\n assert isinstance(elem[0].op.scalar_op, (\r\n theano.scalar.basic.Inv, theano.scalar.basic.TrueDiv))\r\n assert(out_dtype == out.dtype)\r\n\r\n #test (a / b) * (b / c) * (c / d) -> a / d\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n ((dx / dy) * (dy / dz) * (dz / dw),[dx,dy,dz,dw],[dxv,dyv,dzv,dwv],'float64'),\r\n ((fx / fy) * (fy / fz) * (fz / fw),[fx,fy,fz,fw],[fxv,fyv,fzv,fwv],'float32'),\r\n ((dv / dy) * (dy / dz) * (dz / dw),[dv,dy,dz,dw],[dvv,dyv,dzv,dwv],'float64'),\r\n ((fv / fy) * (fy / fz) * (fz / fw),[fv,fy,fz,fw],[fvv,fyv,fzv,fwv],'float32'),\r\n ((dx / dv) * (dv / dz) * (dz / dw),[dx,dv,dz,dw],[dxv,dvv,dzv,dwv],'float64'),\r\n ((fx / fv) * (fv / fz) * (fz / fw),[fx,fv,fz,fw],[fxv,fvv,fzv,fwv],'float32'),\r\n ((dx / dy) * (dy / dv) * (dv / dw),[dx,dy,dv,dw],[dxv,dyv,dvv,dwv],'float64'),\r\n ((fx / fy) * (fy / fv) * (fv / fw),[fx,fy,fv,fw],[fxv,fyv,fvv,fwv],'float32'),\r\n ((dx / dy) * (dy / dz) * (dz / dv),[dx,dy,dz,dv],[dxv,dyv,dzv,dvv],'float64'),\r\n ((fx / fy) * (fy / fz) * (fz / fv),[fx,fy,fz,fv],[fxv,fyv,fzv,fvv],'float32'),\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, (val_inputs[0] / val_inputs[3]))\r\n topo = f.maker.fgraph.toposort()\r\n assert len(topo) == 1\r\n assert isinstance(topo[0].op, (T.Elemwise, ))\r\n assert isinstance(topo[0].op.scalar_op,\r\n theano.scalar.basic.TrueDiv)\r\n assert len(topo[0].inputs) == 2\r\n assert(out_dtype == out.dtype)\r\n\r\n #test (2.0 * x) / (4.0 * y) -> (0.5 * x) / y\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n (((2.0*dx)/(4.0*dy)),[dx,dy],[dxv,dyv],'float64'),\r\n (((2.0*fx)/(4.0*fy)),[fx,fy],[fxv,fyv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n (((2.0*dv)/(4.0*dy)),[dv,dy],[dvv,dyv],'float64'),\r\n (((2.0*fv)/(4.0*fy)),[fv,fy],[fvv,fyv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n (((2.0*dx)/(4.0*dv)),[dx,dv],[dxv,dvv],'float64'),\r\n (((2.0*fx)/(4.0*fv)),[fx,fv],[fxv,fvv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ]):\r\n\r\n if isinstance(out_dtype, dict):\r\n out_dtype = out_dtype[config.cast_policy]\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, (0.5 *\r\n val_inputs[0] / val_inputs[1]))\r\n topo = f.maker.fgraph.toposort()\r\n assert len(topo) == 2\r\n assert isinstance(topo[0].op, (T.Elemwise, ))\r\n assert isinstance(topo[0].op.scalar_op,\r\n theano.scalar.basic.Mul)\r\n assert len(topo[0].inputs) == 2\r\n assert isinstance(topo[1].op, (T.Elemwise, ))\r\n assert isinstance(topo[1].op.scalar_op,\r\n theano.scalar.basic.TrueDiv)\r\n assert len(topo[1].inputs) == 2\r\n assert(out_dtype == 
out.dtype)\r\n\r\n #test 2 * x / 2 -> x\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n ((2*dx)/2,[dx],[dxv],'float64'),\r\n ((2*fx)/2,[fx],[fxv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ((2*dv)/2,[dv],[dvv],'float64'),\r\n ((2*fv)/2,[fv],[fvv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ]):\r\n if isinstance(out_dtype, dict):\r\n out_dtype = out_dtype[config.cast_policy]\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, val_inputs[0])\r\n topo = f.maker.fgraph.toposort()\r\n assert len(topo) == 1\r\n topo[0].op == deep_copy_op\r\n assert(out_dtype == out.dtype)\r\n\r\n #test x / abs(x) -> sign(x)\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n (dx/abs(dx),[dx],[0.5-dxv],'float64'),\r\n (fx/abs(fx),[fx],[0.5-fxv], 'float32'),\r\n (dx/abs(dx),[dx],[0.1*dxv],'float64'),\r\n (fx/abs(fx),[fx],[0.1*fxv], 'float32'),\r\n (dv/abs(dv),[dv],[0.5-dvv],'float64'),\r\n (fv/abs(fv),[fv],[0.5-fvv], 'float32'),\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.all(numpy.isfinite(out))\r\n assert numpy.allclose(out, numpy.sign(val_inputs[0]))\r\n assert(out_dtype == out.dtype)\r\n assert len(f.maker.fgraph.toposort()) == 1\r\n\r\n #test (2*x) / (3*abs(x)) -> sign(x)\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n ((2 * dx) / (3 * abs(dx)), [dx], [0.5 - dxv], 'float64'),\r\n ((2 * fx) / (3 * abs(fx)), [fx], [0.5 - fxv],\r\n {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ((2 * dx) / (3 * abs(dx)), [dx], [0.1 * dxv], 'float64'),\r\n ((2 * fx) / (3 * abs(fx)), [fx], [0.1 * fxv],\r\n {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ((2 * dv) / (3 * abs(dv)), [dv], [0.5 - dvv], 'float64'),\r\n ((2 * fv) / (3 * abs(fv)), [fv], [0.5 - fvv],\r\n {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ]):\r\n\r\n if isinstance(out_dtype, dict):\r\n out_dtype = out_dtype[config.cast_policy]\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n topo = f.maker.fgraph.toposort()\r\n out = f(*val_inputs)\r\n assert numpy.all(numpy.isfinite(out))\r\n assert numpy.allclose(out, numpy.sign(val_inputs[0]) * 2 / 3)\r\n assert(out_dtype == out.dtype)\r\n finally:\r\n mode._optimizer = old_optimizer",
"def test_1_2_2D_rec_splits(self):\n check = [(3.0, -2.0), (7.0, -1.0), (7.0, -2.0), (3.0, -1.0),\n (5.0, -1.5), (3.0, -1.5), (5.0, -2.0), (4.0, -1.75),\n (7.0, -1.5), (5.0, -1.0), (6.0, -1.25), (6.0, -1.75),\n (4.0, -1.25), (5.0, -1.75), (4.0, -1.5), (4.5, -1.625),\n (3.0, -1.75), (4.0, -2.0), (3.5, -1.875), (3.5, -1.625),\n (4.5, -1.875), (5.0, -1.25), (6.0, -1.5), (5.5, -1.375),\n (7.0, -1.25), (6.0, -1.0), (6.5, -1.125), (6.5, -1.375),\n (5.5, -1.125), (5.5, -1.625), (7.0, -1.75), (6.0, -2.0),\n (6.5, -1.875), (6.5, -1.625), (5.5, -1.875), (4.5, -1.375),\n (3.0, -1.25), (4.0, -1.0), (3.5, -1.125), (3.5, -1.375),\n (4.5, -1.125)]\n nn_checks = {(3.0, -2.0): [(3.0, -1.75), (3.5, -1.875), (4.0, -2.0)],\n (5.0, -1.75): [(5.0, -2.0), (5.0, -1.5), (5.5, -1.625),\n (5.5, -1.875), (4.5, -1.625), (6.0, -1.75),\n (4.5, -1.875), (4.0, -1.75)],\n (6.0, -2.0): [(5.0, -2.0), (5.5, -1.875), (6.0, -1.75),\n (6.5, -1.875), (7, -2)],\n (4.5, -1.125): [(5.0, -1.0), (4.0, -1.25), (5.0, -1.25),\n (4.0, -1.0)]}\n\n init_triangulation(2, 2, check, nn_checks, bounds=[(3, 7), (-2, -1)])",
"def test_3D_m8_1k_sFH():\n scal, velo = setup_3D()\n\n advec = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: 'gpu_1k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_3D_withPython(scal, velo, advec, advec_py)",
"def test_dmi_uses_unit_length_2dmesh():\n A = 8.78e-12 # J/m\n D = 1.58e-3 # J/m^2\n Ms = 3.84e5 # A/m\n\n energies = []\n\n # unit_lengths 1e-9 and 1 are common, let's throw in an intermediate length\n # just to challenge the system a little:\n for unit_length in (1, 1e-4, 1e-9):\n radius = 200e-9 / unit_length\n maxh = 5e-9 / unit_length\n helical_period = (4 * pi * A / D) / unit_length\n k = 2 * pi / helical_period\n # HF 27 April 2014: The next command fails in dolfin 1.3\n # mesh = df.CircleMesh(df.Point(0, 0), radius, maxh)\n # The actual shape of the domain shouldn't matter for the test,\n # so let's use a Rectangular mesh which should work the same:\n\n nx = ny = int(round(radius / maxh))\n mesh = df.RectangleMesh(df.Point(0, 0), df.Point(radius, radius), nx, ny)\n\n S3 = df.VectorFunctionSpace(mesh, \"CG\", 1, dim=3)\n m_expr = df.Expression((\"0\", \"cos(k * x[0])\", \"sin(k * x[0])\"), k=k, degree=1)\n m = Field(S3, m_expr, name='m')\n dmi = DMI(D)\n Ms_dg = Field(df.FunctionSpace(mesh, 'DG', 0), Ms)\n dmi.setup(m, Ms_dg, unit_length=unit_length)\n energies.append(dmi.compute_energy())\n\n H = df.Function(S3)\n H.vector()[:] = dmi.compute_field()\n print H(0.0, 0.0)\n\n print \"Using unit_length = {}.\".format(unit_length)\n print \"Helical period {}.\".format(helical_period)\n print \"Energy {}.\".format(dmi.compute_energy())\n\n rel_diff_energies = abs(energies[0] - energies[1]) / abs(energies[1])\n print \"Relative difference of energy {}.\".format(rel_diff_energies)\n assert rel_diff_energies < 1e-13\n\n rel_diff_energies2 = abs(energies[0] - energies[2]) / abs(energies[2])\n print \"Relative difference2 of energy {}.\".format(rel_diff_energies2)\n assert rel_diff_energies2 < 1e-13",
"def test_sw2():\n B1 = 100\n B2 = 200\n h = 18\n t = 1\n H = h + 2 * t\n E1 = 20000\n E2 = 10000\n sections = ((B1, t, 0, E1), (B2, t, h + t, E2))\n EI, top, bot = bm.EI(sections, E1)\n EIc = E1 * B1 * (H ** 3 - h ** 3) / 12\n assert 0.99 < EI / EIc < 1.01",
"def kernel_test(slabs, data, backend):\n Q = data[:, 0]\n\n layers = []\n for thickness, rsld, isld, sigma in slabs:\n layers.append(\n model.Layer(\n b=(rsld - 1j * isld), dens=0.1, d=thickness, sigma=sigma\n )\n )\n layers.reverse()\n stack = model.Stack(Layers=list(layers[1:-1]), Repetitions=1)\n sample = model.Sample(\n Stacks=[stack], Ambient=layers[-1], Substrate=layers[0]\n )\n # print(sample)\n\n inst = model.Instrument(\n probe=backend,\n wavelength=1.54,\n coords=\"q\",\n I0=1,\n res=0,\n restype=\"no conv\",\n respoints=5,\n resintrange=2,\n beamw=0.1,\n footype=\"no corr\",\n samplelen=10,\n pol=\"uu\",\n )\n if data.shape[1] == 4:\n dQ = data[:, 3]\n inst.restype = \"full conv and varying res.\"\n inst.res = dQ\n if backend == \"neutron pol spin flip\":\n # memory issues in matrix formalism if too many data points\n inst.respoints = 101\n else:\n inst.respoints = (\n 10001 # try to use same convolution as ref1d when generating\n )\n inst.resintrange = 3.5\n\n # print(inst)\n R = sample.SimSpecular(Q, inst)\n\n assert R.shape == data[:, 1].shape\n if data.shape[1] == 4:\n # validation accuracy is reduced for resolution runs, as strongly\n # depends on numerical convolution scheme\n if backend == \"neutron pol spin flip\":\n np.testing.assert_allclose(R, data[:, 1], rtol=0.005)\n else:\n np.testing.assert_allclose(R, data[:, 1], rtol=0.001)\n else:\n np.testing.assert_allclose(R, data[:, 1], rtol=0.001)",
"def _apply_array_spin12(self, h1e: 'Nparray', h2e: 'Nparray') -> 'Nparray':\n norb = self.norb()\n assert h1e.shape == (norb * 2, norb * 2)\n assert h2e.shape == (norb * 2, norb * 2, norb * 2, norb * 2)\n nalpha = self.nalpha()\n nbeta = self.nbeta()\n\n thresh = self._low_thresh\n if nalpha < norb * thresh and nbeta < norb * thresh:\n graphset = FciGraphSet(2, 2)\n graphset.append(self._core)\n if nalpha - 2 >= 0:\n graphset.append(FciGraph(nalpha - 2, nbeta, norb))\n if nalpha - 1 >= 0 and nbeta - 1 >= 0:\n graphset.append(FciGraph(nalpha - 1, nbeta - 1, norb))\n if nbeta - 2 >= 0:\n graphset.append(FciGraph(nalpha, nbeta - 2, norb))\n return self._apply_array_spin12_lowfilling(h1e, h2e)\n\n return self._apply_array_spin12_halffilling(h1e, h2e)",
"def test_power_spectral_density_from_spatially_resolved_magnetisation_confined_to_mesh_region(tmpdir, debug=False):\n os.chdir(str(tmpdir))\n RTOL = 1e-10\n\n H1 = 1e6 # external field in A/m\n alpha1 = 0.5 # some sort of damping constant\n omega1 = gamma * H1 # precession frequency\n\n H2 = 2.8e4 # external field in A/m\n alpha2 = 0.3 # some sort of damping constant\n omega2 = gamma * H2 # precession frequency\n\n ##\n # Step 1: Construct a time series of artificial magnetisation\n # data and save it to a bunch of .npy files.\n ##\n t_step = 1e-11\n t_ini = 0\n t_end = 10e-9\n\n N1 = 42 # in a real application this would be the number of mesh vertices\n N2 = 23 # in a real application this would be the number of mesh vertices\n fft_test_helpers.create_test_npy_files_with_two_regions(\n str(tmpdir), t_step, t_ini, t_end, omega1, alpha1, N1, omega2, alpha2, N2)\n\n ##\n # Step 2: compute the FFT of a resampled time series, both by\n # hand and using FFT_m.\n ##\n # XXX TODO: Resampling timesteps is not supported when using .npy\n # files. Either simplify the code below, or implement saving to\n # .h5 files so that it's easier to implement resampling for\n # spatially resolved data, too.\n ##\n t_step_res = t_step\n t_ini_res = t_ini\n t_end_res = t_end\n ts_resampled = np.arange(t_ini_res, t_end_res, t_step_res)\n\n # Compute time series based on resampled timesteps\n mx_res = exp(-ts_resampled * 1e8 / alpha1) * sin(omega1 * ts_resampled)\n my_res = exp(-ts_resampled * 1e8 / alpha1) * cos(omega1 * ts_resampled)\n mz_res = 1 - sqrt(mx_res ** 2 + my_res ** 2)\n\n # Compute 'analytical' Fourier transform of resampled time series and\n # determine the power of the spectrum for each component. We also need\n # to multiply by the number of mesh nodes because the numerical algorithm\n # sums up all contributions at the individual nodes (but we can just\n # multiply because they are all identical by construction).\n psd_mx_expected = N1 * np.absolute(np.fft.rfft(mx_res)) ** 2\n psd_my_expected = N1 * np.absolute(np.fft.rfft(my_res)) ** 2\n psd_mz_expected = N1 * np.absolute(np.fft.rfft(mz_res)) ** 2\n\n # Compute Fourier transform of resampled time series using FFT_m\n freqs_computed, psd_mx_computed, psd_my_computed, psd_mz_computed = \\\n compute_power_spectral_density('m_ringdown*.npy', t_step_res, t_ini=t_ini_res,\n t_end=t_end_res, subtract_values=None, restrict_to_vertices=xrange(N1))\n\n # Check that the analytically determined power spectra are the same as the\n # computed ones.\n assert(np.allclose(psd_mx_expected, psd_mx_computed, atol=0, rtol=RTOL))\n assert(np.allclose(psd_my_expected, psd_my_computed, atol=0, rtol=RTOL))\n assert(np.allclose(psd_mz_expected, psd_mz_computed, atol=0, rtol=RTOL))\n\n if debug:\n # Plot the spectra for debugging\n fig = plt.figure(figsize=(20, 5))\n ax = fig.gca()\n ax.plot(freqs_computed, psd_mx_expected, label='psd_mx_expected')\n ax.plot(freqs_computed, psd_my_expected, label='psd_my_expected')\n ax.plot(freqs_computed, psd_mz_expected, label='psd_mz_expected')\n ax.plot(freqs_computed, psd_mx_computed, label='psd_mx_computed')\n ax.plot(freqs_computed, psd_my_computed, label='psd_my_computed')\n ax.plot(freqs_computed, psd_mz_computed, label='psd_mz_computed')\n ax.legend(loc='best')\n fig.savefig('psd_m_McMichaelStiles.png')",
"def init_stitch(N):\n\tif N is None:\n\t\tN = np.int32(HII_DIM) #prepare for stitching\n\tMETA_GRID_SIZE = DIM/N\n\tM = np.int32(HII_DIM/META_GRID_SIZE)\n\t#HII_DIM = np.int32(HII_DIM)\n\tf_pixel_factor = DIM/HII_DIM;\n\tscale = np.float32(BOX_LEN/DIM)\n\tprint 'scale', scale\n\tHII_scale = np.float32(BOX_LEN/HII_DIM)\n\tshape = (DIM,DIM,N)\n\tstitch_grid_size = (DIM/(block_size[0]),\n\t\t\t\t\t\tDIM/(block_size[0]),\n\t\t\t\t\t\tN/(block_size[0]))\n\tHII_stitch_grid_size = (HII_DIM/(block_size[0]),\n\t\t\t\t\t\tHII_DIM/(block_size[0]),\n\t\t\t\t\t\tM/(block_size[0]))\n\t#ratio of large box to small size\n\tkernel_source = open(cmd_folder+\"/initialize_stitch.cu\").read()\n\tkernel_code = kernel_source % {\n\n\t\t'DELTAK': DELTA_K,\n\t\t'DIM': DIM, \n\t\t'VOLUME': VOLUME,\n\t\t'META_BLOCKDIM': N\n\t}\n\tmain_module = nvcc.SourceModule(kernel_code)\n\tinit_stitch = main_module.get_function(\"init_kernel\")\n\tHII_filter = main_module.get_function(\"HII_filter\")\n\tsubsample_kernel = main_module.get_function(\"subsample\")\n\tvelocity_kernel = main_module.get_function(\"set_velocity\")\n\tpspec_texture = main_module.get_texref(\"pspec\")\n\tMRGgen = MRG32k3aRandomNumberGenerator(seed_getter=seed_getter_uniform, offset=0)\n\tplan2d = Plan((np.int64(DIM), np.int64(DIM)), dtype=np.complex64)\n\tplan1d = Plan((np.int64(DIM)), dtype=np.complex64)\n\tprint \"init pspec\"\n\tinterpPspec, interpSize = init_pspec() #interpPspec contains both k array and P array\n\tinterp_cu = cuda.matrix_to_array(interpPspec, order='F')\n\tcuda.bind_array_to_texref(interp_cu, pspec_texture)\n\t#hbox_large = pyfftw.empty_aligned((DIM, DIM, DIM), dtype='complex64')\n\thbox_large = np.zeros((DIM, DIM, DIM), dtype=np.complex64)\n\t#hbox_small = np.zeros(HII_shape, dtype=np.float32)\n\t#hbox_large = n\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\n\t# Set up pinned memory for transfer\n\t#largebox_hs = cuda.aligned_empty(shape=shape, dtype=np.float32, alignment=resource.getpagesize())\n\tlargebox_pin = cuda.pagelocked_empty(shape=shape, dtype=np.float32)\n\tlargecbox_pin = cuda.pagelocked_empty(shape=shape, dtype=np.complex64)\n\n\tlargebox_d = gpuarray.zeros(shape, dtype=np.float32)\n\tlargebox_d_imag = gpuarray.zeros(shape, dtype=np.float32)\n\tprint \"init boxes\"\n\tfor meta_z in xrange(META_GRID_SIZE):\n\t\t# MRGgen = MRG32k3aRandomNumberGenerator(seed_getter=seed_getter_uniform, offset=meta_x*N**3)\n\t\tinit_stitch(largebox_d, DIM, np.int32(meta_z),block=block_size, grid=stitch_grid_size)\n\t\tinit_stitch(largebox_d_imag, DIM, np.int32(meta_z),block=block_size, grid=stitch_grid_size)\n\t\tlargebox_d *= MRGgen.gen_normal(shape, dtype=np.float32)\n\t\tlargebox_d_imag *= MRGgen.gen_normal(shape, dtype=np.float32)\n\t\tlargebox_d = largebox_d + np.complex64(1.j) * largebox_d_imag\n\t\tcuda.memcpy_dtoh_async(largecbox_pin, largebox_d)\n\t\thbox_large[:, :, meta_z*N:(meta_z+1)*N] = largecbox_pin.copy()\n\t#if want to get velocity need to use this\n\tif True:\n\t\tprint \"saving kbox\"\n\t\tnp.save(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN), hbox_large)\n\n\tprint \"Executing FFT on device\"\n\t#hbox_large = pyfftw.interfaces.numpy_fft.ifftn(hbox_large).real\n\thbox_large = fft_stitch(N, plan2d, plan1d, hbox_large, largebox_d).real\n\tprint hbox_large.dtype\n\tprint \"Finished FFT on device\"\n\tnp.save(parent_folder+\"/Boxes/deltax_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN), hbox_large)\n\t\n\tif True:\n\t\tprint \"loading kbox\"\n\t\thbox_large = 
np.load(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN))\n\tfor meta_z in xrange(META_GRID_SIZE):\n\t\tlargebox_pin = hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy()\n\t\t#cuda.memcpy_htod_async(largebox_d, largebox_pin)\n\t\tlargebox_d = gpuarray.to_gpu_async(hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\tHII_filter(largebox_d, DIM, np.int32(meta_z), ZERO, smoothR, block=block_size, grid=stitch_grid_size);\n\t\thbox_large[:, :, meta_z*N:(meta_z+1)*N] = largebox_d.get_async()\n\t#import IPython; IPython.embed()\n\tprint \"Executing FFT on host\"\n\t#hbox_large = hifft(hbox_large).astype(np.complex64).real\n\t#hbox_large = pyfftw.interfaces.numpy_fft.ifftn(hbox_large).real\n\thbox_large = fft_stitch(N, plan2d, plan1d, hbox_large, largebox_d).real\n\tprint \"Finished FFT on host\"\n\t#import IPython; IPython.embed()\n\n\t# for meta_x in xrange(META_GRID_SIZE):\n\t# \tfor meta_y in xrange(META_GRID_SIZE):\n\t# \t\tfor meta_z in xrange(META_GRID_SIZE):\n\t# \t\t\tlargebox_d = gpuarray.to_gpu(hbox_large[meta_x*N:(meta_x+1)*N, meta_y*N:(meta_y+1)*N, meta_z*N:(meta_z+1)*N])\n\t# \t\t\tHII_filter(largebox_d, N, np.int32(meta_x), np.int32(meta_y), np.int32(meta_z), ZERO, smoothR, block=block_size, grid=grid_size);\n\t# \t\t\thbox_large[meta_x*N:(meta_x+1)*N, meta_y*N:(meta_y+1)*N, meta_z*N:(meta_z+1)*N] = largebox_d.get()\n\t#plan = Plan(shape, dtype=np.complex64)\n\t#plan.execute(largebox_d, inverse=True) #FFT to real space of smoothed box\n\t#largebox_d /= VOLUME #divide by VOLUME if using fft (vs ifft)\n\n\n\t# This saves a large resolution deltax\n\n\t\n\tprint \"downsampling\"\n\tsmallbox_d = gpuarray.zeros((HII_DIM,HII_DIM,M), dtype=np.float32)\n\tfor meta_z in xrange(META_GRID_SIZE):\n\t\tlargebox_pin = hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy()\n\t\tcuda.memcpy_dtoh_async(largecbox_pin, largebox_d)\n\t\t#largebox_d = gpuarray.to_gpu_async(hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\tlargebox_d /= scale**3 #\n\t\tsubsample_kernel(largebox_d, smallbox_d, DIM, HII_DIM, PIXEL_FACTOR, block=block_size, grid=HII_stitch_grid_size) #subsample in real space\n\t\thbox_small[:, :, meta_z*M:(meta_z+1)*M] = smallbox_d.get_async()\n\tnp.save(parent_folder+\"/Boxes/smoothed_deltax_z0.00_{0:d}_{1:.0f}Mpc\".format(HII_DIM, BOX_LEN), hbox_small)\n\t#import IPython; IPython.embed()\n\n\n\t# To get velocities: reload the k-space box\n\thbox_large = np.load(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN))\n\thvbox_large = np.zeros((DIM, DIM, DIM), dtype=np.float32)\n\thvbox_small = np.zeros(HII_shape, dtype=np.float32)\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\tlargevbox_d = gpuarray.zeros((DIM,DIM,N), dtype=np.complex64)\n\tsmallvbox_d = gpuarray.zeros((HII_DIM, HII_DIM, M), dtype=np.float32)\n\tfor num, mode in enumerate(['x', 'y', 'z']):\n\t\tfor meta_z in xrange(META_GRID_SIZE):\n\t\t\tlargebox_d = gpuarray.to_gpu_async(hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\t\t#largebox_d /= VOLUME #divide by VOLUME if using fft (vs ifft)\n\t\t\tvelocity_kernel(largebox_d, largevbox_d, DIM, np.int32(meta_z), np.int32(num), block=block_size, grid=stitch_grid_size)\n\t\t\tHII_filter(largevbox_d, DIM, ZERO, smoothR, block=block_size, grid=stitch_grid_size)\n\t\t\tprint hvbox_large.shape, largevbox_d.shape\n\t\t\thvbox_large[:, :, meta_z*N:(meta_z+1)*N] = largevbox_d.get_async()\n\t\thvbox_large = fft_stitch(N, plan2d, plan1d, hvbox_large, largevbox_d).real\n\t\tfor meta_z in xrange(META_GRID_SIZE):\n\t\t\tlargevbox_d = 
gpuarray.to_gpu_async(hvbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\t\tsubsample_kernel(largevbox_d.real, smallvbox_d, DIM, HII_DIM,PIXEL_FACTOR, block=block_size, grid=HII_stitch_grid_size)\n\t\t\thvbox_small[:, :, meta_z*M:(meta_z+1)*M] = smallvbox_d.get_async()\n\t\tnp.save(parent_folder+\"/Boxes/v{0}overddot_{1:d}_{2:.0f}Mpc\".format(mode, HII_DIM, BOX_LEN), smallvbox_d.get())\n\n\treturn",
"def test_2_2_3D_rec_splits(self):\n check = [(-3.0, -2.0, 0.0), (4.0, 10.0, 1.0), (4.0, -2.0, 0.0),\n (4.0, 10.0, 0.0), (4.0, -2.0, 1.0), (-3.0, 10.0, 0.0),\n (-3.0, 10.0, 1.0), (-3.0, -2.0, 1.0), (0.5, 4.0, 0.5),\n (-3.0, 4.0, 0.5), (-3.0, -2.0, 0.5), (-3.0, 4.0, 0.0),\n (0.5, -2.0, 0.5), (0.5, -2.0, 0.0), (0.5, 4.0, 0.0),\n (-1.25, 1.0, 0.25), (4.0, 4.0, 0.5), (4.0, 10.0, 0.5),\n (4.0, 4.0, 1.0), (0.5, 10.0, 0.5), (0.5, 10.0, 1.0),\n (0.5, 4.0, 1.0), (2.25, 7.0, 0.75), (4.0, -2.0, 0.5),\n (4.0, 4.0, 0.0), (2.25, 1.0, 0.25), (0.5, 10.0, 0.0),\n (2.25, 7.0, 0.25), (0.5, -2.0, 1.0), (2.25, 1.0, 0.75),\n (-3.0, 10.0, 0.5), (-1.25, 7.0, 0.25), (-3.0, 4.0, 1.0),\n (-1.25, 7.0, 0.75), (-1.25, 1.0, 0.75), (0.5, 1.0, 0.25),\n (0.5, 4.0, 0.25), (0.5, 1.0, 0.5), (-1.25, 4.0, 0.25),\n (-1.25, 4.0, 0.5), (-1.25, 1.0, 0.5), (-0.375, 2.5, 0.375),\n (-3.0, 1.0, 0.25), (-3.0, -2.0, 0.25), (-3.0, 1.0, 0.0),\n (-1.25, -2.0, 0.25), (-1.25, -2.0, 0.0), (-1.25, 1.0, 0.0),\n (-2.125, -0.5, 0.125), (-3.0, 4.0, 0.25), (-3.0, 1.0, 0.5),\n (-2.125, 2.5, 0.375), (-1.25, -2.0, 0.5),\n (-2.125, -0.5, 0.375), (-1.25, 4.0, 0.0), (-2.125, 2.5, 0.125),\n (0.5, -2.0, 0.25), (-0.375, -0.5, 0.375), (0.5, 1.0, 0.0),\n (-0.375, -0.5, 0.125), (-0.375, 2.5, 0.125), (0.5, 7.0, 0.75),\n (0.5, 4.0, 0.75), (0.5, 7.0, 0.5), (2.25, 4.0, 0.75),\n (2.25, 4.0, 0.5), (2.25, 7.0, 0.5), (1.375, 5.5, 0.625),\n (4.0, 7.0, 0.75), (4.0, 10.0, 0.75), (4.0, 7.0, 1.0),\n (2.25, 10.0, 0.75), (2.25, 10.0, 1.0), (2.25, 7.0, 1.0),\n (3.125, 8.5, 0.875), (4.0, 4.0, 0.75), (4.0, 7.0, 0.5),\n (3.125, 5.5, 0.625), (2.25, 10.0, 0.5), (3.125, 8.5, 0.625),\n (2.25, 4.0, 1.0), (3.125, 5.5, 0.875), (0.5, 10.0, 0.75),\n (1.375, 8.5, 0.625), (0.5, 7.0, 1.0), (1.375, 8.5, 0.875),\n (1.375, 5.5, 0.875), (2.25, 4.0, 0.25), (2.25, 1.0, 0.5),\n (1.375, 2.5, 0.375), (4.0, 1.0, 0.25), (4.0, -2.0, 0.25),\n (4.0, 1.0, 0.0), (2.25, -2.0, 0.25), (2.25, -2.0, 0.0),\n (2.25, 1.0, 0.0), (3.125, -0.5, 0.125), (4.0, 4.0, 0.25),\n (4.0, 1.0, 0.5), (3.125, 2.5, 0.375), (2.25, -2.0, 0.5),\n (3.125, -0.5, 0.375), (2.25, 4.0, 0.0), (3.125, 2.5, 0.125),\n (1.375, -0.5, 0.375), (1.375, -0.5, 0.125),\n (1.375, 2.5, 0.125), (0.5, 7.0, 0.25), (1.375, 5.5, 0.375),\n (4.0, 7.0, 0.25), (4.0, 10.0, 0.25), (4.0, 7.0, 0.0),\n (2.25, 10.0, 0.25), (2.25, 10.0, 0.0), (2.25, 7.0, 0.0),\n (3.125, 8.5, 0.125), (3.125, 5.5, 0.375), (3.125, 8.5, 0.375),\n (3.125, 5.5, 0.125), (0.5, 10.0, 0.25), (1.375, 8.5, 0.375),\n (0.5, 7.0, 0.0), (1.375, 8.5, 0.125), (1.375, 5.5, 0.125),\n (0.5, 1.0, 0.75), (1.375, 2.5, 0.625), (4.0, 1.0, 0.75),\n (4.0, -2.0, 0.75), (4.0, 1.0, 1.0), (2.25, -2.0, 0.75),\n (2.25, -2.0, 1.0), (2.25, 1.0, 1.0), (3.125, -0.5, 0.875),\n (3.125, 2.5, 0.625), (3.125, -0.5, 0.625), (3.125, 2.5, 0.875),\n (0.5, -2.0, 0.75), (1.375, -0.5, 0.625), (0.5, 1.0, 1.0),\n (1.375, -0.5, 0.875), (1.375, 2.5, 0.875), (-1.25, 7.0, 0.5),\n (-0.375, 5.5, 0.375), (-3.0, 7.0, 0.25), (-3.0, 10.0, 0.25),\n (-3.0, 7.0, 0.0), (-1.25, 10.0, 0.25), (-1.25, 10.0, 0.0),\n (-1.25, 7.0, 0.0), (-2.125, 8.5, 0.125), (-3.0, 7.0, 0.5),\n (-2.125, 5.5, 0.375), (-1.25, 10.0, 0.5), (-2.125, 8.5, 0.375),\n (-2.125, 5.5, 0.125), (-0.375, 8.5, 0.375),\n (-0.375, 8.5, 0.125), (-0.375, 5.5, 0.125), (-1.25, 4.0, 0.75),\n (-0.375, 5.5, 0.625), (-3.0, 7.0, 0.75), (-3.0, 10.0, 0.75),\n (-3.0, 7.0, 1.0), (-1.25, 10.0, 0.75), (-1.25, 10.0, 1.0),\n (-1.25, 7.0, 1.0), (-2.125, 8.5, 0.875), (-3.0, 4.0, 0.75),\n (-2.125, 5.5, 0.625), (-2.125, 8.5, 0.625), (-1.25, 4.0, 1.0),\n (-2.125, 5.5, 0.875), (-0.375, 8.5, 0.625),\n (-0.375, 8.5, 
0.875), (-0.375, 5.5, 0.875),\n (-0.375, 2.5, 0.625), (-3.0, 1.0, 0.75), (-3.0, -2.0, 0.75),\n (-3.0, 1.0, 1.0), (-1.25, -2.0, 0.75), (-1.25, -2.0, 1.0),\n (-1.25, 1.0, 1.0), (-2.125, -0.5, 0.875), (-2.125, 2.5, 0.625),\n (-2.125, -0.5, 0.625), (-2.125, 2.5, 0.875),\n (-0.375, -0.5, 0.625), (-0.375, -0.5, 0.875),\n (-0.375, 2.5, 0.875)]\n nn_checks = {(2.25, 7.0, 0.75): [(4.0, 7.0, 0.75), (2.25, 7.0, 1.0),\n (4.0, 7.0, 0.5), (4.0, 7.0, 1.0),\n (4.0, 4.0, 0.75), (1.375, 5.5, 0.875),\n (2.25, 4.0, 1.0), (2.25, 4.0, 0.5),\n (2.25, 4.0, 0.75), (3.125, 8.5, 0.875),\n (3.125, 8.5, 0.625), (4.0, 10.0, 0.75),\n (2.25, 10.0, 1.0), (2.25, 10.0, 0.75),\n (2.25, 10.0, 0.5), (1.375, 8.5, 0.625),\n (1.375, 8.5, 0.875), (0.5, 7.0, 0.75),\n (0.5, 7.0, 0.5), (3.125, 5.5, 0.625),\n (3.125, 5.5, 0.875), (0.5, 10.0, 0.75),\n (0.5, 7.0, 1.0), (0.5, 4.0, 0.75),\n (2.25, 7.0, 0.5), (1.375, 5.5, 0.625)],\n (4.0, -2.0, 0.5): [(4.0, -2.0, 0.75), (4.0, -2.0, 0.25),\n (2.25, 1.0, 0.5), (2.25, -2.0, 0.75),\n (2.25, -2.0, 0.5), (2.25, -2.0, 0.25),\n (4.0, 1.0, 0.25), (4.0, 1.0, 0.75),\n (4.0, 1.0, 0.5), (3.125, -0.5, 0.375),\n (3.125, -0.5, 0.625)],\n (-2.125, -0.5, 0.875): [(-1.25, 1.0, 1.0),\n (-1.25, 1.0, 0.75),\n (-1.25, -2.0, 0.75),\n (-1.25, -2.0, 1.0),\n (-3.0, -2.0, 0.75),\n (-3.0, 1.0, 1.0), (-3, -2, 1),\n (-3.0, 1.0, 0.75)]}\n\n init_triangulation(3, 2, check, nn_checks, bounds=[(-3, 4), (-2, 10), (0, 1)])",
"def test_2D_m4_1k():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: 'gpu_1k',\n Splitting: 'o2'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: '',\n Splitting: 'o2'},\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def test_assembly_inner_product_2_forms(self):\n func_space_lob = FunctionSpace(self.mesh, '2-lobatto', self.p)\n func_space_gauss = FunctionSpace(self.mesh, '2-gauss', self.p)\n func_space_extgauss = FunctionSpace(self.mesh, '2-ext_gauss', self.p)\n\n basis_lob = BasisForm(func_space_lob)\n basis_lob.quad_grid = 'gauss'\n M_lob = inner(basis_lob, basis_lob)\n\n basis_gauss = BasisForm(func_space_gauss)\n basis_gauss.quad_grid = 'lobatto'\n M_gauss = inner(basis_gauss, basis_gauss)\n\n basis_ext_gauss = BasisForm(func_space_extgauss)\n print(basis_ext_gauss.num_basis)\n basis_ext_gauss.quad_grid = 'lobatto'\n M_extgauss = inner(basis_ext_gauss, basis_ext_gauss)\n\n M_lob_ass_ref = assemble_slow(self.mesh, M_lob, func_space_lob.dof_map.dof_map,\n func_space_lob.dof_map.dof_map)\n M_gauss_ass_ref = assemble_slow(self.mesh, M_gauss, func_space_gauss.dof_map.dof_map,\n func_space_gauss.dof_map.dof_map)\n M_extgauss_ass_ref = assemble_slow(\n self.mesh, M_extgauss, func_space_extgauss.dof_map.dof_map_internal, func_space_extgauss.dof_map.dof_map_internal)\n\n M_lob_ass = assemble(M_lob, func_space_lob, func_space_lob).toarray()\n M_gauss_ass = assemble(M_gauss, func_space_gauss, func_space_gauss).toarray()\n M_extgauss_ass = assemble(M_extgauss, func_space_extgauss,\n func_space_extgauss).toarray()\n\n npt.assert_array_almost_equal(M_lob_ass_ref, M_lob_ass)\n npt.assert_array_almost_equal(M_gauss_ass_ref, M_gauss_ass)\n npt.assert_array_almost_equal(M_extgauss_ass_ref, M_extgauss_ass)",
"def test_2D_m6_1k():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L4_2,\n Support: 'gpu_2k',\n Splitting: 'o2'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L4_2,\n Support: '',\n Splitting: 'o2'},\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def subdivideMesh(IKLE,MESHX,MESHY): \n # ~~> Singling out edges\n from matplotlib.tri import Triangulation\n edges = Triangulation(MESHX,MESHY,IKLE).get_cpp_triangulation().get_edges()\n \n # ~~> Memory allocation for new MESH\n IELEM = len(IKLE); IPOIN = len(MESHX); IEDGE = len(edges)\n JKLE = np.zeros((IELEM*4,3),dtype=np.int) # you subdivide every elements by 4\n MESHJ = np.zeros((IEDGE,2),dtype=np.int) # you add one point on every edges\n \n # ~~> Lookup tables for node numbering on common edges\n pa,pb = edges.T\n k1b,k1a = np.sort(np.take(IKLE,[0,1],axis=1)).T\n indx1 = np.searchsorted(pa,k1a)\n jndx1 = np.searchsorted(pa,k1a,side='right')\n k2b,k2a = np.sort(np.take(IKLE,[1,2],axis=1)).T\n indx2 = np.searchsorted(pa,k2a)\n jndx2 = np.searchsorted(pa,k2a,side='right')\n k3b,k3a = np.sort(np.take(IKLE,[2,0],axis=1)).T\n indx3 = np.searchsorted(pa,k3a)\n jndx3 = np.searchsorted(pa,k3a,side='right')\n \n # ~~> Building one triangle at a time /!\\ Please get this loop parallelised\n j = 0\n for i in range(IELEM):\n k1 = indx1[i]+np.searchsorted(pb[indx1[i]:jndx1[i]],k1b[i])\n k2 = indx2[i]+np.searchsorted(pb[indx2[i]:jndx2[i]],k2b[i])\n k3 = indx3[i]+np.searchsorted(pb[indx3[i]:jndx3[i]],k3b[i])\n # ~~> New connectivity JKLE\n JKLE[j] = [IKLE[i][0],IPOIN+k1,IPOIN+k3]\n JKLE[j+1] = [IKLE[i][1],IPOIN+k2,IPOIN+k1]\n JKLE[j+2] = [IKLE[i][2],IPOIN+k3,IPOIN+k2]\n JKLE[j+3] = [IPOIN+k1,IPOIN+k2,IPOIN+k3]\n # ~~> New interpolation references for values and coordinates\n MESHJ[k1] = [IKLE[i][0],IKLE[i][1]]\n MESHJ[k2] = [IKLE[i][1],IKLE[i][2]]\n MESHJ[k3] = [IKLE[i][2],IKLE[i][0]]\n j += 4\n\n # ~~> Reset IPOBO while you are at it\n MESHX = np.resize(MESHX,IPOIN+IEDGE)\n MESHY = np.resize(MESHY,IPOIN+IEDGE)\n MESHX[IPOIN:] = np.sum(MESHX[MESHJ],axis=1)/2.\n MESHY[IPOIN:] = np.sum(MESHY[MESHJ],axis=1)/2.\n neighbours = Triangulation(MESHX,MESHY,JKLE).get_cpp_triangulation().get_neighbors()\n JPOBO = np.zeros(IPOIN+IEDGE,np.int)\n for n in range(IELEM*4):\n s1,s2,s3 = neighbours[n]\n e1,e2,e3 = JKLE[n]\n if s1 < 0:\n JPOBO[e1] = e1+1\n JPOBO[e2] = e2+1\n if s2 < 0:\n JPOBO[e2] = e2+1\n JPOBO[e3] = e3+1\n if s3 < 0:\n JPOBO[e3] = e3+1\n JPOBO[e1] = e1+1\n\n return JKLE,MESHX,MESHY,JPOBO,MESHJ",
"def run_multi_medium_inversion(datadir, outdir, real_data_fnames, MT_green_func_fnames, single_force_green_func_fnames, data_labels, inversion_type, perform_normallised_waveform_inversion, compare_all_waveforms_simultaneously, num_samples, comparison_metric, manual_indices_time_shift_MT, manual_indices_time_shift_SF, nlloc_hyp_filename, cut_phase_start_vals=[], cut_phase_length=0, plot_switch=False, num_processors=1, set_pre_time_shift_values_to_zero_switch=True, only_save_non_zero_solns_switch=False, return_absolute_similarity_values_switch=False, invert_for_ratio_of_multiple_media_greens_func_switch=False, green_func_fnames_split_index=0, green_func_phase_labels=[], invert_for_relative_magnitudes_switch=False, rel_exp_mag_range=[1.,1.], auto_shift_for_best_fit=True):\n \n # Load input data (completely, for specific inversion type):\n real_data_array, green_func_array = get_overall_real_and_green_func_data(datadir, real_data_fnames, MT_green_func_fnames, single_force_green_func_fnames, inversion_type, manual_indices_time_shift_MT=manual_indices_time_shift_MT, manual_indices_time_shift_SF=manual_indices_time_shift_SF, cut_phase_start_vals=cut_phase_start_vals, cut_phase_length=cut_phase_length, set_pre_time_shift_values_to_zero_switch=set_pre_time_shift_values_to_zero_switch, invert_for_ratio_of_multiple_media_greens_func_switch=invert_for_ratio_of_multiple_media_greens_func_switch, green_func_fnames_split_index=green_func_fnames_split_index)\n \n # Do initial check/s:\n if len(green_func_phase_labels)>0:\n if not len(green_func_array[:,0,0,0]) == len(green_func_phase_labels):\n print(\"Error: Greens functions filename array (for medium 1), does not match length of green_func_phase_labels array.\")\n sys.exit()\n \n # Get number of different phases, if specified:\n num_phase_types_for_media_ratios = 0\n if green_func_phase_labels.count(\"P\")>0:\n num_phase_types_for_media_ratios += 1\n if green_func_phase_labels.count(\"S\")>0:\n num_phase_types_for_media_ratios += 1\n if green_func_phase_labels.count(\"surface\")>0:\n num_phase_types_for_media_ratios += 1\n \n # Define a fraction of the second medium to use for the simple least squares inversion:\n frac_medium_2 = 0.5\n green_func_array_for_lsq_inv = (1. 
- frac_medium_2)*green_func_array[:,:,:,0] + frac_medium_2*green_func_array[:,:,:,1]\n \n # Perform the inversion:\n M = perform_inversion(real_data_array, green_func_array_for_lsq_inv)\n M_amplitude = ((np.sum(M**2))**0.5)\n\n # And get forward model synthetic waveform result:\n synth_forward_model_result_array = forward_model(green_func_array_for_lsq_inv, M)\n\n # And plot the results:\n if plot_switch:\n plot_specific_forward_model_result(real_data_array, synth_forward_model_result_array, data_labels, plot_title=\"Initial theoretical inversion solution\", perform_normallised_waveform_inversion=perform_normallised_waveform_inversion)\n\n # And save least squares output:\n # Set output arrays to equal least squares output: \n MTs = M\n similarity_curr_sample, shift_idxs = compare_synth_to_real_waveforms(real_data_array, synth_forward_model_result_array, comparison_metric, perform_normallised_waveform_inversion, compare_all_waveforms_simultaneously, auto_shift_for_best_fit) \n MTp = np.array([similarity_curr_sample])\n # And save data to MTFIT style file:\n outdir_least_squares = outdir+\"/least_squares_result\"\n os.system(\"mkdir -p \"+outdir_least_squares)\n save_to_MTFIT_style_file(MTs, MTp, nlloc_hyp_filename, inversion_type, outdir_least_squares, shift_idxs=shift_idxs) # Saves pickled dictionary containing data from inversion\n # And save most likely solution and real data waveforms to file:\n if inversion_type == \"DC_single_force_couple\" or inversion_type == \"DC_single_force_no_coupling\" or inversion_type == \"DC_crack_couple\" or inversion_type == \"single_force_crack_no_coupling\":\n synth_forward_model_most_likely_result_array = forward_model(green_func_array, MTs[:-1, np.where(MTp==np.max(MTp))[0][0]])\n else:\n synth_forward_model_most_likely_result_array = forward_model(green_func_array, MTs[:, np.where(MTp==np.max(MTp))[0][0]])\n # And get shift associated with most likely model:\n if len(shift_idxs) > 0:\n shift_idxs_most_likely_result = np.array(shift_idxs)\n else:\n shift_idxs_most_likely_result = []\n # And save:\n save_specific_waveforms_to_file(real_data_array, synth_forward_model_most_likely_result_array, data_labels, nlloc_hyp_filename, inversion_type, outdir_least_squares, shift_idxs=shift_idxs_most_likely_result, normallise_data=perform_normallised_waveform_inversion)\n\n # And do Monte Carlo random sampling to obtain PDF of moment tensor:\n MTs, MTp, MTp_absolute, shift_idxs_all_samples = perform_monte_carlo_sampled_waveform_inversion(real_data_array, green_func_array, num_samples, M_amplitude=M_amplitude,inversion_type=inversion_type, comparison_metric=comparison_metric, perform_normallised_waveform_inversion=perform_normallised_waveform_inversion, compare_all_waveforms_simultaneously=compare_all_waveforms_simultaneously, num_processors=num_processors, return_absolute_similarity_values_switch=return_absolute_similarity_values_switch, invert_for_ratio_of_multiple_media_greens_func_switch=invert_for_ratio_of_multiple_media_greens_func_switch, green_func_phase_labels=green_func_phase_labels, num_phase_types_for_media_ratios=num_phase_types_for_media_ratios, invert_for_relative_magnitudes_switch=invert_for_relative_magnitudes_switch, rel_exp_mag_range=rel_exp_mag_range, auto_shift_for_best_fit=auto_shift_for_best_fit)\n\n # Check that probability of output is non-zero:\n if math.isnan(MTp[0]):\n print(\"Error: Sum of probabilities is equal to zero - therefore no adiquate solution could be found and inversion is terminating.\")\n sys.exit()\n \n # Remove zero 
probability values if specified:\n if only_save_non_zero_solns_switch:\n MTp, MTs = remove_zero_prob_results(MTp, MTs)\n\n # And plot most likely solution:\n if plot_switch:\n if inversion_type == \"DC_single_force_couple\" or inversion_type == \"DC_single_force_no_coupling\" or inversion_type == \"DC_crack_couple\" or inversion_type == \"single_force_crack_no_coupling\":\n if invert_for_ratio_of_multiple_media_greens_func_switch:\n if num_phase_types_for_media_ratios>0:\n # Generate different phase fractions:\n tmp_frac_medium_1_diff_phases_dict={}\n tmp_frac_medium_1_diff_phases_dict[\"P\"] = MTs[-3, np.where(MTp==np.max(MTp))[0][0]]\n tmp_frac_medium_1_diff_phases_dict[\"S\"] = MTs[-2, np.where(MTp==np.max(MTp))[0][0]]\n tmp_frac_medium_1_diff_phases_dict[\"surface\"] = MTs[-1, np.where(MTp==np.max(MTp))[0][0]]\n # Create actual greens functions for this solution:\n green_func_array_for_most_likely_amp_ratio = np.zeros(np.shape(green_func_array[:,:,:,0]), dtype=float)\n for j in range(len(green_func_phase_labels)):\n tmp_frac_medium_1 = tmp_frac_medium_1_diff_phases_dict[green_func_phase_labels[j]] # Get fraction for specific phase, for specific greens functions for specific station-phase\n green_func_array_for_most_likely_amp_ratio[j, :, :] = (1. - tmp_frac_medium_1)*green_func_array[j,:,:,0] + tmp_frac_medium_1*green_func_array[j,:,:,1]\n # And get result:\n synth_forward_model_most_likely_result_array = forward_model(green_func_array_for_most_likely_amp_ratio, MTs[:-4, np.where(MTp==np.max(MTp))[0][0]])\n else:\n frac_medium_1 = MTs[-1, np.where(MTp==np.max(MTp))[0][0]]\n green_func_array_for_most_likely_amp_ratio = (1. - frac_medium_1)*green_func_array[:,:,:,0] + frac_medium_1*green_func_array[:,:,:,1]\n synth_forward_model_most_likely_result_array = forward_model(green_func_array_for_most_likely_amp_ratio, MTs[:-2, np.where(MTp==np.max(MTp))[0][0]])\n else:\n synth_forward_model_most_likely_result_array = forward_model(green_func_array, MTs[:-1, np.where(MTp==np.max(MTp))[0][0]])\n else:\n if invert_for_ratio_of_multiple_media_greens_func_switch:\n if num_phase_types_for_media_ratios>0:\n # Generate different phase fractions:\n tmp_frac_medium_1_diff_phases_dict={}\n tmp_frac_medium_1_diff_phases_dict[\"P\"] = MTs[-3, np.where(MTp==np.max(MTp))[0][0]]\n tmp_frac_medium_1_diff_phases_dict[\"S\"] = MTs[-2, np.where(MTp==np.max(MTp))[0][0]]\n tmp_frac_medium_1_diff_phases_dict[\"surface\"] = MTs[-1, np.where(MTp==np.max(MTp))[0][0]]\n # Create actual greens functions for this solution:\n green_func_array_for_most_likely_amp_ratio = np.zeros(np.shape(green_func_array[:,:,:,0]), dtype=float)\n for j in range(len(green_func_phase_labels)):\n tmp_frac_medium_1 = tmp_frac_medium_1_diff_phases_dict[green_func_phase_labels[j]] # Get fraction for specific phase, for specific greens functions for specific station-phase\n green_func_array_for_most_likely_amp_ratio[j, :, :] = (1. - tmp_frac_medium_1)*green_func_array[j,:,:,0] + tmp_frac_medium_1*green_func_array[j,:,:,1]\n # And get result:\n synth_forward_model_most_likely_result_array = forward_model(green_func_array_for_most_likely_amp_ratio, MTs[:-3, np.where(MTp==np.max(MTp))[0][0]])\n else:\n frac_medium_1 = MTs[-1, np.where(MTp==np.max(MTp))[0][0]]\n green_func_array_for_most_likely_amp_ratio = (1. 
- frac_medium_1)*green_func_array[:,:,:,0] + frac_medium_1*green_func_array[:,:,:,1]\n synth_forward_model_most_likely_result_array = forward_model(green_func_array_for_most_likely_amp_ratio, MTs[:-1, np.where(MTp==np.max(MTp))[0][0]])\n else:\n synth_forward_model_most_likely_result_array = forward_model(green_func_array, MTs[:, np.where(MTp==np.max(MTp))[0][0]])\n plot_specific_forward_model_result(real_data_array, synth_forward_model_most_likely_result_array, data_labels, plot_title=\"Most likely Monte Carlo sampled solution\", perform_normallised_waveform_inversion=perform_normallised_waveform_inversion)\n print(\"Most likely solution:\", MTs[:,np.where(MTp==np.max(MTp))[0][0]])\n\n # And save data to MTFIT style file:\n save_to_MTFIT_style_file(MTs, MTp, nlloc_hyp_filename, inversion_type, outdir, MTp_absolute=MTp_absolute, shift_idxs=shift_idxs_all_samples) # Saves pickled dictionary containing data from inversion\n # And save most likely solution and real data waveforms to file:\n synth_forward_model_most_likely_result_array = get_synth_forward_model_most_likely_result(MTs, MTp, green_func_array, inversion_type, invert_for_ratio_of_multiple_media_greens_func_switch=invert_for_ratio_of_multiple_media_greens_func_switch, green_func_phase_labels=green_func_phase_labels, num_phase_types_for_media_ratios=num_phase_types_for_media_ratios)\n # And get shift associated with most likely model:\n if len(shift_idxs_all_samples) > 0:\n shift_idxs_most_likely_result = np.array(shift_idxs_all_samples)[0, np.where(MTp==np.max(MTp))[0][0]]\n else:\n shift_idxs_most_likely_result = []\n # And save:\n save_specific_waveforms_to_file(real_data_array, synth_forward_model_most_likely_result_array, data_labels, nlloc_hyp_filename, inversion_type, outdir, shift_idxs=shift_idxs_most_likely_result, normallise_data=perform_normallised_waveform_inversion)\n \n print(\"Finished\")",
"def test_2D_m6_2k():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L4_2,\n Support: 'gpu_2k',\n Splitting: 'o2'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L4_2,\n Support: '',\n Splitting: 'o2'},\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def lpt_prototype(mesh,\n nc=FLAGS.nc,\n bs=FLAGS.box_size,\n batch_size=FLAGS.batch_size,\n a0=FLAGS.a0,\n a=FLAGS.af,\n nsteps=FLAGS.nsteps):\n\n stages = np.linspace(a0, a, nsteps, endpoint=True)\n klin = np.loadtxt('../flowpm/data/Planck15_a1p00.txt').T[0]\n plin = np.loadtxt('../flowpm/data/Planck15_a1p00.txt').T[1]\n ipklin = iuspline(klin, plin)\n\n # Define the named dimensions\n # Parameters of the small scales decomposition\n n_block_x = FLAGS.nx\n n_block_y = FLAGS.ny\n n_block_z = 1\n halo_size = FLAGS.hsize\n\n if halo_size >= 0.5 * min(nc // n_block_x, nc // n_block_y, nc // n_block_z):\n new_size = int(0.5 *\n min(nc // n_block_x, nc // n_block_y, nc // n_block_z))\n print('WARNING: REDUCING HALO SIZE from %d to %d' % (halo_size, new_size))\n halo_size = new_size\n\n # Parameters of the large scales decomposition\n downsampling_factor = FLAGS.dsample\n lnc = nc // 2**downsampling_factor\n\n #\n\n fx_dim = mtf.Dimension(\"nx\", nc)\n fy_dim = mtf.Dimension(\"ny\", nc)\n fz_dim = mtf.Dimension(\"nz\", nc)\n\n tfx_dim = mtf.Dimension(\"tx\", nc)\n tfy_dim = mtf.Dimension(\"ty\", nc)\n tfz_dim = mtf.Dimension(\"tz\", nc)\n\n # Dimensions of the low resolution grid\n x_dim = mtf.Dimension(\"nx_lr\", lnc)\n y_dim = mtf.Dimension(\"ny_lr\", lnc)\n z_dim = mtf.Dimension(\"nz_lr\", lnc)\n\n tx_dim = mtf.Dimension(\"tx_lr\", lnc)\n ty_dim = mtf.Dimension(\"ty_lr\", lnc)\n tz_dim = mtf.Dimension(\"tz_lr\", lnc)\n\n nx_dim = mtf.Dimension('nx_block', n_block_x)\n ny_dim = mtf.Dimension('ny_block', n_block_y)\n nz_dim = mtf.Dimension('nz_block', n_block_z)\n\n sx_dim = mtf.Dimension('sx_block', nc // n_block_x)\n sy_dim = mtf.Dimension('sy_block', nc // n_block_y)\n sz_dim = mtf.Dimension('sz_block', nc // n_block_z)\n\n k_dims = [tx_dim, ty_dim, tz_dim]\n\n batch_dim = mtf.Dimension(\"batch\", batch_size)\n pk_dim = mtf.Dimension(\"npk\", len(plin))\n pk = mtf.import_tf_tensor(mesh, plin.astype('float32'), shape=[pk_dim])\n\n # Compute necessary Fourier kernels\n kvec = flowpm.kernels.fftk((nc, nc, nc), symmetric=False)\n kx = mtf.import_tf_tensor(mesh,\n kvec[0].squeeze().astype('float32'),\n shape=[tfx_dim])\n ky = mtf.import_tf_tensor(mesh,\n kvec[1].squeeze().astype('float32'),\n shape=[tfy_dim])\n kz = mtf.import_tf_tensor(mesh,\n kvec[2].squeeze().astype('float32'),\n shape=[tfz_dim])\n kv = [ky, kz, kx]\n\n # kvec for low resolution grid\n kvec_lr = flowpm.kernels.fftk([lnc, lnc, lnc], symmetric=False)\n\n kx_lr = mtf.import_tf_tensor(mesh,\n kvec_lr[0].squeeze().astype('float32') /\n 2**downsampling_factor,\n shape=[tx_dim])\n ky_lr = mtf.import_tf_tensor(mesh,\n kvec_lr[1].squeeze().astype('float32') /\n 2**downsampling_factor,\n shape=[ty_dim])\n kz_lr = mtf.import_tf_tensor(mesh,\n kvec_lr[2].squeeze().astype('float32') /\n 2**downsampling_factor,\n shape=[tz_dim])\n kv_lr = [ky_lr, kz_lr, kx_lr]\n\n # kvec for high resolution blocks\n padded_sx_dim = mtf.Dimension('padded_sx_block',\n nc // n_block_x + 2 * halo_size)\n padded_sy_dim = mtf.Dimension('padded_sy_block',\n nc // n_block_y + 2 * halo_size)\n padded_sz_dim = mtf.Dimension('padded_sz_block',\n nc // n_block_z + 2 * halo_size)\n kvec_hr = flowpm.kernels.fftk([\n nc // n_block_x + 2 * halo_size, nc // n_block_y + 2 * halo_size,\n nc // n_block_z + 2 * halo_size\n ],\n symmetric=False)\n\n kx_hr = mtf.import_tf_tensor(mesh,\n kvec_hr[0].squeeze().astype('float32'),\n shape=[padded_sx_dim])\n ky_hr = mtf.import_tf_tensor(mesh,\n kvec_hr[1].squeeze().astype('float32'),\n shape=[padded_sy_dim])\n kz_hr = 
mtf.import_tf_tensor(mesh,\n kvec_hr[2].squeeze().astype('float32'),\n shape=[padded_sz_dim])\n kv_hr = [ky_hr, kz_hr, kx_hr]\n\n shape = [batch_dim, fx_dim, fy_dim, fz_dim]\n lr_shape = [batch_dim, x_dim, y_dim, z_dim]\n hr_shape = [batch_dim, nx_dim, ny_dim, nz_dim, sx_dim, sy_dim, sz_dim]\n part_shape = [batch_dim, fx_dim, fy_dim, fz_dim]\n\n # Begin simulation\n\n initc = mtfpm.linear_field(mesh, shape, bs, nc, pk, kv)\n\n # Reshaping array into high resolution mesh\n field = mtf.slicewise(lambda x: tf.expand_dims(\n tf.expand_dims(tf.expand_dims(x, axis=1), axis=1), axis=1), [initc],\n output_dtype=tf.float32,\n output_shape=hr_shape,\n name='my_reshape',\n splittable_dims=lr_shape[:-1] + hr_shape[1:4] +\n part_shape[1:3])\n\n for block_size_dim in hr_shape[-3:]:\n field = mtf.pad(field, [halo_size, halo_size], block_size_dim.name)\n\n for blocks_dim, block_size_dim in zip(hr_shape[1:4], field.shape[-3:]):\n field = mpm.halo_reduce(field, blocks_dim, block_size_dim, halo_size)\n\n field = mtf.reshape(field, field.shape + [mtf.Dimension('h_dim', 1)])\n high = field\n low = mesh_utils.downsample(field, downsampling_factor, antialias=True)\n\n low = mtf.reshape(low, low.shape[:-1])\n high = mtf.reshape(high, high.shape[:-1])\n\n for block_size_dim in hr_shape[-3:]:\n low = mtf.slice(low, halo_size // 2**downsampling_factor,\n block_size_dim.size // 2**downsampling_factor,\n block_size_dim.name)\n # Hack usisng custom reshape because mesh is pretty dumb\n low = mtf.slicewise(lambda x: x[:, 0, 0, 0], [low],\n output_dtype=tf.float32,\n output_shape=lr_shape,\n name='my_dumb_reshape',\n splittable_dims=lr_shape[:-1] + hr_shape[:4])\n\n state = mtfpm.lpt_init(\n low,\n high,\n 0.1,\n kv_lr,\n kv_hr,\n halo_size,\n hr_shape,\n lr_shape,\n part_shape[1:],\n downsampling_factor=downsampling_factor,\n antialias=True,\n )\n\n # Here we can run our nbody\n final_state = state #mtfpm.nbody(state, stages, lr_shape, hr_shape, k_dims, kv_lr, kv_hr, halo_size, downsampling_factor=downsampling_factor)\n\n # paint the field\n final_field = mtf.zeros(mesh, shape=hr_shape)\n for block_size_dim in hr_shape[-3:]:\n final_field = mtf.pad(final_field, [halo_size, halo_size],\n block_size_dim.name)\n final_field = mesh_utils.cic_paint(final_field, final_state[0], halo_size)\n # Halo exchange\n for blocks_dim, block_size_dim in zip(hr_shape[1:4], final_field.shape[-3:]):\n final_field = mpm.halo_reduce(final_field, blocks_dim, block_size_dim,\n halo_size)\n # Remove borders\n for block_size_dim in hr_shape[-3:]:\n final_field = mtf.slice(final_field, halo_size, block_size_dim.size,\n block_size_dim.name)\n\n #final_field = mtf.reshape(final_field, [batch_dim, fx_dim, fy_dim, fz_dim])\n # Hack usisng custom reshape because mesh is pretty dumb\n final_field = mtf.slicewise(lambda x: x[:, 0, 0, 0], [final_field],\n output_dtype=tf.float32,\n output_shape=[batch_dim, fx_dim, fy_dim, fz_dim],\n name='my_dumb_reshape',\n splittable_dims=part_shape[:-1] + hr_shape[:4])\n\n return initc, final_field\n\n ##"
]
| [
"0.64638543",
"0.61983424",
"0.61519486",
"0.60451084",
"0.5888241",
"0.58734226",
"0.5817478",
"0.57815385",
"0.5780573",
"0.5759568",
"0.57414657",
"0.5736586",
"0.5728689",
"0.5697968",
"0.5688739",
"0.567302",
"0.5628698",
"0.5611948",
"0.5586767",
"0.55747586",
"0.5564174",
"0.5552587",
"0.5540207",
"0.55318016",
"0.55262005",
"0.5505894",
"0.5465661",
"0.54522014",
"0.5450897",
"0.5447176"
]
| 0.65938306 | 0 |
Testing M8 remeshing formula in 2D, 2 kernel, simple precision, o2_FullHalf splitting. | def test_2D_m8_2k_sFH():
scal, velo = setup_2D()
advec = Advection(velo, scal, discretization=d2d,
method={TimeIntegrator: RK2,
Interpolation: Linear,
Remesh: M8Prime,
Support: 'gpu_2k',
Splitting: 'o2_FullHalf'}
)
advec_py = Advection(velo, scal, discretization=d2d,
method={TimeIntegrator: RK2,
Interpolation: Linear,
Remesh: M8Prime,
Support: '',
Splitting: 'o2_FullHalf'}
)
assertion_2D_withPython(scal, velo, advec, advec_py) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_2D_m8_1k_sFH():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: 'gpu_1k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def test_2D_m8_2k():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: 'gpu_2k',\n Splitting: 'o2'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: '',\n Splitting: 'o2'},\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def test_2D_m8_1k():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: 'gpu_1k',\n Splitting: 'o2'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: '',\n Splitting: 'o2'},\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def test_2D_m4_1k_sFH():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: 'gpu_1k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def test_2D_m4_2k_sFH():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: 'gpu_2k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def test_2D_m6_2k_sFH():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L4_2,\n Support: 'gpu_2k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L4_2,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def test_filter_l2_1():\n box = Box(length=L, origin=O)\n f = Field(box, formula=func, name='f0')\n d_fine = Discretization([513, 513, 513])\n d_coarse = Discretization([257, 257, 257], ghosts=[2, 2, 2])\n op = MultiresolutionFilter(d_in=d_fine, d_out=d_coarse,\n variables={f: d_coarse},\n method={Remesh: L2_1, })\n op.discretize()\n op.setup()\n topo_coarse = op.discreteFields[f].topology\n topo_fine = [t for t in f.discreteFields.keys()\n if not t is topo_coarse][0]\n f.initialize(topo=topo_fine)\n f_out = f.discreteFields[topo_coarse]\n op.apply(simu)\n valid = [npw.zeros(f_out[0].shape), ]\n valid = func(valid, *topo_coarse.mesh.coords)\n assert np.allclose(valid[0][topo_coarse.mesh.iCompute],\n f_out[0][topo_coarse.mesh.iCompute]), \\\n np.max(np.abs(valid[0][topo_coarse.mesh.iCompute] -\n f_out[0][topo_coarse.mesh.iCompute]))",
"def _apply_array_spatial12_halffilling(self, h1e: 'Nparray',\n h2e: 'Nparray') -> 'Nparray':\n if fqe.settings.use_accelerated_code:\n return self._apply_array_spatial12_lm(h1e, h2e)\n else:\n h1e = copy.deepcopy(h1e)\n h2e = numpy.moveaxis(copy.deepcopy(h2e), 1, 2) * (-1.0)\n norb = self.norb()\n for k in range(norb):\n h1e[:, :] -= h2e[:, k, k, :]\n\n if numpy.iscomplex(h1e).any() or numpy.iscomplex(h2e).any():\n dvec = self.calculate_dvec_spatial()\n out = numpy.einsum(\"ij,ijkl->kl\", h1e, dvec)\n dvec = numpy.einsum(\"ijkl,klmn->ijmn\", h2e, dvec)\n out += self._calculate_coeff_spatial_with_dvec(dvec)\n else:\n nij = norb * (norb + 1) // 2\n h1ec = numpy.zeros((nij), dtype=self._dtype)\n h2ec = numpy.zeros((nij, nij), dtype=self._dtype)\n for i in range(norb):\n for j in range(i + 1):\n ijn = j + i * (i + 1) // 2\n h1ec[ijn] = h1e[i, j]\n for k in range(norb):\n for l in range(k + 1):\n kln = l + k * (k + 1) // 2\n h2ec[ijn, kln] = h2e[i, j, k, l]\n dvec = self._calculate_dvec_spatial_compressed()\n out = numpy.einsum(\"i,ikl->kl\", h1ec, dvec)\n dvec = numpy.einsum(\"ik,kmn->imn\", h2ec, dvec)\n for i in range(self.norb()):\n for j in range(self.norb()):\n ijn = min(i, j) + max(i, j) * (max(i, j) + 1) // 2\n work = self._core.alpha_map(j, i)\n for source, target, parity in work:\n out[source, :] += dvec[ijn, target, :] * parity\n work = self._core.beta_map(j, i)\n for source, target, parity in work:\n out[:, source] += dvec[ijn, :, target] * parity\n\n return out",
"def test_2D_m6_1k_sFH():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L4_2,\n Support: 'gpu_2k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L4_2,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def test_1_2_2D_cube_splits(self):\n check = [(0, 0), (1, 1), (1, 0), (0, 1), (0.5, 0.5), (0.0, 0.5),\n (0.5, 0.0),\n (0.25, 0.25), (1.0, 0.5), (0.5, 1.0), (0.75, 0.75),\n (0.75, 0.25),\n (0.25, 0.75), (0.5, 0.25), (0.25, 0.5), (0.375, 0.375),\n (0.0, 0.25),\n (0.25, 0.0), (0.125, 0.125), (0.125, 0.375), (0.375, 0.125),\n (0.5, 0.75), (0.75, 0.5), (0.625, 0.625), (1.0, 0.75),\n (0.75, 1.0),\n (0.875, 0.875), (0.875, 0.625), (0.625, 0.875), (0.625, 0.375),\n (1.0, 0.25), (0.75, 0.0), (0.875, 0.125), (0.875, 0.375),\n (0.625, 0.125), (0.375, 0.625), (0.0, 0.75), (0.25, 1.0),\n (0.125, 0.875), (0.125, 0.625), (0.375, 0.875)]\n\n nn_checks = {(0, 0): [(0.25, 0.0), (0.0, 0.25), (0.125, 0.125)],\n (0.625, 0.375): [(0.5, 0.5), (0.75, 0.25), (0.75, 0.5),\n (0.5, 0.25)],\n (0, 1): [(0.25, 1.0), (0.125, 0.875),(0.0, 0.75)],\n (0.625, 0.125): [(0.5, 0.0), (0.75, 0.25), (0.75, 0.0),\n (0.5, 0.25)]}\n\n\n init_triangulation(2, 2, check, nn_checks)",
"def _apply_array_spin12_halffilling(self, h1e: 'Nparray',\n h2e: 'Nparray') -> 'Nparray':\n if fqe.settings.use_accelerated_code:\n #return self._apply_array_spin12_blocked(h1e, h2e)\n return self._apply_array_spin12_lm(h1e, h2e)\n else:\n h1e = copy.deepcopy(h1e)\n h2e = numpy.moveaxis(copy.deepcopy(h2e), 1, 2) * (-1.0)\n norb = self.norb()\n for k in range(norb * 2):\n h1e[:, :] -= h2e[:, k, k, :]\n\n (dveca, dvecb) = self.calculate_dvec_spin()\n out = numpy.einsum(\"ij,ijkl->kl\", h1e[:norb, :norb], dveca) \\\n + numpy.einsum(\"ij,ijkl->kl\", h1e[norb:, norb:], dvecb)\n ndveca = numpy.einsum(\"ijkl,klmn->ijmn\",\n h2e[:norb, :norb, :norb, :norb], dveca) \\\n + numpy.einsum(\"ijkl,klmn->ijmn\",\n h2e[:norb, :norb, norb:, norb:], dvecb)\n ndvecb = numpy.einsum(\"ijkl,klmn->ijmn\",\n h2e[norb:, norb:, :norb, :norb], dveca) \\\n + numpy.einsum(\"ijkl,klmn->ijmn\",\n h2e[norb:, norb:, norb:, norb:], dvecb)\n out += self._calculate_coeff_spin_with_dvec((ndveca, ndvecb))\n return out",
"def test_3D_m8_2k_sFH():\n scal, velo = setup_3D()\n\n advec = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: 'gpu_2k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_3D_withPython(scal, velo, advec, advec_py)",
"def test_1_2_2D_rec_splits(self):\n check = [(3.0, -2.0), (7.0, -1.0), (7.0, -2.0), (3.0, -1.0),\n (5.0, -1.5), (3.0, -1.5), (5.0, -2.0), (4.0, -1.75),\n (7.0, -1.5), (5.0, -1.0), (6.0, -1.25), (6.0, -1.75),\n (4.0, -1.25), (5.0, -1.75), (4.0, -1.5), (4.5, -1.625),\n (3.0, -1.75), (4.0, -2.0), (3.5, -1.875), (3.5, -1.625),\n (4.5, -1.875), (5.0, -1.25), (6.0, -1.5), (5.5, -1.375),\n (7.0, -1.25), (6.0, -1.0), (6.5, -1.125), (6.5, -1.375),\n (5.5, -1.125), (5.5, -1.625), (7.0, -1.75), (6.0, -2.0),\n (6.5, -1.875), (6.5, -1.625), (5.5, -1.875), (4.5, -1.375),\n (3.0, -1.25), (4.0, -1.0), (3.5, -1.125), (3.5, -1.375),\n (4.5, -1.125)]\n nn_checks = {(3.0, -2.0): [(3.0, -1.75), (3.5, -1.875), (4.0, -2.0)],\n (5.0, -1.75): [(5.0, -2.0), (5.0, -1.5), (5.5, -1.625),\n (5.5, -1.875), (4.5, -1.625), (6.0, -1.75),\n (4.5, -1.875), (4.0, -1.75)],\n (6.0, -2.0): [(5.0, -2.0), (5.5, -1.875), (6.0, -1.75),\n (6.5, -1.875), (7, -2)],\n (4.5, -1.125): [(5.0, -1.0), (4.0, -1.25), (5.0, -1.25),\n (4.0, -1.0)]}\n\n init_triangulation(2, 2, check, nn_checks, bounds=[(3, 7), (-2, -1)])",
"def test_half_case(self):\n steps = save_divide(np.ones(2), 2 * np.ones(2))\n np.testing.assert_equal(steps, 0.5 * np.ones(2))",
"def test_sw2():\n B1 = 100\n B2 = 200\n h = 18\n t = 1\n H = h + 2 * t\n E1 = 20000\n E2 = 10000\n sections = ((B1, t, 0, E1), (B2, t, h + t, E2))\n EI, top, bot = bm.EI(sections, E1)\n EIc = E1 * B1 * (H ** 3 - h ** 3) / 12\n assert 0.99 < EI / EIc < 1.01",
"def test_dmi_uses_unit_length_2dmesh():\n A = 8.78e-12 # J/m\n D = 1.58e-3 # J/m^2\n Ms = 3.84e5 # A/m\n\n energies = []\n\n # unit_lengths 1e-9 and 1 are common, let's throw in an intermediate length\n # just to challenge the system a little:\n for unit_length in (1, 1e-4, 1e-9):\n radius = 200e-9 / unit_length\n maxh = 5e-9 / unit_length\n helical_period = (4 * pi * A / D) / unit_length\n k = 2 * pi / helical_period\n # HF 27 April 2014: The next command fails in dolfin 1.3\n # mesh = df.CircleMesh(df.Point(0, 0), radius, maxh)\n # The actual shape of the domain shouldn't matter for the test,\n # so let's use a Rectangular mesh which should work the same:\n\n nx = ny = int(round(radius / maxh))\n mesh = df.RectangleMesh(df.Point(0, 0), df.Point(radius, radius), nx, ny)\n\n S3 = df.VectorFunctionSpace(mesh, \"CG\", 1, dim=3)\n m_expr = df.Expression((\"0\", \"cos(k * x[0])\", \"sin(k * x[0])\"), k=k, degree=1)\n m = Field(S3, m_expr, name='m')\n dmi = DMI(D)\n Ms_dg = Field(df.FunctionSpace(mesh, 'DG', 0), Ms)\n dmi.setup(m, Ms_dg, unit_length=unit_length)\n energies.append(dmi.compute_energy())\n\n H = df.Function(S3)\n H.vector()[:] = dmi.compute_field()\n print H(0.0, 0.0)\n\n print \"Using unit_length = {}.\".format(unit_length)\n print \"Helical period {}.\".format(helical_period)\n print \"Energy {}.\".format(dmi.compute_energy())\n\n rel_diff_energies = abs(energies[0] - energies[1]) / abs(energies[1])\n print \"Relative difference of energy {}.\".format(rel_diff_energies)\n assert rel_diff_energies < 1e-13\n\n rel_diff_energies2 = abs(energies[0] - energies[2]) / abs(energies[2])\n print \"Relative difference2 of energy {}.\".format(rel_diff_energies2)\n assert rel_diff_energies2 < 1e-13",
"def test_2D_m6_2k():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L4_2,\n Support: 'gpu_2k',\n Splitting: 'o2'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L4_2,\n Support: '',\n Splitting: 'o2'},\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def test_multiple_case(self):\r\n\r\n shp = (3, 3)\r\n fx, fy, fz, fw = fmatrices('xyzw')\r\n dx, dy, dz, dw = dmatrices('xyzw')\r\n fv = fvector('r').dimshuffle('x', 0)\r\n dv = dvector('s').dimshuffle('x', 0)\r\n fxv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fyv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fzv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fwv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float32').reshape(1, shp[0])\r\n dxv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dyv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dzv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dwv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float64').reshape(1, shp[0])\r\n\r\n #We must be sure that the Canonizer is working, but that we don't have other\r\n # optimisation that could hide bug in the Canonizer as local_elemwise_fusion\r\n mode = compile.mode.get_default_mode()\r\n old_optimizer = mode._optimizer\r\n try:\r\n mode._optimizer = gof.Query([\"canonicalize\"])\r\n mode._optimizer = mode._optimizer.including('ShapeOpt')\r\n mode._optimizer = mode._optimizer.excluding(\r\n 'local_elemwise_fusion')\r\n\r\n #test x / x -> 1\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([(fx/fx,[fx],[fxv],'float32'),\r\n (dx/dx,[dx],[dxv],'float64'),\r\n (fv/fv,[fv],[fvv],'float32'),\r\n (dv/dv,[dv],[dvv],'float64'),\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert (out == numpy.ones(shp, dtype=out_dtype)).all()\r\n topo = f.maker.fgraph.toposort()\r\n if sym_inputs[0].broadcastable[0]:\r\n assert len(topo) == 2\r\n assert isinstance(topo[0].op, Shape_i)\r\n assert isinstance(topo[1].op, tensor.Alloc)\r\n else:\r\n assert len(topo) == 3\r\n assert isinstance(topo[0].op, Shape_i)\r\n assert isinstance(topo[1].op, Shape_i)\r\n assert isinstance(topo[2].op, tensor.Alloc)\r\n assert(out_dtype == out.dtype)\r\n\r\n #test (x * y) / x -> y\r\n for id,(g, sym_inputs, val_inputs, nb_elemwise, out_dtype) in enumerate([\r\n ((dx*dy)/dx,[dx,dy],[dxv,dyv],0,'float64'),\r\n ((fx*fy)/fx,[fx,fy],[fxv,fyv],0,'float32'),\r\n ((dv*dy)/dv,[dv,dy],[dvv,dyv],0,'float64'),\r\n ((fv*fy)/fv,[fv,fy],[fvv,fyv],0,'float32'),\r\n #must broadcast as their is a dimshuffle in the computation\r\n ((dx*dv)/dx,[dx,dv],[dxv,dvv],1,'float64'),\r\n #topo: [Elemwise{second,no_inplace}(x, <TensorType(float64, row)>)]\r\n ((fx*fv)/fx,[fx,fv],[fxv,fvv],1,'float32')\r\n #topo: [Elemwise{second,no_inplace}(x, <TensorType(float32, row)>)]\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert(out_dtype == out.dtype)\r\n assert numpy.allclose(out, val_inputs[1])\r\n topo = f.maker.fgraph.toposort()\r\n print \"ID TOPO\", id, topo, sym_inputs\r\n for r, t in f.maker.fgraph.shape_feature.shape_of.items():\r\n print ' ', r, t\r\n if topo and not(len(topo)==1 and topo[0].op==deep_copy_op):\r\n for node in topo[:-1]:\r\n assert isinstance(node.op, Shape_i)\r\n assert isinstance(topo[-1].op, tensor.Alloc)\r\n\r\n #test x / y / x -> 1 / y\r\n for id,(g, sym_inputs, val_inputs, nb_elemwise, out_dtype) in enumerate([\r\n ((dx/dy)/dx,[dx,dy],[dxv,dyv],1,'float64'),\r\n ((fx/fy)/fx,[fx,fy],[fxv,fyv],1,'float32'),\r\n ((dv/dy)/dv,[dv,dy],[dvv,dyv],1,'float64'),\r\n 
((fv/fy)/fv,[fv,fy],[fvv,fyv],1,'float32'),\r\n #must broadcast as their is a dimshuffle in the computation\r\n\r\n ((dx/dv)/dx,[dx,dv],[dxv,dvv],1,'float64'),\r\n #topo: [Shape_i, Shape_i, Elemwise{inv,no_inplace}(<TensorType(float64, row)>), Alloc]\r\n ((fx/fv)/fx,[fx,fv],[fxv,fvv],1,'float32'),\r\n #topo:[Shape_i, Shape_i, Elemwise{inv,no_inplace}(<TensorType(float32, row)>), Alloc]\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, (1 / val_inputs[1]))\r\n topo = f.maker.fgraph.toposort()\r\n print topo\r\n elem = [t for t in topo if isinstance(t.op, T.Elemwise)]\r\n assert len(elem) == nb_elemwise\r\n assert isinstance(elem[0].op, (T.Elemwise, ))\r\n assert isinstance(elem[0].op.scalar_op, (\r\n theano.scalar.basic.Inv, theano.scalar.basic.TrueDiv))\r\n assert(out_dtype == out.dtype)\r\n\r\n #test (a / b) * (b / c) * (c / d) -> a / d\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n ((dx / dy) * (dy / dz) * (dz / dw),[dx,dy,dz,dw],[dxv,dyv,dzv,dwv],'float64'),\r\n ((fx / fy) * (fy / fz) * (fz / fw),[fx,fy,fz,fw],[fxv,fyv,fzv,fwv],'float32'),\r\n ((dv / dy) * (dy / dz) * (dz / dw),[dv,dy,dz,dw],[dvv,dyv,dzv,dwv],'float64'),\r\n ((fv / fy) * (fy / fz) * (fz / fw),[fv,fy,fz,fw],[fvv,fyv,fzv,fwv],'float32'),\r\n ((dx / dv) * (dv / dz) * (dz / dw),[dx,dv,dz,dw],[dxv,dvv,dzv,dwv],'float64'),\r\n ((fx / fv) * (fv / fz) * (fz / fw),[fx,fv,fz,fw],[fxv,fvv,fzv,fwv],'float32'),\r\n ((dx / dy) * (dy / dv) * (dv / dw),[dx,dy,dv,dw],[dxv,dyv,dvv,dwv],'float64'),\r\n ((fx / fy) * (fy / fv) * (fv / fw),[fx,fy,fv,fw],[fxv,fyv,fvv,fwv],'float32'),\r\n ((dx / dy) * (dy / dz) * (dz / dv),[dx,dy,dz,dv],[dxv,dyv,dzv,dvv],'float64'),\r\n ((fx / fy) * (fy / fz) * (fz / fv),[fx,fy,fz,fv],[fxv,fyv,fzv,fvv],'float32'),\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, (val_inputs[0] / val_inputs[3]))\r\n topo = f.maker.fgraph.toposort()\r\n assert len(topo) == 1\r\n assert isinstance(topo[0].op, (T.Elemwise, ))\r\n assert isinstance(topo[0].op.scalar_op,\r\n theano.scalar.basic.TrueDiv)\r\n assert len(topo[0].inputs) == 2\r\n assert(out_dtype == out.dtype)\r\n\r\n #test (2.0 * x) / (4.0 * y) -> (0.5 * x) / y\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n (((2.0*dx)/(4.0*dy)),[dx,dy],[dxv,dyv],'float64'),\r\n (((2.0*fx)/(4.0*fy)),[fx,fy],[fxv,fyv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n (((2.0*dv)/(4.0*dy)),[dv,dy],[dvv,dyv],'float64'),\r\n (((2.0*fv)/(4.0*fy)),[fv,fy],[fvv,fyv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n (((2.0*dx)/(4.0*dv)),[dx,dv],[dxv,dvv],'float64'),\r\n (((2.0*fx)/(4.0*fv)),[fx,fv],[fxv,fvv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ]):\r\n\r\n if isinstance(out_dtype, dict):\r\n out_dtype = out_dtype[config.cast_policy]\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, (0.5 *\r\n val_inputs[0] / val_inputs[1]))\r\n topo = f.maker.fgraph.toposort()\r\n assert len(topo) == 2\r\n assert isinstance(topo[0].op, (T.Elemwise, ))\r\n assert isinstance(topo[0].op.scalar_op,\r\n theano.scalar.basic.Mul)\r\n assert len(topo[0].inputs) == 2\r\n assert isinstance(topo[1].op, (T.Elemwise, ))\r\n assert isinstance(topo[1].op.scalar_op,\r\n theano.scalar.basic.TrueDiv)\r\n assert len(topo[1].inputs) == 2\r\n assert(out_dtype == 
out.dtype)\r\n\r\n #test 2 * x / 2 -> x\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n ((2*dx)/2,[dx],[dxv],'float64'),\r\n ((2*fx)/2,[fx],[fxv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ((2*dv)/2,[dv],[dvv],'float64'),\r\n ((2*fv)/2,[fv],[fvv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ]):\r\n if isinstance(out_dtype, dict):\r\n out_dtype = out_dtype[config.cast_policy]\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, val_inputs[0])\r\n topo = f.maker.fgraph.toposort()\r\n assert len(topo) == 1\r\n topo[0].op == deep_copy_op\r\n assert(out_dtype == out.dtype)\r\n\r\n #test x / abs(x) -> sign(x)\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n (dx/abs(dx),[dx],[0.5-dxv],'float64'),\r\n (fx/abs(fx),[fx],[0.5-fxv], 'float32'),\r\n (dx/abs(dx),[dx],[0.1*dxv],'float64'),\r\n (fx/abs(fx),[fx],[0.1*fxv], 'float32'),\r\n (dv/abs(dv),[dv],[0.5-dvv],'float64'),\r\n (fv/abs(fv),[fv],[0.5-fvv], 'float32'),\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.all(numpy.isfinite(out))\r\n assert numpy.allclose(out, numpy.sign(val_inputs[0]))\r\n assert(out_dtype == out.dtype)\r\n assert len(f.maker.fgraph.toposort()) == 1\r\n\r\n #test (2*x) / (3*abs(x)) -> sign(x)\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n ((2 * dx) / (3 * abs(dx)), [dx], [0.5 - dxv], 'float64'),\r\n ((2 * fx) / (3 * abs(fx)), [fx], [0.5 - fxv],\r\n {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ((2 * dx) / (3 * abs(dx)), [dx], [0.1 * dxv], 'float64'),\r\n ((2 * fx) / (3 * abs(fx)), [fx], [0.1 * fxv],\r\n {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ((2 * dv) / (3 * abs(dv)), [dv], [0.5 - dvv], 'float64'),\r\n ((2 * fv) / (3 * abs(fv)), [fv], [0.5 - fvv],\r\n {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ]):\r\n\r\n if isinstance(out_dtype, dict):\r\n out_dtype = out_dtype[config.cast_policy]\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n topo = f.maker.fgraph.toposort()\r\n out = f(*val_inputs)\r\n assert numpy.all(numpy.isfinite(out))\r\n assert numpy.allclose(out, numpy.sign(val_inputs[0]) * 2 / 3)\r\n assert(out_dtype == out.dtype)\r\n finally:\r\n mode._optimizer = old_optimizer",
"def _apply_array_spin12(self, h1e: 'Nparray', h2e: 'Nparray') -> 'Nparray':\n norb = self.norb()\n assert h1e.shape == (norb * 2, norb * 2)\n assert h2e.shape == (norb * 2, norb * 2, norb * 2, norb * 2)\n nalpha = self.nalpha()\n nbeta = self.nbeta()\n\n thresh = self._low_thresh\n if nalpha < norb * thresh and nbeta < norb * thresh:\n graphset = FciGraphSet(2, 2)\n graphset.append(self._core)\n if nalpha - 2 >= 0:\n graphset.append(FciGraph(nalpha - 2, nbeta, norb))\n if nalpha - 1 >= 0 and nbeta - 1 >= 0:\n graphset.append(FciGraph(nalpha - 1, nbeta - 1, norb))\n if nbeta - 2 >= 0:\n graphset.append(FciGraph(nalpha, nbeta - 2, norb))\n return self._apply_array_spin12_lowfilling(h1e, h2e)\n\n return self._apply_array_spin12_halffilling(h1e, h2e)",
"def test_2D_m4_2k():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: 'gpu_2k',\n Splitting: 'o2'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: '',\n Splitting: 'o2'},\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def init_stitch(N):\n\tif N is None:\n\t\tN = np.int32(HII_DIM) #prepare for stitching\n\tMETA_GRID_SIZE = DIM/N\n\tM = np.int32(HII_DIM/META_GRID_SIZE)\n\t#HII_DIM = np.int32(HII_DIM)\n\tf_pixel_factor = DIM/HII_DIM;\n\tscale = np.float32(BOX_LEN/DIM)\n\tprint 'scale', scale\n\tHII_scale = np.float32(BOX_LEN/HII_DIM)\n\tshape = (DIM,DIM,N)\n\tstitch_grid_size = (DIM/(block_size[0]),\n\t\t\t\t\t\tDIM/(block_size[0]),\n\t\t\t\t\t\tN/(block_size[0]))\n\tHII_stitch_grid_size = (HII_DIM/(block_size[0]),\n\t\t\t\t\t\tHII_DIM/(block_size[0]),\n\t\t\t\t\t\tM/(block_size[0]))\n\t#ratio of large box to small size\n\tkernel_source = open(cmd_folder+\"/initialize_stitch.cu\").read()\n\tkernel_code = kernel_source % {\n\n\t\t'DELTAK': DELTA_K,\n\t\t'DIM': DIM, \n\t\t'VOLUME': VOLUME,\n\t\t'META_BLOCKDIM': N\n\t}\n\tmain_module = nvcc.SourceModule(kernel_code)\n\tinit_stitch = main_module.get_function(\"init_kernel\")\n\tHII_filter = main_module.get_function(\"HII_filter\")\n\tsubsample_kernel = main_module.get_function(\"subsample\")\n\tvelocity_kernel = main_module.get_function(\"set_velocity\")\n\tpspec_texture = main_module.get_texref(\"pspec\")\n\tMRGgen = MRG32k3aRandomNumberGenerator(seed_getter=seed_getter_uniform, offset=0)\n\tplan2d = Plan((np.int64(DIM), np.int64(DIM)), dtype=np.complex64)\n\tplan1d = Plan((np.int64(DIM)), dtype=np.complex64)\n\tprint \"init pspec\"\n\tinterpPspec, interpSize = init_pspec() #interpPspec contains both k array and P array\n\tinterp_cu = cuda.matrix_to_array(interpPspec, order='F')\n\tcuda.bind_array_to_texref(interp_cu, pspec_texture)\n\t#hbox_large = pyfftw.empty_aligned((DIM, DIM, DIM), dtype='complex64')\n\thbox_large = np.zeros((DIM, DIM, DIM), dtype=np.complex64)\n\t#hbox_small = np.zeros(HII_shape, dtype=np.float32)\n\t#hbox_large = n\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\n\t# Set up pinned memory for transfer\n\t#largebox_hs = cuda.aligned_empty(shape=shape, dtype=np.float32, alignment=resource.getpagesize())\n\tlargebox_pin = cuda.pagelocked_empty(shape=shape, dtype=np.float32)\n\tlargecbox_pin = cuda.pagelocked_empty(shape=shape, dtype=np.complex64)\n\n\tlargebox_d = gpuarray.zeros(shape, dtype=np.float32)\n\tlargebox_d_imag = gpuarray.zeros(shape, dtype=np.float32)\n\tprint \"init boxes\"\n\tfor meta_z in xrange(META_GRID_SIZE):\n\t\t# MRGgen = MRG32k3aRandomNumberGenerator(seed_getter=seed_getter_uniform, offset=meta_x*N**3)\n\t\tinit_stitch(largebox_d, DIM, np.int32(meta_z),block=block_size, grid=stitch_grid_size)\n\t\tinit_stitch(largebox_d_imag, DIM, np.int32(meta_z),block=block_size, grid=stitch_grid_size)\n\t\tlargebox_d *= MRGgen.gen_normal(shape, dtype=np.float32)\n\t\tlargebox_d_imag *= MRGgen.gen_normal(shape, dtype=np.float32)\n\t\tlargebox_d = largebox_d + np.complex64(1.j) * largebox_d_imag\n\t\tcuda.memcpy_dtoh_async(largecbox_pin, largebox_d)\n\t\thbox_large[:, :, meta_z*N:(meta_z+1)*N] = largecbox_pin.copy()\n\t#if want to get velocity need to use this\n\tif True:\n\t\tprint \"saving kbox\"\n\t\tnp.save(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN), hbox_large)\n\n\tprint \"Executing FFT on device\"\n\t#hbox_large = pyfftw.interfaces.numpy_fft.ifftn(hbox_large).real\n\thbox_large = fft_stitch(N, plan2d, plan1d, hbox_large, largebox_d).real\n\tprint hbox_large.dtype\n\tprint \"Finished FFT on device\"\n\tnp.save(parent_folder+\"/Boxes/deltax_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN), hbox_large)\n\t\n\tif True:\n\t\tprint \"loading kbox\"\n\t\thbox_large = 
np.load(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN))\n\tfor meta_z in xrange(META_GRID_SIZE):\n\t\tlargebox_pin = hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy()\n\t\t#cuda.memcpy_htod_async(largebox_d, largebox_pin)\n\t\tlargebox_d = gpuarray.to_gpu_async(hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\tHII_filter(largebox_d, DIM, np.int32(meta_z), ZERO, smoothR, block=block_size, grid=stitch_grid_size);\n\t\thbox_large[:, :, meta_z*N:(meta_z+1)*N] = largebox_d.get_async()\n\t#import IPython; IPython.embed()\n\tprint \"Executing FFT on host\"\n\t#hbox_large = hifft(hbox_large).astype(np.complex64).real\n\t#hbox_large = pyfftw.interfaces.numpy_fft.ifftn(hbox_large).real\n\thbox_large = fft_stitch(N, plan2d, plan1d, hbox_large, largebox_d).real\n\tprint \"Finished FFT on host\"\n\t#import IPython; IPython.embed()\n\n\t# for meta_x in xrange(META_GRID_SIZE):\n\t# \tfor meta_y in xrange(META_GRID_SIZE):\n\t# \t\tfor meta_z in xrange(META_GRID_SIZE):\n\t# \t\t\tlargebox_d = gpuarray.to_gpu(hbox_large[meta_x*N:(meta_x+1)*N, meta_y*N:(meta_y+1)*N, meta_z*N:(meta_z+1)*N])\n\t# \t\t\tHII_filter(largebox_d, N, np.int32(meta_x), np.int32(meta_y), np.int32(meta_z), ZERO, smoothR, block=block_size, grid=grid_size);\n\t# \t\t\thbox_large[meta_x*N:(meta_x+1)*N, meta_y*N:(meta_y+1)*N, meta_z*N:(meta_z+1)*N] = largebox_d.get()\n\t#plan = Plan(shape, dtype=np.complex64)\n\t#plan.execute(largebox_d, inverse=True) #FFT to real space of smoothed box\n\t#largebox_d /= VOLUME #divide by VOLUME if using fft (vs ifft)\n\n\n\t# This saves a large resolution deltax\n\n\t\n\tprint \"downsampling\"\n\tsmallbox_d = gpuarray.zeros((HII_DIM,HII_DIM,M), dtype=np.float32)\n\tfor meta_z in xrange(META_GRID_SIZE):\n\t\tlargebox_pin = hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy()\n\t\tcuda.memcpy_dtoh_async(largecbox_pin, largebox_d)\n\t\t#largebox_d = gpuarray.to_gpu_async(hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\tlargebox_d /= scale**3 #\n\t\tsubsample_kernel(largebox_d, smallbox_d, DIM, HII_DIM, PIXEL_FACTOR, block=block_size, grid=HII_stitch_grid_size) #subsample in real space\n\t\thbox_small[:, :, meta_z*M:(meta_z+1)*M] = smallbox_d.get_async()\n\tnp.save(parent_folder+\"/Boxes/smoothed_deltax_z0.00_{0:d}_{1:.0f}Mpc\".format(HII_DIM, BOX_LEN), hbox_small)\n\t#import IPython; IPython.embed()\n\n\n\t# To get velocities: reload the k-space box\n\thbox_large = np.load(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN))\n\thvbox_large = np.zeros((DIM, DIM, DIM), dtype=np.float32)\n\thvbox_small = np.zeros(HII_shape, dtype=np.float32)\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\tlargevbox_d = gpuarray.zeros((DIM,DIM,N), dtype=np.complex64)\n\tsmallvbox_d = gpuarray.zeros((HII_DIM, HII_DIM, M), dtype=np.float32)\n\tfor num, mode in enumerate(['x', 'y', 'z']):\n\t\tfor meta_z in xrange(META_GRID_SIZE):\n\t\t\tlargebox_d = gpuarray.to_gpu_async(hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\t\t#largebox_d /= VOLUME #divide by VOLUME if using fft (vs ifft)\n\t\t\tvelocity_kernel(largebox_d, largevbox_d, DIM, np.int32(meta_z), np.int32(num), block=block_size, grid=stitch_grid_size)\n\t\t\tHII_filter(largevbox_d, DIM, ZERO, smoothR, block=block_size, grid=stitch_grid_size)\n\t\t\tprint hvbox_large.shape, largevbox_d.shape\n\t\t\thvbox_large[:, :, meta_z*N:(meta_z+1)*N] = largevbox_d.get_async()\n\t\thvbox_large = fft_stitch(N, plan2d, plan1d, hvbox_large, largevbox_d).real\n\t\tfor meta_z in xrange(META_GRID_SIZE):\n\t\t\tlargevbox_d = 
gpuarray.to_gpu_async(hvbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\t\tsubsample_kernel(largevbox_d.real, smallvbox_d, DIM, HII_DIM,PIXEL_FACTOR, block=block_size, grid=HII_stitch_grid_size)\n\t\t\thvbox_small[:, :, meta_z*M:(meta_z+1)*M] = smallvbox_d.get_async()\n\t\tnp.save(parent_folder+\"/Boxes/v{0}overddot_{1:d}_{2:.0f}Mpc\".format(mode, HII_DIM, BOX_LEN), smallvbox_d.get())\n\n\treturn",
"def test_assembly_inner_product_2_forms(self):\n func_space_lob = FunctionSpace(self.mesh, '2-lobatto', self.p)\n func_space_gauss = FunctionSpace(self.mesh, '2-gauss', self.p)\n func_space_extgauss = FunctionSpace(self.mesh, '2-ext_gauss', self.p)\n\n basis_lob = BasisForm(func_space_lob)\n basis_lob.quad_grid = 'gauss'\n M_lob = inner(basis_lob, basis_lob)\n\n basis_gauss = BasisForm(func_space_gauss)\n basis_gauss.quad_grid = 'lobatto'\n M_gauss = inner(basis_gauss, basis_gauss)\n\n basis_ext_gauss = BasisForm(func_space_extgauss)\n print(basis_ext_gauss.num_basis)\n basis_ext_gauss.quad_grid = 'lobatto'\n M_extgauss = inner(basis_ext_gauss, basis_ext_gauss)\n\n M_lob_ass_ref = assemble_slow(self.mesh, M_lob, func_space_lob.dof_map.dof_map,\n func_space_lob.dof_map.dof_map)\n M_gauss_ass_ref = assemble_slow(self.mesh, M_gauss, func_space_gauss.dof_map.dof_map,\n func_space_gauss.dof_map.dof_map)\n M_extgauss_ass_ref = assemble_slow(\n self.mesh, M_extgauss, func_space_extgauss.dof_map.dof_map_internal, func_space_extgauss.dof_map.dof_map_internal)\n\n M_lob_ass = assemble(M_lob, func_space_lob, func_space_lob).toarray()\n M_gauss_ass = assemble(M_gauss, func_space_gauss, func_space_gauss).toarray()\n M_extgauss_ass = assemble(M_extgauss, func_space_extgauss,\n func_space_extgauss).toarray()\n\n npt.assert_array_almost_equal(M_lob_ass_ref, M_lob_ass)\n npt.assert_array_almost_equal(M_gauss_ass_ref, M_gauss_ass)\n npt.assert_array_almost_equal(M_extgauss_ass_ref, M_extgauss_ass)",
"def test_2D_m6_1k():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L4_2,\n Support: 'gpu_2k',\n Splitting: 'o2'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L4_2,\n Support: '',\n Splitting: 'o2'},\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def kernel_test(slabs, data, backend):\n Q = data[:, 0]\n\n layers = []\n for thickness, rsld, isld, sigma in slabs:\n layers.append(\n model.Layer(\n b=(rsld - 1j * isld), dens=0.1, d=thickness, sigma=sigma\n )\n )\n layers.reverse()\n stack = model.Stack(Layers=list(layers[1:-1]), Repetitions=1)\n sample = model.Sample(\n Stacks=[stack], Ambient=layers[-1], Substrate=layers[0]\n )\n # print(sample)\n\n inst = model.Instrument(\n probe=backend,\n wavelength=1.54,\n coords=\"q\",\n I0=1,\n res=0,\n restype=\"no conv\",\n respoints=5,\n resintrange=2,\n beamw=0.1,\n footype=\"no corr\",\n samplelen=10,\n pol=\"uu\",\n )\n if data.shape[1] == 4:\n dQ = data[:, 3]\n inst.restype = \"full conv and varying res.\"\n inst.res = dQ\n if backend == \"neutron pol spin flip\":\n # memory issues in matrix formalism if too many data points\n inst.respoints = 101\n else:\n inst.respoints = (\n 10001 # try to use same convolution as ref1d when generating\n )\n inst.resintrange = 3.5\n\n # print(inst)\n R = sample.SimSpecular(Q, inst)\n\n assert R.shape == data[:, 1].shape\n if data.shape[1] == 4:\n # validation accuracy is reduced for resolution runs, as strongly\n # depends on numerical convolution scheme\n if backend == \"neutron pol spin flip\":\n np.testing.assert_allclose(R, data[:, 1], rtol=0.005)\n else:\n np.testing.assert_allclose(R, data[:, 1], rtol=0.001)\n else:\n np.testing.assert_allclose(R, data[:, 1], rtol=0.001)",
"def test_3D_m8_1k_sFH():\n scal, velo = setup_3D()\n\n advec = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: 'gpu_1k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_3D_withPython(scal, velo, advec, advec_py)",
"def test_2D_m4_1k():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: 'gpu_1k',\n Splitting: 'o2'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: '',\n Splitting: 'o2'},\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def test_2_2_3D_rec_splits(self):\n check = [(-3.0, -2.0, 0.0), (4.0, 10.0, 1.0), (4.0, -2.0, 0.0),\n (4.0, 10.0, 0.0), (4.0, -2.0, 1.0), (-3.0, 10.0, 0.0),\n (-3.0, 10.0, 1.0), (-3.0, -2.0, 1.0), (0.5, 4.0, 0.5),\n (-3.0, 4.0, 0.5), (-3.0, -2.0, 0.5), (-3.0, 4.0, 0.0),\n (0.5, -2.0, 0.5), (0.5, -2.0, 0.0), (0.5, 4.0, 0.0),\n (-1.25, 1.0, 0.25), (4.0, 4.0, 0.5), (4.0, 10.0, 0.5),\n (4.0, 4.0, 1.0), (0.5, 10.0, 0.5), (0.5, 10.0, 1.0),\n (0.5, 4.0, 1.0), (2.25, 7.0, 0.75), (4.0, -2.0, 0.5),\n (4.0, 4.0, 0.0), (2.25, 1.0, 0.25), (0.5, 10.0, 0.0),\n (2.25, 7.0, 0.25), (0.5, -2.0, 1.0), (2.25, 1.0, 0.75),\n (-3.0, 10.0, 0.5), (-1.25, 7.0, 0.25), (-3.0, 4.0, 1.0),\n (-1.25, 7.0, 0.75), (-1.25, 1.0, 0.75), (0.5, 1.0, 0.25),\n (0.5, 4.0, 0.25), (0.5, 1.0, 0.5), (-1.25, 4.0, 0.25),\n (-1.25, 4.0, 0.5), (-1.25, 1.0, 0.5), (-0.375, 2.5, 0.375),\n (-3.0, 1.0, 0.25), (-3.0, -2.0, 0.25), (-3.0, 1.0, 0.0),\n (-1.25, -2.0, 0.25), (-1.25, -2.0, 0.0), (-1.25, 1.0, 0.0),\n (-2.125, -0.5, 0.125), (-3.0, 4.0, 0.25), (-3.0, 1.0, 0.5),\n (-2.125, 2.5, 0.375), (-1.25, -2.0, 0.5),\n (-2.125, -0.5, 0.375), (-1.25, 4.0, 0.0), (-2.125, 2.5, 0.125),\n (0.5, -2.0, 0.25), (-0.375, -0.5, 0.375), (0.5, 1.0, 0.0),\n (-0.375, -0.5, 0.125), (-0.375, 2.5, 0.125), (0.5, 7.0, 0.75),\n (0.5, 4.0, 0.75), (0.5, 7.0, 0.5), (2.25, 4.0, 0.75),\n (2.25, 4.0, 0.5), (2.25, 7.0, 0.5), (1.375, 5.5, 0.625),\n (4.0, 7.0, 0.75), (4.0, 10.0, 0.75), (4.0, 7.0, 1.0),\n (2.25, 10.0, 0.75), (2.25, 10.0, 1.0), (2.25, 7.0, 1.0),\n (3.125, 8.5, 0.875), (4.0, 4.0, 0.75), (4.0, 7.0, 0.5),\n (3.125, 5.5, 0.625), (2.25, 10.0, 0.5), (3.125, 8.5, 0.625),\n (2.25, 4.0, 1.0), (3.125, 5.5, 0.875), (0.5, 10.0, 0.75),\n (1.375, 8.5, 0.625), (0.5, 7.0, 1.0), (1.375, 8.5, 0.875),\n (1.375, 5.5, 0.875), (2.25, 4.0, 0.25), (2.25, 1.0, 0.5),\n (1.375, 2.5, 0.375), (4.0, 1.0, 0.25), (4.0, -2.0, 0.25),\n (4.0, 1.0, 0.0), (2.25, -2.0, 0.25), (2.25, -2.0, 0.0),\n (2.25, 1.0, 0.0), (3.125, -0.5, 0.125), (4.0, 4.0, 0.25),\n (4.0, 1.0, 0.5), (3.125, 2.5, 0.375), (2.25, -2.0, 0.5),\n (3.125, -0.5, 0.375), (2.25, 4.0, 0.0), (3.125, 2.5, 0.125),\n (1.375, -0.5, 0.375), (1.375, -0.5, 0.125),\n (1.375, 2.5, 0.125), (0.5, 7.0, 0.25), (1.375, 5.5, 0.375),\n (4.0, 7.0, 0.25), (4.0, 10.0, 0.25), (4.0, 7.0, 0.0),\n (2.25, 10.0, 0.25), (2.25, 10.0, 0.0), (2.25, 7.0, 0.0),\n (3.125, 8.5, 0.125), (3.125, 5.5, 0.375), (3.125, 8.5, 0.375),\n (3.125, 5.5, 0.125), (0.5, 10.0, 0.25), (1.375, 8.5, 0.375),\n (0.5, 7.0, 0.0), (1.375, 8.5, 0.125), (1.375, 5.5, 0.125),\n (0.5, 1.0, 0.75), (1.375, 2.5, 0.625), (4.0, 1.0, 0.75),\n (4.0, -2.0, 0.75), (4.0, 1.0, 1.0), (2.25, -2.0, 0.75),\n (2.25, -2.0, 1.0), (2.25, 1.0, 1.0), (3.125, -0.5, 0.875),\n (3.125, 2.5, 0.625), (3.125, -0.5, 0.625), (3.125, 2.5, 0.875),\n (0.5, -2.0, 0.75), (1.375, -0.5, 0.625), (0.5, 1.0, 1.0),\n (1.375, -0.5, 0.875), (1.375, 2.5, 0.875), (-1.25, 7.0, 0.5),\n (-0.375, 5.5, 0.375), (-3.0, 7.0, 0.25), (-3.0, 10.0, 0.25),\n (-3.0, 7.0, 0.0), (-1.25, 10.0, 0.25), (-1.25, 10.0, 0.0),\n (-1.25, 7.0, 0.0), (-2.125, 8.5, 0.125), (-3.0, 7.0, 0.5),\n (-2.125, 5.5, 0.375), (-1.25, 10.0, 0.5), (-2.125, 8.5, 0.375),\n (-2.125, 5.5, 0.125), (-0.375, 8.5, 0.375),\n (-0.375, 8.5, 0.125), (-0.375, 5.5, 0.125), (-1.25, 4.0, 0.75),\n (-0.375, 5.5, 0.625), (-3.0, 7.0, 0.75), (-3.0, 10.0, 0.75),\n (-3.0, 7.0, 1.0), (-1.25, 10.0, 0.75), (-1.25, 10.0, 1.0),\n (-1.25, 7.0, 1.0), (-2.125, 8.5, 0.875), (-3.0, 4.0, 0.75),\n (-2.125, 5.5, 0.625), (-2.125, 8.5, 0.625), (-1.25, 4.0, 1.0),\n (-2.125, 5.5, 0.875), (-0.375, 8.5, 0.625),\n (-0.375, 8.5, 
0.875), (-0.375, 5.5, 0.875),\n (-0.375, 2.5, 0.625), (-3.0, 1.0, 0.75), (-3.0, -2.0, 0.75),\n (-3.0, 1.0, 1.0), (-1.25, -2.0, 0.75), (-1.25, -2.0, 1.0),\n (-1.25, 1.0, 1.0), (-2.125, -0.5, 0.875), (-2.125, 2.5, 0.625),\n (-2.125, -0.5, 0.625), (-2.125, 2.5, 0.875),\n (-0.375, -0.5, 0.625), (-0.375, -0.5, 0.875),\n (-0.375, 2.5, 0.875)]\n nn_checks = {(2.25, 7.0, 0.75): [(4.0, 7.0, 0.75), (2.25, 7.0, 1.0),\n (4.0, 7.0, 0.5), (4.0, 7.0, 1.0),\n (4.0, 4.0, 0.75), (1.375, 5.5, 0.875),\n (2.25, 4.0, 1.0), (2.25, 4.0, 0.5),\n (2.25, 4.0, 0.75), (3.125, 8.5, 0.875),\n (3.125, 8.5, 0.625), (4.0, 10.0, 0.75),\n (2.25, 10.0, 1.0), (2.25, 10.0, 0.75),\n (2.25, 10.0, 0.5), (1.375, 8.5, 0.625),\n (1.375, 8.5, 0.875), (0.5, 7.0, 0.75),\n (0.5, 7.0, 0.5), (3.125, 5.5, 0.625),\n (3.125, 5.5, 0.875), (0.5, 10.0, 0.75),\n (0.5, 7.0, 1.0), (0.5, 4.0, 0.75),\n (2.25, 7.0, 0.5), (1.375, 5.5, 0.625)],\n (4.0, -2.0, 0.5): [(4.0, -2.0, 0.75), (4.0, -2.0, 0.25),\n (2.25, 1.0, 0.5), (2.25, -2.0, 0.75),\n (2.25, -2.0, 0.5), (2.25, -2.0, 0.25),\n (4.0, 1.0, 0.25), (4.0, 1.0, 0.75),\n (4.0, 1.0, 0.5), (3.125, -0.5, 0.375),\n (3.125, -0.5, 0.625)],\n (-2.125, -0.5, 0.875): [(-1.25, 1.0, 1.0),\n (-1.25, 1.0, 0.75),\n (-1.25, -2.0, 0.75),\n (-1.25, -2.0, 1.0),\n (-3.0, -2.0, 0.75),\n (-3.0, 1.0, 1.0), (-3, -2, 1),\n (-3.0, 1.0, 0.75)]}\n\n init_triangulation(3, 2, check, nn_checks, bounds=[(-3, 4), (-2, 10), (0, 1)])",
"def test_power_spectral_density_from_spatially_resolved_magnetisation_confined_to_mesh_region(tmpdir, debug=False):\n os.chdir(str(tmpdir))\n RTOL = 1e-10\n\n H1 = 1e6 # external field in A/m\n alpha1 = 0.5 # some sort of damping constant\n omega1 = gamma * H1 # precession frequency\n\n H2 = 2.8e4 # external field in A/m\n alpha2 = 0.3 # some sort of damping constant\n omega2 = gamma * H2 # precession frequency\n\n ##\n # Step 1: Construct a time series of artificial magnetisation\n # data and save it to a bunch of .npy files.\n ##\n t_step = 1e-11\n t_ini = 0\n t_end = 10e-9\n\n N1 = 42 # in a real application this would be the number of mesh vertices\n N2 = 23 # in a real application this would be the number of mesh vertices\n fft_test_helpers.create_test_npy_files_with_two_regions(\n str(tmpdir), t_step, t_ini, t_end, omega1, alpha1, N1, omega2, alpha2, N2)\n\n ##\n # Step 2: compute the FFT of a resampled time series, both by\n # hand and using FFT_m.\n ##\n # XXX TODO: Resampling timesteps is not supported when using .npy\n # files. Either simplify the code below, or implement saving to\n # .h5 files so that it's easier to implement resampling for\n # spatially resolved data, too.\n ##\n t_step_res = t_step\n t_ini_res = t_ini\n t_end_res = t_end\n ts_resampled = np.arange(t_ini_res, t_end_res, t_step_res)\n\n # Compute time series based on resampled timesteps\n mx_res = exp(-ts_resampled * 1e8 / alpha1) * sin(omega1 * ts_resampled)\n my_res = exp(-ts_resampled * 1e8 / alpha1) * cos(omega1 * ts_resampled)\n mz_res = 1 - sqrt(mx_res ** 2 + my_res ** 2)\n\n # Compute 'analytical' Fourier transform of resampled time series and\n # determine the power of the spectrum for each component. We also need\n # to multiply by the number of mesh nodes because the numerical algorithm\n # sums up all contributions at the individual nodes (but we can just\n # multiply because they are all identical by construction).\n psd_mx_expected = N1 * np.absolute(np.fft.rfft(mx_res)) ** 2\n psd_my_expected = N1 * np.absolute(np.fft.rfft(my_res)) ** 2\n psd_mz_expected = N1 * np.absolute(np.fft.rfft(mz_res)) ** 2\n\n # Compute Fourier transform of resampled time series using FFT_m\n freqs_computed, psd_mx_computed, psd_my_computed, psd_mz_computed = \\\n compute_power_spectral_density('m_ringdown*.npy', t_step_res, t_ini=t_ini_res,\n t_end=t_end_res, subtract_values=None, restrict_to_vertices=xrange(N1))\n\n # Check that the analytically determined power spectra are the same as the\n # computed ones.\n assert(np.allclose(psd_mx_expected, psd_mx_computed, atol=0, rtol=RTOL))\n assert(np.allclose(psd_my_expected, psd_my_computed, atol=0, rtol=RTOL))\n assert(np.allclose(psd_mz_expected, psd_mz_computed, atol=0, rtol=RTOL))\n\n if debug:\n # Plot the spectra for debugging\n fig = plt.figure(figsize=(20, 5))\n ax = fig.gca()\n ax.plot(freqs_computed, psd_mx_expected, label='psd_mx_expected')\n ax.plot(freqs_computed, psd_my_expected, label='psd_my_expected')\n ax.plot(freqs_computed, psd_mz_expected, label='psd_mz_expected')\n ax.plot(freqs_computed, psd_mx_computed, label='psd_mx_computed')\n ax.plot(freqs_computed, psd_my_computed, label='psd_my_computed')\n ax.plot(freqs_computed, psd_mz_computed, label='psd_mz_computed')\n ax.legend(loc='best')\n fig.savefig('psd_m_McMichaelStiles.png')",
"def test_matrix22(gridsize=50):\n\n v1 = vec2(3,0)\n v2 = vec2(0,3)\n\n #rotate 45 degrees \n m22 = matrix22()\n m22.from_euler(45)\n\n # make a second matrix, also 45 degrees, should give us 90 total \n m22_2 = matrix22()\n m22_2.from_euler(45)\n m22 = m22_2 * m22\n\n # mutliply a vector by the matrix \n v3 = m22 * v2 \n\n fb = pixel_op() \n fb.create_buffer(800, 800)\n fb.graticule(gridsize)\n \n pts = [ (0,0), (0,1), (2,1), (0,2) ]\n #bloody_simple_2drender('2d_rotation.png', pts=pts, gridsize=50, pfb=fb)\n\n vecs = [v2,v3]\n bloody_simple_2drender('2d_rotation.png', vecs=vecs, gridsize=50, pfb=fb)\n\n #rotate the points by matrix multiplication \n pts = m22.batch_mult_pts(pts) \n bloody_simple_2drender('2d_rotation.png', pts=pts, gridsize=50, pfb=fb)\n fb.save('2d_rotation.png')",
"def test_2_layer():\r\n # angular frequency in radians * THz\r\n w = 100 * nu.THz\r\n # Relative permittivity of metal and dielectric\r\n em = -4.56 + 0.12j\r\n ed = 1.23 + 0.01j\r\n ex_list = ez_list = [ed, em]\r\n # Relative permeabilities\r\n mu_list = [1,1]\r\n # Dictionary of input parameters\r\n input_params = {'w': w, 'd_list': [inf,inf], 'ex_list': ex_list,\r\n 'ez_list': ez_list, 'mu_list': mu_list}\r\n \r\n # Calculate the theoretical kx\r\n theo_kx = (w / nu.c0) * cmath.sqrt((em * ed) / (em + ed))\r\n if theo_kx.imag < 0:\r\n theo_kx *= -1\r\n print('Theoretical kx:',\r\n '(%.7g+%.7gj) rad/um' % (theo_kx.real / nu.um**-1, theo_kx.imag / nu.um**-1))\r\n \r\n # If I use the theoretical kx value, the mode should be correct and\r\n # all my tests should pass.\r\n params = deepcopy(input_params)\r\n params['kx'] = theo_kx\r\n params = find_all_params_from_kx(params)\r\n kzd, kzm = params['kz_list']\r\n # check that kz_list is correct\r\n assert_floats_are_equal(kzd**2, (w**2 / nu.c0**2) * ed**2 / (em + ed))\r\n assert_floats_are_equal(kzm**2, (w**2 / nu.c0**2) * em**2 / (em + ed))\r\n # check that layer_bottom_list is correct\r\n assert params['layer_bottom_list'][0] == -inf\r\n assert params['layer_bottom_list'][1] == 0\r\n # Check that the boundary condition matrix agrees with hand-calculation\r\n bc_mat = bc_matrix(params)\r\n # ...top-left is Ex0down / H0down\r\n assert_floats_are_equal(bc_mat[0,0], -kzd / (w * ed * nu.eps0))\r\n # ...top-right is -Ex1up / H1up\r\n assert_floats_are_equal(bc_mat[0,1], -kzm / (w * em * nu.eps0))\r\n # ...bottom-left is eps0 * Ez0down / H0down\r\n assert_floats_are_equal(bc_mat[1,0], ed * -theo_kx / (w * ed * nu.eps0))\r\n # ...bottom-right is -eps1 * Ez1up / H1up\r\n assert_floats_are_equal(bc_mat[1,1], -em * -theo_kx / (w * em * nu.eps0))\r\n # Check that one of the eigenvalues is almost zero (compared to the size\r\n # of the matrix elements).\r\n eigenvalues = np.linalg.eig(bc_mat)[0]\r\n assert abs(eigenvalues).min() / abs(bc_mat).max() < 1e-6\r\n # Check that the mode passes all tests.\r\n assert check_mode(params, thorough=True) is True\r\n # Check that I can scale the fields and it still passes all tests.\r\n params_scaled = rescale_fields(1.23+4.56j, params)\r\n assert check_mode(params_scaled, thorough=True) is True\r\n \r\n # Now try my kx-finding algorithm, to see if it finds the right value.\r\n kx_list = find_kx(input_params)\r\n print('kx_list:',\r\n ['(%.7g+%.7gj) rad/um' % (kx.real / nu.um**-1, kx.imag / nu.um**-1)\r\n for kx in kx_list])\r\n kx = kx_list[0]\r\n assert_floats_are_equal(theo_kx, kx)\r\n \r\n plot_mode(params)\r\n \r\n print('If you see this message, all the tests succeeded!!')"
]
| [
"0.6612045",
"0.6316624",
"0.6205833",
"0.6069077",
"0.60371995",
"0.58924127",
"0.58888525",
"0.58776313",
"0.58374417",
"0.5809537",
"0.5788198",
"0.57790375",
"0.57689774",
"0.574581",
"0.5729717",
"0.56743294",
"0.5644362",
"0.56299007",
"0.56171143",
"0.56143606",
"0.56053174",
"0.5569654",
"0.5563536",
"0.55521184",
"0.55474603",
"0.5539677",
"0.5529111",
"0.55085313",
"0.54953724",
"0.54669434"
]
| 0.65982985 | 1 |
Testing M8 remeshing formula in 3D, 1 kernel, simple precision, o2 splitting. | def test_3D_m8_1k():
scal, velo = setup_3D()
advec = Advection(velo, scal, discretization=d3d,
method={TimeIntegrator: RK2,
Interpolation: Linear,
Remesh: M8Prime,
Support: 'gpu_1k',
Splitting: 'o2'}
)
advec_py = Advection(velo, scal, discretization=d3d,
method={TimeIntegrator: RK2,
Interpolation: Linear,
Remesh: M8Prime,
Support: '',
Splitting: 'o2'},
)
assertion_3D_withPython(scal, velo, advec, advec_py) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_3D_m8_2k():\n scal, velo = setup_3D()\n\n advec = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: 'gpu_2k',\n Splitting: 'o2'}\n )\n advec_py = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: '',\n Splitting: 'o2'},\n )\n assertion_3D_withPython(scal, velo, advec, advec_py)",
"def test_3D_m8_1k_sFH():\n scal, velo = setup_3D()\n\n advec = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: 'gpu_1k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_3D_withPython(scal, velo, advec, advec_py)",
"def test_3D_m8_2k_sFH():\n scal, velo = setup_3D()\n\n advec = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: 'gpu_2k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_3D_withPython(scal, velo, advec, advec_py)",
"def test_2_2_3D_cube_splits(self):\n check = [(0, 0, 0), (1, 1, 1), (1, 0, 0), (1, 1, 0), (1, 0, 1),\n (0, 1, 0),\n (0, 1, 1), (0, 0, 1), (0.5, 0.5, 0.5), (0.0, 0.5, 0.5),\n (0.0, 0.0, 0.5), (0.0, 0.5, 0.0), (0.5, 0.0, 0.5),\n (0.5, 0.0, 0.0),\n (0.5, 0.5, 0.0), (0.25, 0.25, 0.25), (1.0, 0.5, 0.5),\n (1.0, 1.0, 0.5),\n (1.0, 0.5, 1.0), (0.5, 1.0, 0.5), (0.5, 1.0, 1.0),\n (0.5, 0.5, 1.0),\n (0.75, 0.75, 0.75), (1.0, 0.0, 0.5), (1.0, 0.5, 0.0),\n (0.75, 0.25, 0.25), (0.5, 1.0, 0.0), (0.75, 0.75, 0.25),\n (0.5, 0.0, 1.0), (0.75, 0.25, 0.75), (0.0, 1.0, 0.5),\n (0.25, 0.75, 0.25), (0.0, 0.5, 1.0), (0.25, 0.75, 0.75),\n (0.25, 0.25, 0.75), (0.5, 0.25, 0.25), (0.5, 0.5, 0.25),\n (0.5, 0.25, 0.5), (0.25, 0.5, 0.25), (0.25, 0.5, 0.5),\n (0.25, 0.25, 0.5), (0.375, 0.375, 0.375), (0.0, 0.25, 0.25),\n (0.0, 0.0, 0.25), (0.0, 0.25, 0.0), (0.25, 0.0, 0.25),\n (0.25, 0.0, 0.0), (0.25, 0.25, 0.0), (0.125, 0.125, 0.125),\n (0.0, 0.5, 0.25), (0.0, 0.25, 0.5), (0.125, 0.375, 0.375),\n (0.25, 0.0, 0.5), (0.125, 0.125, 0.375), (0.25, 0.5, 0.0),\n (0.125, 0.375, 0.125), (0.5, 0.0, 0.25), (0.375, 0.125, 0.375),\n (0.5, 0.25, 0.0), (0.375, 0.125, 0.125), (0.375, 0.375, 0.125),\n (0.5, 0.75, 0.75), (0.5, 0.5, 0.75), (0.5, 0.75, 0.5),\n (0.75, 0.5, 0.75), (0.75, 0.5, 0.5), (0.75, 0.75, 0.5),\n (0.625, 0.625, 0.625), (1.0, 0.75, 0.75), (1.0, 1.0, 0.75),\n (1.0, 0.75, 1.0), (0.75, 1.0, 0.75), (0.75, 1.0, 1.0),\n (0.75, 0.75, 1.0), (0.875, 0.875, 0.875), (1.0, 0.5, 0.75),\n (1.0, 0.75, 0.5), (0.875, 0.625, 0.625), (0.75, 1.0, 0.5),\n (0.875, 0.875, 0.625), (0.75, 0.5, 1.0), (0.875, 0.625, 0.875),\n (0.5, 1.0, 0.75), (0.625, 0.875, 0.625), (0.5, 0.75, 1.0),\n (0.625, 0.875, 0.875), (0.625, 0.625, 0.875),\n (0.75, 0.5, 0.25),\n (0.75, 0.25, 0.5), (0.625, 0.375, 0.375), (1.0, 0.25, 0.25),\n (1.0, 0.0, 0.25), (1.0, 0.25, 0.0), (0.75, 0.0, 0.25),\n (0.75, 0.0, 0.0), (0.75, 0.25, 0.0), (0.875, 0.125, 0.125),\n (1.0, 0.5, 0.25), (1.0, 0.25, 0.5), (0.875, 0.375, 0.375),\n (0.75, 0.0, 0.5), (0.875, 0.125, 0.375), (0.75, 0.5, 0.0),\n (0.875, 0.375, 0.125), (0.625, 0.125, 0.375),\n (0.625, 0.125, 0.125),\n (0.625, 0.375, 0.125), (0.5, 0.75, 0.25),\n (0.625, 0.625, 0.375),\n (1.0, 0.75, 0.25), (1.0, 1.0, 0.25), (1.0, 0.75, 0.0),\n (0.75, 1.0, 0.25), (0.75, 1.0, 0.0), (0.75, 0.75, 0.0),\n (0.875, 0.875, 0.125), (0.875, 0.625, 0.375),\n (0.875, 0.875, 0.375),\n (0.875, 0.625, 0.125), (0.5, 1.0, 0.25), (0.625, 0.875, 0.375),\n (0.5, 0.75, 0.0), (0.625, 0.875, 0.125), (0.625, 0.625, 0.125),\n (0.5, 0.25, 0.75), (0.625, 0.375, 0.625), (1.0, 0.25, 0.75),\n (1.0, 0.0, 0.75), (1.0, 0.25, 1.0), (0.75, 0.0, 0.75),\n (0.75, 0.0, 1.0), (0.75, 0.25, 1.0), (0.875, 0.125, 0.875),\n (0.875, 0.375, 0.625), (0.875, 0.125, 0.625),\n (0.875, 0.375, 0.875),\n (0.5, 0.0, 0.75), (0.625, 0.125, 0.625), (0.5, 0.25, 1.0),\n (0.625, 0.125, 0.875), (0.625, 0.375, 0.875),\n (0.25, 0.75, 0.5),\n (0.375, 0.625, 0.375), (0.0, 0.75, 0.25), (0.0, 1.0, 0.25),\n (0.0, 0.75, 0.0), (0.25, 1.0, 0.25), (0.25, 1.0, 0.0),\n (0.25, 0.75, 0.0), (0.125, 0.875, 0.125), (0.0, 0.75, 0.5),\n (0.125, 0.625, 0.375), (0.25, 1.0, 0.5), (0.125, 0.875, 0.375),\n (0.125, 0.625, 0.125), (0.375, 0.875, 0.375),\n (0.375, 0.875, 0.125),\n (0.375, 0.625, 0.125), (0.25, 0.5, 0.75),\n (0.375, 0.625, 0.625),\n (0.0, 0.75, 0.75), (0.0, 1.0, 0.75), (0.0, 0.75, 1.0),\n (0.25, 1.0, 0.75), (0.25, 1.0, 1.0), (0.25, 0.75, 1.0),\n (0.125, 0.875, 0.875), (0.0, 0.5, 0.75), (0.125, 0.625, 0.625),\n (0.125, 0.875, 0.625), (0.25, 0.5, 1.0), (0.125, 0.625, 0.875),\n (0.375, 0.875, 0.625), (0.375, 0.875, 
0.875),\n (0.375, 0.625, 0.875),\n (0.375, 0.375, 0.625), (0.0, 0.25, 0.75), (0.0, 0.0, 0.75),\n (0.0, 0.25, 1.0), (0.25, 0.0, 0.75), (0.25, 0.0, 1.0),\n (0.25, 0.25, 1.0), (0.125, 0.125, 0.875),\n (0.125, 0.375, 0.625),\n (0.125, 0.125, 0.625), (0.125, 0.375, 0.875),\n (0.375, 0.125, 0.625),\n (0.375, 0.125, 0.875), (0.375, 0.375, 0.875)]\n\n nn_checks = {(0.5, 0.25, 0.25): [(0.375, 0.375, 0.125), (0.5, 0.5, 0.0),\n (0.75, 0.25, 0.25),\n (0.625, 0.375, 0.375),\n (0.625, 0.125, 0.375),\n (0.625, 0.125, 0.125),\n (0.5, 0.5, 0.25), (0.25, 0.25, 0.25),\n (0.375, 0.375, 0.375),\n (0.5, 0.25, 0.5), (0.5, 0.5, 0.5),\n (0.5, 0.0, 0.25),\n (0.375, 0.125, 0.375), (0.5, 0.0, 0.5),\n (0.5, 0.25, 0.0),\n (0.375, 0.125, 0.125), (0.5, 0.0, 0.0),\n (0.625, 0.375, 0.125)],\n (0.625, 0.625, 0.875): [(0.75, 0.5, 1.0),\n (0.75, 0.75, 1.0),\n (0.5, 0.75, 1.0), (0.5, 0.5, 1.0),\n (0.5, 0.5, 0.75),\n (0.5, 0.75, 0.75),\n (0.75, 0.5, 0.75),\n (0.75, 0.75, 0.75)],\n (0, 0, 0): [(0.0, 0.25, 0.0), (0.125, 0.125, 0.125),\n (0.0, 0.0, 0.25), (0.25, 0.0, 0.0),\n (0.0, 0.25, 0.25), (0.25, 0.25, 0.0),\n (0.25, 0.0, 0.25)]}\n\n init_triangulation(3, 2, check, nn_checks)",
"def kernel_test(slabs, data, backend):\n Q = data[:, 0]\n\n layers = []\n for thickness, rsld, isld, sigma in slabs:\n layers.append(\n model.Layer(\n b=(rsld - 1j * isld), dens=0.1, d=thickness, sigma=sigma\n )\n )\n layers.reverse()\n stack = model.Stack(Layers=list(layers[1:-1]), Repetitions=1)\n sample = model.Sample(\n Stacks=[stack], Ambient=layers[-1], Substrate=layers[0]\n )\n # print(sample)\n\n inst = model.Instrument(\n probe=backend,\n wavelength=1.54,\n coords=\"q\",\n I0=1,\n res=0,\n restype=\"no conv\",\n respoints=5,\n resintrange=2,\n beamw=0.1,\n footype=\"no corr\",\n samplelen=10,\n pol=\"uu\",\n )\n if data.shape[1] == 4:\n dQ = data[:, 3]\n inst.restype = \"full conv and varying res.\"\n inst.res = dQ\n if backend == \"neutron pol spin flip\":\n # memory issues in matrix formalism if too many data points\n inst.respoints = 101\n else:\n inst.respoints = (\n 10001 # try to use same convolution as ref1d when generating\n )\n inst.resintrange = 3.5\n\n # print(inst)\n R = sample.SimSpecular(Q, inst)\n\n assert R.shape == data[:, 1].shape\n if data.shape[1] == 4:\n # validation accuracy is reduced for resolution runs, as strongly\n # depends on numerical convolution scheme\n if backend == \"neutron pol spin flip\":\n np.testing.assert_allclose(R, data[:, 1], rtol=0.005)\n else:\n np.testing.assert_allclose(R, data[:, 1], rtol=0.001)\n else:\n np.testing.assert_allclose(R, data[:, 1], rtol=0.001)",
"def test_2():\n\n n1 = 10\n n2 = 100\n ndim = 3\n\n semi_axes = np.random.random((n1,ndim))\n coords = np.array([sample_ellipsoidal_volume(n2, semi_axes[i]) for i in range(0,n1)])\n\n Is = iterative_inertia_tensors_3D(coords)\n\n assert np.shape(Is)==(n1,ndim,ndim)",
"def test_2_2_3D_rec_splits(self):\n check = [(-3.0, -2.0, 0.0), (4.0, 10.0, 1.0), (4.0, -2.0, 0.0),\n (4.0, 10.0, 0.0), (4.0, -2.0, 1.0), (-3.0, 10.0, 0.0),\n (-3.0, 10.0, 1.0), (-3.0, -2.0, 1.0), (0.5, 4.0, 0.5),\n (-3.0, 4.0, 0.5), (-3.0, -2.0, 0.5), (-3.0, 4.0, 0.0),\n (0.5, -2.0, 0.5), (0.5, -2.0, 0.0), (0.5, 4.0, 0.0),\n (-1.25, 1.0, 0.25), (4.0, 4.0, 0.5), (4.0, 10.0, 0.5),\n (4.0, 4.0, 1.0), (0.5, 10.0, 0.5), (0.5, 10.0, 1.0),\n (0.5, 4.0, 1.0), (2.25, 7.0, 0.75), (4.0, -2.0, 0.5),\n (4.0, 4.0, 0.0), (2.25, 1.0, 0.25), (0.5, 10.0, 0.0),\n (2.25, 7.0, 0.25), (0.5, -2.0, 1.0), (2.25, 1.0, 0.75),\n (-3.0, 10.0, 0.5), (-1.25, 7.0, 0.25), (-3.0, 4.0, 1.0),\n (-1.25, 7.0, 0.75), (-1.25, 1.0, 0.75), (0.5, 1.0, 0.25),\n (0.5, 4.0, 0.25), (0.5, 1.0, 0.5), (-1.25, 4.0, 0.25),\n (-1.25, 4.0, 0.5), (-1.25, 1.0, 0.5), (-0.375, 2.5, 0.375),\n (-3.0, 1.0, 0.25), (-3.0, -2.0, 0.25), (-3.0, 1.0, 0.0),\n (-1.25, -2.0, 0.25), (-1.25, -2.0, 0.0), (-1.25, 1.0, 0.0),\n (-2.125, -0.5, 0.125), (-3.0, 4.0, 0.25), (-3.0, 1.0, 0.5),\n (-2.125, 2.5, 0.375), (-1.25, -2.0, 0.5),\n (-2.125, -0.5, 0.375), (-1.25, 4.0, 0.0), (-2.125, 2.5, 0.125),\n (0.5, -2.0, 0.25), (-0.375, -0.5, 0.375), (0.5, 1.0, 0.0),\n (-0.375, -0.5, 0.125), (-0.375, 2.5, 0.125), (0.5, 7.0, 0.75),\n (0.5, 4.0, 0.75), (0.5, 7.0, 0.5), (2.25, 4.0, 0.75),\n (2.25, 4.0, 0.5), (2.25, 7.0, 0.5), (1.375, 5.5, 0.625),\n (4.0, 7.0, 0.75), (4.0, 10.0, 0.75), (4.0, 7.0, 1.0),\n (2.25, 10.0, 0.75), (2.25, 10.0, 1.0), (2.25, 7.0, 1.0),\n (3.125, 8.5, 0.875), (4.0, 4.0, 0.75), (4.0, 7.0, 0.5),\n (3.125, 5.5, 0.625), (2.25, 10.0, 0.5), (3.125, 8.5, 0.625),\n (2.25, 4.0, 1.0), (3.125, 5.5, 0.875), (0.5, 10.0, 0.75),\n (1.375, 8.5, 0.625), (0.5, 7.0, 1.0), (1.375, 8.5, 0.875),\n (1.375, 5.5, 0.875), (2.25, 4.0, 0.25), (2.25, 1.0, 0.5),\n (1.375, 2.5, 0.375), (4.0, 1.0, 0.25), (4.0, -2.0, 0.25),\n (4.0, 1.0, 0.0), (2.25, -2.0, 0.25), (2.25, -2.0, 0.0),\n (2.25, 1.0, 0.0), (3.125, -0.5, 0.125), (4.0, 4.0, 0.25),\n (4.0, 1.0, 0.5), (3.125, 2.5, 0.375), (2.25, -2.0, 0.5),\n (3.125, -0.5, 0.375), (2.25, 4.0, 0.0), (3.125, 2.5, 0.125),\n (1.375, -0.5, 0.375), (1.375, -0.5, 0.125),\n (1.375, 2.5, 0.125), (0.5, 7.0, 0.25), (1.375, 5.5, 0.375),\n (4.0, 7.0, 0.25), (4.0, 10.0, 0.25), (4.0, 7.0, 0.0),\n (2.25, 10.0, 0.25), (2.25, 10.0, 0.0), (2.25, 7.0, 0.0),\n (3.125, 8.5, 0.125), (3.125, 5.5, 0.375), (3.125, 8.5, 0.375),\n (3.125, 5.5, 0.125), (0.5, 10.0, 0.25), (1.375, 8.5, 0.375),\n (0.5, 7.0, 0.0), (1.375, 8.5, 0.125), (1.375, 5.5, 0.125),\n (0.5, 1.0, 0.75), (1.375, 2.5, 0.625), (4.0, 1.0, 0.75),\n (4.0, -2.0, 0.75), (4.0, 1.0, 1.0), (2.25, -2.0, 0.75),\n (2.25, -2.0, 1.0), (2.25, 1.0, 1.0), (3.125, -0.5, 0.875),\n (3.125, 2.5, 0.625), (3.125, -0.5, 0.625), (3.125, 2.5, 0.875),\n (0.5, -2.0, 0.75), (1.375, -0.5, 0.625), (0.5, 1.0, 1.0),\n (1.375, -0.5, 0.875), (1.375, 2.5, 0.875), (-1.25, 7.0, 0.5),\n (-0.375, 5.5, 0.375), (-3.0, 7.0, 0.25), (-3.0, 10.0, 0.25),\n (-3.0, 7.0, 0.0), (-1.25, 10.0, 0.25), (-1.25, 10.0, 0.0),\n (-1.25, 7.0, 0.0), (-2.125, 8.5, 0.125), (-3.0, 7.0, 0.5),\n (-2.125, 5.5, 0.375), (-1.25, 10.0, 0.5), (-2.125, 8.5, 0.375),\n (-2.125, 5.5, 0.125), (-0.375, 8.5, 0.375),\n (-0.375, 8.5, 0.125), (-0.375, 5.5, 0.125), (-1.25, 4.0, 0.75),\n (-0.375, 5.5, 0.625), (-3.0, 7.0, 0.75), (-3.0, 10.0, 0.75),\n (-3.0, 7.0, 1.0), (-1.25, 10.0, 0.75), (-1.25, 10.0, 1.0),\n (-1.25, 7.0, 1.0), (-2.125, 8.5, 0.875), (-3.0, 4.0, 0.75),\n (-2.125, 5.5, 0.625), (-2.125, 8.5, 0.625), (-1.25, 4.0, 1.0),\n (-2.125, 5.5, 0.875), (-0.375, 8.5, 0.625),\n (-0.375, 8.5, 
0.875), (-0.375, 5.5, 0.875),\n (-0.375, 2.5, 0.625), (-3.0, 1.0, 0.75), (-3.0, -2.0, 0.75),\n (-3.0, 1.0, 1.0), (-1.25, -2.0, 0.75), (-1.25, -2.0, 1.0),\n (-1.25, 1.0, 1.0), (-2.125, -0.5, 0.875), (-2.125, 2.5, 0.625),\n (-2.125, -0.5, 0.625), (-2.125, 2.5, 0.875),\n (-0.375, -0.5, 0.625), (-0.375, -0.5, 0.875),\n (-0.375, 2.5, 0.875)]\n nn_checks = {(2.25, 7.0, 0.75): [(4.0, 7.0, 0.75), (2.25, 7.0, 1.0),\n (4.0, 7.0, 0.5), (4.0, 7.0, 1.0),\n (4.0, 4.0, 0.75), (1.375, 5.5, 0.875),\n (2.25, 4.0, 1.0), (2.25, 4.0, 0.5),\n (2.25, 4.0, 0.75), (3.125, 8.5, 0.875),\n (3.125, 8.5, 0.625), (4.0, 10.0, 0.75),\n (2.25, 10.0, 1.0), (2.25, 10.0, 0.75),\n (2.25, 10.0, 0.5), (1.375, 8.5, 0.625),\n (1.375, 8.5, 0.875), (0.5, 7.0, 0.75),\n (0.5, 7.0, 0.5), (3.125, 5.5, 0.625),\n (3.125, 5.5, 0.875), (0.5, 10.0, 0.75),\n (0.5, 7.0, 1.0), (0.5, 4.0, 0.75),\n (2.25, 7.0, 0.5), (1.375, 5.5, 0.625)],\n (4.0, -2.0, 0.5): [(4.0, -2.0, 0.75), (4.0, -2.0, 0.25),\n (2.25, 1.0, 0.5), (2.25, -2.0, 0.75),\n (2.25, -2.0, 0.5), (2.25, -2.0, 0.25),\n (4.0, 1.0, 0.25), (4.0, 1.0, 0.75),\n (4.0, 1.0, 0.5), (3.125, -0.5, 0.375),\n (3.125, -0.5, 0.625)],\n (-2.125, -0.5, 0.875): [(-1.25, 1.0, 1.0),\n (-1.25, 1.0, 0.75),\n (-1.25, -2.0, 0.75),\n (-1.25, -2.0, 1.0),\n (-3.0, -2.0, 0.75),\n (-3.0, 1.0, 1.0), (-3, -2, 1),\n (-3.0, 1.0, 0.75)]}\n\n init_triangulation(3, 2, check, nn_checks, bounds=[(-3, 4), (-2, 10), (0, 1)])",
"def ThreeDTest(SMethod,IMethod,Fraction,Plot = False):\r\n \r\n # Cylinder Parameters--------------------------------------------------------- \r\n CL = 100 # cylinder length\r\n Pt = 120 # number of points in each cylinder\r\n Cn = 50 # number of horizontal slices in cylinder\r\n\r\n x = np.zeros(Cn*Pt)\r\n y = np.zeros(Cn*Pt)\r\n z = np.zeros(Cn*Pt)\r\n # Generate cylinder-----------------------------------------------------------\r\n n = 0\r\n for i in range(Cn):\r\n for j in range(Pt):\r\n x[n] = np.cos((2*pi*j)/Pt)\r\n y[n] = np.sin((2*pi*j)/Pt)\r\n z[n] = i*(CL/Cn)\r\n n += 1\r\n \r\n YFull = (np.sin(2*pi*0.03*z))+(np.cos(2*pi*x+2*pi*x))\r\n XFull = np.column_stack((x,y,z))\r\n MFull = np.column_stack((x,y,z,YFull))\r\n\r\n # Randomise matrix and Generate sparse version of geometry--------------------\r\n split = int(np.ceil((MFull.shape[0])*Fraction)) \r\n np.random.shuffle(MFull)\r\n # Sparse Set\r\n XTrain = MFull[:split,:3]\r\n YTrain = MFull[:split,3]\r\n # Training set\r\n XStar = MFull[split:,:3]\r\n CStar = MFull[split:,3]\r\n\r\n # Reconstruct XFull's geometry using XTrain and YTrain------------------------\r\n YHat = ThreeDPointInter(XTrain,YTrain,XFull,SMethod,IMethod,10)\r\n mse = mseCalc(YFull,YHat)\r\n print('Mean Squared Error =',mse)\r\n # Plot whole data-----------------------------------------------------------\r\n if Plot:\r\n fig = plt.figure()\r\n ax1 = fig.add_subplot(131, projection='3d')\r\n ax1.scatter(XFull[:,0],XFull[:,1],XFull[:,2],c=[float(i) for i in YFull],cmap='plasma')\r\n ax1.set_xlabel('x')\r\n ax1.set_ylabel('y')\r\n ax1.set_zlabel('z')\r\n # Plot training Data\r\n ax2 = fig.add_subplot(132, projection='3d')\r\n ax2.scatter(XTrain[:,0],XTrain[:,1],XTrain[:,2],c=[float(i) for i in YTrain],cmap='plasma')\r\n ax2.set_xlabel('XTrain1')\r\n ax2.set_ylabel('XTrain2')\r\n ax2.set_zlabel('XTrain3')\r\n # Plot Reconstruction of XFull\r\n ax3 = fig.add_subplot(133, projection='3d')\r\n ax3.scatter(XFull[:,0],XFull[:,1],XFull[:,2],c=[float(i) for i in YHat],cmap='plasma')\r\n ax3.set_xlabel('x')\r\n ax3.set_ylabel('y')\r\n ax3.set_zlabel('z')\r\n \r\n plt.show()\r\n\r\n return mse",
"def test_multiple_case(self):\r\n\r\n shp = (3, 3)\r\n fx, fy, fz, fw = fmatrices('xyzw')\r\n dx, dy, dz, dw = dmatrices('xyzw')\r\n fv = fvector('r').dimshuffle('x', 0)\r\n dv = dvector('s').dimshuffle('x', 0)\r\n fxv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fyv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fzv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fwv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float32').reshape(1, shp[0])\r\n dxv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dyv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dzv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dwv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float64').reshape(1, shp[0])\r\n\r\n #We must be sure that the Canonizer is working, but that we don't have other\r\n # optimisation that could hide bug in the Canonizer as local_elemwise_fusion\r\n mode = compile.mode.get_default_mode()\r\n old_optimizer = mode._optimizer\r\n try:\r\n mode._optimizer = gof.Query([\"canonicalize\"])\r\n mode._optimizer = mode._optimizer.including('ShapeOpt')\r\n mode._optimizer = mode._optimizer.excluding(\r\n 'local_elemwise_fusion')\r\n\r\n #test x / x -> 1\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([(fx/fx,[fx],[fxv],'float32'),\r\n (dx/dx,[dx],[dxv],'float64'),\r\n (fv/fv,[fv],[fvv],'float32'),\r\n (dv/dv,[dv],[dvv],'float64'),\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert (out == numpy.ones(shp, dtype=out_dtype)).all()\r\n topo = f.maker.fgraph.toposort()\r\n if sym_inputs[0].broadcastable[0]:\r\n assert len(topo) == 2\r\n assert isinstance(topo[0].op, Shape_i)\r\n assert isinstance(topo[1].op, tensor.Alloc)\r\n else:\r\n assert len(topo) == 3\r\n assert isinstance(topo[0].op, Shape_i)\r\n assert isinstance(topo[1].op, Shape_i)\r\n assert isinstance(topo[2].op, tensor.Alloc)\r\n assert(out_dtype == out.dtype)\r\n\r\n #test (x * y) / x -> y\r\n for id,(g, sym_inputs, val_inputs, nb_elemwise, out_dtype) in enumerate([\r\n ((dx*dy)/dx,[dx,dy],[dxv,dyv],0,'float64'),\r\n ((fx*fy)/fx,[fx,fy],[fxv,fyv],0,'float32'),\r\n ((dv*dy)/dv,[dv,dy],[dvv,dyv],0,'float64'),\r\n ((fv*fy)/fv,[fv,fy],[fvv,fyv],0,'float32'),\r\n #must broadcast as their is a dimshuffle in the computation\r\n ((dx*dv)/dx,[dx,dv],[dxv,dvv],1,'float64'),\r\n #topo: [Elemwise{second,no_inplace}(x, <TensorType(float64, row)>)]\r\n ((fx*fv)/fx,[fx,fv],[fxv,fvv],1,'float32')\r\n #topo: [Elemwise{second,no_inplace}(x, <TensorType(float32, row)>)]\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert(out_dtype == out.dtype)\r\n assert numpy.allclose(out, val_inputs[1])\r\n topo = f.maker.fgraph.toposort()\r\n print \"ID TOPO\", id, topo, sym_inputs\r\n for r, t in f.maker.fgraph.shape_feature.shape_of.items():\r\n print ' ', r, t\r\n if topo and not(len(topo)==1 and topo[0].op==deep_copy_op):\r\n for node in topo[:-1]:\r\n assert isinstance(node.op, Shape_i)\r\n assert isinstance(topo[-1].op, tensor.Alloc)\r\n\r\n #test x / y / x -> 1 / y\r\n for id,(g, sym_inputs, val_inputs, nb_elemwise, out_dtype) in enumerate([\r\n ((dx/dy)/dx,[dx,dy],[dxv,dyv],1,'float64'),\r\n ((fx/fy)/fx,[fx,fy],[fxv,fyv],1,'float32'),\r\n ((dv/dy)/dv,[dv,dy],[dvv,dyv],1,'float64'),\r\n 
((fv/fy)/fv,[fv,fy],[fvv,fyv],1,'float32'),\r\n #must broadcast as their is a dimshuffle in the computation\r\n\r\n ((dx/dv)/dx,[dx,dv],[dxv,dvv],1,'float64'),\r\n #topo: [Shape_i, Shape_i, Elemwise{inv,no_inplace}(<TensorType(float64, row)>), Alloc]\r\n ((fx/fv)/fx,[fx,fv],[fxv,fvv],1,'float32'),\r\n #topo:[Shape_i, Shape_i, Elemwise{inv,no_inplace}(<TensorType(float32, row)>), Alloc]\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, (1 / val_inputs[1]))\r\n topo = f.maker.fgraph.toposort()\r\n print topo\r\n elem = [t for t in topo if isinstance(t.op, T.Elemwise)]\r\n assert len(elem) == nb_elemwise\r\n assert isinstance(elem[0].op, (T.Elemwise, ))\r\n assert isinstance(elem[0].op.scalar_op, (\r\n theano.scalar.basic.Inv, theano.scalar.basic.TrueDiv))\r\n assert(out_dtype == out.dtype)\r\n\r\n #test (a / b) * (b / c) * (c / d) -> a / d\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n ((dx / dy) * (dy / dz) * (dz / dw),[dx,dy,dz,dw],[dxv,dyv,dzv,dwv],'float64'),\r\n ((fx / fy) * (fy / fz) * (fz / fw),[fx,fy,fz,fw],[fxv,fyv,fzv,fwv],'float32'),\r\n ((dv / dy) * (dy / dz) * (dz / dw),[dv,dy,dz,dw],[dvv,dyv,dzv,dwv],'float64'),\r\n ((fv / fy) * (fy / fz) * (fz / fw),[fv,fy,fz,fw],[fvv,fyv,fzv,fwv],'float32'),\r\n ((dx / dv) * (dv / dz) * (dz / dw),[dx,dv,dz,dw],[dxv,dvv,dzv,dwv],'float64'),\r\n ((fx / fv) * (fv / fz) * (fz / fw),[fx,fv,fz,fw],[fxv,fvv,fzv,fwv],'float32'),\r\n ((dx / dy) * (dy / dv) * (dv / dw),[dx,dy,dv,dw],[dxv,dyv,dvv,dwv],'float64'),\r\n ((fx / fy) * (fy / fv) * (fv / fw),[fx,fy,fv,fw],[fxv,fyv,fvv,fwv],'float32'),\r\n ((dx / dy) * (dy / dz) * (dz / dv),[dx,dy,dz,dv],[dxv,dyv,dzv,dvv],'float64'),\r\n ((fx / fy) * (fy / fz) * (fz / fv),[fx,fy,fz,fv],[fxv,fyv,fzv,fvv],'float32'),\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, (val_inputs[0] / val_inputs[3]))\r\n topo = f.maker.fgraph.toposort()\r\n assert len(topo) == 1\r\n assert isinstance(topo[0].op, (T.Elemwise, ))\r\n assert isinstance(topo[0].op.scalar_op,\r\n theano.scalar.basic.TrueDiv)\r\n assert len(topo[0].inputs) == 2\r\n assert(out_dtype == out.dtype)\r\n\r\n #test (2.0 * x) / (4.0 * y) -> (0.5 * x) / y\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n (((2.0*dx)/(4.0*dy)),[dx,dy],[dxv,dyv],'float64'),\r\n (((2.0*fx)/(4.0*fy)),[fx,fy],[fxv,fyv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n (((2.0*dv)/(4.0*dy)),[dv,dy],[dvv,dyv],'float64'),\r\n (((2.0*fv)/(4.0*fy)),[fv,fy],[fvv,fyv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n (((2.0*dx)/(4.0*dv)),[dx,dv],[dxv,dvv],'float64'),\r\n (((2.0*fx)/(4.0*fv)),[fx,fv],[fxv,fvv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ]):\r\n\r\n if isinstance(out_dtype, dict):\r\n out_dtype = out_dtype[config.cast_policy]\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, (0.5 *\r\n val_inputs[0] / val_inputs[1]))\r\n topo = f.maker.fgraph.toposort()\r\n assert len(topo) == 2\r\n assert isinstance(topo[0].op, (T.Elemwise, ))\r\n assert isinstance(topo[0].op.scalar_op,\r\n theano.scalar.basic.Mul)\r\n assert len(topo[0].inputs) == 2\r\n assert isinstance(topo[1].op, (T.Elemwise, ))\r\n assert isinstance(topo[1].op.scalar_op,\r\n theano.scalar.basic.TrueDiv)\r\n assert len(topo[1].inputs) == 2\r\n assert(out_dtype == 
out.dtype)\r\n\r\n #test 2 * x / 2 -> x\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n ((2*dx)/2,[dx],[dxv],'float64'),\r\n ((2*fx)/2,[fx],[fxv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ((2*dv)/2,[dv],[dvv],'float64'),\r\n ((2*fv)/2,[fv],[fvv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ]):\r\n if isinstance(out_dtype, dict):\r\n out_dtype = out_dtype[config.cast_policy]\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, val_inputs[0])\r\n topo = f.maker.fgraph.toposort()\r\n assert len(topo) == 1\r\n topo[0].op == deep_copy_op\r\n assert(out_dtype == out.dtype)\r\n\r\n #test x / abs(x) -> sign(x)\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n (dx/abs(dx),[dx],[0.5-dxv],'float64'),\r\n (fx/abs(fx),[fx],[0.5-fxv], 'float32'),\r\n (dx/abs(dx),[dx],[0.1*dxv],'float64'),\r\n (fx/abs(fx),[fx],[0.1*fxv], 'float32'),\r\n (dv/abs(dv),[dv],[0.5-dvv],'float64'),\r\n (fv/abs(fv),[fv],[0.5-fvv], 'float32'),\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.all(numpy.isfinite(out))\r\n assert numpy.allclose(out, numpy.sign(val_inputs[0]))\r\n assert(out_dtype == out.dtype)\r\n assert len(f.maker.fgraph.toposort()) == 1\r\n\r\n #test (2*x) / (3*abs(x)) -> sign(x)\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n ((2 * dx) / (3 * abs(dx)), [dx], [0.5 - dxv], 'float64'),\r\n ((2 * fx) / (3 * abs(fx)), [fx], [0.5 - fxv],\r\n {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ((2 * dx) / (3 * abs(dx)), [dx], [0.1 * dxv], 'float64'),\r\n ((2 * fx) / (3 * abs(fx)), [fx], [0.1 * fxv],\r\n {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ((2 * dv) / (3 * abs(dv)), [dv], [0.5 - dvv], 'float64'),\r\n ((2 * fv) / (3 * abs(fv)), [fv], [0.5 - fvv],\r\n {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ]):\r\n\r\n if isinstance(out_dtype, dict):\r\n out_dtype = out_dtype[config.cast_policy]\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n topo = f.maker.fgraph.toposort()\r\n out = f(*val_inputs)\r\n assert numpy.all(numpy.isfinite(out))\r\n assert numpy.allclose(out, numpy.sign(val_inputs[0]) * 2 / 3)\r\n assert(out_dtype == out.dtype)\r\n finally:\r\n mode._optimizer = old_optimizer",
"def test_3_2_4D_cube_splits(self):\n check = [(0, 0, 0, 0), (1, 1, 1, 1), (1, 0, 0, 0), (1, 1, 0, 0),\n (1, 1, 1, 0),\n (1, 1, 0, 1), (1, 0, 1, 0), (1, 0, 1, 1), (1, 0, 0, 1),\n (0, 1, 0, 0),\n (0, 1, 1, 0), (0, 1, 1, 1), (0, 1, 0, 1), (0, 0, 1, 0),\n (0, 0, 1, 1),\n (0, 0, 0, 1), (0.5, 0.5, 0.5, 0.5), (0.0, 0.5, 0.5, 0.5),\n (0.0, 0.0, 0.5, 0.5), (0.0, 0.0, 0.0, 0.5),\n (0.0, 0.0, 0.5, 0.0),\n (0.0, 0.5, 0.0, 0.5), (0.0, 0.5, 0.0, 0.0),\n (0.0, 0.5, 0.5, 0.0),\n (0.5, 0.0, 0.5, 0.5), (0.5, 0.0, 0.0, 0.5),\n (0.5, 0.0, 0.0, 0.0),\n (0.5, 0.0, 0.5, 0.0), (0.5, 0.5, 0.0, 0.5),\n (0.5, 0.5, 0.0, 0.0),\n (0.5, 0.5, 0.5, 0.0), (0.25, 0.25, 0.25, 0.25),\n (1.0, 0.5, 0.5, 0.5),\n (1.0, 1.0, 0.5, 0.5), (1.0, 1.0, 1.0, 0.5),\n (1.0, 1.0, 0.5, 1.0),\n (1.0, 0.5, 1.0, 0.5), (1.0, 0.5, 1.0, 1.0),\n (1.0, 0.5, 0.5, 1.0),\n (0.5, 1.0, 0.5, 0.5), (0.5, 1.0, 1.0, 0.5),\n (0.5, 1.0, 1.0, 1.0),\n (0.5, 1.0, 0.5, 1.0), (0.5, 0.5, 1.0, 0.5),\n (0.5, 0.5, 1.0, 1.0),\n (0.5, 0.5, 0.5, 1.0), (0.75, 0.75, 0.75, 0.75),\n (1.0, 0.0, 0.5, 0.5),\n (1.0, 0.0, 0.0, 0.5), (1.0, 0.0, 0.5, 0.0),\n (1.0, 0.5, 0.0, 0.5),\n (1.0, 0.5, 0.0, 0.0), (1.0, 0.5, 0.5, 0.0),\n (0.75, 0.25, 0.25, 0.25),\n (1.0, 1.0, 0.0, 0.5), (1.0, 1.0, 0.5, 0.0),\n (0.5, 1.0, 0.0, 0.5),\n (0.5, 1.0, 0.0, 0.0), (0.5, 1.0, 0.5, 0.0),\n (0.75, 0.75, 0.25, 0.25),\n (1.0, 0.5, 1.0, 0.0), (0.5, 1.0, 1.0, 0.0),\n (0.5, 0.5, 1.0, 0.0),\n (0.75, 0.75, 0.75, 0.25), (1.0, 0.5, 0.0, 1.0),\n (0.5, 1.0, 0.0, 1.0),\n (0.5, 0.5, 0.0, 1.0), (0.75, 0.75, 0.25, 0.75),\n (1.0, 0.0, 1.0, 0.5),\n (0.5, 0.0, 1.0, 0.5), (0.5, 0.0, 1.0, 0.0),\n (0.75, 0.25, 0.75, 0.25),\n (1.0, 0.0, 0.5, 1.0), (0.5, 0.0, 1.0, 1.0),\n (0.5, 0.0, 0.5, 1.0),\n (0.75, 0.25, 0.75, 0.75), (0.5, 0.0, 0.0, 1.0),\n (0.75, 0.25, 0.25, 0.75), (0.0, 1.0, 0.5, 0.5),\n (0.0, 1.0, 0.0, 0.5),\n (0.0, 1.0, 0.5, 0.0), (0.25, 0.75, 0.25, 0.25),\n (0.0, 1.0, 1.0, 0.5),\n (0.0, 0.5, 1.0, 0.5), (0.0, 0.5, 1.0, 0.0),\n (0.25, 0.75, 0.75, 0.25),\n (0.0, 1.0, 0.5, 1.0), (0.0, 0.5, 1.0, 1.0),\n (0.0, 0.5, 0.5, 1.0),\n (0.25, 0.75, 0.75, 0.75), (0.0, 0.5, 0.0, 1.0),\n (0.25, 0.75, 0.25, 0.75), (0.0, 0.0, 1.0, 0.5),\n (0.25, 0.25, 0.75, 0.25), (0.0, 0.0, 0.5, 1.0),\n (0.25, 0.25, 0.75, 0.75), (0.25, 0.25, 0.25, 0.75)]\n\n nn_checks = {(0, 0, 0, 0): [(0.0, 0.5, 0.5, 0.0), (0.5, 0.0, 0.5, 0.5),\n (0.5, 0.5, 0.0, 0.5),\n (0.25, 0.25, 0.25, 0.25),\n (0.5, 0.0, 0.0, 0.5), (0.5, 0.0, 0.5, 0.0),\n (0.5, 0.5, 0.0, 0.0), (0.0, 0.0, 0.5, 0.0),\n (0.0, 0.5, 0.0, 0.0), (0.0, 0.0, 0.0, 0.5),\n (0.0, 0.5, 0.5, 0.5), (0.0, 0.0, 0.5, 0.5),\n (0.0, 0.5, 0.0, 0.5), (0.5, 0.0, 0.0, 0.0),\n (0.5, 0.5, 0.5, 0.0)],\n (1.0, 1.0, 0.5, 0.5): [(1.0, 1.0, 0.5, 1.0), (1, 1, 0, 1),\n (1.0, 1.0, 1.0, 0.5),\n (1.0, 0.5, 0.5, 0.5), (1, 1, 1, 0),\n (1.0, 1.0, 0.5, 0.0),\n (1.0, 1.0, 0.0, 0.5), (1, 1, 0, 0),\n (1, 1, 1, 1), (0.5, 1.0, 0.5, 0.5),\n (0.5, 0.5, 0.5, 0.5),\n (0.75, 0.75, 0.75, 0.75),\n (0.75, 0.75, 0.25, 0.25),\n (0.75, 0.75, 0.75, 0.25),\n (0.75, 0.75, 0.25, 0.75)],\n (0.25, 0.25, 0.25, 0.75): [(0.0, 0.0, 0.5, 1.0),\n (0.0, 0.5, 0.0, 1.0),\n (0.5, 0.0, 0.0, 1.0),\n (0.5, 0.5, 0.5, 1.0),\n (0, 0, 0, 1),\n (0.0, 0.5, 0.5, 1.0),\n (0.5, 0.0, 0.5, 1.0),\n (0.5, 0.5, 0.0, 1.0),\n (0.0, 0.0, 0.0, 0.5),\n (0.0, 0.0, 0.5, 0.5),\n (0.0, 0.5, 0.5, 0.5),\n (0.0, 0.5, 0.0, 0.5),\n (0.5, 0.0, 0.5, 0.5),\n (0.5, 0.5, 0.0, 0.5),\n (0.5, 0.5, 0.5, 0.5),\n (0.5, 0.0, 0.0, 0.5)]}\n\n init_triangulation(4, 1, check, nn_checks)",
"def lpt_prototype(mesh,\n nc=FLAGS.nc,\n bs=FLAGS.box_size,\n batch_size=FLAGS.batch_size,\n a0=FLAGS.a0,\n a=FLAGS.af,\n nsteps=FLAGS.nsteps):\n\n stages = np.linspace(a0, a, nsteps, endpoint=True)\n klin = np.loadtxt('../flowpm/data/Planck15_a1p00.txt').T[0]\n plin = np.loadtxt('../flowpm/data/Planck15_a1p00.txt').T[1]\n ipklin = iuspline(klin, plin)\n\n # Define the named dimensions\n # Parameters of the small scales decomposition\n n_block_x = FLAGS.nx\n n_block_y = FLAGS.ny\n n_block_z = 1\n halo_size = FLAGS.hsize\n\n if halo_size >= 0.5 * min(nc // n_block_x, nc // n_block_y, nc // n_block_z):\n new_size = int(0.5 *\n min(nc // n_block_x, nc // n_block_y, nc // n_block_z))\n print('WARNING: REDUCING HALO SIZE from %d to %d' % (halo_size, new_size))\n halo_size = new_size\n\n # Parameters of the large scales decomposition\n downsampling_factor = FLAGS.dsample\n lnc = nc // 2**downsampling_factor\n\n #\n\n fx_dim = mtf.Dimension(\"nx\", nc)\n fy_dim = mtf.Dimension(\"ny\", nc)\n fz_dim = mtf.Dimension(\"nz\", nc)\n\n tfx_dim = mtf.Dimension(\"tx\", nc)\n tfy_dim = mtf.Dimension(\"ty\", nc)\n tfz_dim = mtf.Dimension(\"tz\", nc)\n\n # Dimensions of the low resolution grid\n x_dim = mtf.Dimension(\"nx_lr\", lnc)\n y_dim = mtf.Dimension(\"ny_lr\", lnc)\n z_dim = mtf.Dimension(\"nz_lr\", lnc)\n\n tx_dim = mtf.Dimension(\"tx_lr\", lnc)\n ty_dim = mtf.Dimension(\"ty_lr\", lnc)\n tz_dim = mtf.Dimension(\"tz_lr\", lnc)\n\n nx_dim = mtf.Dimension('nx_block', n_block_x)\n ny_dim = mtf.Dimension('ny_block', n_block_y)\n nz_dim = mtf.Dimension('nz_block', n_block_z)\n\n sx_dim = mtf.Dimension('sx_block', nc // n_block_x)\n sy_dim = mtf.Dimension('sy_block', nc // n_block_y)\n sz_dim = mtf.Dimension('sz_block', nc // n_block_z)\n\n k_dims = [tx_dim, ty_dim, tz_dim]\n\n batch_dim = mtf.Dimension(\"batch\", batch_size)\n pk_dim = mtf.Dimension(\"npk\", len(plin))\n pk = mtf.import_tf_tensor(mesh, plin.astype('float32'), shape=[pk_dim])\n\n # Compute necessary Fourier kernels\n kvec = flowpm.kernels.fftk((nc, nc, nc), symmetric=False)\n kx = mtf.import_tf_tensor(mesh,\n kvec[0].squeeze().astype('float32'),\n shape=[tfx_dim])\n ky = mtf.import_tf_tensor(mesh,\n kvec[1].squeeze().astype('float32'),\n shape=[tfy_dim])\n kz = mtf.import_tf_tensor(mesh,\n kvec[2].squeeze().astype('float32'),\n shape=[tfz_dim])\n kv = [ky, kz, kx]\n\n # kvec for low resolution grid\n kvec_lr = flowpm.kernels.fftk([lnc, lnc, lnc], symmetric=False)\n\n kx_lr = mtf.import_tf_tensor(mesh,\n kvec_lr[0].squeeze().astype('float32') /\n 2**downsampling_factor,\n shape=[tx_dim])\n ky_lr = mtf.import_tf_tensor(mesh,\n kvec_lr[1].squeeze().astype('float32') /\n 2**downsampling_factor,\n shape=[ty_dim])\n kz_lr = mtf.import_tf_tensor(mesh,\n kvec_lr[2].squeeze().astype('float32') /\n 2**downsampling_factor,\n shape=[tz_dim])\n kv_lr = [ky_lr, kz_lr, kx_lr]\n\n # kvec for high resolution blocks\n padded_sx_dim = mtf.Dimension('padded_sx_block',\n nc // n_block_x + 2 * halo_size)\n padded_sy_dim = mtf.Dimension('padded_sy_block',\n nc // n_block_y + 2 * halo_size)\n padded_sz_dim = mtf.Dimension('padded_sz_block',\n nc // n_block_z + 2 * halo_size)\n kvec_hr = flowpm.kernels.fftk([\n nc // n_block_x + 2 * halo_size, nc // n_block_y + 2 * halo_size,\n nc // n_block_z + 2 * halo_size\n ],\n symmetric=False)\n\n kx_hr = mtf.import_tf_tensor(mesh,\n kvec_hr[0].squeeze().astype('float32'),\n shape=[padded_sx_dim])\n ky_hr = mtf.import_tf_tensor(mesh,\n kvec_hr[1].squeeze().astype('float32'),\n shape=[padded_sy_dim])\n kz_hr = 
mtf.import_tf_tensor(mesh,\n kvec_hr[2].squeeze().astype('float32'),\n shape=[padded_sz_dim])\n kv_hr = [ky_hr, kz_hr, kx_hr]\n\n shape = [batch_dim, fx_dim, fy_dim, fz_dim]\n lr_shape = [batch_dim, x_dim, y_dim, z_dim]\n hr_shape = [batch_dim, nx_dim, ny_dim, nz_dim, sx_dim, sy_dim, sz_dim]\n part_shape = [batch_dim, fx_dim, fy_dim, fz_dim]\n\n # Begin simulation\n\n initc = mtfpm.linear_field(mesh, shape, bs, nc, pk, kv)\n\n # Reshaping array into high resolution mesh\n field = mtf.slicewise(lambda x: tf.expand_dims(\n tf.expand_dims(tf.expand_dims(x, axis=1), axis=1), axis=1), [initc],\n output_dtype=tf.float32,\n output_shape=hr_shape,\n name='my_reshape',\n splittable_dims=lr_shape[:-1] + hr_shape[1:4] +\n part_shape[1:3])\n\n for block_size_dim in hr_shape[-3:]:\n field = mtf.pad(field, [halo_size, halo_size], block_size_dim.name)\n\n for blocks_dim, block_size_dim in zip(hr_shape[1:4], field.shape[-3:]):\n field = mpm.halo_reduce(field, blocks_dim, block_size_dim, halo_size)\n\n field = mtf.reshape(field, field.shape + [mtf.Dimension('h_dim', 1)])\n high = field\n low = mesh_utils.downsample(field, downsampling_factor, antialias=True)\n\n low = mtf.reshape(low, low.shape[:-1])\n high = mtf.reshape(high, high.shape[:-1])\n\n for block_size_dim in hr_shape[-3:]:\n low = mtf.slice(low, halo_size // 2**downsampling_factor,\n block_size_dim.size // 2**downsampling_factor,\n block_size_dim.name)\n # Hack usisng custom reshape because mesh is pretty dumb\n low = mtf.slicewise(lambda x: x[:, 0, 0, 0], [low],\n output_dtype=tf.float32,\n output_shape=lr_shape,\n name='my_dumb_reshape',\n splittable_dims=lr_shape[:-1] + hr_shape[:4])\n\n state = mtfpm.lpt_init(\n low,\n high,\n 0.1,\n kv_lr,\n kv_hr,\n halo_size,\n hr_shape,\n lr_shape,\n part_shape[1:],\n downsampling_factor=downsampling_factor,\n antialias=True,\n )\n\n # Here we can run our nbody\n final_state = state #mtfpm.nbody(state, stages, lr_shape, hr_shape, k_dims, kv_lr, kv_hr, halo_size, downsampling_factor=downsampling_factor)\n\n # paint the field\n final_field = mtf.zeros(mesh, shape=hr_shape)\n for block_size_dim in hr_shape[-3:]:\n final_field = mtf.pad(final_field, [halo_size, halo_size],\n block_size_dim.name)\n final_field = mesh_utils.cic_paint(final_field, final_state[0], halo_size)\n # Halo exchange\n for blocks_dim, block_size_dim in zip(hr_shape[1:4], final_field.shape[-3:]):\n final_field = mpm.halo_reduce(final_field, blocks_dim, block_size_dim,\n halo_size)\n # Remove borders\n for block_size_dim in hr_shape[-3:]:\n final_field = mtf.slice(final_field, halo_size, block_size_dim.size,\n block_size_dim.name)\n\n #final_field = mtf.reshape(final_field, [batch_dim, fx_dim, fy_dim, fz_dim])\n # Hack usisng custom reshape because mesh is pretty dumb\n final_field = mtf.slicewise(lambda x: x[:, 0, 0, 0], [final_field],\n output_dtype=tf.float32,\n output_shape=[batch_dim, fx_dim, fy_dim, fz_dim],\n name='my_dumb_reshape',\n splittable_dims=part_shape[:-1] + hr_shape[:4])\n\n return initc, final_field\n\n ##",
"def test_c0q1(self):\n self.check_c0q1(test_hexMesh_3x3=False,use_petsc=True, name=\"_proteusMesh_\")",
"def test_reconstruction_against_simulation(subarray_and_event_gamma_off_axis_500_gev):\n\n # 4-LST bright event already calibrated\n # we'll clean it and parametrize it again in the TelescopeFrame\n subarray, event = subarray_and_event_gamma_off_axis_500_gev\n\n # define reconstructor\n reconstructor = HillasReconstructor(subarray)\n\n hillas_dict = {}\n telescope_pointings = {}\n\n for tel_id, dl1 in event.dl1.tel.items():\n\n telescope_pointings[tel_id] = SkyCoord(\n alt=event.pointing.tel[tel_id].altitude,\n az=event.pointing.tel[tel_id].azimuth,\n frame=AltAz(),\n )\n\n geom_CameraFrame = subarray.tel[tel_id].camera.geometry\n\n # this could be done also out of this loop,\n # but in case of real data each telescope would have a\n # different telescope_pointing\n geom_TelescopeFrame = geom_CameraFrame.transform_to(\n TelescopeFrame(telescope_pointing=telescope_pointings[tel_id])\n )\n\n mask = tailcuts_clean(\n geom_TelescopeFrame,\n dl1.image,\n picture_thresh=5.0,\n boundary_thresh=2.5,\n keep_isolated_pixels=False,\n min_number_picture_neighbors=2,\n )\n\n try:\n hillas_dict[tel_id] = hillas_parameters(\n geom_TelescopeFrame[mask], dl1.image[mask]\n )\n\n # the original event is created from a\n # pytest fixture with \"session\" scope, so it's always the same\n # and if we used the same event we would overwrite the image\n # parameters for the next tests, thus causing their failure\n test_event = deepcopy(event)\n test_event.dl1.tel[tel_id].parameters = ImageParametersContainer()\n test_event.dl1.tel[tel_id].parameters.hillas = hillas_dict[tel_id]\n\n except HillasParameterizationError as e:\n print(e)\n continue\n\n # Get shower geometry\n reconstructor(event)\n # get the result from the correct DL2 container\n result = event.dl2.stereo.geometry[\"HillasReconstructor\"]\n\n # get the reconstructed coordinates in the sky\n reco_coord = SkyCoord(alt=result.alt, az=result.az, frame=AltAz())\n # get the simulated coordinates in the sky\n true_coord = SkyCoord(\n alt=event.simulation.shower.alt, az=event.simulation.shower.az, frame=AltAz()\n )\n\n # check that we are not more far than 0.1 degrees\n assert reco_coord.separation(true_coord) < 0.1 * u.deg",
"def test_2_layer():\r\n # angular frequency in radians * THz\r\n w = 100 * nu.THz\r\n # Relative permittivity of metal and dielectric\r\n em = -4.56 + 0.12j\r\n ed = 1.23 + 0.01j\r\n ex_list = ez_list = [ed, em]\r\n # Relative permeabilities\r\n mu_list = [1,1]\r\n # Dictionary of input parameters\r\n input_params = {'w': w, 'd_list': [inf,inf], 'ex_list': ex_list,\r\n 'ez_list': ez_list, 'mu_list': mu_list}\r\n \r\n # Calculate the theoretical kx\r\n theo_kx = (w / nu.c0) * cmath.sqrt((em * ed) / (em + ed))\r\n if theo_kx.imag < 0:\r\n theo_kx *= -1\r\n print('Theoretical kx:',\r\n '(%.7g+%.7gj) rad/um' % (theo_kx.real / nu.um**-1, theo_kx.imag / nu.um**-1))\r\n \r\n # If I use the theoretical kx value, the mode should be correct and\r\n # all my tests should pass.\r\n params = deepcopy(input_params)\r\n params['kx'] = theo_kx\r\n params = find_all_params_from_kx(params)\r\n kzd, kzm = params['kz_list']\r\n # check that kz_list is correct\r\n assert_floats_are_equal(kzd**2, (w**2 / nu.c0**2) * ed**2 / (em + ed))\r\n assert_floats_are_equal(kzm**2, (w**2 / nu.c0**2) * em**2 / (em + ed))\r\n # check that layer_bottom_list is correct\r\n assert params['layer_bottom_list'][0] == -inf\r\n assert params['layer_bottom_list'][1] == 0\r\n # Check that the boundary condition matrix agrees with hand-calculation\r\n bc_mat = bc_matrix(params)\r\n # ...top-left is Ex0down / H0down\r\n assert_floats_are_equal(bc_mat[0,0], -kzd / (w * ed * nu.eps0))\r\n # ...top-right is -Ex1up / H1up\r\n assert_floats_are_equal(bc_mat[0,1], -kzm / (w * em * nu.eps0))\r\n # ...bottom-left is eps0 * Ez0down / H0down\r\n assert_floats_are_equal(bc_mat[1,0], ed * -theo_kx / (w * ed * nu.eps0))\r\n # ...bottom-right is -eps1 * Ez1up / H1up\r\n assert_floats_are_equal(bc_mat[1,1], -em * -theo_kx / (w * em * nu.eps0))\r\n # Check that one of the eigenvalues is almost zero (compared to the size\r\n # of the matrix elements).\r\n eigenvalues = np.linalg.eig(bc_mat)[0]\r\n assert abs(eigenvalues).min() / abs(bc_mat).max() < 1e-6\r\n # Check that the mode passes all tests.\r\n assert check_mode(params, thorough=True) is True\r\n # Check that I can scale the fields and it still passes all tests.\r\n params_scaled = rescale_fields(1.23+4.56j, params)\r\n assert check_mode(params_scaled, thorough=True) is True\r\n \r\n # Now try my kx-finding algorithm, to see if it finds the right value.\r\n kx_list = find_kx(input_params)\r\n print('kx_list:',\r\n ['(%.7g+%.7gj) rad/um' % (kx.real / nu.um**-1, kx.imag / nu.um**-1)\r\n for kx in kx_list])\r\n kx = kx_list[0]\r\n assert_floats_are_equal(theo_kx, kx)\r\n \r\n plot_mode(params)\r\n \r\n print('If you see this message, all the tests succeeded!!')",
"def test_1_2_2D_cube_splits(self):\n check = [(0, 0), (1, 1), (1, 0), (0, 1), (0.5, 0.5), (0.0, 0.5),\n (0.5, 0.0),\n (0.25, 0.25), (1.0, 0.5), (0.5, 1.0), (0.75, 0.75),\n (0.75, 0.25),\n (0.25, 0.75), (0.5, 0.25), (0.25, 0.5), (0.375, 0.375),\n (0.0, 0.25),\n (0.25, 0.0), (0.125, 0.125), (0.125, 0.375), (0.375, 0.125),\n (0.5, 0.75), (0.75, 0.5), (0.625, 0.625), (1.0, 0.75),\n (0.75, 1.0),\n (0.875, 0.875), (0.875, 0.625), (0.625, 0.875), (0.625, 0.375),\n (1.0, 0.25), (0.75, 0.0), (0.875, 0.125), (0.875, 0.375),\n (0.625, 0.125), (0.375, 0.625), (0.0, 0.75), (0.25, 1.0),\n (0.125, 0.875), (0.125, 0.625), (0.375, 0.875)]\n\n nn_checks = {(0, 0): [(0.25, 0.0), (0.0, 0.25), (0.125, 0.125)],\n (0.625, 0.375): [(0.5, 0.5), (0.75, 0.25), (0.75, 0.5),\n (0.5, 0.25)],\n (0, 1): [(0.25, 1.0), (0.125, 0.875),(0.0, 0.75)],\n (0.625, 0.125): [(0.5, 0.0), (0.75, 0.25), (0.75, 0.0),\n (0.5, 0.25)]}\n\n\n init_triangulation(2, 2, check, nn_checks)",
"def test_2D_m8_1k():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: 'gpu_1k',\n Splitting: 'o2'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: '',\n Splitting: 'o2'},\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def hexapodZernikeMultiLinearModel_hexapodcoordinate():\n Tfile='/home/jghao/research/decamFocus/psf_withseeing/finerGrid_coeff_matrix/zernike_coeff_finerGrid_training.cp'\n Vfile = '/home/jghao/research/decamFocus/psf_withseeing/finerGrid_coeff_matrix/zernike_coeff_finerGrid_validate.cp'\n b=p.load(open(Tfile))\n vb=p.load(open(Vfile))\n nobs = len(b)\n x = b[:,0]\n y = b[:,1]\n z = b[:,2]\n theta = b[:,3]\n phi = b[:,4]\n fwhm = b[:,5]\n e1 = b[:,6]\n e2 = b[:,7]\n thetax = theta*np.cos(np.deg2rad(phi))\n thetay = theta*np.sin(np.deg2rad(phi))\n xh = x*1000 # convert to hexapod coordinate\n yh = -y*1000\n zh = -z*1000\n xtilth = - thetay\n ytilth = - thetax\n dataX = b[:,8:68]\n coeff_xh = sm.WLS(xh,dataX).fit().params\n coeff_yh = sm.WLS(yh,dataX).fit().params\n coeff_zh = sm.WLS(zh,dataX).fit().params\n coeff_xtilth = sm.WLS(xtilth,dataX).fit().params\n coeff_ytilth = sm.WLS(ytilth,dataX).fit().params\n coeff = np.array([coeff_xh,coeff_yh,coeff_zh,coeff_xtilth,coeff_ytilth])\n vx = vb[:,0]\n vy = vb[:,1]\n vz = vb[:,2]\n vtheta = vb[:,3]\n vphi = vb[:,4]\n vfwhm = vb[:,5]\n ve1 = vb[:,6]\n ve2 = vb[:,7]\n vthetax = vtheta*np.cos(np.deg2rad(vphi))\n vthetay = vtheta*np.sin(np.deg2rad(vphi))\n vxh = vx*1000 # convert to hexapod coordinate\n vyh = -vy*1000\n vzh = -vz*1000\n vxtilth = - vthetay\n vytilth = - vthetax\n vdataX = vb[:,8:68]\n fit = np.dot(vdataX,coeff.T)\n bp.bin_scatter(vxh,fit[:,0],nbins=20,fmt='bo',scatter=True)\n bp.bin_scatter(vyh,fit[:,1],nbins=20,fmt='bo',scatter=True)\n bp.bin_scatter(vzh,fit[:,2],nbins=20,fmt='bo',scatter=True)\n bp.bin_scatter(vxtilth,fit[:,3],nbins=20,fmt='bo',scatter=True)\n bp.bin_scatter(vytilth,fit[:,4],nbins=20,fmt='bo',scatter=True)",
"def test_MeshMat_1group(self):\n\n MS_grp = self.meshsol.get_group(\"stator\")\n cells_grp, nb_cell, indices = MS_grp.get_mesh().get_cell()\n solution = np.array([[0, 1, 2], [1, 2, 3]])\n result_tgl = cells_grp[\"triangle\"]\n testA = np.sum(abs(solution - result_tgl))\n msg = (\n \"Wrong output: returned \" + str(result_tgl) + \", expected: \" + str(solution)\n )\n self.assertAlmostEqual(testA, 0, msg=msg, delta=self.DELTA)\n\n MS_grp = self.meshsol.get_group(\"rotor\")\n cells_grp, nb_cell, indices = MS_grp.get_mesh().get_cell()\n solution = np.array([[3, 3], [1, 2], [2, 3]])\n results = cells_grp[\"triangle\"] # The point indices have changed !\n points = MS_grp.get_mesh().get_point(results)\n testA = np.sum(abs(solution - points))\n msg = \"Wrong output: returned \" + str(results) + \", expected: \" + str(solution)\n self.assertAlmostEqual(testA, 0, msg=msg, delta=self.DELTA)",
"def test_2D_m8_1k_sFH():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: 'gpu_1k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def test_spatial_smoothing_xesmf_reduce_spatial_dims_MPI_curv(\r\n PM_ds_control_3d_full,\r\n):\r\n da = PM_ds_control_3d_full\r\n step = 5\r\n actual = spatial_smoothing_xesmf(\r\n da,\r\n d_lon_lat_kws={\"lon\": step},\r\n )\r\n expected_lat_size = 180 // step\r\n assert actual[\"lon\"].size < da.lon.size\r\n assert actual[\"lat\"].size == expected_lat_size",
"def eg3(N_train=1000, N_test=500, depend_ratio_train=0.8, depend_ratio_test=0.2, feature_num=10, stable_ratio=0.4):\n\n def eg3_kernel(n, p, stable_ratio=0.4, depend_ratio=0.8):\n p_stable = int(p * stable_ratio)\n p_noise = p - p_stable\n stable_feature = np.random.randn(n, p_stable)\n noise_feature_dependent = np.zeros([n, p_noise])\n noise_feature_independent = np.random.randn(n, p_noise)\n for i in range(p_noise):\n noise_feature_dependent[:, i] = stable_feature[:, i % p_stable] + stable_feature[:,\n (i + 1) % p_stable] + 2 * np.random.randn(\n n) # still need noise\n noise_depend_label = np.random.uniform(0, 1, n).reshape(-1, 1)\n noise_depend_label = np.concatenate([noise_depend_label] * p_noise, axis=1)\n noise_feature = np.where(noise_depend_label < depend_ratio, noise_feature_dependent, noise_feature_independent)\n\n b = np.zeros([p_stable, 1])\n linear_len = int(p_stable / 2)\n\n for i in range(linear_len): # linear part\n b[i, 0] = (-1) ** i * (i % 3 + 1) * p / 3\n for i in range(linear_len, b.shape[0]): # nonlinear part\n b[i, 0] = p / 2\n\n linear_part = np.matmul(stable_feature[:, :linear_len], b[:linear_len, 0])\n nolinear_part = np.zeros([n, 1])\n for i in range(linear_len, b.shape[0]):\n temp = stable_feature[:, i % p_stable] * stable_feature[:, (i + 1) % p_stable] * b[i, 0]\n temp = temp.reshape(-1, 1)\n nolinear_part += temp\n\n Y = linear_part.reshape(-1, 1) + nolinear_part + np.random.randn(n, 1)\n\n data = {}\n data['stable'] = stable_feature\n data['noise'] = noise_feature\n data['Y'] = Y\n data['params'] = b\n data['kernel'] = 'eg3'\n return data\n\n data_train = eg3_kernel(n=N_train, p=feature_num, stable_ratio=stable_ratio, depend_ratio=depend_ratio_train)\n data_test = eg3_kernel(n=N_test, p=feature_num, stable_ratio=stable_ratio, depend_ratio=depend_ratio_test)\n return data_train, data_test",
"def Sphere_ExactSerendipityLagrangeQuad():\n\n mesh = Sphere_CubeToSerendipityLagrangeQuad(1)\n \n ################\n # Modifications for exact sphere\n ################\n # x=+1 side\n def posXvals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.ones(xi1.shape);yb=np.array(-xi1);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n x = xb*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n y = yb*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n z = zb*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n return np.vstack([x,y,z])\n def posXnormals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.ones(xi1.shape);yb=np.array(-xi1);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = -1.0 * xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-yb+2.0*yb*zz/3.0)\n dxdxi2 = 1.0 * xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-zb+2.0*yy*zb/3.0)\n dydxi1 = -1.0 * np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n dydxi2 = 1.0 * yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-zb+2.0*xx*zb/3.0)\n dzdxi1 = -1.0 * zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-yb+2.0*xx*yb/3.0)\n dzdxi2 = 1.0 * np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n magnitude = np.sqrt(np.sum(J**2,axis=1))\n return J.T/magnitude\n def posXJ(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.ones(xi1.shape);yb=np.array(-xi1);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = -1.0 * xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-yb+2.0*yb*zz/3.0)\n dxdxi2 = 1.0 * xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-zb+2.0*yy*zb/3.0)\n dydxi1 = -1.0 * np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n dydxi2 = 1.0 * yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-zb+2.0*xx*zb/3.0)\n dzdxi1 = -1.0 * zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-yb+2.0*xx*yb/3.0)\n dzdxi2 = 1.0 * np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n return np.sqrt(np.sum(J**2,axis=1))\n mesh.eList[0].vals = posXvals\n mesh.eList[0].normals = posXnormals\n mesh.eList[0].J = posXJ\n \n def posYvals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1);yb=np.ones(xi1.shape);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n x = xb*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n y = yb*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n z = zb*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n return np.vstack([x,y,z])\n def posYnormals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1);yb=np.ones(xi1.shape);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = 1.0*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n dxdxi2 = 1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-zb+2.0*yy*zb/3.0)\n dydxi1 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-xb+2.0*xb*zz/3.0)\n dydxi2 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-zb+2.0*xx*zb/3.0)\n dzdxi1 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-xb+2.0*xb*yy/3.0)\n dzdxi2 = 1.0*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n magnitude = np.sqrt(np.sum(J**2,axis=1))\n return 
J.T/magnitude\n def posYJ(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1);yb=np.ones(xi1.shape);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = 1.0*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n dxdxi2 = 1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-zb+2.0*yy*zb/3.0)\n dydxi1 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-xb+2.0*xb*zz/3.0)\n dydxi2 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-zb+2.0*xx*zb/3.0)\n dzdxi1 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-xb+2.0*xb*yy/3.0)\n dzdxi2 = 1.0*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n return np.sqrt(np.sum(J**2,axis=1))\n mesh.eList[1].vals = posYvals\n mesh.eList[1].normals = posYnormals\n mesh.eList[1].J = posYJ\n \n # x=-1 side\n def negXvals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=-np.ones(xi1.shape);yb=np.array(xi1);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n x = xb*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n y = yb*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n z = zb*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n return np.vstack([x,y,z])\n def negXnormals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=-np.ones(xi1.shape);yb=np.array(xi1);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = 1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-yb+2.0*yb*zz/3.0)\n dxdxi2 = 1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-zb+2.0*yy*zb/3.0)\n dydxi1 = 1.0*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n dydxi2 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-zb+2.0*xx*zb/3.0)\n dzdxi1 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-yb+2.0*xx*yb/3.0)\n dzdxi2 = 1.0*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n magnitude = np.sqrt(np.sum(J**2,axis=1))\n return J.T/magnitude\n def negXJ(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=-np.ones(xi1.shape);yb=np.array(xi1);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = 1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-yb+2.0*yb*zz/3.0)\n dxdxi2 = 1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-zb+2.0*yy*zb/3.0)\n dydxi1 = 1.0*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n dydxi2 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-zb+2.0*xx*zb/3.0)\n dzdxi1 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-yb+2.0*xx*yb/3.0)\n dzdxi2 = 1.0*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n return np.sqrt(np.sum(J**2,axis=1))\n mesh.eList[2].vals = negXvals\n mesh.eList[2].normals = negXnormals\n mesh.eList[2].J = negXJ\n\n # y=-1 side\n def negYvals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(-xi1);yb=-np.ones(xi1.shape);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n x = xb*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n y = yb*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n z = zb*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n return np.vstack([x,y,z])\n def negYnormals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n 
xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(-xi1);yb=-np.ones(xi1.shape);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2; \n dxdxi1 = -1.0*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n dxdxi2 = 1.0*0.5*xb*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5*(-zb+2.0*yy*zb/3.0)\n dydxi1 = -1.0*0.5*yb*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5*(-xb+2.0*xb*zz/3.0)\n dydxi2 = 1.0*0.5*yb*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5*(-zb+2.0*xx*zb/3.0)\n dzdxi1 = -1.0*0.5*zb*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5*(-xb+2.0*xb*yy/3.0)\n dzdxi2 = 1.0*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0) \n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n magnitude = np.sqrt(np.sum(J**2,axis=1))\n return J.T/magnitude\n def negYJ(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(-xi1);yb=-np.ones(xi1.shape);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2; \n dxdxi1 = -1.0*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n dxdxi2 = 1.0*0.5*xb*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5*(-zb+2.0*yy*zb/3.0)\n dydxi1 = -1.0*0.5*yb*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5*(-xb+2.0*xb*zz/3.0)\n dydxi2 = 1.0*0.5*yb*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5*(-zb+2.0*xx*zb/3.0)\n dzdxi1 = -1.0*0.5*zb*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5*(-xb+2.0*xb*yy/3.0)\n dzdxi2 = 1.0*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0) \n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n return np.sqrt(np.sum(J**2,axis=1))\n mesh.eList[3].vals = negYvals\n mesh.eList[3].normals = negYnormals\n mesh.eList[3].J = negYJ\n \n # z=+1 side\n def posZvals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1)\n yb=np.array(-xi2)\n zb=np.ones(xi1.shape)\n xx=xb**2;yy=yb**2;zz=zb**2;\n x = xb*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n y = yb*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n z = zb*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n return np.vstack([x,y,z])\n def posZnormals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1);yb=np.array(-xi2);zb=np.ones(xi1.shape);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = 1.0*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n dxdxi2 = -1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-yb+2.0*yb*zz/3.0)\n dydxi1 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-xb+2.0*xb*zz/3.0)\n dydxi2 = -1.0*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n dzdxi1 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-xb+2.0*xb*yy/3.0)\n dzdxi2 = -1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-yb+2.0*xx*yb/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n magnitude = np.sqrt(np.sum(J**2,axis=1))\n return J.T/magnitude\n def posZJ(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1);yb=np.array(-xi2);zb=np.ones(xi1.shape);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = 1.0*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n dxdxi2 = -1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-yb+2.0*yb*zz/3.0)\n dydxi1 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-xb+2.0*xb*zz/3.0)\n dydxi2 = -1.0*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n dzdxi1 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-xb+2.0*xb*yy/3.0)\n dzdxi2 = -1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * 
(-yb+2.0*xx*yb/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n return np.sqrt(np.sum(J**2,axis=1))\n mesh.eList[4].vals = posZvals\n mesh.eList[4].normals = posZnormals\n mesh.eList[4].J = posZJ\n \n # z=-1 side\n def negZvals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1);yb=np.array(xi2);zb=-np.ones(xi1.shape);\n xx=xb**2;yy=yb**2;zz=zb**2;\n x = xb*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n y = yb*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n z = zb*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n return np.vstack([x,y,z])\n def negZnormals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1);yb=np.array(xi2);zb=-np.ones(xi1.shape);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = 1.0*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n dxdxi2 = 1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-yb+2.0*yb*zz/3.0)\n dydxi1 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-xb+2.0*xb*zz/3.0)\n dydxi2 = 1.0*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n dzdxi1 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-xb+2.0*xb*yy/3.0)\n dzdxi2 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-yb+2.0*xx*yb/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n magnitude = np.sqrt(np.sum(J**2,axis=1))\n return J.T/magnitude\n def negZJ(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1);yb=np.array(xi2);zb=-np.ones(xi1.shape);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = 1.0*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n dxdxi2 = 1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-yb+2.0*yb*zz/3.0)\n dydxi1 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-xb+2.0*xb*zz/3.0)\n dydxi2 = 1.0*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n dzdxi1 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-xb+2.0*xb*yy/3.0)\n dzdxi2 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-yb+2.0*xx*yb/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n return np.sqrt(np.sum(J**2,axis=1))\n mesh.eList[5].vals = negZvals\n mesh.eList[5].normals = negZnormals\n mesh.eList[5].J = negZJ\n \n for e in mesh.eList:\n e.ExactElement = True\n \n return mesh",
"def init():\n\tN = np.int32(DIM) #prepare for stitching\n\t#HII_DIM = np.int32(HII_DIM)\n\tf_pixel_factor = DIM/HII_DIM;\n\tscale = np.float32(BOX_LEN)/DIM\n\tHII_scale = np.float32(BOX_LEN)/HII_DIM\n\tshape = (N,N,N)\n\t\n\tMRGgen = MRG32k3aRandomNumberGenerator(seed_getter=seed_getter_uniform, offset=0)\n\n\tkernel_source = open(cmd_folder+\"/initialize.cu\").read()\n\tkernel_code = kernel_source % {\n\n\t\t'DELTAK': DELTA_K,\n\t\t'VOLUME': VOLUME,\n\t\t'DIM': DIM\n\t}\n\tmain_module = nvcc.SourceModule(kernel_code)\n\tinit_kernel = main_module.get_function(\"init_kernel\")\n\tHII_filter = main_module.get_function(\"HII_filter\")\n\tadj_complex_conj = main_module.get_function(\"adj_complex_conj\")\n\tsubsample_kernel = main_module.get_function(\"subsample\")\n\tvelocity_kernel = main_module.get_function(\"set_velocity\")\n\tpspec_texture = main_module.get_texref(\"pspec\")\n\n\tinterpPspec, interpSize = init_pspec() #interpPspec contains both k array and P array\n\tinterp_cu = cuda.matrix_to_array(interpPspec, order='F')\n\tcuda.bind_array_to_texref(interp_cu, pspec_texture)\n\n\tlargebox_d = gpuarray.zeros(shape, dtype=np.float32)\n\tinit_kernel(largebox_d, np.int32(DIM), block=block_size, grid=grid_size)\n\n\t#import IPython; IPython.embed()\n\tlargebox_d_imag = gpuarray.zeros(shape, dtype=np.float32)\n\tinit_kernel(largebox_d_imag, np.int32(DIM), block=block_size, grid=grid_size)\n\n\tlargebox_d *= MRGgen.gen_normal(shape, dtype=np.float32)\n\tlargebox_d_imag *= MRGgen.gen_normal(shape, dtype=np.float32)\n\tlargebox_d = largebox_d + np.complex64(1.j) * largebox_d_imag\n\n\t#adj_complex_conj(largebox_d, DIM, block=block_size, grid=grid_size)\n\tlargebox = largebox_d.get()\n\t#np.save(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc\".format(DIM, BOX_LEN), largebox)\n\n\t#save real space box before smoothing\n\tplan = Plan(shape, dtype=np.complex64)\n\tplan.execute(largebox_d, inverse=True) #FFT to real space of smoothed box\n\tlargebox_d /= scale**3\n\tnp.save(parent_folder+\"/Boxes/deltax_z0.00_{0:d}_{1:.0f}Mpc\".format(DIM, BOX_LEN), largebox_d.real.get_async())\n\n\t#save real space box after smoothing and subsampling\n\t# host largebox is still in k space, no need to reload from disk\n\tlargebox_d = gpuarray.to_gpu(largebox)\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\tHII_filter(largebox_d, N, ZERO, smoothR, block=block_size, grid=grid_size);\n\tplan.execute(largebox_d, inverse=True) #FFT to real space of smoothed box\n\tlargebox_d /= scale**3\n\tsmallbox_d = gpuarray.zeros(HII_shape, dtype=np.float32)\n\tsubsample_kernel(largebox_d.real, smallbox_d, N, HII_DIM, PIXEL_FACTOR, block=block_size, grid=HII_grid_size) #subsample in real space\n\tnp.save(parent_folder+\"/Boxes/smoothed_deltax_z0.00_{0:d}_{1:.0f}Mpc\".format(HII_DIM, BOX_LEN), smallbox_d.get_async())\n\n\t# reload the k-space box for velocity boxes\n\tlargebox_d = gpuarray.to_gpu(largebox)\n\t\n\t#largebox_d /= VOLUME #divide by VOLUME if using fft (vs ifft)\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\tlargevbox_d = gpuarray.zeros((DIM,DIM,DIM), dtype=np.complex64)\n\tsmallbox_d = gpuarray.zeros(HII_shape, dtype=np.float32)\n\tfor num, mode in enumerate(['x', 'y', 'z']):\n\t\tvelocity_kernel(largebox_d, largevbox_d, DIM, np.int32(num), block=block_size, grid=grid_size)\n\t\tHII_filter(largevbox_d, DIM, ZERO, smoothR, block=block_size, grid=grid_size)\n\t\tplan.execute(largevbox_d, inverse=True)\n\t\tlargevbox_d /= scale**3\n\t\t#import IPython; IPython.embed()\n\t\tsubsample_kernel(largevbox_d.real, 
smallbox_d, DIM, HII_DIM,PIXEL_FACTOR, block=block_size, grid=HII_grid_size)\n\t\tnp.save(parent_folder+\"/Boxes/v{0}overddot_{1:d}_{2:.0f}Mpc\".format(mode, HII_DIM, BOX_LEN), smallbox_d.get())\n\n\treturn",
"def test_3D_m6_1k():\n scal, velo = setup_3D()\n\n advec = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L4_2,\n Support: 'gpu_1k',\n Splitting: 'o2'}\n )\n advec_py = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L4_2,\n Support: '',\n Splitting: 'o2'},\n )\n assertion_3D_withPython(scal, velo, advec, advec_py)",
"def par_test_8(self):\n\n for i in range(4):\n self.XY_par_factor.setMaxDepth(i)\n self.XY_par_factor.setMaxDepth(i)\n\n res = self.XY_factor.mult(self.XY_factor)\n par_res = self.XY_par_factor.mult(self.XY_par_factor)\n assert res.rand_vars == par_res.rand_vars and res.values == par_res.values",
"def test_3D_m4_1k_sFH():\n scal, velo = setup_3D()\n\n advec = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: 'gpu_1k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_3D_withPython(scal, velo, advec, advec_py)",
"def init_stitch(N):\n\tif N is None:\n\t\tN = np.int32(HII_DIM) #prepare for stitching\n\tMETA_GRID_SIZE = DIM/N\n\tM = np.int32(HII_DIM/META_GRID_SIZE)\n\t#HII_DIM = np.int32(HII_DIM)\n\tf_pixel_factor = DIM/HII_DIM;\n\tscale = np.float32(BOX_LEN/DIM)\n\tprint 'scale', scale\n\tHII_scale = np.float32(BOX_LEN/HII_DIM)\n\tshape = (DIM,DIM,N)\n\tstitch_grid_size = (DIM/(block_size[0]),\n\t\t\t\t\t\tDIM/(block_size[0]),\n\t\t\t\t\t\tN/(block_size[0]))\n\tHII_stitch_grid_size = (HII_DIM/(block_size[0]),\n\t\t\t\t\t\tHII_DIM/(block_size[0]),\n\t\t\t\t\t\tM/(block_size[0]))\n\t#ratio of large box to small size\n\tkernel_source = open(cmd_folder+\"/initialize_stitch.cu\").read()\n\tkernel_code = kernel_source % {\n\n\t\t'DELTAK': DELTA_K,\n\t\t'DIM': DIM, \n\t\t'VOLUME': VOLUME,\n\t\t'META_BLOCKDIM': N\n\t}\n\tmain_module = nvcc.SourceModule(kernel_code)\n\tinit_stitch = main_module.get_function(\"init_kernel\")\n\tHII_filter = main_module.get_function(\"HII_filter\")\n\tsubsample_kernel = main_module.get_function(\"subsample\")\n\tvelocity_kernel = main_module.get_function(\"set_velocity\")\n\tpspec_texture = main_module.get_texref(\"pspec\")\n\tMRGgen = MRG32k3aRandomNumberGenerator(seed_getter=seed_getter_uniform, offset=0)\n\tplan2d = Plan((np.int64(DIM), np.int64(DIM)), dtype=np.complex64)\n\tplan1d = Plan((np.int64(DIM)), dtype=np.complex64)\n\tprint \"init pspec\"\n\tinterpPspec, interpSize = init_pspec() #interpPspec contains both k array and P array\n\tinterp_cu = cuda.matrix_to_array(interpPspec, order='F')\n\tcuda.bind_array_to_texref(interp_cu, pspec_texture)\n\t#hbox_large = pyfftw.empty_aligned((DIM, DIM, DIM), dtype='complex64')\n\thbox_large = np.zeros((DIM, DIM, DIM), dtype=np.complex64)\n\t#hbox_small = np.zeros(HII_shape, dtype=np.float32)\n\t#hbox_large = n\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\n\t# Set up pinned memory for transfer\n\t#largebox_hs = cuda.aligned_empty(shape=shape, dtype=np.float32, alignment=resource.getpagesize())\n\tlargebox_pin = cuda.pagelocked_empty(shape=shape, dtype=np.float32)\n\tlargecbox_pin = cuda.pagelocked_empty(shape=shape, dtype=np.complex64)\n\n\tlargebox_d = gpuarray.zeros(shape, dtype=np.float32)\n\tlargebox_d_imag = gpuarray.zeros(shape, dtype=np.float32)\n\tprint \"init boxes\"\n\tfor meta_z in xrange(META_GRID_SIZE):\n\t\t# MRGgen = MRG32k3aRandomNumberGenerator(seed_getter=seed_getter_uniform, offset=meta_x*N**3)\n\t\tinit_stitch(largebox_d, DIM, np.int32(meta_z),block=block_size, grid=stitch_grid_size)\n\t\tinit_stitch(largebox_d_imag, DIM, np.int32(meta_z),block=block_size, grid=stitch_grid_size)\n\t\tlargebox_d *= MRGgen.gen_normal(shape, dtype=np.float32)\n\t\tlargebox_d_imag *= MRGgen.gen_normal(shape, dtype=np.float32)\n\t\tlargebox_d = largebox_d + np.complex64(1.j) * largebox_d_imag\n\t\tcuda.memcpy_dtoh_async(largecbox_pin, largebox_d)\n\t\thbox_large[:, :, meta_z*N:(meta_z+1)*N] = largecbox_pin.copy()\n\t#if want to get velocity need to use this\n\tif True:\n\t\tprint \"saving kbox\"\n\t\tnp.save(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN), hbox_large)\n\n\tprint \"Executing FFT on device\"\n\t#hbox_large = pyfftw.interfaces.numpy_fft.ifftn(hbox_large).real\n\thbox_large = fft_stitch(N, plan2d, plan1d, hbox_large, largebox_d).real\n\tprint hbox_large.dtype\n\tprint \"Finished FFT on device\"\n\tnp.save(parent_folder+\"/Boxes/deltax_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN), hbox_large)\n\t\n\tif True:\n\t\tprint \"loading kbox\"\n\t\thbox_large = 
np.load(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN))\n\tfor meta_z in xrange(META_GRID_SIZE):\n\t\tlargebox_pin = hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy()\n\t\t#cuda.memcpy_htod_async(largebox_d, largebox_pin)\n\t\tlargebox_d = gpuarray.to_gpu_async(hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\tHII_filter(largebox_d, DIM, np.int32(meta_z), ZERO, smoothR, block=block_size, grid=stitch_grid_size);\n\t\thbox_large[:, :, meta_z*N:(meta_z+1)*N] = largebox_d.get_async()\n\t#import IPython; IPython.embed()\n\tprint \"Executing FFT on host\"\n\t#hbox_large = hifft(hbox_large).astype(np.complex64).real\n\t#hbox_large = pyfftw.interfaces.numpy_fft.ifftn(hbox_large).real\n\thbox_large = fft_stitch(N, plan2d, plan1d, hbox_large, largebox_d).real\n\tprint \"Finished FFT on host\"\n\t#import IPython; IPython.embed()\n\n\t# for meta_x in xrange(META_GRID_SIZE):\n\t# \tfor meta_y in xrange(META_GRID_SIZE):\n\t# \t\tfor meta_z in xrange(META_GRID_SIZE):\n\t# \t\t\tlargebox_d = gpuarray.to_gpu(hbox_large[meta_x*N:(meta_x+1)*N, meta_y*N:(meta_y+1)*N, meta_z*N:(meta_z+1)*N])\n\t# \t\t\tHII_filter(largebox_d, N, np.int32(meta_x), np.int32(meta_y), np.int32(meta_z), ZERO, smoothR, block=block_size, grid=grid_size);\n\t# \t\t\thbox_large[meta_x*N:(meta_x+1)*N, meta_y*N:(meta_y+1)*N, meta_z*N:(meta_z+1)*N] = largebox_d.get()\n\t#plan = Plan(shape, dtype=np.complex64)\n\t#plan.execute(largebox_d, inverse=True) #FFT to real space of smoothed box\n\t#largebox_d /= VOLUME #divide by VOLUME if using fft (vs ifft)\n\n\n\t# This saves a large resolution deltax\n\n\t\n\tprint \"downsampling\"\n\tsmallbox_d = gpuarray.zeros((HII_DIM,HII_DIM,M), dtype=np.float32)\n\tfor meta_z in xrange(META_GRID_SIZE):\n\t\tlargebox_pin = hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy()\n\t\tcuda.memcpy_dtoh_async(largecbox_pin, largebox_d)\n\t\t#largebox_d = gpuarray.to_gpu_async(hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\tlargebox_d /= scale**3 #\n\t\tsubsample_kernel(largebox_d, smallbox_d, DIM, HII_DIM, PIXEL_FACTOR, block=block_size, grid=HII_stitch_grid_size) #subsample in real space\n\t\thbox_small[:, :, meta_z*M:(meta_z+1)*M] = smallbox_d.get_async()\n\tnp.save(parent_folder+\"/Boxes/smoothed_deltax_z0.00_{0:d}_{1:.0f}Mpc\".format(HII_DIM, BOX_LEN), hbox_small)\n\t#import IPython; IPython.embed()\n\n\n\t# To get velocities: reload the k-space box\n\thbox_large = np.load(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN))\n\thvbox_large = np.zeros((DIM, DIM, DIM), dtype=np.float32)\n\thvbox_small = np.zeros(HII_shape, dtype=np.float32)\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\tlargevbox_d = gpuarray.zeros((DIM,DIM,N), dtype=np.complex64)\n\tsmallvbox_d = gpuarray.zeros((HII_DIM, HII_DIM, M), dtype=np.float32)\n\tfor num, mode in enumerate(['x', 'y', 'z']):\n\t\tfor meta_z in xrange(META_GRID_SIZE):\n\t\t\tlargebox_d = gpuarray.to_gpu_async(hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\t\t#largebox_d /= VOLUME #divide by VOLUME if using fft (vs ifft)\n\t\t\tvelocity_kernel(largebox_d, largevbox_d, DIM, np.int32(meta_z), np.int32(num), block=block_size, grid=stitch_grid_size)\n\t\t\tHII_filter(largevbox_d, DIM, ZERO, smoothR, block=block_size, grid=stitch_grid_size)\n\t\t\tprint hvbox_large.shape, largevbox_d.shape\n\t\t\thvbox_large[:, :, meta_z*N:(meta_z+1)*N] = largevbox_d.get_async()\n\t\thvbox_large = fft_stitch(N, plan2d, plan1d, hvbox_large, largevbox_d).real\n\t\tfor meta_z in xrange(META_GRID_SIZE):\n\t\t\tlargevbox_d = 
gpuarray.to_gpu_async(hvbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\t\tsubsample_kernel(largevbox_d.real, smallvbox_d, DIM, HII_DIM,PIXEL_FACTOR, block=block_size, grid=HII_stitch_grid_size)\n\t\t\thvbox_small[:, :, meta_z*M:(meta_z+1)*M] = smallvbox_d.get_async()\n\t\tnp.save(parent_folder+\"/Boxes/v{0}overddot_{1:d}_{2:.0f}Mpc\".format(mode, HII_DIM, BOX_LEN), smallvbox_d.get())\n\n\treturn",
"def test_3D_m6_2k():\n scal, velo = setup_3D()\n\n advec = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L4_2,\n Support: 'gpu_2k',\n Splitting: 'o2'}\n )\n advec_py = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L4_2,\n Support: '',\n Splitting: 'o2'}\n )\n assertion_3D_withPython(scal, velo, advec, advec_py)",
"def test_3D_m6_1k_sFH():\n scal, velo = setup_3D()\n\n advec = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L4_2,\n Support: 'gpu_1k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L4_2,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_3D_withPython(scal, velo, advec, advec_py)",
"def InterpolateSurfaceVectors():\r\n \r\n # Load Surface Mesh Data and generate normals\r\n VTKString = OpenData('C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/InputFiles','muscle_surface.vtk')\r\n header, PointData, PolygonData = CreateMatrixVTK(VTKString)\r\n Centroids1,Vectors1 = ElementNormal(PointData,PolygonData)\r\n # Load full volume centroid\r\n NCF_Str = OpenData(\"C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/InputFiles\",\"new_centroids_file.dat\")\r\n HeaderNCF,Centroids2 = CreateMatrixDat(NCF_Str)\r\n \r\n # Interpolate Vectors\r\n Vectors2 = VectorInter(Centroids1,Vectors1,Centroids2)\r\n # Make the data sparser to display better.\r\n C1,V1 = SparseData(Centroids1,Vectors1,0.2)\r\n C2,V2 = SparseData(Centroids2,Vectors2,0.2)\r\n\r\n # Plot Data\r\n fig = plt.figure()\r\n\r\n ax1 = fig.add_subplot(121,projection = '3d')\r\n DisplaySliceVectors(C1,V1,ax1,1,10)\r\n\r\n ax2 = fig.add_subplot(122,projection = '3d')\r\n DisplaySliceVectors(C2,V2,ax2,1,10)\r\n\r\n plt.show()\r\n\r\n header = 'TITLE = \\\"Normal Surface Vectors\\\"\\nVARIABLES = \\\"XV\\\", \\\"YV\\\", \\\"ZV\\\" \\nZONE T=\\\"Step 0 Incr 0\\\" \\nF = VECTORS'\r\n\r\n np.savetxt(\"C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/OutputFiles/NormalVectorInterpolation.dat\",Vectors2,header = header,comments='')"
]
| [
"0.6256041",
"0.62469673",
"0.61554265",
"0.6052721",
"0.59591246",
"0.59132755",
"0.5910311",
"0.58924216",
"0.58591473",
"0.5825424",
"0.5753714",
"0.57487774",
"0.57378185",
"0.57196903",
"0.57180595",
"0.5703061",
"0.57020545",
"0.56810784",
"0.56383675",
"0.5617995",
"0.5586566",
"0.55831045",
"0.55664325",
"0.5561099",
"0.55503035",
"0.55497265",
"0.55429727",
"0.55395746",
"0.55162853",
"0.5505951"
]
| 0.6248369 | 1 |
Testing M8 remeshing formula in 3D, 2 kernel, simple precision, o2 splitting. | def test_3D_m8_2k():
scal, velo = setup_3D()
advec = Advection(velo, scal, discretization=d3d,
method={TimeIntegrator: RK2,
Interpolation: Linear,
Remesh: M8Prime,
Support: 'gpu_2k',
Splitting: 'o2'}
)
advec_py = Advection(velo, scal, discretization=d3d,
method={TimeIntegrator: RK2,
Interpolation: Linear,
Remesh: M8Prime,
Support: '',
Splitting: 'o2'},
)
assertion_3D_withPython(scal, velo, advec, advec_py) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_3D_m8_2k_sFH():\n scal, velo = setup_3D()\n\n advec = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: 'gpu_2k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_3D_withPython(scal, velo, advec, advec_py)",
"def test_3D_m8_1k_sFH():\n scal, velo = setup_3D()\n\n advec = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: 'gpu_1k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_3D_withPython(scal, velo, advec, advec_py)",
"def test_3D_m8_1k():\n scal, velo = setup_3D()\n\n advec = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: 'gpu_1k',\n Splitting: 'o2'}\n )\n advec_py = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: '',\n Splitting: 'o2'},\n )\n assertion_3D_withPython(scal, velo, advec, advec_py)",
"def test_2_2_3D_cube_splits(self):\n check = [(0, 0, 0), (1, 1, 1), (1, 0, 0), (1, 1, 0), (1, 0, 1),\n (0, 1, 0),\n (0, 1, 1), (0, 0, 1), (0.5, 0.5, 0.5), (0.0, 0.5, 0.5),\n (0.0, 0.0, 0.5), (0.0, 0.5, 0.0), (0.5, 0.0, 0.5),\n (0.5, 0.0, 0.0),\n (0.5, 0.5, 0.0), (0.25, 0.25, 0.25), (1.0, 0.5, 0.5),\n (1.0, 1.0, 0.5),\n (1.0, 0.5, 1.0), (0.5, 1.0, 0.5), (0.5, 1.0, 1.0),\n (0.5, 0.5, 1.0),\n (0.75, 0.75, 0.75), (1.0, 0.0, 0.5), (1.0, 0.5, 0.0),\n (0.75, 0.25, 0.25), (0.5, 1.0, 0.0), (0.75, 0.75, 0.25),\n (0.5, 0.0, 1.0), (0.75, 0.25, 0.75), (0.0, 1.0, 0.5),\n (0.25, 0.75, 0.25), (0.0, 0.5, 1.0), (0.25, 0.75, 0.75),\n (0.25, 0.25, 0.75), (0.5, 0.25, 0.25), (0.5, 0.5, 0.25),\n (0.5, 0.25, 0.5), (0.25, 0.5, 0.25), (0.25, 0.5, 0.5),\n (0.25, 0.25, 0.5), (0.375, 0.375, 0.375), (0.0, 0.25, 0.25),\n (0.0, 0.0, 0.25), (0.0, 0.25, 0.0), (0.25, 0.0, 0.25),\n (0.25, 0.0, 0.0), (0.25, 0.25, 0.0), (0.125, 0.125, 0.125),\n (0.0, 0.5, 0.25), (0.0, 0.25, 0.5), (0.125, 0.375, 0.375),\n (0.25, 0.0, 0.5), (0.125, 0.125, 0.375), (0.25, 0.5, 0.0),\n (0.125, 0.375, 0.125), (0.5, 0.0, 0.25), (0.375, 0.125, 0.375),\n (0.5, 0.25, 0.0), (0.375, 0.125, 0.125), (0.375, 0.375, 0.125),\n (0.5, 0.75, 0.75), (0.5, 0.5, 0.75), (0.5, 0.75, 0.5),\n (0.75, 0.5, 0.75), (0.75, 0.5, 0.5), (0.75, 0.75, 0.5),\n (0.625, 0.625, 0.625), (1.0, 0.75, 0.75), (1.0, 1.0, 0.75),\n (1.0, 0.75, 1.0), (0.75, 1.0, 0.75), (0.75, 1.0, 1.0),\n (0.75, 0.75, 1.0), (0.875, 0.875, 0.875), (1.0, 0.5, 0.75),\n (1.0, 0.75, 0.5), (0.875, 0.625, 0.625), (0.75, 1.0, 0.5),\n (0.875, 0.875, 0.625), (0.75, 0.5, 1.0), (0.875, 0.625, 0.875),\n (0.5, 1.0, 0.75), (0.625, 0.875, 0.625), (0.5, 0.75, 1.0),\n (0.625, 0.875, 0.875), (0.625, 0.625, 0.875),\n (0.75, 0.5, 0.25),\n (0.75, 0.25, 0.5), (0.625, 0.375, 0.375), (1.0, 0.25, 0.25),\n (1.0, 0.0, 0.25), (1.0, 0.25, 0.0), (0.75, 0.0, 0.25),\n (0.75, 0.0, 0.0), (0.75, 0.25, 0.0), (0.875, 0.125, 0.125),\n (1.0, 0.5, 0.25), (1.0, 0.25, 0.5), (0.875, 0.375, 0.375),\n (0.75, 0.0, 0.5), (0.875, 0.125, 0.375), (0.75, 0.5, 0.0),\n (0.875, 0.375, 0.125), (0.625, 0.125, 0.375),\n (0.625, 0.125, 0.125),\n (0.625, 0.375, 0.125), (0.5, 0.75, 0.25),\n (0.625, 0.625, 0.375),\n (1.0, 0.75, 0.25), (1.0, 1.0, 0.25), (1.0, 0.75, 0.0),\n (0.75, 1.0, 0.25), (0.75, 1.0, 0.0), (0.75, 0.75, 0.0),\n (0.875, 0.875, 0.125), (0.875, 0.625, 0.375),\n (0.875, 0.875, 0.375),\n (0.875, 0.625, 0.125), (0.5, 1.0, 0.25), (0.625, 0.875, 0.375),\n (0.5, 0.75, 0.0), (0.625, 0.875, 0.125), (0.625, 0.625, 0.125),\n (0.5, 0.25, 0.75), (0.625, 0.375, 0.625), (1.0, 0.25, 0.75),\n (1.0, 0.0, 0.75), (1.0, 0.25, 1.0), (0.75, 0.0, 0.75),\n (0.75, 0.0, 1.0), (0.75, 0.25, 1.0), (0.875, 0.125, 0.875),\n (0.875, 0.375, 0.625), (0.875, 0.125, 0.625),\n (0.875, 0.375, 0.875),\n (0.5, 0.0, 0.75), (0.625, 0.125, 0.625), (0.5, 0.25, 1.0),\n (0.625, 0.125, 0.875), (0.625, 0.375, 0.875),\n (0.25, 0.75, 0.5),\n (0.375, 0.625, 0.375), (0.0, 0.75, 0.25), (0.0, 1.0, 0.25),\n (0.0, 0.75, 0.0), (0.25, 1.0, 0.25), (0.25, 1.0, 0.0),\n (0.25, 0.75, 0.0), (0.125, 0.875, 0.125), (0.0, 0.75, 0.5),\n (0.125, 0.625, 0.375), (0.25, 1.0, 0.5), (0.125, 0.875, 0.375),\n (0.125, 0.625, 0.125), (0.375, 0.875, 0.375),\n (0.375, 0.875, 0.125),\n (0.375, 0.625, 0.125), (0.25, 0.5, 0.75),\n (0.375, 0.625, 0.625),\n (0.0, 0.75, 0.75), (0.0, 1.0, 0.75), (0.0, 0.75, 1.0),\n (0.25, 1.0, 0.75), (0.25, 1.0, 1.0), (0.25, 0.75, 1.0),\n (0.125, 0.875, 0.875), (0.0, 0.5, 0.75), (0.125, 0.625, 0.625),\n (0.125, 0.875, 0.625), (0.25, 0.5, 1.0), (0.125, 0.625, 0.875),\n (0.375, 0.875, 0.625), (0.375, 0.875, 
0.875),\n (0.375, 0.625, 0.875),\n (0.375, 0.375, 0.625), (0.0, 0.25, 0.75), (0.0, 0.0, 0.75),\n (0.0, 0.25, 1.0), (0.25, 0.0, 0.75), (0.25, 0.0, 1.0),\n (0.25, 0.25, 1.0), (0.125, 0.125, 0.875),\n (0.125, 0.375, 0.625),\n (0.125, 0.125, 0.625), (0.125, 0.375, 0.875),\n (0.375, 0.125, 0.625),\n (0.375, 0.125, 0.875), (0.375, 0.375, 0.875)]\n\n nn_checks = {(0.5, 0.25, 0.25): [(0.375, 0.375, 0.125), (0.5, 0.5, 0.0),\n (0.75, 0.25, 0.25),\n (0.625, 0.375, 0.375),\n (0.625, 0.125, 0.375),\n (0.625, 0.125, 0.125),\n (0.5, 0.5, 0.25), (0.25, 0.25, 0.25),\n (0.375, 0.375, 0.375),\n (0.5, 0.25, 0.5), (0.5, 0.5, 0.5),\n (0.5, 0.0, 0.25),\n (0.375, 0.125, 0.375), (0.5, 0.0, 0.5),\n (0.5, 0.25, 0.0),\n (0.375, 0.125, 0.125), (0.5, 0.0, 0.0),\n (0.625, 0.375, 0.125)],\n (0.625, 0.625, 0.875): [(0.75, 0.5, 1.0),\n (0.75, 0.75, 1.0),\n (0.5, 0.75, 1.0), (0.5, 0.5, 1.0),\n (0.5, 0.5, 0.75),\n (0.5, 0.75, 0.75),\n (0.75, 0.5, 0.75),\n (0.75, 0.75, 0.75)],\n (0, 0, 0): [(0.0, 0.25, 0.0), (0.125, 0.125, 0.125),\n (0.0, 0.0, 0.25), (0.25, 0.0, 0.0),\n (0.0, 0.25, 0.25), (0.25, 0.25, 0.0),\n (0.25, 0.0, 0.25)]}\n\n init_triangulation(3, 2, check, nn_checks)",
"def kernel_test(slabs, data, backend):\n Q = data[:, 0]\n\n layers = []\n for thickness, rsld, isld, sigma in slabs:\n layers.append(\n model.Layer(\n b=(rsld - 1j * isld), dens=0.1, d=thickness, sigma=sigma\n )\n )\n layers.reverse()\n stack = model.Stack(Layers=list(layers[1:-1]), Repetitions=1)\n sample = model.Sample(\n Stacks=[stack], Ambient=layers[-1], Substrate=layers[0]\n )\n # print(sample)\n\n inst = model.Instrument(\n probe=backend,\n wavelength=1.54,\n coords=\"q\",\n I0=1,\n res=0,\n restype=\"no conv\",\n respoints=5,\n resintrange=2,\n beamw=0.1,\n footype=\"no corr\",\n samplelen=10,\n pol=\"uu\",\n )\n if data.shape[1] == 4:\n dQ = data[:, 3]\n inst.restype = \"full conv and varying res.\"\n inst.res = dQ\n if backend == \"neutron pol spin flip\":\n # memory issues in matrix formalism if too many data points\n inst.respoints = 101\n else:\n inst.respoints = (\n 10001 # try to use same convolution as ref1d when generating\n )\n inst.resintrange = 3.5\n\n # print(inst)\n R = sample.SimSpecular(Q, inst)\n\n assert R.shape == data[:, 1].shape\n if data.shape[1] == 4:\n # validation accuracy is reduced for resolution runs, as strongly\n # depends on numerical convolution scheme\n if backend == \"neutron pol spin flip\":\n np.testing.assert_allclose(R, data[:, 1], rtol=0.005)\n else:\n np.testing.assert_allclose(R, data[:, 1], rtol=0.001)\n else:\n np.testing.assert_allclose(R, data[:, 1], rtol=0.001)",
"def test_2():\n\n n1 = 10\n n2 = 100\n ndim = 3\n\n semi_axes = np.random.random((n1,ndim))\n coords = np.array([sample_ellipsoidal_volume(n2, semi_axes[i]) for i in range(0,n1)])\n\n Is = iterative_inertia_tensors_3D(coords)\n\n assert np.shape(Is)==(n1,ndim,ndim)",
"def test_2_2_3D_rec_splits(self):\n check = [(-3.0, -2.0, 0.0), (4.0, 10.0, 1.0), (4.0, -2.0, 0.0),\n (4.0, 10.0, 0.0), (4.0, -2.0, 1.0), (-3.0, 10.0, 0.0),\n (-3.0, 10.0, 1.0), (-3.0, -2.0, 1.0), (0.5, 4.0, 0.5),\n (-3.0, 4.0, 0.5), (-3.0, -2.0, 0.5), (-3.0, 4.0, 0.0),\n (0.5, -2.0, 0.5), (0.5, -2.0, 0.0), (0.5, 4.0, 0.0),\n (-1.25, 1.0, 0.25), (4.0, 4.0, 0.5), (4.0, 10.0, 0.5),\n (4.0, 4.0, 1.0), (0.5, 10.0, 0.5), (0.5, 10.0, 1.0),\n (0.5, 4.0, 1.0), (2.25, 7.0, 0.75), (4.0, -2.0, 0.5),\n (4.0, 4.0, 0.0), (2.25, 1.0, 0.25), (0.5, 10.0, 0.0),\n (2.25, 7.0, 0.25), (0.5, -2.0, 1.0), (2.25, 1.0, 0.75),\n (-3.0, 10.0, 0.5), (-1.25, 7.0, 0.25), (-3.0, 4.0, 1.0),\n (-1.25, 7.0, 0.75), (-1.25, 1.0, 0.75), (0.5, 1.0, 0.25),\n (0.5, 4.0, 0.25), (0.5, 1.0, 0.5), (-1.25, 4.0, 0.25),\n (-1.25, 4.0, 0.5), (-1.25, 1.0, 0.5), (-0.375, 2.5, 0.375),\n (-3.0, 1.0, 0.25), (-3.0, -2.0, 0.25), (-3.0, 1.0, 0.0),\n (-1.25, -2.0, 0.25), (-1.25, -2.0, 0.0), (-1.25, 1.0, 0.0),\n (-2.125, -0.5, 0.125), (-3.0, 4.0, 0.25), (-3.0, 1.0, 0.5),\n (-2.125, 2.5, 0.375), (-1.25, -2.0, 0.5),\n (-2.125, -0.5, 0.375), (-1.25, 4.0, 0.0), (-2.125, 2.5, 0.125),\n (0.5, -2.0, 0.25), (-0.375, -0.5, 0.375), (0.5, 1.0, 0.0),\n (-0.375, -0.5, 0.125), (-0.375, 2.5, 0.125), (0.5, 7.0, 0.75),\n (0.5, 4.0, 0.75), (0.5, 7.0, 0.5), (2.25, 4.0, 0.75),\n (2.25, 4.0, 0.5), (2.25, 7.0, 0.5), (1.375, 5.5, 0.625),\n (4.0, 7.0, 0.75), (4.0, 10.0, 0.75), (4.0, 7.0, 1.0),\n (2.25, 10.0, 0.75), (2.25, 10.0, 1.0), (2.25, 7.0, 1.0),\n (3.125, 8.5, 0.875), (4.0, 4.0, 0.75), (4.0, 7.0, 0.5),\n (3.125, 5.5, 0.625), (2.25, 10.0, 0.5), (3.125, 8.5, 0.625),\n (2.25, 4.0, 1.0), (3.125, 5.5, 0.875), (0.5, 10.0, 0.75),\n (1.375, 8.5, 0.625), (0.5, 7.0, 1.0), (1.375, 8.5, 0.875),\n (1.375, 5.5, 0.875), (2.25, 4.0, 0.25), (2.25, 1.0, 0.5),\n (1.375, 2.5, 0.375), (4.0, 1.0, 0.25), (4.0, -2.0, 0.25),\n (4.0, 1.0, 0.0), (2.25, -2.0, 0.25), (2.25, -2.0, 0.0),\n (2.25, 1.0, 0.0), (3.125, -0.5, 0.125), (4.0, 4.0, 0.25),\n (4.0, 1.0, 0.5), (3.125, 2.5, 0.375), (2.25, -2.0, 0.5),\n (3.125, -0.5, 0.375), (2.25, 4.0, 0.0), (3.125, 2.5, 0.125),\n (1.375, -0.5, 0.375), (1.375, -0.5, 0.125),\n (1.375, 2.5, 0.125), (0.5, 7.0, 0.25), (1.375, 5.5, 0.375),\n (4.0, 7.0, 0.25), (4.0, 10.0, 0.25), (4.0, 7.0, 0.0),\n (2.25, 10.0, 0.25), (2.25, 10.0, 0.0), (2.25, 7.0, 0.0),\n (3.125, 8.5, 0.125), (3.125, 5.5, 0.375), (3.125, 8.5, 0.375),\n (3.125, 5.5, 0.125), (0.5, 10.0, 0.25), (1.375, 8.5, 0.375),\n (0.5, 7.0, 0.0), (1.375, 8.5, 0.125), (1.375, 5.5, 0.125),\n (0.5, 1.0, 0.75), (1.375, 2.5, 0.625), (4.0, 1.0, 0.75),\n (4.0, -2.0, 0.75), (4.0, 1.0, 1.0), (2.25, -2.0, 0.75),\n (2.25, -2.0, 1.0), (2.25, 1.0, 1.0), (3.125, -0.5, 0.875),\n (3.125, 2.5, 0.625), (3.125, -0.5, 0.625), (3.125, 2.5, 0.875),\n (0.5, -2.0, 0.75), (1.375, -0.5, 0.625), (0.5, 1.0, 1.0),\n (1.375, -0.5, 0.875), (1.375, 2.5, 0.875), (-1.25, 7.0, 0.5),\n (-0.375, 5.5, 0.375), (-3.0, 7.0, 0.25), (-3.0, 10.0, 0.25),\n (-3.0, 7.0, 0.0), (-1.25, 10.0, 0.25), (-1.25, 10.0, 0.0),\n (-1.25, 7.0, 0.0), (-2.125, 8.5, 0.125), (-3.0, 7.0, 0.5),\n (-2.125, 5.5, 0.375), (-1.25, 10.0, 0.5), (-2.125, 8.5, 0.375),\n (-2.125, 5.5, 0.125), (-0.375, 8.5, 0.375),\n (-0.375, 8.5, 0.125), (-0.375, 5.5, 0.125), (-1.25, 4.0, 0.75),\n (-0.375, 5.5, 0.625), (-3.0, 7.0, 0.75), (-3.0, 10.0, 0.75),\n (-3.0, 7.0, 1.0), (-1.25, 10.0, 0.75), (-1.25, 10.0, 1.0),\n (-1.25, 7.0, 1.0), (-2.125, 8.5, 0.875), (-3.0, 4.0, 0.75),\n (-2.125, 5.5, 0.625), (-2.125, 8.5, 0.625), (-1.25, 4.0, 1.0),\n (-2.125, 5.5, 0.875), (-0.375, 8.5, 0.625),\n (-0.375, 8.5, 
0.875), (-0.375, 5.5, 0.875),\n (-0.375, 2.5, 0.625), (-3.0, 1.0, 0.75), (-3.0, -2.0, 0.75),\n (-3.0, 1.0, 1.0), (-1.25, -2.0, 0.75), (-1.25, -2.0, 1.0),\n (-1.25, 1.0, 1.0), (-2.125, -0.5, 0.875), (-2.125, 2.5, 0.625),\n (-2.125, -0.5, 0.625), (-2.125, 2.5, 0.875),\n (-0.375, -0.5, 0.625), (-0.375, -0.5, 0.875),\n (-0.375, 2.5, 0.875)]\n nn_checks = {(2.25, 7.0, 0.75): [(4.0, 7.0, 0.75), (2.25, 7.0, 1.0),\n (4.0, 7.0, 0.5), (4.0, 7.0, 1.0),\n (4.0, 4.0, 0.75), (1.375, 5.5, 0.875),\n (2.25, 4.0, 1.0), (2.25, 4.0, 0.5),\n (2.25, 4.0, 0.75), (3.125, 8.5, 0.875),\n (3.125, 8.5, 0.625), (4.0, 10.0, 0.75),\n (2.25, 10.0, 1.0), (2.25, 10.0, 0.75),\n (2.25, 10.0, 0.5), (1.375, 8.5, 0.625),\n (1.375, 8.5, 0.875), (0.5, 7.0, 0.75),\n (0.5, 7.0, 0.5), (3.125, 5.5, 0.625),\n (3.125, 5.5, 0.875), (0.5, 10.0, 0.75),\n (0.5, 7.0, 1.0), (0.5, 4.0, 0.75),\n (2.25, 7.0, 0.5), (1.375, 5.5, 0.625)],\n (4.0, -2.0, 0.5): [(4.0, -2.0, 0.75), (4.0, -2.0, 0.25),\n (2.25, 1.0, 0.5), (2.25, -2.0, 0.75),\n (2.25, -2.0, 0.5), (2.25, -2.0, 0.25),\n (4.0, 1.0, 0.25), (4.0, 1.0, 0.75),\n (4.0, 1.0, 0.5), (3.125, -0.5, 0.375),\n (3.125, -0.5, 0.625)],\n (-2.125, -0.5, 0.875): [(-1.25, 1.0, 1.0),\n (-1.25, 1.0, 0.75),\n (-1.25, -2.0, 0.75),\n (-1.25, -2.0, 1.0),\n (-3.0, -2.0, 0.75),\n (-3.0, 1.0, 1.0), (-3, -2, 1),\n (-3.0, 1.0, 0.75)]}\n\n init_triangulation(3, 2, check, nn_checks, bounds=[(-3, 4), (-2, 10), (0, 1)])",
"def test_multiple_case(self):\r\n\r\n shp = (3, 3)\r\n fx, fy, fz, fw = fmatrices('xyzw')\r\n dx, dy, dz, dw = dmatrices('xyzw')\r\n fv = fvector('r').dimshuffle('x', 0)\r\n dv = dvector('s').dimshuffle('x', 0)\r\n fxv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fyv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fzv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fwv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float32').reshape(1, shp[0])\r\n dxv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dyv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dzv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dwv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float64').reshape(1, shp[0])\r\n\r\n #We must be sure that the Canonizer is working, but that we don't have other\r\n # optimisation that could hide bug in the Canonizer as local_elemwise_fusion\r\n mode = compile.mode.get_default_mode()\r\n old_optimizer = mode._optimizer\r\n try:\r\n mode._optimizer = gof.Query([\"canonicalize\"])\r\n mode._optimizer = mode._optimizer.including('ShapeOpt')\r\n mode._optimizer = mode._optimizer.excluding(\r\n 'local_elemwise_fusion')\r\n\r\n #test x / x -> 1\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([(fx/fx,[fx],[fxv],'float32'),\r\n (dx/dx,[dx],[dxv],'float64'),\r\n (fv/fv,[fv],[fvv],'float32'),\r\n (dv/dv,[dv],[dvv],'float64'),\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert (out == numpy.ones(shp, dtype=out_dtype)).all()\r\n topo = f.maker.fgraph.toposort()\r\n if sym_inputs[0].broadcastable[0]:\r\n assert len(topo) == 2\r\n assert isinstance(topo[0].op, Shape_i)\r\n assert isinstance(topo[1].op, tensor.Alloc)\r\n else:\r\n assert len(topo) == 3\r\n assert isinstance(topo[0].op, Shape_i)\r\n assert isinstance(topo[1].op, Shape_i)\r\n assert isinstance(topo[2].op, tensor.Alloc)\r\n assert(out_dtype == out.dtype)\r\n\r\n #test (x * y) / x -> y\r\n for id,(g, sym_inputs, val_inputs, nb_elemwise, out_dtype) in enumerate([\r\n ((dx*dy)/dx,[dx,dy],[dxv,dyv],0,'float64'),\r\n ((fx*fy)/fx,[fx,fy],[fxv,fyv],0,'float32'),\r\n ((dv*dy)/dv,[dv,dy],[dvv,dyv],0,'float64'),\r\n ((fv*fy)/fv,[fv,fy],[fvv,fyv],0,'float32'),\r\n #must broadcast as their is a dimshuffle in the computation\r\n ((dx*dv)/dx,[dx,dv],[dxv,dvv],1,'float64'),\r\n #topo: [Elemwise{second,no_inplace}(x, <TensorType(float64, row)>)]\r\n ((fx*fv)/fx,[fx,fv],[fxv,fvv],1,'float32')\r\n #topo: [Elemwise{second,no_inplace}(x, <TensorType(float32, row)>)]\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert(out_dtype == out.dtype)\r\n assert numpy.allclose(out, val_inputs[1])\r\n topo = f.maker.fgraph.toposort()\r\n print \"ID TOPO\", id, topo, sym_inputs\r\n for r, t in f.maker.fgraph.shape_feature.shape_of.items():\r\n print ' ', r, t\r\n if topo and not(len(topo)==1 and topo[0].op==deep_copy_op):\r\n for node in topo[:-1]:\r\n assert isinstance(node.op, Shape_i)\r\n assert isinstance(topo[-1].op, tensor.Alloc)\r\n\r\n #test x / y / x -> 1 / y\r\n for id,(g, sym_inputs, val_inputs, nb_elemwise, out_dtype) in enumerate([\r\n ((dx/dy)/dx,[dx,dy],[dxv,dyv],1,'float64'),\r\n ((fx/fy)/fx,[fx,fy],[fxv,fyv],1,'float32'),\r\n ((dv/dy)/dv,[dv,dy],[dvv,dyv],1,'float64'),\r\n 
((fv/fy)/fv,[fv,fy],[fvv,fyv],1,'float32'),\r\n #must broadcast as their is a dimshuffle in the computation\r\n\r\n ((dx/dv)/dx,[dx,dv],[dxv,dvv],1,'float64'),\r\n #topo: [Shape_i, Shape_i, Elemwise{inv,no_inplace}(<TensorType(float64, row)>), Alloc]\r\n ((fx/fv)/fx,[fx,fv],[fxv,fvv],1,'float32'),\r\n #topo:[Shape_i, Shape_i, Elemwise{inv,no_inplace}(<TensorType(float32, row)>), Alloc]\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, (1 / val_inputs[1]))\r\n topo = f.maker.fgraph.toposort()\r\n print topo\r\n elem = [t for t in topo if isinstance(t.op, T.Elemwise)]\r\n assert len(elem) == nb_elemwise\r\n assert isinstance(elem[0].op, (T.Elemwise, ))\r\n assert isinstance(elem[0].op.scalar_op, (\r\n theano.scalar.basic.Inv, theano.scalar.basic.TrueDiv))\r\n assert(out_dtype == out.dtype)\r\n\r\n #test (a / b) * (b / c) * (c / d) -> a / d\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n ((dx / dy) * (dy / dz) * (dz / dw),[dx,dy,dz,dw],[dxv,dyv,dzv,dwv],'float64'),\r\n ((fx / fy) * (fy / fz) * (fz / fw),[fx,fy,fz,fw],[fxv,fyv,fzv,fwv],'float32'),\r\n ((dv / dy) * (dy / dz) * (dz / dw),[dv,dy,dz,dw],[dvv,dyv,dzv,dwv],'float64'),\r\n ((fv / fy) * (fy / fz) * (fz / fw),[fv,fy,fz,fw],[fvv,fyv,fzv,fwv],'float32'),\r\n ((dx / dv) * (dv / dz) * (dz / dw),[dx,dv,dz,dw],[dxv,dvv,dzv,dwv],'float64'),\r\n ((fx / fv) * (fv / fz) * (fz / fw),[fx,fv,fz,fw],[fxv,fvv,fzv,fwv],'float32'),\r\n ((dx / dy) * (dy / dv) * (dv / dw),[dx,dy,dv,dw],[dxv,dyv,dvv,dwv],'float64'),\r\n ((fx / fy) * (fy / fv) * (fv / fw),[fx,fy,fv,fw],[fxv,fyv,fvv,fwv],'float32'),\r\n ((dx / dy) * (dy / dz) * (dz / dv),[dx,dy,dz,dv],[dxv,dyv,dzv,dvv],'float64'),\r\n ((fx / fy) * (fy / fz) * (fz / fv),[fx,fy,fz,fv],[fxv,fyv,fzv,fvv],'float32'),\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, (val_inputs[0] / val_inputs[3]))\r\n topo = f.maker.fgraph.toposort()\r\n assert len(topo) == 1\r\n assert isinstance(topo[0].op, (T.Elemwise, ))\r\n assert isinstance(topo[0].op.scalar_op,\r\n theano.scalar.basic.TrueDiv)\r\n assert len(topo[0].inputs) == 2\r\n assert(out_dtype == out.dtype)\r\n\r\n #test (2.0 * x) / (4.0 * y) -> (0.5 * x) / y\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n (((2.0*dx)/(4.0*dy)),[dx,dy],[dxv,dyv],'float64'),\r\n (((2.0*fx)/(4.0*fy)),[fx,fy],[fxv,fyv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n (((2.0*dv)/(4.0*dy)),[dv,dy],[dvv,dyv],'float64'),\r\n (((2.0*fv)/(4.0*fy)),[fv,fy],[fvv,fyv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n (((2.0*dx)/(4.0*dv)),[dx,dv],[dxv,dvv],'float64'),\r\n (((2.0*fx)/(4.0*fv)),[fx,fv],[fxv,fvv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ]):\r\n\r\n if isinstance(out_dtype, dict):\r\n out_dtype = out_dtype[config.cast_policy]\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, (0.5 *\r\n val_inputs[0] / val_inputs[1]))\r\n topo = f.maker.fgraph.toposort()\r\n assert len(topo) == 2\r\n assert isinstance(topo[0].op, (T.Elemwise, ))\r\n assert isinstance(topo[0].op.scalar_op,\r\n theano.scalar.basic.Mul)\r\n assert len(topo[0].inputs) == 2\r\n assert isinstance(topo[1].op, (T.Elemwise, ))\r\n assert isinstance(topo[1].op.scalar_op,\r\n theano.scalar.basic.TrueDiv)\r\n assert len(topo[1].inputs) == 2\r\n assert(out_dtype == 
out.dtype)\r\n\r\n #test 2 * x / 2 -> x\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n ((2*dx)/2,[dx],[dxv],'float64'),\r\n ((2*fx)/2,[fx],[fxv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ((2*dv)/2,[dv],[dvv],'float64'),\r\n ((2*fv)/2,[fv],[fvv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ]):\r\n if isinstance(out_dtype, dict):\r\n out_dtype = out_dtype[config.cast_policy]\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, val_inputs[0])\r\n topo = f.maker.fgraph.toposort()\r\n assert len(topo) == 1\r\n topo[0].op == deep_copy_op\r\n assert(out_dtype == out.dtype)\r\n\r\n #test x / abs(x) -> sign(x)\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n (dx/abs(dx),[dx],[0.5-dxv],'float64'),\r\n (fx/abs(fx),[fx],[0.5-fxv], 'float32'),\r\n (dx/abs(dx),[dx],[0.1*dxv],'float64'),\r\n (fx/abs(fx),[fx],[0.1*fxv], 'float32'),\r\n (dv/abs(dv),[dv],[0.5-dvv],'float64'),\r\n (fv/abs(fv),[fv],[0.5-fvv], 'float32'),\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.all(numpy.isfinite(out))\r\n assert numpy.allclose(out, numpy.sign(val_inputs[0]))\r\n assert(out_dtype == out.dtype)\r\n assert len(f.maker.fgraph.toposort()) == 1\r\n\r\n #test (2*x) / (3*abs(x)) -> sign(x)\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n ((2 * dx) / (3 * abs(dx)), [dx], [0.5 - dxv], 'float64'),\r\n ((2 * fx) / (3 * abs(fx)), [fx], [0.5 - fxv],\r\n {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ((2 * dx) / (3 * abs(dx)), [dx], [0.1 * dxv], 'float64'),\r\n ((2 * fx) / (3 * abs(fx)), [fx], [0.1 * fxv],\r\n {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ((2 * dv) / (3 * abs(dv)), [dv], [0.5 - dvv], 'float64'),\r\n ((2 * fv) / (3 * abs(fv)), [fv], [0.5 - fvv],\r\n {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ]):\r\n\r\n if isinstance(out_dtype, dict):\r\n out_dtype = out_dtype[config.cast_policy]\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n topo = f.maker.fgraph.toposort()\r\n out = f(*val_inputs)\r\n assert numpy.all(numpy.isfinite(out))\r\n assert numpy.allclose(out, numpy.sign(val_inputs[0]) * 2 / 3)\r\n assert(out_dtype == out.dtype)\r\n finally:\r\n mode._optimizer = old_optimizer",
"def ThreeDTest(SMethod,IMethod,Fraction,Plot = False):\r\n \r\n # Cylinder Parameters--------------------------------------------------------- \r\n CL = 100 # cylinder length\r\n Pt = 120 # number of points in each cylinder\r\n Cn = 50 # number of horizontal slices in cylinder\r\n\r\n x = np.zeros(Cn*Pt)\r\n y = np.zeros(Cn*Pt)\r\n z = np.zeros(Cn*Pt)\r\n # Generate cylinder-----------------------------------------------------------\r\n n = 0\r\n for i in range(Cn):\r\n for j in range(Pt):\r\n x[n] = np.cos((2*pi*j)/Pt)\r\n y[n] = np.sin((2*pi*j)/Pt)\r\n z[n] = i*(CL/Cn)\r\n n += 1\r\n \r\n YFull = (np.sin(2*pi*0.03*z))+(np.cos(2*pi*x+2*pi*x))\r\n XFull = np.column_stack((x,y,z))\r\n MFull = np.column_stack((x,y,z,YFull))\r\n\r\n # Randomise matrix and Generate sparse version of geometry--------------------\r\n split = int(np.ceil((MFull.shape[0])*Fraction)) \r\n np.random.shuffle(MFull)\r\n # Sparse Set\r\n XTrain = MFull[:split,:3]\r\n YTrain = MFull[:split,3]\r\n # Training set\r\n XStar = MFull[split:,:3]\r\n CStar = MFull[split:,3]\r\n\r\n # Reconstruct XFull's geometry using XTrain and YTrain------------------------\r\n YHat = ThreeDPointInter(XTrain,YTrain,XFull,SMethod,IMethod,10)\r\n mse = mseCalc(YFull,YHat)\r\n print('Mean Squared Error =',mse)\r\n # Plot whole data-----------------------------------------------------------\r\n if Plot:\r\n fig = plt.figure()\r\n ax1 = fig.add_subplot(131, projection='3d')\r\n ax1.scatter(XFull[:,0],XFull[:,1],XFull[:,2],c=[float(i) for i in YFull],cmap='plasma')\r\n ax1.set_xlabel('x')\r\n ax1.set_ylabel('y')\r\n ax1.set_zlabel('z')\r\n # Plot training Data\r\n ax2 = fig.add_subplot(132, projection='3d')\r\n ax2.scatter(XTrain[:,0],XTrain[:,1],XTrain[:,2],c=[float(i) for i in YTrain],cmap='plasma')\r\n ax2.set_xlabel('XTrain1')\r\n ax2.set_ylabel('XTrain2')\r\n ax2.set_zlabel('XTrain3')\r\n # Plot Reconstruction of XFull\r\n ax3 = fig.add_subplot(133, projection='3d')\r\n ax3.scatter(XFull[:,0],XFull[:,1],XFull[:,2],c=[float(i) for i in YHat],cmap='plasma')\r\n ax3.set_xlabel('x')\r\n ax3.set_ylabel('y')\r\n ax3.set_zlabel('z')\r\n \r\n plt.show()\r\n\r\n return mse",
"def test_2_layer():\r\n # angular frequency in radians * THz\r\n w = 100 * nu.THz\r\n # Relative permittivity of metal and dielectric\r\n em = -4.56 + 0.12j\r\n ed = 1.23 + 0.01j\r\n ex_list = ez_list = [ed, em]\r\n # Relative permeabilities\r\n mu_list = [1,1]\r\n # Dictionary of input parameters\r\n input_params = {'w': w, 'd_list': [inf,inf], 'ex_list': ex_list,\r\n 'ez_list': ez_list, 'mu_list': mu_list}\r\n \r\n # Calculate the theoretical kx\r\n theo_kx = (w / nu.c0) * cmath.sqrt((em * ed) / (em + ed))\r\n if theo_kx.imag < 0:\r\n theo_kx *= -1\r\n print('Theoretical kx:',\r\n '(%.7g+%.7gj) rad/um' % (theo_kx.real / nu.um**-1, theo_kx.imag / nu.um**-1))\r\n \r\n # If I use the theoretical kx value, the mode should be correct and\r\n # all my tests should pass.\r\n params = deepcopy(input_params)\r\n params['kx'] = theo_kx\r\n params = find_all_params_from_kx(params)\r\n kzd, kzm = params['kz_list']\r\n # check that kz_list is correct\r\n assert_floats_are_equal(kzd**2, (w**2 / nu.c0**2) * ed**2 / (em + ed))\r\n assert_floats_are_equal(kzm**2, (w**2 / nu.c0**2) * em**2 / (em + ed))\r\n # check that layer_bottom_list is correct\r\n assert params['layer_bottom_list'][0] == -inf\r\n assert params['layer_bottom_list'][1] == 0\r\n # Check that the boundary condition matrix agrees with hand-calculation\r\n bc_mat = bc_matrix(params)\r\n # ...top-left is Ex0down / H0down\r\n assert_floats_are_equal(bc_mat[0,0], -kzd / (w * ed * nu.eps0))\r\n # ...top-right is -Ex1up / H1up\r\n assert_floats_are_equal(bc_mat[0,1], -kzm / (w * em * nu.eps0))\r\n # ...bottom-left is eps0 * Ez0down / H0down\r\n assert_floats_are_equal(bc_mat[1,0], ed * -theo_kx / (w * ed * nu.eps0))\r\n # ...bottom-right is -eps1 * Ez1up / H1up\r\n assert_floats_are_equal(bc_mat[1,1], -em * -theo_kx / (w * em * nu.eps0))\r\n # Check that one of the eigenvalues is almost zero (compared to the size\r\n # of the matrix elements).\r\n eigenvalues = np.linalg.eig(bc_mat)[0]\r\n assert abs(eigenvalues).min() / abs(bc_mat).max() < 1e-6\r\n # Check that the mode passes all tests.\r\n assert check_mode(params, thorough=True) is True\r\n # Check that I can scale the fields and it still passes all tests.\r\n params_scaled = rescale_fields(1.23+4.56j, params)\r\n assert check_mode(params_scaled, thorough=True) is True\r\n \r\n # Now try my kx-finding algorithm, to see if it finds the right value.\r\n kx_list = find_kx(input_params)\r\n print('kx_list:',\r\n ['(%.7g+%.7gj) rad/um' % (kx.real / nu.um**-1, kx.imag / nu.um**-1)\r\n for kx in kx_list])\r\n kx = kx_list[0]\r\n assert_floats_are_equal(theo_kx, kx)\r\n \r\n plot_mode(params)\r\n \r\n print('If you see this message, all the tests succeeded!!')",
"def test_1_2_2D_cube_splits(self):\n check = [(0, 0), (1, 1), (1, 0), (0, 1), (0.5, 0.5), (0.0, 0.5),\n (0.5, 0.0),\n (0.25, 0.25), (1.0, 0.5), (0.5, 1.0), (0.75, 0.75),\n (0.75, 0.25),\n (0.25, 0.75), (0.5, 0.25), (0.25, 0.5), (0.375, 0.375),\n (0.0, 0.25),\n (0.25, 0.0), (0.125, 0.125), (0.125, 0.375), (0.375, 0.125),\n (0.5, 0.75), (0.75, 0.5), (0.625, 0.625), (1.0, 0.75),\n (0.75, 1.0),\n (0.875, 0.875), (0.875, 0.625), (0.625, 0.875), (0.625, 0.375),\n (1.0, 0.25), (0.75, 0.0), (0.875, 0.125), (0.875, 0.375),\n (0.625, 0.125), (0.375, 0.625), (0.0, 0.75), (0.25, 1.0),\n (0.125, 0.875), (0.125, 0.625), (0.375, 0.875)]\n\n nn_checks = {(0, 0): [(0.25, 0.0), (0.0, 0.25), (0.125, 0.125)],\n (0.625, 0.375): [(0.5, 0.5), (0.75, 0.25), (0.75, 0.5),\n (0.5, 0.25)],\n (0, 1): [(0.25, 1.0), (0.125, 0.875),(0.0, 0.75)],\n (0.625, 0.125): [(0.5, 0.0), (0.75, 0.25), (0.75, 0.0),\n (0.5, 0.25)]}\n\n\n init_triangulation(2, 2, check, nn_checks)",
"def test_3_2_4D_cube_splits(self):\n check = [(0, 0, 0, 0), (1, 1, 1, 1), (1, 0, 0, 0), (1, 1, 0, 0),\n (1, 1, 1, 0),\n (1, 1, 0, 1), (1, 0, 1, 0), (1, 0, 1, 1), (1, 0, 0, 1),\n (0, 1, 0, 0),\n (0, 1, 1, 0), (0, 1, 1, 1), (0, 1, 0, 1), (0, 0, 1, 0),\n (0, 0, 1, 1),\n (0, 0, 0, 1), (0.5, 0.5, 0.5, 0.5), (0.0, 0.5, 0.5, 0.5),\n (0.0, 0.0, 0.5, 0.5), (0.0, 0.0, 0.0, 0.5),\n (0.0, 0.0, 0.5, 0.0),\n (0.0, 0.5, 0.0, 0.5), (0.0, 0.5, 0.0, 0.0),\n (0.0, 0.5, 0.5, 0.0),\n (0.5, 0.0, 0.5, 0.5), (0.5, 0.0, 0.0, 0.5),\n (0.5, 0.0, 0.0, 0.0),\n (0.5, 0.0, 0.5, 0.0), (0.5, 0.5, 0.0, 0.5),\n (0.5, 0.5, 0.0, 0.0),\n (0.5, 0.5, 0.5, 0.0), (0.25, 0.25, 0.25, 0.25),\n (1.0, 0.5, 0.5, 0.5),\n (1.0, 1.0, 0.5, 0.5), (1.0, 1.0, 1.0, 0.5),\n (1.0, 1.0, 0.5, 1.0),\n (1.0, 0.5, 1.0, 0.5), (1.0, 0.5, 1.0, 1.0),\n (1.0, 0.5, 0.5, 1.0),\n (0.5, 1.0, 0.5, 0.5), (0.5, 1.0, 1.0, 0.5),\n (0.5, 1.0, 1.0, 1.0),\n (0.5, 1.0, 0.5, 1.0), (0.5, 0.5, 1.0, 0.5),\n (0.5, 0.5, 1.0, 1.0),\n (0.5, 0.5, 0.5, 1.0), (0.75, 0.75, 0.75, 0.75),\n (1.0, 0.0, 0.5, 0.5),\n (1.0, 0.0, 0.0, 0.5), (1.0, 0.0, 0.5, 0.0),\n (1.0, 0.5, 0.0, 0.5),\n (1.0, 0.5, 0.0, 0.0), (1.0, 0.5, 0.5, 0.0),\n (0.75, 0.25, 0.25, 0.25),\n (1.0, 1.0, 0.0, 0.5), (1.0, 1.0, 0.5, 0.0),\n (0.5, 1.0, 0.0, 0.5),\n (0.5, 1.0, 0.0, 0.0), (0.5, 1.0, 0.5, 0.0),\n (0.75, 0.75, 0.25, 0.25),\n (1.0, 0.5, 1.0, 0.0), (0.5, 1.0, 1.0, 0.0),\n (0.5, 0.5, 1.0, 0.0),\n (0.75, 0.75, 0.75, 0.25), (1.0, 0.5, 0.0, 1.0),\n (0.5, 1.0, 0.0, 1.0),\n (0.5, 0.5, 0.0, 1.0), (0.75, 0.75, 0.25, 0.75),\n (1.0, 0.0, 1.0, 0.5),\n (0.5, 0.0, 1.0, 0.5), (0.5, 0.0, 1.0, 0.0),\n (0.75, 0.25, 0.75, 0.25),\n (1.0, 0.0, 0.5, 1.0), (0.5, 0.0, 1.0, 1.0),\n (0.5, 0.0, 0.5, 1.0),\n (0.75, 0.25, 0.75, 0.75), (0.5, 0.0, 0.0, 1.0),\n (0.75, 0.25, 0.25, 0.75), (0.0, 1.0, 0.5, 0.5),\n (0.0, 1.0, 0.0, 0.5),\n (0.0, 1.0, 0.5, 0.0), (0.25, 0.75, 0.25, 0.25),\n (0.0, 1.0, 1.0, 0.5),\n (0.0, 0.5, 1.0, 0.5), (0.0, 0.5, 1.0, 0.0),\n (0.25, 0.75, 0.75, 0.25),\n (0.0, 1.0, 0.5, 1.0), (0.0, 0.5, 1.0, 1.0),\n (0.0, 0.5, 0.5, 1.0),\n (0.25, 0.75, 0.75, 0.75), (0.0, 0.5, 0.0, 1.0),\n (0.25, 0.75, 0.25, 0.75), (0.0, 0.0, 1.0, 0.5),\n (0.25, 0.25, 0.75, 0.25), (0.0, 0.0, 0.5, 1.0),\n (0.25, 0.25, 0.75, 0.75), (0.25, 0.25, 0.25, 0.75)]\n\n nn_checks = {(0, 0, 0, 0): [(0.0, 0.5, 0.5, 0.0), (0.5, 0.0, 0.5, 0.5),\n (0.5, 0.5, 0.0, 0.5),\n (0.25, 0.25, 0.25, 0.25),\n (0.5, 0.0, 0.0, 0.5), (0.5, 0.0, 0.5, 0.0),\n (0.5, 0.5, 0.0, 0.0), (0.0, 0.0, 0.5, 0.0),\n (0.0, 0.5, 0.0, 0.0), (0.0, 0.0, 0.0, 0.5),\n (0.0, 0.5, 0.5, 0.5), (0.0, 0.0, 0.5, 0.5),\n (0.0, 0.5, 0.0, 0.5), (0.5, 0.0, 0.0, 0.0),\n (0.5, 0.5, 0.5, 0.0)],\n (1.0, 1.0, 0.5, 0.5): [(1.0, 1.0, 0.5, 1.0), (1, 1, 0, 1),\n (1.0, 1.0, 1.0, 0.5),\n (1.0, 0.5, 0.5, 0.5), (1, 1, 1, 0),\n (1.0, 1.0, 0.5, 0.0),\n (1.0, 1.0, 0.0, 0.5), (1, 1, 0, 0),\n (1, 1, 1, 1), (0.5, 1.0, 0.5, 0.5),\n (0.5, 0.5, 0.5, 0.5),\n (0.75, 0.75, 0.75, 0.75),\n (0.75, 0.75, 0.25, 0.25),\n (0.75, 0.75, 0.75, 0.25),\n (0.75, 0.75, 0.25, 0.75)],\n (0.25, 0.25, 0.25, 0.75): [(0.0, 0.0, 0.5, 1.0),\n (0.0, 0.5, 0.0, 1.0),\n (0.5, 0.0, 0.0, 1.0),\n (0.5, 0.5, 0.5, 1.0),\n (0, 0, 0, 1),\n (0.0, 0.5, 0.5, 1.0),\n (0.5, 0.0, 0.5, 1.0),\n (0.5, 0.5, 0.0, 1.0),\n (0.0, 0.0, 0.0, 0.5),\n (0.0, 0.0, 0.5, 0.5),\n (0.0, 0.5, 0.5, 0.5),\n (0.0, 0.5, 0.0, 0.5),\n (0.5, 0.0, 0.5, 0.5),\n (0.5, 0.5, 0.0, 0.5),\n (0.5, 0.5, 0.5, 0.5),\n (0.5, 0.0, 0.0, 0.5)]}\n\n init_triangulation(4, 1, check, nn_checks)",
"def test_2D_m8_1k():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: 'gpu_1k',\n Splitting: 'o2'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: '',\n Splitting: 'o2'},\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def test_reconstruction_against_simulation(subarray_and_event_gamma_off_axis_500_gev):\n\n # 4-LST bright event already calibrated\n # we'll clean it and parametrize it again in the TelescopeFrame\n subarray, event = subarray_and_event_gamma_off_axis_500_gev\n\n # define reconstructor\n reconstructor = HillasReconstructor(subarray)\n\n hillas_dict = {}\n telescope_pointings = {}\n\n for tel_id, dl1 in event.dl1.tel.items():\n\n telescope_pointings[tel_id] = SkyCoord(\n alt=event.pointing.tel[tel_id].altitude,\n az=event.pointing.tel[tel_id].azimuth,\n frame=AltAz(),\n )\n\n geom_CameraFrame = subarray.tel[tel_id].camera.geometry\n\n # this could be done also out of this loop,\n # but in case of real data each telescope would have a\n # different telescope_pointing\n geom_TelescopeFrame = geom_CameraFrame.transform_to(\n TelescopeFrame(telescope_pointing=telescope_pointings[tel_id])\n )\n\n mask = tailcuts_clean(\n geom_TelescopeFrame,\n dl1.image,\n picture_thresh=5.0,\n boundary_thresh=2.5,\n keep_isolated_pixels=False,\n min_number_picture_neighbors=2,\n )\n\n try:\n hillas_dict[tel_id] = hillas_parameters(\n geom_TelescopeFrame[mask], dl1.image[mask]\n )\n\n # the original event is created from a\n # pytest fixture with \"session\" scope, so it's always the same\n # and if we used the same event we would overwrite the image\n # parameters for the next tests, thus causing their failure\n test_event = deepcopy(event)\n test_event.dl1.tel[tel_id].parameters = ImageParametersContainer()\n test_event.dl1.tel[tel_id].parameters.hillas = hillas_dict[tel_id]\n\n except HillasParameterizationError as e:\n print(e)\n continue\n\n # Get shower geometry\n reconstructor(event)\n # get the result from the correct DL2 container\n result = event.dl2.stereo.geometry[\"HillasReconstructor\"]\n\n # get the reconstructed coordinates in the sky\n reco_coord = SkyCoord(alt=result.alt, az=result.az, frame=AltAz())\n # get the simulated coordinates in the sky\n true_coord = SkyCoord(\n alt=event.simulation.shower.alt, az=event.simulation.shower.az, frame=AltAz()\n )\n\n # check that we are not more far than 0.1 degrees\n assert reco_coord.separation(true_coord) < 0.1 * u.deg",
"def test_2D_m8_2k():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: 'gpu_2k',\n Splitting: 'o2'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: '',\n Splitting: 'o2'},\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def test_2D_m8_1k_sFH():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: 'gpu_1k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def lpt_prototype(mesh,\n nc=FLAGS.nc,\n bs=FLAGS.box_size,\n batch_size=FLAGS.batch_size,\n a0=FLAGS.a0,\n a=FLAGS.af,\n nsteps=FLAGS.nsteps):\n\n stages = np.linspace(a0, a, nsteps, endpoint=True)\n klin = np.loadtxt('../flowpm/data/Planck15_a1p00.txt').T[0]\n plin = np.loadtxt('../flowpm/data/Planck15_a1p00.txt').T[1]\n ipklin = iuspline(klin, plin)\n\n # Define the named dimensions\n # Parameters of the small scales decomposition\n n_block_x = FLAGS.nx\n n_block_y = FLAGS.ny\n n_block_z = 1\n halo_size = FLAGS.hsize\n\n if halo_size >= 0.5 * min(nc // n_block_x, nc // n_block_y, nc // n_block_z):\n new_size = int(0.5 *\n min(nc // n_block_x, nc // n_block_y, nc // n_block_z))\n print('WARNING: REDUCING HALO SIZE from %d to %d' % (halo_size, new_size))\n halo_size = new_size\n\n # Parameters of the large scales decomposition\n downsampling_factor = FLAGS.dsample\n lnc = nc // 2**downsampling_factor\n\n #\n\n fx_dim = mtf.Dimension(\"nx\", nc)\n fy_dim = mtf.Dimension(\"ny\", nc)\n fz_dim = mtf.Dimension(\"nz\", nc)\n\n tfx_dim = mtf.Dimension(\"tx\", nc)\n tfy_dim = mtf.Dimension(\"ty\", nc)\n tfz_dim = mtf.Dimension(\"tz\", nc)\n\n # Dimensions of the low resolution grid\n x_dim = mtf.Dimension(\"nx_lr\", lnc)\n y_dim = mtf.Dimension(\"ny_lr\", lnc)\n z_dim = mtf.Dimension(\"nz_lr\", lnc)\n\n tx_dim = mtf.Dimension(\"tx_lr\", lnc)\n ty_dim = mtf.Dimension(\"ty_lr\", lnc)\n tz_dim = mtf.Dimension(\"tz_lr\", lnc)\n\n nx_dim = mtf.Dimension('nx_block', n_block_x)\n ny_dim = mtf.Dimension('ny_block', n_block_y)\n nz_dim = mtf.Dimension('nz_block', n_block_z)\n\n sx_dim = mtf.Dimension('sx_block', nc // n_block_x)\n sy_dim = mtf.Dimension('sy_block', nc // n_block_y)\n sz_dim = mtf.Dimension('sz_block', nc // n_block_z)\n\n k_dims = [tx_dim, ty_dim, tz_dim]\n\n batch_dim = mtf.Dimension(\"batch\", batch_size)\n pk_dim = mtf.Dimension(\"npk\", len(plin))\n pk = mtf.import_tf_tensor(mesh, plin.astype('float32'), shape=[pk_dim])\n\n # Compute necessary Fourier kernels\n kvec = flowpm.kernels.fftk((nc, nc, nc), symmetric=False)\n kx = mtf.import_tf_tensor(mesh,\n kvec[0].squeeze().astype('float32'),\n shape=[tfx_dim])\n ky = mtf.import_tf_tensor(mesh,\n kvec[1].squeeze().astype('float32'),\n shape=[tfy_dim])\n kz = mtf.import_tf_tensor(mesh,\n kvec[2].squeeze().astype('float32'),\n shape=[tfz_dim])\n kv = [ky, kz, kx]\n\n # kvec for low resolution grid\n kvec_lr = flowpm.kernels.fftk([lnc, lnc, lnc], symmetric=False)\n\n kx_lr = mtf.import_tf_tensor(mesh,\n kvec_lr[0].squeeze().astype('float32') /\n 2**downsampling_factor,\n shape=[tx_dim])\n ky_lr = mtf.import_tf_tensor(mesh,\n kvec_lr[1].squeeze().astype('float32') /\n 2**downsampling_factor,\n shape=[ty_dim])\n kz_lr = mtf.import_tf_tensor(mesh,\n kvec_lr[2].squeeze().astype('float32') /\n 2**downsampling_factor,\n shape=[tz_dim])\n kv_lr = [ky_lr, kz_lr, kx_lr]\n\n # kvec for high resolution blocks\n padded_sx_dim = mtf.Dimension('padded_sx_block',\n nc // n_block_x + 2 * halo_size)\n padded_sy_dim = mtf.Dimension('padded_sy_block',\n nc // n_block_y + 2 * halo_size)\n padded_sz_dim = mtf.Dimension('padded_sz_block',\n nc // n_block_z + 2 * halo_size)\n kvec_hr = flowpm.kernels.fftk([\n nc // n_block_x + 2 * halo_size, nc // n_block_y + 2 * halo_size,\n nc // n_block_z + 2 * halo_size\n ],\n symmetric=False)\n\n kx_hr = mtf.import_tf_tensor(mesh,\n kvec_hr[0].squeeze().astype('float32'),\n shape=[padded_sx_dim])\n ky_hr = mtf.import_tf_tensor(mesh,\n kvec_hr[1].squeeze().astype('float32'),\n shape=[padded_sy_dim])\n kz_hr = 
mtf.import_tf_tensor(mesh,\n kvec_hr[2].squeeze().astype('float32'),\n shape=[padded_sz_dim])\n kv_hr = [ky_hr, kz_hr, kx_hr]\n\n shape = [batch_dim, fx_dim, fy_dim, fz_dim]\n lr_shape = [batch_dim, x_dim, y_dim, z_dim]\n hr_shape = [batch_dim, nx_dim, ny_dim, nz_dim, sx_dim, sy_dim, sz_dim]\n part_shape = [batch_dim, fx_dim, fy_dim, fz_dim]\n\n # Begin simulation\n\n initc = mtfpm.linear_field(mesh, shape, bs, nc, pk, kv)\n\n # Reshaping array into high resolution mesh\n field = mtf.slicewise(lambda x: tf.expand_dims(\n tf.expand_dims(tf.expand_dims(x, axis=1), axis=1), axis=1), [initc],\n output_dtype=tf.float32,\n output_shape=hr_shape,\n name='my_reshape',\n splittable_dims=lr_shape[:-1] + hr_shape[1:4] +\n part_shape[1:3])\n\n for block_size_dim in hr_shape[-3:]:\n field = mtf.pad(field, [halo_size, halo_size], block_size_dim.name)\n\n for blocks_dim, block_size_dim in zip(hr_shape[1:4], field.shape[-3:]):\n field = mpm.halo_reduce(field, blocks_dim, block_size_dim, halo_size)\n\n field = mtf.reshape(field, field.shape + [mtf.Dimension('h_dim', 1)])\n high = field\n low = mesh_utils.downsample(field, downsampling_factor, antialias=True)\n\n low = mtf.reshape(low, low.shape[:-1])\n high = mtf.reshape(high, high.shape[:-1])\n\n for block_size_dim in hr_shape[-3:]:\n low = mtf.slice(low, halo_size // 2**downsampling_factor,\n block_size_dim.size // 2**downsampling_factor,\n block_size_dim.name)\n # Hack usisng custom reshape because mesh is pretty dumb\n low = mtf.slicewise(lambda x: x[:, 0, 0, 0], [low],\n output_dtype=tf.float32,\n output_shape=lr_shape,\n name='my_dumb_reshape',\n splittable_dims=lr_shape[:-1] + hr_shape[:4])\n\n state = mtfpm.lpt_init(\n low,\n high,\n 0.1,\n kv_lr,\n kv_hr,\n halo_size,\n hr_shape,\n lr_shape,\n part_shape[1:],\n downsampling_factor=downsampling_factor,\n antialias=True,\n )\n\n # Here we can run our nbody\n final_state = state #mtfpm.nbody(state, stages, lr_shape, hr_shape, k_dims, kv_lr, kv_hr, halo_size, downsampling_factor=downsampling_factor)\n\n # paint the field\n final_field = mtf.zeros(mesh, shape=hr_shape)\n for block_size_dim in hr_shape[-3:]:\n final_field = mtf.pad(final_field, [halo_size, halo_size],\n block_size_dim.name)\n final_field = mesh_utils.cic_paint(final_field, final_state[0], halo_size)\n # Halo exchange\n for blocks_dim, block_size_dim in zip(hr_shape[1:4], final_field.shape[-3:]):\n final_field = mpm.halo_reduce(final_field, blocks_dim, block_size_dim,\n halo_size)\n # Remove borders\n for block_size_dim in hr_shape[-3:]:\n final_field = mtf.slice(final_field, halo_size, block_size_dim.size,\n block_size_dim.name)\n\n #final_field = mtf.reshape(final_field, [batch_dim, fx_dim, fy_dim, fz_dim])\n # Hack usisng custom reshape because mesh is pretty dumb\n final_field = mtf.slicewise(lambda x: x[:, 0, 0, 0], [final_field],\n output_dtype=tf.float32,\n output_shape=[batch_dim, fx_dim, fy_dim, fz_dim],\n name='my_dumb_reshape',\n splittable_dims=part_shape[:-1] + hr_shape[:4])\n\n return initc, final_field\n\n ##",
"def init():\n\tN = np.int32(DIM) #prepare for stitching\n\t#HII_DIM = np.int32(HII_DIM)\n\tf_pixel_factor = DIM/HII_DIM;\n\tscale = np.float32(BOX_LEN)/DIM\n\tHII_scale = np.float32(BOX_LEN)/HII_DIM\n\tshape = (N,N,N)\n\t\n\tMRGgen = MRG32k3aRandomNumberGenerator(seed_getter=seed_getter_uniform, offset=0)\n\n\tkernel_source = open(cmd_folder+\"/initialize.cu\").read()\n\tkernel_code = kernel_source % {\n\n\t\t'DELTAK': DELTA_K,\n\t\t'VOLUME': VOLUME,\n\t\t'DIM': DIM\n\t}\n\tmain_module = nvcc.SourceModule(kernel_code)\n\tinit_kernel = main_module.get_function(\"init_kernel\")\n\tHII_filter = main_module.get_function(\"HII_filter\")\n\tadj_complex_conj = main_module.get_function(\"adj_complex_conj\")\n\tsubsample_kernel = main_module.get_function(\"subsample\")\n\tvelocity_kernel = main_module.get_function(\"set_velocity\")\n\tpspec_texture = main_module.get_texref(\"pspec\")\n\n\tinterpPspec, interpSize = init_pspec() #interpPspec contains both k array and P array\n\tinterp_cu = cuda.matrix_to_array(interpPspec, order='F')\n\tcuda.bind_array_to_texref(interp_cu, pspec_texture)\n\n\tlargebox_d = gpuarray.zeros(shape, dtype=np.float32)\n\tinit_kernel(largebox_d, np.int32(DIM), block=block_size, grid=grid_size)\n\n\t#import IPython; IPython.embed()\n\tlargebox_d_imag = gpuarray.zeros(shape, dtype=np.float32)\n\tinit_kernel(largebox_d_imag, np.int32(DIM), block=block_size, grid=grid_size)\n\n\tlargebox_d *= MRGgen.gen_normal(shape, dtype=np.float32)\n\tlargebox_d_imag *= MRGgen.gen_normal(shape, dtype=np.float32)\n\tlargebox_d = largebox_d + np.complex64(1.j) * largebox_d_imag\n\n\t#adj_complex_conj(largebox_d, DIM, block=block_size, grid=grid_size)\n\tlargebox = largebox_d.get()\n\t#np.save(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc\".format(DIM, BOX_LEN), largebox)\n\n\t#save real space box before smoothing\n\tplan = Plan(shape, dtype=np.complex64)\n\tplan.execute(largebox_d, inverse=True) #FFT to real space of smoothed box\n\tlargebox_d /= scale**3\n\tnp.save(parent_folder+\"/Boxes/deltax_z0.00_{0:d}_{1:.0f}Mpc\".format(DIM, BOX_LEN), largebox_d.real.get_async())\n\n\t#save real space box after smoothing and subsampling\n\t# host largebox is still in k space, no need to reload from disk\n\tlargebox_d = gpuarray.to_gpu(largebox)\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\tHII_filter(largebox_d, N, ZERO, smoothR, block=block_size, grid=grid_size);\n\tplan.execute(largebox_d, inverse=True) #FFT to real space of smoothed box\n\tlargebox_d /= scale**3\n\tsmallbox_d = gpuarray.zeros(HII_shape, dtype=np.float32)\n\tsubsample_kernel(largebox_d.real, smallbox_d, N, HII_DIM, PIXEL_FACTOR, block=block_size, grid=HII_grid_size) #subsample in real space\n\tnp.save(parent_folder+\"/Boxes/smoothed_deltax_z0.00_{0:d}_{1:.0f}Mpc\".format(HII_DIM, BOX_LEN), smallbox_d.get_async())\n\n\t# reload the k-space box for velocity boxes\n\tlargebox_d = gpuarray.to_gpu(largebox)\n\t\n\t#largebox_d /= VOLUME #divide by VOLUME if using fft (vs ifft)\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\tlargevbox_d = gpuarray.zeros((DIM,DIM,DIM), dtype=np.complex64)\n\tsmallbox_d = gpuarray.zeros(HII_shape, dtype=np.float32)\n\tfor num, mode in enumerate(['x', 'y', 'z']):\n\t\tvelocity_kernel(largebox_d, largevbox_d, DIM, np.int32(num), block=block_size, grid=grid_size)\n\t\tHII_filter(largevbox_d, DIM, ZERO, smoothR, block=block_size, grid=grid_size)\n\t\tplan.execute(largevbox_d, inverse=True)\n\t\tlargevbox_d /= scale**3\n\t\t#import IPython; IPython.embed()\n\t\tsubsample_kernel(largevbox_d.real, 
smallbox_d, DIM, HII_DIM,PIXEL_FACTOR, block=block_size, grid=HII_grid_size)\n\t\tnp.save(parent_folder+\"/Boxes/v{0}overddot_{1:d}_{2:.0f}Mpc\".format(mode, HII_DIM, BOX_LEN), smallbox_d.get())\n\n\treturn",
"def eg3(N_train=1000, N_test=500, depend_ratio_train=0.8, depend_ratio_test=0.2, feature_num=10, stable_ratio=0.4):\n\n def eg3_kernel(n, p, stable_ratio=0.4, depend_ratio=0.8):\n p_stable = int(p * stable_ratio)\n p_noise = p - p_stable\n stable_feature = np.random.randn(n, p_stable)\n noise_feature_dependent = np.zeros([n, p_noise])\n noise_feature_independent = np.random.randn(n, p_noise)\n for i in range(p_noise):\n noise_feature_dependent[:, i] = stable_feature[:, i % p_stable] + stable_feature[:,\n (i + 1) % p_stable] + 2 * np.random.randn(\n n) # still need noise\n noise_depend_label = np.random.uniform(0, 1, n).reshape(-1, 1)\n noise_depend_label = np.concatenate([noise_depend_label] * p_noise, axis=1)\n noise_feature = np.where(noise_depend_label < depend_ratio, noise_feature_dependent, noise_feature_independent)\n\n b = np.zeros([p_stable, 1])\n linear_len = int(p_stable / 2)\n\n for i in range(linear_len): # linear part\n b[i, 0] = (-1) ** i * (i % 3 + 1) * p / 3\n for i in range(linear_len, b.shape[0]): # nonlinear part\n b[i, 0] = p / 2\n\n linear_part = np.matmul(stable_feature[:, :linear_len], b[:linear_len, 0])\n nolinear_part = np.zeros([n, 1])\n for i in range(linear_len, b.shape[0]):\n temp = stable_feature[:, i % p_stable] * stable_feature[:, (i + 1) % p_stable] * b[i, 0]\n temp = temp.reshape(-1, 1)\n nolinear_part += temp\n\n Y = linear_part.reshape(-1, 1) + nolinear_part + np.random.randn(n, 1)\n\n data = {}\n data['stable'] = stable_feature\n data['noise'] = noise_feature\n data['Y'] = Y\n data['params'] = b\n data['kernel'] = 'eg3'\n return data\n\n data_train = eg3_kernel(n=N_train, p=feature_num, stable_ratio=stable_ratio, depend_ratio=depend_ratio_train)\n data_test = eg3_kernel(n=N_test, p=feature_num, stable_ratio=stable_ratio, depend_ratio=depend_ratio_test)\n return data_train, data_test",
"def init_stitch(N):\n\tif N is None:\n\t\tN = np.int32(HII_DIM) #prepare for stitching\n\tMETA_GRID_SIZE = DIM/N\n\tM = np.int32(HII_DIM/META_GRID_SIZE)\n\t#HII_DIM = np.int32(HII_DIM)\n\tf_pixel_factor = DIM/HII_DIM;\n\tscale = np.float32(BOX_LEN/DIM)\n\tprint 'scale', scale\n\tHII_scale = np.float32(BOX_LEN/HII_DIM)\n\tshape = (DIM,DIM,N)\n\tstitch_grid_size = (DIM/(block_size[0]),\n\t\t\t\t\t\tDIM/(block_size[0]),\n\t\t\t\t\t\tN/(block_size[0]))\n\tHII_stitch_grid_size = (HII_DIM/(block_size[0]),\n\t\t\t\t\t\tHII_DIM/(block_size[0]),\n\t\t\t\t\t\tM/(block_size[0]))\n\t#ratio of large box to small size\n\tkernel_source = open(cmd_folder+\"/initialize_stitch.cu\").read()\n\tkernel_code = kernel_source % {\n\n\t\t'DELTAK': DELTA_K,\n\t\t'DIM': DIM, \n\t\t'VOLUME': VOLUME,\n\t\t'META_BLOCKDIM': N\n\t}\n\tmain_module = nvcc.SourceModule(kernel_code)\n\tinit_stitch = main_module.get_function(\"init_kernel\")\n\tHII_filter = main_module.get_function(\"HII_filter\")\n\tsubsample_kernel = main_module.get_function(\"subsample\")\n\tvelocity_kernel = main_module.get_function(\"set_velocity\")\n\tpspec_texture = main_module.get_texref(\"pspec\")\n\tMRGgen = MRG32k3aRandomNumberGenerator(seed_getter=seed_getter_uniform, offset=0)\n\tplan2d = Plan((np.int64(DIM), np.int64(DIM)), dtype=np.complex64)\n\tplan1d = Plan((np.int64(DIM)), dtype=np.complex64)\n\tprint \"init pspec\"\n\tinterpPspec, interpSize = init_pspec() #interpPspec contains both k array and P array\n\tinterp_cu = cuda.matrix_to_array(interpPspec, order='F')\n\tcuda.bind_array_to_texref(interp_cu, pspec_texture)\n\t#hbox_large = pyfftw.empty_aligned((DIM, DIM, DIM), dtype='complex64')\n\thbox_large = np.zeros((DIM, DIM, DIM), dtype=np.complex64)\n\t#hbox_small = np.zeros(HII_shape, dtype=np.float32)\n\t#hbox_large = n\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\n\t# Set up pinned memory for transfer\n\t#largebox_hs = cuda.aligned_empty(shape=shape, dtype=np.float32, alignment=resource.getpagesize())\n\tlargebox_pin = cuda.pagelocked_empty(shape=shape, dtype=np.float32)\n\tlargecbox_pin = cuda.pagelocked_empty(shape=shape, dtype=np.complex64)\n\n\tlargebox_d = gpuarray.zeros(shape, dtype=np.float32)\n\tlargebox_d_imag = gpuarray.zeros(shape, dtype=np.float32)\n\tprint \"init boxes\"\n\tfor meta_z in xrange(META_GRID_SIZE):\n\t\t# MRGgen = MRG32k3aRandomNumberGenerator(seed_getter=seed_getter_uniform, offset=meta_x*N**3)\n\t\tinit_stitch(largebox_d, DIM, np.int32(meta_z),block=block_size, grid=stitch_grid_size)\n\t\tinit_stitch(largebox_d_imag, DIM, np.int32(meta_z),block=block_size, grid=stitch_grid_size)\n\t\tlargebox_d *= MRGgen.gen_normal(shape, dtype=np.float32)\n\t\tlargebox_d_imag *= MRGgen.gen_normal(shape, dtype=np.float32)\n\t\tlargebox_d = largebox_d + np.complex64(1.j) * largebox_d_imag\n\t\tcuda.memcpy_dtoh_async(largecbox_pin, largebox_d)\n\t\thbox_large[:, :, meta_z*N:(meta_z+1)*N] = largecbox_pin.copy()\n\t#if want to get velocity need to use this\n\tif True:\n\t\tprint \"saving kbox\"\n\t\tnp.save(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN), hbox_large)\n\n\tprint \"Executing FFT on device\"\n\t#hbox_large = pyfftw.interfaces.numpy_fft.ifftn(hbox_large).real\n\thbox_large = fft_stitch(N, plan2d, plan1d, hbox_large, largebox_d).real\n\tprint hbox_large.dtype\n\tprint \"Finished FFT on device\"\n\tnp.save(parent_folder+\"/Boxes/deltax_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN), hbox_large)\n\t\n\tif True:\n\t\tprint \"loading kbox\"\n\t\thbox_large = 
np.load(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN))\n\tfor meta_z in xrange(META_GRID_SIZE):\n\t\tlargebox_pin = hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy()\n\t\t#cuda.memcpy_htod_async(largebox_d, largebox_pin)\n\t\tlargebox_d = gpuarray.to_gpu_async(hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\tHII_filter(largebox_d, DIM, np.int32(meta_z), ZERO, smoothR, block=block_size, grid=stitch_grid_size);\n\t\thbox_large[:, :, meta_z*N:(meta_z+1)*N] = largebox_d.get_async()\n\t#import IPython; IPython.embed()\n\tprint \"Executing FFT on host\"\n\t#hbox_large = hifft(hbox_large).astype(np.complex64).real\n\t#hbox_large = pyfftw.interfaces.numpy_fft.ifftn(hbox_large).real\n\thbox_large = fft_stitch(N, plan2d, plan1d, hbox_large, largebox_d).real\n\tprint \"Finished FFT on host\"\n\t#import IPython; IPython.embed()\n\n\t# for meta_x in xrange(META_GRID_SIZE):\n\t# \tfor meta_y in xrange(META_GRID_SIZE):\n\t# \t\tfor meta_z in xrange(META_GRID_SIZE):\n\t# \t\t\tlargebox_d = gpuarray.to_gpu(hbox_large[meta_x*N:(meta_x+1)*N, meta_y*N:(meta_y+1)*N, meta_z*N:(meta_z+1)*N])\n\t# \t\t\tHII_filter(largebox_d, N, np.int32(meta_x), np.int32(meta_y), np.int32(meta_z), ZERO, smoothR, block=block_size, grid=grid_size);\n\t# \t\t\thbox_large[meta_x*N:(meta_x+1)*N, meta_y*N:(meta_y+1)*N, meta_z*N:(meta_z+1)*N] = largebox_d.get()\n\t#plan = Plan(shape, dtype=np.complex64)\n\t#plan.execute(largebox_d, inverse=True) #FFT to real space of smoothed box\n\t#largebox_d /= VOLUME #divide by VOLUME if using fft (vs ifft)\n\n\n\t# This saves a large resolution deltax\n\n\t\n\tprint \"downsampling\"\n\tsmallbox_d = gpuarray.zeros((HII_DIM,HII_DIM,M), dtype=np.float32)\n\tfor meta_z in xrange(META_GRID_SIZE):\n\t\tlargebox_pin = hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy()\n\t\tcuda.memcpy_dtoh_async(largecbox_pin, largebox_d)\n\t\t#largebox_d = gpuarray.to_gpu_async(hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\tlargebox_d /= scale**3 #\n\t\tsubsample_kernel(largebox_d, smallbox_d, DIM, HII_DIM, PIXEL_FACTOR, block=block_size, grid=HII_stitch_grid_size) #subsample in real space\n\t\thbox_small[:, :, meta_z*M:(meta_z+1)*M] = smallbox_d.get_async()\n\tnp.save(parent_folder+\"/Boxes/smoothed_deltax_z0.00_{0:d}_{1:.0f}Mpc\".format(HII_DIM, BOX_LEN), hbox_small)\n\t#import IPython; IPython.embed()\n\n\n\t# To get velocities: reload the k-space box\n\thbox_large = np.load(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN))\n\thvbox_large = np.zeros((DIM, DIM, DIM), dtype=np.float32)\n\thvbox_small = np.zeros(HII_shape, dtype=np.float32)\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\tlargevbox_d = gpuarray.zeros((DIM,DIM,N), dtype=np.complex64)\n\tsmallvbox_d = gpuarray.zeros((HII_DIM, HII_DIM, M), dtype=np.float32)\n\tfor num, mode in enumerate(['x', 'y', 'z']):\n\t\tfor meta_z in xrange(META_GRID_SIZE):\n\t\t\tlargebox_d = gpuarray.to_gpu_async(hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\t\t#largebox_d /= VOLUME #divide by VOLUME if using fft (vs ifft)\n\t\t\tvelocity_kernel(largebox_d, largevbox_d, DIM, np.int32(meta_z), np.int32(num), block=block_size, grid=stitch_grid_size)\n\t\t\tHII_filter(largevbox_d, DIM, ZERO, smoothR, block=block_size, grid=stitch_grid_size)\n\t\t\tprint hvbox_large.shape, largevbox_d.shape\n\t\t\thvbox_large[:, :, meta_z*N:(meta_z+1)*N] = largevbox_d.get_async()\n\t\thvbox_large = fft_stitch(N, plan2d, plan1d, hvbox_large, largevbox_d).real\n\t\tfor meta_z in xrange(META_GRID_SIZE):\n\t\t\tlargevbox_d = 
gpuarray.to_gpu_async(hvbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\t\tsubsample_kernel(largevbox_d.real, smallvbox_d, DIM, HII_DIM,PIXEL_FACTOR, block=block_size, grid=HII_stitch_grid_size)\n\t\t\thvbox_small[:, :, meta_z*M:(meta_z+1)*M] = smallvbox_d.get_async()\n\t\tnp.save(parent_folder+\"/Boxes/v{0}overddot_{1:d}_{2:.0f}Mpc\".format(mode, HII_DIM, BOX_LEN), smallvbox_d.get())\n\n\treturn",
"def hexapodZernikeMultiLinearModel_hexapodcoordinate():\n Tfile='/home/jghao/research/decamFocus/psf_withseeing/finerGrid_coeff_matrix/zernike_coeff_finerGrid_training.cp'\n Vfile = '/home/jghao/research/decamFocus/psf_withseeing/finerGrid_coeff_matrix/zernike_coeff_finerGrid_validate.cp'\n b=p.load(open(Tfile))\n vb=p.load(open(Vfile))\n nobs = len(b)\n x = b[:,0]\n y = b[:,1]\n z = b[:,2]\n theta = b[:,3]\n phi = b[:,4]\n fwhm = b[:,5]\n e1 = b[:,6]\n e2 = b[:,7]\n thetax = theta*np.cos(np.deg2rad(phi))\n thetay = theta*np.sin(np.deg2rad(phi))\n xh = x*1000 # convert to hexapod coordinate\n yh = -y*1000\n zh = -z*1000\n xtilth = - thetay\n ytilth = - thetax\n dataX = b[:,8:68]\n coeff_xh = sm.WLS(xh,dataX).fit().params\n coeff_yh = sm.WLS(yh,dataX).fit().params\n coeff_zh = sm.WLS(zh,dataX).fit().params\n coeff_xtilth = sm.WLS(xtilth,dataX).fit().params\n coeff_ytilth = sm.WLS(ytilth,dataX).fit().params\n coeff = np.array([coeff_xh,coeff_yh,coeff_zh,coeff_xtilth,coeff_ytilth])\n vx = vb[:,0]\n vy = vb[:,1]\n vz = vb[:,2]\n vtheta = vb[:,3]\n vphi = vb[:,4]\n vfwhm = vb[:,5]\n ve1 = vb[:,6]\n ve2 = vb[:,7]\n vthetax = vtheta*np.cos(np.deg2rad(vphi))\n vthetay = vtheta*np.sin(np.deg2rad(vphi))\n vxh = vx*1000 # convert to hexapod coordinate\n vyh = -vy*1000\n vzh = -vz*1000\n vxtilth = - vthetay\n vytilth = - vthetax\n vdataX = vb[:,8:68]\n fit = np.dot(vdataX,coeff.T)\n bp.bin_scatter(vxh,fit[:,0],nbins=20,fmt='bo',scatter=True)\n bp.bin_scatter(vyh,fit[:,1],nbins=20,fmt='bo',scatter=True)\n bp.bin_scatter(vzh,fit[:,2],nbins=20,fmt='bo',scatter=True)\n bp.bin_scatter(vxtilth,fit[:,3],nbins=20,fmt='bo',scatter=True)\n bp.bin_scatter(vytilth,fit[:,4],nbins=20,fmt='bo',scatter=True)",
"def test_3D_m6_2k():\n scal, velo = setup_3D()\n\n advec = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L4_2,\n Support: 'gpu_2k',\n Splitting: 'o2'}\n )\n advec_py = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L4_2,\n Support: '',\n Splitting: 'o2'}\n )\n assertion_3D_withPython(scal, velo, advec, advec_py)",
"def test_2D_m8_2k_sFH():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: 'gpu_2k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def test_spatial_smoothing_xesmf_reduce_spatial_dims_MPI_curv(\r\n PM_ds_control_3d_full,\r\n):\r\n da = PM_ds_control_3d_full\r\n step = 5\r\n actual = spatial_smoothing_xesmf(\r\n da,\r\n d_lon_lat_kws={\"lon\": step},\r\n )\r\n expected_lat_size = 180 // step\r\n assert actual[\"lon\"].size < da.lon.size\r\n assert actual[\"lat\"].size == expected_lat_size",
"def match_det2cube(self, input_model,\n x, y, file_slice_no,\n this_par1, this_par2,\n spaxel,\n c1_offset, c2_offset):\n\n#________________________________________________________________________________\n if self.instrument == 'MIRI':\n\n det2ab_transform = input_model.meta.wcs.get_transform('detector', 'alpha_beta')\n detector2v23 = input_model.meta.wcs.get_transform('detector', 'v2v3')\n v23toworld = input_model.meta.wcs.get_transform(\"v2v3\", \"world\")\n worldtov23 = input_model.meta.wcs.get_transform(\"world\", \"v2v3\")\n v2ab_transform = input_model.meta.wcs.get_transform('v2v3',\n 'alpha_beta')\n\n alpha, beta, wave = det2ab_transform(x, y)\n v2, v3, lam23 = detector2v23(x, y)\n ra, dec, lam = v23toworld(v2, v3, lam23)\n\n valid1 = np.isfinite(v2)\n valid2 = np.isfinite(v3)\n\n if self.weighting == 'miripsf':\n wave_resol = self.instrument_info.Get_RP_ave_Wave(this_par1, this_par2)\n alpha_resol = self.instrument_info.Get_psf_alpha_parameters()\n beta_resol = self.instrument_info.Get_psf_beta_parameters()\n\n # transform Cube Spaxel centers to alpha,beta of exposure\n # for MIRI weighting parameters are based on distance in\n # alpha-beta coord system\n # transform the cube coordinate values to alpha and beta values\n # xi,eta -> ra,dec\n # world -> v2,v3\n # v2,v3 -> local alpha,beta\n\n elif self.instrument == 'NIRSPEC':\n islice = file_slice_no\n slice_wcs = nirspec.nrs_wcs_set_input(input_model, islice)\n\n x, y = wcstools.grid_from_bounding_box(slice_wcs.bounding_box, \n step=(1, 1), center=True)\n ra, dec, lam = slice_wcs(x, y) # return v2,v3 are in degrees\n valid1 = np.isfinite(ra)\n valid2 = np.isfinite(dec)\n#________________________________________________________________________________\n#________________________________________________________________________________\n# Slices are curved on detector. A slice region is grabbed by corner regions so\n# the region returned may include pixels not value for slice. There are gaps\n# between the slices. 
Pixels not belonging to a slice are assigned NaN values.\n\n x = x.astype(np.int)\n y = y.astype(np.int)\n\n flux_all = input_model.data[y, x]\n# error_all = input_model.err[y, x]\n dq_all = input_model.dq[y, x]\n\n valid3 = np.isfinite(lam)\n valid4 = np.isfinite(flux_all)\n valid = valid1 & valid2 & valid3 & valid4\n#________________________________________________________________________________\n# using the DQFlags from the input_image find pixels that should be excluded\n# from the cube mapping\n all_flags = (dqflags.pixel['DO_NOT_USE'] + dqflags.pixel['DROPOUT'] +\n dqflags.pixel['NON_SCIENCE'] +\n dqflags.pixel['DEAD'] + dqflags.pixel['HOT'] +\n dqflags.pixel['RC'] + dqflags.pixel['NONLINEAR'])\n\n # find the location of all the values to reject in cube building\n good_data = np.where((np.bitwise_and(dq_all, all_flags) == 0) & (valid == True))\n\n # good data holds the location of pixels we want to map to cube\n flux = flux_all[good_data]\n# error = error_all[good_data]\n wave = lam[good_data]\n\n# xpix = x[good_data] # only used for testing\n# ypix = y[good_data] # only used for testing\n\n ra = ra - c1_offset / 3600.0\n dec = dec - c2_offset / 3600.0\n ra_use = ra[good_data]\n dec_use = dec[good_data]\n if self.instrument == 'MIRI':\n # need alpha,beta if weigthing is miripsf or cubes in alpha-beta space\n alpha_det = alpha[good_data]\n beta_det = beta[good_data]\n# MIRI can make cubes in alpha-beta:\n if self.coord_system == 'alpha-beta':\n coord1 = alpha[good_data]\n coord2 = beta[good_data]\n\n else:\n# xi,eta in arc seconds\n xi, eta = coord.radec2std(self.Crval1, self.Crval2, ra_use, dec_use)\n coord1 = xi\n coord2 = eta\n\n nplane = self.naxis1 * self.naxis2\n lower_limit = 0.01\n\n# iprint = 0\n# now loop over the pixel values for this region and find the spaxels that fall\n# withing the region of interest.\n nn = coord1.size\n\n# print('looping over n points mapping to cloud',nn)\n#________________________________________________________________________________\n for ipt in range(0, nn - 1):\n#________________________________________________________________________________\n # Cube.Xcenters, ycenters is a flattened 1-D array of the 2 X 2 xy plane\n # cube coordinates.\n # find the spaxels that fall withing ROI of point cloud defined by\n # coord1,coord2,wave\n# if(ipt > 2): sys.exit('STOP')\n# print('For point ',coord1[ipt],coord2[ipt],wave[ipt],ipt)\n\n# if(ipt == 0):\n# print('size of Xcenters',self.Xcenters.size)\n xdistance = (self.Xcenters - coord1[ipt])\n ydistance = (self.Ycenters - coord2[ipt])\n radius = np.sqrt(xdistance * xdistance + ydistance * ydistance)\n indexr = np.where(radius <= self.rois)\n indexz = np.where(abs(self.zcoord - wave[ipt]) <= self.roiw)\n\n# print('indexz',indexz)\n# print('indexr',indexr)\n zlam = self.zcoord[indexz] # z Cube values falling in wavelength roi\n xi_cube = self.Xcenters[indexr] # x Cube values within radius\n eta_cube = self.Ycenters[indexr] # y cube values with the radius\n\n# print('found xi_cube',xi_cube)\n# print('found eta_cube',eta_cube)\n\n#________________________________________________________________________________\n# loop over the points in the ROI\n for iz, zz in enumerate(indexz[0]):\n istart = zz * nplane\n for ir, rr in enumerate(indexr[0]):\n# yy_cube = int(rr / self.naxis1)\n# xx_cube = rr - yy_cube * self.naxis1\n# print('xx yy cube',rr,self.naxis1,xx_cube,yy_cube)\n#________________________________________________________________________________\n if self.weighting == 'msm':\n d1 = (xi_cube[ir] - 
coord1[ipt]) / self.Cdelt1\n d2 = (eta_cube[ir] - coord2[ipt]) / self.Cdelt2\n d3 = (zlam[iz] - wave[ipt]) / self.Cdelt3\n\n weight_distance = math.sqrt(d1 * d1 + d2 * d2 + d3 * d3)\n weight_distance = math.pow(weight_distance, self.weight_power)\n#________________________________________________________________________________\n# if weight is miripsf -distances determined in alpha-beta coordinate system\n elif self.weighting == 'miripsf':\n weights = FindNormalizationWeights(wave[ipt],\n wave_resol,\n alpha_resol,\n beta_resol)\n\n\n ra_spaxel, dec_spaxel = coord.std2radec(self.Crval1,\n self.Crval2,\n xi_cube[ir],\n eta_cube[ir])\n\n v2_spaxel, v3_spaxel, zl = worldtov23(ra_spaxel,\n dec_spaxel,\n zlam[iz])\n\n alpha_spaxel, beta_spaxel, wave_spaxel = v2ab_transform(v2_spaxel,\n v3_spaxel,\n zlam[iz])\n alpha_distance = alpha_det[ipt] - alpha_spaxel\n beta_distance = beta_det[ipt] - beta_spaxel\n wave_distance = abs(wave[ipt] - wave_spaxel)\n\n xn = alpha_distance / weights[0]\n yn = beta_distance / weights[1]\n wn = wave_distance / weights[2]\n\n # only included the spatial dimensions\n weight_distance = math.sqrt(xn * xn + yn * yn + wn * wn)\n weight_distance = math.pow(weight_distance, self.weight_power)\n#________________________________________________________________________________\n# We have found the weight_distance based on instrument type\n\n if weight_distance < lower_limit: weight_distance = lower_limit\n weight_distance = 1.0 / weight_distance\n\n cube_index = istart + rr\n spaxel[cube_index].flux = spaxel[cube_index].flux + weight_distance * flux[ipt]\n spaxel[cube_index].flux_weight = spaxel[cube_index].flux_weight + weight_distance\n spaxel[cube_index].iflux = spaxel[cube_index].iflux + 1",
"def Sphere_ExactSerendipityLagrangeQuad():\n\n mesh = Sphere_CubeToSerendipityLagrangeQuad(1)\n \n ################\n # Modifications for exact sphere\n ################\n # x=+1 side\n def posXvals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.ones(xi1.shape);yb=np.array(-xi1);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n x = xb*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n y = yb*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n z = zb*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n return np.vstack([x,y,z])\n def posXnormals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.ones(xi1.shape);yb=np.array(-xi1);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = -1.0 * xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-yb+2.0*yb*zz/3.0)\n dxdxi2 = 1.0 * xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-zb+2.0*yy*zb/3.0)\n dydxi1 = -1.0 * np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n dydxi2 = 1.0 * yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-zb+2.0*xx*zb/3.0)\n dzdxi1 = -1.0 * zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-yb+2.0*xx*yb/3.0)\n dzdxi2 = 1.0 * np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n magnitude = np.sqrt(np.sum(J**2,axis=1))\n return J.T/magnitude\n def posXJ(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.ones(xi1.shape);yb=np.array(-xi1);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = -1.0 * xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-yb+2.0*yb*zz/3.0)\n dxdxi2 = 1.0 * xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-zb+2.0*yy*zb/3.0)\n dydxi1 = -1.0 * np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n dydxi2 = 1.0 * yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-zb+2.0*xx*zb/3.0)\n dzdxi1 = -1.0 * zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-yb+2.0*xx*yb/3.0)\n dzdxi2 = 1.0 * np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n return np.sqrt(np.sum(J**2,axis=1))\n mesh.eList[0].vals = posXvals\n mesh.eList[0].normals = posXnormals\n mesh.eList[0].J = posXJ\n \n def posYvals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1);yb=np.ones(xi1.shape);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n x = xb*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n y = yb*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n z = zb*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n return np.vstack([x,y,z])\n def posYnormals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1);yb=np.ones(xi1.shape);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = 1.0*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n dxdxi2 = 1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-zb+2.0*yy*zb/3.0)\n dydxi1 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-xb+2.0*xb*zz/3.0)\n dydxi2 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-zb+2.0*xx*zb/3.0)\n dzdxi1 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-xb+2.0*xb*yy/3.0)\n dzdxi2 = 1.0*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n magnitude = np.sqrt(np.sum(J**2,axis=1))\n return 
J.T/magnitude\n def posYJ(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1);yb=np.ones(xi1.shape);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = 1.0*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n dxdxi2 = 1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-zb+2.0*yy*zb/3.0)\n dydxi1 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-xb+2.0*xb*zz/3.0)\n dydxi2 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-zb+2.0*xx*zb/3.0)\n dzdxi1 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-xb+2.0*xb*yy/3.0)\n dzdxi2 = 1.0*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n return np.sqrt(np.sum(J**2,axis=1))\n mesh.eList[1].vals = posYvals\n mesh.eList[1].normals = posYnormals\n mesh.eList[1].J = posYJ\n \n # x=-1 side\n def negXvals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=-np.ones(xi1.shape);yb=np.array(xi1);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n x = xb*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n y = yb*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n z = zb*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n return np.vstack([x,y,z])\n def negXnormals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=-np.ones(xi1.shape);yb=np.array(xi1);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = 1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-yb+2.0*yb*zz/3.0)\n dxdxi2 = 1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-zb+2.0*yy*zb/3.0)\n dydxi1 = 1.0*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n dydxi2 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-zb+2.0*xx*zb/3.0)\n dzdxi1 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-yb+2.0*xx*yb/3.0)\n dzdxi2 = 1.0*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n magnitude = np.sqrt(np.sum(J**2,axis=1))\n return J.T/magnitude\n def negXJ(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=-np.ones(xi1.shape);yb=np.array(xi1);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = 1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-yb+2.0*yb*zz/3.0)\n dxdxi2 = 1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-zb+2.0*yy*zb/3.0)\n dydxi1 = 1.0*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n dydxi2 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-zb+2.0*xx*zb/3.0)\n dzdxi1 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-yb+2.0*xx*yb/3.0)\n dzdxi2 = 1.0*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n return np.sqrt(np.sum(J**2,axis=1))\n mesh.eList[2].vals = negXvals\n mesh.eList[2].normals = negXnormals\n mesh.eList[2].J = negXJ\n\n # y=-1 side\n def negYvals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(-xi1);yb=-np.ones(xi1.shape);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n x = xb*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n y = yb*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n z = zb*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n return np.vstack([x,y,z])\n def negYnormals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n 
xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(-xi1);yb=-np.ones(xi1.shape);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2; \n dxdxi1 = -1.0*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n dxdxi2 = 1.0*0.5*xb*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5*(-zb+2.0*yy*zb/3.0)\n dydxi1 = -1.0*0.5*yb*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5*(-xb+2.0*xb*zz/3.0)\n dydxi2 = 1.0*0.5*yb*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5*(-zb+2.0*xx*zb/3.0)\n dzdxi1 = -1.0*0.5*zb*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5*(-xb+2.0*xb*yy/3.0)\n dzdxi2 = 1.0*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0) \n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n magnitude = np.sqrt(np.sum(J**2,axis=1))\n return J.T/magnitude\n def negYJ(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(-xi1);yb=-np.ones(xi1.shape);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2; \n dxdxi1 = -1.0*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n dxdxi2 = 1.0*0.5*xb*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5*(-zb+2.0*yy*zb/3.0)\n dydxi1 = -1.0*0.5*yb*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5*(-xb+2.0*xb*zz/3.0)\n dydxi2 = 1.0*0.5*yb*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5*(-zb+2.0*xx*zb/3.0)\n dzdxi1 = -1.0*0.5*zb*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5*(-xb+2.0*xb*yy/3.0)\n dzdxi2 = 1.0*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0) \n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n return np.sqrt(np.sum(J**2,axis=1))\n mesh.eList[3].vals = negYvals\n mesh.eList[3].normals = negYnormals\n mesh.eList[3].J = negYJ\n \n # z=+1 side\n def posZvals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1)\n yb=np.array(-xi2)\n zb=np.ones(xi1.shape)\n xx=xb**2;yy=yb**2;zz=zb**2;\n x = xb*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n y = yb*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n z = zb*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n return np.vstack([x,y,z])\n def posZnormals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1);yb=np.array(-xi2);zb=np.ones(xi1.shape);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = 1.0*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n dxdxi2 = -1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-yb+2.0*yb*zz/3.0)\n dydxi1 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-xb+2.0*xb*zz/3.0)\n dydxi2 = -1.0*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n dzdxi1 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-xb+2.0*xb*yy/3.0)\n dzdxi2 = -1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-yb+2.0*xx*yb/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n magnitude = np.sqrt(np.sum(J**2,axis=1))\n return J.T/magnitude\n def posZJ(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1);yb=np.array(-xi2);zb=np.ones(xi1.shape);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = 1.0*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n dxdxi2 = -1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-yb+2.0*yb*zz/3.0)\n dydxi1 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-xb+2.0*xb*zz/3.0)\n dydxi2 = -1.0*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n dzdxi1 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-xb+2.0*xb*yy/3.0)\n dzdxi2 = -1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * 
(-yb+2.0*xx*yb/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n return np.sqrt(np.sum(J**2,axis=1))\n mesh.eList[4].vals = posZvals\n mesh.eList[4].normals = posZnormals\n mesh.eList[4].J = posZJ\n \n # z=-1 side\n def negZvals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1);yb=np.array(xi2);zb=-np.ones(xi1.shape);\n xx=xb**2;yy=yb**2;zz=zb**2;\n x = xb*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n y = yb*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n z = zb*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n return np.vstack([x,y,z])\n def negZnormals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1);yb=np.array(xi2);zb=-np.ones(xi1.shape);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = 1.0*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n dxdxi2 = 1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-yb+2.0*yb*zz/3.0)\n dydxi1 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-xb+2.0*xb*zz/3.0)\n dydxi2 = 1.0*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n dzdxi1 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-xb+2.0*xb*yy/3.0)\n dzdxi2 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-yb+2.0*xx*yb/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n magnitude = np.sqrt(np.sum(J**2,axis=1))\n return J.T/magnitude\n def negZJ(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1);yb=np.array(xi2);zb=-np.ones(xi1.shape);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = 1.0*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n dxdxi2 = 1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-yb+2.0*yb*zz/3.0)\n dydxi1 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-xb+2.0*xb*zz/3.0)\n dydxi2 = 1.0*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n dzdxi1 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-xb+2.0*xb*yy/3.0)\n dzdxi2 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-yb+2.0*xx*yb/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n return np.sqrt(np.sum(J**2,axis=1))\n mesh.eList[5].vals = negZvals\n mesh.eList[5].normals = negZnormals\n mesh.eList[5].J = negZJ\n \n for e in mesh.eList:\n e.ExactElement = True\n \n return mesh",
"def test_c0q1(self):\n self.check_c0q1(test_hexMesh_3x3=False,use_petsc=True, name=\"_proteusMesh_\")",
"def test_3D_m4_2k_sFH():\n scal, velo = setup_3D()\n\n advec = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: 'gpu_2k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_3D_withPython(scal, velo, advec, advec_py)",
"def test_MeshMat_1group(self):\n\n MS_grp = self.meshsol.get_group(\"stator\")\n cells_grp, nb_cell, indices = MS_grp.get_mesh().get_cell()\n solution = np.array([[0, 1, 2], [1, 2, 3]])\n result_tgl = cells_grp[\"triangle\"]\n testA = np.sum(abs(solution - result_tgl))\n msg = (\n \"Wrong output: returned \" + str(result_tgl) + \", expected: \" + str(solution)\n )\n self.assertAlmostEqual(testA, 0, msg=msg, delta=self.DELTA)\n\n MS_grp = self.meshsol.get_group(\"rotor\")\n cells_grp, nb_cell, indices = MS_grp.get_mesh().get_cell()\n solution = np.array([[3, 3], [1, 2], [2, 3]])\n results = cells_grp[\"triangle\"] # The point indices have changed !\n points = MS_grp.get_mesh().get_point(results)\n testA = np.sum(abs(solution - points))\n msg = \"Wrong output: returned \" + str(results) + \", expected: \" + str(solution)\n self.assertAlmostEqual(testA, 0, msg=msg, delta=self.DELTA)",
"def test_3D_m4_2k():\n scal, velo = setup_3D()\n\n advec = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: 'gpu_2k',\n Splitting: 'o2'}\n )\n advec_py = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: '',\n Splitting: 'o2'},\n )\n assertion_3D_withPython(scal, velo, advec, advec_py)"
]
| [
"0.6212858",
"0.61790264",
"0.61441815",
"0.60225594",
"0.59996915",
"0.59342116",
"0.59044826",
"0.5891348",
"0.58359265",
"0.5797982",
"0.57896024",
"0.57776266",
"0.5761643",
"0.57230186",
"0.57105297",
"0.5710375",
"0.570535",
"0.569546",
"0.5687734",
"0.56662655",
"0.56496733",
"0.56083673",
"0.558133",
"0.55622023",
"0.55595654",
"0.55525494",
"0.55401534",
"0.5537841",
"0.5528487",
"0.5514496"
]
| 0.63192964 | 0 |
Testing M8 remeshing formula in 3D, 1 kernel, simple precision, o2_FullHalf splitting. | def test_3D_m8_1k_sFH():
scal, velo = setup_3D()
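    # GPU advection: one OpenCL kernel ('gpu_1k'), M8' remeshing, o2 full/half splitting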
advec = Advection(velo, scal, discretization=d3d,
method={TimeIntegrator: RK2,
Interpolation: Linear,
Remesh: M8Prime,
Support: 'gpu_1k',
Splitting: 'o2_FullHalf'}
)
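    # Pure-Python reference advection with the same remeshing and splitting scheme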
advec_py = Advection(velo, scal, discretization=d3d,
method={TimeIntegrator: RK2,
Interpolation: Linear,
Remesh: M8Prime,
Support: '',
Splitting: 'o2_FullHalf'}
)
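    # Compare the GPU result against the Python reference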
assertion_3D_withPython(scal, velo, advec, advec_py) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_3D_m8_2k_sFH():\n scal, velo = setup_3D()\n\n advec = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: 'gpu_2k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_3D_withPython(scal, velo, advec, advec_py)",
"def test_2D_m8_1k_sFH():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: 'gpu_1k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def test_3D_m8_2k():\n scal, velo = setup_3D()\n\n advec = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: 'gpu_2k',\n Splitting: 'o2'}\n )\n advec_py = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: '',\n Splitting: 'o2'},\n )\n assertion_3D_withPython(scal, velo, advec, advec_py)",
"def test_2_2_3D_rec_splits(self):\n check = [(-3.0, -2.0, 0.0), (4.0, 10.0, 1.0), (4.0, -2.0, 0.0),\n (4.0, 10.0, 0.0), (4.0, -2.0, 1.0), (-3.0, 10.0, 0.0),\n (-3.0, 10.0, 1.0), (-3.0, -2.0, 1.0), (0.5, 4.0, 0.5),\n (-3.0, 4.0, 0.5), (-3.0, -2.0, 0.5), (-3.0, 4.0, 0.0),\n (0.5, -2.0, 0.5), (0.5, -2.0, 0.0), (0.5, 4.0, 0.0),\n (-1.25, 1.0, 0.25), (4.0, 4.0, 0.5), (4.0, 10.0, 0.5),\n (4.0, 4.0, 1.0), (0.5, 10.0, 0.5), (0.5, 10.0, 1.0),\n (0.5, 4.0, 1.0), (2.25, 7.0, 0.75), (4.0, -2.0, 0.5),\n (4.0, 4.0, 0.0), (2.25, 1.0, 0.25), (0.5, 10.0, 0.0),\n (2.25, 7.0, 0.25), (0.5, -2.0, 1.0), (2.25, 1.0, 0.75),\n (-3.0, 10.0, 0.5), (-1.25, 7.0, 0.25), (-3.0, 4.0, 1.0),\n (-1.25, 7.0, 0.75), (-1.25, 1.0, 0.75), (0.5, 1.0, 0.25),\n (0.5, 4.0, 0.25), (0.5, 1.0, 0.5), (-1.25, 4.0, 0.25),\n (-1.25, 4.0, 0.5), (-1.25, 1.0, 0.5), (-0.375, 2.5, 0.375),\n (-3.0, 1.0, 0.25), (-3.0, -2.0, 0.25), (-3.0, 1.0, 0.0),\n (-1.25, -2.0, 0.25), (-1.25, -2.0, 0.0), (-1.25, 1.0, 0.0),\n (-2.125, -0.5, 0.125), (-3.0, 4.0, 0.25), (-3.0, 1.0, 0.5),\n (-2.125, 2.5, 0.375), (-1.25, -2.0, 0.5),\n (-2.125, -0.5, 0.375), (-1.25, 4.0, 0.0), (-2.125, 2.5, 0.125),\n (0.5, -2.0, 0.25), (-0.375, -0.5, 0.375), (0.5, 1.0, 0.0),\n (-0.375, -0.5, 0.125), (-0.375, 2.5, 0.125), (0.5, 7.0, 0.75),\n (0.5, 4.0, 0.75), (0.5, 7.0, 0.5), (2.25, 4.0, 0.75),\n (2.25, 4.0, 0.5), (2.25, 7.0, 0.5), (1.375, 5.5, 0.625),\n (4.0, 7.0, 0.75), (4.0, 10.0, 0.75), (4.0, 7.0, 1.0),\n (2.25, 10.0, 0.75), (2.25, 10.0, 1.0), (2.25, 7.0, 1.0),\n (3.125, 8.5, 0.875), (4.0, 4.0, 0.75), (4.0, 7.0, 0.5),\n (3.125, 5.5, 0.625), (2.25, 10.0, 0.5), (3.125, 8.5, 0.625),\n (2.25, 4.0, 1.0), (3.125, 5.5, 0.875), (0.5, 10.0, 0.75),\n (1.375, 8.5, 0.625), (0.5, 7.0, 1.0), (1.375, 8.5, 0.875),\n (1.375, 5.5, 0.875), (2.25, 4.0, 0.25), (2.25, 1.0, 0.5),\n (1.375, 2.5, 0.375), (4.0, 1.0, 0.25), (4.0, -2.0, 0.25),\n (4.0, 1.0, 0.0), (2.25, -2.0, 0.25), (2.25, -2.0, 0.0),\n (2.25, 1.0, 0.0), (3.125, -0.5, 0.125), (4.0, 4.0, 0.25),\n (4.0, 1.0, 0.5), (3.125, 2.5, 0.375), (2.25, -2.0, 0.5),\n (3.125, -0.5, 0.375), (2.25, 4.0, 0.0), (3.125, 2.5, 0.125),\n (1.375, -0.5, 0.375), (1.375, -0.5, 0.125),\n (1.375, 2.5, 0.125), (0.5, 7.0, 0.25), (1.375, 5.5, 0.375),\n (4.0, 7.0, 0.25), (4.0, 10.0, 0.25), (4.0, 7.0, 0.0),\n (2.25, 10.0, 0.25), (2.25, 10.0, 0.0), (2.25, 7.0, 0.0),\n (3.125, 8.5, 0.125), (3.125, 5.5, 0.375), (3.125, 8.5, 0.375),\n (3.125, 5.5, 0.125), (0.5, 10.0, 0.25), (1.375, 8.5, 0.375),\n (0.5, 7.0, 0.0), (1.375, 8.5, 0.125), (1.375, 5.5, 0.125),\n (0.5, 1.0, 0.75), (1.375, 2.5, 0.625), (4.0, 1.0, 0.75),\n (4.0, -2.0, 0.75), (4.0, 1.0, 1.0), (2.25, -2.0, 0.75),\n (2.25, -2.0, 1.0), (2.25, 1.0, 1.0), (3.125, -0.5, 0.875),\n (3.125, 2.5, 0.625), (3.125, -0.5, 0.625), (3.125, 2.5, 0.875),\n (0.5, -2.0, 0.75), (1.375, -0.5, 0.625), (0.5, 1.0, 1.0),\n (1.375, -0.5, 0.875), (1.375, 2.5, 0.875), (-1.25, 7.0, 0.5),\n (-0.375, 5.5, 0.375), (-3.0, 7.0, 0.25), (-3.0, 10.0, 0.25),\n (-3.0, 7.0, 0.0), (-1.25, 10.0, 0.25), (-1.25, 10.0, 0.0),\n (-1.25, 7.0, 0.0), (-2.125, 8.5, 0.125), (-3.0, 7.0, 0.5),\n (-2.125, 5.5, 0.375), (-1.25, 10.0, 0.5), (-2.125, 8.5, 0.375),\n (-2.125, 5.5, 0.125), (-0.375, 8.5, 0.375),\n (-0.375, 8.5, 0.125), (-0.375, 5.5, 0.125), (-1.25, 4.0, 0.75),\n (-0.375, 5.5, 0.625), (-3.0, 7.0, 0.75), (-3.0, 10.0, 0.75),\n (-3.0, 7.0, 1.0), (-1.25, 10.0, 0.75), (-1.25, 10.0, 1.0),\n (-1.25, 7.0, 1.0), (-2.125, 8.5, 0.875), (-3.0, 4.0, 0.75),\n (-2.125, 5.5, 0.625), (-2.125, 8.5, 0.625), (-1.25, 4.0, 1.0),\n (-2.125, 5.5, 0.875), (-0.375, 8.5, 0.625),\n (-0.375, 8.5, 
0.875), (-0.375, 5.5, 0.875),\n (-0.375, 2.5, 0.625), (-3.0, 1.0, 0.75), (-3.0, -2.0, 0.75),\n (-3.0, 1.0, 1.0), (-1.25, -2.0, 0.75), (-1.25, -2.0, 1.0),\n (-1.25, 1.0, 1.0), (-2.125, -0.5, 0.875), (-2.125, 2.5, 0.625),\n (-2.125, -0.5, 0.625), (-2.125, 2.5, 0.875),\n (-0.375, -0.5, 0.625), (-0.375, -0.5, 0.875),\n (-0.375, 2.5, 0.875)]\n nn_checks = {(2.25, 7.0, 0.75): [(4.0, 7.0, 0.75), (2.25, 7.0, 1.0),\n (4.0, 7.0, 0.5), (4.0, 7.0, 1.0),\n (4.0, 4.0, 0.75), (1.375, 5.5, 0.875),\n (2.25, 4.0, 1.0), (2.25, 4.0, 0.5),\n (2.25, 4.0, 0.75), (3.125, 8.5, 0.875),\n (3.125, 8.5, 0.625), (4.0, 10.0, 0.75),\n (2.25, 10.0, 1.0), (2.25, 10.0, 0.75),\n (2.25, 10.0, 0.5), (1.375, 8.5, 0.625),\n (1.375, 8.5, 0.875), (0.5, 7.0, 0.75),\n (0.5, 7.0, 0.5), (3.125, 5.5, 0.625),\n (3.125, 5.5, 0.875), (0.5, 10.0, 0.75),\n (0.5, 7.0, 1.0), (0.5, 4.0, 0.75),\n (2.25, 7.0, 0.5), (1.375, 5.5, 0.625)],\n (4.0, -2.0, 0.5): [(4.0, -2.0, 0.75), (4.0, -2.0, 0.25),\n (2.25, 1.0, 0.5), (2.25, -2.0, 0.75),\n (2.25, -2.0, 0.5), (2.25, -2.0, 0.25),\n (4.0, 1.0, 0.25), (4.0, 1.0, 0.75),\n (4.0, 1.0, 0.5), (3.125, -0.5, 0.375),\n (3.125, -0.5, 0.625)],\n (-2.125, -0.5, 0.875): [(-1.25, 1.0, 1.0),\n (-1.25, 1.0, 0.75),\n (-1.25, -2.0, 0.75),\n (-1.25, -2.0, 1.0),\n (-3.0, -2.0, 0.75),\n (-3.0, 1.0, 1.0), (-3, -2, 1),\n (-3.0, 1.0, 0.75)]}\n\n init_triangulation(3, 2, check, nn_checks, bounds=[(-3, 4), (-2, 10), (0, 1)])",
"def test_3D_m8_1k():\n scal, velo = setup_3D()\n\n advec = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: 'gpu_1k',\n Splitting: 'o2'}\n )\n advec_py = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: '',\n Splitting: 'o2'},\n )\n assertion_3D_withPython(scal, velo, advec, advec_py)",
"def test_2_2_3D_cube_splits(self):\n check = [(0, 0, 0), (1, 1, 1), (1, 0, 0), (1, 1, 0), (1, 0, 1),\n (0, 1, 0),\n (0, 1, 1), (0, 0, 1), (0.5, 0.5, 0.5), (0.0, 0.5, 0.5),\n (0.0, 0.0, 0.5), (0.0, 0.5, 0.0), (0.5, 0.0, 0.5),\n (0.5, 0.0, 0.0),\n (0.5, 0.5, 0.0), (0.25, 0.25, 0.25), (1.0, 0.5, 0.5),\n (1.0, 1.0, 0.5),\n (1.0, 0.5, 1.0), (0.5, 1.0, 0.5), (0.5, 1.0, 1.0),\n (0.5, 0.5, 1.0),\n (0.75, 0.75, 0.75), (1.0, 0.0, 0.5), (1.0, 0.5, 0.0),\n (0.75, 0.25, 0.25), (0.5, 1.0, 0.0), (0.75, 0.75, 0.25),\n (0.5, 0.0, 1.0), (0.75, 0.25, 0.75), (0.0, 1.0, 0.5),\n (0.25, 0.75, 0.25), (0.0, 0.5, 1.0), (0.25, 0.75, 0.75),\n (0.25, 0.25, 0.75), (0.5, 0.25, 0.25), (0.5, 0.5, 0.25),\n (0.5, 0.25, 0.5), (0.25, 0.5, 0.25), (0.25, 0.5, 0.5),\n (0.25, 0.25, 0.5), (0.375, 0.375, 0.375), (0.0, 0.25, 0.25),\n (0.0, 0.0, 0.25), (0.0, 0.25, 0.0), (0.25, 0.0, 0.25),\n (0.25, 0.0, 0.0), (0.25, 0.25, 0.0), (0.125, 0.125, 0.125),\n (0.0, 0.5, 0.25), (0.0, 0.25, 0.5), (0.125, 0.375, 0.375),\n (0.25, 0.0, 0.5), (0.125, 0.125, 0.375), (0.25, 0.5, 0.0),\n (0.125, 0.375, 0.125), (0.5, 0.0, 0.25), (0.375, 0.125, 0.375),\n (0.5, 0.25, 0.0), (0.375, 0.125, 0.125), (0.375, 0.375, 0.125),\n (0.5, 0.75, 0.75), (0.5, 0.5, 0.75), (0.5, 0.75, 0.5),\n (0.75, 0.5, 0.75), (0.75, 0.5, 0.5), (0.75, 0.75, 0.5),\n (0.625, 0.625, 0.625), (1.0, 0.75, 0.75), (1.0, 1.0, 0.75),\n (1.0, 0.75, 1.0), (0.75, 1.0, 0.75), (0.75, 1.0, 1.0),\n (0.75, 0.75, 1.0), (0.875, 0.875, 0.875), (1.0, 0.5, 0.75),\n (1.0, 0.75, 0.5), (0.875, 0.625, 0.625), (0.75, 1.0, 0.5),\n (0.875, 0.875, 0.625), (0.75, 0.5, 1.0), (0.875, 0.625, 0.875),\n (0.5, 1.0, 0.75), (0.625, 0.875, 0.625), (0.5, 0.75, 1.0),\n (0.625, 0.875, 0.875), (0.625, 0.625, 0.875),\n (0.75, 0.5, 0.25),\n (0.75, 0.25, 0.5), (0.625, 0.375, 0.375), (1.0, 0.25, 0.25),\n (1.0, 0.0, 0.25), (1.0, 0.25, 0.0), (0.75, 0.0, 0.25),\n (0.75, 0.0, 0.0), (0.75, 0.25, 0.0), (0.875, 0.125, 0.125),\n (1.0, 0.5, 0.25), (1.0, 0.25, 0.5), (0.875, 0.375, 0.375),\n (0.75, 0.0, 0.5), (0.875, 0.125, 0.375), (0.75, 0.5, 0.0),\n (0.875, 0.375, 0.125), (0.625, 0.125, 0.375),\n (0.625, 0.125, 0.125),\n (0.625, 0.375, 0.125), (0.5, 0.75, 0.25),\n (0.625, 0.625, 0.375),\n (1.0, 0.75, 0.25), (1.0, 1.0, 0.25), (1.0, 0.75, 0.0),\n (0.75, 1.0, 0.25), (0.75, 1.0, 0.0), (0.75, 0.75, 0.0),\n (0.875, 0.875, 0.125), (0.875, 0.625, 0.375),\n (0.875, 0.875, 0.375),\n (0.875, 0.625, 0.125), (0.5, 1.0, 0.25), (0.625, 0.875, 0.375),\n (0.5, 0.75, 0.0), (0.625, 0.875, 0.125), (0.625, 0.625, 0.125),\n (0.5, 0.25, 0.75), (0.625, 0.375, 0.625), (1.0, 0.25, 0.75),\n (1.0, 0.0, 0.75), (1.0, 0.25, 1.0), (0.75, 0.0, 0.75),\n (0.75, 0.0, 1.0), (0.75, 0.25, 1.0), (0.875, 0.125, 0.875),\n (0.875, 0.375, 0.625), (0.875, 0.125, 0.625),\n (0.875, 0.375, 0.875),\n (0.5, 0.0, 0.75), (0.625, 0.125, 0.625), (0.5, 0.25, 1.0),\n (0.625, 0.125, 0.875), (0.625, 0.375, 0.875),\n (0.25, 0.75, 0.5),\n (0.375, 0.625, 0.375), (0.0, 0.75, 0.25), (0.0, 1.0, 0.25),\n (0.0, 0.75, 0.0), (0.25, 1.0, 0.25), (0.25, 1.0, 0.0),\n (0.25, 0.75, 0.0), (0.125, 0.875, 0.125), (0.0, 0.75, 0.5),\n (0.125, 0.625, 0.375), (0.25, 1.0, 0.5), (0.125, 0.875, 0.375),\n (0.125, 0.625, 0.125), (0.375, 0.875, 0.375),\n (0.375, 0.875, 0.125),\n (0.375, 0.625, 0.125), (0.25, 0.5, 0.75),\n (0.375, 0.625, 0.625),\n (0.0, 0.75, 0.75), (0.0, 1.0, 0.75), (0.0, 0.75, 1.0),\n (0.25, 1.0, 0.75), (0.25, 1.0, 1.0), (0.25, 0.75, 1.0),\n (0.125, 0.875, 0.875), (0.0, 0.5, 0.75), (0.125, 0.625, 0.625),\n (0.125, 0.875, 0.625), (0.25, 0.5, 1.0), (0.125, 0.625, 0.875),\n (0.375, 0.875, 0.625), (0.375, 0.875, 
0.875),\n (0.375, 0.625, 0.875),\n (0.375, 0.375, 0.625), (0.0, 0.25, 0.75), (0.0, 0.0, 0.75),\n (0.0, 0.25, 1.0), (0.25, 0.0, 0.75), (0.25, 0.0, 1.0),\n (0.25, 0.25, 1.0), (0.125, 0.125, 0.875),\n (0.125, 0.375, 0.625),\n (0.125, 0.125, 0.625), (0.125, 0.375, 0.875),\n (0.375, 0.125, 0.625),\n (0.375, 0.125, 0.875), (0.375, 0.375, 0.875)]\n\n nn_checks = {(0.5, 0.25, 0.25): [(0.375, 0.375, 0.125), (0.5, 0.5, 0.0),\n (0.75, 0.25, 0.25),\n (0.625, 0.375, 0.375),\n (0.625, 0.125, 0.375),\n (0.625, 0.125, 0.125),\n (0.5, 0.5, 0.25), (0.25, 0.25, 0.25),\n (0.375, 0.375, 0.375),\n (0.5, 0.25, 0.5), (0.5, 0.5, 0.5),\n (0.5, 0.0, 0.25),\n (0.375, 0.125, 0.375), (0.5, 0.0, 0.5),\n (0.5, 0.25, 0.0),\n (0.375, 0.125, 0.125), (0.5, 0.0, 0.0),\n (0.625, 0.375, 0.125)],\n (0.625, 0.625, 0.875): [(0.75, 0.5, 1.0),\n (0.75, 0.75, 1.0),\n (0.5, 0.75, 1.0), (0.5, 0.5, 1.0),\n (0.5, 0.5, 0.75),\n (0.5, 0.75, 0.75),\n (0.75, 0.5, 0.75),\n (0.75, 0.75, 0.75)],\n (0, 0, 0): [(0.0, 0.25, 0.0), (0.125, 0.125, 0.125),\n (0.0, 0.0, 0.25), (0.25, 0.0, 0.0),\n (0.0, 0.25, 0.25), (0.25, 0.25, 0.0),\n (0.25, 0.0, 0.25)]}\n\n init_triangulation(3, 2, check, nn_checks)",
"def test_2D_m8_2k_sFH():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: 'gpu_2k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def test_3_2_4D_cube_splits(self):\n check = [(0, 0, 0, 0), (1, 1, 1, 1), (1, 0, 0, 0), (1, 1, 0, 0),\n (1, 1, 1, 0),\n (1, 1, 0, 1), (1, 0, 1, 0), (1, 0, 1, 1), (1, 0, 0, 1),\n (0, 1, 0, 0),\n (0, 1, 1, 0), (0, 1, 1, 1), (0, 1, 0, 1), (0, 0, 1, 0),\n (0, 0, 1, 1),\n (0, 0, 0, 1), (0.5, 0.5, 0.5, 0.5), (0.0, 0.5, 0.5, 0.5),\n (0.0, 0.0, 0.5, 0.5), (0.0, 0.0, 0.0, 0.5),\n (0.0, 0.0, 0.5, 0.0),\n (0.0, 0.5, 0.0, 0.5), (0.0, 0.5, 0.0, 0.0),\n (0.0, 0.5, 0.5, 0.0),\n (0.5, 0.0, 0.5, 0.5), (0.5, 0.0, 0.0, 0.5),\n (0.5, 0.0, 0.0, 0.0),\n (0.5, 0.0, 0.5, 0.0), (0.5, 0.5, 0.0, 0.5),\n (0.5, 0.5, 0.0, 0.0),\n (0.5, 0.5, 0.5, 0.0), (0.25, 0.25, 0.25, 0.25),\n (1.0, 0.5, 0.5, 0.5),\n (1.0, 1.0, 0.5, 0.5), (1.0, 1.0, 1.0, 0.5),\n (1.0, 1.0, 0.5, 1.0),\n (1.0, 0.5, 1.0, 0.5), (1.0, 0.5, 1.0, 1.0),\n (1.0, 0.5, 0.5, 1.0),\n (0.5, 1.0, 0.5, 0.5), (0.5, 1.0, 1.0, 0.5),\n (0.5, 1.0, 1.0, 1.0),\n (0.5, 1.0, 0.5, 1.0), (0.5, 0.5, 1.0, 0.5),\n (0.5, 0.5, 1.0, 1.0),\n (0.5, 0.5, 0.5, 1.0), (0.75, 0.75, 0.75, 0.75),\n (1.0, 0.0, 0.5, 0.5),\n (1.0, 0.0, 0.0, 0.5), (1.0, 0.0, 0.5, 0.0),\n (1.0, 0.5, 0.0, 0.5),\n (1.0, 0.5, 0.0, 0.0), (1.0, 0.5, 0.5, 0.0),\n (0.75, 0.25, 0.25, 0.25),\n (1.0, 1.0, 0.0, 0.5), (1.0, 1.0, 0.5, 0.0),\n (0.5, 1.0, 0.0, 0.5),\n (0.5, 1.0, 0.0, 0.0), (0.5, 1.0, 0.5, 0.0),\n (0.75, 0.75, 0.25, 0.25),\n (1.0, 0.5, 1.0, 0.0), (0.5, 1.0, 1.0, 0.0),\n (0.5, 0.5, 1.0, 0.0),\n (0.75, 0.75, 0.75, 0.25), (1.0, 0.5, 0.0, 1.0),\n (0.5, 1.0, 0.0, 1.0),\n (0.5, 0.5, 0.0, 1.0), (0.75, 0.75, 0.25, 0.75),\n (1.0, 0.0, 1.0, 0.5),\n (0.5, 0.0, 1.0, 0.5), (0.5, 0.0, 1.0, 0.0),\n (0.75, 0.25, 0.75, 0.25),\n (1.0, 0.0, 0.5, 1.0), (0.5, 0.0, 1.0, 1.0),\n (0.5, 0.0, 0.5, 1.0),\n (0.75, 0.25, 0.75, 0.75), (0.5, 0.0, 0.0, 1.0),\n (0.75, 0.25, 0.25, 0.75), (0.0, 1.0, 0.5, 0.5),\n (0.0, 1.0, 0.0, 0.5),\n (0.0, 1.0, 0.5, 0.0), (0.25, 0.75, 0.25, 0.25),\n (0.0, 1.0, 1.0, 0.5),\n (0.0, 0.5, 1.0, 0.5), (0.0, 0.5, 1.0, 0.0),\n (0.25, 0.75, 0.75, 0.25),\n (0.0, 1.0, 0.5, 1.0), (0.0, 0.5, 1.0, 1.0),\n (0.0, 0.5, 0.5, 1.0),\n (0.25, 0.75, 0.75, 0.75), (0.0, 0.5, 0.0, 1.0),\n (0.25, 0.75, 0.25, 0.75), (0.0, 0.0, 1.0, 0.5),\n (0.25, 0.25, 0.75, 0.25), (0.0, 0.0, 0.5, 1.0),\n (0.25, 0.25, 0.75, 0.75), (0.25, 0.25, 0.25, 0.75)]\n\n nn_checks = {(0, 0, 0, 0): [(0.0, 0.5, 0.5, 0.0), (0.5, 0.0, 0.5, 0.5),\n (0.5, 0.5, 0.0, 0.5),\n (0.25, 0.25, 0.25, 0.25),\n (0.5, 0.0, 0.0, 0.5), (0.5, 0.0, 0.5, 0.0),\n (0.5, 0.5, 0.0, 0.0), (0.0, 0.0, 0.5, 0.0),\n (0.0, 0.5, 0.0, 0.0), (0.0, 0.0, 0.0, 0.5),\n (0.0, 0.5, 0.5, 0.5), (0.0, 0.0, 0.5, 0.5),\n (0.0, 0.5, 0.0, 0.5), (0.5, 0.0, 0.0, 0.0),\n (0.5, 0.5, 0.5, 0.0)],\n (1.0, 1.0, 0.5, 0.5): [(1.0, 1.0, 0.5, 1.0), (1, 1, 0, 1),\n (1.0, 1.0, 1.0, 0.5),\n (1.0, 0.5, 0.5, 0.5), (1, 1, 1, 0),\n (1.0, 1.0, 0.5, 0.0),\n (1.0, 1.0, 0.0, 0.5), (1, 1, 0, 0),\n (1, 1, 1, 1), (0.5, 1.0, 0.5, 0.5),\n (0.5, 0.5, 0.5, 0.5),\n (0.75, 0.75, 0.75, 0.75),\n (0.75, 0.75, 0.25, 0.25),\n (0.75, 0.75, 0.75, 0.25),\n (0.75, 0.75, 0.25, 0.75)],\n (0.25, 0.25, 0.25, 0.75): [(0.0, 0.0, 0.5, 1.0),\n (0.0, 0.5, 0.0, 1.0),\n (0.5, 0.0, 0.0, 1.0),\n (0.5, 0.5, 0.5, 1.0),\n (0, 0, 0, 1),\n (0.0, 0.5, 0.5, 1.0),\n (0.5, 0.0, 0.5, 1.0),\n (0.5, 0.5, 0.0, 1.0),\n (0.0, 0.0, 0.0, 0.5),\n (0.0, 0.0, 0.5, 0.5),\n (0.0, 0.5, 0.5, 0.5),\n (0.0, 0.5, 0.0, 0.5),\n (0.5, 0.0, 0.5, 0.5),\n (0.5, 0.5, 0.0, 0.5),\n (0.5, 0.5, 0.5, 0.5),\n (0.5, 0.0, 0.0, 0.5)]}\n\n init_triangulation(4, 1, check, nn_checks)",
"def test_multiple_case(self):\r\n\r\n shp = (3, 3)\r\n fx, fy, fz, fw = fmatrices('xyzw')\r\n dx, dy, dz, dw = dmatrices('xyzw')\r\n fv = fvector('r').dimshuffle('x', 0)\r\n dv = dvector('s').dimshuffle('x', 0)\r\n fxv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fyv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fzv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fwv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float32').reshape(1, shp[0])\r\n dxv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dyv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dzv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dwv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float64').reshape(1, shp[0])\r\n\r\n #We must be sure that the Canonizer is working, but that we don't have other\r\n # optimisation that could hide bug in the Canonizer as local_elemwise_fusion\r\n mode = compile.mode.get_default_mode()\r\n old_optimizer = mode._optimizer\r\n try:\r\n mode._optimizer = gof.Query([\"canonicalize\"])\r\n mode._optimizer = mode._optimizer.including('ShapeOpt')\r\n mode._optimizer = mode._optimizer.excluding(\r\n 'local_elemwise_fusion')\r\n\r\n #test x / x -> 1\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([(fx/fx,[fx],[fxv],'float32'),\r\n (dx/dx,[dx],[dxv],'float64'),\r\n (fv/fv,[fv],[fvv],'float32'),\r\n (dv/dv,[dv],[dvv],'float64'),\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert (out == numpy.ones(shp, dtype=out_dtype)).all()\r\n topo = f.maker.fgraph.toposort()\r\n if sym_inputs[0].broadcastable[0]:\r\n assert len(topo) == 2\r\n assert isinstance(topo[0].op, Shape_i)\r\n assert isinstance(topo[1].op, tensor.Alloc)\r\n else:\r\n assert len(topo) == 3\r\n assert isinstance(topo[0].op, Shape_i)\r\n assert isinstance(topo[1].op, Shape_i)\r\n assert isinstance(topo[2].op, tensor.Alloc)\r\n assert(out_dtype == out.dtype)\r\n\r\n #test (x * y) / x -> y\r\n for id,(g, sym_inputs, val_inputs, nb_elemwise, out_dtype) in enumerate([\r\n ((dx*dy)/dx,[dx,dy],[dxv,dyv],0,'float64'),\r\n ((fx*fy)/fx,[fx,fy],[fxv,fyv],0,'float32'),\r\n ((dv*dy)/dv,[dv,dy],[dvv,dyv],0,'float64'),\r\n ((fv*fy)/fv,[fv,fy],[fvv,fyv],0,'float32'),\r\n #must broadcast as their is a dimshuffle in the computation\r\n ((dx*dv)/dx,[dx,dv],[dxv,dvv],1,'float64'),\r\n #topo: [Elemwise{second,no_inplace}(x, <TensorType(float64, row)>)]\r\n ((fx*fv)/fx,[fx,fv],[fxv,fvv],1,'float32')\r\n #topo: [Elemwise{second,no_inplace}(x, <TensorType(float32, row)>)]\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert(out_dtype == out.dtype)\r\n assert numpy.allclose(out, val_inputs[1])\r\n topo = f.maker.fgraph.toposort()\r\n print \"ID TOPO\", id, topo, sym_inputs\r\n for r, t in f.maker.fgraph.shape_feature.shape_of.items():\r\n print ' ', r, t\r\n if topo and not(len(topo)==1 and topo[0].op==deep_copy_op):\r\n for node in topo[:-1]:\r\n assert isinstance(node.op, Shape_i)\r\n assert isinstance(topo[-1].op, tensor.Alloc)\r\n\r\n #test x / y / x -> 1 / y\r\n for id,(g, sym_inputs, val_inputs, nb_elemwise, out_dtype) in enumerate([\r\n ((dx/dy)/dx,[dx,dy],[dxv,dyv],1,'float64'),\r\n ((fx/fy)/fx,[fx,fy],[fxv,fyv],1,'float32'),\r\n ((dv/dy)/dv,[dv,dy],[dvv,dyv],1,'float64'),\r\n 
((fv/fy)/fv,[fv,fy],[fvv,fyv],1,'float32'),\r\n #must broadcast as their is a dimshuffle in the computation\r\n\r\n ((dx/dv)/dx,[dx,dv],[dxv,dvv],1,'float64'),\r\n #topo: [Shape_i, Shape_i, Elemwise{inv,no_inplace}(<TensorType(float64, row)>), Alloc]\r\n ((fx/fv)/fx,[fx,fv],[fxv,fvv],1,'float32'),\r\n #topo:[Shape_i, Shape_i, Elemwise{inv,no_inplace}(<TensorType(float32, row)>), Alloc]\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, (1 / val_inputs[1]))\r\n topo = f.maker.fgraph.toposort()\r\n print topo\r\n elem = [t for t in topo if isinstance(t.op, T.Elemwise)]\r\n assert len(elem) == nb_elemwise\r\n assert isinstance(elem[0].op, (T.Elemwise, ))\r\n assert isinstance(elem[0].op.scalar_op, (\r\n theano.scalar.basic.Inv, theano.scalar.basic.TrueDiv))\r\n assert(out_dtype == out.dtype)\r\n\r\n #test (a / b) * (b / c) * (c / d) -> a / d\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n ((dx / dy) * (dy / dz) * (dz / dw),[dx,dy,dz,dw],[dxv,dyv,dzv,dwv],'float64'),\r\n ((fx / fy) * (fy / fz) * (fz / fw),[fx,fy,fz,fw],[fxv,fyv,fzv,fwv],'float32'),\r\n ((dv / dy) * (dy / dz) * (dz / dw),[dv,dy,dz,dw],[dvv,dyv,dzv,dwv],'float64'),\r\n ((fv / fy) * (fy / fz) * (fz / fw),[fv,fy,fz,fw],[fvv,fyv,fzv,fwv],'float32'),\r\n ((dx / dv) * (dv / dz) * (dz / dw),[dx,dv,dz,dw],[dxv,dvv,dzv,dwv],'float64'),\r\n ((fx / fv) * (fv / fz) * (fz / fw),[fx,fv,fz,fw],[fxv,fvv,fzv,fwv],'float32'),\r\n ((dx / dy) * (dy / dv) * (dv / dw),[dx,dy,dv,dw],[dxv,dyv,dvv,dwv],'float64'),\r\n ((fx / fy) * (fy / fv) * (fv / fw),[fx,fy,fv,fw],[fxv,fyv,fvv,fwv],'float32'),\r\n ((dx / dy) * (dy / dz) * (dz / dv),[dx,dy,dz,dv],[dxv,dyv,dzv,dvv],'float64'),\r\n ((fx / fy) * (fy / fz) * (fz / fv),[fx,fy,fz,fv],[fxv,fyv,fzv,fvv],'float32'),\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, (val_inputs[0] / val_inputs[3]))\r\n topo = f.maker.fgraph.toposort()\r\n assert len(topo) == 1\r\n assert isinstance(topo[0].op, (T.Elemwise, ))\r\n assert isinstance(topo[0].op.scalar_op,\r\n theano.scalar.basic.TrueDiv)\r\n assert len(topo[0].inputs) == 2\r\n assert(out_dtype == out.dtype)\r\n\r\n #test (2.0 * x) / (4.0 * y) -> (0.5 * x) / y\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n (((2.0*dx)/(4.0*dy)),[dx,dy],[dxv,dyv],'float64'),\r\n (((2.0*fx)/(4.0*fy)),[fx,fy],[fxv,fyv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n (((2.0*dv)/(4.0*dy)),[dv,dy],[dvv,dyv],'float64'),\r\n (((2.0*fv)/(4.0*fy)),[fv,fy],[fvv,fyv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n (((2.0*dx)/(4.0*dv)),[dx,dv],[dxv,dvv],'float64'),\r\n (((2.0*fx)/(4.0*fv)),[fx,fv],[fxv,fvv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ]):\r\n\r\n if isinstance(out_dtype, dict):\r\n out_dtype = out_dtype[config.cast_policy]\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, (0.5 *\r\n val_inputs[0] / val_inputs[1]))\r\n topo = f.maker.fgraph.toposort()\r\n assert len(topo) == 2\r\n assert isinstance(topo[0].op, (T.Elemwise, ))\r\n assert isinstance(topo[0].op.scalar_op,\r\n theano.scalar.basic.Mul)\r\n assert len(topo[0].inputs) == 2\r\n assert isinstance(topo[1].op, (T.Elemwise, ))\r\n assert isinstance(topo[1].op.scalar_op,\r\n theano.scalar.basic.TrueDiv)\r\n assert len(topo[1].inputs) == 2\r\n assert(out_dtype == 
out.dtype)\r\n\r\n #test 2 * x / 2 -> x\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n ((2*dx)/2,[dx],[dxv],'float64'),\r\n ((2*fx)/2,[fx],[fxv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ((2*dv)/2,[dv],[dvv],'float64'),\r\n ((2*fv)/2,[fv],[fvv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ]):\r\n if isinstance(out_dtype, dict):\r\n out_dtype = out_dtype[config.cast_policy]\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, val_inputs[0])\r\n topo = f.maker.fgraph.toposort()\r\n assert len(topo) == 1\r\n topo[0].op == deep_copy_op\r\n assert(out_dtype == out.dtype)\r\n\r\n #test x / abs(x) -> sign(x)\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n (dx/abs(dx),[dx],[0.5-dxv],'float64'),\r\n (fx/abs(fx),[fx],[0.5-fxv], 'float32'),\r\n (dx/abs(dx),[dx],[0.1*dxv],'float64'),\r\n (fx/abs(fx),[fx],[0.1*fxv], 'float32'),\r\n (dv/abs(dv),[dv],[0.5-dvv],'float64'),\r\n (fv/abs(fv),[fv],[0.5-fvv], 'float32'),\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.all(numpy.isfinite(out))\r\n assert numpy.allclose(out, numpy.sign(val_inputs[0]))\r\n assert(out_dtype == out.dtype)\r\n assert len(f.maker.fgraph.toposort()) == 1\r\n\r\n #test (2*x) / (3*abs(x)) -> sign(x)\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n ((2 * dx) / (3 * abs(dx)), [dx], [0.5 - dxv], 'float64'),\r\n ((2 * fx) / (3 * abs(fx)), [fx], [0.5 - fxv],\r\n {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ((2 * dx) / (3 * abs(dx)), [dx], [0.1 * dxv], 'float64'),\r\n ((2 * fx) / (3 * abs(fx)), [fx], [0.1 * fxv],\r\n {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ((2 * dv) / (3 * abs(dv)), [dv], [0.5 - dvv], 'float64'),\r\n ((2 * fv) / (3 * abs(fv)), [fv], [0.5 - fvv],\r\n {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ]):\r\n\r\n if isinstance(out_dtype, dict):\r\n out_dtype = out_dtype[config.cast_policy]\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n topo = f.maker.fgraph.toposort()\r\n out = f(*val_inputs)\r\n assert numpy.all(numpy.isfinite(out))\r\n assert numpy.allclose(out, numpy.sign(val_inputs[0]) * 2 / 3)\r\n assert(out_dtype == out.dtype)\r\n finally:\r\n mode._optimizer = old_optimizer",
"def test_3D_m4_1k_sFH():\n scal, velo = setup_3D()\n\n advec = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: 'gpu_1k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_3D_withPython(scal, velo, advec, advec_py)",
"def test_2D_m8_1k():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: 'gpu_1k',\n Splitting: 'o2'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: '',\n Splitting: 'o2'},\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def test_3D_m4_2k_sFH():\n scal, velo = setup_3D()\n\n advec = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: 'gpu_2k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_3D_withPython(scal, velo, advec, advec_py)",
"def test_1_2_2D_cube_splits(self):\n check = [(0, 0), (1, 1), (1, 0), (0, 1), (0.5, 0.5), (0.0, 0.5),\n (0.5, 0.0),\n (0.25, 0.25), (1.0, 0.5), (0.5, 1.0), (0.75, 0.75),\n (0.75, 0.25),\n (0.25, 0.75), (0.5, 0.25), (0.25, 0.5), (0.375, 0.375),\n (0.0, 0.25),\n (0.25, 0.0), (0.125, 0.125), (0.125, 0.375), (0.375, 0.125),\n (0.5, 0.75), (0.75, 0.5), (0.625, 0.625), (1.0, 0.75),\n (0.75, 1.0),\n (0.875, 0.875), (0.875, 0.625), (0.625, 0.875), (0.625, 0.375),\n (1.0, 0.25), (0.75, 0.0), (0.875, 0.125), (0.875, 0.375),\n (0.625, 0.125), (0.375, 0.625), (0.0, 0.75), (0.25, 1.0),\n (0.125, 0.875), (0.125, 0.625), (0.375, 0.875)]\n\n nn_checks = {(0, 0): [(0.25, 0.0), (0.0, 0.25), (0.125, 0.125)],\n (0.625, 0.375): [(0.5, 0.5), (0.75, 0.25), (0.75, 0.5),\n (0.5, 0.25)],\n (0, 1): [(0.25, 1.0), (0.125, 0.875),(0.0, 0.75)],\n (0.625, 0.125): [(0.5, 0.0), (0.75, 0.25), (0.75, 0.0),\n (0.5, 0.25)]}\n\n\n init_triangulation(2, 2, check, nn_checks)",
"def lpt_prototype(mesh,\n nc=FLAGS.nc,\n bs=FLAGS.box_size,\n batch_size=FLAGS.batch_size,\n a0=FLAGS.a0,\n a=FLAGS.af,\n nsteps=FLAGS.nsteps):\n\n stages = np.linspace(a0, a, nsteps, endpoint=True)\n klin = np.loadtxt('../flowpm/data/Planck15_a1p00.txt').T[0]\n plin = np.loadtxt('../flowpm/data/Planck15_a1p00.txt').T[1]\n ipklin = iuspline(klin, plin)\n\n # Define the named dimensions\n # Parameters of the small scales decomposition\n n_block_x = FLAGS.nx\n n_block_y = FLAGS.ny\n n_block_z = 1\n halo_size = FLAGS.hsize\n\n if halo_size >= 0.5 * min(nc // n_block_x, nc // n_block_y, nc // n_block_z):\n new_size = int(0.5 *\n min(nc // n_block_x, nc // n_block_y, nc // n_block_z))\n print('WARNING: REDUCING HALO SIZE from %d to %d' % (halo_size, new_size))\n halo_size = new_size\n\n # Parameters of the large scales decomposition\n downsampling_factor = FLAGS.dsample\n lnc = nc // 2**downsampling_factor\n\n #\n\n fx_dim = mtf.Dimension(\"nx\", nc)\n fy_dim = mtf.Dimension(\"ny\", nc)\n fz_dim = mtf.Dimension(\"nz\", nc)\n\n tfx_dim = mtf.Dimension(\"tx\", nc)\n tfy_dim = mtf.Dimension(\"ty\", nc)\n tfz_dim = mtf.Dimension(\"tz\", nc)\n\n # Dimensions of the low resolution grid\n x_dim = mtf.Dimension(\"nx_lr\", lnc)\n y_dim = mtf.Dimension(\"ny_lr\", lnc)\n z_dim = mtf.Dimension(\"nz_lr\", lnc)\n\n tx_dim = mtf.Dimension(\"tx_lr\", lnc)\n ty_dim = mtf.Dimension(\"ty_lr\", lnc)\n tz_dim = mtf.Dimension(\"tz_lr\", lnc)\n\n nx_dim = mtf.Dimension('nx_block', n_block_x)\n ny_dim = mtf.Dimension('ny_block', n_block_y)\n nz_dim = mtf.Dimension('nz_block', n_block_z)\n\n sx_dim = mtf.Dimension('sx_block', nc // n_block_x)\n sy_dim = mtf.Dimension('sy_block', nc // n_block_y)\n sz_dim = mtf.Dimension('sz_block', nc // n_block_z)\n\n k_dims = [tx_dim, ty_dim, tz_dim]\n\n batch_dim = mtf.Dimension(\"batch\", batch_size)\n pk_dim = mtf.Dimension(\"npk\", len(plin))\n pk = mtf.import_tf_tensor(mesh, plin.astype('float32'), shape=[pk_dim])\n\n # Compute necessary Fourier kernels\n kvec = flowpm.kernels.fftk((nc, nc, nc), symmetric=False)\n kx = mtf.import_tf_tensor(mesh,\n kvec[0].squeeze().astype('float32'),\n shape=[tfx_dim])\n ky = mtf.import_tf_tensor(mesh,\n kvec[1].squeeze().astype('float32'),\n shape=[tfy_dim])\n kz = mtf.import_tf_tensor(mesh,\n kvec[2].squeeze().astype('float32'),\n shape=[tfz_dim])\n kv = [ky, kz, kx]\n\n # kvec for low resolution grid\n kvec_lr = flowpm.kernels.fftk([lnc, lnc, lnc], symmetric=False)\n\n kx_lr = mtf.import_tf_tensor(mesh,\n kvec_lr[0].squeeze().astype('float32') /\n 2**downsampling_factor,\n shape=[tx_dim])\n ky_lr = mtf.import_tf_tensor(mesh,\n kvec_lr[1].squeeze().astype('float32') /\n 2**downsampling_factor,\n shape=[ty_dim])\n kz_lr = mtf.import_tf_tensor(mesh,\n kvec_lr[2].squeeze().astype('float32') /\n 2**downsampling_factor,\n shape=[tz_dim])\n kv_lr = [ky_lr, kz_lr, kx_lr]\n\n # kvec for high resolution blocks\n padded_sx_dim = mtf.Dimension('padded_sx_block',\n nc // n_block_x + 2 * halo_size)\n padded_sy_dim = mtf.Dimension('padded_sy_block',\n nc // n_block_y + 2 * halo_size)\n padded_sz_dim = mtf.Dimension('padded_sz_block',\n nc // n_block_z + 2 * halo_size)\n kvec_hr = flowpm.kernels.fftk([\n nc // n_block_x + 2 * halo_size, nc // n_block_y + 2 * halo_size,\n nc // n_block_z + 2 * halo_size\n ],\n symmetric=False)\n\n kx_hr = mtf.import_tf_tensor(mesh,\n kvec_hr[0].squeeze().astype('float32'),\n shape=[padded_sx_dim])\n ky_hr = mtf.import_tf_tensor(mesh,\n kvec_hr[1].squeeze().astype('float32'),\n shape=[padded_sy_dim])\n kz_hr = 
mtf.import_tf_tensor(mesh,\n kvec_hr[2].squeeze().astype('float32'),\n shape=[padded_sz_dim])\n kv_hr = [ky_hr, kz_hr, kx_hr]\n\n shape = [batch_dim, fx_dim, fy_dim, fz_dim]\n lr_shape = [batch_dim, x_dim, y_dim, z_dim]\n hr_shape = [batch_dim, nx_dim, ny_dim, nz_dim, sx_dim, sy_dim, sz_dim]\n part_shape = [batch_dim, fx_dim, fy_dim, fz_dim]\n\n # Begin simulation\n\n initc = mtfpm.linear_field(mesh, shape, bs, nc, pk, kv)\n\n # Reshaping array into high resolution mesh\n field = mtf.slicewise(lambda x: tf.expand_dims(\n tf.expand_dims(tf.expand_dims(x, axis=1), axis=1), axis=1), [initc],\n output_dtype=tf.float32,\n output_shape=hr_shape,\n name='my_reshape',\n splittable_dims=lr_shape[:-1] + hr_shape[1:4] +\n part_shape[1:3])\n\n for block_size_dim in hr_shape[-3:]:\n field = mtf.pad(field, [halo_size, halo_size], block_size_dim.name)\n\n for blocks_dim, block_size_dim in zip(hr_shape[1:4], field.shape[-3:]):\n field = mpm.halo_reduce(field, blocks_dim, block_size_dim, halo_size)\n\n field = mtf.reshape(field, field.shape + [mtf.Dimension('h_dim', 1)])\n high = field\n low = mesh_utils.downsample(field, downsampling_factor, antialias=True)\n\n low = mtf.reshape(low, low.shape[:-1])\n high = mtf.reshape(high, high.shape[:-1])\n\n for block_size_dim in hr_shape[-3:]:\n low = mtf.slice(low, halo_size // 2**downsampling_factor,\n block_size_dim.size // 2**downsampling_factor,\n block_size_dim.name)\n # Hack usisng custom reshape because mesh is pretty dumb\n low = mtf.slicewise(lambda x: x[:, 0, 0, 0], [low],\n output_dtype=tf.float32,\n output_shape=lr_shape,\n name='my_dumb_reshape',\n splittable_dims=lr_shape[:-1] + hr_shape[:4])\n\n state = mtfpm.lpt_init(\n low,\n high,\n 0.1,\n kv_lr,\n kv_hr,\n halo_size,\n hr_shape,\n lr_shape,\n part_shape[1:],\n downsampling_factor=downsampling_factor,\n antialias=True,\n )\n\n # Here we can run our nbody\n final_state = state #mtfpm.nbody(state, stages, lr_shape, hr_shape, k_dims, kv_lr, kv_hr, halo_size, downsampling_factor=downsampling_factor)\n\n # paint the field\n final_field = mtf.zeros(mesh, shape=hr_shape)\n for block_size_dim in hr_shape[-3:]:\n final_field = mtf.pad(final_field, [halo_size, halo_size],\n block_size_dim.name)\n final_field = mesh_utils.cic_paint(final_field, final_state[0], halo_size)\n # Halo exchange\n for blocks_dim, block_size_dim in zip(hr_shape[1:4], final_field.shape[-3:]):\n final_field = mpm.halo_reduce(final_field, blocks_dim, block_size_dim,\n halo_size)\n # Remove borders\n for block_size_dim in hr_shape[-3:]:\n final_field = mtf.slice(final_field, halo_size, block_size_dim.size,\n block_size_dim.name)\n\n #final_field = mtf.reshape(final_field, [batch_dim, fx_dim, fy_dim, fz_dim])\n # Hack usisng custom reshape because mesh is pretty dumb\n final_field = mtf.slicewise(lambda x: x[:, 0, 0, 0], [final_field],\n output_dtype=tf.float32,\n output_shape=[batch_dim, fx_dim, fy_dim, fz_dim],\n name='my_dumb_reshape',\n splittable_dims=part_shape[:-1] + hr_shape[:4])\n\n return initc, final_field\n\n ##",
"def subdivision(mesh):\n\t\n\t\n\t# 1. generate new nodes in the centre of quad\n\t# 1/4 o-------o 1/4 o: existing vertices\n\t# | | *: newly-generated vertices\n\t# | * |\n\t# | |\n\t# 1/4 o-------o 1/4\n\n\tnew_coor = mesh.give_nodes().give_coor()\n\t\n\tfor face_index in range(mesh.give_model_inf()[2]): \n\t\tnew_x, new_y, new_z = (0, 0, 0)\n\t\tfor vertex_index in range(4):\n\t\t\tmesh.give_faces()\n\t\t\tnode_index = mesh.give_faces().give_node_list(face_index)[vertex_index]\n\n\t\t\tnew_x += 0.25*mesh.give_nodes().give_coor(node_index)[0]\n\t\t\tnew_y += 0.25*mesh.give_nodes().give_coor(node_index)[1]\n\t\t\tnew_z += 0.25*mesh.give_nodes().give_coor(node_index)[2]\n\t\t\t\n\t\tnew_coor.append((new_x, new_y, new_z))\n\t\t\n\t# generating new nodes on the edge\n\t# figure out one edge is shared by how many surfaces\n\tedge_shared_by_faces_list = helper.find_edge_shared_by_which_faces(mesh.give_edges(), mesh.give_faces())\n\t\n\tfor edge_index in range(mesh.give_model_inf()[1]):\n\n\t\tnew_x, new_y, new_z = (0., 0., 0.)\n\t\t\n\t# 2. generate new node on boundary edge\n\t# o: existing vertices\n\t# 1/2 o---*---o 1/2 *: newly-generated vertices\n\t# \n\n\t\tnew_coor = mesh.give_nodes().give_coor()\n\t\tif len(edge_shared_by_faces_list[edge_index]) == 1:\t\n\t\t\tnew_x, new_y, new_z = (0., 0., 0.)\n\t\t\tfor vertex_index in range(2):\n\t\t\t\tthis_node = mesh.give_edges().give_node(edge_index)[vertex_index]\n\t\t\t\tnew_x += 0.5*mesh.give_nodes().give_coor()[this_node][0]\n\t\t\t\tnew_y += 0.5*mesh.give_nodes().give_coor()[this_node][1]\n\t\t\t\tnew_z += 0.5*mesh.give_nodes().give_coor()[this_node][2]\n\t\t\t\t\n\t\t\tnew_coor.append((new_x, new_y, new_z))\n\t\t\t\t\n\t# 3. generate new node on interior edge\n\t# 1/16 o-------o 1/16 o: existing vertices\n\t# | | *: newly-generated vertices\n\t# 3/8 o---*---o 3/8\n\t# | |\n\t# 1/16 o-------o 1/16\n\n\t\telse:\n\t\t\tnew_x, new_y, new_z = (0., 0., 0.)\n\t\t\tconsidered_node = []\n\t\t\tfor vertex_index in range(2):\n\t\t\t\tthis_node = mesh.give_edges().give_node(edge_index)[vertex_index]\n\t\t\t\tconsidered_node.append(this_node)\n\t\t\t\tnew_x += 3./8.*mesh.give_nodes().give_coor()[this_node][0]\n\t\t\t\tnew_y += 3./8.*mesh.give_nodes().give_coor()[this_node][1]\n\t\t\t\tnew_z += 3./8.*mesh.give_nodes().give_coor()[this_node][2]\n\t\t\t\n\t\t\t# faces contain this node\n\t\t\tpotential_node = []\n\t\t\tfor face_index in edge_shared_by_faces_list[edge_index]:\t\t\n\t\t\t\tfor vertex_index in range(4):\n\t\t\t\t\t\tpotential_node.append(mesh.give_faces().give_node_list(face_index)[vertex_index])\n\t\t\t\n\t\t\touter_node = []\n\t\t\tfor node in potential_node:\n\t\t\t\tif (node not in considered_node) & (node not in outer_node):\n\t\t\t\t\touter_node.append(node)\n\t\t\t\t\t\n\t\t\tfor vertex_index in outer_node:\n\t\t\t\tnew_x += 1./16.*mesh.give_nodes().give_coor()[vertex_index][0]\n\t\t\t\tnew_y += 1./16.*mesh.give_nodes().give_coor()[vertex_index][1]\n\t\t\t\tnew_z += 1./16.*mesh.give_nodes().give_coor()[vertex_index][2]\n\t\t\t\n\t\t\tnew_coor.append((new_x, new_y, new_z))\n\n\t# update the links of edges and surfaces\n\tnew_edge_list = []\n\tnew_face_list = []\n\tfor face_index in range(mesh.give_model_inf()[2]):\n\t\told_node0 = mesh.give_faces().give_node_list(face_index)[0]\n\t\told_node1 = mesh.give_faces().give_node_list(face_index)[1]\n\t\told_node2 = mesh.give_faces().give_node_list(face_index)[2]\n\t\told_node3 = mesh.give_faces().give_node_list(face_index)[3]\n\t\t\n\t\told_edge0 = 
mesh.give_faces().give_edge_list(face_index)[0]\n\t\told_edge1 = mesh.give_faces().give_edge_list(face_index)[1]\n\t\told_edge2 = mesh.give_faces().give_edge_list(face_index)[2]\n\t\told_edge3 = mesh.give_faces().give_edge_list(face_index)[3]\n\t\t\n\t\tnew_node4 = old_edge0 + mesh.give_model_inf()[0] + mesh.give_model_inf()[2] \n\t\tnew_node5 = old_edge1 + mesh.give_model_inf()[0] + mesh.give_model_inf()[2]\n\t\tnew_node6 = old_edge2 + mesh.give_model_inf()[0] + mesh.give_model_inf()[2]\n\t\tnew_node7 = old_edge3 + mesh.give_model_inf()[0] + mesh.give_model_inf()[2]\t\n\t\tnew_node8 = mesh.give_model_inf()[0] + face_index\n\t\t\n\t\tif helper.in_list((old_node0, new_node4), new_edge_list) == False: \n\t\t\tnew_edge_list.append((old_node0, new_node4))\n\t\tif helper.in_list((new_node4, new_node8), new_edge_list) == False: \n\t\t\tnew_edge_list.append((new_node4, new_node8))\n\t\tif helper.in_list((new_node8, new_node7), new_edge_list) == False: \n\t\t\tnew_edge_list.append((new_node8, new_node7))\n\t\tif helper.in_list((new_node7, old_node0), new_edge_list) == False: \n\t\t\tnew_edge_list.append((new_node7, old_node0))\n\t\tif helper.in_list((new_node4, old_node1), new_edge_list) == False: \n\t\t\tnew_edge_list.append((new_node4, old_node1))\n\t\tif helper.in_list((old_node1, new_node5), new_edge_list) == False: \n\t\t\tnew_edge_list.append((old_node1, new_node5))\n\t\tif helper.in_list((new_node5, new_node8), new_edge_list) == False: \n\t\t\tnew_edge_list.append((new_node5, new_node8))\n\t\tif helper.in_list((new_node7, old_node3), new_edge_list) == False: \n\t\t\tnew_edge_list.append((new_node7, old_node3))\n\t\tif helper.in_list((old_node3, new_node6), new_edge_list) == False: \n\t\t\tnew_edge_list.append((old_node3, new_node6))\n\t\tif helper.in_list((new_node6, new_node8), new_edge_list) == False: \n\t\t\tnew_edge_list.append((new_node6, new_node8))\n\t\tif helper.in_list((new_node6, old_node2), new_edge_list) == False: \n\t\t\tnew_edge_list.append((new_node6, old_node2))\n\t\tif helper.in_list((old_node2, new_node5), new_edge_list) == False: \n\t\t\tnew_edge_list.append((old_node2, new_node5))\n\t\n\t\tnew_face_list.append((old_node0, new_node4, new_node8, new_node7))\n\t\tnew_face_list.append((new_node4, old_node1, new_node5, new_node8))\n\t\tnew_face_list.append((new_node7, new_node8, new_node6, old_node3))\n\t\tnew_face_list.append((new_node8, new_node5, old_node2, new_node6))\n\t\t\n\tnew_edges = geo.Edge(new_edge_list)\n\t\n\tnew_faces = geo.Face(new_face_list, new_edges)\n\t\t\n\t# update existing nodes\t\n\tfor node_index in range(mesh.give_model_inf()[0]):\n\t\t\n\t\tring1, ring2 = helper.find_neighbour_node(new_edges, new_faces, node_index)\n\t\tvalence = helper.find_valence(node_index, new_faces) \n\t\t#: valence: the number of faces sharing on specific edge\n\n\t# 4. 
update existing corner vertex\n\t# 2/4 @---* 1/4 *: newly-generated vertices\n\t# | | @: existing vertices to be updated\n\t# 1/4 *---* 0 The higher mask values on neighbouring vertices, \n\t# the more likely a square mesh will be refined into a sphere.\n\t \n\t\tif valence == 1:\n\n\t\t\tnew_x, new_y, new_z = (0, 0, 0)\n\t\t\tprint\n\t\t\tfor node_in_ring1 in ring1:\n\t\t\t\tnew_x += 1./4.*mesh.give_nodes().give_coor()[node_in_ring1][0]\n\t\t\t\tnew_y += 1./4.*mesh.give_nodes().give_coor()[node_in_ring1][1]\n\t\t\t\tnew_z += 1./4.*mesh.give_nodes().give_coor()[node_in_ring1][2]\n\n\t\t\tfor node_in_ring2 in ring2:\n\t\t\t\tnew_x += 0.*mesh.give_nodes().give_coor()[node_in_ring2][0]\n\t\t\t\tnew_y += 0.*mesh.give_nodes().give_coor()[node_in_ring2][1]\n\t\t\t\tnew_z += 0.*mesh.give_nodes().give_coor()[node_in_ring2][2]\n\t\t\t\t\n\t\t\tnew_x += 2./4.*mesh.give_nodes().give_coor()[node_index][0]\n\t\t\tnew_y += 2./4.*mesh.give_nodes().give_coor()[node_index][1]\n\t\t\tnew_z += 2./4.*mesh.give_nodes().give_coor()[node_index][2]\n\n\t# 5. update existing boundary joint vertex\n\t# 3/4\n\t# 1/8 *---*---* 1/8 *: newly-generated vertices\n\t# | | | @: existing vertices to be updated\n\t# 0 *---*---* 0\n\n\t\telif valence == 2:\n\t\t\t\n\t\t\tnew_x, new_y, new_z = (0, 0, 0)\n\t\t\tfor node_in_ring1 in ring1:\n\t\t\t\tif helper.find_valence(node_in_ring1, new_faces) <= 2: \n\t\t\t\t\tnew_x += 1./8.*mesh.give_nodes().give_coor()[node_in_ring1][0]\n\t\t\t\t\tnew_y += 1./8.*mesh.give_nodes().give_coor()[node_in_ring1][1]\n\t\t\t\t\tnew_z += 1./8.*mesh.give_nodes().give_coor()[node_in_ring1][2]\n\t\t\t\t\t\n\t\t\tnew_x += 3./4.*mesh.give_nodes().give_coor()[node_index][0]\n\t\t\tnew_y += 3./4.*mesh.give_nodes().give_coor()[node_index][1]\n\t\t\tnew_z += 3./4.*mesh.give_nodes().give_coor()[node_index][2]\n\t\n\t# 6. update new node on interior edge\n\t# * r/k\n\t# /\\ b/k*\n\t# *__/ \\___ r/k\n\t# \\ \\ /¬¬/ *: newly-generated vertices: \n\t# \\ \\/ / b = 3/2/valence, r = 1/4/valence\n\t# *--@--* b/k\t @: existing vertices to be updated: 1-b-r\t\t\n\t# / /\\ \\\n\t# /__/ \\__\\\n\t# * \\ / * r/k\n\t# \\/\n\t\t\n\t\telse:\n\t\t\tnew_x, new_y, new_z = (0, 0, 0)\n\t\t\tbeta = 3./2./valence\n\t\t\tgamma = 1./4./valence\n\t\t\tfor node_in_ring1 in ring1:\n\t\t\t\tnew_x += beta/valence*mesh.give_nodes().give_coor()[node_in_ring1][0]\n\t\t\t\tnew_y += beta/valence*mesh.give_nodes().give_coor()[node_in_ring1][1]\n\t\t\t\tnew_z += beta/valence*mesh.give_nodes().give_coor()[node_in_ring1][2]\n\t\t\t\n\t\t\tfor node_in_ring2 in ring2:\n\t\t\t\tnew_x += gamma/valence*mesh.give_nodes().give_coor()[node_in_ring2][0]\n\t\t\t\tnew_y += gamma/valence*mesh.give_nodes().give_coor()[node_in_ring2][1]\n\t\t\t\tnew_z += gamma/valence*mesh.give_nodes().give_coor()[node_in_ring2][2]\n\t\t\t\n\t\t\tnew_x += (1. - beta - gamma)*mesh.give_nodes().give_coor()[node_index][0]\n\t\t\tnew_y += (1. - beta - gamma)*mesh.give_nodes().give_coor()[node_index][1]\n\t\t\tnew_z += (1. - beta - gamma)*mesh.give_nodes().give_coor()[node_index][2]\n\t\t\n\t\tnew_coor[node_index] = (new_x, new_y, new_z)\n\t\n\tnew_nodes = geo.Node(new_coor)\n\t\n\tmesh.update(new_nodes, new_edges, new_faces)\n\t\n\t# return new_mesh\n\treturn mesh",
"def test_3D_m6_1k_sFH():\n scal, velo = setup_3D()\n\n advec = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L4_2,\n Support: 'gpu_1k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L4_2,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_3D_withPython(scal, velo, advec, advec_py)",
"def kernel_test(slabs, data, backend):\n Q = data[:, 0]\n\n layers = []\n for thickness, rsld, isld, sigma in slabs:\n layers.append(\n model.Layer(\n b=(rsld - 1j * isld), dens=0.1, d=thickness, sigma=sigma\n )\n )\n layers.reverse()\n stack = model.Stack(Layers=list(layers[1:-1]), Repetitions=1)\n sample = model.Sample(\n Stacks=[stack], Ambient=layers[-1], Substrate=layers[0]\n )\n # print(sample)\n\n inst = model.Instrument(\n probe=backend,\n wavelength=1.54,\n coords=\"q\",\n I0=1,\n res=0,\n restype=\"no conv\",\n respoints=5,\n resintrange=2,\n beamw=0.1,\n footype=\"no corr\",\n samplelen=10,\n pol=\"uu\",\n )\n if data.shape[1] == 4:\n dQ = data[:, 3]\n inst.restype = \"full conv and varying res.\"\n inst.res = dQ\n if backend == \"neutron pol spin flip\":\n # memory issues in matrix formalism if too many data points\n inst.respoints = 101\n else:\n inst.respoints = (\n 10001 # try to use same convolution as ref1d when generating\n )\n inst.resintrange = 3.5\n\n # print(inst)\n R = sample.SimSpecular(Q, inst)\n\n assert R.shape == data[:, 1].shape\n if data.shape[1] == 4:\n # validation accuracy is reduced for resolution runs, as strongly\n # depends on numerical convolution scheme\n if backend == \"neutron pol spin flip\":\n np.testing.assert_allclose(R, data[:, 1], rtol=0.005)\n else:\n np.testing.assert_allclose(R, data[:, 1], rtol=0.001)\n else:\n np.testing.assert_allclose(R, data[:, 1], rtol=0.001)",
"def test_reconstruction_against_simulation(subarray_and_event_gamma_off_axis_500_gev):\n\n # 4-LST bright event already calibrated\n # we'll clean it and parametrize it again in the TelescopeFrame\n subarray, event = subarray_and_event_gamma_off_axis_500_gev\n\n # define reconstructor\n reconstructor = HillasReconstructor(subarray)\n\n hillas_dict = {}\n telescope_pointings = {}\n\n for tel_id, dl1 in event.dl1.tel.items():\n\n telescope_pointings[tel_id] = SkyCoord(\n alt=event.pointing.tel[tel_id].altitude,\n az=event.pointing.tel[tel_id].azimuth,\n frame=AltAz(),\n )\n\n geom_CameraFrame = subarray.tel[tel_id].camera.geometry\n\n # this could be done also out of this loop,\n # but in case of real data each telescope would have a\n # different telescope_pointing\n geom_TelescopeFrame = geom_CameraFrame.transform_to(\n TelescopeFrame(telescope_pointing=telescope_pointings[tel_id])\n )\n\n mask = tailcuts_clean(\n geom_TelescopeFrame,\n dl1.image,\n picture_thresh=5.0,\n boundary_thresh=2.5,\n keep_isolated_pixels=False,\n min_number_picture_neighbors=2,\n )\n\n try:\n hillas_dict[tel_id] = hillas_parameters(\n geom_TelescopeFrame[mask], dl1.image[mask]\n )\n\n # the original event is created from a\n # pytest fixture with \"session\" scope, so it's always the same\n # and if we used the same event we would overwrite the image\n # parameters for the next tests, thus causing their failure\n test_event = deepcopy(event)\n test_event.dl1.tel[tel_id].parameters = ImageParametersContainer()\n test_event.dl1.tel[tel_id].parameters.hillas = hillas_dict[tel_id]\n\n except HillasParameterizationError as e:\n print(e)\n continue\n\n # Get shower geometry\n reconstructor(event)\n # get the result from the correct DL2 container\n result = event.dl2.stereo.geometry[\"HillasReconstructor\"]\n\n # get the reconstructed coordinates in the sky\n reco_coord = SkyCoord(alt=result.alt, az=result.az, frame=AltAz())\n # get the simulated coordinates in the sky\n true_coord = SkyCoord(\n alt=event.simulation.shower.alt, az=event.simulation.shower.az, frame=AltAz()\n )\n\n # check that we are not more far than 0.1 degrees\n assert reco_coord.separation(true_coord) < 0.1 * u.deg",
"def init_stitch(N):\n\tif N is None:\n\t\tN = np.int32(HII_DIM) #prepare for stitching\n\tMETA_GRID_SIZE = DIM/N\n\tM = np.int32(HII_DIM/META_GRID_SIZE)\n\t#HII_DIM = np.int32(HII_DIM)\n\tf_pixel_factor = DIM/HII_DIM;\n\tscale = np.float32(BOX_LEN/DIM)\n\tprint 'scale', scale\n\tHII_scale = np.float32(BOX_LEN/HII_DIM)\n\tshape = (DIM,DIM,N)\n\tstitch_grid_size = (DIM/(block_size[0]),\n\t\t\t\t\t\tDIM/(block_size[0]),\n\t\t\t\t\t\tN/(block_size[0]))\n\tHII_stitch_grid_size = (HII_DIM/(block_size[0]),\n\t\t\t\t\t\tHII_DIM/(block_size[0]),\n\t\t\t\t\t\tM/(block_size[0]))\n\t#ratio of large box to small size\n\tkernel_source = open(cmd_folder+\"/initialize_stitch.cu\").read()\n\tkernel_code = kernel_source % {\n\n\t\t'DELTAK': DELTA_K,\n\t\t'DIM': DIM, \n\t\t'VOLUME': VOLUME,\n\t\t'META_BLOCKDIM': N\n\t}\n\tmain_module = nvcc.SourceModule(kernel_code)\n\tinit_stitch = main_module.get_function(\"init_kernel\")\n\tHII_filter = main_module.get_function(\"HII_filter\")\n\tsubsample_kernel = main_module.get_function(\"subsample\")\n\tvelocity_kernel = main_module.get_function(\"set_velocity\")\n\tpspec_texture = main_module.get_texref(\"pspec\")\n\tMRGgen = MRG32k3aRandomNumberGenerator(seed_getter=seed_getter_uniform, offset=0)\n\tplan2d = Plan((np.int64(DIM), np.int64(DIM)), dtype=np.complex64)\n\tplan1d = Plan((np.int64(DIM)), dtype=np.complex64)\n\tprint \"init pspec\"\n\tinterpPspec, interpSize = init_pspec() #interpPspec contains both k array and P array\n\tinterp_cu = cuda.matrix_to_array(interpPspec, order='F')\n\tcuda.bind_array_to_texref(interp_cu, pspec_texture)\n\t#hbox_large = pyfftw.empty_aligned((DIM, DIM, DIM), dtype='complex64')\n\thbox_large = np.zeros((DIM, DIM, DIM), dtype=np.complex64)\n\t#hbox_small = np.zeros(HII_shape, dtype=np.float32)\n\t#hbox_large = n\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\n\t# Set up pinned memory for transfer\n\t#largebox_hs = cuda.aligned_empty(shape=shape, dtype=np.float32, alignment=resource.getpagesize())\n\tlargebox_pin = cuda.pagelocked_empty(shape=shape, dtype=np.float32)\n\tlargecbox_pin = cuda.pagelocked_empty(shape=shape, dtype=np.complex64)\n\n\tlargebox_d = gpuarray.zeros(shape, dtype=np.float32)\n\tlargebox_d_imag = gpuarray.zeros(shape, dtype=np.float32)\n\tprint \"init boxes\"\n\tfor meta_z in xrange(META_GRID_SIZE):\n\t\t# MRGgen = MRG32k3aRandomNumberGenerator(seed_getter=seed_getter_uniform, offset=meta_x*N**3)\n\t\tinit_stitch(largebox_d, DIM, np.int32(meta_z),block=block_size, grid=stitch_grid_size)\n\t\tinit_stitch(largebox_d_imag, DIM, np.int32(meta_z),block=block_size, grid=stitch_grid_size)\n\t\tlargebox_d *= MRGgen.gen_normal(shape, dtype=np.float32)\n\t\tlargebox_d_imag *= MRGgen.gen_normal(shape, dtype=np.float32)\n\t\tlargebox_d = largebox_d + np.complex64(1.j) * largebox_d_imag\n\t\tcuda.memcpy_dtoh_async(largecbox_pin, largebox_d)\n\t\thbox_large[:, :, meta_z*N:(meta_z+1)*N] = largecbox_pin.copy()\n\t#if want to get velocity need to use this\n\tif True:\n\t\tprint \"saving kbox\"\n\t\tnp.save(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN), hbox_large)\n\n\tprint \"Executing FFT on device\"\n\t#hbox_large = pyfftw.interfaces.numpy_fft.ifftn(hbox_large).real\n\thbox_large = fft_stitch(N, plan2d, plan1d, hbox_large, largebox_d).real\n\tprint hbox_large.dtype\n\tprint \"Finished FFT on device\"\n\tnp.save(parent_folder+\"/Boxes/deltax_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN), hbox_large)\n\t\n\tif True:\n\t\tprint \"loading kbox\"\n\t\thbox_large = 
np.load(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN))\n\tfor meta_z in xrange(META_GRID_SIZE):\n\t\tlargebox_pin = hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy()\n\t\t#cuda.memcpy_htod_async(largebox_d, largebox_pin)\n\t\tlargebox_d = gpuarray.to_gpu_async(hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\tHII_filter(largebox_d, DIM, np.int32(meta_z), ZERO, smoothR, block=block_size, grid=stitch_grid_size);\n\t\thbox_large[:, :, meta_z*N:(meta_z+1)*N] = largebox_d.get_async()\n\t#import IPython; IPython.embed()\n\tprint \"Executing FFT on host\"\n\t#hbox_large = hifft(hbox_large).astype(np.complex64).real\n\t#hbox_large = pyfftw.interfaces.numpy_fft.ifftn(hbox_large).real\n\thbox_large = fft_stitch(N, plan2d, plan1d, hbox_large, largebox_d).real\n\tprint \"Finished FFT on host\"\n\t#import IPython; IPython.embed()\n\n\t# for meta_x in xrange(META_GRID_SIZE):\n\t# \tfor meta_y in xrange(META_GRID_SIZE):\n\t# \t\tfor meta_z in xrange(META_GRID_SIZE):\n\t# \t\t\tlargebox_d = gpuarray.to_gpu(hbox_large[meta_x*N:(meta_x+1)*N, meta_y*N:(meta_y+1)*N, meta_z*N:(meta_z+1)*N])\n\t# \t\t\tHII_filter(largebox_d, N, np.int32(meta_x), np.int32(meta_y), np.int32(meta_z), ZERO, smoothR, block=block_size, grid=grid_size);\n\t# \t\t\thbox_large[meta_x*N:(meta_x+1)*N, meta_y*N:(meta_y+1)*N, meta_z*N:(meta_z+1)*N] = largebox_d.get()\n\t#plan = Plan(shape, dtype=np.complex64)\n\t#plan.execute(largebox_d, inverse=True) #FFT to real space of smoothed box\n\t#largebox_d /= VOLUME #divide by VOLUME if using fft (vs ifft)\n\n\n\t# This saves a large resolution deltax\n\n\t\n\tprint \"downsampling\"\n\tsmallbox_d = gpuarray.zeros((HII_DIM,HII_DIM,M), dtype=np.float32)\n\tfor meta_z in xrange(META_GRID_SIZE):\n\t\tlargebox_pin = hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy()\n\t\tcuda.memcpy_dtoh_async(largecbox_pin, largebox_d)\n\t\t#largebox_d = gpuarray.to_gpu_async(hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\tlargebox_d /= scale**3 #\n\t\tsubsample_kernel(largebox_d, smallbox_d, DIM, HII_DIM, PIXEL_FACTOR, block=block_size, grid=HII_stitch_grid_size) #subsample in real space\n\t\thbox_small[:, :, meta_z*M:(meta_z+1)*M] = smallbox_d.get_async()\n\tnp.save(parent_folder+\"/Boxes/smoothed_deltax_z0.00_{0:d}_{1:.0f}Mpc\".format(HII_DIM, BOX_LEN), hbox_small)\n\t#import IPython; IPython.embed()\n\n\n\t# To get velocities: reload the k-space box\n\thbox_large = np.load(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN))\n\thvbox_large = np.zeros((DIM, DIM, DIM), dtype=np.float32)\n\thvbox_small = np.zeros(HII_shape, dtype=np.float32)\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\tlargevbox_d = gpuarray.zeros((DIM,DIM,N), dtype=np.complex64)\n\tsmallvbox_d = gpuarray.zeros((HII_DIM, HII_DIM, M), dtype=np.float32)\n\tfor num, mode in enumerate(['x', 'y', 'z']):\n\t\tfor meta_z in xrange(META_GRID_SIZE):\n\t\t\tlargebox_d = gpuarray.to_gpu_async(hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\t\t#largebox_d /= VOLUME #divide by VOLUME if using fft (vs ifft)\n\t\t\tvelocity_kernel(largebox_d, largevbox_d, DIM, np.int32(meta_z), np.int32(num), block=block_size, grid=stitch_grid_size)\n\t\t\tHII_filter(largevbox_d, DIM, ZERO, smoothR, block=block_size, grid=stitch_grid_size)\n\t\t\tprint hvbox_large.shape, largevbox_d.shape\n\t\t\thvbox_large[:, :, meta_z*N:(meta_z+1)*N] = largevbox_d.get_async()\n\t\thvbox_large = fft_stitch(N, plan2d, plan1d, hvbox_large, largevbox_d).real\n\t\tfor meta_z in xrange(META_GRID_SIZE):\n\t\t\tlargevbox_d = 
gpuarray.to_gpu_async(hvbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\t\tsubsample_kernel(largevbox_d.real, smallvbox_d, DIM, HII_DIM,PIXEL_FACTOR, block=block_size, grid=HII_stitch_grid_size)\n\t\t\thvbox_small[:, :, meta_z*M:(meta_z+1)*M] = smallvbox_d.get_async()\n\t\tnp.save(parent_folder+\"/Boxes/v{0}overddot_{1:d}_{2:.0f}Mpc\".format(mode, HII_DIM, BOX_LEN), smallvbox_d.get())\n\n\treturn",
"def test_2_layer():\r\n # angular frequency in radians * THz\r\n w = 100 * nu.THz\r\n # Relative permittivity of metal and dielectric\r\n em = -4.56 + 0.12j\r\n ed = 1.23 + 0.01j\r\n ex_list = ez_list = [ed, em]\r\n # Relative permeabilities\r\n mu_list = [1,1]\r\n # Dictionary of input parameters\r\n input_params = {'w': w, 'd_list': [inf,inf], 'ex_list': ex_list,\r\n 'ez_list': ez_list, 'mu_list': mu_list}\r\n \r\n # Calculate the theoretical kx\r\n theo_kx = (w / nu.c0) * cmath.sqrt((em * ed) / (em + ed))\r\n if theo_kx.imag < 0:\r\n theo_kx *= -1\r\n print('Theoretical kx:',\r\n '(%.7g+%.7gj) rad/um' % (theo_kx.real / nu.um**-1, theo_kx.imag / nu.um**-1))\r\n \r\n # If I use the theoretical kx value, the mode should be correct and\r\n # all my tests should pass.\r\n params = deepcopy(input_params)\r\n params['kx'] = theo_kx\r\n params = find_all_params_from_kx(params)\r\n kzd, kzm = params['kz_list']\r\n # check that kz_list is correct\r\n assert_floats_are_equal(kzd**2, (w**2 / nu.c0**2) * ed**2 / (em + ed))\r\n assert_floats_are_equal(kzm**2, (w**2 / nu.c0**2) * em**2 / (em + ed))\r\n # check that layer_bottom_list is correct\r\n assert params['layer_bottom_list'][0] == -inf\r\n assert params['layer_bottom_list'][1] == 0\r\n # Check that the boundary condition matrix agrees with hand-calculation\r\n bc_mat = bc_matrix(params)\r\n # ...top-left is Ex0down / H0down\r\n assert_floats_are_equal(bc_mat[0,0], -kzd / (w * ed * nu.eps0))\r\n # ...top-right is -Ex1up / H1up\r\n assert_floats_are_equal(bc_mat[0,1], -kzm / (w * em * nu.eps0))\r\n # ...bottom-left is eps0 * Ez0down / H0down\r\n assert_floats_are_equal(bc_mat[1,0], ed * -theo_kx / (w * ed * nu.eps0))\r\n # ...bottom-right is -eps1 * Ez1up / H1up\r\n assert_floats_are_equal(bc_mat[1,1], -em * -theo_kx / (w * em * nu.eps0))\r\n # Check that one of the eigenvalues is almost zero (compared to the size\r\n # of the matrix elements).\r\n eigenvalues = np.linalg.eig(bc_mat)[0]\r\n assert abs(eigenvalues).min() / abs(bc_mat).max() < 1e-6\r\n # Check that the mode passes all tests.\r\n assert check_mode(params, thorough=True) is True\r\n # Check that I can scale the fields and it still passes all tests.\r\n params_scaled = rescale_fields(1.23+4.56j, params)\r\n assert check_mode(params_scaled, thorough=True) is True\r\n \r\n # Now try my kx-finding algorithm, to see if it finds the right value.\r\n kx_list = find_kx(input_params)\r\n print('kx_list:',\r\n ['(%.7g+%.7gj) rad/um' % (kx.real / nu.um**-1, kx.imag / nu.um**-1)\r\n for kx in kx_list])\r\n kx = kx_list[0]\r\n assert_floats_are_equal(theo_kx, kx)\r\n \r\n plot_mode(params)\r\n \r\n print('If you see this message, all the tests succeeded!!')",
"def test_2D_m4_1k_sFH():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: 'gpu_1k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def Sphere_ExactSerendipityLagrangeQuad():\n\n mesh = Sphere_CubeToSerendipityLagrangeQuad(1)\n \n ################\n # Modifications for exact sphere\n ################\n # x=+1 side\n def posXvals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.ones(xi1.shape);yb=np.array(-xi1);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n x = xb*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n y = yb*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n z = zb*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n return np.vstack([x,y,z])\n def posXnormals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.ones(xi1.shape);yb=np.array(-xi1);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = -1.0 * xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-yb+2.0*yb*zz/3.0)\n dxdxi2 = 1.0 * xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-zb+2.0*yy*zb/3.0)\n dydxi1 = -1.0 * np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n dydxi2 = 1.0 * yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-zb+2.0*xx*zb/3.0)\n dzdxi1 = -1.0 * zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-yb+2.0*xx*yb/3.0)\n dzdxi2 = 1.0 * np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n magnitude = np.sqrt(np.sum(J**2,axis=1))\n return J.T/magnitude\n def posXJ(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.ones(xi1.shape);yb=np.array(-xi1);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = -1.0 * xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-yb+2.0*yb*zz/3.0)\n dxdxi2 = 1.0 * xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-zb+2.0*yy*zb/3.0)\n dydxi1 = -1.0 * np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n dydxi2 = 1.0 * yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-zb+2.0*xx*zb/3.0)\n dzdxi1 = -1.0 * zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-yb+2.0*xx*yb/3.0)\n dzdxi2 = 1.0 * np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n return np.sqrt(np.sum(J**2,axis=1))\n mesh.eList[0].vals = posXvals\n mesh.eList[0].normals = posXnormals\n mesh.eList[0].J = posXJ\n \n def posYvals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1);yb=np.ones(xi1.shape);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n x = xb*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n y = yb*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n z = zb*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n return np.vstack([x,y,z])\n def posYnormals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1);yb=np.ones(xi1.shape);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = 1.0*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n dxdxi2 = 1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-zb+2.0*yy*zb/3.0)\n dydxi1 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-xb+2.0*xb*zz/3.0)\n dydxi2 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-zb+2.0*xx*zb/3.0)\n dzdxi1 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-xb+2.0*xb*yy/3.0)\n dzdxi2 = 1.0*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n magnitude = np.sqrt(np.sum(J**2,axis=1))\n return 
J.T/magnitude\n def posYJ(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1);yb=np.ones(xi1.shape);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = 1.0*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n dxdxi2 = 1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-zb+2.0*yy*zb/3.0)\n dydxi1 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-xb+2.0*xb*zz/3.0)\n dydxi2 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-zb+2.0*xx*zb/3.0)\n dzdxi1 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-xb+2.0*xb*yy/3.0)\n dzdxi2 = 1.0*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n return np.sqrt(np.sum(J**2,axis=1))\n mesh.eList[1].vals = posYvals\n mesh.eList[1].normals = posYnormals\n mesh.eList[1].J = posYJ\n \n # x=-1 side\n def negXvals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=-np.ones(xi1.shape);yb=np.array(xi1);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n x = xb*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n y = yb*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n z = zb*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n return np.vstack([x,y,z])\n def negXnormals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=-np.ones(xi1.shape);yb=np.array(xi1);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = 1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-yb+2.0*yb*zz/3.0)\n dxdxi2 = 1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-zb+2.0*yy*zb/3.0)\n dydxi1 = 1.0*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n dydxi2 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-zb+2.0*xx*zb/3.0)\n dzdxi1 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-yb+2.0*xx*yb/3.0)\n dzdxi2 = 1.0*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n magnitude = np.sqrt(np.sum(J**2,axis=1))\n return J.T/magnitude\n def negXJ(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=-np.ones(xi1.shape);yb=np.array(xi1);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = 1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-yb+2.0*yb*zz/3.0)\n dxdxi2 = 1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-zb+2.0*yy*zb/3.0)\n dydxi1 = 1.0*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n dydxi2 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-zb+2.0*xx*zb/3.0)\n dzdxi1 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-yb+2.0*xx*yb/3.0)\n dzdxi2 = 1.0*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n return np.sqrt(np.sum(J**2,axis=1))\n mesh.eList[2].vals = negXvals\n mesh.eList[2].normals = negXnormals\n mesh.eList[2].J = negXJ\n\n # y=-1 side\n def negYvals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(-xi1);yb=-np.ones(xi1.shape);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n x = xb*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n y = yb*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n z = zb*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n return np.vstack([x,y,z])\n def negYnormals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n 
xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(-xi1);yb=-np.ones(xi1.shape);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2; \n dxdxi1 = -1.0*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n dxdxi2 = 1.0*0.5*xb*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5*(-zb+2.0*yy*zb/3.0)\n dydxi1 = -1.0*0.5*yb*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5*(-xb+2.0*xb*zz/3.0)\n dydxi2 = 1.0*0.5*yb*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5*(-zb+2.0*xx*zb/3.0)\n dzdxi1 = -1.0*0.5*zb*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5*(-xb+2.0*xb*yy/3.0)\n dzdxi2 = 1.0*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0) \n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n magnitude = np.sqrt(np.sum(J**2,axis=1))\n return J.T/magnitude\n def negYJ(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(-xi1);yb=-np.ones(xi1.shape);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2; \n dxdxi1 = -1.0*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n dxdxi2 = 1.0*0.5*xb*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5*(-zb+2.0*yy*zb/3.0)\n dydxi1 = -1.0*0.5*yb*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5*(-xb+2.0*xb*zz/3.0)\n dydxi2 = 1.0*0.5*yb*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5*(-zb+2.0*xx*zb/3.0)\n dzdxi1 = -1.0*0.5*zb*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5*(-xb+2.0*xb*yy/3.0)\n dzdxi2 = 1.0*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0) \n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n return np.sqrt(np.sum(J**2,axis=1))\n mesh.eList[3].vals = negYvals\n mesh.eList[3].normals = negYnormals\n mesh.eList[3].J = negYJ\n \n # z=+1 side\n def posZvals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1)\n yb=np.array(-xi2)\n zb=np.ones(xi1.shape)\n xx=xb**2;yy=yb**2;zz=zb**2;\n x = xb*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n y = yb*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n z = zb*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n return np.vstack([x,y,z])\n def posZnormals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1);yb=np.array(-xi2);zb=np.ones(xi1.shape);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = 1.0*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n dxdxi2 = -1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-yb+2.0*yb*zz/3.0)\n dydxi1 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-xb+2.0*xb*zz/3.0)\n dydxi2 = -1.0*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n dzdxi1 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-xb+2.0*xb*yy/3.0)\n dzdxi2 = -1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-yb+2.0*xx*yb/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n magnitude = np.sqrt(np.sum(J**2,axis=1))\n return J.T/magnitude\n def posZJ(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1);yb=np.array(-xi2);zb=np.ones(xi1.shape);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = 1.0*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n dxdxi2 = -1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-yb+2.0*yb*zz/3.0)\n dydxi1 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-xb+2.0*xb*zz/3.0)\n dydxi2 = -1.0*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n dzdxi1 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-xb+2.0*xb*yy/3.0)\n dzdxi2 = -1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * 
(-yb+2.0*xx*yb/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n return np.sqrt(np.sum(J**2,axis=1))\n mesh.eList[4].vals = posZvals\n mesh.eList[4].normals = posZnormals\n mesh.eList[4].J = posZJ\n \n # z=-1 side\n def negZvals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1);yb=np.array(xi2);zb=-np.ones(xi1.shape);\n xx=xb**2;yy=yb**2;zz=zb**2;\n x = xb*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n y = yb*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n z = zb*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n return np.vstack([x,y,z])\n def negZnormals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1);yb=np.array(xi2);zb=-np.ones(xi1.shape);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = 1.0*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n dxdxi2 = 1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-yb+2.0*yb*zz/3.0)\n dydxi1 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-xb+2.0*xb*zz/3.0)\n dydxi2 = 1.0*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n dzdxi1 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-xb+2.0*xb*yy/3.0)\n dzdxi2 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-yb+2.0*xx*yb/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n magnitude = np.sqrt(np.sum(J**2,axis=1))\n return J.T/magnitude\n def negZJ(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1);yb=np.array(xi2);zb=-np.ones(xi1.shape);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = 1.0*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n dxdxi2 = 1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-yb+2.0*yb*zz/3.0)\n dydxi1 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-xb+2.0*xb*zz/3.0)\n dydxi2 = 1.0*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n dzdxi1 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-xb+2.0*xb*yy/3.0)\n dzdxi2 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-yb+2.0*xx*yb/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n return np.sqrt(np.sum(J**2,axis=1))\n mesh.eList[5].vals = negZvals\n mesh.eList[5].normals = negZnormals\n mesh.eList[5].J = negZJ\n \n for e in mesh.eList:\n e.ExactElement = True\n \n return mesh",
"def test_3D_m6_2k_sFH():\n scal, velo = setup_3D()\n\n advec = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L4_2,\n Support: 'gpu_2k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L4_2,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_3D_withPython(scal, velo, advec, advec_py)",
"def run_multi_medium_inversion(datadir, outdir, real_data_fnames, MT_green_func_fnames, single_force_green_func_fnames, data_labels, inversion_type, perform_normallised_waveform_inversion, compare_all_waveforms_simultaneously, num_samples, comparison_metric, manual_indices_time_shift_MT, manual_indices_time_shift_SF, nlloc_hyp_filename, cut_phase_start_vals=[], cut_phase_length=0, plot_switch=False, num_processors=1, set_pre_time_shift_values_to_zero_switch=True, only_save_non_zero_solns_switch=False, return_absolute_similarity_values_switch=False, invert_for_ratio_of_multiple_media_greens_func_switch=False, green_func_fnames_split_index=0, green_func_phase_labels=[], invert_for_relative_magnitudes_switch=False, rel_exp_mag_range=[1.,1.], auto_shift_for_best_fit=True):\n \n # Load input data (completely, for specific inversion type):\n real_data_array, green_func_array = get_overall_real_and_green_func_data(datadir, real_data_fnames, MT_green_func_fnames, single_force_green_func_fnames, inversion_type, manual_indices_time_shift_MT=manual_indices_time_shift_MT, manual_indices_time_shift_SF=manual_indices_time_shift_SF, cut_phase_start_vals=cut_phase_start_vals, cut_phase_length=cut_phase_length, set_pre_time_shift_values_to_zero_switch=set_pre_time_shift_values_to_zero_switch, invert_for_ratio_of_multiple_media_greens_func_switch=invert_for_ratio_of_multiple_media_greens_func_switch, green_func_fnames_split_index=green_func_fnames_split_index)\n \n # Do initial check/s:\n if len(green_func_phase_labels)>0:\n if not len(green_func_array[:,0,0,0]) == len(green_func_phase_labels):\n print(\"Error: Greens functions filename array (for medium 1), does not match length of green_func_phase_labels array.\")\n sys.exit()\n \n # Get number of different phases, if specified:\n num_phase_types_for_media_ratios = 0\n if green_func_phase_labels.count(\"P\")>0:\n num_phase_types_for_media_ratios += 1\n if green_func_phase_labels.count(\"S\")>0:\n num_phase_types_for_media_ratios += 1\n if green_func_phase_labels.count(\"surface\")>0:\n num_phase_types_for_media_ratios += 1\n \n # Define a fraction of the second medium to use for the simple least squares inversion:\n frac_medium_2 = 0.5\n green_func_array_for_lsq_inv = (1. 
- frac_medium_2)*green_func_array[:,:,:,0] + frac_medium_2*green_func_array[:,:,:,1]\n \n # Perform the inversion:\n M = perform_inversion(real_data_array, green_func_array_for_lsq_inv)\n M_amplitude = ((np.sum(M**2))**0.5)\n\n # And get forward model synthetic waveform result:\n synth_forward_model_result_array = forward_model(green_func_array_for_lsq_inv, M)\n\n # And plot the results:\n if plot_switch:\n plot_specific_forward_model_result(real_data_array, synth_forward_model_result_array, data_labels, plot_title=\"Initial theoretical inversion solution\", perform_normallised_waveform_inversion=perform_normallised_waveform_inversion)\n\n # And save least squares output:\n # Set output arrays to equal least squares output: \n MTs = M\n similarity_curr_sample, shift_idxs = compare_synth_to_real_waveforms(real_data_array, synth_forward_model_result_array, comparison_metric, perform_normallised_waveform_inversion, compare_all_waveforms_simultaneously, auto_shift_for_best_fit) \n MTp = np.array([similarity_curr_sample])\n # And save data to MTFIT style file:\n outdir_least_squares = outdir+\"/least_squares_result\"\n os.system(\"mkdir -p \"+outdir_least_squares)\n save_to_MTFIT_style_file(MTs, MTp, nlloc_hyp_filename, inversion_type, outdir_least_squares, shift_idxs=shift_idxs) # Saves pickled dictionary containing data from inversion\n # And save most likely solution and real data waveforms to file:\n if inversion_type == \"DC_single_force_couple\" or inversion_type == \"DC_single_force_no_coupling\" or inversion_type == \"DC_crack_couple\" or inversion_type == \"single_force_crack_no_coupling\":\n synth_forward_model_most_likely_result_array = forward_model(green_func_array, MTs[:-1, np.where(MTp==np.max(MTp))[0][0]])\n else:\n synth_forward_model_most_likely_result_array = forward_model(green_func_array, MTs[:, np.where(MTp==np.max(MTp))[0][0]])\n # And get shift associated with most likely model:\n if len(shift_idxs) > 0:\n shift_idxs_most_likely_result = np.array(shift_idxs)\n else:\n shift_idxs_most_likely_result = []\n # And save:\n save_specific_waveforms_to_file(real_data_array, synth_forward_model_most_likely_result_array, data_labels, nlloc_hyp_filename, inversion_type, outdir_least_squares, shift_idxs=shift_idxs_most_likely_result, normallise_data=perform_normallised_waveform_inversion)\n\n # And do Monte Carlo random sampling to obtain PDF of moment tensor:\n MTs, MTp, MTp_absolute, shift_idxs_all_samples = perform_monte_carlo_sampled_waveform_inversion(real_data_array, green_func_array, num_samples, M_amplitude=M_amplitude,inversion_type=inversion_type, comparison_metric=comparison_metric, perform_normallised_waveform_inversion=perform_normallised_waveform_inversion, compare_all_waveforms_simultaneously=compare_all_waveforms_simultaneously, num_processors=num_processors, return_absolute_similarity_values_switch=return_absolute_similarity_values_switch, invert_for_ratio_of_multiple_media_greens_func_switch=invert_for_ratio_of_multiple_media_greens_func_switch, green_func_phase_labels=green_func_phase_labels, num_phase_types_for_media_ratios=num_phase_types_for_media_ratios, invert_for_relative_magnitudes_switch=invert_for_relative_magnitudes_switch, rel_exp_mag_range=rel_exp_mag_range, auto_shift_for_best_fit=auto_shift_for_best_fit)\n\n # Check that probability of output is non-zero:\n if math.isnan(MTp[0]):\n print(\"Error: Sum of probabilities is equal to zero - therefore no adiquate solution could be found and inversion is terminating.\")\n sys.exit()\n \n # Remove zero 
probability values if specified:\n if only_save_non_zero_solns_switch:\n MTp, MTs = remove_zero_prob_results(MTp, MTs)\n\n # And plot most likely solution:\n if plot_switch:\n if inversion_type == \"DC_single_force_couple\" or inversion_type == \"DC_single_force_no_coupling\" or inversion_type == \"DC_crack_couple\" or inversion_type == \"single_force_crack_no_coupling\":\n if invert_for_ratio_of_multiple_media_greens_func_switch:\n if num_phase_types_for_media_ratios>0:\n # Generate different phase fractions:\n tmp_frac_medium_1_diff_phases_dict={}\n tmp_frac_medium_1_diff_phases_dict[\"P\"] = MTs[-3, np.where(MTp==np.max(MTp))[0][0]]\n tmp_frac_medium_1_diff_phases_dict[\"S\"] = MTs[-2, np.where(MTp==np.max(MTp))[0][0]]\n tmp_frac_medium_1_diff_phases_dict[\"surface\"] = MTs[-1, np.where(MTp==np.max(MTp))[0][0]]\n # Create actual greens functions for this solution:\n green_func_array_for_most_likely_amp_ratio = np.zeros(np.shape(green_func_array[:,:,:,0]), dtype=float)\n for j in range(len(green_func_phase_labels)):\n tmp_frac_medium_1 = tmp_frac_medium_1_diff_phases_dict[green_func_phase_labels[j]] # Get fraction for specific phase, for specific greens functions for specific station-phase\n green_func_array_for_most_likely_amp_ratio[j, :, :] = (1. - tmp_frac_medium_1)*green_func_array[j,:,:,0] + tmp_frac_medium_1*green_func_array[j,:,:,1]\n # And get result:\n synth_forward_model_most_likely_result_array = forward_model(green_func_array_for_most_likely_amp_ratio, MTs[:-4, np.where(MTp==np.max(MTp))[0][0]])\n else:\n frac_medium_1 = MTs[-1, np.where(MTp==np.max(MTp))[0][0]]\n green_func_array_for_most_likely_amp_ratio = (1. - frac_medium_1)*green_func_array[:,:,:,0] + frac_medium_1*green_func_array[:,:,:,1]\n synth_forward_model_most_likely_result_array = forward_model(green_func_array_for_most_likely_amp_ratio, MTs[:-2, np.where(MTp==np.max(MTp))[0][0]])\n else:\n synth_forward_model_most_likely_result_array = forward_model(green_func_array, MTs[:-1, np.where(MTp==np.max(MTp))[0][0]])\n else:\n if invert_for_ratio_of_multiple_media_greens_func_switch:\n if num_phase_types_for_media_ratios>0:\n # Generate different phase fractions:\n tmp_frac_medium_1_diff_phases_dict={}\n tmp_frac_medium_1_diff_phases_dict[\"P\"] = MTs[-3, np.where(MTp==np.max(MTp))[0][0]]\n tmp_frac_medium_1_diff_phases_dict[\"S\"] = MTs[-2, np.where(MTp==np.max(MTp))[0][0]]\n tmp_frac_medium_1_diff_phases_dict[\"surface\"] = MTs[-1, np.where(MTp==np.max(MTp))[0][0]]\n # Create actual greens functions for this solution:\n green_func_array_for_most_likely_amp_ratio = np.zeros(np.shape(green_func_array[:,:,:,0]), dtype=float)\n for j in range(len(green_func_phase_labels)):\n tmp_frac_medium_1 = tmp_frac_medium_1_diff_phases_dict[green_func_phase_labels[j]] # Get fraction for specific phase, for specific greens functions for specific station-phase\n green_func_array_for_most_likely_amp_ratio[j, :, :] = (1. - tmp_frac_medium_1)*green_func_array[j,:,:,0] + tmp_frac_medium_1*green_func_array[j,:,:,1]\n # And get result:\n synth_forward_model_most_likely_result_array = forward_model(green_func_array_for_most_likely_amp_ratio, MTs[:-3, np.where(MTp==np.max(MTp))[0][0]])\n else:\n frac_medium_1 = MTs[-1, np.where(MTp==np.max(MTp))[0][0]]\n green_func_array_for_most_likely_amp_ratio = (1. 
- frac_medium_1)*green_func_array[:,:,:,0] + frac_medium_1*green_func_array[:,:,:,1]\n synth_forward_model_most_likely_result_array = forward_model(green_func_array_for_most_likely_amp_ratio, MTs[:-1, np.where(MTp==np.max(MTp))[0][0]])\n else:\n synth_forward_model_most_likely_result_array = forward_model(green_func_array, MTs[:, np.where(MTp==np.max(MTp))[0][0]])\n plot_specific_forward_model_result(real_data_array, synth_forward_model_most_likely_result_array, data_labels, plot_title=\"Most likely Monte Carlo sampled solution\", perform_normallised_waveform_inversion=perform_normallised_waveform_inversion)\n print(\"Most likely solution:\", MTs[:,np.where(MTp==np.max(MTp))[0][0]])\n\n # And save data to MTFIT style file:\n save_to_MTFIT_style_file(MTs, MTp, nlloc_hyp_filename, inversion_type, outdir, MTp_absolute=MTp_absolute, shift_idxs=shift_idxs_all_samples) # Saves pickled dictionary containing data from inversion\n # And save most likely solution and real data waveforms to file:\n synth_forward_model_most_likely_result_array = get_synth_forward_model_most_likely_result(MTs, MTp, green_func_array, inversion_type, invert_for_ratio_of_multiple_media_greens_func_switch=invert_for_ratio_of_multiple_media_greens_func_switch, green_func_phase_labels=green_func_phase_labels, num_phase_types_for_media_ratios=num_phase_types_for_media_ratios)\n # And get shift associated with most likely model:\n if len(shift_idxs_all_samples) > 0:\n shift_idxs_most_likely_result = np.array(shift_idxs_all_samples)[0, np.where(MTp==np.max(MTp))[0][0]]\n else:\n shift_idxs_most_likely_result = []\n # And save:\n save_specific_waveforms_to_file(real_data_array, synth_forward_model_most_likely_result_array, data_labels, nlloc_hyp_filename, inversion_type, outdir, shift_idxs=shift_idxs_most_likely_result, normallise_data=perform_normallised_waveform_inversion)\n \n print(\"Finished\")",
"def test_2D_m8_2k():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: 'gpu_2k',\n Splitting: 'o2'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: '',\n Splitting: 'o2'},\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def test_power_spectral_density_from_spatially_resolved_magnetisation_confined_to_mesh_region(tmpdir, debug=False):\n os.chdir(str(tmpdir))\n RTOL = 1e-10\n\n H1 = 1e6 # external field in A/m\n alpha1 = 0.5 # some sort of damping constant\n omega1 = gamma * H1 # precession frequency\n\n H2 = 2.8e4 # external field in A/m\n alpha2 = 0.3 # some sort of damping constant\n omega2 = gamma * H2 # precession frequency\n\n ##\n # Step 1: Construct a time series of artificial magnetisation\n # data and save it to a bunch of .npy files.\n ##\n t_step = 1e-11\n t_ini = 0\n t_end = 10e-9\n\n N1 = 42 # in a real application this would be the number of mesh vertices\n N2 = 23 # in a real application this would be the number of mesh vertices\n fft_test_helpers.create_test_npy_files_with_two_regions(\n str(tmpdir), t_step, t_ini, t_end, omega1, alpha1, N1, omega2, alpha2, N2)\n\n ##\n # Step 2: compute the FFT of a resampled time series, both by\n # hand and using FFT_m.\n ##\n # XXX TODO: Resampling timesteps is not supported when using .npy\n # files. Either simplify the code below, or implement saving to\n # .h5 files so that it's easier to implement resampling for\n # spatially resolved data, too.\n ##\n t_step_res = t_step\n t_ini_res = t_ini\n t_end_res = t_end\n ts_resampled = np.arange(t_ini_res, t_end_res, t_step_res)\n\n # Compute time series based on resampled timesteps\n mx_res = exp(-ts_resampled * 1e8 / alpha1) * sin(omega1 * ts_resampled)\n my_res = exp(-ts_resampled * 1e8 / alpha1) * cos(omega1 * ts_resampled)\n mz_res = 1 - sqrt(mx_res ** 2 + my_res ** 2)\n\n # Compute 'analytical' Fourier transform of resampled time series and\n # determine the power of the spectrum for each component. We also need\n # to multiply by the number of mesh nodes because the numerical algorithm\n # sums up all contributions at the individual nodes (but we can just\n # multiply because they are all identical by construction).\n psd_mx_expected = N1 * np.absolute(np.fft.rfft(mx_res)) ** 2\n psd_my_expected = N1 * np.absolute(np.fft.rfft(my_res)) ** 2\n psd_mz_expected = N1 * np.absolute(np.fft.rfft(mz_res)) ** 2\n\n # Compute Fourier transform of resampled time series using FFT_m\n freqs_computed, psd_mx_computed, psd_my_computed, psd_mz_computed = \\\n compute_power_spectral_density('m_ringdown*.npy', t_step_res, t_ini=t_ini_res,\n t_end=t_end_res, subtract_values=None, restrict_to_vertices=xrange(N1))\n\n # Check that the analytically determined power spectra are the same as the\n # computed ones.\n assert(np.allclose(psd_mx_expected, psd_mx_computed, atol=0, rtol=RTOL))\n assert(np.allclose(psd_my_expected, psd_my_computed, atol=0, rtol=RTOL))\n assert(np.allclose(psd_mz_expected, psd_mz_computed, atol=0, rtol=RTOL))\n\n if debug:\n # Plot the spectra for debugging\n fig = plt.figure(figsize=(20, 5))\n ax = fig.gca()\n ax.plot(freqs_computed, psd_mx_expected, label='psd_mx_expected')\n ax.plot(freqs_computed, psd_my_expected, label='psd_my_expected')\n ax.plot(freqs_computed, psd_mz_expected, label='psd_mz_expected')\n ax.plot(freqs_computed, psd_mx_computed, label='psd_mx_computed')\n ax.plot(freqs_computed, psd_my_computed, label='psd_my_computed')\n ax.plot(freqs_computed, psd_mz_computed, label='psd_mz_computed')\n ax.legend(loc='best')\n fig.savefig('psd_m_McMichaelStiles.png')",
"def test_c0q1(self):\n self.check_c0q1(test_hexMesh_3x3=False,use_petsc=True, name=\"_proteusMesh_\")",
"def subdivideMesh(IKLE,MESHX,MESHY): \n # ~~> Singling out edges\n from matplotlib.tri import Triangulation\n edges = Triangulation(MESHX,MESHY,IKLE).get_cpp_triangulation().get_edges()\n \n # ~~> Memory allocation for new MESH\n IELEM = len(IKLE); IPOIN = len(MESHX); IEDGE = len(edges)\n JKLE = np.zeros((IELEM*4,3),dtype=np.int) # you subdivide every elements by 4\n MESHJ = np.zeros((IEDGE,2),dtype=np.int) # you add one point on every edges\n \n # ~~> Lookup tables for node numbering on common edges\n pa,pb = edges.T\n k1b,k1a = np.sort(np.take(IKLE,[0,1],axis=1)).T\n indx1 = np.searchsorted(pa,k1a)\n jndx1 = np.searchsorted(pa,k1a,side='right')\n k2b,k2a = np.sort(np.take(IKLE,[1,2],axis=1)).T\n indx2 = np.searchsorted(pa,k2a)\n jndx2 = np.searchsorted(pa,k2a,side='right')\n k3b,k3a = np.sort(np.take(IKLE,[2,0],axis=1)).T\n indx3 = np.searchsorted(pa,k3a)\n jndx3 = np.searchsorted(pa,k3a,side='right')\n \n # ~~> Building one triangle at a time /!\\ Please get this loop parallelised\n j = 0\n for i in range(IELEM):\n k1 = indx1[i]+np.searchsorted(pb[indx1[i]:jndx1[i]],k1b[i])\n k2 = indx2[i]+np.searchsorted(pb[indx2[i]:jndx2[i]],k2b[i])\n k3 = indx3[i]+np.searchsorted(pb[indx3[i]:jndx3[i]],k3b[i])\n # ~~> New connectivity JKLE\n JKLE[j] = [IKLE[i][0],IPOIN+k1,IPOIN+k3]\n JKLE[j+1] = [IKLE[i][1],IPOIN+k2,IPOIN+k1]\n JKLE[j+2] = [IKLE[i][2],IPOIN+k3,IPOIN+k2]\n JKLE[j+3] = [IPOIN+k1,IPOIN+k2,IPOIN+k3]\n # ~~> New interpolation references for values and coordinates\n MESHJ[k1] = [IKLE[i][0],IKLE[i][1]]\n MESHJ[k2] = [IKLE[i][1],IKLE[i][2]]\n MESHJ[k3] = [IKLE[i][2],IKLE[i][0]]\n j += 4\n\n # ~~> Reset IPOBO while you are at it\n MESHX = np.resize(MESHX,IPOIN+IEDGE)\n MESHY = np.resize(MESHY,IPOIN+IEDGE)\n MESHX[IPOIN:] = np.sum(MESHX[MESHJ],axis=1)/2.\n MESHY[IPOIN:] = np.sum(MESHY[MESHJ],axis=1)/2.\n neighbours = Triangulation(MESHX,MESHY,JKLE).get_cpp_triangulation().get_neighbors()\n JPOBO = np.zeros(IPOIN+IEDGE,np.int)\n for n in range(IELEM*4):\n s1,s2,s3 = neighbours[n]\n e1,e2,e3 = JKLE[n]\n if s1 < 0:\n JPOBO[e1] = e1+1\n JPOBO[e2] = e2+1\n if s2 < 0:\n JPOBO[e2] = e2+1\n JPOBO[e3] = e3+1\n if s3 < 0:\n JPOBO[e3] = e3+1\n JPOBO[e1] = e1+1\n\n return JKLE,MESHX,MESHY,JPOBO,MESHJ",
"def test_MeshMat_1group(self):\n\n MS_grp = self.meshsol.get_group(\"stator\")\n cells_grp, nb_cell, indices = MS_grp.get_mesh().get_cell()\n solution = np.array([[0, 1, 2], [1, 2, 3]])\n result_tgl = cells_grp[\"triangle\"]\n testA = np.sum(abs(solution - result_tgl))\n msg = (\n \"Wrong output: returned \" + str(result_tgl) + \", expected: \" + str(solution)\n )\n self.assertAlmostEqual(testA, 0, msg=msg, delta=self.DELTA)\n\n MS_grp = self.meshsol.get_group(\"rotor\")\n cells_grp, nb_cell, indices = MS_grp.get_mesh().get_cell()\n solution = np.array([[3, 3], [1, 2], [2, 3]])\n results = cells_grp[\"triangle\"] # The point indices have changed !\n points = MS_grp.get_mesh().get_point(results)\n testA = np.sum(abs(solution - points))\n msg = \"Wrong output: returned \" + str(results) + \", expected: \" + str(solution)\n self.assertAlmostEqual(testA, 0, msg=msg, delta=self.DELTA)",
"def ThreeDTest(SMethod,IMethod,Fraction,Plot = False):\r\n \r\n # Cylinder Parameters--------------------------------------------------------- \r\n CL = 100 # cylinder length\r\n Pt = 120 # number of points in each cylinder\r\n Cn = 50 # number of horizontal slices in cylinder\r\n\r\n x = np.zeros(Cn*Pt)\r\n y = np.zeros(Cn*Pt)\r\n z = np.zeros(Cn*Pt)\r\n # Generate cylinder-----------------------------------------------------------\r\n n = 0\r\n for i in range(Cn):\r\n for j in range(Pt):\r\n x[n] = np.cos((2*pi*j)/Pt)\r\n y[n] = np.sin((2*pi*j)/Pt)\r\n z[n] = i*(CL/Cn)\r\n n += 1\r\n \r\n YFull = (np.sin(2*pi*0.03*z))+(np.cos(2*pi*x+2*pi*x))\r\n XFull = np.column_stack((x,y,z))\r\n MFull = np.column_stack((x,y,z,YFull))\r\n\r\n # Randomise matrix and Generate sparse version of geometry--------------------\r\n split = int(np.ceil((MFull.shape[0])*Fraction)) \r\n np.random.shuffle(MFull)\r\n # Sparse Set\r\n XTrain = MFull[:split,:3]\r\n YTrain = MFull[:split,3]\r\n # Training set\r\n XStar = MFull[split:,:3]\r\n CStar = MFull[split:,3]\r\n\r\n # Reconstruct XFull's geometry using XTrain and YTrain------------------------\r\n YHat = ThreeDPointInter(XTrain,YTrain,XFull,SMethod,IMethod,10)\r\n mse = mseCalc(YFull,YHat)\r\n print('Mean Squared Error =',mse)\r\n # Plot whole data-----------------------------------------------------------\r\n if Plot:\r\n fig = plt.figure()\r\n ax1 = fig.add_subplot(131, projection='3d')\r\n ax1.scatter(XFull[:,0],XFull[:,1],XFull[:,2],c=[float(i) for i in YFull],cmap='plasma')\r\n ax1.set_xlabel('x')\r\n ax1.set_ylabel('y')\r\n ax1.set_zlabel('z')\r\n # Plot training Data\r\n ax2 = fig.add_subplot(132, projection='3d')\r\n ax2.scatter(XTrain[:,0],XTrain[:,1],XTrain[:,2],c=[float(i) for i in YTrain],cmap='plasma')\r\n ax2.set_xlabel('XTrain1')\r\n ax2.set_ylabel('XTrain2')\r\n ax2.set_zlabel('XTrain3')\r\n # Plot Reconstruction of XFull\r\n ax3 = fig.add_subplot(133, projection='3d')\r\n ax3.scatter(XFull[:,0],XFull[:,1],XFull[:,2],c=[float(i) for i in YHat],cmap='plasma')\r\n ax3.set_xlabel('x')\r\n ax3.set_ylabel('y')\r\n ax3.set_zlabel('z')\r\n \r\n plt.show()\r\n\r\n return mse"
]
| [
"0.6412918",
"0.6150319",
"0.60951227",
"0.6055545",
"0.6023906",
"0.5983679",
"0.59347147",
"0.58496296",
"0.5808829",
"0.57917595",
"0.5779339",
"0.57610947",
"0.5742945",
"0.57332003",
"0.5708701",
"0.5674671",
"0.56742716",
"0.5661019",
"0.56583065",
"0.56562775",
"0.5637465",
"0.5629036",
"0.5626316",
"0.56175834",
"0.56070644",
"0.5597975",
"0.5585362",
"0.5581018",
"0.5572131",
"0.55689657"
]
| 0.64558506 | 0 |
Testing M8 remeshing formula in 3D, 2 kernel, simple precision, o2_FullHalf splitting. | def test_3D_m8_2k_sFH():
scal, velo = setup_3D()
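    # GPU advection under test: M8' remeshing, two-kernel GPU support ('gpu_2k'),
    # RK2 time integration, o2 full/half splitting.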
advec = Advection(velo, scal, discretization=d3d,
method={TimeIntegrator: RK2,
Interpolation: Linear,
Remesh: M8Prime,
Support: 'gpu_2k',
Splitting: 'o2_FullHalf'}
)
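    # Reference operator: identical numerics (RK2, linear interpolation, M8' remeshing,
    # o2_FullHalf splitting) but with empty Support, i.e. the non-GPU code path used as baseline.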
advec_py = Advection(velo, scal, discretization=d3d,
method={TimeIntegrator: RK2,
Interpolation: Linear,
Remesh: M8Prime,
Support: '',
Splitting: 'o2_FullHalf'}
)
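    # Check the GPU-computed result against the Python reference.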
assertion_3D_withPython(scal, velo, advec, advec_py) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_3D_m8_1k_sFH():\n scal, velo = setup_3D()\n\n advec = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: 'gpu_1k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_3D_withPython(scal, velo, advec, advec_py)",
"def test_2D_m8_1k_sFH():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: 'gpu_1k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def test_3D_m8_2k():\n scal, velo = setup_3D()\n\n advec = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: 'gpu_2k',\n Splitting: 'o2'}\n )\n advec_py = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: '',\n Splitting: 'o2'},\n )\n assertion_3D_withPython(scal, velo, advec, advec_py)",
"def test_2D_m8_2k_sFH():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: 'gpu_2k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def test_2_2_3D_rec_splits(self):\n check = [(-3.0, -2.0, 0.0), (4.0, 10.0, 1.0), (4.0, -2.0, 0.0),\n (4.0, 10.0, 0.0), (4.0, -2.0, 1.0), (-3.0, 10.0, 0.0),\n (-3.0, 10.0, 1.0), (-3.0, -2.0, 1.0), (0.5, 4.0, 0.5),\n (-3.0, 4.0, 0.5), (-3.0, -2.0, 0.5), (-3.0, 4.0, 0.0),\n (0.5, -2.0, 0.5), (0.5, -2.0, 0.0), (0.5, 4.0, 0.0),\n (-1.25, 1.0, 0.25), (4.0, 4.0, 0.5), (4.0, 10.0, 0.5),\n (4.0, 4.0, 1.0), (0.5, 10.0, 0.5), (0.5, 10.0, 1.0),\n (0.5, 4.0, 1.0), (2.25, 7.0, 0.75), (4.0, -2.0, 0.5),\n (4.0, 4.0, 0.0), (2.25, 1.0, 0.25), (0.5, 10.0, 0.0),\n (2.25, 7.0, 0.25), (0.5, -2.0, 1.0), (2.25, 1.0, 0.75),\n (-3.0, 10.0, 0.5), (-1.25, 7.0, 0.25), (-3.0, 4.0, 1.0),\n (-1.25, 7.0, 0.75), (-1.25, 1.0, 0.75), (0.5, 1.0, 0.25),\n (0.5, 4.0, 0.25), (0.5, 1.0, 0.5), (-1.25, 4.0, 0.25),\n (-1.25, 4.0, 0.5), (-1.25, 1.0, 0.5), (-0.375, 2.5, 0.375),\n (-3.0, 1.0, 0.25), (-3.0, -2.0, 0.25), (-3.0, 1.0, 0.0),\n (-1.25, -2.0, 0.25), (-1.25, -2.0, 0.0), (-1.25, 1.0, 0.0),\n (-2.125, -0.5, 0.125), (-3.0, 4.0, 0.25), (-3.0, 1.0, 0.5),\n (-2.125, 2.5, 0.375), (-1.25, -2.0, 0.5),\n (-2.125, -0.5, 0.375), (-1.25, 4.0, 0.0), (-2.125, 2.5, 0.125),\n (0.5, -2.0, 0.25), (-0.375, -0.5, 0.375), (0.5, 1.0, 0.0),\n (-0.375, -0.5, 0.125), (-0.375, 2.5, 0.125), (0.5, 7.0, 0.75),\n (0.5, 4.0, 0.75), (0.5, 7.0, 0.5), (2.25, 4.0, 0.75),\n (2.25, 4.0, 0.5), (2.25, 7.0, 0.5), (1.375, 5.5, 0.625),\n (4.0, 7.0, 0.75), (4.0, 10.0, 0.75), (4.0, 7.0, 1.0),\n (2.25, 10.0, 0.75), (2.25, 10.0, 1.0), (2.25, 7.0, 1.0),\n (3.125, 8.5, 0.875), (4.0, 4.0, 0.75), (4.0, 7.0, 0.5),\n (3.125, 5.5, 0.625), (2.25, 10.0, 0.5), (3.125, 8.5, 0.625),\n (2.25, 4.0, 1.0), (3.125, 5.5, 0.875), (0.5, 10.0, 0.75),\n (1.375, 8.5, 0.625), (0.5, 7.0, 1.0), (1.375, 8.5, 0.875),\n (1.375, 5.5, 0.875), (2.25, 4.0, 0.25), (2.25, 1.0, 0.5),\n (1.375, 2.5, 0.375), (4.0, 1.0, 0.25), (4.0, -2.0, 0.25),\n (4.0, 1.0, 0.0), (2.25, -2.0, 0.25), (2.25, -2.0, 0.0),\n (2.25, 1.0, 0.0), (3.125, -0.5, 0.125), (4.0, 4.0, 0.25),\n (4.0, 1.0, 0.5), (3.125, 2.5, 0.375), (2.25, -2.0, 0.5),\n (3.125, -0.5, 0.375), (2.25, 4.0, 0.0), (3.125, 2.5, 0.125),\n (1.375, -0.5, 0.375), (1.375, -0.5, 0.125),\n (1.375, 2.5, 0.125), (0.5, 7.0, 0.25), (1.375, 5.5, 0.375),\n (4.0, 7.0, 0.25), (4.0, 10.0, 0.25), (4.0, 7.0, 0.0),\n (2.25, 10.0, 0.25), (2.25, 10.0, 0.0), (2.25, 7.0, 0.0),\n (3.125, 8.5, 0.125), (3.125, 5.5, 0.375), (3.125, 8.5, 0.375),\n (3.125, 5.5, 0.125), (0.5, 10.0, 0.25), (1.375, 8.5, 0.375),\n (0.5, 7.0, 0.0), (1.375, 8.5, 0.125), (1.375, 5.5, 0.125),\n (0.5, 1.0, 0.75), (1.375, 2.5, 0.625), (4.0, 1.0, 0.75),\n (4.0, -2.0, 0.75), (4.0, 1.0, 1.0), (2.25, -2.0, 0.75),\n (2.25, -2.0, 1.0), (2.25, 1.0, 1.0), (3.125, -0.5, 0.875),\n (3.125, 2.5, 0.625), (3.125, -0.5, 0.625), (3.125, 2.5, 0.875),\n (0.5, -2.0, 0.75), (1.375, -0.5, 0.625), (0.5, 1.0, 1.0),\n (1.375, -0.5, 0.875), (1.375, 2.5, 0.875), (-1.25, 7.0, 0.5),\n (-0.375, 5.5, 0.375), (-3.0, 7.0, 0.25), (-3.0, 10.0, 0.25),\n (-3.0, 7.0, 0.0), (-1.25, 10.0, 0.25), (-1.25, 10.0, 0.0),\n (-1.25, 7.0, 0.0), (-2.125, 8.5, 0.125), (-3.0, 7.0, 0.5),\n (-2.125, 5.5, 0.375), (-1.25, 10.0, 0.5), (-2.125, 8.5, 0.375),\n (-2.125, 5.5, 0.125), (-0.375, 8.5, 0.375),\n (-0.375, 8.5, 0.125), (-0.375, 5.5, 0.125), (-1.25, 4.0, 0.75),\n (-0.375, 5.5, 0.625), (-3.0, 7.0, 0.75), (-3.0, 10.0, 0.75),\n (-3.0, 7.0, 1.0), (-1.25, 10.0, 0.75), (-1.25, 10.0, 1.0),\n (-1.25, 7.0, 1.0), (-2.125, 8.5, 0.875), (-3.0, 4.0, 0.75),\n (-2.125, 5.5, 0.625), (-2.125, 8.5, 0.625), (-1.25, 4.0, 1.0),\n (-2.125, 5.5, 0.875), (-0.375, 8.5, 0.625),\n (-0.375, 8.5, 
0.875), (-0.375, 5.5, 0.875),\n (-0.375, 2.5, 0.625), (-3.0, 1.0, 0.75), (-3.0, -2.0, 0.75),\n (-3.0, 1.0, 1.0), (-1.25, -2.0, 0.75), (-1.25, -2.0, 1.0),\n (-1.25, 1.0, 1.0), (-2.125, -0.5, 0.875), (-2.125, 2.5, 0.625),\n (-2.125, -0.5, 0.625), (-2.125, 2.5, 0.875),\n (-0.375, -0.5, 0.625), (-0.375, -0.5, 0.875),\n (-0.375, 2.5, 0.875)]\n nn_checks = {(2.25, 7.0, 0.75): [(4.0, 7.0, 0.75), (2.25, 7.0, 1.0),\n (4.0, 7.0, 0.5), (4.0, 7.0, 1.0),\n (4.0, 4.0, 0.75), (1.375, 5.5, 0.875),\n (2.25, 4.0, 1.0), (2.25, 4.0, 0.5),\n (2.25, 4.0, 0.75), (3.125, 8.5, 0.875),\n (3.125, 8.5, 0.625), (4.0, 10.0, 0.75),\n (2.25, 10.0, 1.0), (2.25, 10.0, 0.75),\n (2.25, 10.0, 0.5), (1.375, 8.5, 0.625),\n (1.375, 8.5, 0.875), (0.5, 7.0, 0.75),\n (0.5, 7.0, 0.5), (3.125, 5.5, 0.625),\n (3.125, 5.5, 0.875), (0.5, 10.0, 0.75),\n (0.5, 7.0, 1.0), (0.5, 4.0, 0.75),\n (2.25, 7.0, 0.5), (1.375, 5.5, 0.625)],\n (4.0, -2.0, 0.5): [(4.0, -2.0, 0.75), (4.0, -2.0, 0.25),\n (2.25, 1.0, 0.5), (2.25, -2.0, 0.75),\n (2.25, -2.0, 0.5), (2.25, -2.0, 0.25),\n (4.0, 1.0, 0.25), (4.0, 1.0, 0.75),\n (4.0, 1.0, 0.5), (3.125, -0.5, 0.375),\n (3.125, -0.5, 0.625)],\n (-2.125, -0.5, 0.875): [(-1.25, 1.0, 1.0),\n (-1.25, 1.0, 0.75),\n (-1.25, -2.0, 0.75),\n (-1.25, -2.0, 1.0),\n (-3.0, -2.0, 0.75),\n (-3.0, 1.0, 1.0), (-3, -2, 1),\n (-3.0, 1.0, 0.75)]}\n\n init_triangulation(3, 2, check, nn_checks, bounds=[(-3, 4), (-2, 10), (0, 1)])",
"def test_2_2_3D_cube_splits(self):\n check = [(0, 0, 0), (1, 1, 1), (1, 0, 0), (1, 1, 0), (1, 0, 1),\n (0, 1, 0),\n (0, 1, 1), (0, 0, 1), (0.5, 0.5, 0.5), (0.0, 0.5, 0.5),\n (0.0, 0.0, 0.5), (0.0, 0.5, 0.0), (0.5, 0.0, 0.5),\n (0.5, 0.0, 0.0),\n (0.5, 0.5, 0.0), (0.25, 0.25, 0.25), (1.0, 0.5, 0.5),\n (1.0, 1.0, 0.5),\n (1.0, 0.5, 1.0), (0.5, 1.0, 0.5), (0.5, 1.0, 1.0),\n (0.5, 0.5, 1.0),\n (0.75, 0.75, 0.75), (1.0, 0.0, 0.5), (1.0, 0.5, 0.0),\n (0.75, 0.25, 0.25), (0.5, 1.0, 0.0), (0.75, 0.75, 0.25),\n (0.5, 0.0, 1.0), (0.75, 0.25, 0.75), (0.0, 1.0, 0.5),\n (0.25, 0.75, 0.25), (0.0, 0.5, 1.0), (0.25, 0.75, 0.75),\n (0.25, 0.25, 0.75), (0.5, 0.25, 0.25), (0.5, 0.5, 0.25),\n (0.5, 0.25, 0.5), (0.25, 0.5, 0.25), (0.25, 0.5, 0.5),\n (0.25, 0.25, 0.5), (0.375, 0.375, 0.375), (0.0, 0.25, 0.25),\n (0.0, 0.0, 0.25), (0.0, 0.25, 0.0), (0.25, 0.0, 0.25),\n (0.25, 0.0, 0.0), (0.25, 0.25, 0.0), (0.125, 0.125, 0.125),\n (0.0, 0.5, 0.25), (0.0, 0.25, 0.5), (0.125, 0.375, 0.375),\n (0.25, 0.0, 0.5), (0.125, 0.125, 0.375), (0.25, 0.5, 0.0),\n (0.125, 0.375, 0.125), (0.5, 0.0, 0.25), (0.375, 0.125, 0.375),\n (0.5, 0.25, 0.0), (0.375, 0.125, 0.125), (0.375, 0.375, 0.125),\n (0.5, 0.75, 0.75), (0.5, 0.5, 0.75), (0.5, 0.75, 0.5),\n (0.75, 0.5, 0.75), (0.75, 0.5, 0.5), (0.75, 0.75, 0.5),\n (0.625, 0.625, 0.625), (1.0, 0.75, 0.75), (1.0, 1.0, 0.75),\n (1.0, 0.75, 1.0), (0.75, 1.0, 0.75), (0.75, 1.0, 1.0),\n (0.75, 0.75, 1.0), (0.875, 0.875, 0.875), (1.0, 0.5, 0.75),\n (1.0, 0.75, 0.5), (0.875, 0.625, 0.625), (0.75, 1.0, 0.5),\n (0.875, 0.875, 0.625), (0.75, 0.5, 1.0), (0.875, 0.625, 0.875),\n (0.5, 1.0, 0.75), (0.625, 0.875, 0.625), (0.5, 0.75, 1.0),\n (0.625, 0.875, 0.875), (0.625, 0.625, 0.875),\n (0.75, 0.5, 0.25),\n (0.75, 0.25, 0.5), (0.625, 0.375, 0.375), (1.0, 0.25, 0.25),\n (1.0, 0.0, 0.25), (1.0, 0.25, 0.0), (0.75, 0.0, 0.25),\n (0.75, 0.0, 0.0), (0.75, 0.25, 0.0), (0.875, 0.125, 0.125),\n (1.0, 0.5, 0.25), (1.0, 0.25, 0.5), (0.875, 0.375, 0.375),\n (0.75, 0.0, 0.5), (0.875, 0.125, 0.375), (0.75, 0.5, 0.0),\n (0.875, 0.375, 0.125), (0.625, 0.125, 0.375),\n (0.625, 0.125, 0.125),\n (0.625, 0.375, 0.125), (0.5, 0.75, 0.25),\n (0.625, 0.625, 0.375),\n (1.0, 0.75, 0.25), (1.0, 1.0, 0.25), (1.0, 0.75, 0.0),\n (0.75, 1.0, 0.25), (0.75, 1.0, 0.0), (0.75, 0.75, 0.0),\n (0.875, 0.875, 0.125), (0.875, 0.625, 0.375),\n (0.875, 0.875, 0.375),\n (0.875, 0.625, 0.125), (0.5, 1.0, 0.25), (0.625, 0.875, 0.375),\n (0.5, 0.75, 0.0), (0.625, 0.875, 0.125), (0.625, 0.625, 0.125),\n (0.5, 0.25, 0.75), (0.625, 0.375, 0.625), (1.0, 0.25, 0.75),\n (1.0, 0.0, 0.75), (1.0, 0.25, 1.0), (0.75, 0.0, 0.75),\n (0.75, 0.0, 1.0), (0.75, 0.25, 1.0), (0.875, 0.125, 0.875),\n (0.875, 0.375, 0.625), (0.875, 0.125, 0.625),\n (0.875, 0.375, 0.875),\n (0.5, 0.0, 0.75), (0.625, 0.125, 0.625), (0.5, 0.25, 1.0),\n (0.625, 0.125, 0.875), (0.625, 0.375, 0.875),\n (0.25, 0.75, 0.5),\n (0.375, 0.625, 0.375), (0.0, 0.75, 0.25), (0.0, 1.0, 0.25),\n (0.0, 0.75, 0.0), (0.25, 1.0, 0.25), (0.25, 1.0, 0.0),\n (0.25, 0.75, 0.0), (0.125, 0.875, 0.125), (0.0, 0.75, 0.5),\n (0.125, 0.625, 0.375), (0.25, 1.0, 0.5), (0.125, 0.875, 0.375),\n (0.125, 0.625, 0.125), (0.375, 0.875, 0.375),\n (0.375, 0.875, 0.125),\n (0.375, 0.625, 0.125), (0.25, 0.5, 0.75),\n (0.375, 0.625, 0.625),\n (0.0, 0.75, 0.75), (0.0, 1.0, 0.75), (0.0, 0.75, 1.0),\n (0.25, 1.0, 0.75), (0.25, 1.0, 1.0), (0.25, 0.75, 1.0),\n (0.125, 0.875, 0.875), (0.0, 0.5, 0.75), (0.125, 0.625, 0.625),\n (0.125, 0.875, 0.625), (0.25, 0.5, 1.0), (0.125, 0.625, 0.875),\n (0.375, 0.875, 0.625), (0.375, 0.875, 
0.875),\n (0.375, 0.625, 0.875),\n (0.375, 0.375, 0.625), (0.0, 0.25, 0.75), (0.0, 0.0, 0.75),\n (0.0, 0.25, 1.0), (0.25, 0.0, 0.75), (0.25, 0.0, 1.0),\n (0.25, 0.25, 1.0), (0.125, 0.125, 0.875),\n (0.125, 0.375, 0.625),\n (0.125, 0.125, 0.625), (0.125, 0.375, 0.875),\n (0.375, 0.125, 0.625),\n (0.375, 0.125, 0.875), (0.375, 0.375, 0.875)]\n\n nn_checks = {(0.5, 0.25, 0.25): [(0.375, 0.375, 0.125), (0.5, 0.5, 0.0),\n (0.75, 0.25, 0.25),\n (0.625, 0.375, 0.375),\n (0.625, 0.125, 0.375),\n (0.625, 0.125, 0.125),\n (0.5, 0.5, 0.25), (0.25, 0.25, 0.25),\n (0.375, 0.375, 0.375),\n (0.5, 0.25, 0.5), (0.5, 0.5, 0.5),\n (0.5, 0.0, 0.25),\n (0.375, 0.125, 0.375), (0.5, 0.0, 0.5),\n (0.5, 0.25, 0.0),\n (0.375, 0.125, 0.125), (0.5, 0.0, 0.0),\n (0.625, 0.375, 0.125)],\n (0.625, 0.625, 0.875): [(0.75, 0.5, 1.0),\n (0.75, 0.75, 1.0),\n (0.5, 0.75, 1.0), (0.5, 0.5, 1.0),\n (0.5, 0.5, 0.75),\n (0.5, 0.75, 0.75),\n (0.75, 0.5, 0.75),\n (0.75, 0.75, 0.75)],\n (0, 0, 0): [(0.0, 0.25, 0.0), (0.125, 0.125, 0.125),\n (0.0, 0.0, 0.25), (0.25, 0.0, 0.0),\n (0.0, 0.25, 0.25), (0.25, 0.25, 0.0),\n (0.25, 0.0, 0.25)]}\n\n init_triangulation(3, 2, check, nn_checks)",
"def test_3D_m8_1k():\n scal, velo = setup_3D()\n\n advec = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: 'gpu_1k',\n Splitting: 'o2'}\n )\n advec_py = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: '',\n Splitting: 'o2'},\n )\n assertion_3D_withPython(scal, velo, advec, advec_py)",
"def test_multiple_case(self):\r\n\r\n shp = (3, 3)\r\n fx, fy, fz, fw = fmatrices('xyzw')\r\n dx, dy, dz, dw = dmatrices('xyzw')\r\n fv = fvector('r').dimshuffle('x', 0)\r\n dv = dvector('s').dimshuffle('x', 0)\r\n fxv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fyv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fzv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fwv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float32').reshape(1, shp[0])\r\n dxv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dyv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dzv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dwv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float64').reshape(1, shp[0])\r\n\r\n #We must be sure that the Canonizer is working, but that we don't have other\r\n # optimisation that could hide bug in the Canonizer as local_elemwise_fusion\r\n mode = compile.mode.get_default_mode()\r\n old_optimizer = mode._optimizer\r\n try:\r\n mode._optimizer = gof.Query([\"canonicalize\"])\r\n mode._optimizer = mode._optimizer.including('ShapeOpt')\r\n mode._optimizer = mode._optimizer.excluding(\r\n 'local_elemwise_fusion')\r\n\r\n #test x / x -> 1\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([(fx/fx,[fx],[fxv],'float32'),\r\n (dx/dx,[dx],[dxv],'float64'),\r\n (fv/fv,[fv],[fvv],'float32'),\r\n (dv/dv,[dv],[dvv],'float64'),\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert (out == numpy.ones(shp, dtype=out_dtype)).all()\r\n topo = f.maker.fgraph.toposort()\r\n if sym_inputs[0].broadcastable[0]:\r\n assert len(topo) == 2\r\n assert isinstance(topo[0].op, Shape_i)\r\n assert isinstance(topo[1].op, tensor.Alloc)\r\n else:\r\n assert len(topo) == 3\r\n assert isinstance(topo[0].op, Shape_i)\r\n assert isinstance(topo[1].op, Shape_i)\r\n assert isinstance(topo[2].op, tensor.Alloc)\r\n assert(out_dtype == out.dtype)\r\n\r\n #test (x * y) / x -> y\r\n for id,(g, sym_inputs, val_inputs, nb_elemwise, out_dtype) in enumerate([\r\n ((dx*dy)/dx,[dx,dy],[dxv,dyv],0,'float64'),\r\n ((fx*fy)/fx,[fx,fy],[fxv,fyv],0,'float32'),\r\n ((dv*dy)/dv,[dv,dy],[dvv,dyv],0,'float64'),\r\n ((fv*fy)/fv,[fv,fy],[fvv,fyv],0,'float32'),\r\n #must broadcast as their is a dimshuffle in the computation\r\n ((dx*dv)/dx,[dx,dv],[dxv,dvv],1,'float64'),\r\n #topo: [Elemwise{second,no_inplace}(x, <TensorType(float64, row)>)]\r\n ((fx*fv)/fx,[fx,fv],[fxv,fvv],1,'float32')\r\n #topo: [Elemwise{second,no_inplace}(x, <TensorType(float32, row)>)]\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert(out_dtype == out.dtype)\r\n assert numpy.allclose(out, val_inputs[1])\r\n topo = f.maker.fgraph.toposort()\r\n print \"ID TOPO\", id, topo, sym_inputs\r\n for r, t in f.maker.fgraph.shape_feature.shape_of.items():\r\n print ' ', r, t\r\n if topo and not(len(topo)==1 and topo[0].op==deep_copy_op):\r\n for node in topo[:-1]:\r\n assert isinstance(node.op, Shape_i)\r\n assert isinstance(topo[-1].op, tensor.Alloc)\r\n\r\n #test x / y / x -> 1 / y\r\n for id,(g, sym_inputs, val_inputs, nb_elemwise, out_dtype) in enumerate([\r\n ((dx/dy)/dx,[dx,dy],[dxv,dyv],1,'float64'),\r\n ((fx/fy)/fx,[fx,fy],[fxv,fyv],1,'float32'),\r\n ((dv/dy)/dv,[dv,dy],[dvv,dyv],1,'float64'),\r\n 
((fv/fy)/fv,[fv,fy],[fvv,fyv],1,'float32'),\r\n #must broadcast as their is a dimshuffle in the computation\r\n\r\n ((dx/dv)/dx,[dx,dv],[dxv,dvv],1,'float64'),\r\n #topo: [Shape_i, Shape_i, Elemwise{inv,no_inplace}(<TensorType(float64, row)>), Alloc]\r\n ((fx/fv)/fx,[fx,fv],[fxv,fvv],1,'float32'),\r\n #topo:[Shape_i, Shape_i, Elemwise{inv,no_inplace}(<TensorType(float32, row)>), Alloc]\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, (1 / val_inputs[1]))\r\n topo = f.maker.fgraph.toposort()\r\n print topo\r\n elem = [t for t in topo if isinstance(t.op, T.Elemwise)]\r\n assert len(elem) == nb_elemwise\r\n assert isinstance(elem[0].op, (T.Elemwise, ))\r\n assert isinstance(elem[0].op.scalar_op, (\r\n theano.scalar.basic.Inv, theano.scalar.basic.TrueDiv))\r\n assert(out_dtype == out.dtype)\r\n\r\n #test (a / b) * (b / c) * (c / d) -> a / d\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n ((dx / dy) * (dy / dz) * (dz / dw),[dx,dy,dz,dw],[dxv,dyv,dzv,dwv],'float64'),\r\n ((fx / fy) * (fy / fz) * (fz / fw),[fx,fy,fz,fw],[fxv,fyv,fzv,fwv],'float32'),\r\n ((dv / dy) * (dy / dz) * (dz / dw),[dv,dy,dz,dw],[dvv,dyv,dzv,dwv],'float64'),\r\n ((fv / fy) * (fy / fz) * (fz / fw),[fv,fy,fz,fw],[fvv,fyv,fzv,fwv],'float32'),\r\n ((dx / dv) * (dv / dz) * (dz / dw),[dx,dv,dz,dw],[dxv,dvv,dzv,dwv],'float64'),\r\n ((fx / fv) * (fv / fz) * (fz / fw),[fx,fv,fz,fw],[fxv,fvv,fzv,fwv],'float32'),\r\n ((dx / dy) * (dy / dv) * (dv / dw),[dx,dy,dv,dw],[dxv,dyv,dvv,dwv],'float64'),\r\n ((fx / fy) * (fy / fv) * (fv / fw),[fx,fy,fv,fw],[fxv,fyv,fvv,fwv],'float32'),\r\n ((dx / dy) * (dy / dz) * (dz / dv),[dx,dy,dz,dv],[dxv,dyv,dzv,dvv],'float64'),\r\n ((fx / fy) * (fy / fz) * (fz / fv),[fx,fy,fz,fv],[fxv,fyv,fzv,fvv],'float32'),\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, (val_inputs[0] / val_inputs[3]))\r\n topo = f.maker.fgraph.toposort()\r\n assert len(topo) == 1\r\n assert isinstance(topo[0].op, (T.Elemwise, ))\r\n assert isinstance(topo[0].op.scalar_op,\r\n theano.scalar.basic.TrueDiv)\r\n assert len(topo[0].inputs) == 2\r\n assert(out_dtype == out.dtype)\r\n\r\n #test (2.0 * x) / (4.0 * y) -> (0.5 * x) / y\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n (((2.0*dx)/(4.0*dy)),[dx,dy],[dxv,dyv],'float64'),\r\n (((2.0*fx)/(4.0*fy)),[fx,fy],[fxv,fyv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n (((2.0*dv)/(4.0*dy)),[dv,dy],[dvv,dyv],'float64'),\r\n (((2.0*fv)/(4.0*fy)),[fv,fy],[fvv,fyv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n (((2.0*dx)/(4.0*dv)),[dx,dv],[dxv,dvv],'float64'),\r\n (((2.0*fx)/(4.0*fv)),[fx,fv],[fxv,fvv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ]):\r\n\r\n if isinstance(out_dtype, dict):\r\n out_dtype = out_dtype[config.cast_policy]\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, (0.5 *\r\n val_inputs[0] / val_inputs[1]))\r\n topo = f.maker.fgraph.toposort()\r\n assert len(topo) == 2\r\n assert isinstance(topo[0].op, (T.Elemwise, ))\r\n assert isinstance(topo[0].op.scalar_op,\r\n theano.scalar.basic.Mul)\r\n assert len(topo[0].inputs) == 2\r\n assert isinstance(topo[1].op, (T.Elemwise, ))\r\n assert isinstance(topo[1].op.scalar_op,\r\n theano.scalar.basic.TrueDiv)\r\n assert len(topo[1].inputs) == 2\r\n assert(out_dtype == 
out.dtype)\r\n\r\n #test 2 * x / 2 -> x\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n ((2*dx)/2,[dx],[dxv],'float64'),\r\n ((2*fx)/2,[fx],[fxv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ((2*dv)/2,[dv],[dvv],'float64'),\r\n ((2*fv)/2,[fv],[fvv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ]):\r\n if isinstance(out_dtype, dict):\r\n out_dtype = out_dtype[config.cast_policy]\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, val_inputs[0])\r\n topo = f.maker.fgraph.toposort()\r\n assert len(topo) == 1\r\n topo[0].op == deep_copy_op\r\n assert(out_dtype == out.dtype)\r\n\r\n #test x / abs(x) -> sign(x)\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n (dx/abs(dx),[dx],[0.5-dxv],'float64'),\r\n (fx/abs(fx),[fx],[0.5-fxv], 'float32'),\r\n (dx/abs(dx),[dx],[0.1*dxv],'float64'),\r\n (fx/abs(fx),[fx],[0.1*fxv], 'float32'),\r\n (dv/abs(dv),[dv],[0.5-dvv],'float64'),\r\n (fv/abs(fv),[fv],[0.5-fvv], 'float32'),\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.all(numpy.isfinite(out))\r\n assert numpy.allclose(out, numpy.sign(val_inputs[0]))\r\n assert(out_dtype == out.dtype)\r\n assert len(f.maker.fgraph.toposort()) == 1\r\n\r\n #test (2*x) / (3*abs(x)) -> sign(x)\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n ((2 * dx) / (3 * abs(dx)), [dx], [0.5 - dxv], 'float64'),\r\n ((2 * fx) / (3 * abs(fx)), [fx], [0.5 - fxv],\r\n {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ((2 * dx) / (3 * abs(dx)), [dx], [0.1 * dxv], 'float64'),\r\n ((2 * fx) / (3 * abs(fx)), [fx], [0.1 * fxv],\r\n {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ((2 * dv) / (3 * abs(dv)), [dv], [0.5 - dvv], 'float64'),\r\n ((2 * fv) / (3 * abs(fv)), [fv], [0.5 - fvv],\r\n {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ]):\r\n\r\n if isinstance(out_dtype, dict):\r\n out_dtype = out_dtype[config.cast_policy]\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n topo = f.maker.fgraph.toposort()\r\n out = f(*val_inputs)\r\n assert numpy.all(numpy.isfinite(out))\r\n assert numpy.allclose(out, numpy.sign(val_inputs[0]) * 2 / 3)\r\n assert(out_dtype == out.dtype)\r\n finally:\r\n mode._optimizer = old_optimizer",
"def test_2D_m8_1k():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: 'gpu_1k',\n Splitting: 'o2'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: '',\n Splitting: 'o2'},\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def test_1_2_2D_cube_splits(self):\n check = [(0, 0), (1, 1), (1, 0), (0, 1), (0.5, 0.5), (0.0, 0.5),\n (0.5, 0.0),\n (0.25, 0.25), (1.0, 0.5), (0.5, 1.0), (0.75, 0.75),\n (0.75, 0.25),\n (0.25, 0.75), (0.5, 0.25), (0.25, 0.5), (0.375, 0.375),\n (0.0, 0.25),\n (0.25, 0.0), (0.125, 0.125), (0.125, 0.375), (0.375, 0.125),\n (0.5, 0.75), (0.75, 0.5), (0.625, 0.625), (1.0, 0.75),\n (0.75, 1.0),\n (0.875, 0.875), (0.875, 0.625), (0.625, 0.875), (0.625, 0.375),\n (1.0, 0.25), (0.75, 0.0), (0.875, 0.125), (0.875, 0.375),\n (0.625, 0.125), (0.375, 0.625), (0.0, 0.75), (0.25, 1.0),\n (0.125, 0.875), (0.125, 0.625), (0.375, 0.875)]\n\n nn_checks = {(0, 0): [(0.25, 0.0), (0.0, 0.25), (0.125, 0.125)],\n (0.625, 0.375): [(0.5, 0.5), (0.75, 0.25), (0.75, 0.5),\n (0.5, 0.25)],\n (0, 1): [(0.25, 1.0), (0.125, 0.875),(0.0, 0.75)],\n (0.625, 0.125): [(0.5, 0.0), (0.75, 0.25), (0.75, 0.0),\n (0.5, 0.25)]}\n\n\n init_triangulation(2, 2, check, nn_checks)",
"def test_3D_m4_2k_sFH():\n scal, velo = setup_3D()\n\n advec = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: 'gpu_2k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_3D_withPython(scal, velo, advec, advec_py)",
"def test_2D_m8_2k():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: 'gpu_2k',\n Splitting: 'o2'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: M8Prime,\n Support: '',\n Splitting: 'o2'},\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def test_3_2_4D_cube_splits(self):\n check = [(0, 0, 0, 0), (1, 1, 1, 1), (1, 0, 0, 0), (1, 1, 0, 0),\n (1, 1, 1, 0),\n (1, 1, 0, 1), (1, 0, 1, 0), (1, 0, 1, 1), (1, 0, 0, 1),\n (0, 1, 0, 0),\n (0, 1, 1, 0), (0, 1, 1, 1), (0, 1, 0, 1), (0, 0, 1, 0),\n (0, 0, 1, 1),\n (0, 0, 0, 1), (0.5, 0.5, 0.5, 0.5), (0.0, 0.5, 0.5, 0.5),\n (0.0, 0.0, 0.5, 0.5), (0.0, 0.0, 0.0, 0.5),\n (0.0, 0.0, 0.5, 0.0),\n (0.0, 0.5, 0.0, 0.5), (0.0, 0.5, 0.0, 0.0),\n (0.0, 0.5, 0.5, 0.0),\n (0.5, 0.0, 0.5, 0.5), (0.5, 0.0, 0.0, 0.5),\n (0.5, 0.0, 0.0, 0.0),\n (0.5, 0.0, 0.5, 0.0), (0.5, 0.5, 0.0, 0.5),\n (0.5, 0.5, 0.0, 0.0),\n (0.5, 0.5, 0.5, 0.0), (0.25, 0.25, 0.25, 0.25),\n (1.0, 0.5, 0.5, 0.5),\n (1.0, 1.0, 0.5, 0.5), (1.0, 1.0, 1.0, 0.5),\n (1.0, 1.0, 0.5, 1.0),\n (1.0, 0.5, 1.0, 0.5), (1.0, 0.5, 1.0, 1.0),\n (1.0, 0.5, 0.5, 1.0),\n (0.5, 1.0, 0.5, 0.5), (0.5, 1.0, 1.0, 0.5),\n (0.5, 1.0, 1.0, 1.0),\n (0.5, 1.0, 0.5, 1.0), (0.5, 0.5, 1.0, 0.5),\n (0.5, 0.5, 1.0, 1.0),\n (0.5, 0.5, 0.5, 1.0), (0.75, 0.75, 0.75, 0.75),\n (1.0, 0.0, 0.5, 0.5),\n (1.0, 0.0, 0.0, 0.5), (1.0, 0.0, 0.5, 0.0),\n (1.0, 0.5, 0.0, 0.5),\n (1.0, 0.5, 0.0, 0.0), (1.0, 0.5, 0.5, 0.0),\n (0.75, 0.25, 0.25, 0.25),\n (1.0, 1.0, 0.0, 0.5), (1.0, 1.0, 0.5, 0.0),\n (0.5, 1.0, 0.0, 0.5),\n (0.5, 1.0, 0.0, 0.0), (0.5, 1.0, 0.5, 0.0),\n (0.75, 0.75, 0.25, 0.25),\n (1.0, 0.5, 1.0, 0.0), (0.5, 1.0, 1.0, 0.0),\n (0.5, 0.5, 1.0, 0.0),\n (0.75, 0.75, 0.75, 0.25), (1.0, 0.5, 0.0, 1.0),\n (0.5, 1.0, 0.0, 1.0),\n (0.5, 0.5, 0.0, 1.0), (0.75, 0.75, 0.25, 0.75),\n (1.0, 0.0, 1.0, 0.5),\n (0.5, 0.0, 1.0, 0.5), (0.5, 0.0, 1.0, 0.0),\n (0.75, 0.25, 0.75, 0.25),\n (1.0, 0.0, 0.5, 1.0), (0.5, 0.0, 1.0, 1.0),\n (0.5, 0.0, 0.5, 1.0),\n (0.75, 0.25, 0.75, 0.75), (0.5, 0.0, 0.0, 1.0),\n (0.75, 0.25, 0.25, 0.75), (0.0, 1.0, 0.5, 0.5),\n (0.0, 1.0, 0.0, 0.5),\n (0.0, 1.0, 0.5, 0.0), (0.25, 0.75, 0.25, 0.25),\n (0.0, 1.0, 1.0, 0.5),\n (0.0, 0.5, 1.0, 0.5), (0.0, 0.5, 1.0, 0.0),\n (0.25, 0.75, 0.75, 0.25),\n (0.0, 1.0, 0.5, 1.0), (0.0, 0.5, 1.0, 1.0),\n (0.0, 0.5, 0.5, 1.0),\n (0.25, 0.75, 0.75, 0.75), (0.0, 0.5, 0.0, 1.0),\n (0.25, 0.75, 0.25, 0.75), (0.0, 0.0, 1.0, 0.5),\n (0.25, 0.25, 0.75, 0.25), (0.0, 0.0, 0.5, 1.0),\n (0.25, 0.25, 0.75, 0.75), (0.25, 0.25, 0.25, 0.75)]\n\n nn_checks = {(0, 0, 0, 0): [(0.0, 0.5, 0.5, 0.0), (0.5, 0.0, 0.5, 0.5),\n (0.5, 0.5, 0.0, 0.5),\n (0.25, 0.25, 0.25, 0.25),\n (0.5, 0.0, 0.0, 0.5), (0.5, 0.0, 0.5, 0.0),\n (0.5, 0.5, 0.0, 0.0), (0.0, 0.0, 0.5, 0.0),\n (0.0, 0.5, 0.0, 0.0), (0.0, 0.0, 0.0, 0.5),\n (0.0, 0.5, 0.5, 0.5), (0.0, 0.0, 0.5, 0.5),\n (0.0, 0.5, 0.0, 0.5), (0.5, 0.0, 0.0, 0.0),\n (0.5, 0.5, 0.5, 0.0)],\n (1.0, 1.0, 0.5, 0.5): [(1.0, 1.0, 0.5, 1.0), (1, 1, 0, 1),\n (1.0, 1.0, 1.0, 0.5),\n (1.0, 0.5, 0.5, 0.5), (1, 1, 1, 0),\n (1.0, 1.0, 0.5, 0.0),\n (1.0, 1.0, 0.0, 0.5), (1, 1, 0, 0),\n (1, 1, 1, 1), (0.5, 1.0, 0.5, 0.5),\n (0.5, 0.5, 0.5, 0.5),\n (0.75, 0.75, 0.75, 0.75),\n (0.75, 0.75, 0.25, 0.25),\n (0.75, 0.75, 0.75, 0.25),\n (0.75, 0.75, 0.25, 0.75)],\n (0.25, 0.25, 0.25, 0.75): [(0.0, 0.0, 0.5, 1.0),\n (0.0, 0.5, 0.0, 1.0),\n (0.5, 0.0, 0.0, 1.0),\n (0.5, 0.5, 0.5, 1.0),\n (0, 0, 0, 1),\n (0.0, 0.5, 0.5, 1.0),\n (0.5, 0.0, 0.5, 1.0),\n (0.5, 0.5, 0.0, 1.0),\n (0.0, 0.0, 0.0, 0.5),\n (0.0, 0.0, 0.5, 0.5),\n (0.0, 0.5, 0.5, 0.5),\n (0.0, 0.5, 0.0, 0.5),\n (0.5, 0.0, 0.5, 0.5),\n (0.5, 0.5, 0.0, 0.5),\n (0.5, 0.5, 0.5, 0.5),\n (0.5, 0.0, 0.0, 0.5)]}\n\n init_triangulation(4, 1, check, nn_checks)",
"def init_stitch(N):\n\tif N is None:\n\t\tN = np.int32(HII_DIM) #prepare for stitching\n\tMETA_GRID_SIZE = DIM/N\n\tM = np.int32(HII_DIM/META_GRID_SIZE)\n\t#HII_DIM = np.int32(HII_DIM)\n\tf_pixel_factor = DIM/HII_DIM;\n\tscale = np.float32(BOX_LEN/DIM)\n\tprint 'scale', scale\n\tHII_scale = np.float32(BOX_LEN/HII_DIM)\n\tshape = (DIM,DIM,N)\n\tstitch_grid_size = (DIM/(block_size[0]),\n\t\t\t\t\t\tDIM/(block_size[0]),\n\t\t\t\t\t\tN/(block_size[0]))\n\tHII_stitch_grid_size = (HII_DIM/(block_size[0]),\n\t\t\t\t\t\tHII_DIM/(block_size[0]),\n\t\t\t\t\t\tM/(block_size[0]))\n\t#ratio of large box to small size\n\tkernel_source = open(cmd_folder+\"/initialize_stitch.cu\").read()\n\tkernel_code = kernel_source % {\n\n\t\t'DELTAK': DELTA_K,\n\t\t'DIM': DIM, \n\t\t'VOLUME': VOLUME,\n\t\t'META_BLOCKDIM': N\n\t}\n\tmain_module = nvcc.SourceModule(kernel_code)\n\tinit_stitch = main_module.get_function(\"init_kernel\")\n\tHII_filter = main_module.get_function(\"HII_filter\")\n\tsubsample_kernel = main_module.get_function(\"subsample\")\n\tvelocity_kernel = main_module.get_function(\"set_velocity\")\n\tpspec_texture = main_module.get_texref(\"pspec\")\n\tMRGgen = MRG32k3aRandomNumberGenerator(seed_getter=seed_getter_uniform, offset=0)\n\tplan2d = Plan((np.int64(DIM), np.int64(DIM)), dtype=np.complex64)\n\tplan1d = Plan((np.int64(DIM)), dtype=np.complex64)\n\tprint \"init pspec\"\n\tinterpPspec, interpSize = init_pspec() #interpPspec contains both k array and P array\n\tinterp_cu = cuda.matrix_to_array(interpPspec, order='F')\n\tcuda.bind_array_to_texref(interp_cu, pspec_texture)\n\t#hbox_large = pyfftw.empty_aligned((DIM, DIM, DIM), dtype='complex64')\n\thbox_large = np.zeros((DIM, DIM, DIM), dtype=np.complex64)\n\t#hbox_small = np.zeros(HII_shape, dtype=np.float32)\n\t#hbox_large = n\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\n\t# Set up pinned memory for transfer\n\t#largebox_hs = cuda.aligned_empty(shape=shape, dtype=np.float32, alignment=resource.getpagesize())\n\tlargebox_pin = cuda.pagelocked_empty(shape=shape, dtype=np.float32)\n\tlargecbox_pin = cuda.pagelocked_empty(shape=shape, dtype=np.complex64)\n\n\tlargebox_d = gpuarray.zeros(shape, dtype=np.float32)\n\tlargebox_d_imag = gpuarray.zeros(shape, dtype=np.float32)\n\tprint \"init boxes\"\n\tfor meta_z in xrange(META_GRID_SIZE):\n\t\t# MRGgen = MRG32k3aRandomNumberGenerator(seed_getter=seed_getter_uniform, offset=meta_x*N**3)\n\t\tinit_stitch(largebox_d, DIM, np.int32(meta_z),block=block_size, grid=stitch_grid_size)\n\t\tinit_stitch(largebox_d_imag, DIM, np.int32(meta_z),block=block_size, grid=stitch_grid_size)\n\t\tlargebox_d *= MRGgen.gen_normal(shape, dtype=np.float32)\n\t\tlargebox_d_imag *= MRGgen.gen_normal(shape, dtype=np.float32)\n\t\tlargebox_d = largebox_d + np.complex64(1.j) * largebox_d_imag\n\t\tcuda.memcpy_dtoh_async(largecbox_pin, largebox_d)\n\t\thbox_large[:, :, meta_z*N:(meta_z+1)*N] = largecbox_pin.copy()\n\t#if want to get velocity need to use this\n\tif True:\n\t\tprint \"saving kbox\"\n\t\tnp.save(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN), hbox_large)\n\n\tprint \"Executing FFT on device\"\n\t#hbox_large = pyfftw.interfaces.numpy_fft.ifftn(hbox_large).real\n\thbox_large = fft_stitch(N, plan2d, plan1d, hbox_large, largebox_d).real\n\tprint hbox_large.dtype\n\tprint \"Finished FFT on device\"\n\tnp.save(parent_folder+\"/Boxes/deltax_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN), hbox_large)\n\t\n\tif True:\n\t\tprint \"loading kbox\"\n\t\thbox_large = 
np.load(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN))\n\tfor meta_z in xrange(META_GRID_SIZE):\n\t\tlargebox_pin = hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy()\n\t\t#cuda.memcpy_htod_async(largebox_d, largebox_pin)\n\t\tlargebox_d = gpuarray.to_gpu_async(hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\tHII_filter(largebox_d, DIM, np.int32(meta_z), ZERO, smoothR, block=block_size, grid=stitch_grid_size);\n\t\thbox_large[:, :, meta_z*N:(meta_z+1)*N] = largebox_d.get_async()\n\t#import IPython; IPython.embed()\n\tprint \"Executing FFT on host\"\n\t#hbox_large = hifft(hbox_large).astype(np.complex64).real\n\t#hbox_large = pyfftw.interfaces.numpy_fft.ifftn(hbox_large).real\n\thbox_large = fft_stitch(N, plan2d, plan1d, hbox_large, largebox_d).real\n\tprint \"Finished FFT on host\"\n\t#import IPython; IPython.embed()\n\n\t# for meta_x in xrange(META_GRID_SIZE):\n\t# \tfor meta_y in xrange(META_GRID_SIZE):\n\t# \t\tfor meta_z in xrange(META_GRID_SIZE):\n\t# \t\t\tlargebox_d = gpuarray.to_gpu(hbox_large[meta_x*N:(meta_x+1)*N, meta_y*N:(meta_y+1)*N, meta_z*N:(meta_z+1)*N])\n\t# \t\t\tHII_filter(largebox_d, N, np.int32(meta_x), np.int32(meta_y), np.int32(meta_z), ZERO, smoothR, block=block_size, grid=grid_size);\n\t# \t\t\thbox_large[meta_x*N:(meta_x+1)*N, meta_y*N:(meta_y+1)*N, meta_z*N:(meta_z+1)*N] = largebox_d.get()\n\t#plan = Plan(shape, dtype=np.complex64)\n\t#plan.execute(largebox_d, inverse=True) #FFT to real space of smoothed box\n\t#largebox_d /= VOLUME #divide by VOLUME if using fft (vs ifft)\n\n\n\t# This saves a large resolution deltax\n\n\t\n\tprint \"downsampling\"\n\tsmallbox_d = gpuarray.zeros((HII_DIM,HII_DIM,M), dtype=np.float32)\n\tfor meta_z in xrange(META_GRID_SIZE):\n\t\tlargebox_pin = hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy()\n\t\tcuda.memcpy_dtoh_async(largecbox_pin, largebox_d)\n\t\t#largebox_d = gpuarray.to_gpu_async(hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\tlargebox_d /= scale**3 #\n\t\tsubsample_kernel(largebox_d, smallbox_d, DIM, HII_DIM, PIXEL_FACTOR, block=block_size, grid=HII_stitch_grid_size) #subsample in real space\n\t\thbox_small[:, :, meta_z*M:(meta_z+1)*M] = smallbox_d.get_async()\n\tnp.save(parent_folder+\"/Boxes/smoothed_deltax_z0.00_{0:d}_{1:.0f}Mpc\".format(HII_DIM, BOX_LEN), hbox_small)\n\t#import IPython; IPython.embed()\n\n\n\t# To get velocities: reload the k-space box\n\thbox_large = np.load(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN))\n\thvbox_large = np.zeros((DIM, DIM, DIM), dtype=np.float32)\n\thvbox_small = np.zeros(HII_shape, dtype=np.float32)\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\tlargevbox_d = gpuarray.zeros((DIM,DIM,N), dtype=np.complex64)\n\tsmallvbox_d = gpuarray.zeros((HII_DIM, HII_DIM, M), dtype=np.float32)\n\tfor num, mode in enumerate(['x', 'y', 'z']):\n\t\tfor meta_z in xrange(META_GRID_SIZE):\n\t\t\tlargebox_d = gpuarray.to_gpu_async(hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\t\t#largebox_d /= VOLUME #divide by VOLUME if using fft (vs ifft)\n\t\t\tvelocity_kernel(largebox_d, largevbox_d, DIM, np.int32(meta_z), np.int32(num), block=block_size, grid=stitch_grid_size)\n\t\t\tHII_filter(largevbox_d, DIM, ZERO, smoothR, block=block_size, grid=stitch_grid_size)\n\t\t\tprint hvbox_large.shape, largevbox_d.shape\n\t\t\thvbox_large[:, :, meta_z*N:(meta_z+1)*N] = largevbox_d.get_async()\n\t\thvbox_large = fft_stitch(N, plan2d, plan1d, hvbox_large, largevbox_d).real\n\t\tfor meta_z in xrange(META_GRID_SIZE):\n\t\t\tlargevbox_d = 
gpuarray.to_gpu_async(hvbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\t\tsubsample_kernel(largevbox_d.real, smallvbox_d, DIM, HII_DIM,PIXEL_FACTOR, block=block_size, grid=HII_stitch_grid_size)\n\t\t\thvbox_small[:, :, meta_z*M:(meta_z+1)*M] = smallvbox_d.get_async()\n\t\tnp.save(parent_folder+\"/Boxes/v{0}overddot_{1:d}_{2:.0f}Mpc\".format(mode, HII_DIM, BOX_LEN), smallvbox_d.get())\n\n\treturn",
"def test_2_layer():\r\n # angular frequency in radians * THz\r\n w = 100 * nu.THz\r\n # Relative permittivity of metal and dielectric\r\n em = -4.56 + 0.12j\r\n ed = 1.23 + 0.01j\r\n ex_list = ez_list = [ed, em]\r\n # Relative permeabilities\r\n mu_list = [1,1]\r\n # Dictionary of input parameters\r\n input_params = {'w': w, 'd_list': [inf,inf], 'ex_list': ex_list,\r\n 'ez_list': ez_list, 'mu_list': mu_list}\r\n \r\n # Calculate the theoretical kx\r\n theo_kx = (w / nu.c0) * cmath.sqrt((em * ed) / (em + ed))\r\n if theo_kx.imag < 0:\r\n theo_kx *= -1\r\n print('Theoretical kx:',\r\n '(%.7g+%.7gj) rad/um' % (theo_kx.real / nu.um**-1, theo_kx.imag / nu.um**-1))\r\n \r\n # If I use the theoretical kx value, the mode should be correct and\r\n # all my tests should pass.\r\n params = deepcopy(input_params)\r\n params['kx'] = theo_kx\r\n params = find_all_params_from_kx(params)\r\n kzd, kzm = params['kz_list']\r\n # check that kz_list is correct\r\n assert_floats_are_equal(kzd**2, (w**2 / nu.c0**2) * ed**2 / (em + ed))\r\n assert_floats_are_equal(kzm**2, (w**2 / nu.c0**2) * em**2 / (em + ed))\r\n # check that layer_bottom_list is correct\r\n assert params['layer_bottom_list'][0] == -inf\r\n assert params['layer_bottom_list'][1] == 0\r\n # Check that the boundary condition matrix agrees with hand-calculation\r\n bc_mat = bc_matrix(params)\r\n # ...top-left is Ex0down / H0down\r\n assert_floats_are_equal(bc_mat[0,0], -kzd / (w * ed * nu.eps0))\r\n # ...top-right is -Ex1up / H1up\r\n assert_floats_are_equal(bc_mat[0,1], -kzm / (w * em * nu.eps0))\r\n # ...bottom-left is eps0 * Ez0down / H0down\r\n assert_floats_are_equal(bc_mat[1,0], ed * -theo_kx / (w * ed * nu.eps0))\r\n # ...bottom-right is -eps1 * Ez1up / H1up\r\n assert_floats_are_equal(bc_mat[1,1], -em * -theo_kx / (w * em * nu.eps0))\r\n # Check that one of the eigenvalues is almost zero (compared to the size\r\n # of the matrix elements).\r\n eigenvalues = np.linalg.eig(bc_mat)[0]\r\n assert abs(eigenvalues).min() / abs(bc_mat).max() < 1e-6\r\n # Check that the mode passes all tests.\r\n assert check_mode(params, thorough=True) is True\r\n # Check that I can scale the fields and it still passes all tests.\r\n params_scaled = rescale_fields(1.23+4.56j, params)\r\n assert check_mode(params_scaled, thorough=True) is True\r\n \r\n # Now try my kx-finding algorithm, to see if it finds the right value.\r\n kx_list = find_kx(input_params)\r\n print('kx_list:',\r\n ['(%.7g+%.7gj) rad/um' % (kx.real / nu.um**-1, kx.imag / nu.um**-1)\r\n for kx in kx_list])\r\n kx = kx_list[0]\r\n assert_floats_are_equal(theo_kx, kx)\r\n \r\n plot_mode(params)\r\n \r\n print('If you see this message, all the tests succeeded!!')",
"def test_3D_m4_1k_sFH():\n scal, velo = setup_3D()\n\n advec = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: 'gpu_1k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_3D_withPython(scal, velo, advec, advec_py)",
"def kernel_test(slabs, data, backend):\n Q = data[:, 0]\n\n layers = []\n for thickness, rsld, isld, sigma in slabs:\n layers.append(\n model.Layer(\n b=(rsld - 1j * isld), dens=0.1, d=thickness, sigma=sigma\n )\n )\n layers.reverse()\n stack = model.Stack(Layers=list(layers[1:-1]), Repetitions=1)\n sample = model.Sample(\n Stacks=[stack], Ambient=layers[-1], Substrate=layers[0]\n )\n # print(sample)\n\n inst = model.Instrument(\n probe=backend,\n wavelength=1.54,\n coords=\"q\",\n I0=1,\n res=0,\n restype=\"no conv\",\n respoints=5,\n resintrange=2,\n beamw=0.1,\n footype=\"no corr\",\n samplelen=10,\n pol=\"uu\",\n )\n if data.shape[1] == 4:\n dQ = data[:, 3]\n inst.restype = \"full conv and varying res.\"\n inst.res = dQ\n if backend == \"neutron pol spin flip\":\n # memory issues in matrix formalism if too many data points\n inst.respoints = 101\n else:\n inst.respoints = (\n 10001 # try to use same convolution as ref1d when generating\n )\n inst.resintrange = 3.5\n\n # print(inst)\n R = sample.SimSpecular(Q, inst)\n\n assert R.shape == data[:, 1].shape\n if data.shape[1] == 4:\n # validation accuracy is reduced for resolution runs, as strongly\n # depends on numerical convolution scheme\n if backend == \"neutron pol spin flip\":\n np.testing.assert_allclose(R, data[:, 1], rtol=0.005)\n else:\n np.testing.assert_allclose(R, data[:, 1], rtol=0.001)\n else:\n np.testing.assert_allclose(R, data[:, 1], rtol=0.001)",
"def test_2D_m4_1k_sFH():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: 'gpu_1k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def lpt_prototype(mesh,\n nc=FLAGS.nc,\n bs=FLAGS.box_size,\n batch_size=FLAGS.batch_size,\n a0=FLAGS.a0,\n a=FLAGS.af,\n nsteps=FLAGS.nsteps):\n\n stages = np.linspace(a0, a, nsteps, endpoint=True)\n klin = np.loadtxt('../flowpm/data/Planck15_a1p00.txt').T[0]\n plin = np.loadtxt('../flowpm/data/Planck15_a1p00.txt').T[1]\n ipklin = iuspline(klin, plin)\n\n # Define the named dimensions\n # Parameters of the small scales decomposition\n n_block_x = FLAGS.nx\n n_block_y = FLAGS.ny\n n_block_z = 1\n halo_size = FLAGS.hsize\n\n if halo_size >= 0.5 * min(nc // n_block_x, nc // n_block_y, nc // n_block_z):\n new_size = int(0.5 *\n min(nc // n_block_x, nc // n_block_y, nc // n_block_z))\n print('WARNING: REDUCING HALO SIZE from %d to %d' % (halo_size, new_size))\n halo_size = new_size\n\n # Parameters of the large scales decomposition\n downsampling_factor = FLAGS.dsample\n lnc = nc // 2**downsampling_factor\n\n #\n\n fx_dim = mtf.Dimension(\"nx\", nc)\n fy_dim = mtf.Dimension(\"ny\", nc)\n fz_dim = mtf.Dimension(\"nz\", nc)\n\n tfx_dim = mtf.Dimension(\"tx\", nc)\n tfy_dim = mtf.Dimension(\"ty\", nc)\n tfz_dim = mtf.Dimension(\"tz\", nc)\n\n # Dimensions of the low resolution grid\n x_dim = mtf.Dimension(\"nx_lr\", lnc)\n y_dim = mtf.Dimension(\"ny_lr\", lnc)\n z_dim = mtf.Dimension(\"nz_lr\", lnc)\n\n tx_dim = mtf.Dimension(\"tx_lr\", lnc)\n ty_dim = mtf.Dimension(\"ty_lr\", lnc)\n tz_dim = mtf.Dimension(\"tz_lr\", lnc)\n\n nx_dim = mtf.Dimension('nx_block', n_block_x)\n ny_dim = mtf.Dimension('ny_block', n_block_y)\n nz_dim = mtf.Dimension('nz_block', n_block_z)\n\n sx_dim = mtf.Dimension('sx_block', nc // n_block_x)\n sy_dim = mtf.Dimension('sy_block', nc // n_block_y)\n sz_dim = mtf.Dimension('sz_block', nc // n_block_z)\n\n k_dims = [tx_dim, ty_dim, tz_dim]\n\n batch_dim = mtf.Dimension(\"batch\", batch_size)\n pk_dim = mtf.Dimension(\"npk\", len(plin))\n pk = mtf.import_tf_tensor(mesh, plin.astype('float32'), shape=[pk_dim])\n\n # Compute necessary Fourier kernels\n kvec = flowpm.kernels.fftk((nc, nc, nc), symmetric=False)\n kx = mtf.import_tf_tensor(mesh,\n kvec[0].squeeze().astype('float32'),\n shape=[tfx_dim])\n ky = mtf.import_tf_tensor(mesh,\n kvec[1].squeeze().astype('float32'),\n shape=[tfy_dim])\n kz = mtf.import_tf_tensor(mesh,\n kvec[2].squeeze().astype('float32'),\n shape=[tfz_dim])\n kv = [ky, kz, kx]\n\n # kvec for low resolution grid\n kvec_lr = flowpm.kernels.fftk([lnc, lnc, lnc], symmetric=False)\n\n kx_lr = mtf.import_tf_tensor(mesh,\n kvec_lr[0].squeeze().astype('float32') /\n 2**downsampling_factor,\n shape=[tx_dim])\n ky_lr = mtf.import_tf_tensor(mesh,\n kvec_lr[1].squeeze().astype('float32') /\n 2**downsampling_factor,\n shape=[ty_dim])\n kz_lr = mtf.import_tf_tensor(mesh,\n kvec_lr[2].squeeze().astype('float32') /\n 2**downsampling_factor,\n shape=[tz_dim])\n kv_lr = [ky_lr, kz_lr, kx_lr]\n\n # kvec for high resolution blocks\n padded_sx_dim = mtf.Dimension('padded_sx_block',\n nc // n_block_x + 2 * halo_size)\n padded_sy_dim = mtf.Dimension('padded_sy_block',\n nc // n_block_y + 2 * halo_size)\n padded_sz_dim = mtf.Dimension('padded_sz_block',\n nc // n_block_z + 2 * halo_size)\n kvec_hr = flowpm.kernels.fftk([\n nc // n_block_x + 2 * halo_size, nc // n_block_y + 2 * halo_size,\n nc // n_block_z + 2 * halo_size\n ],\n symmetric=False)\n\n kx_hr = mtf.import_tf_tensor(mesh,\n kvec_hr[0].squeeze().astype('float32'),\n shape=[padded_sx_dim])\n ky_hr = mtf.import_tf_tensor(mesh,\n kvec_hr[1].squeeze().astype('float32'),\n shape=[padded_sy_dim])\n kz_hr = 
mtf.import_tf_tensor(mesh,\n kvec_hr[2].squeeze().astype('float32'),\n shape=[padded_sz_dim])\n kv_hr = [ky_hr, kz_hr, kx_hr]\n\n shape = [batch_dim, fx_dim, fy_dim, fz_dim]\n lr_shape = [batch_dim, x_dim, y_dim, z_dim]\n hr_shape = [batch_dim, nx_dim, ny_dim, nz_dim, sx_dim, sy_dim, sz_dim]\n part_shape = [batch_dim, fx_dim, fy_dim, fz_dim]\n\n # Begin simulation\n\n initc = mtfpm.linear_field(mesh, shape, bs, nc, pk, kv)\n\n # Reshaping array into high resolution mesh\n field = mtf.slicewise(lambda x: tf.expand_dims(\n tf.expand_dims(tf.expand_dims(x, axis=1), axis=1), axis=1), [initc],\n output_dtype=tf.float32,\n output_shape=hr_shape,\n name='my_reshape',\n splittable_dims=lr_shape[:-1] + hr_shape[1:4] +\n part_shape[1:3])\n\n for block_size_dim in hr_shape[-3:]:\n field = mtf.pad(field, [halo_size, halo_size], block_size_dim.name)\n\n for blocks_dim, block_size_dim in zip(hr_shape[1:4], field.shape[-3:]):\n field = mpm.halo_reduce(field, blocks_dim, block_size_dim, halo_size)\n\n field = mtf.reshape(field, field.shape + [mtf.Dimension('h_dim', 1)])\n high = field\n low = mesh_utils.downsample(field, downsampling_factor, antialias=True)\n\n low = mtf.reshape(low, low.shape[:-1])\n high = mtf.reshape(high, high.shape[:-1])\n\n for block_size_dim in hr_shape[-3:]:\n low = mtf.slice(low, halo_size // 2**downsampling_factor,\n block_size_dim.size // 2**downsampling_factor,\n block_size_dim.name)\n # Hack usisng custom reshape because mesh is pretty dumb\n low = mtf.slicewise(lambda x: x[:, 0, 0, 0], [low],\n output_dtype=tf.float32,\n output_shape=lr_shape,\n name='my_dumb_reshape',\n splittable_dims=lr_shape[:-1] + hr_shape[:4])\n\n state = mtfpm.lpt_init(\n low,\n high,\n 0.1,\n kv_lr,\n kv_hr,\n halo_size,\n hr_shape,\n lr_shape,\n part_shape[1:],\n downsampling_factor=downsampling_factor,\n antialias=True,\n )\n\n # Here we can run our nbody\n final_state = state #mtfpm.nbody(state, stages, lr_shape, hr_shape, k_dims, kv_lr, kv_hr, halo_size, downsampling_factor=downsampling_factor)\n\n # paint the field\n final_field = mtf.zeros(mesh, shape=hr_shape)\n for block_size_dim in hr_shape[-3:]:\n final_field = mtf.pad(final_field, [halo_size, halo_size],\n block_size_dim.name)\n final_field = mesh_utils.cic_paint(final_field, final_state[0], halo_size)\n # Halo exchange\n for blocks_dim, block_size_dim in zip(hr_shape[1:4], final_field.shape[-3:]):\n final_field = mpm.halo_reduce(final_field, blocks_dim, block_size_dim,\n halo_size)\n # Remove borders\n for block_size_dim in hr_shape[-3:]:\n final_field = mtf.slice(final_field, halo_size, block_size_dim.size,\n block_size_dim.name)\n\n #final_field = mtf.reshape(final_field, [batch_dim, fx_dim, fy_dim, fz_dim])\n # Hack usisng custom reshape because mesh is pretty dumb\n final_field = mtf.slicewise(lambda x: x[:, 0, 0, 0], [final_field],\n output_dtype=tf.float32,\n output_shape=[batch_dim, fx_dim, fy_dim, fz_dim],\n name='my_dumb_reshape',\n splittable_dims=part_shape[:-1] + hr_shape[:4])\n\n return initc, final_field\n\n ##",
"def test_3D_m6_2k_sFH():\n scal, velo = setup_3D()\n\n advec = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L4_2,\n Support: 'gpu_2k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L4_2,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_3D_withPython(scal, velo, advec, advec_py)",
"def subdivision(mesh):\n\t\n\t\n\t# 1. generate new nodes in the centre of quad\n\t# 1/4 o-------o 1/4 o: existing vertices\n\t# | | *: newly-generated vertices\n\t# | * |\n\t# | |\n\t# 1/4 o-------o 1/4\n\n\tnew_coor = mesh.give_nodes().give_coor()\n\t\n\tfor face_index in range(mesh.give_model_inf()[2]): \n\t\tnew_x, new_y, new_z = (0, 0, 0)\n\t\tfor vertex_index in range(4):\n\t\t\tmesh.give_faces()\n\t\t\tnode_index = mesh.give_faces().give_node_list(face_index)[vertex_index]\n\n\t\t\tnew_x += 0.25*mesh.give_nodes().give_coor(node_index)[0]\n\t\t\tnew_y += 0.25*mesh.give_nodes().give_coor(node_index)[1]\n\t\t\tnew_z += 0.25*mesh.give_nodes().give_coor(node_index)[2]\n\t\t\t\n\t\tnew_coor.append((new_x, new_y, new_z))\n\t\t\n\t# generating new nodes on the edge\n\t# figure out one edge is shared by how many surfaces\n\tedge_shared_by_faces_list = helper.find_edge_shared_by_which_faces(mesh.give_edges(), mesh.give_faces())\n\t\n\tfor edge_index in range(mesh.give_model_inf()[1]):\n\n\t\tnew_x, new_y, new_z = (0., 0., 0.)\n\t\t\n\t# 2. generate new node on boundary edge\n\t# o: existing vertices\n\t# 1/2 o---*---o 1/2 *: newly-generated vertices\n\t# \n\n\t\tnew_coor = mesh.give_nodes().give_coor()\n\t\tif len(edge_shared_by_faces_list[edge_index]) == 1:\t\n\t\t\tnew_x, new_y, new_z = (0., 0., 0.)\n\t\t\tfor vertex_index in range(2):\n\t\t\t\tthis_node = mesh.give_edges().give_node(edge_index)[vertex_index]\n\t\t\t\tnew_x += 0.5*mesh.give_nodes().give_coor()[this_node][0]\n\t\t\t\tnew_y += 0.5*mesh.give_nodes().give_coor()[this_node][1]\n\t\t\t\tnew_z += 0.5*mesh.give_nodes().give_coor()[this_node][2]\n\t\t\t\t\n\t\t\tnew_coor.append((new_x, new_y, new_z))\n\t\t\t\t\n\t# 3. generate new node on interior edge\n\t# 1/16 o-------o 1/16 o: existing vertices\n\t# | | *: newly-generated vertices\n\t# 3/8 o---*---o 3/8\n\t# | |\n\t# 1/16 o-------o 1/16\n\n\t\telse:\n\t\t\tnew_x, new_y, new_z = (0., 0., 0.)\n\t\t\tconsidered_node = []\n\t\t\tfor vertex_index in range(2):\n\t\t\t\tthis_node = mesh.give_edges().give_node(edge_index)[vertex_index]\n\t\t\t\tconsidered_node.append(this_node)\n\t\t\t\tnew_x += 3./8.*mesh.give_nodes().give_coor()[this_node][0]\n\t\t\t\tnew_y += 3./8.*mesh.give_nodes().give_coor()[this_node][1]\n\t\t\t\tnew_z += 3./8.*mesh.give_nodes().give_coor()[this_node][2]\n\t\t\t\n\t\t\t# faces contain this node\n\t\t\tpotential_node = []\n\t\t\tfor face_index in edge_shared_by_faces_list[edge_index]:\t\t\n\t\t\t\tfor vertex_index in range(4):\n\t\t\t\t\t\tpotential_node.append(mesh.give_faces().give_node_list(face_index)[vertex_index])\n\t\t\t\n\t\t\touter_node = []\n\t\t\tfor node in potential_node:\n\t\t\t\tif (node not in considered_node) & (node not in outer_node):\n\t\t\t\t\touter_node.append(node)\n\t\t\t\t\t\n\t\t\tfor vertex_index in outer_node:\n\t\t\t\tnew_x += 1./16.*mesh.give_nodes().give_coor()[vertex_index][0]\n\t\t\t\tnew_y += 1./16.*mesh.give_nodes().give_coor()[vertex_index][1]\n\t\t\t\tnew_z += 1./16.*mesh.give_nodes().give_coor()[vertex_index][2]\n\t\t\t\n\t\t\tnew_coor.append((new_x, new_y, new_z))\n\n\t# update the links of edges and surfaces\n\tnew_edge_list = []\n\tnew_face_list = []\n\tfor face_index in range(mesh.give_model_inf()[2]):\n\t\told_node0 = mesh.give_faces().give_node_list(face_index)[0]\n\t\told_node1 = mesh.give_faces().give_node_list(face_index)[1]\n\t\told_node2 = mesh.give_faces().give_node_list(face_index)[2]\n\t\told_node3 = mesh.give_faces().give_node_list(face_index)[3]\n\t\t\n\t\told_edge0 = 
mesh.give_faces().give_edge_list(face_index)[0]\n\t\told_edge1 = mesh.give_faces().give_edge_list(face_index)[1]\n\t\told_edge2 = mesh.give_faces().give_edge_list(face_index)[2]\n\t\told_edge3 = mesh.give_faces().give_edge_list(face_index)[3]\n\t\t\n\t\tnew_node4 = old_edge0 + mesh.give_model_inf()[0] + mesh.give_model_inf()[2] \n\t\tnew_node5 = old_edge1 + mesh.give_model_inf()[0] + mesh.give_model_inf()[2]\n\t\tnew_node6 = old_edge2 + mesh.give_model_inf()[0] + mesh.give_model_inf()[2]\n\t\tnew_node7 = old_edge3 + mesh.give_model_inf()[0] + mesh.give_model_inf()[2]\t\n\t\tnew_node8 = mesh.give_model_inf()[0] + face_index\n\t\t\n\t\tif helper.in_list((old_node0, new_node4), new_edge_list) == False: \n\t\t\tnew_edge_list.append((old_node0, new_node4))\n\t\tif helper.in_list((new_node4, new_node8), new_edge_list) == False: \n\t\t\tnew_edge_list.append((new_node4, new_node8))\n\t\tif helper.in_list((new_node8, new_node7), new_edge_list) == False: \n\t\t\tnew_edge_list.append((new_node8, new_node7))\n\t\tif helper.in_list((new_node7, old_node0), new_edge_list) == False: \n\t\t\tnew_edge_list.append((new_node7, old_node0))\n\t\tif helper.in_list((new_node4, old_node1), new_edge_list) == False: \n\t\t\tnew_edge_list.append((new_node4, old_node1))\n\t\tif helper.in_list((old_node1, new_node5), new_edge_list) == False: \n\t\t\tnew_edge_list.append((old_node1, new_node5))\n\t\tif helper.in_list((new_node5, new_node8), new_edge_list) == False: \n\t\t\tnew_edge_list.append((new_node5, new_node8))\n\t\tif helper.in_list((new_node7, old_node3), new_edge_list) == False: \n\t\t\tnew_edge_list.append((new_node7, old_node3))\n\t\tif helper.in_list((old_node3, new_node6), new_edge_list) == False: \n\t\t\tnew_edge_list.append((old_node3, new_node6))\n\t\tif helper.in_list((new_node6, new_node8), new_edge_list) == False: \n\t\t\tnew_edge_list.append((new_node6, new_node8))\n\t\tif helper.in_list((new_node6, old_node2), new_edge_list) == False: \n\t\t\tnew_edge_list.append((new_node6, old_node2))\n\t\tif helper.in_list((old_node2, new_node5), new_edge_list) == False: \n\t\t\tnew_edge_list.append((old_node2, new_node5))\n\t\n\t\tnew_face_list.append((old_node0, new_node4, new_node8, new_node7))\n\t\tnew_face_list.append((new_node4, old_node1, new_node5, new_node8))\n\t\tnew_face_list.append((new_node7, new_node8, new_node6, old_node3))\n\t\tnew_face_list.append((new_node8, new_node5, old_node2, new_node6))\n\t\t\n\tnew_edges = geo.Edge(new_edge_list)\n\t\n\tnew_faces = geo.Face(new_face_list, new_edges)\n\t\t\n\t# update existing nodes\t\n\tfor node_index in range(mesh.give_model_inf()[0]):\n\t\t\n\t\tring1, ring2 = helper.find_neighbour_node(new_edges, new_faces, node_index)\n\t\tvalence = helper.find_valence(node_index, new_faces) \n\t\t#: valence: the number of faces sharing on specific edge\n\n\t# 4. 
update existing corner vertex\n\t# 2/4 @---* 1/4 *: newly-generated vertices\n\t# | | @: existing vertices to be updated\n\t# 1/4 *---* 0 The higher mask values on neighbouring vertices, \n\t# the more likely a square mesh will be refined into a sphere.\n\t \n\t\tif valence == 1:\n\n\t\t\tnew_x, new_y, new_z = (0, 0, 0)\n\t\t\tprint\n\t\t\tfor node_in_ring1 in ring1:\n\t\t\t\tnew_x += 1./4.*mesh.give_nodes().give_coor()[node_in_ring1][0]\n\t\t\t\tnew_y += 1./4.*mesh.give_nodes().give_coor()[node_in_ring1][1]\n\t\t\t\tnew_z += 1./4.*mesh.give_nodes().give_coor()[node_in_ring1][2]\n\n\t\t\tfor node_in_ring2 in ring2:\n\t\t\t\tnew_x += 0.*mesh.give_nodes().give_coor()[node_in_ring2][0]\n\t\t\t\tnew_y += 0.*mesh.give_nodes().give_coor()[node_in_ring2][1]\n\t\t\t\tnew_z += 0.*mesh.give_nodes().give_coor()[node_in_ring2][2]\n\t\t\t\t\n\t\t\tnew_x += 2./4.*mesh.give_nodes().give_coor()[node_index][0]\n\t\t\tnew_y += 2./4.*mesh.give_nodes().give_coor()[node_index][1]\n\t\t\tnew_z += 2./4.*mesh.give_nodes().give_coor()[node_index][2]\n\n\t# 5. update existing boundary joint vertex\n\t# 3/4\n\t# 1/8 *---*---* 1/8 *: newly-generated vertices\n\t# | | | @: existing vertices to be updated\n\t# 0 *---*---* 0\n\n\t\telif valence == 2:\n\t\t\t\n\t\t\tnew_x, new_y, new_z = (0, 0, 0)\n\t\t\tfor node_in_ring1 in ring1:\n\t\t\t\tif helper.find_valence(node_in_ring1, new_faces) <= 2: \n\t\t\t\t\tnew_x += 1./8.*mesh.give_nodes().give_coor()[node_in_ring1][0]\n\t\t\t\t\tnew_y += 1./8.*mesh.give_nodes().give_coor()[node_in_ring1][1]\n\t\t\t\t\tnew_z += 1./8.*mesh.give_nodes().give_coor()[node_in_ring1][2]\n\t\t\t\t\t\n\t\t\tnew_x += 3./4.*mesh.give_nodes().give_coor()[node_index][0]\n\t\t\tnew_y += 3./4.*mesh.give_nodes().give_coor()[node_index][1]\n\t\t\tnew_z += 3./4.*mesh.give_nodes().give_coor()[node_index][2]\n\t\n\t# 6. update new node on interior edge\n\t# * r/k\n\t# /\\ b/k*\n\t# *__/ \\___ r/k\n\t# \\ \\ /¬¬/ *: newly-generated vertices: \n\t# \\ \\/ / b = 3/2/valence, r = 1/4/valence\n\t# *--@--* b/k\t @: existing vertices to be updated: 1-b-r\t\t\n\t# / /\\ \\\n\t# /__/ \\__\\\n\t# * \\ / * r/k\n\t# \\/\n\t\t\n\t\telse:\n\t\t\tnew_x, new_y, new_z = (0, 0, 0)\n\t\t\tbeta = 3./2./valence\n\t\t\tgamma = 1./4./valence\n\t\t\tfor node_in_ring1 in ring1:\n\t\t\t\tnew_x += beta/valence*mesh.give_nodes().give_coor()[node_in_ring1][0]\n\t\t\t\tnew_y += beta/valence*mesh.give_nodes().give_coor()[node_in_ring1][1]\n\t\t\t\tnew_z += beta/valence*mesh.give_nodes().give_coor()[node_in_ring1][2]\n\t\t\t\n\t\t\tfor node_in_ring2 in ring2:\n\t\t\t\tnew_x += gamma/valence*mesh.give_nodes().give_coor()[node_in_ring2][0]\n\t\t\t\tnew_y += gamma/valence*mesh.give_nodes().give_coor()[node_in_ring2][1]\n\t\t\t\tnew_z += gamma/valence*mesh.give_nodes().give_coor()[node_in_ring2][2]\n\t\t\t\n\t\t\tnew_x += (1. - beta - gamma)*mesh.give_nodes().give_coor()[node_index][0]\n\t\t\tnew_y += (1. - beta - gamma)*mesh.give_nodes().give_coor()[node_index][1]\n\t\t\tnew_z += (1. - beta - gamma)*mesh.give_nodes().give_coor()[node_index][2]\n\t\t\n\t\tnew_coor[node_index] = (new_x, new_y, new_z)\n\t\n\tnew_nodes = geo.Node(new_coor)\n\t\n\tmesh.update(new_nodes, new_edges, new_faces)\n\t\n\t# return new_mesh\n\treturn mesh",
"def test_reconstruction_against_simulation(subarray_and_event_gamma_off_axis_500_gev):\n\n # 4-LST bright event already calibrated\n # we'll clean it and parametrize it again in the TelescopeFrame\n subarray, event = subarray_and_event_gamma_off_axis_500_gev\n\n # define reconstructor\n reconstructor = HillasReconstructor(subarray)\n\n hillas_dict = {}\n telescope_pointings = {}\n\n for tel_id, dl1 in event.dl1.tel.items():\n\n telescope_pointings[tel_id] = SkyCoord(\n alt=event.pointing.tel[tel_id].altitude,\n az=event.pointing.tel[tel_id].azimuth,\n frame=AltAz(),\n )\n\n geom_CameraFrame = subarray.tel[tel_id].camera.geometry\n\n # this could be done also out of this loop,\n # but in case of real data each telescope would have a\n # different telescope_pointing\n geom_TelescopeFrame = geom_CameraFrame.transform_to(\n TelescopeFrame(telescope_pointing=telescope_pointings[tel_id])\n )\n\n mask = tailcuts_clean(\n geom_TelescopeFrame,\n dl1.image,\n picture_thresh=5.0,\n boundary_thresh=2.5,\n keep_isolated_pixels=False,\n min_number_picture_neighbors=2,\n )\n\n try:\n hillas_dict[tel_id] = hillas_parameters(\n geom_TelescopeFrame[mask], dl1.image[mask]\n )\n\n # the original event is created from a\n # pytest fixture with \"session\" scope, so it's always the same\n # and if we used the same event we would overwrite the image\n # parameters for the next tests, thus causing their failure\n test_event = deepcopy(event)\n test_event.dl1.tel[tel_id].parameters = ImageParametersContainer()\n test_event.dl1.tel[tel_id].parameters.hillas = hillas_dict[tel_id]\n\n except HillasParameterizationError as e:\n print(e)\n continue\n\n # Get shower geometry\n reconstructor(event)\n # get the result from the correct DL2 container\n result = event.dl2.stereo.geometry[\"HillasReconstructor\"]\n\n # get the reconstructed coordinates in the sky\n reco_coord = SkyCoord(alt=result.alt, az=result.az, frame=AltAz())\n # get the simulated coordinates in the sky\n true_coord = SkyCoord(\n alt=event.simulation.shower.alt, az=event.simulation.shower.az, frame=AltAz()\n )\n\n # check that we are not more far than 0.1 degrees\n assert reco_coord.separation(true_coord) < 0.1 * u.deg",
"def test_3D_m6_1k_sFH():\n scal, velo = setup_3D()\n\n advec = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L4_2,\n Support: 'gpu_1k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d3d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L4_2,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_3D_withPython(scal, velo, advec, advec_py)",
"def run_multi_medium_inversion(datadir, outdir, real_data_fnames, MT_green_func_fnames, single_force_green_func_fnames, data_labels, inversion_type, perform_normallised_waveform_inversion, compare_all_waveforms_simultaneously, num_samples, comparison_metric, manual_indices_time_shift_MT, manual_indices_time_shift_SF, nlloc_hyp_filename, cut_phase_start_vals=[], cut_phase_length=0, plot_switch=False, num_processors=1, set_pre_time_shift_values_to_zero_switch=True, only_save_non_zero_solns_switch=False, return_absolute_similarity_values_switch=False, invert_for_ratio_of_multiple_media_greens_func_switch=False, green_func_fnames_split_index=0, green_func_phase_labels=[], invert_for_relative_magnitudes_switch=False, rel_exp_mag_range=[1.,1.], auto_shift_for_best_fit=True):\n \n # Load input data (completely, for specific inversion type):\n real_data_array, green_func_array = get_overall_real_and_green_func_data(datadir, real_data_fnames, MT_green_func_fnames, single_force_green_func_fnames, inversion_type, manual_indices_time_shift_MT=manual_indices_time_shift_MT, manual_indices_time_shift_SF=manual_indices_time_shift_SF, cut_phase_start_vals=cut_phase_start_vals, cut_phase_length=cut_phase_length, set_pre_time_shift_values_to_zero_switch=set_pre_time_shift_values_to_zero_switch, invert_for_ratio_of_multiple_media_greens_func_switch=invert_for_ratio_of_multiple_media_greens_func_switch, green_func_fnames_split_index=green_func_fnames_split_index)\n \n # Do initial check/s:\n if len(green_func_phase_labels)>0:\n if not len(green_func_array[:,0,0,0]) == len(green_func_phase_labels):\n print(\"Error: Greens functions filename array (for medium 1), does not match length of green_func_phase_labels array.\")\n sys.exit()\n \n # Get number of different phases, if specified:\n num_phase_types_for_media_ratios = 0\n if green_func_phase_labels.count(\"P\")>0:\n num_phase_types_for_media_ratios += 1\n if green_func_phase_labels.count(\"S\")>0:\n num_phase_types_for_media_ratios += 1\n if green_func_phase_labels.count(\"surface\")>0:\n num_phase_types_for_media_ratios += 1\n \n # Define a fraction of the second medium to use for the simple least squares inversion:\n frac_medium_2 = 0.5\n green_func_array_for_lsq_inv = (1. 
- frac_medium_2)*green_func_array[:,:,:,0] + frac_medium_2*green_func_array[:,:,:,1]\n \n # Perform the inversion:\n M = perform_inversion(real_data_array, green_func_array_for_lsq_inv)\n M_amplitude = ((np.sum(M**2))**0.5)\n\n # And get forward model synthetic waveform result:\n synth_forward_model_result_array = forward_model(green_func_array_for_lsq_inv, M)\n\n # And plot the results:\n if plot_switch:\n plot_specific_forward_model_result(real_data_array, synth_forward_model_result_array, data_labels, plot_title=\"Initial theoretical inversion solution\", perform_normallised_waveform_inversion=perform_normallised_waveform_inversion)\n\n # And save least squares output:\n # Set output arrays to equal least squares output: \n MTs = M\n similarity_curr_sample, shift_idxs = compare_synth_to_real_waveforms(real_data_array, synth_forward_model_result_array, comparison_metric, perform_normallised_waveform_inversion, compare_all_waveforms_simultaneously, auto_shift_for_best_fit) \n MTp = np.array([similarity_curr_sample])\n # And save data to MTFIT style file:\n outdir_least_squares = outdir+\"/least_squares_result\"\n os.system(\"mkdir -p \"+outdir_least_squares)\n save_to_MTFIT_style_file(MTs, MTp, nlloc_hyp_filename, inversion_type, outdir_least_squares, shift_idxs=shift_idxs) # Saves pickled dictionary containing data from inversion\n # And save most likely solution and real data waveforms to file:\n if inversion_type == \"DC_single_force_couple\" or inversion_type == \"DC_single_force_no_coupling\" or inversion_type == \"DC_crack_couple\" or inversion_type == \"single_force_crack_no_coupling\":\n synth_forward_model_most_likely_result_array = forward_model(green_func_array, MTs[:-1, np.where(MTp==np.max(MTp))[0][0]])\n else:\n synth_forward_model_most_likely_result_array = forward_model(green_func_array, MTs[:, np.where(MTp==np.max(MTp))[0][0]])\n # And get shift associated with most likely model:\n if len(shift_idxs) > 0:\n shift_idxs_most_likely_result = np.array(shift_idxs)\n else:\n shift_idxs_most_likely_result = []\n # And save:\n save_specific_waveforms_to_file(real_data_array, synth_forward_model_most_likely_result_array, data_labels, nlloc_hyp_filename, inversion_type, outdir_least_squares, shift_idxs=shift_idxs_most_likely_result, normallise_data=perform_normallised_waveform_inversion)\n\n # And do Monte Carlo random sampling to obtain PDF of moment tensor:\n MTs, MTp, MTp_absolute, shift_idxs_all_samples = perform_monte_carlo_sampled_waveform_inversion(real_data_array, green_func_array, num_samples, M_amplitude=M_amplitude,inversion_type=inversion_type, comparison_metric=comparison_metric, perform_normallised_waveform_inversion=perform_normallised_waveform_inversion, compare_all_waveforms_simultaneously=compare_all_waveforms_simultaneously, num_processors=num_processors, return_absolute_similarity_values_switch=return_absolute_similarity_values_switch, invert_for_ratio_of_multiple_media_greens_func_switch=invert_for_ratio_of_multiple_media_greens_func_switch, green_func_phase_labels=green_func_phase_labels, num_phase_types_for_media_ratios=num_phase_types_for_media_ratios, invert_for_relative_magnitudes_switch=invert_for_relative_magnitudes_switch, rel_exp_mag_range=rel_exp_mag_range, auto_shift_for_best_fit=auto_shift_for_best_fit)\n\n # Check that probability of output is non-zero:\n if math.isnan(MTp[0]):\n print(\"Error: Sum of probabilities is equal to zero - therefore no adiquate solution could be found and inversion is terminating.\")\n sys.exit()\n \n # Remove zero 
probability values if specified:\n if only_save_non_zero_solns_switch:\n MTp, MTs = remove_zero_prob_results(MTp, MTs)\n\n # And plot most likely solution:\n if plot_switch:\n if inversion_type == \"DC_single_force_couple\" or inversion_type == \"DC_single_force_no_coupling\" or inversion_type == \"DC_crack_couple\" or inversion_type == \"single_force_crack_no_coupling\":\n if invert_for_ratio_of_multiple_media_greens_func_switch:\n if num_phase_types_for_media_ratios>0:\n # Generate different phase fractions:\n tmp_frac_medium_1_diff_phases_dict={}\n tmp_frac_medium_1_diff_phases_dict[\"P\"] = MTs[-3, np.where(MTp==np.max(MTp))[0][0]]\n tmp_frac_medium_1_diff_phases_dict[\"S\"] = MTs[-2, np.where(MTp==np.max(MTp))[0][0]]\n tmp_frac_medium_1_diff_phases_dict[\"surface\"] = MTs[-1, np.where(MTp==np.max(MTp))[0][0]]\n # Create actual greens functions for this solution:\n green_func_array_for_most_likely_amp_ratio = np.zeros(np.shape(green_func_array[:,:,:,0]), dtype=float)\n for j in range(len(green_func_phase_labels)):\n tmp_frac_medium_1 = tmp_frac_medium_1_diff_phases_dict[green_func_phase_labels[j]] # Get fraction for specific phase, for specific greens functions for specific station-phase\n green_func_array_for_most_likely_amp_ratio[j, :, :] = (1. - tmp_frac_medium_1)*green_func_array[j,:,:,0] + tmp_frac_medium_1*green_func_array[j,:,:,1]\n # And get result:\n synth_forward_model_most_likely_result_array = forward_model(green_func_array_for_most_likely_amp_ratio, MTs[:-4, np.where(MTp==np.max(MTp))[0][0]])\n else:\n frac_medium_1 = MTs[-1, np.where(MTp==np.max(MTp))[0][0]]\n green_func_array_for_most_likely_amp_ratio = (1. - frac_medium_1)*green_func_array[:,:,:,0] + frac_medium_1*green_func_array[:,:,:,1]\n synth_forward_model_most_likely_result_array = forward_model(green_func_array_for_most_likely_amp_ratio, MTs[:-2, np.where(MTp==np.max(MTp))[0][0]])\n else:\n synth_forward_model_most_likely_result_array = forward_model(green_func_array, MTs[:-1, np.where(MTp==np.max(MTp))[0][0]])\n else:\n if invert_for_ratio_of_multiple_media_greens_func_switch:\n if num_phase_types_for_media_ratios>0:\n # Generate different phase fractions:\n tmp_frac_medium_1_diff_phases_dict={}\n tmp_frac_medium_1_diff_phases_dict[\"P\"] = MTs[-3, np.where(MTp==np.max(MTp))[0][0]]\n tmp_frac_medium_1_diff_phases_dict[\"S\"] = MTs[-2, np.where(MTp==np.max(MTp))[0][0]]\n tmp_frac_medium_1_diff_phases_dict[\"surface\"] = MTs[-1, np.where(MTp==np.max(MTp))[0][0]]\n # Create actual greens functions for this solution:\n green_func_array_for_most_likely_amp_ratio = np.zeros(np.shape(green_func_array[:,:,:,0]), dtype=float)\n for j in range(len(green_func_phase_labels)):\n tmp_frac_medium_1 = tmp_frac_medium_1_diff_phases_dict[green_func_phase_labels[j]] # Get fraction for specific phase, for specific greens functions for specific station-phase\n green_func_array_for_most_likely_amp_ratio[j, :, :] = (1. - tmp_frac_medium_1)*green_func_array[j,:,:,0] + tmp_frac_medium_1*green_func_array[j,:,:,1]\n # And get result:\n synth_forward_model_most_likely_result_array = forward_model(green_func_array_for_most_likely_amp_ratio, MTs[:-3, np.where(MTp==np.max(MTp))[0][0]])\n else:\n frac_medium_1 = MTs[-1, np.where(MTp==np.max(MTp))[0][0]]\n green_func_array_for_most_likely_amp_ratio = (1. 
- frac_medium_1)*green_func_array[:,:,:,0] + frac_medium_1*green_func_array[:,:,:,1]\n synth_forward_model_most_likely_result_array = forward_model(green_func_array_for_most_likely_amp_ratio, MTs[:-1, np.where(MTp==np.max(MTp))[0][0]])\n else:\n synth_forward_model_most_likely_result_array = forward_model(green_func_array, MTs[:, np.where(MTp==np.max(MTp))[0][0]])\n plot_specific_forward_model_result(real_data_array, synth_forward_model_most_likely_result_array, data_labels, plot_title=\"Most likely Monte Carlo sampled solution\", perform_normallised_waveform_inversion=perform_normallised_waveform_inversion)\n print(\"Most likely solution:\", MTs[:,np.where(MTp==np.max(MTp))[0][0]])\n\n # And save data to MTFIT style file:\n save_to_MTFIT_style_file(MTs, MTp, nlloc_hyp_filename, inversion_type, outdir, MTp_absolute=MTp_absolute, shift_idxs=shift_idxs_all_samples) # Saves pickled dictionary containing data from inversion\n # And save most likely solution and real data waveforms to file:\n synth_forward_model_most_likely_result_array = get_synth_forward_model_most_likely_result(MTs, MTp, green_func_array, inversion_type, invert_for_ratio_of_multiple_media_greens_func_switch=invert_for_ratio_of_multiple_media_greens_func_switch, green_func_phase_labels=green_func_phase_labels, num_phase_types_for_media_ratios=num_phase_types_for_media_ratios)\n # And get shift associated with most likely model:\n if len(shift_idxs_all_samples) > 0:\n shift_idxs_most_likely_result = np.array(shift_idxs_all_samples)[0, np.where(MTp==np.max(MTp))[0][0]]\n else:\n shift_idxs_most_likely_result = []\n # And save:\n save_specific_waveforms_to_file(real_data_array, synth_forward_model_most_likely_result_array, data_labels, nlloc_hyp_filename, inversion_type, outdir, shift_idxs=shift_idxs_most_likely_result, normallise_data=perform_normallised_waveform_inversion)\n \n print(\"Finished\")",
"def test_power_spectral_density_from_spatially_resolved_magnetisation_confined_to_mesh_region(tmpdir, debug=False):\n os.chdir(str(tmpdir))\n RTOL = 1e-10\n\n H1 = 1e6 # external field in A/m\n alpha1 = 0.5 # some sort of damping constant\n omega1 = gamma * H1 # precession frequency\n\n H2 = 2.8e4 # external field in A/m\n alpha2 = 0.3 # some sort of damping constant\n omega2 = gamma * H2 # precession frequency\n\n ##\n # Step 1: Construct a time series of artificial magnetisation\n # data and save it to a bunch of .npy files.\n ##\n t_step = 1e-11\n t_ini = 0\n t_end = 10e-9\n\n N1 = 42 # in a real application this would be the number of mesh vertices\n N2 = 23 # in a real application this would be the number of mesh vertices\n fft_test_helpers.create_test_npy_files_with_two_regions(\n str(tmpdir), t_step, t_ini, t_end, omega1, alpha1, N1, omega2, alpha2, N2)\n\n ##\n # Step 2: compute the FFT of a resampled time series, both by\n # hand and using FFT_m.\n ##\n # XXX TODO: Resampling timesteps is not supported when using .npy\n # files. Either simplify the code below, or implement saving to\n # .h5 files so that it's easier to implement resampling for\n # spatially resolved data, too.\n ##\n t_step_res = t_step\n t_ini_res = t_ini\n t_end_res = t_end\n ts_resampled = np.arange(t_ini_res, t_end_res, t_step_res)\n\n # Compute time series based on resampled timesteps\n mx_res = exp(-ts_resampled * 1e8 / alpha1) * sin(omega1 * ts_resampled)\n my_res = exp(-ts_resampled * 1e8 / alpha1) * cos(omega1 * ts_resampled)\n mz_res = 1 - sqrt(mx_res ** 2 + my_res ** 2)\n\n # Compute 'analytical' Fourier transform of resampled time series and\n # determine the power of the spectrum for each component. We also need\n # to multiply by the number of mesh nodes because the numerical algorithm\n # sums up all contributions at the individual nodes (but we can just\n # multiply because they are all identical by construction).\n psd_mx_expected = N1 * np.absolute(np.fft.rfft(mx_res)) ** 2\n psd_my_expected = N1 * np.absolute(np.fft.rfft(my_res)) ** 2\n psd_mz_expected = N1 * np.absolute(np.fft.rfft(mz_res)) ** 2\n\n # Compute Fourier transform of resampled time series using FFT_m\n freqs_computed, psd_mx_computed, psd_my_computed, psd_mz_computed = \\\n compute_power_spectral_density('m_ringdown*.npy', t_step_res, t_ini=t_ini_res,\n t_end=t_end_res, subtract_values=None, restrict_to_vertices=xrange(N1))\n\n # Check that the analytically determined power spectra are the same as the\n # computed ones.\n assert(np.allclose(psd_mx_expected, psd_mx_computed, atol=0, rtol=RTOL))\n assert(np.allclose(psd_my_expected, psd_my_computed, atol=0, rtol=RTOL))\n assert(np.allclose(psd_mz_expected, psd_mz_computed, atol=0, rtol=RTOL))\n\n if debug:\n # Plot the spectra for debugging\n fig = plt.figure(figsize=(20, 5))\n ax = fig.gca()\n ax.plot(freqs_computed, psd_mx_expected, label='psd_mx_expected')\n ax.plot(freqs_computed, psd_my_expected, label='psd_my_expected')\n ax.plot(freqs_computed, psd_mz_expected, label='psd_mz_expected')\n ax.plot(freqs_computed, psd_mx_computed, label='psd_mx_computed')\n ax.plot(freqs_computed, psd_my_computed, label='psd_my_computed')\n ax.plot(freqs_computed, psd_mz_computed, label='psd_mz_computed')\n ax.legend(loc='best')\n fig.savefig('psd_m_McMichaelStiles.png')",
"def Sphere_ExactSerendipityLagrangeQuad():\n\n mesh = Sphere_CubeToSerendipityLagrangeQuad(1)\n \n ################\n # Modifications for exact sphere\n ################\n # x=+1 side\n def posXvals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.ones(xi1.shape);yb=np.array(-xi1);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n x = xb*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n y = yb*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n z = zb*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n return np.vstack([x,y,z])\n def posXnormals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.ones(xi1.shape);yb=np.array(-xi1);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = -1.0 * xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-yb+2.0*yb*zz/3.0)\n dxdxi2 = 1.0 * xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-zb+2.0*yy*zb/3.0)\n dydxi1 = -1.0 * np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n dydxi2 = 1.0 * yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-zb+2.0*xx*zb/3.0)\n dzdxi1 = -1.0 * zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-yb+2.0*xx*yb/3.0)\n dzdxi2 = 1.0 * np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n magnitude = np.sqrt(np.sum(J**2,axis=1))\n return J.T/magnitude\n def posXJ(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.ones(xi1.shape);yb=np.array(-xi1);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = -1.0 * xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-yb+2.0*yb*zz/3.0)\n dxdxi2 = 1.0 * xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-zb+2.0*yy*zb/3.0)\n dydxi1 = -1.0 * np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n dydxi2 = 1.0 * yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-zb+2.0*xx*zb/3.0)\n dzdxi1 = -1.0 * zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-yb+2.0*xx*yb/3.0)\n dzdxi2 = 1.0 * np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n return np.sqrt(np.sum(J**2,axis=1))\n mesh.eList[0].vals = posXvals\n mesh.eList[0].normals = posXnormals\n mesh.eList[0].J = posXJ\n \n def posYvals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1);yb=np.ones(xi1.shape);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n x = xb*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n y = yb*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n z = zb*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n return np.vstack([x,y,z])\n def posYnormals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1);yb=np.ones(xi1.shape);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = 1.0*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n dxdxi2 = 1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-zb+2.0*yy*zb/3.0)\n dydxi1 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-xb+2.0*xb*zz/3.0)\n dydxi2 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-zb+2.0*xx*zb/3.0)\n dzdxi1 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-xb+2.0*xb*yy/3.0)\n dzdxi2 = 1.0*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n magnitude = np.sqrt(np.sum(J**2,axis=1))\n return 
J.T/magnitude\n def posYJ(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1);yb=np.ones(xi1.shape);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = 1.0*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n dxdxi2 = 1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-zb+2.0*yy*zb/3.0)\n dydxi1 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-xb+2.0*xb*zz/3.0)\n dydxi2 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-zb+2.0*xx*zb/3.0)\n dzdxi1 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-xb+2.0*xb*yy/3.0)\n dzdxi2 = 1.0*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n return np.sqrt(np.sum(J**2,axis=1))\n mesh.eList[1].vals = posYvals\n mesh.eList[1].normals = posYnormals\n mesh.eList[1].J = posYJ\n \n # x=-1 side\n def negXvals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=-np.ones(xi1.shape);yb=np.array(xi1);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n x = xb*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n y = yb*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n z = zb*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n return np.vstack([x,y,z])\n def negXnormals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=-np.ones(xi1.shape);yb=np.array(xi1);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = 1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-yb+2.0*yb*zz/3.0)\n dxdxi2 = 1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-zb+2.0*yy*zb/3.0)\n dydxi1 = 1.0*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n dydxi2 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-zb+2.0*xx*zb/3.0)\n dzdxi1 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-yb+2.0*xx*yb/3.0)\n dzdxi2 = 1.0*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n magnitude = np.sqrt(np.sum(J**2,axis=1))\n return J.T/magnitude\n def negXJ(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=-np.ones(xi1.shape);yb=np.array(xi1);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = 1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-yb+2.0*yb*zz/3.0)\n dxdxi2 = 1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-zb+2.0*yy*zb/3.0)\n dydxi1 = 1.0*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n dydxi2 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-zb+2.0*xx*zb/3.0)\n dzdxi1 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-yb+2.0*xx*yb/3.0)\n dzdxi2 = 1.0*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n return np.sqrt(np.sum(J**2,axis=1))\n mesh.eList[2].vals = negXvals\n mesh.eList[2].normals = negXnormals\n mesh.eList[2].J = negXJ\n\n # y=-1 side\n def negYvals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(-xi1);yb=-np.ones(xi1.shape);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2;\n x = xb*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n y = yb*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n z = zb*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n return np.vstack([x,y,z])\n def negYnormals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n 
xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(-xi1);yb=-np.ones(xi1.shape);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2; \n dxdxi1 = -1.0*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n dxdxi2 = 1.0*0.5*xb*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5*(-zb+2.0*yy*zb/3.0)\n dydxi1 = -1.0*0.5*yb*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5*(-xb+2.0*xb*zz/3.0)\n dydxi2 = 1.0*0.5*yb*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5*(-zb+2.0*xx*zb/3.0)\n dzdxi1 = -1.0*0.5*zb*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5*(-xb+2.0*xb*yy/3.0)\n dzdxi2 = 1.0*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0) \n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n magnitude = np.sqrt(np.sum(J**2,axis=1))\n return J.T/magnitude\n def negYJ(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(-xi1);yb=-np.ones(xi1.shape);zb=np.array(xi2);\n xx=xb**2;yy=yb**2;zz=zb**2; \n dxdxi1 = -1.0*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n dxdxi2 = 1.0*0.5*xb*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5*(-zb+2.0*yy*zb/3.0)\n dydxi1 = -1.0*0.5*yb*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5*(-xb+2.0*xb*zz/3.0)\n dydxi2 = 1.0*0.5*yb*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5*(-zb+2.0*xx*zb/3.0)\n dzdxi1 = -1.0*0.5*zb*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5*(-xb+2.0*xb*yy/3.0)\n dzdxi2 = 1.0*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0) \n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n return np.sqrt(np.sum(J**2,axis=1))\n mesh.eList[3].vals = negYvals\n mesh.eList[3].normals = negYnormals\n mesh.eList[3].J = negYJ\n \n # z=+1 side\n def posZvals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1)\n yb=np.array(-xi2)\n zb=np.ones(xi1.shape)\n xx=xb**2;yy=yb**2;zz=zb**2;\n x = xb*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n y = yb*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n z = zb*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n return np.vstack([x,y,z])\n def posZnormals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1);yb=np.array(-xi2);zb=np.ones(xi1.shape);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = 1.0*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n dxdxi2 = -1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-yb+2.0*yb*zz/3.0)\n dydxi1 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-xb+2.0*xb*zz/3.0)\n dydxi2 = -1.0*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n dzdxi1 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-xb+2.0*xb*yy/3.0)\n dzdxi2 = -1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-yb+2.0*xx*yb/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n magnitude = np.sqrt(np.sum(J**2,axis=1))\n return J.T/magnitude\n def posZJ(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1);yb=np.array(-xi2);zb=np.ones(xi1.shape);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = 1.0*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n dxdxi2 = -1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-yb+2.0*yb*zz/3.0)\n dydxi1 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-xb+2.0*xb*zz/3.0)\n dydxi2 = -1.0*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n dzdxi1 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-xb+2.0*xb*yy/3.0)\n dzdxi2 = -1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * 
(-yb+2.0*xx*yb/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n return np.sqrt(np.sum(J**2,axis=1))\n mesh.eList[4].vals = posZvals\n mesh.eList[4].normals = posZnormals\n mesh.eList[4].J = posZJ\n \n # z=-1 side\n def negZvals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1);yb=np.array(xi2);zb=-np.ones(xi1.shape);\n xx=xb**2;yy=yb**2;zz=zb**2;\n x = xb*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n y = yb*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n z = zb*np.sqrt(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)\n return np.vstack([x,y,z])\n def negZnormals(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1);yb=np.array(xi2);zb=-np.ones(xi1.shape);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = 1.0*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n dxdxi2 = 1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-yb+2.0*yb*zz/3.0)\n dydxi1 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-xb+2.0*xb*zz/3.0)\n dydxi2 = 1.0*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n dzdxi1 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-xb+2.0*xb*yy/3.0)\n dzdxi2 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-yb+2.0*xx*yb/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n magnitude = np.sqrt(np.sum(J**2,axis=1))\n return J.T/magnitude\n def negZJ(xi1,xi2):\n xi1=np.asarray(xi1,np.float).reshape(-1,)\n xi2=np.asarray(xi2,np.float).reshape(-1,)\n xb=np.array(xi1);yb=np.array(xi2);zb=-np.ones(xi1.shape);\n xx=xb**2;yy=yb**2;zz=zb**2;\n dxdxi1 = 1.0*np.sqrt(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)\n dxdxi2 = 1.0*xb*0.5*(1.0 - yy/2.0 - zz/2.0 + yy*zz/3.0)**-0.5 * (-yb+2.0*yb*zz/3.0)\n dydxi1 = 1.0*yb*0.5*(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)**-0.5 * (-xb+2.0*xb*zz/3.0)\n dydxi2 = 1.0*np.sqrt(1.0 - xx/2.0 - zz/2.0 + xx*zz/3.0)\n dzdxi1 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-xb+2.0*xb*yy/3.0)\n dzdxi2 = 1.0*zb*0.5*(1.0 - xx/2.0 - yy/2.0 + xx*yy/3.0)**-0.5 * (-yb+2.0*xx*yb/3.0)\n J = np.array([[dxdxi1,dxdxi2],[dydxi1,dydxi2],[dzdxi1,dzdxi2]]).T\n J = np.cross(J[:,0,:],J[:,1,:])\n return np.sqrt(np.sum(J**2,axis=1))\n mesh.eList[5].vals = negZvals\n mesh.eList[5].normals = negZnormals\n mesh.eList[5].J = negZJ\n \n for e in mesh.eList:\n e.ExactElement = True\n \n return mesh",
"def subdivideMesh(IKLE,MESHX,MESHY): \n # ~~> Singling out edges\n from matplotlib.tri import Triangulation\n edges = Triangulation(MESHX,MESHY,IKLE).get_cpp_triangulation().get_edges()\n \n # ~~> Memory allocation for new MESH\n IELEM = len(IKLE); IPOIN = len(MESHX); IEDGE = len(edges)\n JKLE = np.zeros((IELEM*4,3),dtype=np.int) # you subdivide every elements by 4\n MESHJ = np.zeros((IEDGE,2),dtype=np.int) # you add one point on every edges\n \n # ~~> Lookup tables for node numbering on common edges\n pa,pb = edges.T\n k1b,k1a = np.sort(np.take(IKLE,[0,1],axis=1)).T\n indx1 = np.searchsorted(pa,k1a)\n jndx1 = np.searchsorted(pa,k1a,side='right')\n k2b,k2a = np.sort(np.take(IKLE,[1,2],axis=1)).T\n indx2 = np.searchsorted(pa,k2a)\n jndx2 = np.searchsorted(pa,k2a,side='right')\n k3b,k3a = np.sort(np.take(IKLE,[2,0],axis=1)).T\n indx3 = np.searchsorted(pa,k3a)\n jndx3 = np.searchsorted(pa,k3a,side='right')\n \n # ~~> Building one triangle at a time /!\\ Please get this loop parallelised\n j = 0\n for i in range(IELEM):\n k1 = indx1[i]+np.searchsorted(pb[indx1[i]:jndx1[i]],k1b[i])\n k2 = indx2[i]+np.searchsorted(pb[indx2[i]:jndx2[i]],k2b[i])\n k3 = indx3[i]+np.searchsorted(pb[indx3[i]:jndx3[i]],k3b[i])\n # ~~> New connectivity JKLE\n JKLE[j] = [IKLE[i][0],IPOIN+k1,IPOIN+k3]\n JKLE[j+1] = [IKLE[i][1],IPOIN+k2,IPOIN+k1]\n JKLE[j+2] = [IKLE[i][2],IPOIN+k3,IPOIN+k2]\n JKLE[j+3] = [IPOIN+k1,IPOIN+k2,IPOIN+k3]\n # ~~> New interpolation references for values and coordinates\n MESHJ[k1] = [IKLE[i][0],IKLE[i][1]]\n MESHJ[k2] = [IKLE[i][1],IKLE[i][2]]\n MESHJ[k3] = [IKLE[i][2],IKLE[i][0]]\n j += 4\n\n # ~~> Reset IPOBO while you are at it\n MESHX = np.resize(MESHX,IPOIN+IEDGE)\n MESHY = np.resize(MESHY,IPOIN+IEDGE)\n MESHX[IPOIN:] = np.sum(MESHX[MESHJ],axis=1)/2.\n MESHY[IPOIN:] = np.sum(MESHY[MESHJ],axis=1)/2.\n neighbours = Triangulation(MESHX,MESHY,JKLE).get_cpp_triangulation().get_neighbors()\n JPOBO = np.zeros(IPOIN+IEDGE,np.int)\n for n in range(IELEM*4):\n s1,s2,s3 = neighbours[n]\n e1,e2,e3 = JKLE[n]\n if s1 < 0:\n JPOBO[e1] = e1+1\n JPOBO[e2] = e2+1\n if s2 < 0:\n JPOBO[e2] = e2+1\n JPOBO[e3] = e3+1\n if s3 < 0:\n JPOBO[e3] = e3+1\n JPOBO[e1] = e1+1\n\n return JKLE,MESHX,MESHY,JPOBO,MESHJ",
"def init():\n\tN = np.int32(DIM) #prepare for stitching\n\t#HII_DIM = np.int32(HII_DIM)\n\tf_pixel_factor = DIM/HII_DIM;\n\tscale = np.float32(BOX_LEN)/DIM\n\tHII_scale = np.float32(BOX_LEN)/HII_DIM\n\tshape = (N,N,N)\n\t\n\tMRGgen = MRG32k3aRandomNumberGenerator(seed_getter=seed_getter_uniform, offset=0)\n\n\tkernel_source = open(cmd_folder+\"/initialize.cu\").read()\n\tkernel_code = kernel_source % {\n\n\t\t'DELTAK': DELTA_K,\n\t\t'VOLUME': VOLUME,\n\t\t'DIM': DIM\n\t}\n\tmain_module = nvcc.SourceModule(kernel_code)\n\tinit_kernel = main_module.get_function(\"init_kernel\")\n\tHII_filter = main_module.get_function(\"HII_filter\")\n\tadj_complex_conj = main_module.get_function(\"adj_complex_conj\")\n\tsubsample_kernel = main_module.get_function(\"subsample\")\n\tvelocity_kernel = main_module.get_function(\"set_velocity\")\n\tpspec_texture = main_module.get_texref(\"pspec\")\n\n\tinterpPspec, interpSize = init_pspec() #interpPspec contains both k array and P array\n\tinterp_cu = cuda.matrix_to_array(interpPspec, order='F')\n\tcuda.bind_array_to_texref(interp_cu, pspec_texture)\n\n\tlargebox_d = gpuarray.zeros(shape, dtype=np.float32)\n\tinit_kernel(largebox_d, np.int32(DIM), block=block_size, grid=grid_size)\n\n\t#import IPython; IPython.embed()\n\tlargebox_d_imag = gpuarray.zeros(shape, dtype=np.float32)\n\tinit_kernel(largebox_d_imag, np.int32(DIM), block=block_size, grid=grid_size)\n\n\tlargebox_d *= MRGgen.gen_normal(shape, dtype=np.float32)\n\tlargebox_d_imag *= MRGgen.gen_normal(shape, dtype=np.float32)\n\tlargebox_d = largebox_d + np.complex64(1.j) * largebox_d_imag\n\n\t#adj_complex_conj(largebox_d, DIM, block=block_size, grid=grid_size)\n\tlargebox = largebox_d.get()\n\t#np.save(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc\".format(DIM, BOX_LEN), largebox)\n\n\t#save real space box before smoothing\n\tplan = Plan(shape, dtype=np.complex64)\n\tplan.execute(largebox_d, inverse=True) #FFT to real space of smoothed box\n\tlargebox_d /= scale**3\n\tnp.save(parent_folder+\"/Boxes/deltax_z0.00_{0:d}_{1:.0f}Mpc\".format(DIM, BOX_LEN), largebox_d.real.get_async())\n\n\t#save real space box after smoothing and subsampling\n\t# host largebox is still in k space, no need to reload from disk\n\tlargebox_d = gpuarray.to_gpu(largebox)\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\tHII_filter(largebox_d, N, ZERO, smoothR, block=block_size, grid=grid_size);\n\tplan.execute(largebox_d, inverse=True) #FFT to real space of smoothed box\n\tlargebox_d /= scale**3\n\tsmallbox_d = gpuarray.zeros(HII_shape, dtype=np.float32)\n\tsubsample_kernel(largebox_d.real, smallbox_d, N, HII_DIM, PIXEL_FACTOR, block=block_size, grid=HII_grid_size) #subsample in real space\n\tnp.save(parent_folder+\"/Boxes/smoothed_deltax_z0.00_{0:d}_{1:.0f}Mpc\".format(HII_DIM, BOX_LEN), smallbox_d.get_async())\n\n\t# reload the k-space box for velocity boxes\n\tlargebox_d = gpuarray.to_gpu(largebox)\n\t\n\t#largebox_d /= VOLUME #divide by VOLUME if using fft (vs ifft)\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\tlargevbox_d = gpuarray.zeros((DIM,DIM,DIM), dtype=np.complex64)\n\tsmallbox_d = gpuarray.zeros(HII_shape, dtype=np.float32)\n\tfor num, mode in enumerate(['x', 'y', 'z']):\n\t\tvelocity_kernel(largebox_d, largevbox_d, DIM, np.int32(num), block=block_size, grid=grid_size)\n\t\tHII_filter(largevbox_d, DIM, ZERO, smoothR, block=block_size, grid=grid_size)\n\t\tplan.execute(largevbox_d, inverse=True)\n\t\tlargevbox_d /= scale**3\n\t\t#import IPython; IPython.embed()\n\t\tsubsample_kernel(largevbox_d.real, 
smallbox_d, DIM, HII_DIM,PIXEL_FACTOR, block=block_size, grid=HII_grid_size)\n\t\tnp.save(parent_folder+\"/Boxes/v{0}overddot_{1:d}_{2:.0f}Mpc\".format(mode, HII_DIM, BOX_LEN), smallbox_d.get())\n\n\treturn",
"def test_2D_m4_2k_sFH():\n scal, velo = setup_2D()\n\n advec = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: 'gpu_2k',\n Splitting: 'o2_FullHalf'}\n )\n advec_py = Advection(velo, scal, discretization=d2d,\n method={TimeIntegrator: RK2,\n Interpolation: Linear,\n Remesh: L2_1,\n Support: '',\n Splitting: 'o2_FullHalf'}\n )\n assertion_2D_withPython(scal, velo, advec, advec_py)",
"def test_2():\n\n n1 = 10\n n2 = 100\n ndim = 3\n\n semi_axes = np.random.random((n1,ndim))\n coords = np.array([sample_ellipsoidal_volume(n2, semi_axes[i]) for i in range(0,n1)])\n\n Is = iterative_inertia_tensors_3D(coords)\n\n assert np.shape(Is)==(n1,ndim,ndim)"
]
| [
"0.636811",
"0.620209",
"0.61466837",
"0.6098183",
"0.60520035",
"0.59614044",
"0.59098226",
"0.5829521",
"0.58274263",
"0.5822031",
"0.5816027",
"0.58075863",
"0.5801952",
"0.5774439",
"0.5718799",
"0.5712643",
"0.56994295",
"0.569744",
"0.5670637",
"0.5666977",
"0.5634393",
"0.56201756",
"0.56015354",
"0.5583797",
"0.5580193",
"0.5572161",
"0.55651087",
"0.55613583",
"0.55611795",
"0.5548896"
]
| 0.6449545 | 0 |
Attach a model to our Buddy, and move it onto `buddy.device`. If a model isn't explicitly passed into the constructor's `model` field, `attach_model` should be called before any optimization, checkpointing, etc. happens. | def attach_model(self, model: nn.Module) -> None:
assert isinstance(model, nn.Module)
# Move model to correct device
model.to(self._device)
# Attach model
self._model = model | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _attach_to_model(self, model):\n self._model = model",
"def _addmodel(self, model: Model):\n model = copy.deepcopy(model)\n\n if self.domain is not None:\n # Check that model and domain are compatible\n self._validate_model_domain(model, self.domain)\n\n # Add in model\n self.model = model\n\n # Setup base namelists\n self._set_base_namelists()\n else:\n self.model = model",
"def model(self, model):\n \n self._model = model",
"def add_model(self, model, delay_sort=True):\n assert isinstance(model, Model)\n\n if self.model_dict.has_key(model.model_id):\n raise ModelOverwrite()\n\n ## set default model if not set\n if self.default_model is None:\n self.default_model = model\n\n self.model_list.append(model)\n self.model_dict[model.model_id] = model\n\n model.structure = self\n\n if not delay_sort:\n self.model_list.sort()",
"def add_model(self, model):\n self.add_model_sig(ModelSignature.from_model(model))",
"def add_model(self, model):\n self.add_model_sig(ModelSignature.from_model(model))",
"def model(self, model):\n\n self._model = model",
"def model(self, model):\n\n self._model = model",
"def model(self, model):\n\n self._model = model",
"def model(self, model):\n\n self._model = model",
"def model(self, model):\n\n self._model = model",
"def _train_model(self, model_id, detector):\n detector.learn_normal()\n self._detectors[model_id] = detector\n\n if model_id in self._models:\n # set the model status to ready\n self._models[model_id]['status'] = 'ready'\n\n # save model to database\n self._db.add_model(model_id, (detector, self._models[model_id]))",
"def set_model(self, model):\r\n self.model = model.model\r\n with context.eager_mode():\r\n self._close_writers()\r\n if self.write_graph:\r\n with self._get_writer(self._train_run_name).as_default():\r\n with summary_ops_v2.always_record_summaries():\r\n if not self.model.run_eagerly:\r\n summary_ops_v2.graph(K.get_graph(), step=0)\r\n\r\n summary_writable = (\r\n self.model._is_graph_network or # pylint: disable=protected-access\r\n self.model.__class__.__name__ == 'Sequential') # pylint: disable=protected-access\r\n if summary_writable:\r\n summary_ops_v2.keras_model('keras', self.model, step=0)\r\n\r\n if self.embeddings_freq:\r\n self._configure_embeddings()",
"def setup_model(self,\n model_weights_path: Optional[str] = None,\n model_def_path: Optional[str] = None) -> None:\n if self.model is not None:\n self.model.to(self.device)\n return\n\n self._onnx_mode = (model_weights_path is not None\n and model_weights_path.lower().endswith('.onnx'))\n if self._onnx_mode:\n model = self.load_onnx_model(model_weights_path)\n else:\n model = self.build_model(model_def_path)\n\n if self.cfg.model.external_def is not None:\n # this model will have 1 extra output classes that we will ignore\n self.model = TorchVisionODAdapter(model, ignored_output_inds=[0])\n else:\n # this model will have 2 extra output classes that we will ignore\n num_classes = self.cfg.data.num_classes\n self.model = TorchVisionODAdapter(\n model, ignored_output_inds=[0, num_classes + 1])\n\n if not self._onnx_mode:\n self.model.to(self.device)\n self.load_init_weights(model_weights_path)",
"def register_model(self, model):\n\n self._model = model",
"def set_model(self, model):\n self.model = model",
"def set_model(self, model):\n self.model = model",
"def set_model(self, model=None):\n self.model = model",
"def set_model(self, model):\n\n # attach the model to the object\n self._likelihood_model = model\n\n # the position for the point source is freed\n for key in self._likelihood_model.point_sources.keys():\n self._likelihood_model.point_sources[key].position.ra.free = True\n self._likelihood_model.point_sources[key].position.dec.free = True\n\n # set proper priors for the coordinates\n self._likelihood_model.point_sources[key].position.ra.prior = Uniform_prior(lower_bound=0., upper_bound=360)\n self._likelihood_model.point_sources[key].position.dec.prior = Cosine_Prior(lower_bound=-90., upper_bound=90)",
"def set_model(self, model: tf.keras.Model):\n self.model = model",
"def add_model(self, model):\n name = '.'.join((model.__module__, model.__name__))\n\n if model.__name__ is not 'MongoModel':\n self._models[name] = model\n self._process_relations(model)\n\n if self._waited_relations:\n self._handle_waited_relations()",
"def set_model(self, likelihood_model_instance):\n pass",
"def set_model(self, likelihood_model_instance):\n pass",
"def set_model(self):\n self.model = self.get_model()",
"def bind_model(self, model: GraphModelSpace) -> Iterator[MutatorSequence]:\n try:\n self.model = model\n yield self\n finally:\n self.model = None",
"def bind_model(self, model: GraphModelSpace) -> Iterator[Mutator]:\n try:\n self.model = model\n yield self\n finally:\n self.model = None",
"def set_model(self, model):\n\n self._model = model\n\n return self",
"def model(self, model: Model):\n if model is None:\n raise ValueError(\"Invalid value for `model`, must not be `None`\") # noqa: E501\n\n self._model = model",
"def __init__(self,model,device):\n self.model = model\n self.device = device",
"def attach_yombo_device(self, yombo_device):\n logger.info(\"Attach yombo device to me.. {label}\", label=yombo_device.full_label)\n self.yombo_device = yombo_device\n self.yombo_device.wemo_device = self\n self.FEATURES = self.yombo_device.FEATURES\n self.update_value(self.state)"
]
| [
"0.7275075",
"0.6263499",
"0.6240799",
"0.6118507",
"0.60706156",
"0.60706156",
"0.605017",
"0.605017",
"0.605017",
"0.605017",
"0.605017",
"0.5989531",
"0.58149546",
"0.57625425",
"0.5756338",
"0.57553875",
"0.57553875",
"0.57202077",
"0.5699022",
"0.5698126",
"0.5638279",
"0.5633782",
"0.5633782",
"0.56171757",
"0.56094646",
"0.5602988",
"0.55733705",
"0.55250746",
"0.55133957",
"0.5447878"
]
| 0.76799136 | 0 |
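The `attach_model` pattern above (validate an `nn.Module`, move it onto the wrapper's device, then store it) can be exercised end-to-end with a minimal sketch. The `Buddy` class below is a simplified stand-in written for illustration only; its constructor and field names are assumptions, not the original library's API.

```python
from typing import Optional

import torch
import torch.nn as nn


class Buddy:
    """Illustrative stand-in; names and constructor are assumptions, not the real API."""

    def __init__(self, model: Optional[nn.Module] = None) -> None:
        # Pick the device once, based on CUDA availability.
        self._device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self._model: Optional[nn.Module] = None
        if model is not None:
            self.attach_model(model)

    def attach_model(self, model: nn.Module) -> None:
        assert isinstance(model, nn.Module)
        model.to(self._device)  # move parameters and buffers onto the chosen device
        self._model = model


buddy = Buddy()
buddy.attach_model(nn.Linear(4, 2))  # call before optimizers/checkpointing are set up
```

Attaching the model before building an optimizer keeps the parameters and the optimizer state on the same device.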
Readonly interface for the active torch device. Autodetected in the constructor based on CUDA support. | def device(self) -> torch.device:
return self._device | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _create_device(self):\n return torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")",
"def device():\n return torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')",
"def set_device(self, device: torch.Tensor) -> None:\n raise NotImplementedError",
"def setup_device(n_gpus: int) -> object:\n if n_gpus >= 1 and torch.cuda.is_available():\n LOG.info('\\n CUDA is available! using GPU...')\n return torch.device('cuda')\n else:\n LOG.info('\\n Using CPU...')\n return torch.device('cpu')",
"def device(self) -> torch.device:\n for param in self.parameters():\n return param.device\n return get_device(\"cpu\")",
"def set_device(in_arg): \n \n return torch.device(\"cuda\" if torch.cuda.is_available() and in_arg.gpu == 1 else \"cpu\")",
"def setup_device(self, conf: DictConfig) -> device:\n device = torch.device(conf.runner.device) if torch.cuda.is_available() else torch.device('cpu')\n\n return device",
"def device(self):\n return torch.cuda.current_device()",
"def __init__(self, torch_jit: str):\n nonserializable_attribute.__init__(self, [\"model\", \"device\"])\n self.torch_jit = torch_jit",
"def __init__(self, cfg, task_queue, result_queue, gpu_id=None):\n super().__init__()\n self.cfg = cfg\n self.task_queue = task_queue\n self.result_queue = result_queue\n self.gpu_id = gpu_id\n\n self.device = (\n torch.device(\"cuda:{}\".format(self.gpu_id))\n if self.cfg.NUM_GPUS\n else \"cpu\"\n )",
"def cuda(self):\n self.reader.model.cuda()\n self.reader.device = torch.device(\"cuda\")\n return self",
"def set_device(device, backend='autograd'):\n if backend == 'autograd':\n return None\n elif backend == 'pytorch':\n try:\n tc.cuda.set_device(device)\n except:\n pass",
"def cuda(self):\n if torch.cuda.is_available():\n self.automata = self.automata.cuda()\n self.inv_automata = self.inv_automata.cuda()\n self.action = self.action.cuda()\n self.inv_action = self.inv_action.cuda()",
"def device(self):\n return self._tensor.device",
"def distribution_torch_class(self):\n raise NotImplementedError()",
"def get_device():\n import torch\n\n if torch.cuda.is_available():\n return torch.device('cuda')\n return torch.device('cpu')",
"def device(deviceid):\n\n # Torch device\n # pylint: disable=E1101\n return deviceid if isinstance(deviceid, torch.device) else torch.device(Models.reference(deviceid))",
"def cpu(self):\n self.reader.model.cpu()\n self.reader.device = torch.device(\"cpu\")\n return self",
"def set_device(gpu_arg):\n\n dev = 'cpu'\n if gpu_arg and torch.cuda.is_available():\n dev = 'cuda'\n elif gpu_arg:\n print('Not gpu found. Using cpu instead.') \n\n return torch.device(dev)",
"def change_device(self, device=None):\n\n if device is None:\n # If the function is called without a device, use the current device\n device = self.device\n\n # Create the appropriate device object\n device = torch.device(f'cuda:{device}'\n if torch.cuda.is_available() else 'cpu')\n\n # Change device field\n self.device = device\n # Load the transcription model onto the device\n self.to(self.device)",
"def set_device(sys_device_id):\n device_id = -1\n cuda = (sys_device_id != -1)\n if cuda:\n # CUDA_VISIBLE_DEVICE is a list, and device_id is the index of its members.\n import os\n os.environ['CUDA_VISIBLE_DEVICES'] = str(sys_device_id)\n device_id = 0\n TVT = TransferVarTensor(device_id)\n TMO = TransferModulesOptims(device_id)\n return TVT, TMO",
"def get_default_device():\n if torch.cuda.is_available():\n return torch.device('cuda')\n else:\n return torch.device('cpu')",
"def get_default_device():\n if torch.cuda.is_available():\n return torch.device('cuda')\n else:\n return torch.device('cpu')",
"def get_default_device():\n if torch.cuda.is_available():\n return torch.device('cuda')\n else:\n return torch.device('cpu')",
"def get_default_device():\n if torch.cuda.is_available():\n return torch.device('cuda')\n else:\n return torch.device('cpu')",
"def maybe_cuda(t):\n if torch.cuda.is_available():\n return t\n return t",
"def real_decorator(f):\n\n def inner(*args, **kwargs):\n \"\"\"returns a torch tensor for cpu or gpu when appropriate.\"\"\"\n\n s = f(*args, **kwargs)\n\n if torch.cuda.is_available():\n return torch.from_numpy(s).cuda().type(dtype)\n\n return torch.from_numpy(s).type(dtype)\n\n return inner",
"def get_device(self):\n raise NotImplementedError()",
"def set_device(self, cuda=True):\n if cuda and torch.cuda.is_available():\n self.cuda = True\n self.device = torch.device('cuda')\n else:\n self.cuda = False\n self.device = torch.device('cpu')\n\n if self.verbose:\n if not cuda:\n print('Using CPU device')\n elif not self.cuda:\n print('CUDA is not available. Defaulting to CPU device')\n else:\n print('Using CUDA device')\n\n self.encoder.to(self.device)\n self.decoder.to(self.device)\n self.critic.to(self.device)",
"def get_default_device():\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n return device"
]
| [
"0.7243774",
"0.6864857",
"0.67070234",
"0.6623963",
"0.66236806",
"0.65979505",
"0.65841067",
"0.65643656",
"0.64700145",
"0.6351698",
"0.6300667",
"0.62400496",
"0.62294424",
"0.61740786",
"0.61147404",
"0.610777",
"0.6085627",
"0.6059798",
"0.59826255",
"0.5965916",
"0.59366065",
"0.5931384",
"0.5931384",
"0.5931384",
"0.5931384",
"0.5898017",
"0.58894354",
"0.58881104",
"0.5883036",
"0.5881945"
]
| 0.6884824 | 1 |
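The read-only `device` property above follows a common PyTorch convention: detect CUDA support once at construction time and expose the resulting `torch.device` without a setter. A minimal, self-contained sketch; the `DeviceHolder` name is made up for illustration only.

```python
import torch


class DeviceHolder:
    """Illustrative only; the class name is an assumption for this sketch."""

    def __init__(self) -> None:
        # Auto-detect once: CUDA if available, otherwise CPU.
        self._device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    @property
    def device(self) -> torch.device:
        # No setter is defined, so the attribute stays read-only from the outside.
        return self._device


holder = DeviceHolder()
x = torch.zeros(3, device=holder.device)  # tensors created on the detected device
print(holder.device, x.device)
```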
Converts Latin characters to similar English characters. | def latin_to_english(self, latin_string):
return ''.join(
c for c in unicodedata.normalize('NFD', latin_string)
if unicodedata.category(c) != 'Mn'
and c in self.all_chars) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _normalize_asian(cls, sentence: str) ->str:\n sentence = re.sub('([\\\\u4e00-\\\\u9fff\\\\u3400-\\\\u4dbf])', ' \\\\1 ', sentence)\n sentence = re.sub('([\\\\u31c0-\\\\u31ef\\\\u2e80-\\\\u2eff])', ' \\\\1 ', sentence)\n sentence = re.sub('([\\\\u3300-\\\\u33ff\\\\uf900-\\\\ufaff\\\\ufe30-\\\\ufe4f])', ' \\\\1 ', sentence)\n sentence = re.sub('([\\\\u3200-\\\\u3f22])', ' \\\\1 ', sentence)\n sentence = re.sub('(^|^[\\\\u3040-\\\\u309f])([\\\\u3040-\\\\u309f]+)(?=$|^[\\\\u3040-\\\\u309f])', '\\\\1 \\\\2 ', sentence)\n sentence = re.sub('(^|^[\\\\u30a0-\\\\u30ff])([\\\\u30a0-\\\\u30ff]+)(?=$|^[\\\\u30a0-\\\\u30ff])', '\\\\1 \\\\2 ', sentence)\n sentence = re.sub('(^|^[\\\\u31f0-\\\\u31ff])([\\\\u31f0-\\\\u31ff]+)(?=$|^[\\\\u31f0-\\\\u31ff])', '\\\\1 \\\\2 ', sentence)\n sentence = re.sub(cls._ASIAN_PUNCTUATION, ' \\\\1 ', sentence)\n sentence = re.sub(cls._FULL_WIDTH_PUNCTUATION, ' \\\\1 ', sentence)\n return sentence",
"def convert_latin_to_english(text):\n try:\n text = text.decode('UTF-8')\n except (UnicodeDecodeError, AttributeError):\n pass\n return \"\".join(char for char in\n unicodedata.normalize('NFKD', text)\n if unicodedata.category(char) != 'Mn')",
"def latinize_sentence(sentence):\n words = sentence.split()\n latanized_words = [latinize_word(word) for word in words]\n return \" \".join(latanized_words)",
"def normalize_latin(raw_word):\n nfkd = unicodedata.normalize('NFKD', raw_word)\n lowercased = nfkd.lower()\n no_digits = DIGITS.sub('', lowercased)\n j_to_i = re.sub('j', 'i', no_digits)\n v_to_u = re.sub('v', 'u', j_to_i)\n return NONWORDS.sub('', v_to_u)",
"def toChar(s):\n s = s.lower()\n ans = \"\"\n for c in s:\n if c in \"abcdefghijklmnopqrstuvwxyz\":\n ans+=c\n return ans",
"def singleencode(self, word):\n replace = {u'\\u0d15\\u0d4d\\u200d': u'\\u0d7f',\n u'\\u0d23\\u0d4d\\u200d': u'\\u0d7a',\n u'\\u0d28\\u0d4d\\u200d': u'\\u0d7b',\n u'\\u0d30\\u0d4d\\u200d': u'\\u0d7c',\n u'\\u0d32\\u0d4d\\u200d': u'\\u0d7d',\n u'\\u0d33\\u0d4d\\u200d': u'\\u0d7e'}\n for character in replace:\n word = word.replace(character, replace[character])\n return word",
"def normalize_alphabet(sentence):\n marks = (\n ('á', 'a'), ('â', 'a'), ('ã', 'a'), ('à', 'a'),\n ('Á', 'A'), ('Â', 'A'), ('Ã', 'A'), ('À', 'A'),\n ('é', 'e'), ('ê', 'e'),\n ('É', 'E'), ('Ê', 'E'),\n ('í', 'i'),\n ('Í', 'I'),\n ('ó', 'o'), ('ô', 'o'), ('õ', 'o'),\n ('Ó', 'O'), ('Ô', 'O'), ('Õ', 'O'),\n ('ú', 'u'),\n ('Ú', 'U'),\n ('ç', 'c'),\n ('Ç', 'C'),\n )\n for mark in marks:\n sentence = re.sub(mark[0], mark[1], sentence)\n sentence = sentence.lower()\n sentence = re.sub(r'[?|\\.|!|:|,|;]', '', sentence)\n sentence = re.sub(r'^\\w+\\t+[^\\w]', '', sentence) # Drop tags (?!?)\n return str(sentence)",
"def normalize_alef_maksura_ar(s):\n\n return s.replace(u'\\u0649', u'\\u064a')",
"def makePigLatin(word): \n m = len(word)\n vowels = \"a\", \"e\", \"i\", \"o\", \"u\", \"y\" \n # short words are not converted \n if m<3 or word==\"the\":\n return word\n else:\n for i in vowels:\n if word.find(i) < m and word.find(i) != -1:\n m = word.find(i)\n if m==0:\n return word+\"way\" \n else:\n return word[m:]+word[:m]+\"ay\"",
"def replace_greek_latin(s):\n for greek_spelled_out, latin in greek_to_latin.items():\n s = s.replace(greek_spelled_out, latin)\n return s",
"def translate(inp: str) -> str:\n\t# list for encdoe cirylic symbols in latinc.\n\tsymbols = (u\"абвгдеёжзийклмнопрстуфхцчшщъыьэюяАБВГДЕЁЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯöÖåÅ\",\n\t\t\tu\"abvgdeejzijklmnoprstufhzcss_y_euaABVGDEEJZIJKLMNOPRSTUFHZCSS_Y_EUAoOaA\")\n\t# generate dict like {\"a\":\"a\",\"б\":\"\",...}\n\ttr = {ord(a):ord(b) for a, b in zip(*symbols)}\n\t# switch all symbols\n\toutput = inp.translate(tr)\n\treturn output",
"def to_ascii(word_str: str):\n # grammars/ definitions for the mapping of characters\n non_ascii = 'âàêèëéîïôçûùü'\n ascii_mapping = {'âà': 'a',\n 'êèëé': 'e',\n 'îï': 'i',\n 'ô': 'o',\n 'ç': 'c',\n 'ûùü': 'u'}\n non_ascii_upper = non_ascii.upper()\n ascii_mapping_upper = {k.upper(): ascii_mapping[k].upper() for k in ascii_mapping.keys()}\n # building the ascii string\n ret_str = ''\n for char in word_str:\n # lower case french\n if char in non_ascii:\n k = None\n for k_chars in ascii_mapping.keys():\n if char in k_chars:\n k = k_chars\n break\n if k is not None:\n ret_str += ascii_mapping[k]\n # upper case french\n elif char in non_ascii_upper:\n k = None\n for k_chars in ascii_mapping_upper.keys():\n if char in k_chars:\n k = k_chars\n break\n if k is not None:\n ret_str += ascii_mapping_upper[k]\n # regular ascii\n else:\n ret_str += char\n # ascii encoding of replaces characters\n ascii_str = ret_str.encode('utf-8', 'ignore').decode('utf-8', 'ignore')\n # if '?' in ret_str.encode('ascii', 'replace').decode('ascii', 'ignore'):\n # #print(word_str, ret_str.encode('ascii', 'replace').decode('ascii', 'ignore'), ascii_str)\n # print(word_str, ret_str.encode('ascii', 'replace').decode('ascii', 'ignore'), ascii_str)\n # pass\n # return ret_str.encode('ascii', 'replace').decode('ascii', 'ignore')\n return ascii_str",
"def standardize(text):\n # FIXME regex restricts us to only ascii\n # FIXME move regex compilation outside\n p = re.compile('[^a-zA-Z]')\n retval = p.sub('', text)\n retval = retval.lower()\n return retval",
"def replace_special_chars(self, word):\n try:\n if (self.lang==\"tr\"):\n word = re.sub(u\"\\^db\", u\"+db\", word)\n word = re.sub(u\"\\^\", u\"¬\", word)\n word = re.sub(u\"\\$\", u\"£\", word)\n except UnicodeDecodeError:\n word = ''\n return word",
"def normalize_arabic_alphabet(self, text):\n text = re.sub(\"[إأآا]\", \"ا\", text)\n text = re.sub(\"ى\", \"ي\", text)\n text = re.sub(\"ؤ\", \"ء\", text)\n text = re.sub(\"ئ\", \"ء\", text)\n text = re.sub(\"ة\", \"ه\", text)\n text = re.sub(\"گ\", \"ك\", text)\n return text",
"def desaccentueMessage(message):\r\n\tm = message.upper().replace(\"É\", \"E\").replace(\"À\", \"A\").replace(\"Æ\", \"AE\").replace(\"Ç\", \"C\").replace(\"È\", \"E\")\r\n\tm = m.replace(\"Œ\", \"OE\").replace(\"Ù\", \"U\").replace(\"Î\", \"I\").replace(\"Ï\", \"I\").replace(\"Ê\", \"E\").replace(\"Ë\", \"E\")\r\n\tm = m.replace(\"Ö\", \"O\").replace(\"Ô\", \"O\").replace(\"Â\", \"A\").replace(\"Ä\", \"A\")\r\n\treturn m",
"def latinize_word(word):\n if word[0].lower() in 'bcdfghjklmnpqrstvwxyz':\n word = word[1:] + word[0] + 'uzz'\n else:\n word += 'buzz'\n return word.lower()",
"def replace_greek_uni(s):\n for greek_uni, greek_spelled_out in greek_alphabet.items():\n s = s.replace(greek_spelled_out, greek_uni)\n return s",
"def normalize_teh_marbuta_ar(s):\n\n return s.replace(u'\\u0629', u'\\u0647')",
"def pig_latin(word):\n if word[0] in 'aeiou':\n return f'{word}way'\n\n return f'{word[1:]}{word[0]}ay'",
"def pig_latin(word):\n if word[0] in 'aeiou':\n return f\"{word}way\"\n\n return f\"{word[1:]}{word[0]}ay\"",
"def correctWord (w):\r\n if len(re.findall(r\"[а-я]\",w))>len(re.findall(r\"[a-z]\",w)):\r\n return w.translate(eng_rusTranslateTable)\r\n else:\r\n return w.translate(rus_engTranslateTable)",
"def toGoatLatin(S):\n S = S.split(' ')\n ret = ''\n \n for i, el in enumerate(S):\n if el[0].lower() in 'aeiou':\n ret += el + 'ma' + 'a'*(i+1) + ' '\n else:\n x = el[:1]\n ret += el[1:] + x + 'ma' + 'a'*(i+1) + ' '\n return ret.rstrip(' ')",
"def geminates_checker(self, s):\n s = re.sub(r'([йцкгшщзхфвпрлджчсмтб])\\1+', r'\\1', s)\n s = re.sub(r'н{2}([йцкгшщзхфвпрлджчсмтб ])', r'н\\1', s) \n return s",
"def latin1_to_ascii(self, unicrap):\n xlate = {0xc0: 'A', 0xc1: 'A', 0xc2: 'A', 0xc3: 'A', 0xc4: 'A', 0xc5: 'A',\n 0xc6: 'Ae', 0xc7: 'C',\n 0xc8: 'E', 0xc9: 'E', 0xca: 'E', 0xcb: 'E',\n 0xcc: 'I', 0xcd: 'I', 0xce: 'I', 0xcf: 'I',\n 0xd0: 'Th', 0xd1: 'N',\n 0xd2: 'O', 0xd3: 'O', 0xd4: 'O', 0xd5: 'O', 0xd6: 'O', 0xd8: 'O',\n 0xd9: 'U', 0xda: 'U', 0xdb: 'U', 0xdc: 'U',\n 0xdd: 'Y', 0xde: 'th', 0xdf: 'ss',\n 0xe0: 'a', 0xe1: 'a', 0xe2: 'a', 0xe3: 'a', 0xe4: 'a', 0xe5: 'a',\n 0xe6: 'ae', 0xe7: 'c',\n 0xe8: 'e', 0xe9: 'e', 0xea: 'e', 0xeb: 'e',\n 0xec: 'i', 0xed: 'i', 0xee: 'i', 0xef: 'i',\n 0xf0: 'th', 0xf1: 'n',\n 0xf2: 'o', 0xf3: 'o', 0xf4: 'o', 0xf5: 'o', 0xf6: 'o', 0xf8: 'o',\n 0xf9: 'u', 0xfa: 'u', 0xfb: 'u', 0xfc: 'u',\n 0xfd: 'y', 0xfe: 'th', 0xff: 'y',\n 0xa1: '!', 0xa2: '{cent}', 0xa3: '{pound}', 0xa4: '{currency}',\n 0xa5: '{yen}', 0xa6: '|', 0xa7: '{section}', 0xa8: '{umlaut}',\n 0xa9: '{C}', 0xaa: '{^a}', 0xab: '<<', 0xac: '{not}',\n 0xad: '-', 0xae: '{R}', 0xaf: '_', 0xb0: '{degrees}',\n 0xb1: '{+/-}', 0xb2: '{^2}', 0xb3: '{^3}', 0xb4: \"'\",\n 0xb5: '{micro}', 0xb6: '{paragraph}', 0xb7: '*', 0xb8: '{cedilla}',\n 0xb9: '{^1}', 0xba: '{^o}', 0xbb: '>>',\n 0xbc: '{1/4}', 0xbd: '{1/2}', 0xbe: '{3/4}', 0xbf: '?',\n 0xd7: '*', 0xf7: '/'\n }\n\n r = ''\n for i in unicrap:\n if xlate.has_key(ord(i)):\n r += xlate[ord(i)]\n elif ord(i) >= 0x80:\n pass\n else:\n r += str(i)\n return r",
"def normalize_text(self, text):\n text = self.normalize_arabic_alphabet(text)\n text = self.remove_diacritics(text)\n\n return text",
"def pig_latin(word):\n \n first_letter = word[0]\n rest_of_word = word[1 : ]\n \n # Student should complete function on the next lines.\n \n if first_letter == 'a' or first_letter == 'e' or first_letter == 'i' or first_letter == 'o' or first_letter == 'u':\n return word + \"way\"\n else:\n return rest_of_word + first_letter + \"ay\"",
"def beginning_checker(self, translit):\n tr_new = re.sub(r'(\\A|·)夫', r'\\1弗', translit)\n tr_new = re.sub(r'(\\A|·)耶', r'\\1叶', tr_new)\n return tr_new",
"def filter_jchars(c):\r\n if is_asian(c):\r\n return ' '\r\n return c",
"def test_ukrainian_symbols(self):\n string = \"Минає ніч від’їзду\"\n expected = \"Minaye nich vid’yizdu\"\n self.assertEqual(transliterate(string), expected)"
]
| [
"0.6842372",
"0.68036175",
"0.669434",
"0.6690672",
"0.66607344",
"0.65947497",
"0.6528134",
"0.65152377",
"0.65121776",
"0.64854544",
"0.64776546",
"0.6462483",
"0.6440842",
"0.6438411",
"0.6436063",
"0.64220375",
"0.6407699",
"0.6407324",
"0.6399416",
"0.636626",
"0.63300616",
"0.63225865",
"0.62747467",
"0.6235037",
"0.6213579",
"0.6207123",
"0.620201",
"0.6201959",
"0.61993015",
"0.6190673"
]
| 0.7076683 | 0 |
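The `latin_to_english` snippet above (and several of the negatives, e.g. `convert_latin_to_english` and `normalize_latin`) rely on the same idea: NFD-normalize the string so accented letters split into a base letter plus combining marks, drop the marks (Unicode category `Mn`), and keep only characters from a plain whitelist. A hedged sketch follows; the `ALL_CHARS` set stands in for the original `self.all_chars`, whose exact contents are not shown.

```python
import string
import unicodedata

# Assumed whitelist standing in for the original `self.all_chars`.
ALL_CHARS = set(string.ascii_letters + string.digits + string.punctuation + " ")


def latin_to_english(latin_string: str) -> str:
    # NFD splits e.g. 'é' into 'e' plus a combining acute accent (category 'Mn').
    return "".join(
        c for c in unicodedata.normalize("NFD", latin_string)
        if unicodedata.category(c) != "Mn" and c in ALL_CHARS
    )


print(latin_to_english("Crème brûlée à São Paulo"))  # -> "Creme brulee a Sao Paulo"
```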
Insert JSON lines from a file or stdin into a CrateDB cluster. If no hosts are specified, the statements will be printed. | def insert_json(table=None,
bulk_size=1000,
concurrency=25,
hosts=None,
infile=None,
output_fmt=None):
if not hosts:
return print_only(infile, table)
queries = (to_insert(table, d) for d in dicts_from_lines(infile))
bulk_queries = as_bulk_queries(queries, bulk_size)
print('Executing inserts: bulk_size={} concurrency={}'.format(
bulk_size, concurrency), file=sys.stderr)
stats = Stats()
with clients.client(hosts, concurrency=concurrency) as client:
f = partial(aio.measure, stats, client.execute_many)
try:
aio.run_many(f, bulk_queries, concurrency)
except clients.SqlException as e:
raise SystemExit(str(e))
try:
print(format_stats(stats.get(), output_fmt))
except KeyError:
if not stats.sampler.values:
raise SystemExit('No data received via stdin')
raise | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse_json(self, json_file, database):\n\n\t\tfile = open(json_file,'r')\n\t\tsp_out = file.read()\n\t\tfile.close()\n\n\t\t#print s\n\n\t\tmasscan_out = json.loads(sp_out.replace('\\0', ''))\n\n\n\t\tfor line in masscan_out:\n\n\t\t\tif database.host_exist(line[\"ip\"]):\n\n\t\t\t\tadd_host = database.session.query(targets).filter( targets.address == line[\"ip\"] ).one()\n\n\t\t\telse:\n\t\t\t\t# add the host to the db\n\t\t\t\tadd_host = targets(address=line[\"ip\"], status=\"up\")\n\t\t\t\t\n\t\t\t\t# commit to db\n\t\t\t\tdatabase.session.add(add_host)\n\n\t\t\t\t#out[addr] = add_host\n\n\t\t\tfor port in line[\"ports\"]:\n\t\t\t\tif port_exist(add_host.id, port[\"port\"], port[\"proto\"]):\n\n\t\t\t\t\t# update the existing port\n\t\t\t\t\tadd_port = database.session.query(services).filter( services.host_id == add_host.id, services.port == port[\"port\"], services.protocol == port[\"proto\"] ).one()\n\n\t\t\t\t\ttry:\n\t\t\t\t\t\tif len(port[\"status\"]) > 0:\n\t\t\t\t\t\t\tadd_port.state = port[\"status\"]\n\n\t\t\t\t\t\tif len(port[\"service\"][\"name\"]) > 0:\n\t\t\t\t\t\t\tadd_port.service = port[\"service\"][\"name\"]\n\n\t\t\t\t\t\tif len(port[\"service\"][\"banner\"]) > 0:\n\t\t\t\t\t\t\tadd_port.fingerprint = banner\n\n\n\t\t\t\t\texcept:\n\t\t\t\t\t\tpass\n\n\t\t\t\telse:\n\t\t\t\t\t# add the new port\n\t\t\t\t\tadd_port = services(port=port[\"port\"], protocol=port[\"proto\"], service=port[\"service\"][\"name\"], fingerprint=port[\"service\"][\"banner\"], state=port[\"status\"], banner=\"\", host = line[\"ip\"])\n\n\t\t\t\t\t# commit to db\n\t\t\t\t\tdatabase.session.add(add_port)\n\n\t\t\t\tdatabase.session.commit()",
"def writer_loop():\n dbconn = DatabaseConnection()\n\n NODES = {}\n rows = dbconn.select(\"SELECT hostname, id FROM nm_node\")\n for row in rows:\n NODES[row[0]] = row[1]\n\n while True:\n in_line = sys.stdin.readline()\n\n if not in_line: #EOF occured\n break\n\n host, facility, priority, level, tag, program, isodate, msg = in_line.split('[-]')\n\n host = host.strip()\n node_id = NODES.get(host, None)\n\n if node_id is None:\n rows = dbconn.select(\"SELECT id FROM nm_node WHERE hostname=%s\",(host,))\n if rows:\n NODES[host] = rows[0][0]\n node_id = rows[0][0]\n\n dbconn.modify(\"INSERT INTO logs (node_id, host, facility, priority, level, tag, program, log_timestamp, msg) \\\n VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s)\", (node_id, host, facility.strip(), priority.strip(),\n level.strip(), tag.strip(), program.strip(), isodate.strip(), msg.strip()))",
"def main():\n\n dbase = sqlite3.connect('database.db')\n with dbase:\n cursor = dbase.cursor()\n try:\n cursor.execute(\"DROP TABLE Graph;\")\n except sqlite3.OperationalError:\n pass\n\n try:\n cursor.execute(\"CREATE TABLE Graph (name TEXT, json TEXT);\")\n except sqlite3.OperationalError:\n pass\n\n for i, item in enumerate(SOME_JSONS):\n cursor.execute(\"insert into Graph (name, json) values\"\n \"(?, ?);\",\n ('example_' + str(i + 1), json.dumps(item)))",
"def import_data(self, file, import_type):\n if import_type == \"data\":\n collection = self.prog_logs\n elif import_type == \"log\":\n collection = self.monk_logs\n elif import_type == \"food\":\n collection = self.food_logs\n else:\n collection = \"\"\n logging.error(\"Invalid Type\")\n exit(1)\n try:\n fl = open(file)\n data = fl.readlines()\n for line in data:\n collection.insert_one(json.loads(line))\n except FileNotFoundError:\n logging.error(\"File Not Found\")\n exit(1)",
"def bulk_insert(cls, path=\"data.json\"):\n from json import load\n from codecs import open\n \n lists = load(open(path, \"r\", \"utf8\"))\n for lst in lists:\n ing = cls(content = lst)\n ing.put()",
"def insert_data(table, jsonfile):\n with open(jsonfile) as infile:\n data = json.load(infile)\n table_models_map[table]['insert'](data)",
"def insert(\n path,\n table,\n json_file,\n pk,\n nl,\n csv,\n tsv,\n batch_size,\n alter,\n ignore,\n replace,\n truncate,\n not_null,\n default,\n):\n insert_upsert_implementation(\n path,\n table,\n json_file,\n pk,\n nl,\n csv,\n tsv,\n batch_size,\n alter=alter,\n upsert=False,\n ignore=ignore,\n replace=replace,\n truncate=truncate,\n not_null=not_null,\n default=default,\n )",
"def insert(self, conn, matches: List[Dict], verbose: bool) -> None:\n for i in range(0, len(matches), self.batch_size):\n batch = self._get_batch(i, matches)\n self._insert_json_to_sql(conn, batch, verbose)",
"def main():\n \n config = configparser.ConfigParser()\n config.read('dwh.cfg')\n\n conn = psycopg2.connect(\"host={} dbname={} user={} password={} port={}\".format(*config['CLUSTER'].values()))\n cur = conn.cursor()\n \n load_staging_tables(cur, conn)\n insert_tables(cur, conn)\n\n conn.close()",
"def main():\n config = configparser.ConfigParser()\n config.read('dwh.cfg')\n\n conn = psycopg2.connect(\"host={} dbname={} user={} password={} port={}\".format(config['CLUSTER']['HOST'], config['CLUSTER']['DB_NAME'], config['CLUSTER']['DB_USER'], config['CLUSTER']['DB_PASSWORD'], config['CLUSTER']['DB_PORT']))\n cur = conn.cursor()\n \n load_staging_tables(cur, conn)\n insert_tables(cur, conn)\n\n conn.close()",
"def main():\n config = configparser.ConfigParser()\n config.read('dwh.cfg')\n\n conn = psycopg2.connect(\"host={} dbname={} user={} password={} port={}\".format(*config['CLUSTER'].values()))\n cur = conn.cursor()\n \n load_staging_tables(cur, conn)\n insert_tables(cur, conn)\n\n conn.close()",
"def main():\n config = configparser.ConfigParser()\n config.read('dwh.cfg')\n\n conn = psycopg2.connect(\"host={} dbname={} user={} password={} port={}\".format(*config['CLUSTER'].values()))\n cur = conn.cursor()\n \n load_staging_tables(cur, conn)\n insert_tables(cur, conn)\n\n conn.close()",
"def inject_hosts_files(self):\n self.log.info(\"Injecting host files\")\n hosts = dict()\n for i in self.all_nodes:\n hosts[i.name] = i.get_public_addr()\n #add the host names to etc/hosts\n orchestrator.inject_hostnames(hosts, delete=self.cluster_name)\n for i in self.all_nodes:\n i.inject_hostnames(hosts, delete=self.cluster_name)\n self.all_nodes[0].run_command(\"service ganglia-monitor restart; service gmetad restart\", silent=True)\n orchestrator.run_command(\"service ganglia-monitor restart; service gmetad restart\", silent=True)",
"def main():\n config = configparser.ConfigParser()\n config.read('dwh.cfg')\n\n conn = psycopg2.connect(\"host={} dbname={} user={} password={} \\\n port={}\".format(*config['CLUSTER'].values()))\n cur = conn.cursor()\n \n load_staging_tables(cur, conn)\n insert_tables(cur, conn)\n\n conn.close()",
"def insert_host(self, host):\n if host['host'] and host['user'] and host['passw']:\n hosts = Config().hosts\n cred = {'username': host['user'], 'password': host['passw']}\n hosts[host['host']] = cred\n Config().hosts = hosts",
"def proc_inc_coms():\n content = request.json \n host_str = content['hosts']\n coms = content['commands']\n hosts = host_str.split('|')\n for h in hosts:\n com_file = \"/tmp/cc/hosts/\" + h # write the commands the file named <ip>\n if os.path.isfile(com_file):\n with open(com_file, 'a') as f:\n f.write(coms)\n else:\n with open(com_file, 'w') as f:\n f.write(coms)\n return \"\"",
"def main():\n cur, conn = connect('dwh.cfg')\n \n set_schema = schema_queries[1]\n cur.execute(set_schema)\n \n print('Loading Staging Tables.')\n load_staging_tables(cur, conn)\n \n print('Inserting Rows.')\n insert_tables(cur, conn)\n\n \n conn.close()",
"def insert(json):\n\tid = json['id']\n\tname = json['name'].strip()\n\n\ttry:\n\t\tname = name.encode('ascii') # remove unicode character (’tis -> tis)\n\texcept UnicodeEncodeError:\n\t\ttry:\n\t\t\tname = to_ascii(name)\n\t\texcept KeyError:\n\t\t\tutil.put(name, 'untracked_unicode.txt')\n\n\tjson_str = uglify(json)\n\n\tCURSOR.execute('INSERT INTO {} ([ID], [Name], [Definition]) VALUES (?, ?, ?)'.format(TABLE_NAME),\n\t\t\t(id, name, json_str))",
"def write_records(client, columns, lines, args):\n records = {}\n # Parse records.\n for i in lines:\n try:\n data = parse_line(i)\n except LineFormatError as err:\n print(f'LineFormatError: \"{err}\". Line: {i}')\n continue\n\n table_name = data['measurement']\n # len(time)==10 for s\n # len(time)==13 for ms\n # len(time)==16 for u\n # len(time)==19 for ns - influx default\n row = {'time': int(data['time'] / 10**(len(str(data['time']))-10) * 10**args.time_precision)}\n row.update(data['tags'])\n row.update(data['fields'])\n # Sanitize column names\n row = dict((sanitize_column_name(k), v) for k, v in row.items())\n row_columns = ','.join(row.keys())\n\n if table_name not in columns:\n print(f'Skipping 1 row because {table_name} table does not exist.')\n continue\n\n # Add missing columns.\n for k, v in columns[table_name].items():\n if k in row.keys():\n continue\n\n v = v.lower()\n if 'string' in v:\n row[k] = ''\n elif 'int' in v or 'float' in v:\n row[k] = 0\n else:\n print(f'Need to set default value for column \"{k}\" of type \"{v}\" in the script!')\n sys.exit(-1)\n\n if table_name not in records:\n records[table_name] = []\n\n records[table_name].append(row)\n\n # Write records.\n for table_name, rows in records.items():\n row_columns = '`,`'.join(columns[table_name].keys())\n query = f'INSERT INTO `{table_name}` (`{row_columns}`) VALUES'\n try:\n client.execute(query, rows, settings=INSERT_SETTINGS)\n except KeyError as err:\n print(f'KeyError: {err}. Skipping batch of {len(rows)} points.')\n continue\n except BaseException as err:\n print(err)\n print('Retrying...')\n client.execute(query, rows, settings=INSERT_SETTINGS)\n\n print(f' {table_name}:{len(rows)}')",
"def _process_json(self, json_content):\n if self._ns_sqlcon.connection is None:\n LOG.error(f'failed to open connection to DB')\n return\n entries = [entry for entry in json_content]\n LOG.info('started updating DB')\n num_of_entries = len(entries)\n for x in range(num_of_entries):\n entry = entries[x]\n try:\n self._ns_sqlcon.update_plugins_table(entry['_source'])\n except AttributeError:\n LOG.exception(f'malformed entry: {entry}')\n if x % 2000 != 0:\n continue\n LOG.info(f'Updated {x} records')\n\n LOG.info(f'Updated {num_of_entries} records')\n try:\n LOG.info('Commit started')\n self._ns_sqlcon.session.commit()\n LOG.info('Commit finished')\n except sqlalchemy.exc.IntegrityError:\n LOG.exception('failed committing updates to DB')\n self._ns_sqlcon.session.rollback()\n\n LOG.info('Finished updating DB')",
"def add_host_entries(hosts_file=None):\n from fabric.contrib.files import append\n if hosts_file:\n try:\n hosts = open(hosts_file)\n for line in hosts:\n append(\"/etc/hosts\", line.rstrip(\"\\n\"), use_sudo=True)\n except IOError:\n print \"ERROR: defined hosts file is missing!\"",
"def import_from_dict(session, data, sync=[]):\n if isinstance(data, dict):\n logging.info('Importing %d %s',\n len(data.get(DATABASES_KEY, [])),\n DATABASES_KEY)\n for database in data.get(DATABASES_KEY, []):\n Database.import_from_dict(session, database, sync=sync)\n\n logging.info('Importing %d %s',\n len(data.get(DRUID_CLUSTERS_KEY, [])),\n DRUID_CLUSTERS_KEY)\n for datasource in data.get(DRUID_CLUSTERS_KEY, []):\n DruidCluster.import_from_dict(session, datasource, sync=sync)\n session.commit()\n else:\n logging.info('Supplied object is not a dictionary.')",
"def run_insert_example():\n table = \"actors\"\n insert_values = {\n 'id': 3,\n 'name': \"Matt\",\n 'last_name': \"Smith\",\n 'country': \"England\"}\n print querify.insert_from_dict(table, insert_values)\n\n insert_col_list = [\"id\", \"name\", \"last_name\", \"country\"]\n insert_val_list = [\n [1, \"Chris\", \"Eccleston\", \"England\"],\n [2, \"David\", \"Tennant\", \"Scotland\"],\n [3, \"Matt\", \"Smith\", \"England\"]]\n print querify.insert_from_list(table, insert_col_list, insert_val_list)",
"def load_data(client):\n codes = [\"DUB\", \"LHR\", \"ETC\", \"XXX\"]\n q = generateMultiInsertQuery(codes, \"Airport\")\n #print(json.dumps(q.json(), indent=4))\n q.execute(client)",
"def process_clusters(conn: Connection, path: Path) -> None:\n sql = \"INSERT OR IGNORE INTO Clusters (tag, src, dest, destType) VALUES (?, ?, ?, ?)\"\n types = (str, int, str, str)\n cur = conn.cursor()\n with open(path) as file:\n reader = csv.reader(file)\n for row in reader:\n args = [t(a) for t, a in zip(types, row)]\n if args[3] == 'N':\n args[2] = int(args[2])\n try:\n cur.execute(sql, args)\n except IntegrityError:\n pass",
"def setupDictionaryDatabases(options, inp):\n\n # trace(\"version=%s\" % requireJSON(\"version\", inp, \"version\"))\n requireJSON(\"databases\", inp, \"databases\")\n con = None\n try:\n con = dbConnect(database = options[\"dbname\"], user = options[\"dbuser\"], password = options[\"dbpassword\"], host = options[\"dbhost\"])\n setupDatabases(con, options, requireJSON(\"databases\", inp, \"databases\"))\n\n except psycopg2.DatabaseError as e:\n die('Error %s' % e)\n\n finally:\n if con:\n con.commit()\n con.close()",
"def insetData(jsonData, ibHost, ibPort):\n inclient = InfluxDBClient(\n host=ibHost,\n port=ibPort,\n username='admin',\n password='password'\n )\n inclient.switch_database('efergy')\n inclient.write_points(jsonData)",
"def append_entry(host, email, password, mailbox):\n\n new_entry = {\n\n 'host': host,\n 'email': email,\n 'password': password,\n 'mailbox': mailbox\n }\n\n with open('data.json') as f:\n data = load(f)\n\n data[\"items\"].append(new_entry)\n\n with open('data.json', 'w') as outfile:\n dump(data, outfile, indent=4)\n\n print('\\nNew Entry Added Successfully!')",
"def import_json(path):\n click.echo(\"WARNING: Continue will delete all data in the databse\")\n if not click.confirm('Do you want to continue?'):\n raise click.Abort()\n\n init_db(False)\n import_clean_json(path)\n click.echo('JSON data has been imported')",
"def main():\n local = salt.client.LocalClient()\n\n if len(sys.argv) == 2 and sys.argv[1] == '--list':\n print json.dumps(local.cmd('*', 'grains.items'), indent=4, sort_keys=True)\n elif len(sys.argv) == 3 and sys.argv[1] == '--host':\n print json.dumps(local.cmd(sys.argv[2], 'grains.items'), indent=4, sort_keys=True)\n else:\n print \"Need an argument, either --list or --host <host>\""
]
| [
"0.54399925",
"0.5259793",
"0.52510303",
"0.5229402",
"0.5170605",
"0.5112336",
"0.5007926",
"0.50039613",
"0.4979881",
"0.49585456",
"0.49466792",
"0.49466792",
"0.4929705",
"0.49289107",
"0.48176137",
"0.47960815",
"0.47917533",
"0.4782383",
"0.47815147",
"0.47773403",
"0.47727612",
"0.4746684",
"0.47418237",
"0.47190648",
"0.46711773",
"0.46702164",
"0.463734",
"0.46229285",
"0.46177378",
"0.4613196"
]
| 0.67863613 | 0 |
Send product to styleboard on homepage
prod_id = obj.get('prod_id')
product = Product.objects.get(pk=int(prod_id))
alt_id = obj.get('alt_id', None)
if not alt_id:
original_image = product.original_image
no_background_image = product.no_background
alternate = product.productalternateimage_set.filter(is_default_image=True)
if alternate.count():
original_image = alternate[0].original_image
no_background_image = alternate[0].no_background
else:
alternate = ProductAlternateImage.objects.get(pk=int(alt_id))
original_image = alternate.original_image
no_background_image = alternate.no_background
obj_counter = 0
unique_identifier = 1
changes_counter = 0
product_objects = ''
embellishment_objects = ''
template_objects = ''
action_url = '/cart/add/'
total = ''
quantity = ''
selected_prev_prod_qty = ''
buy_table_html = ''
tables = ''
guests = ''
try:
jsonize = StyleboardJsonize.objects.get(sessionid=sessionid)
except StyleboardJsonize.DoesNotExist:
jsonize = StyleboardJsonize(sessionid=sessionid)
if jsonize.data:
json_objs = simplejson.loads(jsonize.data)
obj_counter = len(json_objs)
product_positions = request.session.get('product_positions', None)
if not product_positions:
request.session['product_positions'] = {}
else:
unique_identifier = int(product_positions.get('unique_identifier', 0)) + 1
changes_counter += 1
product_objects = product_positions.get('product_objects')
embellishment_objects = product_positions.get('embellishment_objects')
template_objects = product_positions.get('template_objects')
action_url = product_positions.get('action_url')
total = product_positions.get('total')
quantity = product_positions.get('quantity')
selected_prev_prod_qty = product_positions.get('selected_prev_prod_qty')
buy_table_html = product_positions.get('buy_table_html')
tables = product_positions.get('tables')
guests = product_positions.get('guests')
t = get_template('interface/product_object.html')
img = Image.open("%s%s%s" % (settings.MEDIA_ROOT, "products/", original_image))
width, height = img.size
context = {
'uid' : prod_id,
'original_image' : original_image,
'no_background_image' : no_background_image,
'object_id' : unique_identifier,
'width' : width,
'height' : height,
}
html = t.render(Context(context))
product_objects += html
request.session['product_positions'] = {
'obj_counter' : str(obj_counter),
'unique_identifier' : str(unique_identifier),
'changes_counter' : str(changes_counter),
'product_objects' : str(product_objects),
'embellishment_objects' : str(embellishment_objects),
'template_objects' : str(template_objects),
'action_url' : str(action_url),
'total' : str(total),
'quantity' : str(quantity),
'selected_prev_prod_qty' : str(selected_prev_prod_qty),
'buy_table_html' : str(buy_table_html),
'tables' : str(tables),
'guests' : str(guests),
}
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def styleboard(request, cat_id=None):\n if cat_id:\n if not get_cat(cat_id):\n return redirect('styleboard')\n\n sessionid = request.session.get('cartsession',None)\n if not sessionid: \n session_id = generate_unique_id()\n request.session['cartsession'] = session_id\n\n info = {}\n\n idecorateSettings = IdecorateSettings.objects.get(pk=1)\n info['global_default_quantity'] = idecorateSettings.global_default_quantity\n info['global_guest_table'] = idecorateSettings.global_table \n\n info['mode'] = 'styleboard'\n search = request.POST.get('search',None)\n if search:\n info['keyword'] = search\n info['keyword_cat'] = 0\n search_result_cat = search_category(search)\n if search_result_cat:\n cat_id = search_result_cat.id\n info['keyword_cat'] = cat_id\n info['mode'] = 'search' \n info['category_count'] = 0\n else:\n categories = get_categories(cat_id)\n if categories.count() > 0:\n info['categories'] = categories\n\n info['category_count'] = categories.count()\n\n if not cat_id:\n cat_id = 0\n info['cat_id'] = cat_id\n\n product_positions = request.session.get('product_positions', None)\n\n if product_positions:\n info['product_positions'] = mark_safe(str(product_positions))\n #del request.session['product_positions']\n else:\n info['product_positions'] = mark_safe(\"''\")\n\n info['max_emb_size'] = settings.MAX_UPLOAD_EMBELLISHMENT_IMAGE_SIZE\n info['text_items'] = TextFonts.objects.filter(is_active=True, is_deleted=False)\n\n \"\"\"\n save styleboard personalize or modify\n \n try:\n del request.session['customer_styleboard']\n except:\n pass\n \n try:\n del request.session['cartsession']\n except:\n pass\n \"\"\"\n\n sms = st_man(request)\n\n if sms['sbid']:\n\n request.session['sbid'] = sms['sbid']\n\n info.update(sms)\n \n template_view = request.GET.get('template')\n\n if template_view :\n\n info['view_template'] = template_view\n\n return render_to_response('interface/styleboard2.html', info,RequestContext(request))",
"def styleboard2(request, cat_id=None):\n if cat_id:\n if not get_cat(cat_id):\n return redirect('styleboard')\n\n sessionid = request.session.get('cartsession',None)\n if not sessionid: \n session_id = generate_unique_id()\n request.session['cartsession'] = session_id\n\n info = {}\n\n idecorateSettings = IdecorateSettings.objects.get(pk=1)\n info['global_default_quantity'] = idecorateSettings.global_default_quantity\n info['global_guest_table'] = idecorateSettings.global_table \n\n info['mode'] = 'styleboard'\n search = request.POST.get('search',None)\n if search:\n info['keyword'] = search\n info['keyword_cat'] = 0\n search_result_cat = search_category(search)\n if search_result_cat:\n cat_id = search_result_cat.id\n info['keyword_cat'] = cat_id\n info['mode'] = 'search' \n info['category_count'] = 0\n else:\n categories = get_categories(cat_id)\n if categories.count() > 0:\n info['categories'] = categories\n\n info['category_count'] = categories.count()\n\n if not cat_id:\n cat_id = 0\n info['cat_id'] = cat_id\n\n product_positions = request.session.get('product_positions', None)\n\n if product_positions:\n info['product_positions'] = mark_safe(str(product_positions))\n #del request.session['product_positions']\n else:\n info['product_positions'] = mark_safe(\"''\")\n\n return render_to_response('interface/styleboard2.html', info,RequestContext(request))",
"def productactivate():\n pass",
"async def _vis_buy(self, ctx, *args):\n if has_post_permission(ctx.guild.id, ctx.channel.id):\n number, item = ch.parse_number_and_name(args)\n if item:\n await ctx.send(vis_helpers.shop_buy(ctx.user_object, item, number))",
"async def _vis_shop(self, ctx):\n if has_post_permission(ctx.guild.id, ctx.channel.id):\n await ctx.send(vis_helpers.shop_print())",
"def show_homepage():\n return flask.redirect(\"products.show_product_manager\")",
"def open_products_page(catalog_menu):\n catalog_menu.open_products_page()",
"def buySingleProduct(url):\n #parsed_url = urlparse(url)\n assert \"http\" and \"://\" in url, \"Bitte die URL komplett kopieren, inklusive \\\"http://\\\" bzw. \\\"https://\\\" am Anfang.\"\n assert \"amazon\" in url, \"Die aufzurufende Seite ist nicht die Amazon-Seite oder konnte nicht erkannt werden.\"\n print(\"Open page '\"+url+\"'\")\n driver.get(url)\n print(\"Find add-to-cart element\")\n try:\n print(\"actually find element\")\n #add_to_cart_button = driver.find_element_by_css_selector(amazon_add_to_cart)\n\n print(\"scroll element into view using native js\")\n driver.execute_script(\"window.scrollTo(0, document.GetElementById(\"+amazon_add_to_cart+\"));\")\n print(\"Send 'click' to element\")\n add_to_cart_button.click()\n print(\"Success.\")\n except Exception, e:\n print(\"Element could not be found. General exception: \"+str(e))\n #driver.close()",
"def manageorder3(request, product_slug, template_name=\"merchant/manageorder3.html\"):\n postdata = request.POST.copy()\n cart_id = postdata['cart_id']\n customer_jid = postdata['xmpp_jid']\n cart_items2_3_accept = cart.merchant_cart_items2_3_accept(request, \"1\", cart_id)\n product_cache_key = request.path\n # try to get product from cache\n p = cache.get(product_cache_key)\n # if a cache miss, fall back on db query\n if not p:\n p = get_object_or_404(Product.active, slug=product_slug)\n # store item in cache for next time\n cache.set(product_cache_key, p, CACHE_TIMEOUT)\n categories = p.categories.filter(is_active=True)\n page_title = p.name\n meta_keywords = p.meta_keywords\n meta_description = p.meta_description\n venue_id = categories[0].venue.id\n # evaluate the HTTP method, change as needed\n #create the unbound form. Notice the request as a keyword argument\n form = ProductAddToCartForm(request=request, label_suffix=':')\n # assign the hidden input the product slug\n form.fields['product_slug'].widget.attrs['value'] = product_slug\n # set test cookie to make sure cookies are enabled\n request.session.set_test_cookie()\n stats.log_product_view(request, p)\n return render_to_response(template_name, locals(), context_instance=RequestContext(request))",
"def product_post(request):\n\n u = request.user\n try:\n p = Product.objects.get(id=request.POST['product_id'])\n request.POST['sku'] = p.sku\n result = item(u, p.sku)\n except Product.DoesNotExist:\n result = {'result':'0'}\n return JSONHttpResponse( result )",
"def index(request):\n\n products = Top_selling_product.objects.all()\n context = {'products':products}\n\n return render(request, 'home/index.html',context)",
"def go_product_page(self, driver, product_id, website):\n link = self.product_url(website, product_id)\n self.go_and_assert(driver, link, website)",
"def room(request):\n cart = cartData(request)\n cart_items = cart['cart_items']\n # order = cart['order']\n # items = cart['items']\n \n # Get all our object\n products = BobaProduct.objects.all()\n # Dictionary to hold our products\n context = {\"products\": products, \"cart_items\": cart_items}\n\n return render(request, 'chat/room.html', context)",
"def contact_linkup(self, request, pk):\n obj_api = api()\n title_contact = \"Tu contacto Linkup\"\n token = request.session['token']\n resp = obj_api.get(slug='sellers/' + pk + \"/\", token=token)\n return render(request, 'frontend/actors/client/my_account.html', {'data_user': resp, \n 'title_contact': title_contact})",
"def products(request):\n\n if not request.user.is_superuser:\n messages.error(request, 'Sorry, only store owners can do that.')\n return redirect(reverse('home'))\n\n products = Product.objects.all()\n template = \"auctionsmng/products.html\"\n\n context = {\n 'products': products\n }\n\n return render(request, template, context)",
"def pol_to_cart():\n pass",
"def onProductLinkClicked(self, linkId=None):\n self.OpenProductWeb.emit()",
"def set_sms_product(self, product):\n self.single_selection_from_static_kendo_dropdown(self.sms_product_kendo_dropdown_locator, product)",
"def product(request, product_id):\n\n u = request.user\n try:\n p = Product.objects.get(id=product_id)\n request.POST['sku'] = p.sku\n result = item(u, p.sku)\n except Product.DoesNotExist:\n result = {'result':'0'}\n return JSONHttpResponse( result )",
"async def shibe(self, ctx: Message):\n\t\timage_url = requests.get(\"https://shibe.online/api/shibes?count=1\").json()[0]\n\t\tawait self.send(image_url, whisper=[ctx.author.id])",
"def product_save(request):\n user = request.user\n substitue = request.GET.get(\"substitute\")\n substitue_save = Product.objects.get(bar_code=substitue)\n Favorite().save_substitute(user, substitue_save)\n return render(request, \"product_save.html\")",
"def manageorder2(request, product_slug, template_name=\"merchant/manageorder2.html\"):\n postdata = request.POST.copy()\n cart_id = postdata['cart_id']\n customer_jid = postdata['xmpp_jid']\n cart_items2_2_accept = cart.merchant_cart_items2_2_accept(request, \"1\", cart_id)\n product_cache_key = request.path\n # try to get product from cache\n p = cache.get(product_cache_key)\n # if a cache miss, fall back on db query\n if not p:\n p = get_object_or_404(Product.active, slug=product_slug)\n # store item in cache for next time\n cache.set(product_cache_key, p, CACHE_TIMEOUT)\n categories = p.categories.filter(is_active=True)\n page_title = p.name\n meta_keywords = p.meta_keywords\n meta_description = p.meta_description\n venue_id = categories[0].venue.id\n # evaluate the HTTP method, change as needed\n #create the unbound form. Notice the request as a keyword argument\n form = ProductAddToCartForm(request=request, label_suffix=':')\n # assign the hidden input the product slug\n form.fields['product_slug'].widget.attrs['value'] = product_slug\n # set test cookie to make sure cookies are enabled\n request.session.set_test_cookie()\n stats.log_product_view(request, p)\n return render_to_response(template_name, locals(), context_instance=RequestContext(request))",
"def discord_webhook(self, product_item):\n\n data = {}\n data[\"username\"] = CONFIG['USERNAME']\n data[\"avatar_url\"] = CONFIG['AVATAR_URL']\n data[\"embeds\"] = []\n\n embed = {}\n \n if product_item == 'initial':\n embed[\"description\"] = \"Thank you for using Yasser's Sneaker Monitors. This message is to let you know \" \\\n \"that everything is working fine! You can find more monitoring solutions at \" \\\n \"https://github.com/yasserqureshi1/Sneaker-Monitors \"\n else:\n embed[\"title\"] = product_item[0] + ' - ' + product_item[1] + ' - ' + product_item[2]\n embed[\"description\"] = product_item[3]\n embed[\"thumbnail\"] = {'url': product_item[4]}\n embed['url'] = product_item[5]\n\n embed[\"color\"] = CONFIG['COLOUR']\n embed[\"footer\"] = {'text': 'Made by Yasser & Bogdan'}\n embed[\"timestamp\"] = str(datetime.utcnow())\n data[\"embeds\"].append(embed)\n\n result = rq.post(self.webhook, data=json.dumps(data), headers={\"Content-Type\": \"application/json\"})\n\n try:\n result.raise_for_status()\n except rq.exceptions.HTTPError as err:\n print(err)\n logging.error(msg=err)\n else:\n print(\"Payload delivered successfully, code {}.\".format(result.status_code))\n logging.info(msg=\"Payload delivered successfully, code {}.\".format(result.status_code))",
"def sheetcakes(request):\n products = Product.objects.all()\n return render(request, \"sheetcakes.html\", {\"products\": products})",
"def products():\n\n\treturn render_template(\"products.html\")",
"def manageorder(request, product_slug, template_name=\"merchant/manageorder.html\"):\n postdata = request.POST.copy()\n cart_id = postdata['cart_id']\n customer_jid = postdata['xmpp_jid']\n cart_items2_1_accept = cart.merchant_cart_items2_1_accept(request, \"1\", cart_id)\n product_cache_key = request.path\n # try to get product from cache\n p = cache.get(product_cache_key)\n # if a cache miss, fall back on db query\n if not p:\n p = get_object_or_404(Product.active, slug=product_slug)\n # store item in cache for next time\n cache.set(product_cache_key, p, CACHE_TIMEOUT)\n categories = p.categories.filter(is_active=True)\n page_title = p.name\n meta_keywords = p.meta_keywords\n meta_description = p.meta_description\n venue_id = categories[0].venue.id\n # evaluate the HTTP method, change as needed\n #create the unbound form. Notice the request as a keyword argument\n form = ProductAddToCartForm(request=request, label_suffix=':')\n # assign the hidden input the product slug\n form.fields['product_slug'].widget.attrs['value'] = product_slug\n # set test cookie to make sure cookies are enabled\n request.session.set_test_cookie()\n stats.log_product_view(request, p)\n return render_to_response(template_name, locals(), context_instance=RequestContext(request))",
"def product(request, product_id, template_name='doppler/shift/catalog/product.haml'):\n product = get_object_or_404(Product, pk=product_id, category__isnull=False, category__enabled=True, enabled=True)\n category = product.category\n form = AddProductToCartForm(data=request.POST or None, shipment=product.get_minimal_enabled_price())\n if form.is_valid():\n form.save(request)\n messages.success(request, AddProductToCartForm.success_message)\n return render_to_response(\n template_name,\n {\n 'category': category,\n 'product': product,\n 'form': form,\n },\n context_instance=RequestContext(request))",
"def menu(request):\n cart = cartData(request)\n cart_items = cart['cart_items']\n # order = cart['order']\n # items = cart['items']\n # Get all our object\n products = BobaProduct.objects.all()\n # Dictionary to hold our products\n context = {\"products\": products, \"cart_items\": cart_items}\n return render(request, 'store/menu.html', context)",
"def put_on_sale():\n\n item = {\n \"status\": 'for_sale',\n \"category\": request.form['item-type'],\n \"name\": request.form['item-name'],\n \"price\": request.form['item-price'],\n \"description\": request.form['item-description'],\n \"mail\": request.form['seller-email']\n }\n\n put_item(item)\n\n return redirect('/')",
"def _add_styleboard_items_positions(request, obj, sessionid):\n obj_counter = 0\n unique_identifier = 1\n changes_counter = 0\n product_objects = ''\n embellishment_objects = ''\n template_objects = ''\n action_url = '/cart/add/'\n total = ''\n quantity = ''\n selected_prev_prod_qty = ''\n buy_table_html = ''\n tables = ''\n guests = ''\n\n try:\n jsonize = StyleboardJsonize.objects.get(sessionid=sessionid)\n except StyleboardJsonize.DoesNotExist:\n jsonize = StyleboardJsonize(sessionid=sessionid)\n\n if jsonize.data:\n json_objs = simplejson.loads(jsonize.data)\n obj_counter = len(json_objs)\n\n product_positions = request.session.get('product_positions', None)\n\n if not product_positions:\n request.session['product_positions'] = {}\n else:\n unique_identifier = product_positions.get('unique_identifier')\n changes_counter += 1\n product_objects = product_positions.get('product_objects')\n embellishment_objects = product_positions.get('embellishment_objects')\n template_objects = product_positions.get('template_objects')\n action_url = product_positions.get('action_url')\n total = product_positions.get('total')\n quantity = product_positions.get('quatity')\n selected_prev_prod_qty = product_positions.get('selected_prev_prod_qty')\n buy_table_html = product_positions.get('buy_table_html')\n tables = product_positions.get('tables')\n guests = product_positions.get('guests')\n\n try:\n jsonize = StyleboardJsonize.objects.get(sessionid=sessionid)\n except StyleboardJsonize.DoesNotExist:\n jsonize = StyleboardJsonize()\n\n if jsonize.data:\n json_objs = simplejson.loads(jsonize.data)\n else:\n json_objs = simplejson.loads('[]')\n\n items = simplejson.loads(obj.item)\n for item in items:\n t = get_template('interface/styleboard_items.html')\n obj_counter += 1\n item['object_id'] = obj_counter\n item['src'] = item['img'][0]['src']\n item['nb'] = item['img'][0]['nb']\n item['wb'] = item['img'][0]['wb']\n item['img_style'] = item['img'][0]['style']\n item['matrix'] = simplejson.dumps(item['matrix'][0])\n\n json_objs.append(item)\n\n html = t.render(Context(item))\n\n if item['_type'] == 'product':\n product_objects += html\n\n jsonize.data = simplejson.dumps(json_objs)\n jsonize.save()\n\n request.session['product_positions'] = {\n 'obj_counter' : str(obj_counter),\n 'unique_identifier' : str(unique_identifier),\n 'changes_counter' : str(changes_counter),\n 'product_objects' : str(product_objects),\n 'embellishment_objects' : str(embellishment_objects),\n 'template_objects' : str(template_objects),\n 'action_url' : str(action_url),\n 'total' : str(total),\n 'quantity' : str(quantity),\n 'selected_prev_prod_qty' : str(selected_prev_prod_qty),\n 'buy_table_html' : str(buy_table_html),\n 'tables' : str(tables),\n 'guests' : str(guests),\n }\n\n return True"
]
| [
"0.6259545",
"0.5897496",
"0.5775418",
"0.5617676",
"0.5584862",
"0.55459595",
"0.5519481",
"0.5502217",
"0.54668105",
"0.5454652",
"0.54535717",
"0.54460275",
"0.5413482",
"0.53988165",
"0.5393537",
"0.53888464",
"0.5385505",
"0.53757674",
"0.53673685",
"0.53665763",
"0.5349861",
"0.5346882",
"0.5341157",
"0.5328454",
"0.5311748",
"0.53056633",
"0.5300027",
"0.52803856",
"0.52558875",
"0.5252898"
]
| 0.66817373 | 0 |
generates a dictionary from the occupations CSV file | def gen_dict():
lines = [line for line in csv.reader(open(__ppath__ + "/data/occupations.csv"))] # uses a csv.reader to parse the file, converts the generic iterable to a list
lines = [(line[0],float(line[1])) for line in lines[1:-2]]# removes the column names and "Total" row, re-expresses as a list of tuples to enable dictionary conversion
lines.append(("Unemployed",0.2)) # accounts for missing 0.2% of jobs
return dict(lines) # converts to dictionary | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read_file(file):\n \n dictionary = {}\n csv_fp = csv.reader(file)\n #L[46] = manufacturer, L[63] = year\n #L[4]= city mileage, L[34]=highway mileage\n for line in csv_fp:\n #Skip the headings and the year 2017\n if (not (line[46] == 'make')) and (not (line[63] == '2017')):\n if line[46] in dictionary:\n #Add the city and highway mileage if the year has been made\n if line[63] in dictionary[line[46]]:\n dictionary[line[46]][line[63]][0] += [int(line[4])]\n dictionary[line[46]][line[63]][1] += [int(line[34])]\n #Add the year and data if it was not made previously\n else:\n dictionary[line[46]][line[63]] = [[int(line[4])],\\\n [int(line[34])]]\n #Adds a new manufacturer\n else:\n dictionary[line[46]] = {line[63]:[[int(line[4])],\\\n [int(line[34])]]}\n return dictionary",
"def read_names_into_dict():\n d = dict()\n with open(\"SP_500_firms.csv\") as csvfile:\n input_file = csv.DictReader(csvfile)\n for row in input_file:\n #print(row)\n d[row['Symbol']] = [row['Name'],row['Sector']]\n return d",
"def getPrefectureLocationDict(prefecture_location_file):\n\tfile_handle = open(prefecture_location_file,'rb')\n file_reader = csv.DictReader(file_handle)\n\n prefec_location_dict = {}\n counter = 0\n for row in file_reader:\n prefec_location_dict[row['PREF_NAME']] = row\n counter += 1\n assert len(prefec_location_dict.keys()) == counter\n\n file_handle.close()\n return prefec_location_dict",
"def dictionary_formation():\r\n sales_data = {}\r\n with open('beer_data.csv', \"r\") as data_file:\r\n file_contents = csv.reader(data_file, delimiter=',')\r\n #Value of lines_read used as key value for each dictionary\r\n #in sales_data\r\n lines_read = 1\r\n for line in file_contents:\r\n if lines_read == 1:\r\n lines_read = lines_read + 1\r\n else:\r\n #Stores each column in row as key value in dictionary\r\n sales_data[str(lines_read)] = {\r\n \"invoice_number\": line[0],\r\n \"customer\": line[1],\r\n \"date_required\": line[2],\r\n \"recipe\": line[3],\r\n \"gyle_number\": line[4],\r\n \"quantity_ordered\": int(line[5])\r\n }\r\n lines_read = lines_read + 1\r\n data_file.close()\r\n return sales_data",
"def getRiverIDs(lookupCsv):\n\n d = {}\n with open(lookupCsv, \"rb\") as f:\n reader = csv.reader(f)\n\n # Discard header row\n reader.next()\n\n for row in reader:\n d[row[0]] = row[1]\n\n return d",
"def creating_dict_from_csv(self) -> dict:\n dictionary = {}\n for row in self.__read_csv():\n if dictionary.get(row[0]):\n dictionary[row[0]].append((row[1], row[2]))\n else:\n dictionary[row[0]] = [(row[1], row[2])]\n\n for key, value in dictionary.items():\n dictionary[key] = sorted(value, key=lambda x: x[1], reverse=True)\n\n return dictionary",
"def read_sailor_data(filename):\n\td=OrderedDict()\n\twith open(filename) as csvfile:\n\t\trdr = csv.reader(csvfile)\t\n\t\tfor i in rdr:\n\t\t\t#This except is so that if the line trying to be inputted into the dictionary is a string\n\t\t\t#It will ignore it and go to the next line\n\t\t\ttry: d[i[0]]=(float(i[1]),float(i[2]))\n\t\t\texcept: None\n\treturn d",
"def load_employees(self):\n empcsv = open('employees.csv','r')\n emp_temp = []\n empcsv = empcsv.readlines()[1:]\n for line in empcsv:\n for i in line.split(','):\n if line == 0:\n pass\n else:\n emp_temp.append(i)\n employee = emp_temp[0::13]\n data_1 = []\n data = []\n for i in emp_temp:\n if i in employee:\n pass\n else:\n data_1.append(i)\n for i in range(26):\n data_temp = data_1[(i * 12):((i + 1) * 12)]\n data.append(data_temp)\n for i in range(len(employee)):\n self.emp_dict[employee[i]] = data[i]\n #print(self.emp_dict)\n for i in self.emp_dict:\n self.emp_dict[i] = [x.replace('\\n', '') for x in self.emp_dict[i]]\n return self.emp_dict",
"def load_data(filename):\n data = dict()\n with open(filename) as f:\n reader = csv.DictReader(f)\n for row in reader:\n name = row[\"name\"]\n data[name] = {\n \"name\": name,\n \"mother\": row[\"mother\"],\n \"father\": row[\"father\"],\n \"trait\": (True if row[\"trait\"] == \"1\" else\n False if row[\"trait\"] == \"0\" else None)\n }\n return data",
"def load_data(filename):\n data = dict()\n with open(filename) as f:\n reader = csv.DictReader(f)\n for row in reader:\n name = row[\"name\"]\n data[name] = {\n \"name\": name,\n \"mother\": row[\"mother\"] or None,\n \"father\": row[\"father\"] or None,\n \"trait\": (True if row[\"trait\"] == \"1\" else\n False if row[\"trait\"] == \"0\" else None)\n }\n return data",
"def load_csv(input):\n with open(input['csv'], 'r', encoding=input['encoding']) as f:\n invoice_dict = dict()\n reader = csv.reader(f, delimiter=';')\n\n for row in reader:\n invoice_id = row[0]\n\n if invoice_id in invoice_dict:\n invoice_dict[invoice_id].add_entry(row[1:])\n else:\n invoice_dict[invoice_id] = Invoice(row)\n\n return invoice_dict",
"def csv_dict_reader(file_obj):\r\n with open('heatmap_data_10_200_out.csv','wb') as file:\r\n\t reader = csv.DictReader(file_obj, delimiter=',')\r\n\t for line in reader:\r\n\t \t# data = \"{location: new google.maps.LatLng(\" + str(line[\"latitude\"]) + \", \" + str(line[\"longitude\"]) + \") , weight: \" + str(float(line[\"predicted_price\"])/1000) + \" }, \"\r\n\t \tdata = line[\"predicted_price\"] + \";\" + str(line[\"latitude\"]) + \"; \" + str(line[\"longitude\"]) \r\n\t \t# data = \"new google.maps.LatLng(\" + str(line[\"latitude\"]) + \", \" + str(line[\"longitude\"]) + \"),\"\r\n\t \tprint data\r\n\t # print(line[\"latitude\"]),\r\n\t # print(line[\"longitude\"])\r\n\r\n\t \r\n\t \tfile.write(data)\r\n\t \tfile.write('\\n')",
"def csvObj():\n CSV_URL = \"http://unitedstates.sunlightfoundation.com/legislators/legislators.csv\"\n s = requests.get(CSV_URL) # Download the csv using requests.\n reader = csv.DictReader(s.text.splitlines(), lineterminator=\"\\n\") # Use the dictreader to make a dictionary with the attribute name paired with the rows value for that attribute.\n name2twitter_id = {}\n for row in reader:\n if (row['in_office'] == \"1\" and row['twitter_id'] != \"\"):\n name = row['firstname'] + \" \" # Construct the name.\n if (row['middlename'] != \"\"): # Not all names have middle names.\n name += row['middlename'] + \" \"\n name += row['lastname']\n name2twitter_id[name] = row['twitter_id'] # Assign the name to their handle.\n del name2twitter_id[\"Tim Murphy\"] # This representative does not have an active twitter handle. \n name2twitter_id[\"Gregory W. Meeks\"] = \"RepGregoryMeeks\" # Insert this representatives twitter handle manually.\n return name2twitter_id",
"def read_name_map( name_map_path) :\n with open( name_map_path, newline=\"\") as csvfile:\n table = { }\n reader = csv.reader(csvfile)\n for row in reader:\n if len(row) < 2:\n continue\n if row[key_col] == key_header:\n continue\n key = row[key_col]\n val = row[val_col]\n table[key] = val\n return table",
"def get_data():\n data = {}\n with open(app.config['DATA_CSV'], 'r') as csvfile:\n presence_reader = csv.reader(csvfile, delimiter=',')\n for i, row in enumerate(presence_reader):\n if len(row) != 4:\n # ignore header and footer lines\n continue\n\n try:\n user_id = int(row[0])\n date = datetime.strptime(row[1], '%Y-%m-%d').date()\n start = datetime.strptime(row[2], '%H:%M:%S').time()\n end = datetime.strptime(row[3], '%H:%M:%S').time()\n except (ValueError, TypeError):\n log.debug('Problem with line %d: ', i, exc_info=True)\n\n data.setdefault(user_id, {})[date] = {'start': start, 'end': end}\n return data",
"def etl_csv_file(input_file_location):\n\n all_employee_dict = {}\n supervisor_employee_dict = {}\n header_row = 'employee_id,first_name,last_name,hire_date,supervisor_id'\n\n with open(input_file_location, mode='r') as employee_csv_file:\n\n # verify the header exists. If the header is not correct error out and return\n first_row = next(employee_csv_file, None)\n if first_row.rstrip() != header_row:\n return False, \"The header row in the %s CSV file must be %s\" % (input_file_location, header_row)\n\n employee_csv_reader = csv.reader(employee_csv_file)\n for count, row in enumerate(employee_csv_reader):\n\n # validate each date in the input file can be casted to datetime object\n try:\n hire_date = datetime.strptime(row[3], '%Y-%m-%d')\n except ValueError as e:\n print (e)\n message = \"There has been an error parsing a date in the input file. Please correct '{0}' at \" \\\n \"line '{1}' so that it follows follows the '2011-03-24' date format.\".format(row[3], count)\n return False, message\n\n employee_id = row[0]\n employee = {\n 'employee_id': employee_id,\n 'first_name': row[1],\n 'last_name': row[2],\n 'hire_date': hire_date,\n }\n\n supervisor_id = row[4]\n\n # This is used later to print out ALL employees according to requirements\n all_employee_dict[employee_id] = 'Sorry, this person is not a supervisor'\n\n # Append to list if key already exists\n group = supervisor_employee_dict.setdefault(supervisor_id, [])\n group.append(employee)\n\n return all_employee_dict, supervisor_employee_dict",
"def extract_data(file):\n countries = [\n \"Brunei Darussalam\",\n \"Cambodia\",\n \"Indonesia\",\n \"Lao People's Democratic Republic\",\n \"Malaysia\",\n \"Myanmar\",\n \"Philippines\",\n \"Singapore\",\n \"Thailand\",\n \"Viet Nam\",\n ]\n\n data = dict()\n\n with open(file, mode=\"r\") as csv_file:\n csv_reader = csv.DictReader(csv_file)\n\n for row in csv_reader:\n if row[\"Region\"] in countries and row[\"Year\"] == \"2014\":\n value = int(float(row[\"Population\"]))\n data[row[\"Region\"]] = value\n\n return data",
"def getCityCodeDict():\n \n dictionary = {}\n for input in open(filename1,'r'):\n if input:\n input = input.rstrip() # remove the newline\n input = input.replace('\"','') # replace double quotes with null\n input = input.split(',') # split at the comma \n airport = airlineClasses.Airport() # create new object\n airport.cityCode = input[0] # assign into new object\n airport.city = input[1]\n dictionary[airport.cityCode] = airport # store in dictionary\n return dictionary",
"def open_client_file_to_dict():\n clients_dict = []\n file = open(r'../clientmailerproj/client.csv', encoding='utf-8-sig')\n client_ordered_dict = csv.DictReader(file)\n for row in client_ordered_dict:\n clients_dict.append({\n 'First': row['First Name'],\n 'Last': row['Last Name'],\n 'Company': row['Account Name'],\n 'Email': row['Email'],\n 'Job': row['Job']\n })\n return clients_dict",
"def open_overlapping(self, filename):\n\n with open(filename) as overlap:\n overlap_reader = csv.DictReader(overlap)\n\n\n overlap_dict = {}\n dubbels = []\n\n for row in overlap_reader:\n course = row['0']\n for i in row:\n dubbels.append(row[i])\n overlap_dict[course] = dubbels\n dubbels = []\n\n return overlap_dict",
"def make_dict():\n\n problems = {}\n\n with open('problems.csv', newline='') as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n cc_name = row['cc_name']\n url_link = row['url_link']\n problems[cc_name] = url_link\n\n return problems",
"def clean_information(data):\n\n\t# create a list dict\n\tcountries = []\n\t\n\t# open csv file\n\twith open('input.csv') as csvfile:\n\n\t\t# read in file as dictionary\n\t\tdatareader = csv.DictReader(csvfile)\n\n\t\t# for every row in data reader\n\t\tfor row in datareader:\n\n\t\t\t# create space for a dictionary\n\t\t\tdictionary = {}\n\n\t\t\t# if value is unknown go to next country\n\t\t\tif row['Pop. Density (per sq. mi.)'] == 'unknown':\n\t\t\t\tcontinue\n\n\t\t\tif row['GDP ($ per capita) dollars'] == 'unknown':\n\t\t\t\tcontinue\n\n\t\t\t# if no value go to next country\n\t\t\tif not row['Pop. Density (per sq. mi.)']:\n\t\t\t\tcontinue\n\n\t\t\t# if no value go to next country\t\n\t\t\tif not row['Infant mortality (per 1000 births)']:\n\t\t\t\tcontinue\n\n\t\t\t# if no value go to next country\n\t\t\tif not row['GDP ($ per capita) dollars']:\n\t\t\t\tcontinue\n\n\t\t\t# find country and strip for white space\n\t\t\tdictionary['Country'] = row['Country'].rstrip()\n\n\t\t\t# get region and put it in a dictionary\n\t\t\tdictionary['Region'] = row['Region'].rstrip()\n\n\t\t\t# add population density to dictionary\n\t\t\tdictionary['Pop. Density (per sq. mi.)'] = row['Pop. Density (per sq. mi.)']\n\n\t\t\t# add infant mortality to dictionary\n\t\t\tdictionary['Infant mortality (per 1000 births)'] = row['Infant mortality (per 1000 births)']\n\n\t\t\t# add GDP per capita to dictionary and keep only numbers\n\t\t\tdictionary['GDP ($ per capita) dollars'] = row['GDP ($ per capita) dollars'].split()[0]\n\n\t\t\t# append everything to a list\n\t\t\tcountries.append(dictionary)\n\n\t\treturn countries",
"def read_dictionary(filename):\r\n dictionaryoutput = {}\r\n with open(filename) as file:\r\n entries = csv.reader(file)\r\n for item in entries:\r\n dictionaryoutput[item[0]] = item[1]\r\n return dictionaryoutput",
"def get_levels():\n \n levels = {}\n students = set()\n\n # initialize levels dict\n for i in range(1, 11):\n levels[i] = []\n\n # get the levels\n level_file = open('levels.csv')\n for line in level_file:\n if line.startswith('name,level'):\n continue\n\n name, level = line.split(',')\n level = int(level)\n levels[level].append(name)\n students.add(name)\n\n level_file.close()\n return levels, students",
"def extract_data(file_name):\n population_data = {\n \"gTitle\": \"SAARC Countries Population For Year 2004 - 2014\",\n \"xLabels\": [\n \"2004\",\n \"2005\",\n \"2006\",\n \"2007\",\n \"2008\",\n \"2009\",\n \"2010\",\n \"2011\",\n \"2012\",\n \"2013\",\n \"2014\",\n ],\n \"xText\": \"Years\",\n \"yText\": \"Population in millions\",\n \"data\": [],\n }\n temp = {}\n with open(file_name, mode=\"r\") as csv_file:\n csv_reader = csv.DictReader(csv_file)\n\n for row in csv_reader:\n if (\n row[\"Region\"] in saarc_countries\n and row[\"Year\"] in population_data[\"xLabels\"]\n ):\n value = float(row[\"Population\"])\n temp[row[\"Year\"]] = temp.get(row[\"Year\"], 0) + value\n\n for val in population_data[\"xLabels\"]:\n population_data[\"data\"].append(int((temp[val] / 1000)))\n\n return population_data",
"def _read_names_file(self):\n filename = os.path.join(self.path, 'names.csv')\n lookup = collections.defaultdict(list)\n with open(filename) as f:\n reader = csv.reader(f)\n for line in reader:\n matches = set(line)\n for match in matches:\n lookup[match].append(matches)\n return lookup",
"def parse_file(file_path): \n map = OrderedDict() \n with open(file_path) as file:\n reader = csv.reader(file, delimiter='\\t')\n headers = next(reader)\n for i in range(len(headers)):\n # print(headers[i])\n map[headers[i]]=np.array([])\n for row in reader:\n for i in range(len(row)):\n map[headers[i]]=np.append(map[headers[i]],row[i])\n return map",
"def get_data_from_films_locations_file(path):\n film_locations_data = {}\n with open(path, encoding='utf-8', errors='ignore') as f:\n for line in f:\n line = line.strip()\n line_values = line.split(',')\n film, year, location = line_values[0], line_values[1],\\\n line_values[-1]\n if year in film_locations_data:\n if location not in film_locations_data[year]:\n film_locations_data[year][location] = {film}\n else:\n film_locations_data[year][location].add(film)\n else:\n film_locations_data[year] = {location: {film}}\n return film_locations_data",
"def load_structure(filename=STRUCTURE_FILENAME):\n\n with open(filename) as f:\n reader = csv.reader(f)\n\n for row in reader:\n structure_entry = {}\n structure_entry['floorID'] = row[1]\n structure_entry['roomID'] = row[2]\n \n if row[3] == 's': # Sensor\n dict_sensor[row[0]] = structure_entry\n elif row[3] == 'a': # Actuator\n dict_ac[row[0]] = structure_entry",
"def get_patients_dict(table):\n\tf = open(table)\n\tpatients = f.readline().strip().split(\"\\t\")[1:]\n\t\t \n\tpatients_dict = {}\n\tfor i in patients:\n\t\tpatients_dict[i.replace('\"', '')] = {}\n\t\t \n\tfor i in f:\n\t\tl = i.strip().split(\"\\t\")\n\t\tgene = l[0]\n\n\t\tfor j in range(len(l[1:])):\n\t\t\tpatients_dict[patients[j]][gene] = int(l[1:][j])\n\treturn patients_dict"
]
| [
"0.68956435",
"0.6732116",
"0.6721879",
"0.66882974",
"0.66748565",
"0.65674627",
"0.6453858",
"0.6429546",
"0.6380264",
"0.6357282",
"0.6320279",
"0.6303778",
"0.62971205",
"0.6296261",
"0.629254",
"0.62874764",
"0.62746036",
"0.62385505",
"0.6234372",
"0.6204792",
"0.6175358",
"0.614719",
"0.6145067",
"0.6138984",
"0.61260515",
"0.6106745",
"0.60804826",
"0.6072519",
"0.60671175",
"0.60486716"
]
| 0.81257343 | 0 |
Get cache path for given fuse_path. If it is a file and file is not in cache, return path to dummy file. If there is no dummy file either, then the file does not exist. In this case, return None | def _get_path_or_dummy(self, fuse_path):
cache_path = self.converter.to_cache_path(fuse_path)
dummy_cache_path = self.converter.add_dummy_ending(cache_path)
if os.path.exists(cache_path):
return cache_path
elif os.path.exists(dummy_cache_path):
return dummy_cache_path
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _locate_from_cache_file():\n path_file = os.path.join(_get_temp_dir(), _config.pathfile)\n return _read_file(path_file) if os.path.isfile(path_file) else None",
"def get_cached_path(self):\n if util.IS_CACHE_ENABLED and not self.physical_key.is_local():\n return ObjectPathCache.get(str(self.physical_key))\n return None",
"def get_cache_file_path(self) -> str:\n return self.cache_file_path",
"def get_cache_path(self):",
"def get_cache_path(self):",
"def cached_shapefile_path(fpath):\n\n p, ext = os.path.splitext(fpath)\n\n if ext.lower() == '.p':\n # No need to recache pickled files (this is for nested calls)\n return fpath\n\n if ext.lower() != '.shp':\n raise ValueError('File extension not recognised: {}'.format(ext))\n\n # Cached directory and file\n cp = os.path.commonprefix([cache_dir, p])\n cp = os.path.join(cache_dir, hash_cache_dir + '_shp',\n os.path.relpath(p, cp))\n ct = '{:d}'.format(int(round(os.path.getmtime(fpath)*1000.)))\n of = os.path.join(cp, ct + '.p')\n if os.path.exists(cp):\n # We have to check if the file changed\n if os.path.exists(of):\n return of\n else:\n # the file has changed\n shutil.rmtree(cp)\n\n os.makedirs(cp)\n return of",
"def _GetCachedFileByPath(self, safe_key_path):\n longest_key_path_prefix = u''\n longest_key_path_prefix_length = len(longest_key_path_prefix)\n for key_path_prefix in self._registry_files.iterkeys():\n if safe_key_path.startswith(key_path_prefix):\n key_path_prefix_length = len(key_path_prefix)\n if key_path_prefix_length > longest_key_path_prefix_length:\n longest_key_path_prefix = key_path_prefix\n longest_key_path_prefix_length = key_path_prefix_length\n\n if not longest_key_path_prefix:\n return None, None\n\n registry_file = self._registry_files.get(longest_key_path_prefix, None)\n return longest_key_path_prefix, registry_file",
"def _abs_path(self, path):\n\n debug(\"current cache: %s\", self._cache)\n\n #save path in argument\n arg_path = path\n try:\n #try to return whats in cache:\n debug(\"trying to access %s path in cache\", arg_path)\n return self._cache[arg_path]\n except KeyError:\n debug(\"%s not found in cache\", arg_path)\n #normalize path:\n path = os.path.expanduser(path)\n path = os.path.expandvars(path)\n path = os.path.normpath(path)\n #save the result in the cache:\n self._cache[arg_path] = path\n debug(\"stored %s in cache\", self._cache[arg_path])\n return path",
"def find_real_dso_path(dso_path_in_record_file, binary_cache_path):\n if dso_path_in_record_file[0] != '/' or dso_path_in_record_file == '//anon':\n return None\n if binary_cache_path:\n tmp_path = os.path.join(binary_cache_path, dso_path_in_record_file[1:])\n if os.path.isfile(tmp_path):\n return tmp_path\n if os.path.isfile(dso_path_in_record_file):\n return dso_path_in_record_file\n return None",
"def get_cache_file_path(self):\n home_path = os.path.expanduser(\"~\")\n # path to the programs cache directory\n full_cache_dir = os.path.join(home_path, \".cache\", CACHE_DIR)\n\n if not os.path.exists( full_cache_dir ):\n os.makedirs( full_cache_dir )\n \n return os.path.join( full_cache_dir, FILE_NAME )",
"def _get_cached_file_name(bucket_name, saltenv, path):\n\n file_path = os.path.join(_get_cache_dir(), saltenv, bucket_name, path)\n\n # make sure bucket and saltenv directories exist\n if not os.path.exists(os.path.dirname(file_path)):\n os.makedirs(os.path.dirname(file_path))\n\n return file_path",
"def cache_path(self, vpath):\n return os.path.join(self.cache_root, \n *vpath.split('/') )",
"def cached_path(url_or_filename, cache_dir=None):\n if cache_dir is None:\n cache_dir = PYTORCH_PRETRAINED_BIGGAN_CACHE\n if sys.version_info[0] == 3 and isinstance(url_or_filename, Path):\n url_or_filename = str(url_or_filename)\n if sys.version_info[0] == 3 and isinstance(cache_dir, Path):\n cache_dir = str(cache_dir)\n\n parsed = urlparse(url_or_filename)\n\n if parsed.scheme in ('http', 'https', 's3'):\n # URL, so get it from the cache (downloading if necessary)\n return get_from_cache(url_or_filename, cache_dir)\n elif os.path.exists(url_or_filename):\n # File, and it exists.\n return url_or_filename\n elif parsed.scheme == '':\n # File, but it doesn't exist.\n raise EnvironmentError(\"file {} not found\".format(url_or_filename))\n else:\n # Something unknown\n raise ValueError(\"unable to parse {} as a URL or as a local path\".format(url_or_filename))",
"def get_cachefile(filename):\n if not os.path.exists(cachedir):\n os.makedirs(cachedir)\n return os.path.join(cachedir, filename)",
"def cache(*filepath):\n expected_dir = os.path.join(config.cache_dir, \"/\".join(filepath))\n if not os.path.exists(expected_dir):\n raise FileNotFoundError(\"Couldn't find {}\".format(expected_dir))\n return expected_dir",
"def cache_file_if_exists(self, file_path):\r\n dst = r\"C:/Users/ginger frame\"\r\n\r\n if os.path.exists(file_path):\r\n src = os.path.realpath(file_path) \r\n shutil.copy2(src, './temp')\r\n print(os.path.basename(file_path))\r\n return os.path.basename(file_path)\r\n else:\r\n print('error')\r\n sys.exit('Could not write configuration data to device, \"' + file_path + '\" does not exist.')",
"def _get_cached_filepath(prefix, url):\n filename = '{prefix}_{hash_string}.cache'.format(\n prefix=prefix,\n hash_string=_hash_value(url),\n )\n logger.debug('Cached filepath: ' + os.path.join(CACHE_DIRECTORY, filename))\n return os.path.join(CACHE_DIRECTORY, filename)",
"def __get_docker_file_path(path):\n if os.path.isfile(path):\n return path\n for dc_filename in DEFAULT_DC_FILENAMES:\n file_path = os.path.join(path, dc_filename)\n if os.path.isfile(file_path):\n return file_path\n # implicitly return None",
"def get_cache_file_path(self, URL):\n\n filename = hashlib.md5(URL.encode('utf-8')).hexdigest() + '.wbc'\n path = pathlib.Path(config.WEATHER_PROVIDERS['App']['Cache_path'])\n cache_file_path = path.joinpath(filename)\n\n return cache_file_path",
"def cache_find(item: str) -> str:\n\titem = str(item)\n\tcache = \"Cached/\" + item\n\n\tif os.path.exists(cache):\n\t\treturn cache\n\n\treturn None",
"def getFile(self, path):\n\t\ttry:\n\t\t\tlogger.info('getFile(%s)' % (path))\n\n\t\t\t# Check if file is in cache\n\t\t\tif self.cache_files.has_key(path):\n\t\t\t\tlogger.info('* Retrieving tmpfile name from the cache')\n\t\t\t\treturn self.cache_files[path]['tmpfile']\n\t\t\telse:\n\t\t\t\tlogger.info('* Needs to download the file')\n\n\t\t\t\t# Generate temp file; tmp has a file descriptor, tmp_name the name of the file\n\t\t\t\ttmp, tmp_name = mkstemp()\n\t\t\t\t\n\t\t\t\tlogger.info('* Generated name = %s' % (tmp_name))\n\n\t\t\t\t# Download file from dropbox\n\t\t\t\tif self.downloadFile(path, tmp) == True:\n\t\t\t\t\tlogger.info('* File downloaded')\n\n\t\t\t\t\t# Add to cache\n\t\t\t\t\tself.cache_files[path] = {} \n\t\t\t\t\tself.cache_files[path]['tmpfile'] = tmp_name\n\t\t\t\t\tlogger.info('* Added to cache, file %s is actually %s' % (path, tmp_name))\n\t\t\t\telse:\n\t\t\t\t\treturn False\n\n\t\t\t\treturn tmp_name\n\t\texcept Exception, e:\n\t\t\tinfo = sys.exc_info()\n\t\t\tlogger.error(\"Exception %s at getFile(%s)\" % (info[0],path))\n\t\t\tlogger.error(pformat(info))\n\t\t\treturn False",
"def cache_path(self):",
"def cache_path(self):",
"def cached_path(url_or_filename, cache_dir=None):\n if cache_dir is None:\n cache_dir = PYTORCH_PRETRAINED_BERT_CACHE\n if sys.version_info[0] == 3 and isinstance(url_or_filename, Path):\n url_or_filename = str(url_or_filename)\n if sys.version_info[0] == 3 and isinstance(cache_dir, Path):\n cache_dir = str(cache_dir)\n\n parsed = urlparse(url_or_filename)\n\n if parsed.scheme in (\"http\", \"https\", \"s3\"):\n # URL, so get it from the cache (downloading if necessary)\n return get_from_cache(url_or_filename, cache_dir)\n elif os.path.exists(url_or_filename):\n # File, and it exists.\n return url_or_filename\n elif parsed.scheme == \"\":\n # File, but it doesn't exist.\n raise EnvironmentError(\"file {} not found\".format(url_or_filename))\n else:\n # Something unknown\n raise ValueError(\"unable to parse {} as a URL or as a local path\".format(url_or_filename))",
"def cachedir() -> Path:\n if os.environ.get(\"TEST_TMPDIR\"):\n return Path(os.environ[\"TEST_TMPDIR\"])\n else:\n return Path(\"~/.cache/programl\").expanduser()",
"def _findfile(self, path):\n\n # Build list of possible local file paths\n if not self._isurl(path):\n # Valid local paths\n filelist = self._possible_names(path)\n # Paths in self._destpath\n filelist += self._possible_names(self.abspath(path))\n else:\n # Cached URLs in self._destpath\n filelist = self._possible_names(self.abspath(path))\n # Remote URLs\n filelist = filelist + self._possible_names(path)\n\n for name in filelist:\n if self.exists(name):\n if self._isurl(name):\n name = self._cache(name)\n return name\n return None",
"def _2to3_cache_path(self, path):\n head, tail = os.path.split(path)\n base_filename, sep, tail = tail.partition('.')\n filename = ''.join([base_filename, sep, self.tag, sep, tail])\n return os.path.join(head, '__pycache__', filename)",
"def cached_path(url_or_filename, cache_dir=None, force_download=False, proxies=None):\n\tif cache_dir is None:\n\t\tcache_dir = PYTORCH_TRANSFORMERS_CACHE\n\tif sys.version_info[0] == 3 and isinstance (url_or_filename, Path):\n\t\turl_or_filename = str (url_or_filename)\n\tif sys.version_info[0] == 3 and isinstance (cache_dir, Path):\n\t\tcache_dir = str (cache_dir)\n\n\tparsed = urlparse (url_or_filename)\n\n\tif parsed.scheme in ('http', 'https', 's3'):\n\t\t# URL, so get it from the cache (downloading if necessary)\n\t\treturn get_from_cache (url_or_filename, cache_dir=cache_dir, force_download=force_download, proxies=proxies)\n\telif os.path.exists (url_or_filename):\n\t\t# File, and it exists.\n\t\treturn url_or_filename\n\telif parsed.scheme == '':\n\t\t# File, but it doesn't exist.\n\t\traise EnvironmentError (\"file {} not found\".format (url_or_filename))\n\telse:\n\t\t# Something unknown\n\t\traise ValueError (\"unable to parse {} as a URL or as a local path\".format (url_or_filename))",
"def cache_path(self):\n cache_path = os.path.join(os.path.dirname(__file__), '..', 'cache')\n if not os.path.exists(cache_path):\n os.mkdir(cache_path)\n return cache_path",
"def get_cache_file(self, dependencies):\n filename = '%s.tar' % self.get_cache_key(dependencies)\n return os.path.join(self.cache_directory, filename)"
]
| [
"0.72540975",
"0.6637468",
"0.63889813",
"0.6347938",
"0.6347938",
"0.6323957",
"0.6313463",
"0.629222",
"0.6253909",
"0.6172477",
"0.61681104",
"0.61644065",
"0.6131427",
"0.6089258",
"0.6079828",
"0.60579205",
"0.6052529",
"0.6047051",
"0.5980046",
"0.5979689",
"0.59495044",
"0.5935791",
"0.5935791",
"0.5902662",
"0.5839",
"0.57965416",
"0.5793817",
"0.57737213",
"0.5767747",
"0.5733508"
]
| 0.8828662 | 0 |
Convert Vina Docking Output PDBQT file into SDF file with pose id and vina docking score. | def vina_pose_to_sdf(ifile,ofile):
    # Requires Open Babel's pybel bindings: from pybel import readfile, Outputfile
    output = Outputfile("sdf", ofile, overwrite=True)
    poses = list(readfile("pdbqt", ifile))
    for i in range(len(poses)):
        # Carry the pose id and Vina score over as SDF data fields,
        # then drop the PDBQT-specific tags that have no SDF equivalent.
        poseid = poses[i].data['MODEL']
        del poses[i].data['MODEL']
        vinascore = poses[i].data['REMARK'].split()[2]
        del poses[i].data['REMARK']
        del poses[i].data['TORSDO']
        poses[i].data['POSE_ID'] = poseid
        poses[i].data['VINA_SCORE'] = vinascore
        output.write(poses[i])
    output.close()
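# A minimal usage sketch (file names are hypothetical), assuming Open Babel's
# pybel bindings are installed and the input PDBQT is AutoDock Vina output:
#   vina_pose_to_sdf("ligand_out.pdbqt", "ligand_out.sdf")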
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_sdf_to_pdbs(vars, gen_folder_path, sdfs_folder_path):\n\n files = []\n\n if os.path.isdir(sdfs_folder_path):\n # so it's a directory, go through the directory and find all the sdf files\n if sdfs_folder_path[-1:] != os.sep:\n sdfs_folder_path = (\n sdfs_folder_path + os.sep\n ) # so add a / to the end of the directory\n\n files.extend(glob.glob(sdfs_folder_path + \"*.sdf\"))\n files.extend(glob.glob(sdfs_folder_path + \"*.SDF\"))\n files = list(set(files))\n if len(files) == 0:\n printout = \"\\nThere are no sdf's to convert to PDB's. There may be an issue with Gypsum.\\n\"\n print(printout)\n raise Exception(printout)\n\n # create a new subfolder if one doesn't already exist. folder will be with\n # the generation and will be titled PDBs pdb_subfolder_path will become\n # the the output folder\n pdb_subfolder_path = gen_folder_path + \"PDBs\" + os.sep\n if not os.path.isdir(pdb_subfolder_path):\n os.makedirs(pdb_subfolder_path)\n\n job_inputs = []\n for file_path in files:\n if \"params\" in file_path:\n continue\n job_inputs.append(tuple([pdb_subfolder_path, file_path]))\n job_inputs = tuple(job_inputs)\n\n # Check that there are .sdf files to test. If not raise Exception\n if len(job_inputs) == 0:\n printout = \"\\n\\nThere are no SDF files were found to convert to PDB. \"\n printout = printout + \"This may be a problem with the Gypsum-DL \"\n printout = printout + \"settings.\\nPlease check that the `--gypsum_timeout_limit` \"\n printout = printout + \"is appropriate relative to the `--gypsum_thoroughness` \"\n printout = printout + \"and `--max_variants_per_compound` parameters.\\n\"\n raise Exception(printout)\n\n # Convert sdf files to pdbs in multithread\n vars[\"parallelizer\"].run(job_inputs, convert_single_sdf_to_pdb)",
"def read_vmdas(self,):\n fd = self.f\n # The raw files produced by VMDAS contain a binary navigation data\n # block.\n self.cfg['sourceprog'] = 'VMDAS'\n ens = self.ensemble\n k = ens.k\n if self._source != 1 and self._debug_level >= 1:\n print(' \\n***** Apparently a VMDAS file \\n\\n')\n self._source = 1\n self.vars_read += ['time_gps',\n 'latitude_gps',\n 'longitude_gps',\n 'etime_gps',\n 'elatitude_gps',\n 'elongitude_gps',\n 'flags',\n 'ntime', ]\n utim = fd.read_ui8(4)\n date = tmlib.datetime(utim[2] + utim[3] * 256, utim[1], utim[0])\n # This byte is in hundredths of seconds (10s of milliseconds):\n time = tmlib.timedelta(milliseconds=(int(fd.read_ui32(1) / 10)))\n fd.seek(4, 1) # \"PC clock offset from UTC\" - clock drift in ms?\n ens.time_gps[k] = tmlib.date2epoch(date + time)[0]\n ens.latitude_gps[k] = fd.read_i32(1) * self._cfac\n ens.longitude_gps[k] = fd.read_i32(1) * self._cfac\n ens.etime_gps[k] = tmlib.date2epoch(date + tmlib.timedelta(\n milliseconds=int(fd.read_ui32(1) * 10)))[0]\n ens.elatitude_gps[k] = fd.read_i32(1) * self._cfac\n ens.elongitude_gps[k] = fd.read_i32(1) * self._cfac\n fd.seek(12, 1)\n ens.flags[k] = fd.read_ui16(1)\n fd.seek(6, 1)\n utim = fd.read_ui8(4)\n date = tmlib.datetime(utim[0] + utim[1] * 256, utim[3], utim[2])\n ens.ntime[k] = tmlib.date2epoch(date + tmlib.timedelta(\n milliseconds=int(fd.read_ui32(1) / 10)))[0]\n fd.seek(16, 1)\n self._nbyte = 2 + 76",
"def exportData(self):\n self.fileName = QtGui.QFileDialog.getSaveFileName(self, self.tr(\"Save Data\"), \"\", \n self.tr('Atom Positions (*.pdb)'))\n if not self.fileName.isEmpty():\n self.setCursor(QtCore.Qt.WaitCursor)\n selectedChain = self.main_chain\n PDBstring = selectedChain.toPDB( CAlphaPlaceholders=False)\n F = open(self.fileName, 'w')\n F.write(PDBstring)\n F.close()\n self.dirty = False\n self.setCursor(QtCore.Qt.ArrowCursor)",
"def convert_single_sdf_to_pdb(pdb_subfolder_path, sdf_file_path):\n\n if os.path.exists(sdf_file_path) is True:\n\n file_basename = basename(sdf_file_path)\n file_basename = file_basename.split(\"__input1\")[0]\n\n file_output_name = \"{}{}_\".format(pdb_subfolder_path, file_basename)\n\n try:\n mols = Chem.SDMolSupplier(\n sdf_file_path, sanitize=False, removeHs=False, strictParsing=False\n )\n except:\n mols = None\n\n # if mols is None rdkit couldn't import the sdf so we will not do anything else\n if mols is None:\n pass\n elif len(mols) == 0:\n pass\n else:\n try:\n mols_no_hydrogen = Chem.SDMolSupplier(\n sdf_file_path, sanitize=True, removeHs=True, strictParsing=False\n )\n except:\n mols_no_hydrogen = [None for x in range(0, len(mols))]\n\n # if len(mols)==0 gypsum output a blank file by accident\n # if mols is None rdkit couldn't import the sdf\n if len(mols) != 0:\n counter = 0\n for i in range(0, len(mols)):\n mol = mols[i]\n # Extra precaution to prevent None's within a set of good\n # mols\n if mol is None:\n continue\n\n mol = MOH.check_sanitization(mol)\n # Filter out any which failed\n if mol is None:\n continue\n\n # pdb_name indexed to 1\n pdb_name = \"{}_{}.pdb\".format(file_output_name, counter + 1)\n if mol is not None: # For extra precaution...\n Chem.MolToPDBFile(mol, pdb_name, flavor=32)\n # Add header to PDB file with SMILES containing\n # protanation and stereochem\n\n no_hydrogen_smiles = mols_no_hydrogen[i]\n if no_hydrogen_smiles is None:\n no_hydrogen_smiles = Chem.MolToSmiles(mol)\n\n if no_hydrogen_smiles is None:\n print(\"SMILES was None for: \", pdb_name)\n printout = \"REMARK Final SMILES string: {}\\n\".format(\"None\")\n elif type(no_hydrogen_smiles) == str:\n printout = \"REMARK Final SMILES string: {}\\n\".format(\n no_hydrogen_smiles\n )\n elif type(no_hydrogen_smiles) == type(Chem.MolFromSmiles(\"C\")):\n printout = \"REMARK Final SMILES string: {}\\n\".format(\n Chem.MolToSmiles(no_hydrogen_smiles)\n )\n\n with open(pdb_name) as f:\n printout = printout + f.read()\n with open(pdb_name, \"w\") as f:\n f.write(printout)\n printout = \"\"\n\n counter = counter + 1\n else:\n pass",
"def make_big_vdwradii( targetpath ):\n\n file = open( os.path.join( targetpath, 'vdwradii.dat'), 'w')\n text=\"\"\"; Very approximate VanderWaals radii\n; only used for drawing atoms as balls or for calculating atomic overlap.\n; longest matches are used\n; '???' or '*' matches any residue name\n; 'AAA' matches any protein residue name\n; MODIFIED TO USE BIG VDW RADII TO PREVENT WATERS BEING PUT IN THE PROTEIN WHERE WE DON'T WANT THEM. DLM\n??? C 0.3\n??? F 0.3\n??? H 0.3\n??? N 0.3\n??? O 0.3\n??? P 0.3\n??? S 0.3\n??? LP1 0\n??? LP2 0\nSOL H 0.04\nSOL O 0.105\nWAT H 0.04\nWAT O 0.105\nGLY MN1 0\nGLY MN2 0\nALA MCB1 0\nALA MCB2 0 \nVAL MCG1 0 \nVAL MCG2 0 \nILE MCG1 0 \nILE MCG2 0 \nILE MCD1 0 \nILE MCD2 0 \nLEU MCD1 0 \nLEU MCD2 0 \nMET MCE1 0 \nMET MCE2 0 \nTRP MTRP1 0 \nTRP MTRP2 0\nTHR MCG1 0\nTHR MCG2 0\nLYSH MNZ1 0 \nLYSH MNZ2 0 \n\"\"\" \n file.writelines(text)\n file.close()",
"def get_df_from_psv(psql_out='data/output.psv', fout=None):\n\twith open(psql_out, encoding=\"utf-8\") as f:\n\t\tstrr = f.read()\n\n\tdf_str = re.sub(r'\\n(?![0-9])', ' ', strr) # Regex to handle erratic newlines\n\t\n\tdf = pd.read_csv(io.StringIO(df_str), sep='|', error_bad_lines=False) \n\n\t# at least one row had the title in label col. \n\tdf.art_arttitle = np.where(df.art_arttitle.isnull() & df.label.notnull(), df.label, df.art_arttitle)\n\n\tdf.columns = ['pmid', 'title', 'abstract', 'label']\n\tdf.abstract = df.abstract.fillna('')\n\n\t# Sectioned abstracts occur across multiple rows. identify them by multiple occurrences of the same PMID, and then combine.\n\ts=df.pmid.value_counts()\n\tmult_pmid = df[df.pmid.isin(s.index[s>1])]\n\tsingle_pmid = df[~df.pmid.isin(s.index[s>1])]\n\tmult_pmid2 = mult_pmid.groupby(['pmid']).abstract.transform(lambda x: ' '.join(x))\n\tmult_pmid['abstract'] = mult_pmid2\n\tmult_pmid = mult_pmid.drop_duplicates(subset=['pmid','abstract'])\n\tdata_df = single_pmid.append(mult_pmid)\n\n\tdata_df['content'] = np.where(data_df.title!=data_df.abstract, data_df.title+' '+data_df.abstract, data_df.abstract)\n\n\tif not fout:\n\t\timport datetime\n\t\tfout = \"data/datadf_\"+str(datetime.datetime.now()).replace(' ','_').replace(':','')+\".pkl\"\n\tdata_df.to_pickle(fout)\n\n\treturn data_df",
"def dok_to_sdf (dok_file=None,output=None):\r\n out=pybel.Outputfile(filename=output,format='sdf',overwrite=True)\r\n\r\n with open(dok_file, 'r') as f:\r\n doc=[line for line in f.readlines()]\r\n \r\n doc=[line.replace(line.split()[2],line.split()[2].upper()) if 'ATOM' in line else line for line in doc]\r\n \r\n start=[index for (index,p) in enumerate(doc) if 'REMARK Cluster' in p]\r\n finish=[index-1 for (index,p) in enumerate(doc) if 'REMARK Cluster' in p]\r\n finish.append(len(doc))\r\n\r\n interval=list(zip(start,finish[1:]))\r\n for num,i in enumerate(interval):\r\n block = \",\".join(doc[i[0]:i[1]]).replace(',','')\r\n\r\n m=pybel.readstring(format='pdb',string=block)\r\n \r\n m.data.update({'Pose':m.data['REMARK'].split()[4]})\r\n m.data.update({'Score':m.data['REMARK'].split()[6]})\r\n del m.data['REMARK']\r\n\r\n out.write(m)\r\n\r\n out.close()",
"def to_psf_file(self, psf_path) -> None:\n with open(psf_path, \"w\", encoding=\"utf-8\") as psf_file:\n psf_file.write(self.to_psf_block())",
"def dumpData(self,out,index):\n #--SCVR\n out.pack('4siBB2sB',\n 'SCVR', 5+len(self.text), index+48, self.type, self.func, self.oper)\n if self.text: out.write(self.text)\n #--Value\n if isinstance(self.value,int):\n out.packSub('INTV','i', self.value)\n else:\n out.packSub('FLTV','f', self.value)",
"def write_to_vcf(self):\n\n # 1. Generate header info\n date_for_vcf = datetime.now().strftime('%Y%m%d')\n header_info = [\n '##fileformat=VCFv4.2',\n '##fileDate=%s' % date_for_vcf,\n '##source=%s' % self.get_analyser_name(),\n '##reference=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/bigZips/hg38.fa.gz',\n '##contig=<ID=chr1,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr1.fa.gz>',\n '##contig=<ID=chr2,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr2.fa.gz>',\n '##contig=<ID=chr3,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr3.fa.gz>',\n '##contig=<ID=chr4,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr4.fa.gz>',\n '##contig=<ID=chr5,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr5.fa.gz>',\n '##contig=<ID=chr6,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr6.fa.gz>',\n '##contig=<ID=chr7,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr7.fa.gz>',\n '##contig=<ID=chr8,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr8.fa.gz>',\n '##contig=<ID=chr9,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr9.fa.gz>',\n '##contig=<ID=chr10,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr10.fa.gz>',\n '##contig=<ID=chr11,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr11.fa.gz>',\n '##contig=<ID=chr12,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr12.fa.gz>',\n '##contig=<ID=chr13,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr13.fa.gz>',\n '##contig=<ID=chr14,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr14.fa.gz>',\n '##contig=<ID=chr15,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr15.fa.gz>',\n '##contig=<ID=chr16,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr16.fa.gz>',\n '##contig=<ID=chr17,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr17.fa.gz>',\n '##contig=<ID=chr18,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr18.fa.gz>',\n '##contig=<ID=chr19,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr19.fa.gz>',\n '##contig=<ID=chr20,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr20.fa.gz>',\n '##contig=<ID=chr21,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr21.fa.gz>',\n '##contig=<ID=chr22,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chr22.fa.gz>',\n '##contig=<ID=chrM,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chrM.fa.gz>',\n '##contig=<ID=chrX,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chrX.fa.gz>',\n '##contig=<ID=chrY,URL=https://hgdownload.soe.ucsc.edu/goldenPath/hg38/chromosomes/chrY.fa.gz>',\n ]\n header_parameters = [\n '##FORMAT=<ID=GT,Number=1,Type=String,Description=\"Genotype\">',\n '##FORMAT=<ID=MTQ,Number=1,Type=String,Description=\"MassArray Typer quality value for SNP call. '\n 'A=Conservative, B=Moderate, C=Aggressive, D=Low Probability, E=User Call, i=Low Intensity. 
A and B are considered high '\n 'quality scores.\">',\n '##INFO=<ID=PCR,Number=2,Type=String,Description=\"PCR sequences used in assay.\">',\n '##INFO=<ID=AF,Number=A,Type=Float,Description=\"Minor allele frequency from population data.\">',\n '##INFO=<ID=Gene,Number=A,Type=String,Description=\"HGNC Gene Name for gene containing SNP.\">',\n '##INFO=<ID=Build,Number=A,Type=String,Description=\"Genome build used to determine SNP position for assay.\">',\n '##FILTER=<ID=LowCallRate,Description=\"SNP not called in at least 30% of samples in assay.\">',\n ]\n\n # 2. Extract info from XML file\n results = self.get_results()\n snps = self.get_snps()\n pcr_sequences = self.get_pcr_sequences()\n call_rates = self.get_snp_call_rate()\n\n # 3. For each sample, create VCF, add headers, determine genotype of each SNP and write to file.\n for sample, variants in results.items():\n\n with open(os.path.join(self.output, '%s.vcf' % sample), 'w+') as outfile:\n\n header_fields = ['CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER', 'INFO', 'FORMAT', str(sample)]\n\n outfile.write('%s\\n' % '\\n'.join(header_info))\n outfile.write('%s\\n' % '\\n'.join(header_parameters))\n outfile.write('#%s\\n' % '\\t'.join(header_fields))\n\n # for each variant, make a line to add to the file which will\n # then be sorted\n lines_to_write = []\n for snp, info in variants.items():\n\n ref_allele = snps[snp]['ref']\n alt_alleles = snps[snp]['alt']\n alt_list = alt_alleles.split(',')\n\n # Genotype formatting matches VCF v4.0 spec where ./. is no call.\n gt_list = []\n called_genotype = info['genotype']\n if not called_genotype:\n gt_list = ['.', '.']\n elif len(called_genotype) == 1:\n called_genotype += called_genotype\n for allele in list(called_genotype):\n if allele == ref_allele:\n gt_list.append(0)\n else:\n if allele in alt_list:\n idx = alt_list.index(allele)\n gt_list.append(idx + 1)\n else:\n raise ValueError(\n 'Called genotype %s not provided as possible alt in bed file. Sample %s and SNP '\n '%s %s.' % (called_genotype, sample, snp, alt_alleles)\n )\n gt = '/'.join([str(x) for x in gt_list])\n\n # Threshold currently set to 0.3 (70% results have a call).\n snp_call_rate = call_rates[snp]\n if snp_call_rate >= 0.3:\n vcf_filter = 'LowCallRate'\n else:\n vcf_filter = 'PASS'\n\n snp_pcr_seqs = pcr_sequences[snp]\n\n lines_to_write.append(\n '{chr}\\t{pos}\\t{id}\\t{ref}\\t{alt}\\t.\\t{filter}\\tAF={af};PCR={pcr};Gene={gene};Build={build}\\t'\n 'GT:MTQ\\t{gt}:{qual}\\n'.format(\n chr=snps[snp]['chrom'],\n pos=snps[snp]['pos'],\n id=snp,\n ref=ref_allele,\n alt=alt_alleles,\n filter=vcf_filter,\n af=snps[snp]['maf'],\n pcr=','.join(snp_pcr_seqs),\n gene=snps[snp]['gene'],\n build=snps[snp]['genome_build'],\n gt=gt,\n qual=','.join(info['quality'])\n )\n )\n\n sorted_lines_to_write = sorted(\n lines_to_write,\n key=lambda x: (\n # first key for sorting is the int value of chr\n int(x.split('\\t')[0][3:]),\n # second key for sorting is the position of the variant\n int(x.split('\\t')[1])\n )\n )\n\n for line in sorted_lines_to_write:\n outfile.write(line)",
"def writeto(self, output):\n\n hdu = pyfits.PrimaryHDU(data=self.integrated_psf)\n (year, month, day, hour, minute, second, weekday, DOY, DST) = \\\n time.gmtime()\n hdu.header.update(\"DATE\", \"%4d-%02d-%02dT%02d:%02d:%02d\" %\n (year, month, day, hour, minute, second))\n hdu.header.update(\"FILENAME\", os.path.basename(output),\n comment=\"Name of this file\")\n hdu.header.update(\"INSTRUME\", self.instrument, \"Instrument name\")\n\n # Copy some specific keywords from the input header.\n ihdr = self.header\n if \"BUNIT\" in ihdr:\n hdu.header.update(\"BUNIT\", ihdr.get(\"BUNIT\"))\n if \"ERR_BUDG\" in ihdr:\n hdu.header.update(\"ERR_BUDG\", ihdr.get(\"ERR_BUDG\"),\n comment=\"Optical error budget version number\")\n if \"SI_FP\" in ihdr:\n hdu.header.update(\"SI_FP\", ihdr.get(\"SI_FP\"),\n comment=\"Focal plane for OPD calculation\")\n if \"OPD_WFE\" in ihdr:\n hdu.header.update(\"OPD_WFE\", ihdr.get(\"OPD_WFE\"),\n comment=\"OPD wavefront error (nm)\")\n if \"W\" in ihdr:\n hdu.header.update(\"W\", ihdr.get(\"W\"),\n comment=\"Flat width of hex segment (m)\")\n if \"GAP\" in ihdr:\n hdu.header.update(\"GAP\", ihdr.get(\"GAP\"),\n comment=\"Gap width between hex segments (m)\")\n if \"EDGE\" in ihdr:\n hdu.header.update(\"EDGE\", ihdr.get(\"EDGE\"),\n comment=\"Edge roll off (m)\")\n if \"SW\" in ihdr:\n hdu.header.update(\"SW\", ihdr.get(\"SW\"),\n comment=\"Obscuring strut width (m)\")\n if \"HTS\" in ihdr:\n hdu.header.update(\"HTS\", ihdr.get(\"HTS\"),\n comment=\"Height of segment isogrid\")\n if \"HT2\" in ihdr:\n hdu.header.update(\"HT2\", ihdr.get(\"HT2\"),\n comment=\"Height of secondary isogrid\")\n if \"HT3\" in ihdr:\n hdu.header.update(\"HT3\", ihdr.get(\"HT3\"),\n comment=\"Height of tertiary isogrid\")\n if \"FL\" in ihdr:\n hdu.header.update(\"FL\", ihdr.get(\"FL\"),\n comment=\"Focal length (m)\")\n\n # Add some keywords.\n if self.phase_file is not None:\n hdu.header.update(\"PHASE\", os.path.basename(self.phase_file),\n \"Name of phase image file\")\n if self.pupil_file is not None:\n hdu.header.update(\"PUPIL\", os.path.basename(self.pupil_file),\n \"Name of pupil image file\")\n hdu.header.update(\"OVERSAMP\", self.oversample, \"Oversampling factor\")\n hdu.header.update(\"CALCTYPE\", self.type,\n \"32 = single precision, 64 = double precision\")\n hdu.header.update(\"DIAMETER\", self.D, \"pupil diameter (meters)\")\n hdu.header.update(\"ORIG_NX\", self.header[\"naxis1\"],\n \"NAXIS1 in input image\")\n hdu.header.update(\"ORIG_NY\", self.header[\"naxis2\"],\n \"NAXIS2 in input image\")\n\n self.putCoordInfo(hdu)\n\n (wavelengths, weights) = self.filter\n if len(wavelengths) >= 99:\n root_wln = \"WAV\"\n root_wgt = \"WGT\"\n else:\n root_wln = \"WAVELN\"\n root_wgt = \"WEIGHT\"\n for i in range(len(wavelengths)):\n keyword = \"%s%d\" % (root_wln, i + 1)\n hdu.header.update(keyword, wavelengths[i],\n \"wavelength in microns\")\n keyword = \"%s%d\" % (root_wgt, i + 1)\n hdu.header.update(keyword, weights[i], \"weight\")\n\n ofd = pyfits.HDUList(hdu)\n try:\n ofd.writeto(output)\n except IOError as message:\n print(\"ERROR: Output file has NOT been written; \" \\\n \"use <psf>.writeto(output)\")\n print(message)\n return\n self.output_written = True",
"def docking_vina(self, ligand_file, docking_pdbqt_file, docking_log_file):\n\n run_line = '%s' % self.docking_program\n run_line += ' --config %s' % self.dock_config_file\n run_line += ' --ligand %s' % ligand_file\n run_line += ' --out %s' % docking_pdbqt_file\n if self.output_save:\n run_line += ' --log %s' % (docking_log_file)\n e = None\n try:\n result = subprocess.check_output(run_line.split(),\n stderr=subprocess.STDOUT,\n timeout=self.timeout_dock,\n universal_newlines=True)\n except Exception as e:\n return [99.999], e\n\n result_lines = result.split('\\n')\n\n check_result = False\n affinity_list = list()\n for result_line in result_lines:\n if result_line.startswith('-----+'):\n check_result = True\n continue\n if not check_result:\n continue\n if result_line.startswith('Writing output'):\n break\n if result_line.startswith('Refine time'):\n break\n lis = result_line.strip().split()\n if not lis[0].isdigit():\n break\n# mode = int(lis[0])\n affinity = float(lis[1])\n affinity_list += [affinity]\n if len(affinity_list) == 0:\n e = 'WARNING: Could not find any conformations.'\n return [99.999], e\n return affinity_list, e",
"def export(ctx, outfile):\n adapter = ctx.obj['adapter']\n \n logger.info(\"Export the variants from {0}\".format(adapter))\n nr_cases = 0\n \n existing_chromosomes = set(adapter.get_chromosomes())\n \n ordered_chromosomes = []\n for chrom in CHROMOSOME_ORDER:\n if chrom in existing_chromosomes:\n ordered_chromosomes.append(chrom)\n existing_chromosomes.remove(chrom)\n for chrom in existing_chromosomes:\n ordered_chromosomes.append(chrom)\n \n nr_cases = adapter.cases().count()\n logger.info(\"Found {0} cases in database\".format(nr_cases))\n\n head = HeaderParser()\n head.add_fileformat(\"VCFv4.3\")\n head.add_meta_line(\"NrCases\", nr_cases)\n head.add_info(\"Obs\", '1', 'Integer', \"The number of observations for the variant\")\n head.add_info(\"Hom\", '1', 'Integer', \"The number of observed homozygotes\")\n head.add_info(\"Hem\", '1', 'Integer', \"The number of observed hemizygotes\")\n head.add_version_tracking(\"loqusdb\", __version__, datetime.now().strftime(\"%Y-%m-%d %H:%M\"))\n for chrom in ordered_chromosomes:\n length = adapter.get_max_position(chrom)\n head.add_contig(contig_id=chrom, length=str(length))\n\n print_headers(head, outfile=outfile)\n \n for chrom in ordered_chromosomes:\n for variant in adapter.get_variants(chromosome=chrom):\n chrom = variant['chrom']\n pos = variant['start']\n ref = variant['ref']\n alt = variant['alt']\n observations = variant['observations']\n homozygotes = variant['homozygote']\n hemizygotes = variant['hemizygote']\n info = \"Obs={0}\".format(observations)\n if homozygotes:\n info += \";Hom={0}\".format(homozygotes)\n if hemizygotes:\n info += \";Hem={0}\".format(hemizygotes)\n variant_line = \"{0}\\t{1}\\t.\\t{2}\\t{3}\\t.\\t.\\t{4}\\n\".format(\n chrom, pos, ref, alt, info)\n print_variant(variant_line=variant_line, outfile=outfile)",
"def save_fida(self, path):\n # input is Dimensions are channel x rep x mega x isis x t\n # FID-A seems to accomodate only 4: t x chan x rep x subSpecs\n # TODO: see if ISIS and MEGA subspecs are differentiated\n\n # permute the axes to t x chan x rep x mega x isis\n fids = np.transpose(self.fid, (4, 0,1,2,3))\n specs = np.transpose(self.spec, (4, 0,1,2,3))\n # reshape to combine subspecs\n dims = list(fids.shape[0:-2])\n dims.append(-1)\n fids = np.reshape(fids, tuple(dims))\n specs = np.reshape(specs, tuple(dims))\n\n # remove last dimensi if there are no subSpecs\n fids = np.squeeze(fids)\n specs = np.squeeze(specs)\n\n # fp to avoid int64 errors\n dim_dict = {'t': 1.0, 'coils': 2.0, 'averages': 3.0, 'subSpecs': 0.0, 'extras': 0.0}\n\n # there are still subSpectra\n if fids.ndim == 4:\n subspecs = fids.shape[-1]\n rawSubspecs = fids.shape[-1]\n dim_dict['subSpecs'] = 4.0\n else:\n subspecs = 0\n rawSubspecs = 0\n\n if self.fid.shape[0]==1:\n addedrcvrs = 1\n else:\n addedrcvrs = 0\n\n B0 = self.larmor/util.GYROMAGNETIC_RATIO[self.nucleus]\n\n n_averages = float(self.fid.shape[self.dimnames['rep']])\n # fids - time domain MRS data.\n # specs - frequency domain MRS data.\n # t - vector of time values for plotting in the time domain [s]\n # ppm - vector of frequency values for plotting in the frequency domain\n # [ppm]\n # sz - size of the fids and specs arrays\n # date - date that the data was acquired or simulated\n # averages - number of averages in the dataset (possibly altered by\n # processing)\n # rawAverages - number of averages in the original dataset (not altered by\n # processing).\n # subspecs - number of subspectra (ISIS, edit on/off, etc) in the dataset\n # (possibly altered by processing).\n # rawSubspecs - number of subspectra (ISIS, edit on/off, etc) in the original\n # dataset (not altered by processing). Bo - magnetic field strength [Tesla]\n # txfrq - Centre frequnecy [MHz];\n # linewidth - linewidth of data (only used for simulated data) [Hz]\n # n - number of spectral points\n # dwelltime - dwell time of the data in the time domain [s] (dwelltime =\n # 1/spectralwidth)\n # sim - type of simulation (ideal vs. shaped pulses), only used for\n # simulated data.\n # te seq dims\n # - echo time of acquisition [ms], only used for simulated data - type of sequence used (only used for simulated data).\n # - structure specifying which data dimensions are stored along\n # which dimensions of the fids/specs arrays. Fields include:\n # t - time/frequency dimension (usually this is 1, the first\n # dimension of the fids/specs array).\n # coils - for multiple receiver array, this is the dimension of\n # the arrayed receiver data (can be 2, 3 or 4). 
averages - for multiple averages, this is the dimension of the\n # averages (can be 2, 3 or 4).\n # subSpecs - in the case of subtraction data (ISIS, MEGA-PRESS), this\n # is the dimension of the subSpectra (can be 2, 3 or 4).\n\n\n mdict = {'fids': fids, 'specs': specs, 't': self.t,\n 'ppm': self.ppm, 'sz': np.float_(fids.shape), 'date': '',\n 'averages': n_averages, 'rawAverages': n_averages,\n 'subspecs': float(subspecs), 'rawSubspecs': float(rawSubspecs), 'Bo': B0,\n 'txfrq': self.larmor, 'dwelltime': 1.0/self.sw,\n 'spectralwidth': self.sw, 'seq': self._sequence_name,\n 'dims': dim_dict, 'te': self.te * 1e3, 'tr': self.tr * 1e3,\n 'pointsToLeftshift': 0}\n\n # writtentostruct\n # gotparams\n # filtered\n # zeropadded\n # freqcorrected\n # phasecorrected\n # averaged\n # addedrcvrs\n # Subtracted\n # Writtentotext\n # Downsampled\n # avgNormalized\n # isISIS\n # - Has the dataset been written to a structure (1 or 0)\n # - Have the parameters been retrieved from the dataset (1 or 0)\n # - Has the dataset been filtered (1 or 0)\n # - Has the dataset been zeropadded (1 or 0)\n # - Has the dataset been frequency corrected (1 or 0) - Has the dataset been phase corrected (1 or 0)\n # - Have the averages been combined (1 or 0)\n # - Have the rcvr channels been combined (1 or 0).\n # - Have the subspecs been subtracted (1 or 0)\n # - Has the data been written to text file (1 or 0) - has the data been resampled to a different\n # spectral resolution (1 or 0)\n # - Has the data been amplitude scaled following\n # combination of the averages (1 or 0)\n # - Does the dataset contain ISIS subspectra (1 or 0)\n\n flags = {'writtentostruct': 1, 'gotparams': 1, 'filtered': 0,\n 'zeropadded': 0, 'freqcorrected': 0, 'phasecorrected': 0,\n 'averaged': int(n_averages == 1), 'addedrcvrs': addedrcvrs,\n 'subtracted': 0, 'Writtentotext': 0, 'Downsampled': 0,\n 'avgNormalized': 0, 'isISIS': int(self.is_special),\n 'leftshifted': 0}\n\n if self.sequence_type == 'STEAM':\n mdict['tm'] = self.tm\n\n mdict['flags'] = flags\n scipy.io.savemat(path, {'svs': mdict}, format='5', long_field_names=True)",
"def generateSDFitsFromHipsr(filename_in, path_in, filename_out, path_out, write_stokes=0, cal=None):\n \n # Open h5 file\n print \"\\nOpening files\"\n print \"-------------\"\n h5file = os.path.join(path_in, filename_in)\n out_file = os.path.join(path_out, filename_out)\n h6 = Hipsr6(h5file)\n pointing = h6.tb_pointing.cols\n obs = h6.tb_observation.cols\n obs_mode = obs.obs_mode[0].strip()\n ref_beams= obs.ref_beam[:]\n\n freqs = h6.freqs\n freqs_cal = h6.freqs_cal\n \n firmware = h6.tb_firmware_config.cols.firmware[0]\n \n print \"Input file: %s\"%h6.h5.filename\n print h6\n\n if cal == None:\n abspath = os.path.abspath( __file__ ).replace('sdfits.pyc', '').replace('sdfits.py', '')\n #diode_cal_file_x = \"%s/diode_jy_x.cal\"%abspath\n #diode_cal_file_y = \"%s/diode_jy_y.cal\"%abspath\n diode_cal_file = \"%s/diode_jy.cal\"%abspath\n else:\n diode_cal_file = cal\n\n print \"Using calibration %s\"%cal\n diode_temps_x, diode_temps_y, rx_temps_x, rx_temps_y = loadDiodeTemp(h6, diode_cal_file)\n\n scan_pointing_len = h6.tb_scan_pointing.shape[0]\n \n tb_lengths = []\n for beam in h6.h5.root.raw_data:\n if beam.shape[0] != scan_pointing_len:\n beam_id = int(beam.name.lstrip('beam_'))\n print \"WARNING: beam %i len: %i, scan_pointing len: %i\"%(beam_id, beam.shape[0], scan_pointing_len)\n tb_lengths.append(np.min([beam.shape[0], scan_pointing_len]))\n \n \n num_acc = np.max(tb_lengths) \n num_rows = num_acc * 13\n\n if num_acc == 0:\n print \"No data in %s. Skipping.\"%h5file\n return -1\n \n print \"No accumulations: %s, no rows: %s\"%(num_acc, num_rows)\n\n # We now need to generate a blank SD-FITS file, with the same number of rows\n print \"\\nGenerating blank SD-FITS file with %i rows...\"%num_rows\n\n path = findLibraryPath()\n if obs_mode == 'MXCAL':\n header_primary = os.path.join(path, 'header_primaryHDU.txt')\n header_tbl = os.path.join(path, 'header_dataHDU_mxcal.txt')\n coldef_file = os.path.join(path, 'coldefs_dataHDU_mxcal.txt')\n elif write_stokes == 2:\n print \"Stokes flag found - writing I,Q,U,V\"\n header_primary = os.path.join(path, 'header_primaryHDU.txt')\n header_tbl = os.path.join(path, 'header_dataHDU_stokes.txt')\n coldef_file = os.path.join(path, 'coldefs_dataHDU_stokes.txt')\n elif write_stokes == 0:\n print \"Writing XX, YY\"\n header_primary = os.path.join(path, 'header_primaryHDU.txt')\n header_tbl = os.path.join(path, 'header_dataHDU.txt')\n coldef_file = os.path.join(path, 'coldefs_dataHDU.txt')\n else:\n print \"Writing XX, YY, XY, YX\"\n header_primary = os.path.join(path, 'header_primaryHDU.txt')\n header_tbl = os.path.join(path, 'header_dataHDU_xpol.txt')\n coldef_file = os.path.join(path, 'coldefs_dataHDU_xpol.txt')\n \n if '200_16384' in firmware:\n coldef_file = os.path.join(path, 'coldefs_dataHDU_200_16384.txt')\n \n hdulist = generateBlankSDFits(num_rows, header_primary, header_tbl, coldef_file)\n print hdulist.info()\n \n # Next, we copy over observation data \n print \"Filling new SD-FITS with HIPSR data...\"\n sdtab = hdulist[1].data\n sdhead = hdulist[1].header\n\n # Fill in header values\n sdhead[\"OBSERVER\"] = obs.observer[0]\n sdhead[\"PROJID\"] = obs.project_id[0]\n \n # Fill in common values\n # NEW METHOD OF TIMESTAMPING - AUG 27 2013\n ref_time = int(h6.h5.root.raw_data.beam_01.cols.timestamp[0])\n ref_id = int(h6.h5.root.raw_data.beam_01.cols.id[0])\n ref_clk = np.abs(h6.h5.root.observation.cols.bandwidth[0]) * 1e6\n num_chans = h6.h5.root.raw_data.beam_01.cols.xx[0].shape[0]\n acc_len = h6.h5.root.firmware_config.cols.acc_len[0]\n # OLD - 
BEFORE MAR 2018 ref_delta = num_chans * acc_len * 2 / ref_clk\n # NEW - post MAR 2018\n fs = 800e6\n ref_delta = 4 * num_chans * acc_len / fs\n \n f = h6.freqs\n\n print \"Filling in common values... \",\n sdtab[\"SCAN\"][:] = 1\n sdtab[\"EXPOSURE\"][:] = ref_delta\n sdtab[\"OBJECT\"][:] = pointing.source[0]\n sdtab[\"OBJ-RA\"][:] = pointing.ra[0]\n sdtab[\"OBJ-DEC\"][:] = pointing.dec[0]\n sdtab[\"RESTFRQ\"][:] = obs.frequency[0] * 1e6\n sdtab[\"FREQRES\"][:] = np.abs(obs.bandwidth[0])*1e6 / num_chans\n sdtab[\"BANDWID\"][:] = np.abs(obs.bandwidth[0]) * 1e6\n sdtab[\"CRPIX1\"][:] = num_chans/2 + 1\n sdtab[\"CRVAL1\"][:] = obs.frequency[0] * 1e6\n sdtab[\"CDELT1\"][:] = np.abs(obs.bandwidth[0])*1e6 / num_chans\n sdtab[\"FLAGGED\"][:] = 0\n sdtab[\"SCANRATE\"][:] = obs.scan_rate[0] / 60 # Deg/min to deg/s\n\n\n # TCS INFO\n sdtab[\"OBSMODE\"][:] = obs.obs_mode[0] \n sdtab[\"IF\"][:] = 1\n print \"OK.\"\n \n row_sd = 0\n cycle_id = 0\n \n flipped = False\n if obs.bandwidth[0] < 0:\n flipped = True\n \n print \"Filling in unique values... \"\n num_cycles = np.min([scan_pointing_len, num_acc])\n for row_h5 in range(num_acc):\n cycle_id += 1 # Starts at 1 in SD-FITS file\n\n for beam in h6.h5.root.raw_data:\n beam_id = int(beam.name.lstrip('beam_'))\n LinePrint(\"%i of %i\"%(row_sd, num_rows))\n \n if cycle_id <= num_cycles:\n raj_id = \"mb%s_raj\"%beam.name.lstrip('beam_')\n dcj_id = \"mb%s_dcj\"%beam.name.lstrip('beam_')\n \n sdtab[\"CYCLE\"][row_sd] = cycle_id\n\n # Fix beam mapping (remove after fixing mapping)\n sdtab[\"BEAM\"][row_sd] = beam_id\n \n sdtab[\"CRVAL3\"][row_sd] = h6.tb_scan_pointing.col(raj_id)[cycle_id-1]\n sdtab[\"CRVAL4\"][row_sd] = h6.tb_scan_pointing.col(dcj_id)[cycle_id-1]\n\n # AZ, EL and PARANGLE should be stored for beam 1 only\n if beam_id == 1:\n sdtab[\"AZIMUTH\"][row_sd] = h6.tb_scan_pointing.col(\"azimuth\")[cycle_id-1]\n sdtab[\"ELEVATIO\"][row_sd] = h6.tb_scan_pointing.col(\"elevation\")[cycle_id-1]\n sdtab[\"PARANGLE\"][row_sd] = h6.tb_scan_pointing.col(\"par_angle\")[cycle_id-1]\n\n #sdtab[\"FOCUSAXI\"][row_sd] = h6.tb_scan_pointing.col(\"focus_axi\")[cycle_id-1]\n sdtab[\"FOCUSTAN\"][row_sd] = h6.tb_scan_pointing.col(\"focus_tan\")[cycle_id-1]\n\n # This is confusing - but it looks like FOCUSROT should be 15.0, which is sent as feed_angle\n # Likewise, focusaxi is probably supposed to be what we receive as focus_rot\n focus_rot = h6.tb_scan_pointing.col(\"focus_rot\")[cycle_id-1]\n sdtab[\"FOCUSROT\"][row_sd] = focus_rot\n sdtab[\"FOCUSAXI\"][row_sd] = h6.tb_observation.col(\"feed_angle\")[0]\n\n try:\n\n # OLD - 27 Aug 2013\n #timestamp = beam.cols.timestamp[row_h5]\n # New - based off integration length\n if beam_id == 1:\n new_id = beam.cols.id[row_h5]\n timestamp = (new_id - ref_id) * ref_delta + ref_time\n date_obs, time = timestamp2dt(timestamp)\n\n sdtab[\"DATE-OBS\"][row_sd] = date_obs\n sdtab[\"TIME\"][row_sd] = time\n\n ref_beam = ref_beams[np.argmin(np.abs(timestamp - obs.date[:]))]\n \n # Compute T_sys for each beam\n T_d_x = diode_temps_x[beam_id-1]\n T_d_y = diode_temps_y[beam_id-1]\n\n T_sys_x, T_sys_y = computeTsys(beam, row_h5, T_d_x, T_d_y)\n S_sys_x, S_sys_y = computeTsysSpec(h6, beam, row_h5, T_d_x, T_d_y)\n\n\n #print T_sys_x, T_sys_y\n sdtab[\"TSYS\"][row_sd] = (T_sys_x, T_sys_y)\n sdtab[\"TCAL\"][row_sd] = (np.average(extractMid(T_d_x)), np.average(extractMid(T_d_y)))\n #sdtab[\"CALFCTR\"][row_sd] = (1, 1)\n\n xx = beam.cols.xx[row_h5].astype('float32')\n yy = beam.cols.yy[row_h5].astype('float32')\n xx[0], yy[0] = 0, 0\n \n # See if 
there is cross corr \n if write_stokes in (1, 2):\n re_xy = beam.cols.re_xy[row_h5].astype('float32')\n im_xy = beam.cols.im_xy[row_h5].astype('float32')\n re_xy[0], im_xy[0] = 0, 0\n \n if flipped:\n xx, yy = xx[::-1], yy[::-1]\n if write_stokes in (1, 2):\n re_xy, im_xy = re_xy[::-1], im_xy[::-1]\n\n # DCP 2019.01 - Adding refbeam to all file types\n sdtab[\"REFBEAM\"][row_sd] = ref_beam\n #if obs_mode == 'MXCAL':\n # sdtab[\"REFBEAM\"][row_sd] = ref_beam\n\n if write_stokes == 2:\n xx = xx / fitLine(f, xx, num_chans) * S_sys_x\n yy = yy / fitLine(f, yy, num_chans) * S_sys_y\n\n re_xy = re_xy / fitLine(f, re_xy, num_chans)* np.sqrt(S_sys_x * S_sys_y)\n im_xy = im_xy / fitLine(f, im_xy, num_chans) * np.sqrt(S_sys_x * S_sys_y)\n \n # Ettore tells me Parkes uses this definition\n # i.e. that I is the average of xx + yy\n ii = (xx + yy) / 2\n qq = (xx - yy) / 2\n uu = re_xy\n vv = im_xy\n \n # Form one data vector\n data1 = np.append(ii, qq)\n data2 = np.append(uu, vv)\n data = np.append(data1, data2)\n data = data.reshape([1,1,4,num_chans])\n else:\n\n if write_stokes == 1:\n re_xy = re_xy / fitLine(f, re_xy, num_chans) * np.sqrt(S_sys_x * S_sys_y)\n im_xy = im_xy / fitLine(f, re_im, num_chans) * np.sqrt(S_sys_x * S_sys_y)\n re_xy[0], im_xy[0] = 0, 0\n\n #print \"cal factor: %2.3f\"%cf\n #print \"Diode temp: %s\"%T_d\n #xx, yy = applyCal(beam, row_h5, freqs, freqs_cal, cf, T_d_x, T_d_y)\n \n xx = xx / fitLine(f, xx, num_chans) * S_sys_x\n yy = yy / fitLine(f, yy, num_chans) * S_sys_y\n\n # Multibeam stats screws up if it encounters division by 1\n xx[xx <= 1 ] = 1\n yy[yy <= 1 ] = 1\n \n do_flagger = True\n if do_flagger:\n flags = np.zeros(len(xx))\n flags[xx > 1000] = 1\n flags[yy > 1000] = 1\n flags[xx==1] = 1\n flags[yy==1] = 1\n flags = np.append(flags, flags)\n flags = flags.reshape([1,1,2,num_chans])\n \n sdtab[\"FLAGGED\"][row_sd] = flags\n \n data = np.append(xx, yy)\n data = data.reshape([1,1,2,num_chans])\n \n sdtab[\"DATA\"][row_sd] = data\n\n if write_stokes == 1:\n sdtab[\"XPOLDATA\"][row_sd] = np.row_stack((re_xy, im_xy)).flatten()\n \n except:\n if beam.name != 'beam_02':\n print \"\\nWARNING: missing row in %s\"%beam.name\n print \"Current index: %i\"%row_h5\n print \"Row length: %i\"%beam.shape[0]\n raise\n try:\n sdtab[\"FLAGGED\"][row_sd] = np.ones_like([1,1,2,num_chans])\n except ValueError:\n pass\n row_sd += 1\n else:\n print \"WARNING: scan_pointing table is not complete.\"\n print \"%s table length: %i\"%(beam.name, beam.shape[0])\n print \"scan_pointing table length: %i\"%scan_pointing_len\n\n \n h6.h5.close()\n \n if os.path.exists(out_file):\n print \"\\nInfo: File exists, deleting...\"\n os.remove(out_file)\n\n print \"\\nInfo: Saving to file\"\n hdulist.writeto(out_file)\n hdulist.close()",
"def run(pdb_id):\n assert len(pdb_id) == 1\n pdb_id = pdb_id[0]\n collect = collect_ncs_files.ncs_paper_data_collection()\n pdb_info_fn = os.path.join(collect.data_dir,'log_' + pdb_id)\n pdb_info = collect.read_from_file(pdb_info_fn)\n if pdb_info:\n pdb_info = collect.make_mtz_file(pdb_info)\n if pdb_info:\n collect.write_to_file(pdb_info_fn,pdb_info)\n else:\n shutil.move(pdb_info_fn,collect.pdb_not_used_dir)",
"def cal_voro(inputfile, ndim, radii, ppp = '-p', results_path = '../../analysis/voro/'): \n if not os.path.exists(results_path):\n os.makedirs(results_path)\n\n basename = os.path.splitext(os.path.basename(inputfile))[0]\n fneighbor = open(results_path + basename + '.neighbor.dat', 'w')\n ffacearea = open(results_path + basename + '.facearea.dat', 'w')\n findex = open(results_path + basename + '.voroindex.dat', 'w') \n foverall = open(results_path + basename + '.overall.dat', 'w')\n\n position, bounds = get_input(inputfile, ndim, radii)\n for n in range(len(position)):\n fileformat = '%d ' + '%.6f ' * ndim + '%.2f'\n np.savetxt('dumpused', position[n], fmt = fileformat)\n \n #use box boundaries from snapshot\n Boxbounds = bounds[n].ravel()\n #use box boundaries from particle coordinates \n # boundsmin = position[n][:, 1: ndim + 1].min(axis = 0) - 0.1\n # boundsmax = position[n][:, 1: ndim + 1].max(axis = 0) + 0.1\n # Boxbounds = (np.column_stack((boundsmin, boundsmax))).ravel()\n\n cmdline = 'voro++ ' + ppp + ' -r -c \"%i %s %v %F @%i %A @%i %s %n @%i %s %f\" '\\\n + ('%f %f ' * ndim % tuple(Boxbounds)) + 'dumpused'\n if n == 0: print (cmdline)\n subprocess.run(cmdline, shell = True)\n\n fneighbor.write('id cn neighborlist\\n')\n ffacearea.write('id cn facearealist\\n')\n findex.write('id voro_index 0_to_7_faces\\n')\n foverall.write('id cn volume facearea\\n')\n f = open('dumpused.vol', 'r')\n for i in range(len(position[n][:, 0])):\n item = f.readline().split('@')\n foverall.write(item[0] + '\\n')\n findex.write(item[1] + '\\n')\n fneighbor.write(item[2] + '\\n')\n ffacearea.write(item[3])\n f.close()\n\n os.remove('dumpused') #delete temporary files\n os.remove('dumpused.vol')\n fneighbor.close()\n ffacearea.close()\n foverall.close()\n findex.close()\n print ('---------- Voronoi Analysis Done ------------')",
"def convertIffToPsd(*args, iffFileName: Union[AnyStr, bool]=\"\", psdFileName: Union[AnyStr,\n bool]=\"\", xResolution: Union[int, bool]=0, yResolution: Union[int, bool]=0,\n q=True, query=True, **kwargs)->Union[None, Any]:\n pass",
"def convert(infile,arcsec_per_pixel=0.2,sigma_conv=1.,expansion_factor=5,writeout=None,overwrite=False,keep_units=False):\n \n PLATESCALE = 1.2120 # arcsec / mm\n rss = fits.open( infile )\n phdr = rss[1].header\n dhdr = rss[0].header\n data = rss[0].data\n \n conff=dm.read_fibers_extension(phdr)\n bundles_values=conff.bundles.keys()\n sky_bundles=[]\n for bundlei in bundles_values:\n if phdr[\"BUN%03d_T\" % bundlei]=='SKY':\n sky_bundles.append(bundlei)\n \n w0 = dhdr['CRVAL1'] # reference wavelength\n try : dw = dhdr['CRDELT1'] # wavelength step\n except : dw = dhdr['CDELT1'] # wavelength step\n wunit = dhdr['CUNIT1'] # wavelength unit\n wtype = 'WAVE' # type spectra\n\n # define the dimensions of the spaxel array \n Nx, Ny, x0, y0, dx, dy = getspaxdim( data,phdr,sky_bundles,expansion_factor=expansion_factor)\n\n nbin=int(round(float(arcsec_per_pixel)/float(dx)))\n\n\n Nw = dhdr['NAXIS1'] # number of wave. steps\n \n\n \n # initialize an empty 3-d cube (zero everywhere)\n cube = fits.PrimaryHDU()\n #cube.header=rss[0].header \n #cube.header.remove('CRPIX1') \n #cube.header.remove('CRVAL1') \n #cube.header.remove('CUNIT1') \n #cube.header.remove('CTYPE1') \n #cube.header.remove('CRPIX2') \n #cube.header.remove('CRVAL2') \n #cube.header.remove('CDELT2') \n #cube.header.remove('CTYPE2') \n cube.header.update(NAXIS=3)\n cube.header.update(NAXIS1=Nx)\n cube.header.update(NAXIS2=Ny)\n cube.header.update(NAXIS3=Nw)\n cube.header.update(CD1_1=-dx/3600.)\n cube.header.update(CD2_2=dy/3600.)\n cube.header.update(CD3_3=dw)\n cube.header.update(CRPIX1=0)\n cube.header.update(CRPIX2=0)\n cube.header.update(CRPIX3=0)\n cube.header.update(CRVAL1=x0)\n cube.header.update(CRVAL2=y0)\n cube.header.update(CRVAL3=w0)\n\n cube.header.update(CTYPE1='RA---DEG')\n cube.header.update(CTYPE2='DEC--DEG')\n cube.header.update(CTYPE3=wtype)\n cube.header.update(CUNIT3=wunit)\n\n cube.header.update(CD1_2=0)\n cube.header.update(CD1_3=0)\n cube.header.update(CD2_1=0)\n cube.header.update(CD2_3=0)\n cube.header.update(CD3_1=0)\n cube.header.update(CD3_2=0)\n\n\n cube.data = numpy.zeros( (Nw,Ny,Nx) )\n\n # extract each spectrum and place it\n # into the 3-d cube\n for ispec in range(len(data)): \n fib_str='{:3d}'.format(ispec+1)\n fib_str=fib_str.replace(' ','0') \n if not(phdr['FIB'+fib_str+'_B'] in sky_bundles):\n try:\n end_sp=phdr['FIB'+fib_str+'W2'] \n start_sp=phdr['FIB'+fib_str+'W1']\n except:\n if ('start_sp' in locals()):\n print('Warning! FIB'+fib_str+'W1 and W2 information missing in header. Assuming previous fiber wavelength coverage.') \n else: \n end_sp=Nw\n start_sp=1 \n print('Warning! FIB'+fib_str+'W1 and W2 information missing in header. 
Assuming default wavelength coverage.') \n \n if end_sp!=start_sp:\n spec = data[ispec][:]\n Nwspec = Nw \n \n xpos = (phdr['FIB'+fib_str+'_x']+5.)*PLATESCALE \n ypos = (phdr['FIB'+fib_str+'_y']+5.)*PLATESCALE\n ix = int( round((xpos - x0),3) / dx )\n iy = int( round((ypos - y0),3) / dy )\n \n lambda_arr=w0+dw*numpy.arange(0,Nwspec,1)\n \n if keep_units==True:\n for i in range( start_sp, min(end_sp,Nwspec) ):\n cube.data[i][iy][ix] = spec[i]##same units \n else:\n for i in range( start_sp, min(end_sp,Nwspec) ):\n cube.data[i][iy][ix] = spec[i]*3.00e-5/lambda_arr[i]**2 ## Jy to erg/s/cm**2/A \n else:\n end_sp=Nwspec \n print('1st step') \n sigma_conv_pix=sigma_conv/((dx*nbin)/expansion_factor) \n for i in range( start_sp, min(end_sp,Nwspec)):\n print(str(i)+'/'+str(Nwspec)+' spectral channels',end=\"\\r\")\n cube.data[i]=scipy.ndimage.filters.gaussian_filter(cube.data[i], sigma=sigma_conv_pix)\n \n \n cube_rebin = fits.PrimaryHDU()\n cube_rebin.header=rss[0].header \n cube_rebin.header.remove('CRPIX1') \n cube_rebin.header.remove('CRVAL1') \n cube_rebin.header.remove('CUNIT1') \n cube_rebin.header.remove('CTYPE1') \n cube_rebin.header.remove('CDELT1')\n cube_rebin.header.remove('CRPIX2') \n cube_rebin.header.remove('CRVAL2') \n #cube_rebin.header.remove('CUNIT2') \n cube_rebin.header.remove('CDELT2') \n cube_rebin.header.remove('CTYPE2') \n cube_rebin.header.update(NAXIS=3)\n cube_rebin.header.update(NAXIS1=Nx//nbin)\n cube_rebin.header.update(NAXIS2=Ny//nbin)\n cube_rebin.header.update(NAXIS3=Nw)\n cube_rebin.header.update(CD1_1=-dx*nbin/3600.)\n cube_rebin.header.update(CD2_2=dy*nbin/3600.)\n cube_rebin.header.update(CD3_3=dw)\n cube_rebin.header.update(CRPIX1=0)\n cube_rebin.header.update(CRPIX2=0)\n cube_rebin.header.update(CRPIX3=0)\n cube_rebin.header.update(CRVAL1=x0)\n cube_rebin.header.update(CRVAL2=y0)\n cube_rebin.header.update(CRVAL3=w0)\n \n cube_rebin.header.update(CTYPE1='RA---SIN')\n cube_rebin.header.update(CTYPE2='DEC--SIN')\n cube_rebin.header.update(CTYPE3=wtype)\n cube_rebin.header.update(CUNIT3=wunit)\n cube_rebin.header.update(CUNIT1='deg')\n cube_rebin.header.update(CUNIT2='deg')\n \n cube_rebin.header.update(CD1_2=0)\n cube_rebin.header.update(CD1_3=0)\n cube_rebin.header.update(CD2_1=0)\n cube_rebin.header.update(CD2_3=0)\n cube_rebin.header.update(CD3_1=0)\n cube_rebin.header.update(CD3_2=0)\n cube_rebin.verify('fix')\n if keep_units:\n cube_rebin.header.update(BUNIT= dhdr['BUNIT']) ##the rss one!!\n else:\n cube_rebin.header.update(BUNIT= 'erg/s/cm**2/Angstrom') \n\n\n\n \n cube_rebin.data = numpy.zeros( (Nw,Ny//nbin,Nx//nbin) )\n print('')\n print('2nd step')\n for i in range( 0, Nwspec) : \n shape=cube.data[i].shape \n print(str(i)+'/'+str(Nwspec)+' spectral channels',end=\"\\r\")\n for xi in numpy.arange(0,shape[0],nbin)[:-1]:\n for yj in numpy.arange(0,shape[1],nbin)[:-1]:\n pixel_ij=numpy.sum(cube.data[i][xi:xi+nbin,yj:yj+nbin]) \n cube_rebin.data[i][xi//nbin,yj//nbin]=pixel_ij \n if writeout !=None:\n cube_rebin.writeto(writeout,overwrite=overwrite)\n return( cube_rebin)",
"def dumpData(self,out):\n out.packSub0('INAM',self.id)\n out.packSub0('PNAM',self.prevId)\n out.packSub0('NNAM',self.nextId)\n if not self.isDeleted:\n out.packSub('DATA','2i4B',\n self.type, self.spDisp, self.spRank, self.spSex, self.pcRank, self.unk02)\n if self.spId: out.packSub0('ONAM',self.spId)\n if self.spRace: out.packSub0('RNAM',self.spRace)\n if self.spClass: out.packSub0('CNAM',self.spClass)\n if self.spFaction: out.packSub0('FNAM',self.spFaction)\n if self.cell: out.packSub0('ANAM',self.cell)\n if self.pcFaction: out.packSub0('DNAM',self.pcFaction)\n if self.speak: out.packSub0('SNAM',self.speak)\n if self.text: out.packSub('NAME',self.text)\n if self.qflag == 0:\n pass\n if self.qflag == 1: out.packSub('QSTN','\\x01')\n if self.qflag == 2: out.packSub('QSTF','\\x01')\n if self.qflag == 3: out.packSub('QSTR','\\x01')\n for index,test in enumerate(self.tests):\n if test: test.dumpData(out,index)\n if self.script: out.packSub('BNAM',self.script)\n if self.isDeleted: out.pack('DELE','i',0)",
"def write_seqs_fasta(out_fp_seqs_fasta: str, out_fp_seqs_qza: str,\n tsv_pd: pd.DataFrame, tsv_fp: str = '') -> str:\n with open(out_fp_seqs_fasta, 'w') as fas_o:\n for seq in tsv_pd.index:\n fas_o.write('>%s\\n%s\\n' % (seq.strip(), seq.strip()))\n cmd = '# Write features as fasta file:\\n'\n cmd += '# - Features from: %s\\n' % tsv_fp\n cmd += '# Snippet:\\n'\n cmd += '# ```:\\n'\n cmd += \"# with open(fasta_out, 'w') as o:\\n\"\n cmd += \"# for seq in tsv_pd.index:\\n\"\n cmd += \"# o.write('>%s\\\\n%s\\\\n' % (seq.strip(), seq.strip()))\\n\"\n cmd += '# ```:\\n'\n cmd += run_import(\n out_fp_seqs_fasta, out_fp_seqs_qza, 'FeatureData[Sequence]')\n return cmd",
"def generate_siaf_detector_layout():\n\n VIdlParity = -1\n layout = Table(dtype=['S100', 'S100', 'f4', 'i4', 'i4'] ,names=('InstrName', 'AperName', 'DetSciYAngle', 'DetSciParity', 'VIdlParity'))\n for instrument in 'NIRCam FGS NIRISS NIRSpec MIRI'.split():\n if instrument == 'NIRCam':\n for sca_name in 'A1 A3 A5 B2 B4'.split():\n layout.add_row([instrument.upper(), 'NRC{}_FULL'.format(sca_name), 0, -1, VIdlParity])\n for sca_name in 'A2 A4 B1 B3 B5'.split():\n layout.add_row([instrument.upper(), 'NRC{}_FULL'.format(sca_name), 180, -1, VIdlParity])\n for sca_name in ['NRCA2_FULL_WEDGE_RND','NRCA2_FULL_WEDGE_BAR','NRCA4_FULL_WEDGE_RND','NRCA4_FULL_WEDGE_BAR']:\n layout.add_row([instrument.upper(), '{}'.format(sca_name), 180, -1, VIdlParity])\n for sca_name in ['NRCA1_FULL_WEDGE_RND','NRCA1_FULL_WEDGE_BAR','NRCA3_FULL_WEDGE_RND','NRCA3_FULL_WEDGE_BAR','NRCA5_FULL_WEDGE_RND','NRCA5_FULL_WEDGE_BAR']:\n layout.add_row([instrument.upper(), '{}'.format(sca_name), 0, -1, VIdlParity])\n elif instrument == 'NIRISS':\n for sca_name in ['NIS_CEN']:\n layout.add_row([instrument, sca_name, 180, 1, VIdlParity])\n elif instrument == 'MIRI':\n for sca_name in ['MIRIM_FULL']:\n layout.add_row([instrument, sca_name, 0, 1, VIdlParity])\n elif instrument == 'NIRSpec':\n for sca_name in ['NRS1_FULL']:\n layout.add_row([instrument.upper(), sca_name, 0, 1, VIdlParity])\n for sca_name in ['NRS2_FULL']:\n layout.add_row([instrument.upper(), sca_name, 180, 1, VIdlParity])\n elif instrument == 'FGS':\n for sca_name in ['FGS1_FULL']:\n layout.add_row([instrument, sca_name, 180, 1, VIdlParity])\n for sca_name in ['FGS2_FULL']:\n layout.add_row([instrument, sca_name, 0, -1, VIdlParity])\n\n layout_file = os.path.join(JWST_SOURCE_DATA_ROOT, 'siaf_detector_layout.txt')\n\n layout.pprint()\n\n comments = []\n comments.append('SIAF detector layout definition file.'.format(instrument))\n comments.append('')\n comments.append('These apertures act as parent apertures of all other SI apertures and their parameters are thus inherited.')\n comments.append('')\n comments.append('Generated {} {}'.format(timestamp.isot, timestamp.scale))\n comments.append('by {}'.format(username))\n comments.append('')\n layout.meta['comments'] = comments\n layout.write(layout_file, format='ascii.fixed_width', delimiter=',',\n delimiter_pad=' ', bookend=False)",
"def generate_resfile_from_pdb(pdbfilename, resfilename, input_sc = True ):\n\tp = rosetta.core.import_pose.pose_from_file(pdbfilename)\n\tgenerate_resfile_from_pose(p, resfilename, input_sc)",
"def serdes_dump_state(fpgaid,verbose=1):\n lines=[]\n\n lines.append(\"Group : AEQ\")\n lines.append(\" Logic Channel : %d\" % reconfig_AEQ_get_logical_channel_address(fpgaid,verbose))\n lines.append(\" adapt_done : %d\" % reconfig_AEQ_get_adapt_done(fpgaid,verbose))\n lines.append(\" equalization_results : %d\" % reconfig_AEQ_get_equalization_results(fpgaid,verbose))\n lines.append(\" mode : %d\" % reconfig_AEQ_get_mode(fpgaid,verbose))\n lines.append(\"Group : DFE\")\n lines.append(\" Logic Channel : %d\" % reconfig_DFE_get_logical_channel_address(fpgaid,verbose))\n lines.append(\" adaptation_engine_enable : %d\" % reconfig_DFE_get_adaptation_engine_enable(fpgaid,verbose))\n lines.append(\" power_on : %d\" % reconfig_DFE_get_power_on(fpgaid,verbose))\n lines.append(\" tap_1 : %d\" % reconfig_DFE_get_tap_1(fpgaid,verbose))\n lines.append(\" tap_2 : %d\" % reconfig_DFE_get_tap_2(fpgaid,verbose))\n lines.append(\" tap_2_polarity : %d\" % reconfig_DFE_get_tap_2_polarity(fpgaid,verbose))\n lines.append(\" tap_3 : %d\" % reconfig_DFE_get_tap_3(fpgaid,verbose))\n lines.append(\" tap_3_polarity : %d\" % reconfig_DFE_get_tap_3_polarity(fpgaid,verbose))\n lines.append(\" tap_4 : %d\" % reconfig_DFE_get_tap_4(fpgaid,verbose))\n lines.append(\" tap_4_polarity : %d\" % reconfig_DFE_get_tap_4_polarity(fpgaid,verbose))\n lines.append(\" tap_5 : %d\" % reconfig_DFE_get_tap_5(fpgaid,verbose))\n lines.append(\" tap_5_polarity : %d\" % reconfig_DFE_get_tap_5_polarity(fpgaid,verbose))\n lines.append(\"Group : EyeQ\")\n lines.append(\" Logic Channel : %d\" % reconfig_EyeQ_get_logical_channel_address(fpgaid,verbose))\n lines.append(\" 1D_Eye : %d\" % reconfig_EyeQ_get_1D_Eye(fpgaid,verbose))\n lines.append(\" BERB_Enable : %d\" % reconfig_EyeQ_get_BERB_Enable(fpgaid,verbose))\n lines.append(\" BERB_Snap_Shot_and_Reset : %d\" % reconfig_EyeQ_get_BERB_Snap_Shot_and_Reset(fpgaid,verbose))\n lines.append(\" Bit_Counter31_0 : %d\" % reconfig_EyeQ_get_Bit_Counter31_0(fpgaid,verbose))\n lines.append(\" Bit_Counter63_32 : %d\" % reconfig_EyeQ_get_Bit_Counter63_32(fpgaid,verbose))\n lines.append(\" Counter_Enable : %d\" % reconfig_EyeQ_get_Counter_Enable(fpgaid,verbose))\n lines.append(\" Enable_Eye_Monitor : %d\" % reconfig_EyeQ_get_Enable_Eye_Monitor(fpgaid,verbose))\n lines.append(\" Err_Conter63_32 : %d\" % reconfig_EyeQ_get_Err_Conter63_32(fpgaid,verbose))\n lines.append(\" Err_Counter31_0 : %d\" % reconfig_EyeQ_get_Err_Counter31_0(fpgaid,verbose))\n lines.append(\" Horizontal_phase : %d\" % reconfig_EyeQ_get_Horizontal_phase(fpgaid,verbose))\n lines.append(\" Polarity : %d\" % reconfig_EyeQ_get_Polarity(fpgaid,verbose))\n lines.append(\" Vertical_height : %d\" % reconfig_EyeQ_get_Vertical_height(fpgaid,verbose))\n lines.append(\"Group : PMA\")\n lines.append(\" Logic Channel : %d\" % reconfig_PMA_get_logical_channel_address(fpgaid,verbose))\n lines.append(\" Pre_emphasis_first_post_tap : %d\" % reconfig_PMA_get_Pre_emphasis_first_post_tap(fpgaid,verbose))\n lines.append(\" Pre_emphasis_pre_tap : %d\" % tapformat(reconfig_PMA_get_Pre_emphasis_pre_tap(fpgaid,verbose)))\n lines.append(\" Pre_emphasis_second_post_tap : %d\" % tapformat(reconfig_PMA_get_Pre_emphasis_second_post_tap(fpgaid,verbose)))\n lines.append(\" RX_equalization_control : %d\" % reconfig_PMA_get_RX_equalization_control(fpgaid,verbose))\n lines.append(\" RX_equalization_DC_gain : %d\" % reconfig_PMA_get_RX_equalization_DC_gain(fpgaid,verbose))\n lines.append(\" VOD : %d\" % reconfig_PMA_get_VOD(fpgaid,verbose))\n print \"\\n\".join(lines)\n 
return lines",
"def dump_data(self,filename,dump_id):\n import pickle\n from Auxiliary import tdc_Filenames\n data = [ d.get_pure_data_copy() for d in self.plotter.data ]\n dump_dict={}\n dump_dict['fft_data'] = data\n dump_dict['fitting_type'] = self.fft_fit.type \n dump_dict['nk_plot'] = self.fft_fit.nk_plot\n # full file name of the file with manipulator dump\n filename=tdc_Filenames.get_full_vis_filename(dump_id, filename+'.pickle')\n pickle.dump( dump_dict, open(filename,'w') )\n print '\\nContent dumped in \"%s\" \\n' % filename",
"def _convert_psd(self, ascii_format, ifo):\n command = [\"convert_psd_ascii2xml\",\n \"--fname-psd-ascii\", f\"{ascii_format}\",\n \"--conventional-postfix\",\n \"--ifo\", f\"{ifo}\"]\n \n pipe = subprocess.Popen(command, \n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n out, err = pipe.communicate()\n self.logger.info(command, production = self.production)\n if err:\n self.production.status = \"stuck\"\n if hasattr(self.production.event, \"issue_object\"):\n raise PipelineException(f\"An XML format PSD could not be created.\\n{command}\\n{out}\\n\\n{err}\",\n issue=self.production.event.issue_object,\n production=self.production.name)\n else:\n raise PipelineException(f\"An XML format PSD could not be created.\\n{command}\\n{out}\\n\\n{err}\",\n production=self.production.name)",
"def saveFits(self, filename):\n \n if isinstance(self.res, type(None)):\n raise Exception('Result is not yet aviable.')\n \n header = fits.Header()\n header['NAXIS1'] = self.naxis\n header['NAXIS2'] = self.naxis\n header['CTYPE1'] = 'RA---SIN'\n header['CTYPE2'] = 'DEC--SIN'\n header['CDELT1'] = - self.fov/(np.pi/180 * self.naxis)\n header['CDELT2'] = self.fov/(np.pi/180 * self.naxis)\n header['BUNIT'] = 'JY/PIXEL'\n \n hdu = fits.PrimaryHDU(self.res, header=header)\n hdulist = fits.HDUList([hdu])\n hdulist.writeto(filename, overwrite=True)\n \n print(\"Saved as '%s'.\" %(filename))",
"def main():\r\n\timport sys\r\n\r\n\tlistofSequences = FastAreader(sys.stdin).readFasta() \r\n\tPAMSequences = PAMfinder(listofSequences).classController() # Calls on controller class to return desired models.\r\n\tf = open('Guide Sequences.txt','w') \r\n\tfor i in range(len(PAMSequences[0])):\r\n\t\tf.write(PAMSequences[0][i]) # Prints the header sequence into the file.\r\n\t\tf.write('\\n') \r\n\t\tprint(PAMSequences[0][i]) \r\n\t\tfor j in range(len(PAMSequences[1][i])): \r\n\t\t\tif j == 0: \r\n\t\t\t\tf.write(\"Forward Strand PAM Sites:\") \r\n\t\t\t\tf.write('\\n')\r\n\t\t\t\tprint(\"Forward Strand PAM Sites:\") \r\n\t\t\tprint(PAMSequences[1][i][j]) # Prints the forward sequences\r\n\t\t\ty = str(PAMSequences[1][i][j]) # Changes from int to string characters.\r\n\t\t\tx = ''.join(y) # Joining all the string values so we can print to file.\r\n\t\t\tf.write(x) # Write the joined forward sequences to the file.\r\n\t\t\tf.write('\\n')\r\n\t\tfor k in range(len(PAMSequences[2][i])): # For reverse sequences, and follows same logic as forward. \r\n\t\t\tif k == 0:\r\n\t\t\t\tf.write(\"Reverse Strand PAM Sites (in reference to the Top Strand Position):\")\r\n\t\t\t\tf.write('\\n')\r\n\t\t\t\tprint(\"Reverse Strand PAM Sites (in reference to the Top Strand Position):\")\r\n\t\t\tprint(PAMSequences[2][i][k]) # Prints the reverse sequences with the corresponding positions. \r\n\t\t\ta = str(PAMSequences[2][i][k]) # Changes the integer to string characters, allowing for the values to join.\r\n\t\t\tb = ''.join(a)\r\n\t\t\tf.write(b) # Write all of the reverse sequences onto the text file with their positions. \r\n\t\t\tf.write('\\n')\r\n\tf.close() # Close the file.\r",
"def mainPSM(myPath, result_file):\n def maxQuant(my_file):\n\n peptideList = list()\n with open(my_file, \"r\") as f:\n next(f) # skip first line\n for line in f:\n peptide = line.split(\"\\t\")[0].upper().rstrip().replace(\"I\", \"J\").replace(\"L\", \"J\")\n peptideList.append(peptide)\n\n return peptideList\n\n def proteomeDiscoverer(my_file):\n\n peptideList = list()\n table = str.maketrans('', '', string.ascii_lowercase)\n with open(my_file, \"r\") as f:\n next(f) # skip first line\n for line in f:\n peptide = line.split(\"\\t\")[4].split(\".\")[1].rstrip().replace(\"I\", \"J\").replace(\"L\", \"J\")\n peptide = peptide.translate(table)\n peptideList.append(peptide)\n\n return peptideList\n\n def galaxyP(my_file):\n\n peptideList = list()\n with open(my_file, \"r\") as f:\n next(f) # skip first line\n for line in f:\n peptide = line.split(\"\\t\")[2].upper().rstrip().replace(\"I\", \"J\").replace(\"L\", \"J\")\n peptideList.append(peptide)\n\n return peptideList\n\n def MPA(my_file):\n\n peptideList = list()\n with open(my_file, \"r\") as f:\n next(f) # skip first line\n for line in f:\n peptide = line.split(\"\\t\")[2].upper().rstrip().replace(\"I\", \"J\").replace(\"L\", \"J\")\n peptideList.append(peptide)\n\n return peptideList\n\n # Open a file\n sample_db = os.listdir(myPath)\n # dictionary for a db1-5\n completeResultsDict = dict() # key = se; value = dict(key = dataset, value = peptidelist)\n\n # This would print all the files and directories\n for se in sample_db:\n if se not in completeResultsDict.keys():\n # sub-dictionary for a certain search pipeline\n searchEngineDict = dict() # key = dataset, value = peptidelist)\n completeResultsDict[se] = searchEngineDict\n\n for result in os.listdir(myPath + \"/\" + se):\n peptideList = list()\n if se == \"MQ\":\n peptideList = maxQuant(myPath + \"/\" + se + \"/\" + result)\n elif se == \"PD\":\n peptideList = proteomeDiscoverer(myPath + \"/\" + se + \"/\" + result)\n elif se == \"GP\":\n if result.endswith(\".tabular\"):\n peptideList = galaxyP(myPath + \"/\" + se + \"/\" + result)\n elif se == \"MPA\":\n peptideList = MPA(myPath + \"/\" + se + \"/\" + result)\n else:\n print(\"Are you sure?\")\n\n # updating the completeResultsDict\n if peptideList:\n myDict = completeResultsDict.get(se)\n myDict[result.split(\".\", maxsplit=1)[0]] = peptideList\n\n # nested for-loop: {search engine: {dataset : peptidelist}}\n nonRedundantPeptideSet = set()\n count = 0\n for se, result in completeResultsDict.items():\n for dataset, peptides in result.items():\n for peptide in peptides:\n nonRedundantPeptideSet.add(peptide)\n count += 1\n nonRedundantPeptideList = sorted(list(nonRedundantPeptideSet))\n\n peptideMatrix = dict()\n peptideMatrix[\"PeptideSeq\"] = nonRedundantPeptideList\n headerList = list()\n headerList.append(\"se_dataset\")\n for se, result in completeResultsDict.items():\n print(se)\n for dataset, peptides in result.items():\n print(dataset)\n headerList.append(\"{}_{}\".format(se, dataset))\n peptideList = []\n for peptide in nonRedundantPeptideList:\n if peptide in peptides:\n peptideList.append(1)\n else:\n peptideList.append(0)\n peptideMatrix[\"{}_{}\".format(se, dataset)] = peptideList\n\n\n df = pandas.DataFrame(data=peptideMatrix)\n df.to_csv(open(result_file, \"w\", newline=''), index=False)",
"def write_psf(self):\n # **********************************\n # **********************************\n # psf writer (start)\n # **********************************\n # **********************************\n\n print(\"******************************\")\n print(\"\")\n print(\n \"The charmm X-plor format psf writer (the write_psf function) is running\"\n )\n\n date_time = datetime.datetime.today()\n\n print(\n \"write_psf: forcefield_selection = {}, residues = {}\".format(\n self.forcefield_selection, self.residues\n )\n )\n\n print(\"******************************\")\n print(\"\")\n\n if self.structure_box_1:\n list_of_structures = [\n self.structure_box_0_ff,\n self.structure_box_1_ff,\n ]\n list_of_file_names = [self.filename_box_0, self.filename_box_1]\n stuct_only = [self.structure_box_0_ff, self.structure_box_1_ff]\n else:\n list_of_structures = [self.structure_box_0_ff]\n list_of_file_names = [self.filename_box_0]\n stuct_only = [self.structure_box_0_ff]\n\n for q in range(0, len(list_of_structures)):\n stuct_iteration = list_of_structures[q]\n file_name_iteration = list_of_file_names[q]\n output = str(file_name_iteration) + \".psf\"\n stuct_only_iteration = stuct_only[q]\n # Lammps syntax depends on the functional form\n # Infer functional form based on the properties of the stuct_iteration\n if self.detect_forcefield_style:\n # Check for angles\n if len(stuct_iteration.urey_bradleys) > 0:\n print(\n \"Warning: Urey bradley terms detected. GOMC does no support the Urey-Bradley terms\"\n )\n warn(\n \"warning: Urey bradley terms detected. \"\n \"GOMC does no support the Urey-Bradley terms\"\n )\n use_urey_bradleys = True\n else:\n print(\"No urey bradley terms detected\")\n use_urey_bradleys = False\n\n # Check for dihedrals\n if len(stuct_iteration.rb_torsions) > 0:\n print(\n \"RB Torsions detected, will converted to CHARMM Dihedrals\"\n )\n use_rb_torsions = True\n dihedrals_list = stuct_iteration.rb_torsions\n dihedrals = [\n [\n dihedral.atom1.idx + 1,\n dihedral.atom2.idx + 1,\n dihedral.atom3.idx + 1,\n dihedral.atom4.idx + 1,\n ]\n for dihedral in stuct_iteration.rb_torsions\n ]\n else:\n use_rb_torsions = False\n\n if len(stuct_iteration.dihedrals) > 0:\n print(\n \"Charmm dihedrals detected, so CHARMM Dihedrals will remain\"\n )\n use_dihedrals = True\n dihedrals_list = stuct_iteration.dihedrals\n dihedrals = [\n [\n dihedral.atom1.idx + 1,\n dihedral.atom2.idx + 1,\n dihedral.atom3.idx + 1,\n dihedral.atom4.idx + 1,\n ]\n for dihedral in stuct_iteration.dihedrals\n ]\n else:\n use_dihedrals = False\n if (use_rb_torsions is False) and (use_dihedrals is False):\n dihedrals_list = []\n dihedrals = []\n if use_rb_torsions and use_dihedrals:\n warn(\n \"Multiple dihedral styles detected, check your \"\n \"Forcefield XML and structure files\"\n )\n\n # Check for impropers\n for dihedral in stuct_iteration.dihedrals:\n if dihedral.improper:\n warn(\n \"ERROR: Amber-style impropers are currently not supported in GOMC\"\n )\n\n impropers_list = stuct_iteration.impropers\n impropers = [\n [\n improper.atom1.idx + 1,\n improper.atom2.idx + 1,\n improper.atom3.idx + 1,\n improper.atom4.idx + 1,\n ]\n for improper in stuct_iteration.impropers\n ]\n\n no_atoms = len(stuct_iteration.atoms)\n no_bonds = len(stuct_iteration.bonds)\n no_angles = len(stuct_iteration.angles)\n\n no_dihedrals = len(dihedrals)\n no_impropers = len(impropers)\n\n no_donors = len(stuct_iteration.donors)\n no_acceptors = len(stuct_iteration.acceptors)\n no_groups = len(stuct_iteration.groups)\n\n # psf printing 
(start)\n\n residue_data_list = []\n residue_names_list = []\n for k, atom in enumerate(stuct_only_iteration.atoms):\n residue_data_list.append(str(atom.residue))\n residue_names_list.append(atom.residue.name)\n\n unique_residue_data_dict = {}\n unique_residue_data_list = []\n residue_data_name_list = []\n\n for m, residue in enumerate(stuct_only_iteration.residues):\n unique_residue_data_list.append(\n str(stuct_only_iteration.residues[m])\n )\n unique_residue_data_dict.update(\n {unique_residue_data_list[m]: m + 1}\n )\n residue_data_name_list.append(\n stuct_only_iteration.residues[m].name\n )\n\n res_no_chain_iter_corrected = []\n residue_id_list = []\n residue_id_adder_fixed_struct_wo_bonds = 0\n for f, PSF_atom_iteration_0 in enumerate(\n stuct_only_iteration.atoms\n ):\n if f > 0:\n if (\n PSF_atom_iteration_0.residue.chain\n == previous_residue_chain\n and len(PSF_atom_iteration_0.bonds) == 0\n ):\n residue_id_adder_fixed_struct_wo_bonds += 1\n\n previous_residue_chain = PSF_atom_iteration_0.residue.chain\n\n residue_id_int = int(\n unique_residue_data_dict[residue_data_list[f]]\n + residue_id_adder_fixed_struct_wo_bonds\n )\n res_id_adder = int(\n (residue_id_int % self.max_residue_no) % self.max_residue_no\n )\n if int(res_id_adder) == 0:\n res_no_iteration_corrected = int(self.max_residue_no)\n else:\n res_no_iteration_corrected = res_id_adder\n\n res_no_chain_iter_corrected.append(res_no_iteration_corrected)\n residue_id_list.append(residue_id_int)\n\n output_write = genopen(output, \"w\")\n\n first_indent = \"%8s\"\n psf_formating = (\n \"%8s %-4s %-4s %-4s %-4s %4s %10.6f %13.4f\" + 11 * \" \"\n )\n\n output_write.write(\"PSF \")\n output_write.write(\"\\n\\n\")\n\n no_of_remarks = 3\n output_write.write(first_indent % no_of_remarks + \" !NTITLE\\n\")\n output_write.write(\n \" REMARKS this file \"\n + file_name_iteration\n + \" - created by MoSDeF-GOMC using the\"\n + \"\\n\"\n )\n output_write.write(\n \" REMARKS parameters from the \"\n + str(self.forcefield_selection)\n + \" force field via MoSDef\\n\"\n )\n output_write.write(\n \" REMARKS created on \" + str(date_time) + \"\\n\\n\\n\"\n )\n\n # This converts the atom name in the GOMC psf and pdb files to unique atom names\n print(\n \"bead_to_atom_name_dict = {}\".format(\n self.bead_to_atom_name_dict\n )\n )\n [\n unique_individual_atom_names_dict,\n individual_atom_names_list,\n missing_bead_to_atom_name,\n ] = unique_atom_naming(\n stuct_only_iteration,\n residue_id_list,\n residue_names_list,\n bead_to_atom_name_dict=self.bead_to_atom_name_dict,\n )\n\n if None in [\n unique_individual_atom_names_dict,\n individual_atom_names_list,\n missing_bead_to_atom_name,\n ]:\n self.input_error = True\n print_error_message = (\n \"ERROR: The unique_atom_naming function failed while \"\n \"running the charmm_writer function. 
Ensure the proper inputs are \"\n \"in the bead_to_atom_name_dict.\"\n )\n raise ValueError(print_error_message)\n\n # ATOMS: Calculate the atom data\n # psf_formating is conducted for the for CHARMM format (i.e., atom types are base 52, letters only)\n output_write.write(first_indent % no_atoms + \" !NATOM\\n\")\n for i_atom, PSF_atom_iteration_1 in enumerate(\n stuct_iteration.atoms\n ):\n segment_id = PSF_atom_iteration_1.residue.segid or \"SYS\"\n atom_type_iter = base10_to_base52_alph(\n self.atom_types_to_index_value_dict[\n PSF_atom_iteration_1.type\n + \"_\"\n + PSF_atom_iteration_1.residue.name\n ]\n )\n\n atom_lines_iteration = psf_formating % (\n i_atom + 1,\n segment_id,\n res_no_chain_iter_corrected[i_atom],\n str(residue_names_list[i_atom])[: self.max_resname_char],\n individual_atom_names_list[i_atom],\n atom_type_iter,\n PSF_atom_iteration_1.charge,\n PSF_atom_iteration_1.mass,\n )\n\n output_write.write(\"%s\\n\" % atom_lines_iteration)\n\n output_write.write(\"\\n\")\n\n # BONDS: Calculate the bonding data\n output_write.write(first_indent % no_bonds + \" !NBOND: bonds\\n\")\n for i_bond, PSF_bond_iteration_1 in enumerate(\n stuct_iteration.bonds\n ):\n output_write.write(\n (first_indent * 2)\n % (\n PSF_bond_iteration_1.atom1.idx + 1,\n PSF_bond_iteration_1.atom2.idx + 1,\n )\n )\n\n if (i_bond + 1) % 4 == 0:\n output_write.write(\"\\n\")\n\n if no_bonds % 4 == 0:\n output_write.write(\"\\n\")\n else:\n output_write.write(\"\\n\\n\")\n\n if no_bonds == 0:\n output_write.write(\"\\n\")\n\n # ANGLES: Calculate the angle data\n output_write.write(first_indent % no_angles + \" !NTHETA: angles\\n\")\n for i_angle, angle_iteration in enumerate(stuct_iteration.angles):\n output_write.write(\n (first_indent * 3)\n % (\n angle_iteration.atom1.idx + 1,\n angle_iteration.atom2.idx + 1,\n angle_iteration.atom3.idx + 1,\n )\n )\n\n if (i_angle + 1) % 3 == 0:\n output_write.write(\"\\n\")\n\n if no_angles % 3 == 0:\n output_write.write(\"\\n\")\n else:\n output_write.write(\"\\n\\n\")\n\n if no_angles == 0:\n output_write.write(\"\\n\")\n\n # DIHEDRALS: Calculate the dihedral data\n output_write.write(\n first_indent % no_dihedrals + \" !NPHI: dihedrals\\n\"\n )\n for i_dihedral, dihedral_iter in enumerate(dihedrals_list):\n (\n dihedral_atom_1,\n dihedral_atom_2,\n dihedral_atom_3,\n dihedral_atom_4,\n ) = (\n dihedral_iter.atom1,\n dihedral_iter.atom2,\n dihedral_iter.atom3,\n dihedral_iter.atom4,\n )\n\n output_write.write(\n (first_indent * 4)\n % (\n dihedral_atom_1.idx + 1,\n dihedral_atom_2.idx + 1,\n dihedral_atom_3.idx + 1,\n dihedral_atom_4.idx + 1,\n )\n )\n\n if (i_dihedral + 1) % 2 == 0:\n output_write.write(\"\\n\")\n\n if no_dihedrals % 2 == 0:\n output_write.write(\"\\n\")\n else:\n output_write.write(\"\\n\\n\")\n\n if no_dihedrals == 0:\n output_write.write(\"\\n\")\n\n # IMPROPERS: Calculate the improper data\n output_write.write(\n first_indent % no_impropers + \" !NIMPHI: impropers\\n\"\n )\n for i_improper, improper_iter in enumerate(impropers_list):\n (\n improper_atom_1,\n improper_atom_2,\n improper_atom_3,\n improper_atom_4,\n ) = (\n improper_iter.atom1,\n improper_iter.atom2,\n improper_iter.atom3,\n improper_iter.atom4,\n )\n\n output_write.write(\n (first_indent * 4)\n % (\n improper_atom_1.idx + 1,\n improper_atom_2.idx + 1,\n improper_atom_3.idx + 1,\n improper_atom_4.idx + 1,\n )\n )\n\n if (i_improper + 1) % 2 == 0:\n output_write.write(\"\\n\")\n\n if no_impropers % 2 == 0:\n output_write.write(\"\\n\")\n else:\n output_write.write(\"\\n\\n\")\n\n 
if no_impropers == 0:\n output_write.write(\"\\n\")\n\n # DONOR: calculate the donor data\n output_write.write(first_indent % no_donors + \" !NDON: donors\\n\")\n for donor_i, donor_iter in enumerate(stuct_iteration.donors):\n output_write.write(\n (first_indent * 2)\n % (donor_iter.atom1.idx + 1, donor_iter.atom2.idx + 1)\n )\n if (donor_i + 1) % 4 == 0:\n output_write.write(\"\\n\")\n\n if no_donors % 4 == 0:\n output_write.write(\"\\n\")\n else:\n output_write.write(\"\\n\\n\")\n\n if no_donors == 0:\n output_write.write(\"\\n\")\n\n # ACCEPTOR: calculate the acceptor data\n output_write.write(\n first_indent % no_acceptors + \" !NACC: acceptors\\n\"\n )\n for acceptor_i, acceptor_iter in enumerate(\n stuct_iteration.acceptors\n ):\n output_write.write(\n (first_indent * 2)\n % (acceptor_iter.atom1.idx + 1, acceptor_iter.atom2.idx + 1)\n )\n if (acceptor_i + 1) % 4 == 0:\n output_write.write(\"\\n\")\n\n if no_acceptors % 4 == 0:\n output_write.write(\"\\n\")\n else:\n output_write.write(\"\\n\\n\")\n\n if no_acceptors == 0:\n output_write.write(\"\\n\")\n\n # NNB: calculate the NNB data\n output_write.write(first_indent % 0 + \" !NNB\\n\\n\")\n for nbb_i, atoms_iter in enumerate(stuct_iteration.atoms):\n output_write.write(first_indent % 0)\n if (nbb_i + 1) % 8 == 0:\n output_write.write(\"\\n\")\n\n if no_atoms % 8 == 0:\n output_write.write(\"\\n\")\n else:\n output_write.write(\"\\n\\n\")\n\n if no_atoms == 0:\n output_write.write(\"\\n\")\n\n # GROUP: calculate the group data\n try:\n group_data = stuct_iteration.groups.nst2\n except AttributeError:\n group_data = 0\n output_write.write(\n (first_indent * 2) % (no_groups or 1, group_data) + \" !NGRP \\n\"\n )\n if stuct_iteration.groups is True:\n for group_i, group_iter in enumerate(stuct_iteration.groups):\n output_write.write(\n (first_indent * 3)\n % (\n group_iter.atom.idx,\n group_iter.type,\n group_iter.move,\n )\n )\n if (group_i + 1) % 3 == 0:\n output_write.write(\"\\n\")\n\n if no_groups % 3 == 0:\n output_write.write(\"\\n\")\n else:\n output_write.write(\"\\n\\n\")\n\n if no_groups == 0:\n output_write.write(\"\\n\")\n\n else:\n structure_abs_charge_value = abs(\n sum(\n atom_charge_iter.charge\n for atom_charge_iter in stuct_iteration.atoms\n )\n )\n if structure_abs_charge_value < 1.0e-4:\n group_type = 1\n else:\n group_type = 2\n output_write.write((first_indent * 3) % (0, group_type, 0))\n output_write.write(\"\\n\")\n\n output_write.write(\"\\n\")\n output_write.close()\n # **********************************\n # **********************************\n # psf writer (end)\n # **********************************\n # **********************************"
]
| [
"0.57730514",
"0.5676229",
"0.5420465",
"0.5398354",
"0.5393157",
"0.5263756",
"0.5250648",
"0.522665",
"0.52078366",
"0.5158776",
"0.50826997",
"0.5068098",
"0.5034896",
"0.50248575",
"0.5023164",
"0.5021004",
"0.50092244",
"0.50082356",
"0.5005879",
"0.50026983",
"0.5002594",
"0.50007963",
"0.4975282",
"0.49677518",
"0.49579272",
"0.494678",
"0.49466625",
"0.49360964",
"0.493289",
"0.4932647"
]
| 0.72169113 | 0 |
guard against single num: turn a single number into a constant DataFrame whose index is consistent with others | def _to_constant_df(self, num):
if isinstance(num, pd.DataFrame):
# pdb.set_trace()
return num
else:
return self.data['ones'].copy() * num | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def first_valid_index(self):\n return (\n DataFrameDefault.register(pandas.DataFrame.first_valid_index)(self)\n .to_pandas()\n .squeeze()\n )",
"def data_structure():\n\n items = [1.0, 2.0, 3.0, 4.0, 5.0 ]\n s = pd.Series(items, index=['a', 'b', 'c', 'd', 'e'])\n # s = pd.Series(np.random.randn(5), index=['a', 'b', 'c', 'd', 'e'])\n print s\n s = pd.Series(items)\n print s\n\n d= {'one': [1.0, 2.0, 3.0, 4.0], 'two': [4.0, 3.0, 2.0, 1.0]}\n\n df = pd.DataFrame(d)\n print df\n df = pd.DataFrame(d, index=['a', 'b', 'c', 'd'])\n print df\n\n\n data2 = [{'a': 1, 'b': 2}, {'a': 5, 'b': 10, 'c': 20}]\n df = pd.DataFrame(data2)\n\n print df",
"def int_frame_const_col():\n df = DataFrame(\n np.tile(np.arange(3, dtype=\"int64\"), 6).reshape(6, -1) + 1,\n columns=[\"A\", \"B\", \"C\"],\n )\n return df",
"def initialize_df(scenario_index,scenarios_nums):\n df = pd.DataFrame(index=scenarios_nums)\n df.index.name = scenario_index\n return df",
"def renumber( self, ini=1 ):\n df = self.copy()\n df['frame'] = df['frame'] + ini - df['frame'].values[0]\n return df.coerce()",
"def test_table_numpy_from_schema_float_to_int_with_nan_partial_indexed(self):\n df = {\"a\": np.array([np.nan, 1.5, np.nan, 2.5, np.nan, 3.5, 4.5]), \"b\": np.array([1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5])}\n table = Table({\"a\": int, \"b\": int}, index=\"b\")\n table.update(df)\n\n # truncates decimal\n assert table.view().to_dict() == {\n \"a\": [None, 1, None, 2, None, 3, 4],\n \"b\": [1, 2, 3, 4, 5, 6, 7],\n }\n\n table.update(pd.DataFrame({\"a\": [10, 9, 8], \"b\": [2, 3, 5]}))\n\n assert table.view().to_dict() == {\"a\": [None, 10, 9, 2, 8, 3, 4], \"b\": [1, 2, 3, 4, 5, 6, 7]}\n\n table.update({\"a\": np.array([100, np.nan], dtype=np.float64), \"b\": np.array([-1, 6], dtype=np.float64)})\n\n assert table.view().to_dict() == {\"a\": [100, None, 10, 9, 2, 8, None, 4], \"b\": [-1, 1, 2, 3, 4, 5, 6, 7]}\n\n table.update({\"a\": np.array([100, 1000, np.nan], dtype=np.float64), \"b\": np.array([100, 6, 97], dtype=np.float64)})\n\n assert table.view().to_dict() == {\"a\": [100, None, 10, 9, 2, 8, 1000, 4, None, 100], \"b\": [-1, 1, 2, 3, 4, 5, 6, 7, 97, 100]}",
"def num_df(df):\n float_mask = np.array(df.dtypes == 'float')\n int_mask = np.array(df.dtypes == 'int')\n mask = float_mask | int_mask\n df_obj = df.iloc[:, mask]\n return df_obj",
"def __getitem__(self, index) -> Union[pd.Series, pd.DataFrame]:\n if isinstance(index, int):\n # A single row\n return self.data.iloc[index]\n # return self.as_dataframe(self.data.iloc[index:index+1])\n if isinstance(index, str):\n # A column, by name\n return self.data[index]\n if (\n isinstance(index, tuple)\n and len(index) == 2\n and index[1] in self.data.columns\n ):\n # Row index, column index -> cell value\n return self.data.loc[index]\n if isinstance(index, slice):\n # return self.as_dataframe(self.data.take(index))\n return self.as_dataframe(self.data[index])\n # Iterable -- selected row indices or boolean array, probably\n try:\n if isinstance(index, type(None)) or len(index) == 0:\n empty = pd.DataFrame(columns=self.data.columns)\n return self.as_dataframe(empty)\n except TypeError as exc:\n raise TypeError(\n f\"object of type {type(index)!r} \"\n f\"cannot be used as an index into a {self.__class__.__name__}\"\n ) from exc\n return self.as_dataframe(self.data[index])\n # return self.as_dataframe(self.data.take(index))",
"def regular_index(*dfs):\n original_index = [df.index for df in dfs]\n have_bad_index = [not isinstance(df.index, pd.RangeIndex)\n for df in dfs]\n\n for df, bad in zip(dfs, have_bad_index):\n if bad:\n df.reset_index(drop=True, inplace=True)\n\n try:\n yield dfs\n finally:\n for df, bad, idx in zip(dfs, have_bad_index, original_index):\n if bad and len(df.index) == len(idx):\n df.index = idx",
"def test_build_index(constructor):\n dsi = DatasetIndex(constructor)\n if isinstance(constructor, int):\n constructor = np.arange(constructor)\n elif isinstance(constructor, DatasetIndex):\n constructor = constructor.index\n elif callable(constructor):\n constructor = constructor()\n assert (dsi.index == constructor).all()",
"def transform(self, x):\n found_indices = x.index.intersection(self.index)\n if len(found_indices) != len(self.index) and not self.ignore_absent:\n raise KeyError(\n f\"{set(self.index).difference(x.index)} not found in the DataFrame\"\n )\n else:\n return x.loc[found_indices, :]",
"def frame(something, name = None):\n \n if isinstance(something, dict):\n res = pd.DataFrame.from_dict(something, orient='index')\n else:\n res = pd.DataFrame(something)\n number_of_columns = len(res.columns)\n if name != None:\n if isinstance(name, list):\n if len(name) >= number_of_columns:\n res.columns = name[:number_of_columns]\n else:\n res.columns = name + list(range(len(name), number_of_columns))\n else:\n res.columns = [name] + list(range(1, number_of_columns))\n return res",
"def df(x):\n raise NotImplementedError",
"def filter_index_column(df, verbose = False): \n index_starts_with_nan = pd.isnull(df.index)[0]\n if index_starts_with_nan:\n full_row_count = df.shape[0]\n df.index = list(range(full_row_count))\n if verbose is True:\n print(\"\")\n print(\"Index changed. New dataframe index: \", df.index)\n return df",
"def iat_df(self, df):\n result = self.iat(**df).reset_coords(drop=True).to_dataframe()\n if isinstance(df, pd.DataFrame):\n result.index = df.index\n return result",
"def single_program_df(classify_df: pd.DataFrame) -> pd.DataFrame:\n return classify_df.iloc[[0], :].copy()",
"def sample_by_num(data_dict: dict, num: int):\n samples = {}\n for k, v in data_dict.items():\n if k == \"index\":\n samples[k] = v[0: num]\n else:\n samples[k] = v[0: int(data_dict[\"index\"][num])]\n return samples",
"def test_index_alternate(self):\n self.insert()\n self.tbl[::2]",
"def test_df():\n return pd.DataFrame({\n 'intcol': [1, 2, 3],\n 'strcol': ['four', 'five', 'six'],\n 'floatcol': [7.0, 8.0, 9.0]\n })",
"def intrinsic_index_calc(df: pd.DataFrame):\n\n cur_index = 0\n df['Int_index'] = None\n df['Int_index'].iloc[0] = cur_index\n for i in range(len(df)):\n if df['Int_event'][i] in [-1, 1, -2, 2]:\n cur_index = cur_index + 1\n df['Int_index'].iloc[i] = cur_index\n\n return df",
"def series_from_dataframe(df, index_column: str, value_column: str=None):\n\n if len(df.columns) > 2:\n df = df[[index_column, value_column]].copy()\n else:\n df = df.copy()\n df.set_index(index_column, inplace=True)\n sr = df.squeeze()\n sr.name = value_column\n return sr",
"def transform(self, x):\n if self.index is None:\n self.index = x.index\n else:\n found_indices = x.index.intersection(self.index)\n if len(found_indices) != len(self.index) and not self.ignore_absent_index:\n raise KeyError(\n f\"{set(self.index).difference(x.index)} not found in the DataFrame\"\n )\n if self.columns is None:\n self.columns = x.columns\n else:\n found_columns = x.columns.intersection(self.columns)\n if (\n len(found_columns) != len(self.columns)\n and not self.ignore_absent_columns\n ):\n raise KeyError(\n f\"{set(self.columns).difference(x.columns)} not found in the DataFrame\"\n )\n return x.loc[found_indices, found_columns]",
"def conv_idx_numlist_to_missing(self):\n \n ct = 0\n for indx in self.num_list_to_missing:\n self.list_with_missing[indx] = self.num_list[ct]\n ct += 1",
"def _getvXXXXAsOneString(self,vXXXX=None,start=0,end=-1,dropColList=None,filterColList=None,mapFunc={},sortList=None,ascending=True,roundDct=None,fmtFunc={},index=True,header=True):\r\n\r\n logStr = \"{0:s}.{1:s}: \".format(self.__class__.__name__, sys._getframe().f_code.co_name)\r\n logger.debug(\"{0:s}{1:s}\".format(logStr,'Start.')) \r\n\r\n dfContentAsOneString=None\r\n\r\n df=self.dataFrames[vXXXX]\r\n\r\n # select rows\r\n if end == -1:\r\n df=df[start:]\r\n else:\r\n df=df[start:end]\r\n\r\n # select cols \r\n colList=df.columns.values.tolist()\r\n if isinstance(dropColList,list):\r\n colListOut=[col for col in colList if col not in dropColList]\r\n else:\r\n colListOut=colList\r\n df=df.loc[:,colListOut]\r\n if filterColList!=None:\r\n df=df.filter(items=filterColList)\r\n\r\n # map cols\r\n for col,func in mapFunc.items(): \r\n if col not in df.columns:\r\n pass\r\n else:\r\n df[col]=df[col].map(func)\r\n\r\n # sort \r\n if isinstance(sortList,list):\r\n df=df.sort_values(sortList,ascending=ascending) \r\n\r\n # round \r\n if isinstance(roundDct,dict):\r\n df=df.round(roundDct) \r\n\r\n try: \r\n dfContentAsOneString=df.to_string(formatters=fmtFunc,index=index,header=header,justify='right') \r\n except MxError:\r\n raise \r\n except Exception as e:\r\n logStrFinal=\"{:s}Exception: Line: {:d}: {!s:s}: {:s}\".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))\r\n logger.error(logStrFinal) \r\n raise XmError(logStrFinal) \r\n finally:\r\n logger.debug(\"{0:s}{1:s}\".format(logStr,'_Done.')) \r\n return dfContentAsOneString",
"def test_stochatreat_output_idx_col(treatments_dict):\n treatments_df = treatments_dict[\"treatments\"]\n data = treatments_dict[\"data\"]\n idx_col = treatments_dict[\"idx_col\"]\n assert treatments_df[idx_col].dtype == data[idx_col].dtype, \"Index column is missing\"",
"def _index_to_nan(data, existing_nans, to_nan):\n index_nan = np.random.choice([i for i in range(len(data)) if i not in existing_nans],\n size=to_nan, replace=False)\n data_imp = data.copy()\n data_imp[index_nan] = np.nan\n return data_imp, index_nan",
"def _remove_rows(df, num):\n if num == 0:\n return df.copy()\n\n to_remove = np.random.choice(df.index.values, num)\n to_keep = df.index.difference(to_remove)\n\n return df.loc[to_keep]",
"def _add_rows(df, num, alloc_id, constraint, stuff=False):\n if num == 0:\n return df.copy()\n\n to_add = np.random.choice(df.index.values, num)\n rows_to_add = df.loc[to_add]\n\n # update the new rows' index\n max_idx = df.index.max()\n rows_to_add.index = range(max_idx + 1, max_idx + len(rows_to_add) + 1)\n\n # allocate rows to containers\n _allocate_rows(rows_to_add, alloc_id, constraint, stuff)\n\n return pd.concat([df, rows_to_add])",
"def mydata(y):\n data = {\n \"one\": pd.Series([1.0, 5.0, 3.0, 10.0], index=[\"a\", \"b\", \"c\", \"d\"]),\n \"two\": pd.Series([1.0, 2.0, 3.0, 4.0], index=[\"a\", \"b\", \"c\", \"d\"]),\n }\n db = {}\n db[\"vals\"] = pd.DataFrame(data)\n db[\"y\"] = y\n return db",
"def get_data(dataframe,index=None):\n dflen = len(dataframe)\n if index==None or index <0 or index >= dflen:\n index = randint(0,dflen)\n return dataframe.iloc[index].to_json()"
]
| [
"0.55400425",
"0.5448055",
"0.53408647",
"0.53388983",
"0.5300624",
"0.5299355",
"0.5055315",
"0.5000199",
"0.49719244",
"0.4967105",
"0.49633044",
"0.49564132",
"0.4935128",
"0.49337956",
"0.49235603",
"0.4909642",
"0.48602775",
"0.48543218",
"0.4815096",
"0.4796346",
"0.47896305",
"0.47840232",
"0.4776213",
"0.47753388",
"0.4768223",
"0.47607788",
"0.47586355",
"0.47514543",
"0.47504947",
"0.4733366"
]
| 0.71836346 | 0 |
readFasta reads in the fasta, calls parseFasta, and then calls analyzeSequence to analyze the DNA | def readFasta(self, fp):
for head, seq in self.parseFasta(fp):
#analyzing the sequence
self.analyzeSequence(seq)
#saving the header
if head == '':
continue
else:
self.header.append(head) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fasta(file_path):\n \n print(f\"Parsing fasta '{file_path}'\")\n data = {\n 'ur_up_': [], 'accession': [],\n 'entry_name': [], 'offset': [],\n 'taxonomy': [], 'sequence': []\n }\n\n with open(file_path, 'r') as f:\n for i, line in enumerate(f):\n line = line.strip()\n \n if line[0] == '>':\n key = line[1:]\n \n if i == 0:\n name, offset = key.split(\"/\")\n ur_up_, acc = None, None\n else:\n ur_up_, acc, name_offset = key.split(\"|\")\n name, offset = name_offset.split('/')\n \n data['ur_up_'].append(ur_up_)\n data['accession'].append(acc)\n data['entry_name'].append(name)\n data['offset'].append(offset)\n data['sequence'].append('')\n data['taxonomy'].append(name.split('_')[1])\n else:\n data['sequence'][-1] += line\n \n if i and (i % 50000 == 0):\n print(f\"Reached: {i}\")\n\n return pd.DataFrame(data=data)",
"def readFasta(self, fastaFile):\t\n\t\tname, seq = None, []\n\t\tfor line in fastaFile:\n\t\t\tline = line.rstrip()\n\t\t\tif (line.startswith(\">\")):\n\t\t\t\tif name: yield (name, ''.join(seq))\n\t\t\t\tname, seq = line, []\n\t\t\telse:\n\t\t\t\tseq.append(line)\n\t\tif name: yield (name, ''.join(seq))",
"def fasta_parser(filename):\n fasta = {}\n with open(filename) as f:\n contents = f.read()[1:].split('\\n>')\n for section in contents:\n sample = section.split('\\n')\n sample_id = sample[0]\n seq = ''.join(sample[1:]).strip()\n fasta[sample_id] = seq\n return fasta",
"def parseFasta(self, fastaRef):\n\n seq = \"\"\n prevId = \"\"\n with open(fastaRef, 'r') as f:\n\n for line in f:\n if \">\" == line[0]:\n # asserting the regex don't fail...\n found = GENEIDRULE.search(line)\n if(found):\n alternate = found.group(1)\n geneName = found.group(2)\n self._transcripts[alternate] = geneName\n else:\n print(\"EnsemblFasta: NOT FOUND\")\n print(line)\n exit()\n\n if(prevId and seq):\n geneName = self._transcripts[prevId]\n if geneName in self._genes:\n gene = self._genes[geneName]\n else:\n gene = Gene(geneName)\n self._genes[geneName] = gene\n\n gene.addTranscripts(prevId, seq)\n seq = \"\"\n prevId = alternate\n else:\n seq += line.rstrip(\"\\n\")\n gene.addTranscripts(prevId, seq)",
"def readFastaFile(filename):\n if os.path.exists(filename)==False:return {}\n sequences={}\n fhr=open(filename,\"r\")\n for line in fhr:\n if line[0]==\">\":\n sequences[line.strip()[1:].split()[0]]=fhr.readline().strip()\n fhr.close()\n return sequences",
"def processFasta(file,testStr):\n header = \"\"\n seq = \"\"\n with open(file, \"r\") as f:\n for line in f:\n line = line.strip()\n if(line.startswith(\">\")):\n if(len(header) == 0 ):\n #first entry:\n header = line[1:]\n else:\n #this is a new entry\n indexes = getIndexes(seq,testStr)\n if len(indexes) > 0:\n print(\"{} in {}: {} times ({})\".format(testStr, header,len(indexes),indexes))\n seq = \"\"\n header = line[1:]\n else:\n seq +=line\n #processing the final entry\n indexes = getIndexes(seq,testStr)\n if len(indexes) > 0:\n print(\"{} in {}: {} times ({})\".format(testStr, header,len(indexes),indexes))",
"def readFastaFile(filename):",
"def fasta_reader(inp):\n #inp is hard coded as \"Sequence1/2.fasta in this script\".\n with open(inp) as in_file: \n for line in in_file.readlines():\n #Guarantees sequence is pulled from the FASTA file not the title \n if line[0].isalpha():\n seq = line.rstrip()\n return (seq)",
"def parse_text(filehandle: TextIO) -> Iterator[Fasta]:\n\n # Check that the file looks like UniProt text format\n first_line = next(filehandle)\n if not first_line.startswith(\"ID\"):\n raise TextParserError(\n \"Unexpected file format: first line of UniProt text file should start with 'ID'\"\n )\n filehandle.seek(0)\n\n fasta = Fasta(sequence=\"\")\n for line in filehandle:\n key = line[:2] # This is more efficient than using line.startswith\n if key == \"ID\":\n tokens = line.split()\n fasta.entry_name = tokens[1]\n fasta.reviewed = True if tokens[2] == \"Reviewed;\" else False\n elif key == \"AC\":\n if fasta.accession is None:\n accessions = line[5:].rstrip(\";\\n\").split(\"; \")\n fasta.accession = accessions[0]\n elif key == \"DT\":\n if \"sequence version\" in line:\n tokens = line[5:].strip(\".\\n\").split()\n fasta.version = int(tokens[3])\n elif key == \"DE\":\n if \"RecName\" in line:\n fasta.name = _extract_name(line)\n # Get the first SubName if no RecName found\n elif fasta.name is None and line[5:12] == \"SubName\":\n fasta.name = _extract_name(line)\n elif line[5:10] == \"Flags\" and \"Fragment\" in line:\n fasta.fragment = True\n elif key == \"GN\":\n if line[5:10] == \"Name=\":\n tokens = line[10:].split(\";\")\n # Remove evidence tags, if present\n gene_tokens = tokens[0].split(\" {\")\n fasta.gene = gene_tokens[0]\n elif key == \"OS\":\n # TODO: check for multiline species name (excluding brackets)\n if fasta.species is None:\n species_line = line[5:].strip().split(\" (\")\n fasta.species = species_line[0].strip(\".\")\n elif key == \"OX\":\n if \"NCBI_TaxID\" in line:\n tokens = line[5:].strip(\";\\n\").split(\"; \")\n # Remove evidence tag if present\n taxid_tokens = tokens[0][11:].split(\" {\")\n fasta.taxid = taxid_tokens[0]\n elif key == \"PE\":\n fasta.evidence = int(line[5])\n elif key == \" \":\n sequence_line = line.strip().replace(\" \", \"\")\n fasta.sequence += sequence_line\n elif key == \"//\":\n yield fasta\n fasta = Fasta(sequence=\"\")",
"def parse_fasta(self, filename):\n id = ''\n desc = ''\n tempseq = []\n try:\n seqfile = open(filename,'r')\n for line in seqfile:\n if line.startswith('>'):\n if not id is '':\n yield { 'id': id.strip(), 'desc': desc.strip(), 'dna': ''.join(tempseq) }\n if ' ' in line:\n (id, desc) = line[1::].split(' ', 1)\n else:\n id = line[1::].strip()\n desc = ''\n tempseq = []\n elif not line.startswith('>'):\n tempseq.append(line.rstrip())\n if not id is '':\n yield { 'id': id.strip(), 'desc': desc.strip(), 'dna': ''.join(tempseq) }\n except OSError:\n raise PathError(''.join(['ERROR: cannot open', refseqpath]))",
"def read_fasta(fasta_name):\n \n \"\"\"first open the file outside \"\"\"\n file_handler = open(fasta_name)\n\n # ditch the boolean (x[0]) and just keep the header or sequence since\n # we know they alternate.\n fasta_iter = (x[1] for x in groupby(file_handler, lambda line: line[0] == \">\"))\n\n for header in fasta_iter:\n # drop the \">\"\n headerStr = header.__next__()[1:].strip()\n\n # join all sequence lines to one.\n seq = \"\".join(s.strip() for s in fasta_iter.__next__())\n\n # yield (headerStr, seq)\n result_record = {'header':headerStr,'seqRecord':seq}\n return result_record",
"def parse_fasta(fasta_filename):\n\n sequences = {}\n\n with open(fasta_filename, \"r\") as fasta:\n\n # do our best to accept any input that looks vaguely valid\n for line in fasta:\n \n if line.startswith(\">\"):\n # take everything up to the first space as the id\n # get rid of the leading >\n # and get rid of the newline\n fasta_id = line.split(\" \")[0].replace(\">\", \"\", 1).rstrip('\\n')\n \n seq = []\n wholeseq = ''\n if fasta_id == \"\":\n raise Exceptions.MissingId(\"invalid if there is no fasta_id\")\n \n else:\n seq.append(line.rstrip('\\n'))\n # handle sequences on multiple lines\n wholeseq = \"\".join(seq)\n if len(wholeseq) == 0:\n raise Exceptions.MissingSequence(\"invalid if there is no sequence\")\n sequences[fasta_id] = wholeseq\n\n if len(sequences) == 0:\n raise Exceptions.EmptyFasta(\"invalid if there is nothing in the fasta file\")\n\n return sequences",
"def parse_multifasta_file(file, number_of_fastas):\n\n with open(file) as file:\n for i in range(number_of_fastas):\n fasts_seq = ''\n fasta_name = file.readline().strip()[1:]\n end_of_file = False\n end_of_seq = False\n while not end_of_seq and not end_of_file:\n x = file.tell()\n seq = file.readline()\n if not seq:\n end_of_file = True\n elif '>' not in seq:\n fasts_seq = fasts_seq + seq\n else:\n file.seek(x)\n end_of_seq = True\n fasts_seq = re.sub(r'\\n', '', fasts_seq)\n yield fasta_name, fasts_seq",
"def Parse_Fasta(filename):\n dic = {}\n name = None\n seq = ''\n with open(filename) as F:\n for line in F:\n if line.startswith('>'):\n if name is not None:\n dic[name] = seq\n seq = ''\n name = line.strip()\n else:\n seq += line\n if not name in dic:\n dic[name] = seq\n return dic",
"def process_fasta(in_fh, args, cluster_size_re, rna_seq_objs):\n for record in SeqIO.parse(in_fh, 'fasta'):\n sequence = '%s%s%s'.replace('T', 'U') % (\n args.prefix, str(record.seq), args.suffix\n )\n cluster_size = 1\n try:\n cluster_size = cluster_size_re.search(record.description)\n cluster_size = cluster_size.group(1)\n except AttributeError:\n print 'Not able to find cluster size. Setting to 1.'\n if cluster_size is None:\n cluster_size = 1\n\n # find structure\n curr_seq = RNASequence(record.id, cluster_size, sequence)\n if args.run_mfold:\n curr_seq.structure, curr_seq.energy_dict = run_mfold(\n sequence, args\n )\n curr_seq.free_energy = curr_seq.energy_dict['dG']\n else:\n rnafold_out = run_rnafold(sequence, args)\n rnafold_out = rnafold_out.split('\\n')\n try:\n curr_seq.structure, curr_seq.free_energy = (\n rnafold_out[1].split(' (')\n )\n except (ValueError, IndexError):\n print 'Error running RNAfold:\\n%s\\nExiting.' % rnafold_out\n sys.exit(1)\n\n print '%s\\n' % rnafold_out\n try:\n curr_seq.free_energy = abs(\n float(curr_seq.free_energy.replace(')', ''))\n )\n curr_seq.ensemble_free_energy = abs(\n float(rnafold_out[2].split('[')[1].replace(']', ''))\n )\n curr_seq.ensemble_probability = abs(float(\n rnafold_out[4].split(';')[0].replace(\n ' frequency of mfe structure in ensemble ', ''\n )\n ))\n curr_seq.ensemble_diversity = abs(float(\n rnafold_out[4].split(';')[1].replace(\n ' ensemble diversity ', ''\n )\n ))\n except IndexError:\n print (\n 'Error parsing RNAfold output. '\n '(Couldn\\'t find statistics.) Please check '\n 'RNAfold options.'\n )\n sys.exit(1)\n rna_seq_objs.append(curr_seq)",
"def test_parse_fasta_file(self):\r\n\r\n fasta_data = ['>seq1 SAMPLE1', 'AAACGT', '>seq2', 'ACGGT']\r\n\r\n expected_fasta = {'seq1': 'AAACGT', 'seq2': 'ACGGT'}\r\n\r\n expected_order = ['seq1 SAMPLE1', 'seq2']\r\n\r\n actual_fasta, actual_order = parse_fasta_file(fasta_data)\r\n\r\n self.assertEqual(actual_fasta, expected_fasta)\r\n\r\n self.assertEqual(actual_order, expected_order)",
"def premrna_desc(gff3, fasta):\n seqs = {}\n for defline, seq in LocusPocus.fasta.parse(fasta):\n seqid = defline[1:].split(' ')[0]\n if seqid not in seqs:\n seqs[seqid] = seq\n\n mrnaacc = ''\n mrnalen = 0\n gccontent = 0.0\n gcskew = 0.0\n ncontent = 0.0\n exoncount = 0\n introncount = 0\n utr5plen = 0\n utr3plen = 0\n for entry in gff3:\n if '\\tmRNA\\t' in entry:\n fields = entry.rstrip().split('\\t')\n assert len(fields) == 9\n mrnaacc = re.search(r'accession=([^;\\n]+)', fields[8]).group(1)\n mrnalen = int(fields[4]) - int(fields[3]) + 1\n mrnaseq = seqs[mrnaacc]\n if len(mrnaseq) != mrnalen:\n message = 'pre-mRNA \"%s\": length mismatch' % mrnaacc\n message += ' (gff3=%d, fa=%d)' % (mrnalen, len(mrnaseq))\n message += '; most likely a duplicated accession, discarding'\n print(message, file=sys.stderr)\n mrnaacc = ''\n gccontent = gc_content(mrnaseq)\n gcskew = gc_skew(mrnaseq)\n ncontent = n_content(mrnaseq)\n elif '\\texon\\t' in entry:\n exoncount += 1\n elif '\\tintron\\t' in entry:\n introncount += 1\n elif '\\tfive_prime_UTR\\t' in entry:\n fields = entry.rstrip().split('\\t')\n assert len(fields) == 9\n utr5plen += int(fields[4]) - int(fields[3]) + 1\n elif '\\tthree_prime_UTR\\t' in entry:\n fields = entry.rstrip().split('\\t')\n assert len(fields) == 9\n utr3plen += int(fields[4]) - int(fields[3]) + 1\n elif entry.startswith('###'):\n if mrnaacc != '':\n values = '%s %d %.3f %.3f %.3f %d %d %d %d' % (\n mrnaacc, mrnalen, gccontent, gcskew, ncontent,\n exoncount, introncount, utr5plen, utr3plen)\n yield values.split(' ')\n mrnaacc = ''\n mrnalen = 0\n gccontent = 0.0\n gcskew = 0.0\n ncontent = 0.0\n exoncount = 0\n exonlen = 0\n introncount = 0\n utr5plen = 0\n utr3plen = 0",
"def process_fasta(file, type):\n seqs = []\n with open(file) as f:\n seq = f.readline().strip()\n seq = seq + '\\t'\n for line in f:\n line = line.strip()\n if line[0] == \">\":\n s1 = seq.split('\\t')\n bac = s1[0].split(\"|\")[2]\n sequence = to_word_index(s1[1], type)\n seqs.append([bac, sequence])\n seq = line + '\\t'\n else:\n seq = seq + line\n\n s1 = seq.split('\\t')\n bac = s1[0].split(\"|\")[2]\n sequence = to_word_index(s1[1], type)\n seqs.append([bac, sequence])\n\n df = pd.DataFrame(seqs, columns=['bacteria', 'sequence'])\n df = shuffle(df)\n\n return df",
"def readFASTA(filename, alpha = None, string_only = False):\n seqlist = []\n seqname = None\n seqinfo = None\n seqdata = []\n fh = open(filename)\n thisline = fh.readline()\n while (thisline):\n if (thisline[0] == '>'): # new sequence\n if (seqname): # take care of the data that is already in the buffer before processing the new sequence\n try:\n if (string_only):\n seqnew = ''.join(seqdata)\n else:\n seqnew = Sequence(seqdata, alpha, seqname, seqinfo)\n seqlist.append(seqnew)\n except RuntimeError as e:\n print(\"Warning: \"+seqname+\" is invalid (ignored): \", e, file=sys.stderr)\n seqinfo = thisline[1:-1] # everything on the defline is \"info\"\n seqname = seqinfo.split()[0] # up to first space\n seqdata = []\n else: # pull out the sequence data\n cleanline = thisline.split()\n for line in cleanline:\n seqdata.extend(tuple(line.strip('*'))) # sometimes a line ends with an asterisk in FASTA files\n thisline = fh.readline()\n\n if (seqname):\n try:\n if (string_only):\n seqnew = ''.join(seqdata)\n else:\n seqnew = Sequence(seqdata, alpha, seqname, seqinfo)\n seqlist.append(seqnew)\n except RuntimeError as e:\n print(\"Warning: \" + seqname + \" is invalid (ignored): \", e, file=sys.stderr)\n else:\n raise RuntimeError(\"No sequences on FASTA format found in this file\")\n fh.close()\n return seqlist",
"def parse_rosalind(filename):\n print \"parse_rosalind should be called parse_fasta\"\n return parse_fasta(filename)",
"def fasta_reader(fasta):\n # ditch the boolean (x[0]) and just keep the header/seq grouping\n fa_iter = (x[1] for x in itertools.groupby(fasta, lambda line: line[0] == \">\"))\n for header in fa_iter:\n # drop the \">\"\n name = next(header)[1:].strip()\n # join all sequence lines to one by iterating until the next group.\n read = \"\".join(s.strip() for s in next(fa_iter))\n yield name, read",
"def fasta_reader(path, fasta_file):\n fasta_dict = dict()\n try:\n for seq_record in SeqIO.parse(path + fasta_file, \"fasta\"):\n id_fasta = seq_record.id\n sequence = seq_record.seq\n fasta_dict[id_fasta] = sequence\n except FileNotFoundError:\n GRAPH_LOGGER.debug('External fasta file not exist!')\n return None\n\n return fasta_dict",
"def read_fasta_file(fasta):\n\n ptn_list = []\n fasta_content = open(fasta, \"r\")\n new_ptn = None\n for line in fasta_content:\n if \">sp\" in line or \">tr\" in line:\n if new_ptn != None:\n new_ptn[\"seq\"] = sequence\n ptn_list.append(new_ptn)\n tokens = line.split()\n new_ptn = {\"id\": tokens[0] }\n sequence = \"\"\n else:\n sequence += line[:-1]\n new_ptn[\"seq\"] = sequence\n ptn_list.append(new_ptn)\n\n return ptn_list",
"def readFastaFile(filename):\n info={}\n fhr=open(filename,\"r\")\n while(True):\n line=fhr.readline()\n if not line: break\n if(\">\" in line):\n try:\n info[line.strip()[1:].split()[0]]=fhr.readline().strip()\n except ValueError:\n pass\n return info",
"def parse_fasta(fasta_f, contig_data):\n\n basen = os.path.basename(fasta_f)\n [soil, ecotype, media] = basen.split(\"_\")[:3]\n\n with open(fasta_f, 'rU') as IN:\n for record in SeqIO.parse(IN, \"fasta\"):\n contig_data[record.description] = {'length': len(record.seq), 'soil': soil, 'ecotype': ecotype, 'media': media}",
"def read_fasta(sequence_file :str):\n\n #for gziped files:\n\n if sequence_file.endswith(\".gz\"):\n with gzip.open(sequence_file, \"rt\") as file:\n seqDict = SeqIO.to_dict(SeqIO.parse(file, 'fasta'))\n ident = ident.split(\"|\")[1]\n return seqDict\n\n # for no gziped fasta files:\n else:\n seqRecord = SeqIO.read(sequence_file, \"fasta\")\n sequence = seqRecord.seq\n ident = seqRecord.id\n ident = ident.split(\"|\")[1]\n return ident, sequence",
"def mrna_desc(gff3, fasta):\n seqs = {}\n for defline, seq in LocusPocus.fasta.parse(fasta):\n seqid = defline[1:].split(' ')[0]\n if seqid not in seqs:\n seqs[seqid] = seq\n\n mrnaacc = ''\n mrnalen = 0\n for entry in gff3:\n if '\\tmRNA\\t' in entry:\n fields = entry.rstrip().split('\\t')\n assert len(fields) == 9\n mrnalen += int(fields[4]) - int(fields[3]) + 1\n accmatch = re.search(r'accession=([^;\\n]+)', fields[8])\n assert accmatch, 'Unable to parse mRNA accession: %s' % fields[8]\n mrnaacc = accmatch.group(1)\n elif entry.startswith('###'):\n mrnaseq = seqs[mrnaacc]\n if len(mrnaseq) != mrnalen:\n message = 'mature mRNA \"%s\": length mismatch' % mrnaacc\n message += ' (gff3=%d, fa=%d)' % (mrnalen, len(mrnaseq))\n message += '; most likely a duplicated accession, discarding'\n print(message, file=sys.stderr)\n else:\n gccontent = gc_content(mrnaseq)\n gcskew = gc_skew(mrnaseq)\n ncontent = n_content(mrnaseq)\n values = '%s %d %.3f %.3f %.3f' % (\n mrnaacc, mrnalen, gccontent, gcskew, ncontent)\n yield values.split(' ')\n mrnaacc = ''\n mrnalen = 0",
"def read_fasta(path, description=True, case=None, func_header=None, into=pd.Series, compression=\"infer\", name=None, verbose=False):\n from Bio.SeqIO.FastaIO import SimpleFastaParser\n # Get path\n path = format_path(path)\n\n # Assign pathname as name if there isn't one\n if name is None:\n name = path\n\n # Open file object\n f = get_file_object(path, mode=\"read\", compression=compression, safe_mode=False, verbose=False)\n\n # Read in fasta\n d_id_seq = OrderedDict()\n\n if verbose:\n seq_records = pv(SimpleFastaParser(f), \"Reading sequence file: {}\".format(path))\n else:\n seq_records = SimpleFastaParser(f)\n\n # Verbose but faster\n\n if description:\n if case == \"lower\":\n for header, seq in seq_records:\n seq = seq.lower()\n d_id_seq[header] = seq\n if case == \"upper\":\n for header, seq in seq_records:\n seq = seq.upper()\n d_id_seq[header] = seq\n if case is None:\n for header, seq in seq_records:\n d_id_seq[header] = seq\n if not description:\n if case == \"lower\":\n for header, seq in seq_records:\n seq = seq.lower()\n header = header.split(\" \")[0]\n d_id_seq[header] = seq\n if case == \"upper\":\n for header, seq in seq_records:\n seq = seq.upper()\n header = header.split(\" \")[0]\n d_id_seq[header] = seq\n if case is None:\n for header, seq in seq_records:\n header = header.split(\" \")[0]\n d_id_seq[header] = seq\n\n # Close File\n f.close()\n\n # Transform header\n if func_header is not None:\n d_id_seq = OrderedDict( [(func_header(id),seq) for id, seq in d_id_seq.items()])\n sequences = into(d_id_seq)\n if hasattr(sequences, \"name\"):\n sequences.name = name\n return sequences",
"def indexFasta(self):\n zipFileName = \"{}.gz\".format(self.fastaFileName)\n utils.log(\"indexing {} ...\".format(zipFileName))\n cmd = \"samtools faidx {}\".format(zipFileName)\n utils.runCommand(cmd)",
"def parse_sequence(sequence):\n return FastaEntry.from_text(sequence)"
]
| [
"0.68018943",
"0.67670727",
"0.67245513",
"0.6623084",
"0.6588016",
"0.6573484",
"0.65499896",
"0.63427275",
"0.62580866",
"0.6169045",
"0.6161581",
"0.6152185",
"0.6132377",
"0.61134493",
"0.606481",
"0.60580164",
"0.60068524",
"0.6005277",
"0.59995",
"0.599696",
"0.5988293",
"0.5984556",
"0.5941141",
"0.59383696",
"0.5937654",
"0.59317774",
"0.5928703",
"0.59070337",
"0.58688414",
"0.5862222"
]
| 0.68119603 | 0 |
Enable i2c device (device1). | def EnableI2c(self):
try:
if os.path.exists('/sys/bus/i2c/devices/i2c-0/0-0060'):
result = " - I2C device already enabled!"
else:
with open('/sys/bus/i2c/devices/i2c-0/new_device', 'a') as f:
# 'echo '+i2c_device.driver+' '+i2c_device.addr+ '
f.write('mpl3115 0x60')
result = " - I2C device enabled!"
LOG.info(result)
except Exception as err:
LOG.error("Error enabling I2C (device1): " + str(err)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def enable_i2s(self, enable):\n control = self.get_control()\n if enable:\n control = control | CONTROL_ENABLE\n else:\n control = control & (~CONTROL_ENABLE)\n\n self.set_control(control)",
"def use_i2c():\n _LIB.oled_click_use_i2c()",
"def enable_cl2_copy_ad9866(self):\n self.write_versa5(0x62,0x3b) ## Clock2 CMOS1 output, 3.3V\n self.write_versa5(0x2c,0x01) ## Enable aux output on clock 1\n self.write_versa5(0x31,0x0c) ## Use clock1 aux output as input for clock2\n self.write_versa5(0x63,0x01) ## Enable clock2",
"def enable(self):\n # Netmiko reports enable and config mode as being enabled\n if not self.native.check_enable_mode():\n self.native.enable()\n # Ensure device is not in config mode\n if self.native.check_config_mode():\n self.native.exit_config_mode()\n\n log.debug(\"Host %s: Device enabled.\", self.host)",
"def _sendi2c(self,command,data=[]) -> None:\n if isinstance(command, str):\n command = ord(command)\n try:\n self.bus.write_i2c_block_data(self.address, command, data)\n except OSError as err:\n print(\"I2C Device Error\\nCheck Connection\\n{}\".format(err))",
"def turn_on(self, **kwargs):\n self._state = True\n if(self._device['type'] == '_DT-PLUG' or self._device['type'] == '_THIMR'):\n self._send_cmd(self._device,'cmd=ctrl&devices={[' + self._device[\"sid\"] + ']}&op={\"cmd\":5,\"op\":1 }', 5)\n if(self._device['type'] == '_REALY2' or self._device['type'] == '_REALY4'):\n self._send_cmd(self._device, 'cmd=ctrl&devices={[' + self._device[\"sid\"] + ']}&op={\"cmd\":5,\"'+ self._data_key +'\":1 }', 5)",
"def enable_cl2_61p44(self):\n self.write_versa5(0x62,0x3b) ## Clock2 CMOS1 output, 3.3V\n self.write_versa5(0x2c,0x00) ## Disable aux output on clock 1\n self.write_versa5(0x31,0x81) ## Use divider for clock2\n ## VCO multiplier is shared for all outputs, set to 68 by firmware\n ## VCO = 38.4*68 = 2611.2 MHz\n ## There is a hardwired divide by 2 in the Versa 5 at the VCO output\n ## VCO to Dividers = 2611.2 MHZ/2 = 1305.6\n ## Target frequency of 61.44 requires dividers of 1305.6/61.44 = 21.25\n ## Frational dividers are supported\n ## Set integer portion of divider 21 = 0x15, 12 bits split across 2 registers\n self.write_versa5(0x3d,0x01)\n self.write_versa5(0x3e,0x50)\n ## Set fractional portion, 30 bits, 2**24 * .25 = 0x400000\n self.write_versa5(0x32,0x01) ## [29:22]\n self.write_versa5(0x33,0x00) ## [21:14]\n self.write_versa5(0x34,0x00) ## [13:6]\n self.write_versa5(0x35,0x00) ## [5:0] and disable ss\n self.write_versa5(0x63,0x01) ## Enable clock2",
"def enableDevice(*args, apply: bool=True, device: Union[AnyStr, bool]=\"\", enable: bool=True,\n monitor: bool=True, record: bool=True, q=True, query=True,\n **kwargs)->Union[None, Any]:\n pass",
"def enable(self, coil):\n self.log.info(\"RASPDriver.Enable(%s %s)\" % (coil.config['label'], coil.hw_driver.number))\n self.platform.communicator.driver_enable(coil.hw_driver.number)\n pass",
"def enable_device(self, device_serial):\n pipeline = rs.pipeline()\n\n # Enable the device\n self._config.enable_device(device_serial)\n pipeline_profile = pipeline.start(self._config)\n\n depth_sensor = pipeline_profile.get_device().first_depth_sensor()\n depth_sensor.set_option(rs.option.exposure, 8400.0)\n\n self._profile_pipe = pipeline_profile\n self._enabled_devices[device_serial] = (Device(pipeline, pipeline_profile))",
"def is_i2s_enabled(self):\n return ((self.get_control() & CONTROL_ENABLE) > 0)",
"def enable_cl1_direct(self):\n self.write_versa5(0x17,0x02) ## Change top multiplier to 0x22\n self.write_versa5(0x18,0x20)\n self.write_versa5(0x10,0xc0) ## Enable xtal and clock\n self.write_versa5(0x13,0x03) ## Switch to clock\n self.write_versa5(0x10,0x44) ## Enable clock input only and refmode\n self.write_versa5(0x21,0x0c) ## Use previous channel, direct input, may have skew",
"def cu2(self, q0, q1, alpha=DEF_PHASE, beta=DEF_PHASE, ctrl=None):\n self.__add_quantum_gate(kind=CONTROLLED_U2, qid=[q0,q1], phase=alpha, phase1=beta, ctrl=ctrl)\n return self",
"def do_i2c(self, i2cAddress):\n ise.setI2CAddress(i2cAddress)",
"def open(self):\n self._i2c.open(bus=self._i2c_bus)\n self._configure_i2c_library_functions()\n if self.debug:\n print('VL53L1X: Opened I2C bus {}'.format(self._i2c_bus))",
"def enable_device_imu(self, enable_raw=False, enable_user=False, enable_gyro=False):\n msg = _clad_to_engine_iface.EnableDeviceIMUData(enableAccelerometerRaw=enable_raw,\n enableAccelerometerUser=enable_user,\n enableGyro=enable_gyro)\n self.conn.send_msg(msg)",
"def enable_sensor_power():\n sen = digital.SensorPower(\"senpwr\") \n sen.set()",
"def __set_i2c_address(self, address):\n fcntl.ioctl(self.file_read, self.I2C_SLAVE, address)\n fcntl.ioctl(self.file_write, self.I2C_SLAVE, address)",
"def enable_output(self):\n\n self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 7, 1)\n self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 4, 1)\n self.__bus.write_byte_data(\n self.__rtcaddress, self.CONTROL, self.__rtcconfig)\n return",
"def _turn_on_dev_mode(self):\n if self._device is not None:\n self._char_write(self._BLE_SERVICE_ANTI_DOS,\n [ord(c) for c in self._ANTI_DOS_MESSAGE])\n self._char_write(self._BLE_SERVICE_TX_POWER,\n [self._TX_POWER_VALUE])\n # Sending 0x01 to the wake service wakes the sphero.\n self._char_write(self._BLE_SERVICE_WAKE, [0x01])",
"def enable_cl2_sync_76p8(self,iskw=0,fskw=31):\n iskw = iskw & 0x0f\n iskw = iskw << 4\n fskw = fskw & 0x3f\n self.write_versa5(0x62,0x3b) ## Clock2 CMOS1 output, 3.3V\n self.write_versa5(0x3d,0x01) ## Set divide by 0x0110\n self.write_versa5(0x3e,0x10)\n self.write_versa5(0x31,0x81) ## Enable divider output for clock2\n self.write_versa5(0x3c,iskw) ## Write integer portion of skew\n self.write_versa5(0x3f,fskw) ## Write fractional portion of skew\n self.write_versa5(0x63,0x01) ## Enable clock2 output\n self.reset_versa5()",
"def led2(self, val):\n data = val & self.LED2_MASK\n self._ftdi.spi_write(self.LED2_ADDR, [data], burst='fixed')",
"def EnableCPU():\n global option\n option['device'] = 'CPU'",
"def change_device(self):\n if self.state.ser:\n UsbHost.close_port(self.state.ser)\n device = self.CBDevices.currentText()\n if device:\n comport = self.devices[int(device)]\n self.state.ser = UsbHost.open_port(comport)\n if not self.state.ser:\n self.statusbar.showMessage(\"Выбранный порт более недоступен. Произведите повторный поиск\")\n return\n answer: str = self.UsbHost.send_command(self.state.ser, \"ping\", device)\n if answer in wrong_answers:\n error_message(\"Выбранный девайс не отвечает\")\n self.statusbar.showMessage(\"Выбранный порт более недоступен. Произведите повторный поиск\")\n return\n self.state.device_id = int(device)\n self.state.comport = comport\n self.create_message()\n self.set_controls_state(True)\n self.BtnL1.click()\n self.BtnAttenuate.click()\n self.SpinDACValue.setValue(35000)\n self.BtnSetDACValue.click()\n self.set_sw(\"0 1\")",
"def disable_cl2(self):\n self.write_versa5(0x31,0x80) ## Disable divider output for clock2\n self.write_versa5(0x63,0x00) ## Disable clock2 output",
"def enable(self, port):\n assert port in self.ports, 'bad port name'\n port = ord(port[4:]) - ord('A')\n (_, reg, ofs) = gpio_info[self.device.soc_name]\n hw = self.device.RCC.registers[reg]\n port += ofs\n val = hw.rd()\n val &= ~(1 << port)\n val |= 1 << port\n hw.wr(val)",
"def r2_on_off():\n \n r2_cmd_packet = b'\\x04\\x14\\x02\\x00\\x00\\xe6\\x0f'\n ser_relay.write(r2_cmd_packet)",
"def device_connect(self):\n pass",
"def setup_device(device):\n try:\n # Gets around \"Resource busy\" errors\n device.detach_kernel_driver(0)\n except Exception:\n pass\n device.set_configuration()",
"def devices_command_on(device_id, **kwargs):\n return devices_command_generic(device_id=device_id, request_type='CMD_DEVICE_ON', **kwargs)"
]
| [
"0.71020955",
"0.6241626",
"0.61950284",
"0.5782846",
"0.5756835",
"0.5755888",
"0.5698368",
"0.5685892",
"0.56685895",
"0.56587315",
"0.56057596",
"0.5595121",
"0.5585164",
"0.558492",
"0.5573397",
"0.555043",
"0.5504512",
"0.54773784",
"0.546188",
"0.54415196",
"0.5410099",
"0.5396139",
"0.537701",
"0.5374015",
"0.5343132",
"0.528892",
"0.5268624",
"0.5239093",
"0.522858",
"0.5228467"
]
| 0.84149885 | 0 |
Read i2c raw value. | def i2cRead(self, sensor):
try:
with open(device1_path + "in_" + sensor + "_raw") as raw:
value = raw.read()
except Exception as err:
LOG.error("Error reading I2C device: " + str(err))
value = None
return value | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _read_i2c(fd, n):\n if n == 0:\n return b''\n buf = os.read(fd, n)\n if len(buf) != n:\n raise OSError(errno.EIO, os.strerror(errno.EIO))\n return buf",
"def read(self, register): #good\r\n\t\tcurrentVal = self.i2c.readU8(register)\r\n\t\treturn currentVal",
"def _i2c_read(self, register, bank=None):\n if bank is not None:\n self.set_bank(bank)\n return self.i2c.read_byte_data(self.address, register)",
"def read(self):\n temp = self.bus.read_i2c_block_data(self.address, 0)\n print('temp', temp)\n return_buffer = []\n for i in temp:\n return_buffer += str(chr(int(i)))\n\n return return_buffer",
"def read_i2c_block_data(self, i2c_address, register, length):\n return self.regs[register:register + length]",
"def read_byte(fd, reg):\n b, = write_read_i2c(fd, bytes([reg]), 1)\n return b",
"def readRaw(self):\n self.value = self.analogInput.value # new value is read and stored\n return self.value",
"def read_raw8(self):\n raise NotImplementedError",
"def readRaw(self):\r\n count = 2048 #this is as big as the library buffer, so the user doesn't have to poll as often\r\n buf = [] #buffer that will hold the read raw data and be returned to the user\r\n dataPtr = (c_int * count)()\r\n length = c_int()\r\n length.value = count;\r\n \r\n try:\r\n result = PhidgetLibrary.getDll().CPhidgetIR_getRawData(self.handle, dataPtr, byref(length))\r\n except RuntimeError:\r\n raise\r\n \r\n if result > 0:\r\n raise PhidgetException(result)\r\n \r\n for i in range(length.value):\r\n buf.append(dataPtr[i])\r\n \r\n return buf",
"def read_sensor_raw(self):\n return self.read_sensor()",
"def read_sensor_raw(self):\n return self.read_sensor()",
"def _read_v2(self):\n return self.usb_dev.read(self.ep_in, self.rdbuf_chunksize, self.usb_rd_timeout)",
"async def i2c_read_data(self, address):\n if address in self.i2c_map:\n map_entry = self.i2c_map.get(address)\n data = map_entry.get('value')\n return data\n else:\n return None",
"async def i2c_read_data(self, address):\n if address in self.i2c_map:\n map_entry = self.i2c_map.get(address)\n data = map_entry.get('value')\n return data\n else:\n return None",
"def read_string(self):\n return self.bits.read('bytes:{0}'.format(self.read_int())).decode(\"utf-8\", 'replace')",
"def read_uchar(self):\n return self._packers[\"B\"].unpack(self.read(1))[0]",
"def read(self):\n if self.mode == UNAVAILABLE:\n raise IOError, \"Cannot read pin %s\"% self.__str__()\n return self.value",
"def IR_sensor(self):\n self.serial.reset_input_buffer() # clear buffer\n self.send(b\"kk\\n\")\n # next line depends on read timeout\n result = self.serial.read(1)\n if result == b'':\n print(\"no IR data returned\")\n return 2 # if 2 returned do it again\n else:\n result = int.from_bytes(result, \"big\")\n return result",
"def read(self):\n\n return self.read_raw().rstrip()",
"def read_binary(self):\n with self.open(\"rb\") as f:\n return f.read()",
"def _read(self):\n # because protocol has no termination chars the read reads the number\n # of bytes in the buffer\n bytes_in_buffer = self.visa_handle.bytes_in_buffer\n # a workaround for a timeout error in the pyvsia read_raw() function\n with(self.visa_handle.ignore_warning(visa.constants.VI_SUCCESS_MAX_CNT)):\n mes = self.visa_handle.visalib.read(\n self.visa_handle.session, bytes_in_buffer)\n mes = str(mes[0].decode()) # cannot be done on same line for some reason\n # if mes[1] != 0:\n # # see protocol descriptor for error codes\n # raise Exception('IVVI rack exception \"%s\"' % mes[1])\n return mes",
"def read_char(self):\n return self._packers[\"b\"].unpack(self.read(1))[0]",
"def raw_humidity(self):\n data = self._bus.read_i2c_block_data(self.addr, self.HUM, 2)\n return (data[0] << 8) + data[1]",
"def _read(self):\n \n try:\n d = self._get_byte()\n ts = time.time()\n while d != self.HDLC_FLAG_BYTE:\n d = self._get_byte()\n ts = time.time()\n packet = [d]\n d = self._get_byte()\n if d == self.HDLC_FLAG_BYTE:\n d = self._get_byte()\n ts = time.time()\n else:\n packet.append(d)\n while d != self.HDLC_FLAG_BYTE:\n d = self._get_byte()\n packet.append(d)\n if self._debug == True:\n print \"Serial:_read: unescaped\", packet\n packet = self._unescape(packet)\n \n crc = self._crc16(0, packet[1:-3])\n packet_crc = self._decode(packet[-3:-1])\n \n if crc != packet_crc:\n print \"Warning: wrong CRC! %x != %x %s\" % (crc, packet_crc, [\"%2x\" % i for i in packet])\n if self._debug:\n if self._ts == None:\n self._ts = ts\n else:\n print \"Serial:_read: %.4f (%.4f) Recv:\" % (ts, ts - self._ts), self._format_packet(packet[1:-3])\n self._ts = ts\n return RawPacket(ts, packet[1:-3], crc == packet_crc)\n except socket.timeout:\n return None",
"def write_read_i2c(fd, write_buf, read_len):\n with LOCK:\n _write_i2c(fd, write_buf)\n return _read_i2c(fd, read_len)",
"def read_uv(self):\n return self._read16(0x2C, little_endian=True) / 100",
"def i2c_read8(self, address, register):\n raise NotImplementedError",
"def read(self):\n return self.block.read()",
"def read(self, reg, ndata, return_bytes=False):\n if self.emulate:\n rtn = 0\n else:\n self.gateway.write_then_read(numtx=2, numrx=0, txdata=[(self.addr<<1), reg]) \n rtn = self.gateway.write_then_read(numtx=1, numrx=ndata, txdata=[(self.addr<<1)+1]) \n \n if isinstance(rtn, bytes) and not return_bytes:\n rtn = int.from_bytes(rtn, byteorder='big') #bigendian\n\n logger.debug(\"Read from I2C_ADDR=0x{0:02x}(0x{1:02x}); REG=0x{2:02x}; DATA=\".format(self.addr, (self.addr<<1)+1, reg, rtn) + hex(rtn))\n return rtn",
"def read_interrupt_capture(self, port):\n value = 0\n if port == 0:\n value = self.__bus.read_byte_data(self.__ioaddress, self.INTCAPA)\n else:\n value = self.__bus.read_byte_data(self.__ioaddress, self.INTCAPB)\n return value"
]
| [
"0.71933854",
"0.7141151",
"0.70043665",
"0.679657",
"0.66493845",
"0.6602332",
"0.6516032",
"0.63425654",
"0.63419306",
"0.62273055",
"0.62273055",
"0.6206236",
"0.61949515",
"0.61949515",
"0.61196005",
"0.61109686",
"0.61014265",
"0.6043389",
"0.59487134",
"0.5927885",
"0.5913872",
"0.5906822",
"0.5902905",
"0.5898636",
"0.5855851",
"0.5840032",
"0.58096033",
"0.5808027",
"0.58061975",
"0.57970095"
]
| 0.73743635 | 0 |
Function to set digital PIN value. | def setPIN(self, DPIN, value):
try:
with open('/sys/class/gpio/' + DPIN + '/value', 'a') as f:
f.write(value)
except Exception as err:
LOG.error("Error setting PIN value: " + str(err)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def digital_pin_write(self, pin, value):\n\n command = (PrivateConstants.SET_DIGITAL_PIN_VALUE, pin, value)\n\n await self._send_command(command)",
"def set(pin, val=1):\n if val not in [0,1]:\n raise RuntimeError\n GPIO.output(pin,val)",
"def Set(self,value):\n self.Bus.Write_uInt8(self.Address,0x50+self.Pin,value)",
"def set_pin(self, pin):\n if pin not in range(0, 14):\n raise Exception(\"Incorrect pin {} selected. Pins available (0 to 13)\".format(pin))\n else:\n self.pin = pin\n self.gpio_pin = mraa.Gpio(pin)",
"def digital_pin_write(self, pin, value):\n\n self._digital_pins_directly[pin].DigitalWrite(value, PermitWriteToInputPin = False)",
"def digital_write(self, pin_number, digital_value):\n command = (''.join(('WD', str(pin_number), ':',58))).encode()\n self.conn.write(command)",
"def light_set(self, pin='D13', value='0'):\n self.bridge.put(str(pin), str(value))",
"def Set(self,value):\n if value:\n onoff = 0x01\n else:\n onoff = 0x00\n self.Bus.Write_uInt8(self.Address,0x20+self.Pin, onoff)",
"def set_pin(self):\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BOARD)\n GPIO.setup(self.pin, GPIO.OUT)\n GPIO.output(self.pin, GPIO.LOW)\n time.sleep(3)\n GPIO.output(self.pin, GPIO.HIGH)",
"def setHack(self, pin, value, board=0):\n msg = [int(pin), int(value)]\n return self.callModule('hackp', board, 0, 'write', msg)",
"def set_setpoint(self, value):\n value = value * self.conf['PSICONV']\n log.debug(\"Set pressure regulator %d to %f\", self.id_, value)\n self.synth.cbox.set_dac(self.id_, value)",
"def write_pin(self, pin: int, value: bool):\n RPi.GPIO.output(pin, value)",
"def digital_write(self, pin, value):\n #logstring(\"going for pin {} and value {} while pincount is {}\".format(pin, value, len(self._digital_pins_directly)))\n self._digital_pins_directly[pin].DigitalWrite(value)\n #logstring(\"finished digital write\")",
"def set_led(self, pin, value=0):\n value = self.int_lim(lower=PWM_MIN, upper=PWM_MAX, value=value) #Standardise the value to our correct range\n if self.iface.connected:\n try:\n self.iface.set_PWM_dutycycle(pin, value)\n except (AttributeError, IOError):\n logging.error(\" Cannot output to pins. PWM of pin #%s would be %s\" % (pin,value))\n else:\n logging.error(\" Interface not connected. Cannot output to pins. PWM of pin #%s would be %s\" % (pin,value))\n return value",
"def _setGPIOs(self, Dpin, direction, value):\n\n try:\n\n with open('/sys/class/gpio/export', 'a') as f_export:\n f_export.write(self.MAPPING[Dpin])\n\n with open('/sys/class/gpio/' + Dpin + '/direction', 'a') as f_dir:\n f_dir.write(direction)\n\n with open('/sys/class/gpio/' + Dpin + '/value', 'a') as f_value:\n f_value.write(value)\n\n with open('/sys/class/gpio/' + Dpin + '/value') as f_value:\n result = \"PIN \" + Dpin + \" value \" + f_value.read()\n\n except Exception as err:\n LOG.error(\"Error setting GPIO value: \" + str(err))\n result = None\n\n return result",
"def write_pin(self, pin, value):\n\n pin = pin - 1\n if pin < 8:\n self.__port_a_value = self.__helper.updatebyte(\n self.__port_a_value, pin, value)\n self.__bus.write_byte_data(\n self.__ioaddress, self.GPIOA, self.__port_a_value)\n else:\n self.__port_b_value = self.__helper.updatebyte(\n self.__port_b_value, pin - 8, value)\n self.__bus.write_byte_data(\n self.__ioaddress, self.GPIOB, self.__port_b_value)\n return",
"def set_pupd(self, port, bit, x):\n hw = self.device.peripherals[port].PUPDR\n mode = {'pu':1,'pd':2}.get(x, 0)\n shift = (bit & 15) << 1\n val = hw.rd()\n val &= ~(3 << shift)\n val |= mode << shift\n hw.wr(val)",
"def set_pin_pullup(self, pin, value):\n pin = pin - 1\n if pin < 8:\n self.__port_a_pullup = self.__helper.updatebyte(\n self.__port_a_pullup, pin, value)\n self.__bus.write_byte_data(\n self.__ioaddress, self.GPPUA, self.__port_a_pullup)\n else:\n self.__port_b_pullup = self.__helper.updatebyte(\n self.__port_b_pullup, pin - 8, value)\n self.__bus.write_byte_data(\n self.__ioaddress, self.GPPUB, self.__port_b_pullup)\n return",
"def setup_pin(self, pin, dutycycle, frequency=2000):\n raise NotImplementedError",
"def set_duty_cycle(self, pin, dutycycle):\n raise NotImplementedError",
"def setUserPassword(self,value):\n self.PDFreactorConfiguration.in1[\"userPassword\"] = value",
"def setInt(self, addr: ghidra.program.model.address.Address, value: int) -> None:\n ...",
"def set_bit(self, port, bit):\n hw = self.device.peripherals[port]\n hw.BSRR.wr(1 << (bit & 15))",
"def select_pin(self):\r\n\t\tself.pin = int(input(\"Enter the Pin No.(0-1) = \"))\r\n\t\twhile self.pin > 1 :\r\n\t\t\tself.pin = int(input(\"Enter the Pin No.(0-1) = \"))",
"def write_pin(self, attr):\n \n self.logging.debug(\"Setting \" + attr.label + \" to \" + str(attr.value) + \" on pin \" + str(attr.io_pin))\n GPIO.output(attr.io_pin, attr.value)",
"def setInt(self, address: ghidra.program.model.address.Address, value: int) -> None:\n ...",
"def set_interrupt_on_pin(self, pin, value):\n\n pin = pin - 1\n if pin < 8:\n self.__inta = self.__helper.updatebyte(self.__inta, pin, value)\n self.__bus.write_byte_data(\n self.__ioaddress, self.GPINTENA, self.__inta)\n else:\n self.__intb = self.__helper.updatebyte(self.__intb, pin - 8, value)\n self.__bus.write_byte_data(\n self.__ioaddress, self.GPINTENB, self.__intb)\n return",
"def set_vin(self, value):\n return self.sendCMD(\"ATSET VIN={}\".format(value))",
"async def digital_write(self, pin, value):\n # The command value is not a fixed value, but needs to be calculated\n # using the pin's port number\n port = pin // 8\n\n calculated_command = PrivateConstants.DIGITAL_MESSAGE + port\n mask = 1 << (pin % 8)\n # Calculate the value for the pin's position in the port mask\n if value == 1:\n PrivateConstants.DIGITAL_OUTPUT_PORT_PINS[port] |= mask\n else:\n PrivateConstants.DIGITAL_OUTPUT_PORT_PINS[port] &= ~mask\n\n # Assemble the command\n command = (calculated_command,\n PrivateConstants.DIGITAL_OUTPUT_PORT_PINS[port] & 0x7f,\n (PrivateConstants.DIGITAL_OUTPUT_PORT_PINS[port] >> 7) & 0x7f)\n\n await self._send_command(command)",
"def set_register(self, name, value):\n if name is 'P':\n value = value | (1 << 5)\n\n self.regs[name].set_value(value & 0xFF)\n return value & 0xFF"
]
| [
"0.75729305",
"0.7364922",
"0.6986811",
"0.6898064",
"0.6795176",
"0.67899877",
"0.6787525",
"0.67728305",
"0.6735007",
"0.6669532",
"0.6632994",
"0.65423346",
"0.653754",
"0.64920366",
"0.6398399",
"0.6333039",
"0.6301748",
"0.62127906",
"0.61851203",
"0.61605304",
"0.61573297",
"0.6138925",
"0.6120933",
"0.6107676",
"0.61035466",
"0.61034983",
"0.6072523",
"0.6053587",
"0.6050566",
"0.59537214"
]
| 0.7687744 | 0 |
Processes the response from the proxy. If the tunnel is successfully created, notifies the client that we are ready to send requests. If not raises a TunnelError. | def processProxyResponse(self, bytes):
self._protocol.dataReceived = self._protocolDataReceived
if TunnelingTCP4ClientEndpoint._responseMatcher.match(bytes):
# print 'test: requestTunnel successfully'
self._protocol.transport.startTLS(self._contextFactory,
self._protocolFactory)
self._tunnelReadyDeferred.callback(self._protocol)
else:
# print 'test: requestTunnel failed'
self._tunnelReadyDeferred.errback(
TunnelError('Could not open CONNECT tunnel.')) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _handle_proxy_packet(self, pkt):\n if pkt.source[0] not in self.hosts:\n raise ServerPacketError('Received packet from unknown host')\n pkt.secret = self.hosts[pkt.source[0]].secret\n\n if pkt.code not in [PacketCode.ACCESS_ACCEPT, PacketCode.ACCESS_REJECT,\n PacketCode.ACCOUNTING_RESPONSE]:\n raise ServerPacketError('Received non-response on proxy socket')",
"def handle(self):\n try:\n peers = Peers([\n gevent.spawn(self.route.proxy_input, self.client.sock,\n self.sock, self.buf, self.extra),\n gevent.spawn(self.route.proxy_connected, self.sock, \n self.client.sock, self.extra)])\n gevent.joinall(peers.greenlets)\n finally:\n self.sock.close()",
"def tunnel_recv_handler(self, payload):\n _log.analyze(self.node.id, \"+ CLIENT\", {'payload': payload})\n if 'msg_uuid' in payload and payload['msg_uuid'] in self.replies and 'cmd' in payload and payload['cmd']=='REPLY':\n kwargs = {}\n if 'key' in payload:\n kwargs['key'] = payload['key']\n if 'value' in payload:\n kwargs['value'] = payload['value']\n if 'response' in payload:\n kwargs['value'] = calvinresponse.CalvinResponse(encoded=payload['response'])\n self.replies.pop(payload['msg_uuid'])(**kwargs)",
"def process_response(self, request, response):\n\n # send our data off to trajectory.\n\n path_info = ''\n if request and request.META and 'PATH_INFO' in request.META:\n path_info = request.META['PATH_INFO']\n\n req_body = \"\"\n\n status_code = -1\n if response:\n status_code = getattr(response, 'status_code', -1)\n\n send_trajectory(path_info, req_body, request.META, int(status_code),\n response, None)\n\n return response",
"def _on_response(self):\n request = self._requests.pop(0)\n try:\n request[-1].cancel()\n left = request[-1].end - Engine.instance().time\n except Exception:\n left = request[5]\n pass\n\n response = self.current_response\n\n close_after = response.headers.get('Connection', '') == 'close'\n close_after &= self.keep_alive\n\n # Is this a 100 Continue?\n if response.status == 100:\n self.current_response = None\n del response\n\n # Process the request.\n if close_after:\n if self._stream:\n self._stream.close()\n return\n\n self._process_request()\n return\n\n # Did we catch a redirect?\n if response.status in (301,302) and request[9] <= self.max_redirects:\n # Generate a new request, using the new URL.\n new_url = urlparse.urljoin(response.full_url,\n response.headers['Location'])\n\n new_headers = request[3].copy()\n del new_headers['Host']\n\n new_req = self._add_request(request[0], new_url, new_headers,\n request[4], left, False)\n new_req[6] = request[6]\n new_req[7] = request[7]\n new_req[9] = request[9] + 1\n\n new_req.append(\n Engine.instance().defer(left, self._request_timeout, new_req))\n\n self._requests.insert(0, new_req)\n self.current_response = None\n del response\n\n # Process the request.\n if close_after:\n if self._stream:\n self._stream.close()\n return\n\n self._process_request()\n return\n\n # Try converting to unicode?\n if self.unicode:\n content_type = response.headers.get('Content-Type','')\n if 'charset=' in content_type:\n content_type, _, encoding = content_type.partition('charset=')\n try:\n response.body = response.body.decode(encoding)\n except (LookupError, UnicodeDecodeError):\n pass\n\n # Determine the handler function to use.\n if callable(request[6]):\n func = request[6]\n else:\n func = self.on_response\n\n # Call the handler function.\n try:\n func(0, response)\n except Exception:\n log.exception('Error in HTTP response handler.')\n\n # Process the next request.\n self.current_response = None\n\n if close_after:\n if self._stream:\n self._stream.close()\n return\n\n self._process_request()",
"def test_tls_in_tls_recv_into_sendall(self) -> None:\n self.start_destination_server()\n self.start_proxy_server()\n\n sock = socket.create_connection(\n (self.proxy_server.host, self.proxy_server.port)\n )\n with self.client_context.wrap_socket(\n sock, server_hostname=\"localhost\"\n ) as proxy_sock:\n with SSLTransport(\n proxy_sock, self.client_context, server_hostname=\"localhost\"\n ) as destination_sock:\n destination_sock.sendall(sample_request())\n response = bytearray(65536)\n destination_sock.recv_into(response)\n str_response = response.decode(\"utf-8\").rstrip(\"\\x00\")\n validate_response(str_response, binary=False)",
"def __negotiatehttp(self, destaddr, destport):\r\n # If we need to resolve locally, we do this now\r\n if not self.__proxy[3]:\r\n addr = socket.gethostbyname(destaddr)\r\n else:\r\n addr = destaddr\r\n headers = [\"CONNECT \", addr, \":\", str(destport), \" HTTP/1.1\\r\\n\"]\r\n headers += [\"Host: \", destaddr, \"\\r\\n\"]\r\n if (self.__proxy[4] != None and self.__proxy[5] != None):\r\n headers += [self.__getauthheader(), \"\\r\\n\"]\r\n headers.append(\"\\r\\n\")\r\n self.sendall(\"\".join(headers).encode())\r\n # We read the response until we get the string \"\\r\\n\\r\\n\"\r\n resp = self.recv(1)\r\n while resp.find(\"\\r\\n\\r\\n\".encode()) == -1:\r\n resp = resp + self.recv(1)\r\n # We just need the first line to check if the connection\r\n # was successful\r\n statusline = resp.splitlines()[0].split(\" \".encode(), 2)\r\n if statusline[0] not in (\"HTTP/1.0\".encode(), \"HTTP/1.1\".encode()):\r\n self.close()\r\n raise GeneralProxyError((1, _generalerrors[1]))\r\n try:\r\n statuscode = int(statusline[1])\r\n except ValueError:\r\n self.close()\r\n raise GeneralProxyError((1, _generalerrors[1]))\r\n if statuscode != 200:\r\n self.close()\r\n raise HTTPError((statuscode, statusline[2]))\r\n self.__proxysockname = (\"0.0.0.0\", 0)\r\n self.__proxypeername = (addr, destport)",
"def test_tls_in_tls_tunnel(self) -> None:\n self.start_destination_server()\n self.start_proxy_server()\n\n sock = socket.create_connection(\n (self.proxy_server.host, self.proxy_server.port)\n )\n with self.client_context.wrap_socket(\n sock, server_hostname=\"localhost\"\n ) as proxy_sock:\n with SSLTransport(\n proxy_sock, self.client_context, server_hostname=\"localhost\"\n ) as destination_sock:\n assert destination_sock.version() is not None\n destination_sock.send(sample_request())\n response = consume_socket(destination_sock)\n validate_response(response)",
"def do_POST(self):\n self.send_response(httplib.OK)\n self.send_header('Content-Type', 'application/octet-stream')\n self.end_headers()\n\n response = remote_api_pb.Response()\n try:\n request = remote_api_pb.Request()\n\n\n\n request.ParseFromString(\n self.rfile.read(int(self.headers['content-length'])))\n api_response = _ExecuteRequest(request).Encode()\n response.set_response(api_response)\n except Exception, e:\n logging.debug('Exception while handling %s\\n%s',\n request,\n traceback.format_exc())\n response.set_exception(pickle.dumps(e))\n if isinstance(e, apiproxy_errors.ApplicationError):\n application_error = response.mutable_application_error()\n application_error.set_code(e.application_error)\n application_error.set_detail(e.error_detail)\n self.wfile.write(response.Encode())",
"def __negotiatehttp(self,destaddr,destport):\r\n # If we need to resolve locally, we do this now\r\n if self.__proxy[3] == False:\r\n addr = socket.gethostbyname(destaddr)\r\n else:\r\n addr = destaddr\r\n self.sendall(\"CONNECT \" + addr + \":\" + str(destport) + \" HTTP/1.1\\r\\n\" + \"Host: \" + destaddr + \"\\r\\n\\r\\n\")\r\n # We read the response until we get the string \"\\r\\n\\r\\n\"\r\n resp = self.recv(1)\r\n while resp.find(\"\\r\\n\\r\\n\")==-1:\r\n resp = resp + self.recv(1)\r\n # We just need the first line to check if the connection\r\n # was successful\r\n statusline = resp.splitlines()[0].split(\" \",2)\r\n if statusline[0] not in (\"HTTP/1.0\",\"HTTP/1.1\"):\r\n self.close()\r\n raise GeneralProxyError((1,_generalerrors[1]))\r\n try:\r\n statuscode = int(statusline[1])\r\n except ValueError:\r\n self.close()\r\n raise GeneralProxyError((1,_generalerrors[1]))\r\n if statuscode != 200:\r\n self.close()\r\n raise HTTPError((statuscode,statusline[2]))\r\n self.__proxysockname = (\"0.0.0.0\",0)\r\n self.__proxypeername = (addr,destport)",
"def on_auth_resp(self, jdata):\n LOGGER.debug('on_auth_resp %s', str(jdata))\n self.client_id = jdata['Payload']['ClientId']\n\n self.send_dict_pack(\n MoloSocketHelper.req_tunnel(self.tunnel['protocol'],\n self.tunnel['hostname'],\n self.tunnel['subdomain'],\n self.tunnel['rport'], self.client_id))",
"def on_req_proxy(self, jdata):\n LOGGER.debug(\"on_req_proxy, %s, %s, %s, %s\", self.host, self.port,\n self.tunnel['lhost'], self.tunnel['lport'])\n remotesession = RemoteSession(self.client_id, self.host, self.port,\n self.tunnel['lhost'],\n self.tunnel['lport'],\n MOLO_CLIENT_APP.async_map)\n remotesession.sock_connect()",
"def return_proxy(self):\n\n check_server()\n url='{url}/proxy_return'.format(url=config.SERVER_URL)\n proxy_ret= [x.raw_data for x in self.proxy_pool]\n proxy_str=''\n\n for item in proxy_ret:\n proxy_str=proxy_str+item\n data={\n 'data':proxy_str\n }\n\n data=parse.urlencode(data).encode('utf-8')\n\n try:\n opener=request.build_opener()\n req=request.Request(url,data)\n res=opener.open(req).read().decode('utf-8')\n except:\n try:\n opener=request.build_opener()\n req=request.Request(url,data)\n res=opener.open(req).read().decode('utf-8')\n except:\n err_str='error:client->return_proxy:unable to ' \\\n 'connect to server'\n info_manager(err_str,type='KEY')\n return\n\n if 'return success' in res:\n print('Success: return proxy to server')\n return\n else:\n err_str='error:client->return_proxy:'+res\n info_manager(err_str,type='KEY')\n # raise ConnectionError('Unable to return proxy')\n return",
"def proxy_error(response):\n r = HttpResponse(\n response.content,\n content_type=response.headers[\"content-type\"],\n status=response.status_code,\n )\n r.setdefault(\"X-PROMGEN-PROXY\", response.url)\n return r",
"def __negotiatehttp(self, destaddr, destport):\r\n # If we need to resolve locally, we do this now\r\n if not self.__proxy[3]:\r\n addr = socket.gethostbyname(destaddr)\r\n else:\r\n addr = destaddr\r\n self.sendall((\"CONNECT \" + addr + \":\" + str(destport) + \" HTTP/1.1\\r\\n\" + \"Host: \" + destaddr + \"\\r\\n\\r\\n\").encode())\r\n # We read the response until we get the string \"\\r\\n\\r\\n\"\r\n resp = self.recv(1)\r\n while resp.find(\"\\r\\n\\r\\n\".encode()) == -1:\r\n resp = resp + self.recv(1)\r\n # We just need the first line to check if the connection\r\n # was successful\r\n statusline = resp.splitlines()[0].split(\" \".encode(), 2)\r\n if statusline[0] not in (\"HTTP/1.0\".encode(), \"HTTP/1.1\".encode()):\r\n self.close()\r\n raise GeneralProxyError((1, _generalerrors[1]))\r\n try:\r\n statuscode = int(statusline[1])\r\n except ValueError:\r\n self.close()\r\n raise GeneralProxyError((1, _generalerrors[1]))\r\n if statuscode != 200:\r\n self.close()\r\n raise HTTPError((statuscode, statusline[2]))\r\n self.__proxysockname = (\"0.0.0.0\", 0)\r\n self.__proxypeername = (addr, destport)",
"def proxy_reponse(body, content_type=None):\n content_type = content_type or 'text/html'\n # Wrap HTML in proxy response object\n return {\n 'body': body,\n 'headers': {'Content-Type': f'{content_type}; charset=UTF-8'},\n 'statusCode': 200,\n }",
"def process(self):\n\n try:\n self._read_buffer += self._socket.recv(4096)\n except socket.error as exc:\n if exc.errno not in [errno.EAGAIN,\n errno.EWOULDBLOCK,\n errno.WSAEWOULDBLOCK]:\n raise\n response, self._read_buffer = Message.decode(self._read_buffer)\n # Check if terminating RESPONSE_VALUE with body 00 01 00 00\n if (response.type == Message.SERVERDATA_RESPONSE_VALUE and\n response.body.encode(\"ascii\") == \"\\x00\\x01\\x00\\x00\"):\n response = Message(self._response[0].id,\n self._response[0].type,\n \"\".join([r.body for r in self._response]))\n self._active_requests[response.id].response = response\n self._response = []\n self._active_requests[response.id]\n elif response.type == Message.SERVERDATA_RESPONSE_VALUE:\n self._response.append(response)\n elif response.type == Message.SERVERDATA_AUTH_RESPONSE:\n self._active_requests[self._response[0].id].response = response\n # Clear empty SERVERDATA_RESPONSE_VALUE sent before\n # SERVERDATA_AUTH_RESPONSE\n self._response = []\n self._active_requests[response.id]",
"def post_response(self, body, **kwargs):\n data = json.loads(body)\n if \"errors\" in data:\n self.handle_error(data)",
"def _handle_response(self, response):\n self.client.status = response.code\n self.response_headers = headers = response.headers\n # XXX This workaround (which needs to be improved at that) for possible\n # bug in Twisted with new client:\n # http://twistedmatrix.com/trac/ticket/5476\n if self._method.upper() == 'HEAD' or response.code == NO_CONTENT:\n return succeed('')\n receiver = self.receiver_factory()\n receiver.finished = d = Deferred()\n receiver.content_length = response.length\n response.deliverBody(receiver)\n if response.code >= 400:\n d.addCallback(self._fail_response, response)\n return d",
"def post(self):\n\n # unwrap json message from body\n if self.request.headers[\"Content-Type\"] == \"application/x-mplane+json\":\n env = mplane.model.parse_json(self.request.body.decode(\"utf-8\"))\n else:\n self._respond_plain_text(400, \"Invalid format\")\n return\n\n if self._listenerclient.registration_path != self._listenerclient.result_path:\n # registration and result path are different, we need to check if the requests have been\n # sent on the correct path\n\n # if is a Result, Receipt, Exception or Envelope (containing results)\n if self.request.path == self._listenerclient.result_path:\n if isinstance(env, mplane.model.Result) \\\n or isinstance(env, mplane.model.Receipt) \\\n or isinstance(env, mplane.model.Exception):\n self._listenerclient.handle_message(env, self._tls.extract_peer_identity(self.request))\n self._respond_plain_text(200)\n elif isinstance(env, mplane.model.Envelope):\n for msg in env.messages():\n if not isinstance(msg, mplane.model.Result) \\\n and not isinstance(msg, mplane.model.Receipt) \\\n and not isinstance(msg, mplane.model.Exception):\n self._respond_plain_text(401, \"Not a result (or receipt) received on result path\")\n return\n # fall through\n self._listenerclient.handle_message(env, self._tls.extract_peer_identity(self.request))\n self._respond_plain_text(200)\n return\n else:\n self._respond_plain_text(401, \"Not authorized: not a result (or receipt) received on result path\")\n return\n\n # if is an Envelope (containing capabilities)\n elif self.request.path == self._listenerclient.registration_path:\n if isinstance(env, mplane.model.Envelope):\n self._listenerclient.handle_message(env, self._tls.extract_peer_identity(self.request))\n response = self.generate_response(env)\n self._respond_json_text(200, response)\n return\n else:\n self._respond_plain_text(400, \"Not a capability / Wrong format\")\n return\n else:\n # registration and result path are the same, so we need to differentiate the handling of the\n # request based on what's in the request body\n if isinstance(env, mplane.model.Result) \\\n or isinstance(env, mplane.model.Receipt) \\\n or isinstance(env, mplane.model.Exception):\n self._listenerclient.handle_message(env, self._tls.extract_peer_identity(self.request))\n self._respond_plain_text(200)\n return\n elif isinstance(env, mplane.model.Envelope):\n self._listenerclient.handle_message(env, self._tls.extract_peer_identity(self.request))\n for msg in env.messages():\n if isinstance(msg, mplane.model.Result) or isinstance(env, mplane.model.Receipt):\n self._respond_plain_text(200)\n return\n elif isinstance(msg, mplane.model.Capability) or isinstance(msg, mplane.model.Withdrawal):\n response = self.generate_response(env)\n self._respond_json_text(200, response)\n return\n else:\n self._respond_plain_text(401, \"Not authorized\")\n return",
"async def send_response(\n self, response_url: Optional[str] = None, **kwargs: Optional[Any]\n ):\n req_args = dict(\n # contents of messenger[UserDict]\n **self,\n # any other API fields\n **kwargs,\n )\n\n api_url = response_url or self.response_url\n\n res = await self.client._request( # noqa\n http_verb=\"POST\", api_url=api_url, req_args=dict(json=req_args)\n )\n\n status = res[\"status_code\"]\n\n if status != 200:\n raise SlackApiError(\n message=\"Failed to send response_url: {}: status={}\".format(\n api_url, status\n ),\n response=res,\n )\n\n return True",
"def test_public_proxy(self, retry_on_failure=True):\n logging.info(\"Testing Proxy: %s (%s)\" % (self.proxy_bag[0][\"ip\"], self.proxy_bag[0][\"country\"]))\n self.use_skip_ssl_verify()\n self.headers = {\"Content-Type\": \"application/json\"}\n test_url = self.remote_service_api.replace(\"api\", \"test\")\n\n test_response = self.get(test_url)\n self.use_skip_ssl_verify(False)\n\n # if not test_response:\n # logging.error(\"Could not find a working proxy.\")\n # return False\n\n logging.debug(\"Registered Proxy %s (%s) Test Request Took: %s\" % (\n self.proxy_bag[0][\"ip\"],\n self.proxy_bag[0][\"country\"],\n test_response.roundtrip))\n\n return True",
"def handle_response(self, response):\n self.__log(f'Received response from server. The code is: \"{response}\"')\n if not response.status_code == 200:\n self.handle_api_error(response)\n self.to_output_file(response.text)",
"def apply_response(self, request):\n assert request.response is not None\n response = request.response\n\n other_addr = self.get_other_address()\n\n self.processor.process_command(\n other_addr=other_addr,\n command=request.command,\n cid=request.cid,\n status_success=request.is_success(),\n error=response.error if response.error else None\n )",
"def _response_from_proxy(self, form: dict, proxy_marker_attr: str) -> 'Response':\n\n if (proxy_marker_attr in form['data']) and (form['data'][proxy_marker_attr] != self.did):\n endpoint = json.loads(get_endpoint_attrib(form['data'][proxy_marker_attr]))\n # (host, port) = tuple(endpoint['ha'].split(':'))\n form['data'].pop(proxy_marker_attr)\n r = post(\n 'http://{}/{}/{}'.format(\n endpoint['ha'],\n self.agent_api_path,\n form['type']),\n json=form) # requests module json-encodes\n r.raise_for_status()\n return r.json()\n\n return None",
"def requestTunnel(self, protocol):\n\n\t\ttunnelReq = 'CONNECT %s:%s HTTP/1.1\\r\\n' % ( self._tunneledHost, self._tunneledPort)\n\t\tif self._proxy.auth_header:\n\t\t\ttunnelReq += 'Proxy-Authorization: ' + self._proxy.auth_header + '\\r\\n'\n\n\t\ttunnelReq += '\\r\\n'\n\t\tprotocol.transport.write(tunnelReq)\n\t\tself._protocolDataReceived = protocol.dataReceived\n\t\tprotocol.dataReceived = self.processProxyResponse\n\t\tself._protocol = protocol\n\t\treturn protocol",
"def got_response(self, response, parser):\r\n protocol = HTTPResponseProtocol(parser, self.tolerant)\r\n return protocol.handle_response(response)",
"def do_CONNECT(self):\n self.log.debug('do_CONNECT called')\n pre_ssl_request = ssl_proxy_log.Request(self.path, 'CONNECT', self.headers,\n self.log, self.http_redirect_table, self.ssl_redirect_table)\n\n spoof = self.SSLSpoofCheck(self.path)\n\n if not spoof:\n request_log = self.BypassSSL(pre_ssl_request, self.connection)\n\n if not request_log:\n return\n\n if self.interactive:\n self.Log(request_log)\n\n return\n\n ssl_response = self.SpoofSSL(pre_ssl_request, self.connection)\n self.log.debug('do_CONNECT: Host to connect to: %s' % self.path)\n\n # Now that the Client thinks they are talking to the server, redo the\n # request processing as if we are the target server.\n self.raw_requestline = self.rfile.readline()\n if not self.raw_requestline:\n self.close_connection = 1\n return False\n\n if not self.parse_request(): # An error code has been sent, just exit\n return False\n\n mname = 'do_' + self.command\n\n if not hasattr(self, mname):\n self.send_error(501, \"Unsupported method (%r)\" % self.command)\n return False\n\n method = getattr(self, mname)\n\n # Build a new path for an HTTPS operation, and call the correct method\n target_host = pre_ssl_request.GetTargetHost()\n \n\n _parts = self.path.split('/', 3)\n if len(_parts) > 3:\n _path = _parts[3]\n else:\n _path = ''\n \n \n self.path = 'https://%s:%s/%s' % (target_host[0], target_host[1], _path)\n print('URL: %s' % self.path)\n\n if not hasattr(self, mname):\n self.send_error(501, \"Unsupported method (%r)\" % self.command)\n return\n\n self.log.debug('do_CONNECT: New SSL path: %s' % self.path)\n\n method = getattr(self, mname)\n method()",
"def _check_tunnel(self, _srv):\n if self.skip_tunnel_checkup:\n self.tunnel_is_up[_srv.local_address] = True\n return\n self.logger.info('Checking tunnel to: {0}'.format(_srv.remote_address))\n if isinstance(_srv.local_address, string_types): # UNIX stream\n s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n else:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.settimeout(TUNNEL_TIMEOUT)\n try:\n # Windows raises WinError 10049 if trying to connect to 0.0.0.0\n connect_to = ('127.0.0.1', _srv.local_port) \\\n if _srv.local_host == '0.0.0.0' else _srv.local_address\n s.connect(connect_to)\n self.tunnel_is_up[_srv.local_address] = _srv.tunnel_ok.get(\n timeout=TUNNEL_TIMEOUT * 1.1\n )\n self.logger.debug(\n 'Tunnel to {0} is DOWN'.format(_srv.remote_address)\n )\n except socket.error:\n self.logger.debug(\n 'Tunnel to {0} is DOWN'.format(_srv.remote_address)\n )\n self.tunnel_is_up[_srv.local_address] = False\n\n except queue.Empty:\n self.logger.debug(\n 'Tunnel to {0} is UP'.format(_srv.remote_address)\n )\n self.tunnel_is_up[_srv.local_address] = True\n finally:\n s.close()",
"def send_final_response(self, response):\n if response.get_meaning() == ResponseMeaning.Informational:\n raise TurmdrehkranException(\"Final Response can not be informational.\")\n self.client_sock.sendall(str(response))"
]
| [
"0.5587567",
"0.5469762",
"0.5467382",
"0.542428",
"0.537138",
"0.5313755",
"0.53075236",
"0.52798104",
"0.5274118",
"0.5259868",
"0.5248621",
"0.5247179",
"0.523715",
"0.5236347",
"0.52206075",
"0.5212699",
"0.51765764",
"0.51717204",
"0.51508975",
"0.5145908",
"0.51360375",
"0.51326954",
"0.5121367",
"0.5101061",
"0.50985193",
"0.5097501",
"0.50697654",
"0.50669146",
"0.5064436",
"0.5061168"
]
| 0.7779374 | 0 |
Plot histogram of population fitness values | def plt_hist(pop, bin_limit=fit_range):
plt.hist(pop, bins=range(0,bin_limit+1))
plt.grid(True)
plt.title('Distribution of Population')
plt.show() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def plot_data(self,feature_idx):\r\n\t\tfig,ax = plt.subplots()\r\n\t\tax.set(title = 'Gaussian Histogram',ylabel = 'Frequency',xlabel = \"Chosen feature\")\r\n\t\tax.hist(self.x[feature_idx], edgecolor = 'black', facecolor = 'r')\r\n\t\tplt.show()",
"def geneticAlgorithmPlot(population, popSize, fittestSize, mutationRate, generations):\n pop = GA.initialPopulation(popSize, population)\n progress = []\n progress.append(1 / GA.rankRoutes(pop)[0][1])\n \n for i in range(0, generations):\n pop = GA.nextGeneration(pop, fittestSize, mutationRate)\n progress.append(1 / GA.rankRoutes(pop)[0][1])\n \n plt.plot(progress)\n plt.ylabel('Distance')\n plt.xlabel('Generation')\n plt.show()",
"def plot_trait_histogram(params, traits):\n\n pylab.subplot(122).clear()\n pylab.xlabel(r\"$x$\")\n pylab.ylabel(r\"$I$\")\n pylab.hist(traits, 100, range = (0, params[\"max_trait\"]), normed = True, \n facecolor = \"black\")\n pylab.xlim(0, params[\"max_trait\"])\n pylab.ylim(0, params[\"population\"])\n ax = pylab.gca()\n ax.yaxis.major.formatter.set_powerlimits((0,0))\n pylab.draw()",
"def hist(self):\r\n plt.hist(self.data_array, bins='auto', density=False, facecolor='b')\r\n plt.title(self.column_name)\r\n plt.savefig(self.column_name + \".svg\")\r\n plt.close()",
"def makeHistogram(values, numBins, xLabel, yLabel, title=None):",
"def generate_heatmap(self):\n data = []\n for j in range(len(self.generations[0])):\n row = []\n for i in range(len(self.generations)):\n row.append(self.generations[i][j].fitness)\n data.append(row)\n data = np.array(data)\n\n plt.figure()\n ax = sns.heatmap(\n data,\n cmap='RdBu',\n xticklabels=2,\n yticklabels=2)\n\n hfont = {'fontname': 'Helvetica'}\n plt.xlabel('Generation', **hfont)\n plt.ylabel('Individual', **hfont)\n ax.invert_yaxis()\n ax.axhline(linewidth=4, color='black')\n ax.axvline(linewidth=4, color='black')\n ax.collections[0].colorbar.set_label('Fitness')\n plt.savefig('figures/Voltage Clamp Figure/Single VC Optimization/'\n 'heatmap.svg')",
"def plot_histogram(self) -> None:\n\n if self.data:\n plt.hist(self.data)\n plt.title(\"Histogram of data\")\n plt.xlabel(\"data\")\n plt.ylabel(\"count\")\n else:\n raise ValueError(\"Histogram cannot be generated as no\\\n data has been provided\")",
"def plot_learning(self):\n plt.plot([i for i in range(len(self.fitness_list))], self.fitness_list)\n plt.ylabel(\"Fitness\")\n plt.xlabel(\"Iteration\")\n plt.show()",
"def histopi(data):\n dataset = discrete_dataset(data)\n theoretical_dataset = [theoretical_effective(dataset)]*10\n observed = plt.bar(numpy.arange(len(dataset)) - 0.4, dataset, color=\"blue\", width=0.4)\n theoretical = plt.bar(numpy.arange(len(theoretical_dataset)), theoretical_dataset, color=\"deepskyblue\", width=0.4)\n plt.legend([observed, theoretical], [\"effectifs observes\", \"effectifs theoriques\"])\n plt.xlabel('pi digits')\n plt.ylabel('occurrence')\n plt.axis([-0.7, 9.7, 0, 130000])\n plt.savefig(\"report/khi2_histopi.png\", bbox_inches='tight')\n return dataset",
"def distribution_sentimentscore_histogram(cur, var, table, label):\n x = select(cur,var, table)\n print(\"Number of entries: \", len(x))\n print(\"Maximum: \", max(x))\n print(\"Minimum: \", min(x))\n \n fig = plt.figure()\n ax = fig.add_subplot(1,1,1)\n ax.set_xlabel(\"Sentiment Score\")\n ax.set_ylabel(\"Number of Loans\")\n fig.suptitle(label)\n ax.hist(x, bins = 15)\n plt.show()",
"def plot_histogram(df):\n\n\t# find the highest value in the column\n\tmaximum_index = df['GDP ($ per capita) dollars'].idxmax()\n\t\n\t# if graph is being plotted use this style\n\tplt.style.use('seaborn-darkgrid')\n\n\t# plot 2 graph's in one figure\n\tplt.figure(1)\n\tplt.subplot(211)\n\n\t# plot a histogram with the GDP data and set a title, x-label, y-label\n\thist_plot = df['GDP ($ per capita) dollars'].hist(bins=50, color = \"pink\")\n\thist_plot.set_title('Before')\n\thist_plot.set_xlabel('GDP ($ per capita) dollars')\n\thist_plot.set_ylabel('Frequency')\n\n\t# Drop the maximum value and plot second gragh\n\tdf.drop(maximum_index, inplace=True)\n\tplt.subplot(212)\n\n\t# plot a histogram with the GDP data and set a title, x-label, y-label\n\thist_plot = df['GDP ($ per capita) dollars'].hist(bins=50, color = \"pink\")\n\thist_plot.set_title('After')\n\thist_plot.set_xlabel('GDP ($ per capita) dollars')\n\thist_plot.set_ylabel('Frequency')\n\tplt.show()\n\n\t# descriptive statistics to have a quick look\n\tdescription = df['GDP ($ per capita) dollars'].describe()",
"def histogram(values, title, fig_size=(4,3), path=None):\n plt.clf()\n f, ax = plt.subplots(1, figsize=fig_size)\n ax.hist(values, bins=60)\n ax.set_title(title)\n f.tight_layout()\n if(path != None):\n f.savefig(path+'/hist_'+title+'.png')",
"def plot_features(data: np.array)->None:\n n_rows = np.size(data, 0)\n n_cols = np.size(data, 1)\n for i in range(n_cols):\n plt.hist(data[:,i])\n plt.show()",
"def hist_pvalue(perms, ax, name):\n # Re-weight to obtain distribution\n pval = np.sum(perms >= perms[0]) / perms.shape[0]\n weights = np.ones(perms.shape[0]) / perms.shape[0]\n ax.hist([perms[perms >= perms[0]], perms], histtype='stepfilled',\n bins=100, label=\"p-val<%.3f\" % pval,\n weights=[weights[perms >= perms[0]], weights])\n ax.axvline(x=perms[0], color=\"k\", linewidth=2)#, label=\"observed statistic\")\n ax.set_ylabel(name)\n ax.legend()\n return ax",
"def basic_statistics():\n print(train_data['revenue'].describe())\n plt.hist(train_data['revenue'], color = 'blue', edgecolor = 'black',\n bins = int(4))\n\n # Add labels\n plt.title('Histogram of Revenues')\n plt.xlabel('revenues')\n plt.ylabel('P(revenues)')\n plt.show()",
"def plot_hitstogram_graph(data_values, title,\r\n number_of_keys,\r\n max_val,\r\n file_in):\r\n\r\n # bins = max(data_values)\r\n # pylab.hist(data_values, facecolor='blue')\r\n pylab.hist(data_values, facecolor='green', alpha=0.6)\r\n pylab.grid(True)\r\n pylab.title(title + \"_histogram\")\r\n pylab.xlabel('number in cluster')\r\n pylab.ylabel('Count')\r\n pylab.savefig(file_in + \"_\" + title + '_histogram.png')\r\n plt.close()\r\n pylab.close()\r\n os.chdir('..')",
"def income_distribution_plot(income_data,year):\n income_year = income_data.loc[year]\n plt.figure(figsize=(10,8))\n income_year.hist(bins=100,alpha=0.3,color='k')\n plt.title('Income Distribution of Year %s' % year)\n plt.xlabel('Income per person')\n plt.ylabel('Frequency')\n plt.savefig('Income distribution of year %s' % year)",
"def show_bryant(data_length=200000, bins=100):\n\n data = np.random.normal(0,1,data_length)\n plt.hist(data, bins)\n plt.show()",
"def plot_variation_distn(gene_vars: pd.DataFrame):\n plt.hist(gene_vars.median(axis=1), bins=100, alpha=0.4, label='median')\n plt.hist(gene_vars.mean(axis=1), bins=100, alpha=0.4, label='mean')\n plt.legend()",
"def plot_histogram(self, years_statistics):\n\n plt.hist(years_statistics, normed=True)\n plt.ylabel('Histogram');\n plt.hist(years_statistics)\n plt.title(\"Statistics for years\")\n plt.xlabel(\"Value\")\n plt.ylabel(\"Frequency\")\n plt.show()",
"def visualize(self):\n self.dataFrame.hist()\n plt.show()",
"def PlotHist(self, label=None):\n ys, xs, patches = plt.hist(self.test_stats)\n plt.vlines(self.actual, 0, max(ys), linewidth=3, color='black')\n plt.xlabel('test statistic')\n plt.ylabel('count')\n plt.show()",
"def plot_histograms(self):\n for year in self.years:\n # Don't produce a histogram if there are no polities or the\n # empire did not exist at this century\n if self.n_polities[year] == 0:\n continue\n elif self.occupied[year] == []:\n continue\n\n # Initialise figure and axis\n fig = plt.figure()\n ax = fig.subplots(1, 2)\n\n polity_sizes = self.polity_sizes[year]\n bins = max(polity_sizes)\n\n ax[0].set_title('Polities of size N')\n ax[0].set_ylabel('number of polities')\n ax[0].set_xlabel('size of polity / cells')\n ax[0].hist(polity_sizes, bins=bins)\n ax[1].set_title('Number of cells in\\npolities of size N')\n ax[1].set_ylabel('number of cells')\n ax[1].set_xlabel('size of polity / cells')\n ax[1].hist(polity_sizes, bins=bins, weights=polity_sizes)\n fig.tight_layout()\n fig.savefig('{}_{}.pdf'.format(self.name, year))",
"def plot_hist(self):\n labels = [self.get_class_str(action, obj)\n for (action, obj, subj, rec, beg, end) in self.action_clips]\n visualize.plot_hist(labels, proportion=True)",
"def plot_feature_histograms(self):\n\n num_features = len(self.feature_names)\n fig, ax = plt.subplots(nrows=1, ncols=num_features, figsize=(50, 2), tight_layout=True)\n fig.suptitle('Histograms of 19 features', fontsize=14)\n\n for i in range(num_features):\n ax[i].hist(self.train_data[self.train_data.columns[i]], bins=50)\n ax[i].set_xlabel(self.train_data.columns[i])\n\n plt.savefig(r'data_analysis\\histograms_' + self.file_name + '.png', \n facecolor=fig.get_facecolor(), bbox_inches='tight')",
"def distribution_magnitude_histogram(cur, var, table, label):\n x = select(cur,var, table)\n print(\"Number of entries: \", len(x))\n print(\"Maximum: \", max(x))\n print(\"Minimum: \", min(x))\n \n fig = plt.figure()\n ax = fig.add_subplot(1,1,1)\n ax.set_xlabel(\"Sentiment Magnitude\")\n ax.set_ylabel(\"Number of Sentences\")\n fig.suptitle(label)\n ax.hist(x, bins = 20)\n plt.show()",
"def plot_hist_for_pop(exp,data_crop,IV):\n if not os.path.exists('plots'):\n os.makedirs('plots')\n DVdata=data_crop.loc[:,IV] \n if exp == 'POP1':\n colors = [\"orangered\", \"silver\"]\n else:\n colors = [\"steelblue\", \"silver\"]\n \n plt.figure()\n customPalette = sns.set_palette(sns.color_palette(colors))\n plt.rc('font', size=15) \n\n \n sns.distplot(DVdata,kde=False)\n sns.despine(top=True,right=True)\n ax = plt.gca()\n if IV == 'Loss.streak.outcome...zero':\n ax.set_xlim(right=75)\n plt.tight_layout()\n plt.savefig(fname='plots/hist_'+IV + '.eps',format='eps',transparent=True)",
"def plot_sample_distribution(samples):\n plt.hist(samples, 50)\n plt.xlabel('Value of a sample')\n plt.ylabel('Number of samples')\n plt.title('Sample distribution')\n plt.show()",
"def generate_heatmap(self):\n data = []\n for j in range(len(self.generations[0])):\n row = []\n for i in range(len(self.generations)):\n row.append(self.generations[i][j].fitness)\n data.append(row)\n data = np.array(data)\n\n # Display log error in colorbar.\n tick_range = range(\n math.floor(math.log10(data.min().min())),\n 1 + math.ceil(math.log10(data.max().max())))\n cbar_ticks = [math.pow(10, i) for i in tick_range]\n log_norm = LogNorm(vmin=data.min().min(), vmax=data.max().max())\n\n plt.figure(figsize=(10, 5))\n ax = sns.heatmap(\n data,\n cmap='viridis',\n xticklabels=2,\n yticklabels=2,\n norm=log_norm,\n cbar_kws={'ticks': cbar_ticks, 'aspect': 15})\n\n hfont = {'fontname': 'Helvetica'}\n plt.xlabel('Generation', **hfont)\n plt.ylabel('Individual', **hfont)\n plt.xticks(\n [i for i in range(0, self.config.max_generations, 5)],\n [i for i in range(0, self.config.max_generations, 5)])\n plt.yticks(\n [i for i in range(0, self.config.population_size, 5)],\n [i for i in range(0, self.config.population_size, 5)])\n\n ax.invert_yaxis()\n ax.collections[0].colorbar.set_label('Error')\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n plt.savefig('figures/Parameter Tuning Figure/heatmap.svg')",
"def plot_hist(df, num_bins=8):\n df.hist(figsize=(24, 20), bins=num_bins)\n plt.axes"
]
| [
"0.66024387",
"0.64840144",
"0.64536685",
"0.63611174",
"0.63259864",
"0.61939675",
"0.6187393",
"0.61788845",
"0.6167037",
"0.6154359",
"0.60931194",
"0.6068298",
"0.60612893",
"0.60508525",
"0.60471725",
"0.60467935",
"0.60432434",
"0.60355425",
"0.60211074",
"0.6012157",
"0.6000709",
"0.5996851",
"0.59864795",
"0.597355",
"0.59718764",
"0.5947451",
"0.5942998",
"0.59412074",
"0.5931718",
"0.59308"
]
| 0.67125946 | 0 |
Check that instance can't be instantiated without implementation of the 'apply_connectivity_changes' | def test_apply_connectivity_changes(self):
with self.assertRaisesRegexp(TypeError, "Can't instantiate abstract class TestedClass with "
"abstract methods apply_connectivity_changes"):
self.tested_class() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_connection(self):\n pass",
"def test_cannot_be_instantiated(self):\n with self.assertRaises(NotImplementedError):\n Channel(0)",
"def _check_connection(self):\n if \"_connection\" not in self.__dict__:\n message = \"use connect method before doing operation on this database\"\n raise Exception(message)",
"def check_stability(self):",
"def check_init(self):\n if self.Nlayer > 1:\n raise Exception(\"Nlayer == 1 currently\")",
"def check_connection(self):\n return False",
"def test_cannot_be_instantiated(self):\n with self.assertRaises(NotImplementedError):\n ClassicalIOChannel(0)",
"def test_cannot_be_instantiated(self):\n with self.assertRaises(NotImplementedError):\n PulseChannel(0)",
"def test_live_migration_src_check_compute_node_not_alive(self):\n instance_id = self._create_instance()\n i_ref = db.instance_get(self.context, instance_id)\n t = utils.utcnow() - datetime.timedelta(10)\n s_ref = self._create_compute_service(created_at=t, updated_at=t,\n host=i_ref['host'])\n\n self.assertRaises(exception.ComputeServiceUnavailable,\n self.scheduler.driver._live_migration_src_check,\n self.context, i_ref)\n\n db.instance_destroy(self.context, instance_id)\n db.service_destroy(self.context, s_ref['id'])",
"def __instancecheck__(self, instance):\n\n if isinstance(instance, ObjCInstance):\n return bool(instance.conformsToProtocol(self))\n else:\n return False",
"def rescue(self, instance):\n pass",
"def __init__(self):\n self.try_to_connect()",
"def testInvalidNetworkInfo(self):\n ### create test resources\n instance_name = \"end-to-end-test-instance-1\"\n instance_selfLink = \\\n self.test_resource_creator.create_instance_using_template(\n instance_name,\n self.test_resource_creator.legacy_instance_template_selfLink)[\n 'targetLink']\n original_config = self.google_api_interface.get_instance_configs(\n instance_name)\n ### start migration\n selfLink_executor = SelfLinkExecutor(self.compute, instance_selfLink,\n 'an-invalid-network-for-testing',\n self.test_resource_creator.subnetwork_name,\n True)\n with self.assertRaises(Exception):\n migration_handler = selfLink_executor.build_migration_handler()\n migration_handler.network_migration()\n\n ### check result\n # Terminate before migration starts\n new_config = self.google_api_interface.get_instance_configs(\n instance_name)\n self.assertEqual(original_config, new_config)\n print('Pass the current test')",
"def check(self):\n raise NotImplementedError",
"def test_connection(cls, instances):\n try:\n instance, = instances\n except ValueError:\n cls.raise_user_error('multiple_instances')\n\n try:\n with magento.API(\n instance.url, instance.api_user, instance.api_key\n ):\n return\n except (\n xmlrpclib.Fault, IOError, xmlrpclib.ProtocolError, socket.timeout\n ):\n cls.raise_user_error(\"connection_error\")",
"def _check_validity(self):\n pass",
"def should_check_refcount(self):\n raise NotImplementedError()",
"def can_detect_offline(self):\n raise NotImplementedError(\"Abstract method, must be overridden\")",
"def test_raises_when_accessing_none_implementation(self):\n\n class APIObj(\n platform.APIObject,\n collections.namedtuple(\"APIObj\", \"implementation\"),\n ):\n def __new__(cls):\n return super().__new__(cls, implementation=None)\n\n obj = APIObj()\n\n with pytest.raises(AttributeError) as exc_info:\n obj.implementation # pylint: disable=pointless-statement\n\n assert \"invalid access to 'implementation': not initialized\" in str(\n exc_info.value\n )",
"def test_cant_call_after_creation(self):\n self.assertTrue(not hasattr(self.Foo, '_config'))",
"def test_live_migration_dest_check_not_alive(self):\n instance_id = self._create_instance()\n i_ref = db.instance_get(self.context, instance_id)\n t = utils.utcnow() - datetime.timedelta(10)\n s_ref = self._create_compute_service(created_at=t, updated_at=t,\n host=i_ref['host'])\n\n self.assertRaises(exception.ComputeServiceUnavailable,\n self.scheduler.driver._live_migration_dest_check,\n self.context, i_ref, i_ref['host'], False)\n\n db.instance_destroy(self.context, instance_id)\n db.service_destroy(self.context, s_ref['id'])",
"def valid_endpoint(cls):\n\t\treturn cls.__subclasses__() == []",
"def check_connectivity(self):\n return self.connected",
"def __instancecheck__(self, instance):\n\n if isinstance(instance, ObjCInstance):\n return bool(instance.isKindOfClass(self))\n else:\n return False",
"def initialize_network_los() -> bool:\n return True",
"def check_consistency(self):\n raise NotImplementedError()",
"def _check_connect(self) -> bool:\n\n if (self._conn is None):\n if (self._exception):\n raise base_connection.ConnectException(\n \"No connection established\")\n\n else:\n return False\n\n return True",
"def sanity_check(self):\n pass",
"def __init__(self):\n raise Exception(\"Cannot create this object\")",
"def __init__(self):\n raise Exception(\"Cannot create this object\")"
]
| [
"0.6313133",
"0.6298124",
"0.61359864",
"0.61053586",
"0.6011557",
"0.59839636",
"0.5937353",
"0.5771025",
"0.56448454",
"0.5626299",
"0.5595381",
"0.5570998",
"0.5547566",
"0.5543715",
"0.553049",
"0.5529679",
"0.55031174",
"0.5496212",
"0.5473476",
"0.5470526",
"0.5464141",
"0.5431464",
"0.54224473",
"0.5406907",
"0.54052985",
"0.54035056",
"0.5400479",
"0.53794634",
"0.5369122",
"0.5369122"
]
| 0.7049147 | 0 |
Check the requests collection for orders to change the validity of an upload. | def check_for_requests(self):
while True:
doc = self.cc.requests_coll.find_one_and_delete(
{'receiver': 'validator'}, sort=[('_id', pymongo.ASCENDING)]
)
if doc is None:
break
if doc['action'] == 'validate_upload':
print("fulfil request: set valid: {} for upload_id {}".format(doc['valid'], doc['upload_id']))
self.validate_upload(ObjectId(doc['upload_id']), doc['valid']) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def _check_order_update(self, *args, **kwargs):\n order_nos = list(self._orders.keys())\n if not order_nos:\n return\n for order_no in order_nos:\n success, error = await self._rest_api.get_order_status(order_no)\n if error:\n return\n await self._update_order(success[\"data\"][0])",
"def test_upload_iteration(upload_items: List[JSONDict]) -> None:\n validated = UploadCollection(items=upload_items)\n for i, item in zip(range(len(validated)), validated):\n assert validated[i] == item",
"def test_pre_upload_ok(self):\n namespace = 'default-gzip'\n good_digests = generate_collection(namespace, ['a pony'])\n self.assertEqual(good_digests.namespace.namespace, namespace)\n response = self.call_api(\n 'preupload', self.message_to_dict(good_digests), 200)\n message = response.json.get(u'items', [{}])[0]\n self.assertEqual('', message.get(u'gs_upload_url', ''))\n expected = validate(\n message.get(u'upload_ticket', ''),\n handlers_endpoints_v1.UPLOAD_MESSAGES[0])\n self.assertEqual(\n expected, generate_embedded(namespace, good_digests.items[0]))",
"def check(self):\n self.__check_request_limit()",
"def can_update_order_items(self) -> bool:\n return self.is_created or self.is_pending",
"def test_unique_upload_items(upload_items: List[JSONDict]) -> None:\n validated = UploadCollection(items=upload_items)\n assert validated.dict() == upload_items",
"def check_for_uploads(self):\n\n def set_action_id_ops() -> Sequence[Tuple[UpdateOne, InsertOne]]:\n find_query = {\n 'complete': True,\n self.action_id_name: {'$exists': False},\n 'meta.format': {'$exists': True},\n 'meta.start_time': {'$exists': True},\n 'meta.stop_time': {'$exists': True}\n }\n\n # apply filter from environment config\n if isinstance(self.cc.validator_upload_filter, dict):\n find_query.update(self.cc.validator_upload_filter)\n\n cursor = self.cc.metadata_coll.find(find_query).sort('timestamp')\n for upload in cursor:\n action_id = self._action_id_creator()\n print(\"assign action id {} to upload {}\".format(action_id, upload['_id']))\n uploads_query = UpdateOne({'_id': upload['_id']},\n {'$set': {self.action_id_name: action_id,\n self.valid_name: True}})\n\n timespans = [(upload['meta']['start_time'], upload['meta']['stop_time'])]\n\n action_log_query = InsertOne({\n '_id': action_id,\n 'output_formats': [upload['meta']['format']],\n 'timespans': timespans,\n 'action': 'upload',\n 'upload_ids': [upload['_id']]\n })\n\n yield uploads_query, action_log_query\n\n try:\n for uploads_block, action_log_block in grouper_transpose(set_action_id_ops(), 1000):\n self.cc.metadata_coll.bulk_write(uploads_block)\n self.cc.action_log.bulk_write(action_log_block)\n except BulkWriteError as e:\n # most likely a configuration error\n print(e.details)\n raise",
"def test_admin_change_order_status(self):\n # Test unregistered id\n # Correct format but not there\n response = self.client.put(\n 'api/v1/parcels/35420', headers=self.admin_token_dict)\n data = json.loads(response.data)\n self.assertEqual(response.status_code, 400)\n self.assertEqual(\n data, {'message': 'No Parcel delivery order with that id'})\n # Test invalid format id\n response = self.client.put(\n 'api/v1/parcels/35uh420', headers=self.admin_token_dict) # Incorrect id format\n data = json.loads(response.data)\n self.assertEqual(response.status_code, 400)\n self.assertEqual(data, {'message': 'Wrong id format'})",
"def clean(self):\n cleaned_data = super().clean()\n\n if self.project.storagerequests.filter(is_active=True):\n raise forms.ValidationError(\n 'This project already has an outstanding storage request.')\n return cleaned_data",
"def init_check(self):\n for required_file in self._required_files:\n # Check if required files are there\n # FIXME Sometimes it doesn't work :?\n if required_file not in self.files:\n self.valid = False",
"def validate_collection_response(self, response):\n\n self.validate_response(response)\n if response.status_code not in self.model._meta['valid_get_status']:\n raise InvalidStatusError(\n self.model._meta['valid_get_status'], response\n )",
"def test_update_order(self):\n response = self.api_test_client.put('{}/orders/1'.format(\n self.BASE_URL), json={'order_status': 'accepted'})\n\n self.assertEqual(response.status_code, 201)\n self.assertTrue(\n response_as_json(response)['order']['status_updated_on'])\n self.assertEqual(\n response_as_json(response)['order']['order_status'], 'accepted')",
"def test_update_order_with_no_status(self):\n response = self.api_test_client.put('{}/orders/1'.format(\n self.BASE_URL), json={})\n\n self.assertEqual(response.status_code, 400)\n self.assertEqual(response_as_json(\n response)['message'], 'Bad request. Missing required param')",
"def test_uploaded_file_in_file_list(self):\n # Send file\n enc_file_path, additional_data = self.send_file(self.nonce1, self.nonce2)\n enc_file_name = enc_file_path.name\n # Get list back from server, and see if it is there\n file_list = self.serverComs.get_file_list()\n self.assertIn(enc_file_name, [x[0] for x in file_list], \"File not on the server!\")",
"def check_active_requests():\n\n active_requests = jobtracker.query(\"SELECT * FROM requests \" \\\n \"WHERE status='waiting'\")\n for request in active_requests:\n\n\t# Check requested status \n\tif DownloaderSPAN512.check_request_done(request):\n\t dlm_cout.outs(\"Restore (GUID: %s) has succeeded. Will create file entries.\\n\" % request['guid'])\n\t create_file_entries(request)\n\n\telse:\n#\t dlm_cout.outs(\"Request (GUID: %s) has failed.\\n\" \\\n#\t \"\\tDatabase failed to report the data as restored.\" % request['guid'])\n#\t jobtracker.query(\"UPDATE requests SET status='failed', \" \\\n# \"details='Request failed. Why ?', \" \\\n# \"updated_at='%s' \" \\\n# \"WHERE guid='%s'\" % (jobtracker.nowstr(), request['guid']))\n\n query = \"SELECT (TO_SECONDS('%s')-TO_SECONDS(created_at)) \" \\\n \"AS deltaT_seconds \" \\\n \"FROM requests \" \\\n \"WHERE guid='%s'\" % \\\n (jobtracker.nowstr(), request['guid'])\n row = jobtracker.query(query, fetchone=True)\n #if row['deltaT_seconds']/3600. > config.download.request_timeout:\n if row/3600. > config.download.request_timeout:\n dlm_cout.outs(\"Restore (GUID: %s) is over %d hr old \" \\\n \"and still not ready. Marking \" \\\n \"it as failed.\" % \\\n (request['guid'], config.download.request_timeout))\n jobtracker.query(\"UPDATE requests \" \\\n \"SET status='failed', \" \\\n \"details='Request took too long (> %d hr)', \" \\\n \"updated_at='%s' \" \\\n \"WHERE guid='%s'\" % \\\n (config.download.request_timeout, jobtracker.nowstr(), \\\n request['guid']))",
"def check_files_for_update(request):\n\n reports = []\n pubmedfiles = []\n\n labels = []\n concepts = []\n type1 = request.POST.get('type',None)\n for filename, file in request.FILES.items():\n if filename.startswith('reports'):\n reports.append(file)\n if filename.startswith('pubmed'):\n pubmedfiles.append(file)\n elif filename.startswith('concepts'):\n concepts.append(file)\n elif filename.startswith('labels'):\n labels.append(file)\n\n jsonDisp = request.POST.get('json_disp',None)\n jsonAnn = request.POST.get('json_ann',None)\n jsonDispUp = request.POST.get('json_disp_update',None)\n jsonAnnUp = request.POST.get('json_ann_update',None)\n load_concepts = request.POST.get('exa_concepts',None)\n load_labels = request.POST.get('exa_labels',None)\n msg = check_for_update(type1,pubmedfiles,reports,labels,concepts,jsonDisp,jsonAnn,jsonDispUp,jsonAnnUp,load_concepts,load_labels)\n jsonResp = {'message':msg}\n return JsonResponse(jsonResp)",
"def test_files_present(self, changes_file):\n for filename in changes_file.get_files():\n log.debug('Looking whether %s was actually uploaded' % filename)\n if os.path.isfile(os.path.join(pylons.config['debexpo.upload.incoming'], filename)):\n log.debug('%s is present' % filename)\n else:\n log.critical('%s is not present; importing cannot continue' % filename)\n raise OSError(\"Missing file %s in incoming\" % (filename))\n\n return True",
"def cleanup_incomplete_uploads_from_blob_store() -> bool:\n\n DAYS_TO_RETAIN = 1\n\n # Get current time in UTC timezone\n now = datetime.datetime.now(pytz.timezone(\"UTC\"))\n\n client = get_s3_client(settings=node.settings)\n incomplete_upload_objs = client.list_multipart_uploads(Bucket=node.id.no_dash).get(\n \"Uploads\", []\n )\n\n for obj in incomplete_upload_objs:\n # Get the upload id and object name\n upload_id: str = obj[\"UploadId\"]\n obj_name: str = obj[\"Key\"]\n\n # Get the list of all parts of the object uploaded\n # This step is required to get the upload time of the object\n object_parts: list = client.list_parts(\n Bucket=node.id.no_dash, UploadId=upload_id, Key=obj_name\n ).get(\"Parts\", [])\n\n obj_part_expired = False\n for part in object_parts:\n # Normalize upload time to UTC timezone\n part_upload_time = pytz.timezone(\"UTC\").normalize(part[\"LastModified\"])\n\n # If upload time of any part of the object\n # crosses DAYS_TO_RETAIN, then expire the whole object\n if (now - part_upload_time).days > DAYS_TO_RETAIN:\n obj_part_expired = True\n break\n\n if obj_part_expired:\n # Abort multipart upload\n client.abort_multipart_upload(\n UploadId=upload_id,\n Key=obj_name,\n Bucket=node.id.no_dash,\n )\n\n return True",
"def test_update_checklists_index_ignored_on_get(self):\r\n update_url = self.get_url(1)\r\n\r\n returned_checklists = json.loads(self.client.get(update_url).content)\r\n for pay, resp in zip(self.get_persisted_checklists(), returned_checklists):\r\n self.compare_checklists(pay, resp)",
"def test_check_existing_enqueues_tasks(self):\n collection = handlers_endpoints_v1.DigestCollection(\n namespace=handlers_endpoints_v1.Namespace())\n collection.items.append(\n generate_digest(collection.namespace.namespace, 'some content'))\n key = model.get_entry_key(\n collection.namespace.namespace, collection.items[0].digest)\n\n # guarantee that one digest already exists in the datastore\n model.new_content_entry(key).put()\n self.call_api('preupload', self.message_to_dict(collection), 200)\n\n # find enqueued tasks\n self.assertEqual(1, self.execute_tasks())",
"def check(self):\n\n self.check_auto_update()\n assert not self.empty()",
"def test_order_cannot_be_deleted_if_dont_exist(self):\n\n\t\tres = self.login_user()\n\t\taccess_token = json.loads(res.data.decode())['access_token']\n\n\t\tresponse = self.client().post(\n\t\t\t'/api/v2/orders',\n\t\t\theaders={\"x-access-token\": access_token},\n\t\t\tdata = json.dumps(\n\t\t\t\tself.order_data) , content_type = 'application/json')\n\t\tself.assertEqual(response.status_code, 201)\n\n\t\tresponse = self.client().delete(\n\t\t\t'/api/v2/orders/5',\n\t\t\theaders={\"x-access-token\": access_token})\n\n\t\tresult = json.loads(response.data)\n\t\tself.assertEqual(response.status_code, 404)\n\t\tself.assertEqual(result[\"message\"], \"That order is not available\")",
"def test_update_checklists_index_out_of_range(self):\r\n update_url = self.get_url(100)\r\n\r\n response = self.client.post(update_url)\r\n self.assertContains(response, 'Could not save checklist', status_code=400)",
"def test_updating_the_supply_price(self):\n self.assertEqual(self.po.id, 1)\n self.assertEqual(self.po.items.count(), 1)\n item = self.po.items.all()[0]\n self.assertEqual(item.id, 1)\n self.assertEqual(item.unit_cost, Decimal('12.11'))\n self.assertEqual(Log.objects.all().count(), 0)\n \n modified_po = copy.deepcopy(base_purchase_order)\n modified_po['items'][0]['unit_cost'] = Decimal('10.05')\n modified_po['items'][0]['id'] = 1\n modified_po['status'] = 'PROCESSED'\n del modified_po['items'][1]\n resp = self.client.put('/api/v1/purchase-order/1/',\n format='json',\n data=modified_po)\n self.assertEqual(resp.status_code, 200, msg=resp)\n resp_obj = resp.data\n self.assertEqual(resp_obj['revision'], 1)\n #Check the new pdf\n #webbrowser.get(\"open -a /Applications/Google\\ Chrome.app %s\").open(resp_obj['pdf']['url'])\n \n self.assertEqual(resp_obj['id'], 1)\n self.assertEqual(resp_obj['supplier']['id'], 1)\n self.assertEqual(resp_obj['vat'], 7)\n self.assertEqual(resp_obj['discount'], 0)\n self.assertEqual(resp_obj['revision'], 1)\n self.assertEqual(Decimal(resp_obj['grand_total']), Decimal('107.54'))\n self.assertEqual(len(resp_obj['items']), 1)\n item1 = resp_obj['items'][0]\n self.assertEqual(item1['id'], 1)\n self.assertEqual(item1['quantity'], Decimal('10.0000000000'))\n self.assertEqual(Decimal(item1['unit_cost']), Decimal('10.05'))\n self.assertEqual(Decimal(item1['total']), Decimal('100.50'))\n \n #Confirm cost change for item and supply in the database\n po = PurchaseOrder.objects.get(pk=1)\n self.assertEqual(po.grand_total, Decimal('107.54'))\n item1 = po.items.order_by('id').all()[0]\n self.assertEqual(item1.id, 1)\n self.assertEqual(item1.quantity, 10)\n self.assertEqual(item1.unit_cost, Decimal('10.05'))\n supply = item1.supply\n supply.supplier = po.supplier\n self.assertEqual(supply.cost, Decimal('10.05'))\n \n self.assertEqual(Log.objects.all().count(), 1)\n log = Log.objects.all()[0]\n self.assertEqual(log.cost, Decimal('10.05'))\n self.assertEqual(log.supply, supply)\n self.assertEqual(log.supplier, po.supplier)\n self.assertEqual(log.message, \"Price change from 12.11USD to 10.05USD for Pattern: Maxx, Col: Blue [Supplier: Zipper World]\")\n\n # Confirm that there is still only one product for this supply and supplier\n # in the database\n products = Product.objects.filter(supply=supply, supplier=po.supplier)\n self.assertEqual(len(products), 1)",
"def test_order_cannot_be_deleted_if_not_owner(self):\n\n\t\tres = self.login_user()\n\t\tress = self.login_admin_user()\n\t\taccess_token = json.loads(res.data.decode())['access_token']\n\t\ta_access_token = json.loads(ress.data.decode())['access_token']\n\n\t\tresponse = self.client().post(\n\t\t\t'/api/v2/orders',\n\t\t\theaders={\"x-access-token\": access_token},\n\t\t\tdata = json.dumps(\n\t\t\t\tself.order_data) , content_type = 'application/json')\n\t\tself.assertEqual(response.status_code, 201)\n\n\t\tresponse = self.client().delete(\n\t\t\t'/api/v2/orders/1',\n\t\t\theaders={\"x-access-token\": a_access_token})\n\n\t\tresult = json.loads(response.data)\n\t\tself.assertEqual(response.status_code, 401)\n\t\tself.assertEqual(result[\"message\"], \n\t\t\t\"Not authorized to perform this function!\")",
"def set_order_valid():\n data = select_data_source()\n order_id = data['id']\n valid = data['valid']\n user = data['user']\n \n if check_user_permission(user) : return permission_denied_return\n \n db = database.getdb()\n \n ### Check if order exists.\n \n cmd = 'select * from orders where id==\"{0}\"'.format(order_id)\n order_info = db.execute(cmd).fetchall()\n if len(order_info) != 1 :\n return validate_not_exist_return\n \n ### Check if this order belongs to the user.\n \n cmd = 'select * from orders where id==\"{0}\" AND owner==\"{1}\"'.format(order_id, user)\n user_info = db.execute(cmd).fetchall()\n if len(user_info) != 1 :\n return validate_invalid_user_return\n \n ### Check if order is not valid recently.\n \n cmd = 'select * from orders where id==\"{0}\" AND passed!=0'.format(order_id)\n order_valid = db.execute(cmd).fetchall()\n if len(order_valid) != 0 :\n return validate_is_valid_return\n \n ### Check if there is an order already valid at the same time.\n cmd = 'select time from orders where id==\"{0}\"'.format(order_id)\n order_time = db.execute(cmd).fetchall()[0][0]\n cmd = 'select * from orders where time==\"{0}\" AND passed!=0'.format(order_time)\n conflict = db.execute(cmd).fetchall()\n if len(conflict) != 0 :\n return validate_conflict_return\n \n if str.lower(valid) == 'true' :\n ### Set order valid.\n cmd = 'update orders set passed=1 where id==\"{0}\"'.format(order_id)\n db.execute(cmd)\n db.commit()\n print('set order {0} to valid.'.format(order_id))\n return validate_complete_return\n elif str.lower(valid) == 'false' :\n ### Remove the order entry.\n cmd = 'delete from orders where id=\"{0}\"'.format(order_id)\n db.execute(cmd)\n db.commit()\n print('deny the order {0} and remove it from databse.'.format(order_id))\n # TODO: email something to announce...\n return validate_reject_return\n else:\n return validate_valid_parameter_error_return",
"def test_check_existing_finds_existing_entities(self):\n namespace = 'default-gzip'\n collection = generate_collection(\n namespace, ['small content', 'larger content', 'biggest content'])\n key = model.get_entry_key(\n collection.namespace.namespace, collection.items[0].digest)\n\n # guarantee that one digest already exists in the datastore\n model.new_content_entry(key).put()\n response = self.call_api(\n 'preupload', self.message_to_dict(collection), 200)\n\n # we should see one enqueued task and two new URLs in the response\n items = response.json['items']\n self.assertEqual(2, len(items))\n self.assertEqual([1, 2], [int(item['index']) for item in items])\n for item in items:\n self.assertIsNotNone(item.get('upload_ticket'))\n\n # remove tasks so tearDown doesn't complain\n _ = self.execute_tasks()",
"def check_revision(self, request):\n assert hasattr(self, 'doc'), \"dispatcher document must be set\"\n try:\n rev = request.cgi_fields['_rev'].value\n except KeyError:\n return\n if rev != self.doc.rev:\n raise HTTP_CONFLICT(\"Your edit was based on an entity document that\"\n \" was changed by someone else after you loaded\"\n \" the edit page; the document revisions do\"\n \" not match. Go back to the entity ('Cancel')\"\n \" and retry your edit...\")",
"def test_update_order_with_non_json_data(self):\n response = self.api_test_client.put('{}/orders/1'.format(\n self.BASE_URL), data='order_status=rejected')\n\n self.assertEqual(response.status_code, 400)\n self.assertEqual(response_as_json(\n response)['message'],\n 'Bad request. Request data must be in json format')",
"def test_change_order_status_when_order_does_not_exist(self):\n response = self.api_test_client.put('{}/orders/1000'.format(\n self.BASE_URL), json={'order_status': 'accepted'})\n\n self.assertEqual(response.status_code, 404)\n self.assertEqual(response_as_json(\n response)['message'],\n 'Order with id 1000 not found')"
]
| [
"0.6284529",
"0.5638963",
"0.5551693",
"0.5446516",
"0.53494304",
"0.53440046",
"0.53349584",
"0.5324783",
"0.5308333",
"0.53029424",
"0.53028655",
"0.5274873",
"0.5270222",
"0.5244655",
"0.5208642",
"0.5177385",
"0.5153453",
"0.51222587",
"0.51172185",
"0.5111013",
"0.5100442",
"0.5072526",
"0.50625116",
"0.50604075",
"0.50550187",
"0.5047861",
"0.5033563",
"0.5025154",
"0.50171655",
"0.5015371"
]
| 0.72320664 | 0 |
Assign a new action_id to every recently completed upload | def check_for_uploads(self):
def set_action_id_ops() -> Sequence[Tuple[UpdateOne, InsertOne]]:
find_query = {
'complete': True,
self.action_id_name: {'$exists': False},
'meta.format': {'$exists': True},
'meta.start_time': {'$exists': True},
'meta.stop_time': {'$exists': True}
}
# apply filter from environment config
if isinstance(self.cc.validator_upload_filter, dict):
find_query.update(self.cc.validator_upload_filter)
cursor = self.cc.metadata_coll.find(find_query).sort('timestamp')
for upload in cursor:
action_id = self._action_id_creator()
print("assign action id {} to upload {}".format(action_id, upload['_id']))
uploads_query = UpdateOne({'_id': upload['_id']},
{'$set': {self.action_id_name: action_id,
self.valid_name: True}})
timespans = [(upload['meta']['start_time'], upload['meta']['stop_time'])]
action_log_query = InsertOne({
'_id': action_id,
'output_formats': [upload['meta']['format']],
'timespans': timespans,
'action': 'upload',
'upload_ids': [upload['_id']]
})
yield uploads_query, action_log_query
try:
for uploads_block, action_log_block in grouper_transpose(set_action_id_ops(), 1000):
self.cc.metadata_coll.bulk_write(uploads_block)
self.cc.action_log.bulk_write(action_log_block)
except BulkWriteError as e:
# most likely a configuration error
print(e.details)
raise | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_action(self, action):\n with self._mutex:\n _id = max(self._used_ids) if self._used_ids else 0\n while _id in self._used_ids:\n _id += 1\n self._actions[_id] = action\n self._used_ids.add(_id)\n self._workers[_id] = Thread(target=action.run, args=(self.api,))\n\n return _id",
"def mark_as_completed(conn, pk):\n cursor = conn.cursor()\n statement = \"update aws_files set action_completed_at=? where id=?\"\n now = datetime.utcnow()\n values = (now, pk,)\n print(\" {0}\").format(now.isoformat())\n cursor.execute(statement, values)\n conn.commit()\n cursor.close()",
"def updateActionExp(self):\n self.action_cnt += 1",
"def post(self, request: HttpRequest) -> HttpResponse:\n for __, _file in request.FILES.items():\n new_upload, _created = self.handle_post_file(_file)\n new_upload.user = request.user\n new_upload.save()\n # Count initial view\n ObjectViewFile.count_view(new_upload, request)\n LOGGER.info(\"Uploaded %s\", new_upload.filename)\n return HttpResponse(status=204)",
"def upload(request):\n ids = ((1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1),\n (10, 1), (11, 1), (12, 1), (13, 1), (14, 1), (15, 1), (16, 1), (17, 1),\n (18, 1), (19, 2), (20, 2), (21, 2), (22, 2), (23, 2), (24, 2), (25, 2),\n (26, 2), (27, 2), (28, 2), (29, 3), (30, 3), (31, 3), (32, 3), (33, 3),\n (34, 3), (35, 3), (36, 4), (37, 4), (38, 4), (39, 4), (40, 4), (41, 4),\n (42, 4), (43, 4), (44, 4), (45, 4), (46, 4), (47, 4), (48, 4), (49, 4),\n (50, 4), (51, 4), (52, 4), (53, 4), (54, 4), (55, 4), (56, 4), (57, 4),\n (58, 4), (59, 4), (60, 4), (61, 4), (62, 4), (63, 4), (64, 4), (81, 4),\n (97, 4), (98, 4), (65, 5), (66, 5), (67, 5), (68, 5), (69, 5), (70, 5),\n (71, 5), (72, 5), (73, 5), (74, 5), (75, 5), (76, 5), (77, 5), (78, 5),\n (79, 5), (80, 6), (81, 6), (82, 6), (83, 6), (84, 6), (85, 6), (86, 6),\n (87, 6), (88, 6), (89, 6), (90, 6), (91, 6), (92, 6), (93, 6), (94, 6),\n (95, 6), (96, 7), (97, 7), (98, 7), (99, 7), (100, 7), (101, 7));\n idx = 1\n products = Product.objects.all()\n for product in products:\n product.product_category.add(Category.objects.get(category_id=ids[idx][1]))\n idx += 1\n\n serializer = ProductSerializer(instance=products, context={'request': request})\n\n return Response(data=serializer.data)",
"def DoneWithId(self, id):\n self.ids.add(id)",
"def perform_action(self, action_id: int) -> None:\r\n ...",
"def action_upload(self):\n options=self.env['plm.config.settings'].GetOptions()\n status = 'uploaded'\n action = 'upload'\n default = {\n 'state': status,\n 'engineering_writable': False,\n }\n doc_default = {\n 'state': status,\n 'writable': False,\n }\n operationParams = {\n 'status': status,\n 'statusName': _('Uploaded'),\n 'action': action,\n 'docaction': 'uploaddoc',\n 'excludeStatuses': ['uploaded', 'confirmed', 'transmitted','released', 'undermodify', 'obsoleted'],\n 'includeStatuses': ['draft'],\n 'default': default,\n 'doc_default': doc_default,\n }\n if options.get('opt_showWFanalysis', False):\n return self.action_check_workflow(operationParams)\n else:\n ids=self._ids\n self.logging_workflow(ids, action, status)\n return self._action_to_perform(ids, operationParams, default)",
"def test_bulk_iterates_actions_only_once(self):\n doc = self._make_doc()\n actions = OneshotIterable([BulkActionItem.index(doc)])\n self.adapter.bulk(actions) # does not raise IterableExhaustedError",
"def _increment_file_counter(self):\n self._add_to_file_counter(1)",
"def accumulate_unique_actions_for_active_iids(self, action_type: str, limit_to_iids=None) -> PythonBatchCommandBase:\n retVal = AnonymousAccum()\n iid_and_action = self.items_table.get_iids_and_details_for_active_iids(action_type, unique_values=True, limit_to_iids=limit_to_iids)\n iid_and_action.sort(key=lambda tup: tup[0])\n previous_iid = \"\"\n\n for IID, an_action in iid_and_action:\n #log.debug(f'Marking action {an_action} on - {IID}')\n if IID != previous_iid: # avoid multiple progress messages for same iid\n actions_of_iid_count = 0\n name_and_version = self.name_and_version_for_iid(iid=IID)\n action_description = self.action_type_to_progress_message[action_type]\n previous_iid = IID\n if an_action: # ~ was specified in yaml\n actions = config_vars.resolve_str_to_list(an_action)\n for action in actions:\n actions_of_iid_count += 1\n message = f\"{name_and_version} {action_description} {actions_of_iid_count}\"\n retVal += EvalShellCommand(action, message, self.python_batch_names)\n return retVal",
"def _push_one(self, f, **kwargs):\n\n d = self.UploaderClass(f, **kwargs)\n\n # Submit the data to the database\n d.submit(self.session)\n self.uploaded += 1",
"def set_action(self, action):\n if action not in self.images:\n raise Exception('Action not defined for {}'.format(\n self.__name__\n ))\n self._action_i = 0\n self._action = action",
"def request_action(reqID, action):\n req = get_ride_request(reqID)\n req.status = action.lower().title()\n req.save();",
"def _touch_file(self, file_id):\n if file_id in self.touch_list:\n self.touch_list.remove(file_id)\n self.touch_list.append(file_id)",
"def _assignUIDs(self):\n for messagePath in self.maildir:\n\n messageFile = os.path.basename(messagePath)\n\n if not messageFile in self.metadata['uids']:\n\n self.metadata['uids'][messageFile] = self.metadata['uidnext']\n\n self.metadata['uidnext'] += 1\n\n self.saveMetadata()",
"def reset_processed_ids_file():\n src = test_config[\"processed_tweets_ids_src_path\"]\n dst = test_config[\"overwrite\"][\"processed_tweets_ids_path\"]\n copyfile(src, dst)",
"def _action_to_perform(self, ids, operationParams , default={}):\n full_ids=[]\n status=operationParams['status'] \n action=operationParams['action']\n docaction=operationParams['docaction']\n excludeStatuses=operationParams['excludeStatuses']\n includeStatuses=operationParams['includeStatuses']\n \n stopFlag,allIDs=self._get_recursive_parts(ids, excludeStatuses, includeStatuses)\n self._action_ondocuments(allIDs,docaction, status)\n if action:\n idMoves=move_workflow(self, allIDs, action, status)\n self.logging_workflow(idMoves, action, status)\n objId=self.browse(allIDs).with_context({'internal_writing':True}).write(default)\n if objId:\n wf_message_post(self, allIDs, body='Status moved to: {status}.'.format(status=status))\n return objId",
"def process_actions(actions, user):\n \n process = {\n 'add_task': add_task,\n 'rem_task': rem_task,\n 'add_list': add_list,\n 'rem_list': rem_list,\n 'move_list': move_list,\n 'edit_list': edit_list,\n 'edit_item': edit_item,\n 'add_board': add_board,\n 'rem_board': rem_board,\n 'edit_board': edit_board,\n }\n \n #TODO: handle errors\n modified_lists = dict()\n tmp_id_to_new_ids = [] # [(<tmp id>, <new id>), ..]\n \n while actions:\n \n action = actions.pop(0)\n \n try:\n fn = process.get(action['type'])\n if not fn: raise ActionDoesNotExist # the demanded action does not exist\n \n returned_list = fn(action, user)\n except (InsufficientPermissions, ActionDoesNotExist, List.DoesNotExist, Item.DoesNotExist):\n # Cannot modify this list\n # Add errors in the response\n continue\n \n # If there's a return value\n if returned_list and isinstance(returned_list, List): modified_lists[returned_list.id] = returned_list\n \n if action['type'] == 'add_list':\n # Change the temporary id to the new id in all the remaining actions\n for x in actions: \n if x.get('listId') == action['listId']: \n x['listId'] = returned_list.id\n tmp_id_to_new_ids.append((action['listId'], returned_list.id))\n \n return modified_lists, tmp_id_to_new_ids",
"def upload_all_completed_analyses(context: click.Context, pipeline: Pipeline = None):\n\n LOG.info(\"----------------- AUTO -----------------\")\n\n status_db: Store = context.obj.status_db\n\n exit_code = 0\n for analysis_obj in status_db.get_analyses_to_upload(pipeline=pipeline):\n if analysis_obj.family.analyses[0].uploaded_at is not None:\n LOG.warning(\n f\"Skipping upload for case {analysis_obj.family.internal_id}. \"\n f\"It has been already uploaded at {analysis_obj.family.analyses[0].uploaded_at}.\"\n )\n continue\n\n case_id = analysis_obj.family.internal_id\n LOG.info(\"Uploading analysis for case: %s\", case_id)\n try:\n context.invoke(upload, case_id=case_id)\n except Exception:\n LOG.error(f\"Case {case_id} upload failed\")\n LOG.error(traceback.format_exc())\n exit_code = 1\n\n sys.exit(exit_code)",
"def report_action(self, action_name):\n last_index = self.configuration.results.shape[0] - 1\n self.configuration.results.loc[last_index, 'action'] = action_name",
"def progress(self, id):",
"def progress(self, id):",
"def unique_files(self):\n self._tempfiles[-1].ctr = -1",
"def addPostStartAction ( action ) :\n global __Bender_PostStart_Actions\n if action : __Bender_PostStart_Actions.append ( action ) \n return tuple(__Bender_PostStart_Actions)",
"def completed_file(self, context):",
"def get_action_id(output):\n return output['Action queued with id']",
"def actions(self, actions):\n\n self._actions = actions",
"def actions(self, actions):\n\n self._actions = actions",
"def addAction(self, **kwargs):\n\n try:\n colour = kwargs[\"fname\"]\n except:\n rospy.logwarn(\"Could not get the current action selection\")\n\n try:\n action = kwargs['action']\n except:\n action = self.mm.default_values[self.mm.modes[self.mm.cur_mode]]\n\n if action in self.bl.getAllSavedActions():\n # if selected action modifies same block, add actions\n new = self.bl.baxter_actions[action]['joint_position']\n rospy.loginfo('New action is: %s' % str(new))\n if len(self.actionSequence) > 0 and colour == self.actionSequence[-1][0]:\n last_action = self.actionSequence[-1][1]\n new_action = tuple(map(operator.add, last_action, new))\n self.actionSequence[-1] = (colour, new_action)\n rospy.loginfo('Updated action to %s for %s block' % (str(self.actionSequence[-1][1]), colour))\n\n else:\n self.actionSequence.append((str(colour),new))\n rospy.loginfo('Added action %s for %s block' % (str(new), colour))\n self.mm.loadPreviousMenu()\n else:\n rospy.logwarn(\"Action does not exist. Skip action.\")\n self.mm.neglect()"
]
| [
"0.6022013",
"0.5814392",
"0.56513506",
"0.5365241",
"0.5296398",
"0.5252554",
"0.524484",
"0.5203079",
"0.5201009",
"0.518607",
"0.51719576",
"0.5156348",
"0.51541024",
"0.5147858",
"0.51136595",
"0.5083526",
"0.5081598",
"0.50777394",
"0.50723493",
"0.5058067",
"0.50570077",
"0.50564504",
"0.50564504",
"0.50536877",
"0.505346",
"0.503479",
"0.5029082",
"0.50156254",
"0.50156254",
"0.49914086"
]
| 0.60786474 | 0 |
Scans the analyzers collection for analyzer modules that were executed and performs validation on their generated observations. If they are ok, the observations are committed to the observatory and a new action is inserted into the action_log. | def check_for_analyzers(self):
executed = self.analyzer_state.executed_analyzers()
for analyzer in executed:
# check for wish
if self.analyzer_state.check_wish(analyzer, 'cancel'):
print("validator: cancelled {} upon request".format(analyzer['_id']))
continue
print("validating and committing {}".format(analyzer['_id']))
self.analyzer_state.transition(analyzer['_id'], 'executed', 'validating')
exe_res = analyzer['execution_result']
temporary_coll = self.cc.temporary_db[exe_res['temporary_coll']]
if exe_res['timespans'] is not None and exe_res['upload_ids'] is not None:
self.analyzer_state.transition_to_error(analyzer['_id'],
'internal error: either timespans or upload_ids can have a '
'value but not both. I cannot decide if direct or normal analyzer')
continue
if exe_res['timespans'] is None and exe_res['upload_ids'] is None:
self.analyzer_state.transition_to_error(analyzer['_id'],
'internal error: it\'s not allowed to have both timespans and upload_ids to be None. '
'I cannot decide if direct or normal analyzer')
continue
if exe_res['upload_ids'] is not None:
print("using direct commit")
valid_count, errors, action_id = commit_direct(
analyzer['_id'], analyzer['working_dir'], self._action_id_creator,
exe_res['upload_ids'], exe_res['max_action_id'], temporary_coll,
self.cc.observations_coll, analyzer['output_types'],
self.cc.action_log)
else:
print("using normal commit")
valid_count, errors, action_id = commit_normal(
analyzer['_id'], analyzer['working_dir'], self._action_id_creator,
exe_res['timespans'], exe_res['max_action_id'], temporary_coll,
self.cc.observations_coll, analyzer['output_types'],
self.cc.action_log)
if len(errors) > 0:
print("analyzer {} with action id {} has at least {} valid records but {} have problems:".format(analyzer['_id'], action_id, valid_count, len(errors)))
for idx, error in enumerate(errors):
print("{}: {}".format(idx, error))
self.analyzer_state.transition_to_error(analyzer['_id'], 'error when executing validator:\n' + '\n'.join((str(error) for error in errors)))
else:
print("successfully commited analyzer {} run with action id {}. {} records inserted".format(analyzer['_id'], action_id, valid_count))
self.analyzer_state.transition(analyzer['_id'], 'validating', 'sensing', {'action_id': action_id}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check(self):\r\n for action in self._actions:\r\n action.check()",
"def run_analyzers(args: argparse.Namespace):\n log = logging.getLogger(\"run\")\n model_repository = create_model_repo_from_args(args)\n log.info(\"Created %s\", model_repository)\n if args.request_server == \"auto\":\n data_request_address = \"%s:10301\" % args.server.split(\":\")[0]\n else:\n data_request_address = args.request_server\n data_service = DataService(data_request_address)\n log.info(\"Created %s\", data_service)\n sys.path.append(os.getcwd())\n manager = AnalyzerManager(\n analyzers=[importlib.import_module(a).analyzer_class for a in args.analyzer],\n model_repository=model_repository,\n data_service=data_service,\n )\n sys.path = sys.path[:-1]\n log.info(\"Created %s\", manager)\n listener = EventListener(address=args.server, handlers=manager, n_workers=args.workers)\n log.info(\"Created %s\", listener)\n listener.start()\n log.info(\"Listening %s\", args.server)\n listener.block()\n model_repository.shutdown()\n data_service.shutdown()",
"def check_for_work(self):\n print(\"validator: check for work\")\n self.check_for_analyzers()\n self.check_for_uploads()\n self.check_for_requests()",
"def check_for_uploads(self):\n\n def set_action_id_ops() -> Sequence[Tuple[UpdateOne, InsertOne]]:\n find_query = {\n 'complete': True,\n self.action_id_name: {'$exists': False},\n 'meta.format': {'$exists': True},\n 'meta.start_time': {'$exists': True},\n 'meta.stop_time': {'$exists': True}\n }\n\n # apply filter from environment config\n if isinstance(self.cc.validator_upload_filter, dict):\n find_query.update(self.cc.validator_upload_filter)\n\n cursor = self.cc.metadata_coll.find(find_query).sort('timestamp')\n for upload in cursor:\n action_id = self._action_id_creator()\n print(\"assign action id {} to upload {}\".format(action_id, upload['_id']))\n uploads_query = UpdateOne({'_id': upload['_id']},\n {'$set': {self.action_id_name: action_id,\n self.valid_name: True}})\n\n timespans = [(upload['meta']['start_time'], upload['meta']['stop_time'])]\n\n action_log_query = InsertOne({\n '_id': action_id,\n 'output_formats': [upload['meta']['format']],\n 'timespans': timespans,\n 'action': 'upload',\n 'upload_ids': [upload['_id']]\n })\n\n yield uploads_query, action_log_query\n\n try:\n for uploads_block, action_log_block in grouper_transpose(set_action_id_ops(), 1000):\n self.cc.metadata_coll.bulk_write(uploads_block)\n self.cc.action_log.bulk_write(action_log_block)\n except BulkWriteError as e:\n # most likely a configuration error\n print(e.details)\n raise",
"def analyze(self):\r\n if not self.is_failed():\r\n try:\r\n # Check for the OSPL error log file:\r\n self.check_for_ospl_error_log()\r\n\r\n # Get test hosts:\r\n hosts = {}\r\n hosts[\"Pub\"] = self.get_host_by_role(\"Pub\")[0]\r\n hosts[\"Sub\"] = self.get_host_by_role(\"Sub\")[0]\r\n\r\n # Read node mopnitor logs:\r\n app_log_contents = {}\r\n app_log_contents[\"Pub\"] = self.parser.get_process_log_content(\r\n self.log_root,\r\n hosts[\"Pub\"],\r\n \"dds2734_publisher\")\r\n app_log_contents[\"Sub\"] = self.parser.get_process_log_content(\r\n self.log_root,\r\n hosts[\"Sub\"],\r\n \"dds2734_subscriber\")\r\n\r\n for index in app_log_contents.keys():\r\n if len(app_log_contents[index]) == 0:\r\n raise TestError(\"DDS2734TestScenario::analyze - empty application log for node [%s]\"% hosts[index].get_host_name())\r\n\r\n # Check test case expected result:\r\n if self.type == DDS2734TransLocTestScenario.TRANSLOC_NORMAL:\r\n self.check_transloc_normal(app_log_contents)\r\n elif self.type == DDS2734TransLocTestScenario.TRANSLOC_LATE_READER:\r\n self.check_transloc_late_reader(app_log_contents)\r\n elif self.type == DDS2734TransLocTestScenario.TRANSLOC_LATE_NODE:\r\n self.check_transloc_late_node(app_log_contents)\r\n elif self.type == DDS2734TransLocTestScenario.TRANSLOC_TOO_LATE_READER:\r\n self.check_transloc_too_late_reader(app_log_contents)\r\n elif self.type == DDS2734TransLocTestScenario.TRANSLOC_TOO_LATE_NODE:\r\n self.check_transloc_too_late_node(app_log_contents)\r\n \r\n except:\r\n self.fail()\r\n self.errors.append(\"Cannot analyze results: %s %s\"% (sys.exc_info()[0], sys.exc_info()[1]))\r\n\r\n # Call parent analyze to create log file:\r\n BaseTestScenario.analyze(self)",
"def analyze(self):\n dataset = self.config.dataset\n class_config = dataset.class_config\n\n scene_id_to_cfg = {s.id: s for s in dataset.all_scenes}\n\n @lru_cache(maxsize=len(dataset.all_scenes))\n def build_scene(scene_id: str) -> Scene:\n cfg = scene_id_to_cfg[scene_id]\n scene = cfg.build(\n class_config, self.tmp_dir, use_transformers=False)\n return scene\n\n # build and run each AnalyzerConfig for each scene group\n for a in self.config.analyzers:\n for group_name, group_ids in dataset.scene_groups.items():\n if len(group_ids) == 0:\n log.info(f'Skipping scene group \"{group_name}\". '\n 'Empty scene group.')\n continue\n group_scenes = (build_scene(id) for id in group_ids)\n analyzer = a.build(scene_group=(group_name, group_scenes))\n\n log.info(f'Running {type(analyzer).__name__} on '\n f'scene group \"{group_name}\"...')\n analyzer.process(group_scenes, self.tmp_dir)",
"def run(self):\n all_groups_settings, iam_groups_settings = self._retrieve()\n all_violations = self._find_violations(all_groups_settings,\n iam_groups_settings)\n self._output_results(all_violations)",
"def RunChecks(self):\n results = []\n\n affected_files = self.input_api.AffectedFiles(\n file_filter=self.file_filter, include_deletes=False)\n affected_js_files = filter(\n lambda f: f.LocalPath().endswith('.js'), affected_files)\n\n if affected_js_files:\n self.input_api.logging.info(\n 'Running appengine eslint on %d JS file(s)', len(affected_js_files))\n results += self.RunESLintChecks(affected_js_files)\n\n\n if results:\n results.append(self.output_api.PresubmitNotifyResult(\n 'See the JavaScript style guide at https://goo.gl/Ld1CqR.'))\n\n return results",
"def check(self):\n try:\n if self.is_compiled:\n # skip compiled (Cythonized) files because pyanalyze will misinterpret the\n # AST in some cases (for example, if a function was cdefed)\n return []\n if self.module is None:\n # If we could not import the module, other checks frequently fail.\n return self.all_failures\n with qcore.override(self, \"state\", VisitorState.collect_names):\n self.visit(self.tree)\n with qcore.override(self, \"state\", VisitorState.check_names):\n self.visit(self.tree)\n # This doesn't deal correctly with errors from the attribute checker. Therefore,\n # leaving this check disabled by default for now.\n self.show_errors_for_unused_ignores(ErrorCode.unused_ignore)\n self.show_errors_for_bare_ignores(ErrorCode.bare_ignore)\n if self.unused_finder is not None and not self.has_file_level_ignore():\n self.unused_finder.record_module_visited(self.module)\n except node_visitor.VisitorError:\n raise\n except Exception as e:\n self.show_error(\n None,\n \"%s\\nInternal error: %r\" % (traceback.format_exc(), e),\n error_code=ErrorCode.internal_error,\n )\n # Recover memory used for the AST. We keep the visitor object around later in order\n # to show ClassAttributeChecker errors, but those don't need the full AST.\n self.tree = None\n self._lines.__cached_per_instance_cache__.clear()\n self._argspec_to_retval.clear()\n return self.all_failures",
"def process(self):\n\t\tif self.update_check():\n\t\t\tself.ingest_all()\n\t\t\tself.update_totals()\n\t\telse:\n\t\t\tlog.info('PHE cases up to date')",
"def list_analyzers(args: argparse.Namespace):\n first = True\n queue = [tuple(c) + (\"lookout.\",) for c in pkgutil.iter_modules(lookout.__path__)]\n while queue:\n importer, name, ispkg, prefix = queue.pop(0)\n\n if not ispkg or name == \"core\":\n continue\n\n m = importer.find_module(name).load_module(name)\n if getattr(m, \"__meta__\", False):\n queue.extend(tuple(c) + (prefix + name + \".\",)\n for c in pkgutil.iter_modules(m.__path__))\n continue\n\n try:\n cls = m.analyzer_class\n except AttributeError:\n continue\n if first:\n first = False\n else:\n print()\n print(prefix + name)\n print(\"\\t%s\" % cls.version)\n print(\"\\t\" + cls.description)",
"def cingValidation(self): \n \n self.cingRun()\n \n self.analyseCingResults()",
"def analyse(self):\n pass",
"def validate(self):\n\n # start validate\n self.model.eval()\n preds, labels = [], []\n for batch_idx, data in enumerate(self.valid_dataloader):\n # calculate and log losses\n losses_report, valid_preds, valid_labels = self.forward_one_batch(\n data)\n self._update_losses(losses_report, train=False)\n\n preds.append(valid_preds)\n labels.append(valid_labels)\n\n preds = np.concatenate(preds, axis=0)\n labels = np.concatenate(labels, axis=0)\n if IS_REG:\n preds = disc(preds)\n # calculate and log metrics\n metrics_report = self.evaluate_metrics(preds, labels)\n self._update_metrics(metrics_report, train=False)\n\n # TODO: lr scheduler step setting\n self.lr_scheduler.step(self.valid_loss_meters['CrossEntropyLoss'].avg)\n\n # end validate\n self.model.train()",
"def validate(self):\n self.set_model_mode('eval')\n self.evaluator.reset()\n losses = MetricMeter()\n\n print('Do evaluation on {} set'.format('valid set'))\n data_loader = self.val_loader\n assert data_loader is not None\n for batch_idx, batch in enumerate(data_loader):\n input, label = self.parse_batch_test(batch)\n loss = self.forward_backward(batch, backprob=False)\n losses.update(loss)\n # total_loss += loss['loss']\n output = self.model_inference(input)\n self.evaluator.process(output, label)\n\n results = self.evaluator.evaluate()\n total_loss = losses.meters['loss_x'].avg\n\n for k, v in results.items():\n tag = '{}/{}'.format('validation', k)\n self.write_scalar(tag, v, self.epoch)\n # if full_results:\n return [total_loss,losses.dict_results(),results]\n # return total_loss",
"def _run_actions(self):\n\n if \"install-bento\" in self.actions:\n self._do_action_bento_setup()\n\n if \"create-tables\" in self.actions:\n self._do_action_tables_create()\n\n if \"import-ratings\" in self.actions:\n self._do_action_import_ratings()\n\n if \"import-user-info\" in self.actions:\n self._do_action_import_user_info()\n\n if \"import-movie-info\" in self.actions:\n self._do_action_import_movie_info()\n\n if \"train-item-item-cf\" in self.actions:\n self._do_action_train()\n\n if \"register-freshener\" in self.actions:\n self._do_action_register_freshener()",
"def initAnalyzer():\n return controller.initAnalyzer()",
"def on_sanity_check_start(self):\n for callback in self.callbacks:\n callback.on_sanity_check_start(self, self.get_model())",
"def validate(self):\n self.filter_passing_hits()\n\n checks = {\"number of hits\":self.check_hits(),\n \"base pair count\":self.check_bp(),\n \"contig count\":self.check_contigs(),\n \"characters\": self.check_chars(),\n \"checksum\":not check_checksum(self.seqdata.checksum)}\n\n failed_checks = {(k, v) for k, v in checks.iteritems() if v is False}\n\n if failed_checks:\n \"\"\"\n replace this with logger, break would be replaced by a raised\n Exception where the Exception would be caught by the\n Sequence_Upload code\n \"\"\"\n for k, v in failed_checks:\n with open(generate_path(\"outputs/seq_errors.txt\"), \"a\") as file_:\n file_.write(\n '%s failed validation:'\n 'the %s was not valid\\n' %(self.seqdata.accession, k)\n )\n self.seqdata.valid = False\n else:\n self.seqdata.valid = True",
"def analyze_input():\n\n # Generate action_id classes for OF 1.3\n for wire_version, ordered_classes in of_g.ordered_classes.items():\n if not wire_version in [of_g.VERSION_1_3]:\n continue\n classes = versions[of_g.of_version_wire2name[wire_version]]['classes']\n for cls in ordered_classes:\n if not loxi_utils.class_is_action(cls):\n continue\n action = cls[10:]\n if action == '' or action == 'header':\n continue\n name = \"of_action_id_\" + action\n members = classes[\"of_action\"][:]\n of_g.ordered_classes[wire_version].append(name)\n if type_maps.action_id_is_extension(name, wire_version):\n # Copy the base action classes thru subtype\n members = classes[\"of_action_\" + action][:4]\n classes[name] = members\n\n # @fixme If we support extended actions in OF 1.3, need to add IDs\n # for them here\n\n for wire_version in of_g.wire_ver_map.keys():\n version_name = of_g.of_version_wire2name[wire_version]\n calculate_offsets_and_lengths(\n of_g.ordered_classes[wire_version],\n versions[version_name]['classes'],\n wire_version)",
"def run_all(self):\n\n self.run_mash() ###Run MASH analysis\n self.filter_query() ###Filter fasta sequences out based on p value\n self.build_index(self.filtered_out_path) ###Build index for off-target analysis\n os.remove(self.filtered_out_path) ###Clean up intermediate fasta file\n self.format_gRNA(self.path1) ###Format everything in the right order\n self.run_OTF() ###Run off-target analysis\n self.output_parse() ###Parse output values and update table",
"def flush_analysis_data(self):\n self.writer.write_bulk(zip(self.analyzed_types, self.analyzed))\n self.analyzed_types = []\n self.analyzed = []",
"async def organize_analyses(db):\n logger.info(\" • analyses\")\n\n motor_client = db.motor_client\n\n await delete_unready(motor_client.analyses)\n await add_original_reference(motor_client.analyses)\n\n if await motor_client.analyses.count({\"diagnosis.virus\": {\"$exists\": True}}):\n async for document in motor_client.references.find({}, [\"name\"]):\n query = {\n \"reference.id\": document[\"_id\"],\n \"reference.name\": {\n \"$ne\": document[\"name\"]\n }\n }\n\n await motor_client.analyses.update_many(query, {\n \"$set\": {\n \"reference.name\": document[\"name\"]\n }\n })\n\n query = {\n \"algorithm\": \"pathoscope_bowtie\",\n \"diagnosis.virus\": {\n \"$exists\": True\n }\n }\n\n buffer = list()\n\n async for document in motor_client.analyses.find(query, [\"diagnosis\"]):\n diagnosis = document[\"diagnosis\"]\n\n for hit in diagnosis:\n hit[\"otu\"] = hit.pop(\"virus\")\n\n op = UpdateOne({\"_id\": document[\"_id\"]}, {\n \"$set\": {\n \"diagnosis\": diagnosis\n }\n })\n\n buffer.append(op)\n\n if len(buffer) == 40:\n await db.motor_client.analyses.bulk_write(buffer)\n buffer = list()\n\n if len(buffer):\n await db.motor_client.analyses.bulk_write(buffer)",
"def execute(self):\n for action in self.actions:\n self._logger.info('[~] Executing %s.', action)\n self._execute_action(action)",
"def analyze(self):\n\n self.makeSessions()\n self.collectPlayers()\n self.__analyze()",
"def on_sanity_check_end(self):\n for callback in self.callbacks:\n callback.on_sanity_check_end(self, self.get_model())",
"def update_articles_analysis(articles):\n print(\"Updating \" + str(len(articles)) + \" articles\")\n for article in articles:\n if not article.textIsAnalyzed:\n # Try to analyze the text again if the first time failed\n article.analyze_sentiment()\n article.analyze_facebook()\n article.save()",
"def update_analysis(self):\n # Read our analysis scripts into an internal structure\n self.analysis_list = Configs.import_analysis_scripts(\n self.analysis_directory)",
"def analyze(collector):\n mod_list = [\"Oxidation\", \"Deamidated\", \"Methyl\", \"Acetyl\", \"Phospho\"]\n fieldnames = (\n [\"approach\", \"count_type\", \"validation_engine\", \"unmodified\", \"multimodified\"]\n + mod_list\n + [\"total\"]\n )\n\n csv_writer = csv.DictWriter(open(\"ungrouped_results.csv\", \"w\"), fieldnames)\n csv_writer.writeheader()\n uc = ursgal.UController()\n uc.params[\"validation_score_field\"] = \"PEP\"\n uc.params[\"bigger_scores_better\"] = False\n\n # Count the number of identified peptides and PSMs for the different modifications\n # Spectra with multiple PSMs are sanitized, i.e. only the PSM with best PEP score is counted\n # and only if the best hit has a PEP that is at least two orders of\n # magnitude smaller than the others\n for validation_engine, result_file in collector.items():\n counter_dict = {\"psm\": ddict(set), \"pep\": ddict(set)}\n grouped_psms = uc._group_psms(\n result_file, validation_score_field=\"PEP\", bigger_scores_better=False\n )\n for spec_title, grouped_psm_list in grouped_psms.items():\n best_score, best_line_dict = grouped_psm_list[0]\n if len(grouped_psm_list) > 1:\n second_best_score, second_best_line_dict = grouped_psm_list[1]\n best_peptide_and_mod = (\n best_line_dict[\"Sequence\"] + best_line_dict[\"Modifications\"]\n )\n second_best_peptide_and_mod = (\n second_best_line_dict[\"Sequence\"]\n + second_best_line_dict[\"Modifications\"]\n )\n\n if best_peptide_and_mod == second_best_peptide_and_mod:\n line_dict = best_line_dict\n elif best_line_dict[\"Sequence\"] == second_best_line_dict[\"Sequence\"]:\n if best_score == second_best_score:\n line_dict = best_line_dict\n else:\n if (-1 * math.log10(best_score)) - (\n -1 * math.log10(second_best_score)\n ) >= 2:\n line_dict = best_line_dict\n else:\n continue\n else:\n if (-1 * math.log10(best_score)) - (\n -1 * math.log10(second_best_score)\n ) >= 2:\n line_dict = best_line_dict\n else:\n continue\n else:\n line_dict = best_line_dict\n\n count = 0\n for mod in mod_list:\n if mod in line_dict[\"Modifications\"]:\n count += 1\n key_2_add = \"\"\n if count == 0:\n key_2_add = \"unmodified\"\n elif count >= 2:\n key_2_add = \"multimodified\"\n elif count == 1:\n for mod in mod_list:\n if mod in line_dict[\"Modifications\"]:\n key_2_add = mod\n break\n # for peptide identification comparison\n counter_dict[\"pep\"][key_2_add].add(\n line_dict[\"Sequence\"] + line_dict[\"Modifications\"]\n )\n # for PSM comparison\n counter_dict[\"psm\"][key_2_add].add(\n line_dict[\"Spectrum Title\"]\n + line_dict[\"Sequence\"]\n + line_dict[\"Modifications\"]\n )\n for counter_key, count_dict in counter_dict.items():\n dict_2_write = {\n \"approach\": \"ungrouped\",\n \"count_type\": counter_key,\n \"validation_engine\": validation_engine,\n }\n total_number = 0\n for key, obj_set in count_dict.items():\n dict_2_write[key] = len(obj_set)\n total_number += len(obj_set)\n dict_2_write[\"total\"] = total_number\n csv_writer.writerow(dict_2_write)\n return",
"def launch(self,\n train_data,\n validate_data,\n logger,\n epochs,\n start_epoch=0):\n\n for e in range(start_epoch, start_epoch + epochs):\n for name, loss in self.train(train_data).items():\n logger.record(name, loss, e)\n for name, loss in self.validate(validate_data).items():\n logger.record(name, loss, e)"
]
| [
"0.6238496",
"0.5721421",
"0.5691736",
"0.56328654",
"0.5553783",
"0.54151267",
"0.53628486",
"0.53609884",
"0.53175175",
"0.5306766",
"0.5306371",
"0.52737206",
"0.5269958",
"0.5251529",
"0.524727",
"0.5210122",
"0.51994556",
"0.519592",
"0.5194466",
"0.51939213",
"0.5181274",
"0.5177928",
"0.51756257",
"0.51690114",
"0.51494366",
"0.5133787",
"0.5121736",
"0.51215726",
"0.5117803",
"0.51146257"
]
| 0.73985136 | 0 |
Cleanup the oslo.messaging layer. | def cleanup():
global TRANSPORT, NOTIFIER
assert TRANSPORT is not None
assert NOTIFIER is not None
TRANSPORT.cleanup()
TRANSPORT = NOTIFIER = None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cleanup(self):\n self.msgmap.clear()\n self.droppedmsgs.clear()\n self.chan.stop_receiving_messages()\n\n # TODO: enable\n #self.cmdMap.clear()\n #self.cmdCliSubmitQueue.clear()\n #self.cmdSvrComputeQueue.clear()\n #self.droppedCommands.clear()\n #self.ch.stop_receiving_commands()",
"def cleanup(self):\n\t\tself.pb.cleanup()\n\t\tsys.exit()",
"def cleanup(self):\n if hasattr(self, \"reply_channel\"):\n del self.factory.reply_protocols[self.reply_channel]",
"async def do_cleanup(rcv_trans, rcv_proto):\n\n log.info(\"Closing AMQP receive channel ...\")\n await rcv_proto.close()\n rcv_trans.close()",
"def cleanup(self):\n self.sock.close()",
"def test_cleanup_sync(self):\n msg_helper = MessageHelper()\n self.assertEqual(msg_helper.cleanup(), None)",
"def cleanup():\n # global the_vx_ifc\n print(\"\\n cleaning up...\", end=' ')\n the_vx_ifc.close()\n print('connections closed\\n')",
"def cleanup(self):\n # Removing the ROS system wide advert about which topic are interfaced with this process\n # TODO : lock this for concurrent access\n if_topics = rospy.get_param('~' + TopicBack.IF_TOPIC_PARAM, [])\n if_topics.remove(self.fullname)\n rospy.set_param('~' + TopicBack.IF_TOPIC_PARAM, if_topics)\n\n # cleanup pub and sub, so we can go through another create / remove cycle properly\n self._remove_pub(self.pub)\n self._remove_sub(self.sub)",
"def cleanup(self):\n pass",
"def cleanup(self):\n pass",
"def cleanup(self):\n pass",
"def cleanup(self):\n pass",
"def cleanup(self):\n pass",
"def cleanup(self):\n pass",
"def cleanup(self):\n pass",
"def cleanup(self):\n pass",
"def cleanup(self):\n pass",
"def cleanup(self):\n pass",
"def cleanup(self):\n pass",
"def cleanup (self):\n pass",
"def cleanup():",
"def test_cleanup_sync(self):\n md_helper = MessageDispatchHelper(None, None)\n self.assertEqual(md_helper.cleanup(), None)",
"async def _try_to_clean(self, ctx: Context):\n if self.cleanup:\n try:\n await ctx.channel.delete_messages(self._messages)\n except:\n pass",
"def cleanup(self):\r\n pass",
"def cleanup(self):\r\n pass",
"def cleanup(self):\n\n pass",
"def network_cleanup(self, args):\n pass",
"def _cleanup():\n global _dispatcher\n if _dispatcher is None:\n return\n\n pyepics_compat.get_pv = pyepics_compat._get_pv\n\n if _dispatcher.is_alive():\n _dispatcher.stop()\n\n _dispatcher = None",
"def cleanup(self):\r\n pass",
"def cleanup(self):\n self._socket.close()\n os.remove(_get_control_socket_path())"
]
| [
"0.7078813",
"0.6610845",
"0.65237373",
"0.6373199",
"0.63084286",
"0.6236432",
"0.62181246",
"0.62173826",
"0.6184065",
"0.6184065",
"0.6184065",
"0.6184065",
"0.6184065",
"0.6184065",
"0.6184065",
"0.6184065",
"0.6184065",
"0.6184065",
"0.6184065",
"0.6180252",
"0.6175269",
"0.6161132",
"0.61461604",
"0.61397284",
"0.61397284",
"0.61080986",
"0.61070377",
"0.61022305",
"0.6085911",
"0.6029372"
]
| 0.6849613 | 1 |
Create a SOAP envelope and convert the structure to XML | def create_soap_envelope(self, rpc_name, cwmp_version="cwmp-1-0", rpc_args="", cwmp_id=""):
log.debug_info("create_soap_envelope")
try:
dict_envelope_attrib = {'xmlns:SOAP-ENV':'http://schemas.xmlsoap.org/soap/envelope/',
'xmlns:SOAP-ENC':'http://schemas.xmlsoap.org/soap/encoding/',
'xmlns:xsd':'http://www.w3.org/2001/XMLSchema',
'xmlns:xsi':'http://www.w3.org/2001/XMLSchema-instance'}
dict_envelope_attrib['xmlns:cwmp'] = '' + 'urn:dslforum-org:'+ cwmp_version
# create an element
self.soap_envelope = Element('SOAP-ENV:Envelope', dict_envelope_attrib)
# set root of tree
self.soap._setroot(self.soap_envelope)
# create sub elemnts of soap_envelop
self.soap_header = Element('SOAP-ENV:Header')
self.soap_body = Element('SOAP-ENV:Body')
# add soap_header and soap_body to soap_envelope
self.soap_envelope.append(self.soap_header)
self.soap_envelope.append(self.soap_body)
# create sub elements of soap header
self.create_soap_header(cwmp_id)
# create sub elements of soap body
self.create_soap_body(rpc_name, rpc_args)
# convert structure to xml
self.str_xml = tostring(self.soap_envelope)
except Exception, e:
log.debug_err(e)
return CONSTRUCT_FAIL, e
return CONSTRUCT_SUC, "" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def buildxml(self):\n # assume self._objslock is already held here\n logger.info(\"Emane.buildxml()\")\n self.buildplatformxml()\n self.buildnemxml()\n self.buildtransportxml()\n self.buildeventservicexml()",
"def compile_SOAP_Message(self):\n SOAP_Message = \"\"\n SOAP_Message += self.SOAP_ENVELOPE_START\n SOAP_Message += self.action_Tag_Opener\n SOAP_Message += self.arguments_body\n SOAP_Message += self.action_Tag_Closer\n SOAP_Message += self.SOAP_ENVELOPE_END\n self.SOAP_Message = SOAP_Message",
"def create_xml(self, array):\n valid_fields = [\n 'acquirerId',\n 'commerceId',\n 'purchaseCurrencyCode',\n 'purchaseAmount',\n 'purchaseOperationNumber',\n 'billingAddress',\n 'billingCity',\n 'billingState',\n 'billingCountry',\n 'billingZIP',\n 'billingPhone',\n 'billingEMail',\n 'billingFirstName',\n 'billingLastName',\n 'language',\n 'commerceMallId',\n 'terminalCode',\n 'tipAmount',\n 'HTTPSessionId',\n 'shippingAddress',\n 'shippingCity',\n 'shippingState',\n 'shippingCountry',\n 'shippingZIP',\n 'shippingPhone',\n 'shippingEMail',\n 'shippingFirstName',\n 'shippingLastName',\n 'reserved1',\n 'reserved2',\n 'reserved3',\n 'reserved4',\n 'reserved5',\n 'reserved6',\n 'reserved7',\n 'reserved8',\n 'reserved9',\n 'reserved10',\n 'reserved11',\n 'reserved12',\n 'reserved13',\n 'reserved14',\n 'reserved15',\n 'reserved16',\n 'reserved17',\n 'reserved18',\n 'reserved19',\n 'reserved20',\n 'reserved21',\n 'reserved22',\n 'reserved23',\n 'reserved24',\n 'reserved25',\n 'reserved26',\n 'reserved27',\n 'reserved28',\n 'reserved29',\n 'reserved30',\n 'reserved31',\n 'reserved32',\n 'reserved33',\n 'reserved34',\n 'reserved35',\n 'reserved36',\n 'reserved37',\n 'reserved38',\n 'reserved39',\n 'reserved40',\n ]\n\n root = ET.Element('VPOSTransaction1.2')\n\n temp_dict = dict()\n taxes = dict()\n taxes_vals = dict()\n\n for key, value in array.items():\n if key in valid_fields:\n temp_dict[key] = value\n elif re.search(r'tax_([0-9]{1}|[0-9]{2})_name', key):\n re.sub(r'(^tax_)|(_name$)', '', key)\n taxes[key] = value\n else:\n raise AlignetError('%s is not allowed value in Alignet.') % key\n\n for key, value in temp_dict.items():\n elem = ET.SubElement(root, key)\n elem.text = value\n\n #TODO: If some taxes exist add to the XML doc\n if len(taxes):\n pass\n\n return ET.tostring(root, encoding='iso-8859-1')",
"def to_xml(self):\n \n root = ET.Element(\"Document\")\n root.set('xmlns',\"urn:iso:std:iso:20022:tech:xsd:pacs.008.001.02\")\n root_fito = ET.SubElement(root, \"FIToFICstmrCdtTrf\")\n \n self.xml_header(root_fito)\n self.xml_transaction(root_fito)\n\n ET.ElementTree(root)\n \n return ET.tostring(root,encoding='utf-8',xml_declaration=True).decode('utf-8')",
"def add_envelope(self) -> reapy.Envelope:\r\n ...",
"def dumps(structure):\n buffer = []\n env = { 'envelope': structure }\n __descend(buffer, 0, env)\n return \"\".join(buffer)",
"def generate(self):\n root = self.get_root_elt()\n\n shop = self.get_shop_elt()\n root.append(shop)\n\n shop.append(self.get_currencies_elt())\n shop.append(self.get_categories_elt())\n shop.append(self.get_offers_elt())\n \n return self.header + et.tostring(root)",
"def __init__(self, ocws_url, username, password, ocws_ca=True):\n namespaced_classname = '.'.join([__name__, self.__class__.__name__])\n self.log_ocws = logging.getLogger(namespaced_classname)\n self.log_ocws.setLevel(logging.INFO)\n envelope = et.Element(et.QName(self.ns_se, 'Envelope'))\n header = et.Element(et.QName(self.ns_se, 'Header'))\n body = et.Element(et.QName(self.ns_se, 'Body'))\n security_attrib = {'mustUnderstand': 'True'}\n security = et.Element(et.QName(self.ns_we, 'Security'), security_attrib)\n user_token = et.Element(et.QName(self.ns_we, 'UsernameToken'))\n user_name = et.Element(et.QName(self.ns_we, 'Username'))\n pass_word = et.Element(et.QName(self.ns_we, 'Password'))\n envelope.append(header)\n envelope.append(body)\n header.append(security)\n security.append(user_token)\n user_token.append(user_name)\n user_token.append(pass_word)\n user_name.text = username\n pass_word.text = hashlib.sha1(password.encode('utf-8')).hexdigest()\n self.envelope = et.tostring(envelope)\n if ocws_url[-1] == '/':\n self.ocws_url = '{0}ws/'.format(ocws_url)\n else:\n self.ocws_url = '{0}/ws/'.format(ocws_url)\n self.ocws_ca_bundle = ocws_ca",
"def GenerateXML(dictionary, fileName=\"labelling.xml\") : \n root = gfg.Element(\"annotation\") \n #the big section is called Annotation\n for key in dictionary:\n #for every polygon list in inside object witho subelement name and attributes and the type \"polygon\"\n objectElement = gfg.Element(\"object\") \n root.append(objectElement) \n subElement1 = gfg.SubElement(objectElement, \"name:\".strip(\":\"))\n subElement1.text = str(dictionary[key][\"name\"])\n subElement2 = gfg.SubElement(objectElement, \"attributes\".strip(\":\"))\n subElement2.text = str(dictionary[key][\"attributes\"])\n subElement3 = gfg.SubElement(objectElement, \"polygon\")\n \n for i in range(0, len(dictionary[key])-2):\n #for every vertex of the polygon list it's rounded x, y on xml\n SubInsidePolygon = gfg.SubElement(subElement3, \"pt\")\n sub_x = gfg.SubElement(SubInsidePolygon, \"x\")\n sub_y = gfg.SubElement(SubInsidePolygon, \"y\")\n sub_x.text = str(int(round(dictionary[key][\"x_y_\" + str(i)][0])))\n sub_y.text = str(int(round(dictionary[key][\"x_y_\" + str(i)][1])))\n tree = gfg.ElementTree(root) \n #create the xml tree\n with open (fileName, \"wb\") as files : \n tree.write(files) \n #if xml does not exist create one otherwise rewrite to it",
"def start_serialization(self):\n self.xml = SimplerXMLGenerator(self.stream, self.options.get(\"encoding\", settings.DEFAULT_CHARSET))\n self.xml.startDocument()\n self.xml.startElement(\"xliff\", {\n \"version\": \"1.2\",\n \"xmlns\": \"urn:oasis:names:tc:xliff:document:1.2\",\n \"xmlns:d\": \"https://docs.djangoproject.com/\"\n })",
"def xml_obj(dict):\n string = \"\"\n for key in dict.keys():\n string += ' <{}>{}</{}>\\n'.format(key, dict[key], key)\n return string",
"def toxml(self) :\n\t\treturn self.doc.toxml()",
"def toxml(self, root, outfile=None, envelope=False):\n\n try:\n\n self._mh.demsg('htk_on_debug_info', self._mh._trn.msg(\n 'datagen_xmlgen_write_sample'), self._mh.fromhere())\n ev = event.Event('xmlgen_before_write', root, outfile, envelope)\n if (self._mh.fire_event(ev) > 0):\n root = ev.argv(0)\n outfile = ev.argv(1)\n envelope = ev.argv(2)\n\n if (ev.will_run_default()):\n if (self._client == None):\n raise ValueError('Specification is not imported yet')\n\n if (envelope):\n ns = '{%s}' % 'http://schemas.xmlsoap.org/soap/envelope/'\n doc = Element(ns + 'Envelope')\n SubElement(doc, 'Header')\n body = SubElement(doc, 'Body')\n body.append(self._toxml_rec(root))\n else:\n doc = self._toxml_rec(root)\n\n outfile = 'sample.xml' if (outfile == None) else outfile\n with open(outfile, 'w') as f:\n f.write(tostring(\n doc, encoding='UTF-8', xml_declaration=True, pretty_print=True).decode())\n\n self._mh.demsg('htk_on_debug_info', self._mh._trn.msg(\n 'datagen_xmlgen_sample_written', outfile), self._mh.fromhere())\n ev = event.Event('xmlgen_after_write')\n self._mh.fire_event(ev)\n\n return True\n\n except (Exception, ValueError) as ex:\n self._mh.demsg(\n 'htk_on_error', 'error: {0}'.format(ex), self._mh.fromhere())\n return False",
"def create_gen_xml(self, out_file):\n\n param_list = []\n msg = []\n msg_type = []\n dep_node = []\n for line in self.full_ed_lines:\n param_list.append(line.text())\n dep_pkg = param_list[6].split(', ')\n if dep_pkg[len(dep_pkg) - 1] == '':\n dep_pkg.pop()\n for dep in self.manager.wid.sub_list:\n dep_node.append(dep['msg_type'])\n for dep in self.manager.wid.pub_list:\n dep_node.append(dep['msg_type'])\n for dep in dep_node:\n a, b = dep.split('/')\n msg.append(a)\n msg_type.append(b)\n f = open('../genkernel/templates/package_rosgen.xml')\n o = open(out_file, 'a')\n flag = 0\n while 1:\n line = f.readline()\n if not line: break\n for i in range(6):\n line = line.replace('[{0}]'.format(i), param_list[i])\n line = line.replace('[7]', param_list[7])\n if line.find('[6]') != -1:\n for dep in dep_pkg:\n line_dep = '\\t<depend>{0}</depend>\\n'.format(dep)\n o.write(line_dep)\n flag = 1\n elif line.find('[8]') != -1:\n for dep, tp in zip(msg, msg_type):\n line_dep = '\\t\\t<depend type=\"{1}\">{0}</depend>\\n'.format(dep, tp)\n o.write(line_dep)\n flag = 1\n elif line.find('<subscribers>') != -1:\n o.write('\\t\\t<subscribers>\\n')\n for sub in self.manager.wid.sub_list:\n o.write('\\t\\t\\t<sub>\\n')\n o.write('\\t\\t\\t\\t<name>{0}</name>\\n'.format(sub['name']))\n o.write('\\t\\t\\t\\t<msg_type>{0}</msg_type>\\n'.format(sub['msg_type']))\n o.write('\\t\\t\\t\\t<topic_name>{0}</topic_name>\\n'.format(sub['topic_name']))\n o.write('\\t\\t\\t\\t<queue_size>{0}</queue_size>\\n'.format(sub['queue_size']))\n o.write('\\t\\t\\t</sub>\\n')\n o.write('\\t\\t</subscribers>\\n')\n flag = 1\n elif line.find('<publishers>') != -1:\n o.write('\\t\\t<publishers>\\n')\n for pub in self.manager.wid.pub_list:\n o.write('\\t\\t\\t<pub>\\n')\n o.write('\\t\\t\\t\\t<name>{0}</name>\\n'.format(pub['name']))\n o.write('\\t\\t\\t\\t<msg_type>{0}</msg_type>\\n'.format(pub['msg_type']))\n o.write('\\t\\t\\t\\t<topic_name>{0}</topic_name>\\n'.format(pub['topic_name']))\n o.write('\\t\\t\\t\\t<queue_size>{0}</queue_size>\\n'.format(pub['queue_size']))\n o.write('\\t\\t\\t</pub>\\n')\n o.write('\\t\\t</publishers>\\n')\n flag = 1\n if flag == 0:\n o.write(line)\n else:\n flag = 0\n o.close()\n f.close()\n self.changed = False",
"def obj_to_xml(obj):\n # TODO convert object to xml without default namespace gracefully.\n try:\n xml = obj.toxml('utf-8')\n except pyxb.ValidationError as e:\n raise VantivException(e.details())\n xml = xml.replace(b'ns1:', b'')\n xml = xml.replace(b':ns1', b'')\n xml = xml.replace(b'vendorCreditCtx', b'vendorCredit')\n xml = xml.replace(b'vendorDebitCtx', b'vendorDebit')\n xml = xml.replace(b'submerchantCreditCtx', b'submerchantCredit')\n xml = xml.replace(b'submerchantDebitCtx', b'submerchantDebit')\n return xml",
"def buildeventservicexml(self):\n defaults = self.emane_config.getdefaultvalues()\n values = self.getconfig(None, \"emane\", self.emane_config.getdefaultvalues())[1]\n need_xml = False\n keys = (\"eventservicegroup\", \"eventservicedevice\")\n for k in keys:\n a = self.emane_config.valueof(k, defaults)\n b = self.emane_config.valueof(k, values)\n if a != b:\n need_xml = True\n\n if not need_xml:\n # reset to using default config\n self.initeventservice()\n return\n\n try:\n group, port = self.emane_config.valueof(\"eventservicegroup\", values).split(\":\")\n except ValueError:\n logger.exception(\"invalid eventservicegroup in EMANE config\")\n return\n dev = self.emane_config.valueof(\"eventservicedevice\", values)\n\n doc = self.xmldoc(\"emaneeventmsgsvc\")\n es = doc.getElementsByTagName(\"emaneeventmsgsvc\").pop()\n kvs = ((\"group\", group), (\"port\", port), (\"device\", dev), (\"mcloop\", \"1\"), (\"ttl\", \"32\"))\n xmlutils.add_text_elements_from_tuples(doc, es, kvs)\n filename = \"libemaneeventservice.xml\"\n self.xmlwrite(doc, filename)\n pathname = os.path.join(self.session.session_dir, filename)\n self.initeventservice(filename=pathname)",
"def buildxml2(self):\n # assume self._objslock is already held here\n logger.info(\"Emane.buildxml2()\")\n # on master, control network bridge added earlier in startup()\n ctrlnet = self.session.add_remove_control_net(net_index=0, remove=False, conf_required=False)\n self.buildplatformxml2(ctrlnet)\n self.buildnemxml()\n self.buildeventservicexml()",
"def createxmlmall():\r\n\r\n root = ET.Element(\"state\")\r\n model = ET.SubElement(root, \"model\")\r\n model.text = r\"\"\r\n\r\n dataid = ET.SubElement(root, \"dataids\")\r\n application = ET.SubElement(root, \"application\")\r\n\r\n application.text = \"SIBS Configurator\"\r\n safecookie = ET.SubElement(root, \"safecookie\")\r\n steps = ET.SubElement(root, \"steps\")\r\n prev = ET.SubElement(steps, \"prev\")\r\n\r\n lastproxy = ET.SubElement(root, \"last-proxy\").text = \"tcserver0\"\r\n\r\n tree = ET.ElementTree(root) # saves tree in variable \"tree\"\r\n return tree, safecookie, steps, prev",
"def _toxml_rec(self, root, obj=None, ns_cur=None):\n\n if (self._client == None):\n raise ValueError('Specification is not imported yet')\n\n try:\n\n if (obj == None):\n obj = self._client.factory.create(root)\n\n ns = '{%s}' % self._get_element_ns(obj.__class__.__name__)\n if (ns != '{None}' and ns != ns_cur):\n doc = Element(ns + root)\n else:\n doc = Element(root)\n ns = ns_cur\n\n for key in obj.__keylist__:\n subelem = obj[key]\n\n if (subelem == None):\n SubElement(doc, key).text = '?'\n elif (subelem == [] or '[]' in subelem.__str__()):\n inner_doc = self._toxml_rec(key, None, ns)\n if (inner_doc != None):\n doc.append(inner_doc)\n else:\n el_type = self._get_element_type(\n subelem.__class__.__name__)\n if (el_type == 'Simple'):\n SubElement(doc, key).text = '?'\n elif (el_type == 'Complex'):\n inner_doc = self._toxml_rec(key, subelem, ns)\n if (inner_doc != None):\n doc.append(inner_doc)\n\n return doc\n\n except TypeNotFound:\n return None",
"def format_xml(self,query_results):\n results=query_results.data\n factory=factory_xml()\n dump=factory.dumps({'data':results})\n print(dump)\n # TODO return output for this\n return \"\"",
"def ConvertToXML (given_dict) :\r\n stream_thing = cStringIO.StringIO()\r\n WriteToXMLStream(given_dict, stream_thing, 'top')\r\n return stream_thing.getvalue()",
"def make_eas_request(endpoint, parameters):\n\n # The valid XML request necessary to invoke the EAS SOAP interface.\n # Generated using SoapUI (http://www.soapui.org/)\n REQUEST_XML = '''<soapenv:Envelope xmlns:get=\"http://xmlns.oracle.com/apps/gms/soaprovider/plsql/gwu_gms_atp_pub/%s/\" xmlns:gwu=\"http://xmlns.oracle.com/apps/gms/soaprovider/plsql/gwu_gms_atp_pub/\" xmlns:soapenv=\"http://schemas.xmlsoap.org/soap/envelope/\">\n <soapenv:Header>\n <wsse:Security soapenv:mustUnderstand=\"1\" xmlns:wsse=\"http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-secext-1.0.xsd\" xmlns:wsu=\"http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd\">\n <wsse:UsernameToken wsu:Id=\"UsernameToken-13A8CEB13B8B04DC3C14058080562144\">\n <wsse:Username>GWATWS</wsse:Username>\n <wsse:Password Type=\"http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-username-token-profile-1.0#PasswordText\">%s</wsse:Password>\n <wsse:Nonce EncodingType=\"http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-soap-message-security-1.0#Base64Binary\">%s</wsse:Nonce>\n <wsu:Created>2014-07-19T22:14:16.214Z</wsu:Created>\n </wsse:UsernameToken>\n </wsse:Security>\n <gwu:SOAHeader>\n <!--Optional:-->\n <gwu:Responsibility>GW BANNER TO EAS MAPPING</gwu:Responsibility>\n <!--Optional:-->\n <gwu:RespApplication>GWU</gwu:RespApplication>\n <!--Optional:-->\n <gwu:SecurityGroup>STANDARD</gwu:SecurityGroup>\n <!--Optional:-->\n <gwu:NLSLanguage>AMERICAN</gwu:NLSLanguage>\n <!--Optional:-->\n <gwu:Org_Id>0</gwu:Org_Id>\n </gwu:SOAHeader>\n </soapenv:Header>\n <soapenv:Body>\n %s\n </soapenv:Body>\n</soapenv:Envelope>'''\n\n # Create a new requests Session with our custom adapter\n s = requests.Session()\n s.mount('https://', OracleAdapter())\n\n if settings.DEBUG:\n # Disable SSL verification if we're using SSH tunneling locally\n response = s.post(\n settings.EAS_URL,\n headers={\n 'Content-Type': 'text/xml'},\n data=REQUEST_XML %\n (endpoint,\n settings.EAS_PASSWORD,\n settings.EAS_NONCE,\n parameters),\n verify=False)\n else:\n # Submit the SOAP request\n response = s.post(\n settings.EAS_URL,\n headers={\n 'Content-Type': 'text/xml'},\n data=REQUEST_XML %\n (endpoint,\n settings.EAS_PASSWORD,\n settings.EAS_NONCE,\n parameters))\n\n return ET.fromstring(response.content)",
"def to_xml(self):\n xml_strings = ['<table name=\"%s\">\\n' % self.name]\n if self.tablespace_name:\n xml_strings.append(' <tablespace name=')\n xml_strings.append('\"%s\" />\\n' % self.tablespace_name)\n # Columns\n for column in self.columns:\n col_details = self.columns[column]\n xml_strings.append(' <column name=\"%s\"' % column)\n xml_strings.append(' data-type=\"%s\"' % col_details['type'])\n xml_strings.append(' sequence=\"%d\">\\n' % col_details['sequence'])\n # The following statement means we need Python 2.5 and above\n # pre 2.5 it would be col_details.has_key('length')\n if 'length' in col_details:\n xml_strings.append(' <length>')\n xml_strings.append(str(col_details['length']))\n xml_strings.append('</length>\\n')\n if 'precision' in col_details and col_details['precision'] != 0:\n xml_strings.append(' <precision>')\n xml_strings.append(str(col_details['precision'])+'</precision>\\n')\n if 'scale' in col_details:\n xml_strings.append(' <scale>')\n xml_strings.append(str(col_details['scale']))\n xml_strings.append('</scale>\\n')\n xml_strings.append(' </column>\\n')\n # Constraints\n for constraint, cons_details in list(self.constraints.items()):\n # Exclude check constraint that start with 'SYS_C' (an Oracle hack)\n if cons_details['type'] != 'Check' or not constraint.startswith(\"SYS_C\"):\n xml_strings.append(' <constraint name=\"%s\"' % constraint)\n xml_strings.append(' type=\"%s\">\\n' % cons_details['type'])\n if cons_details['type'] == 'Check':\n xml_strings.append(' <details>')\n xml_strings.append(cons_details['condition'])\n xml_strings.append('</details>\\n')\n if cons_details['type'] == 'Primary':\n for column in cons_details['columns']:\n xml_strings.append(' <column name=\"%s\" />\\n' % column)\n if cons_details['type'] == 'Foreign':\n xml_strings.append(' <jointable ')\n xml_strings.append(' name=\"%s\"' % cons_details['reftable'])\n xml_strings.append(' pk=\"%s\">\\n' % cons_details['refpk'])\n for col_index in range(len(cons_details['columns'])):\n xml_strings.append(' <constraintcolumn')\n name = cons_details['columns'][col_index]\n xml_strings.append(' name=\"%s\"' % name)\n xml_strings.append(' joincolumn')\n join_column = cons_details['refcolumns'][col_index]\n xml_strings.append('=\"%s\" />\\n' % join_column)\n xml_strings.append(' </jointable>\\n')\n xml_strings.append(' </constraint>\\n')\n # Indexes\n for index in self.indexes:\n index_details = self.indexes[index]\n xml_strings.append(' <index name=\"%s\"' % index)\n xml_strings.append(' type=\"%s\">\\n' % index_details['type'])\n for column in index_details['columns']:\n xml_strings.append(' <column name=\"%s\" />\\n' % column)\n xml_strings.append(' </index>\\n')\n # Triggers\n for trigger in self.triggers:\n xml_strings.append(self.triggers[trigger].to_xml())\n xml_strings.append('</table>')\n return \"\".join(xml_strings)",
"def build_obj(self, name, **kw):\n xml = self.client.factory.create(name)\n obj = dict()\n for k in dict(xml):\n obj[k] = kw.get(k) or ''\n return obj",
"def serialize(self):\n res = '<?xml version=\"1.0\" encoding=\"UTF-8\"?>'\n self.grammar_elem.attr[\"xmlns\"] = \\\n \"http://relaxng.org/ns/structure/1.0\"\n self.grammar_elem.attr[\"datatypeLibrary\"] = \\\n \"http://www.w3.org/2001/XMLSchema-datatypes\"\n for ns in self.namespaces:\n self.grammar_elem.attr[\"xmlns:\" + self.namespaces[ns]] = ns\n res += self.grammar_elem.start_tag()\n for ch in self.grammar_elem.children:\n res += ch.serialize()\n if not self.no_data:\n res += \"<start>\" + self.root.serialize() + \"</start>\"\n for d in self.defs:\n res += self.defs[d].serialize()\n if self.has_anyxml:\n res += self.anyxml_def\n return res + self.grammar_elem.end_tag()",
"def _make_envelope(\n self,\n sender_address: str,\n receiver_address: str,\n message_id: int = 1,\n target: int = 0,\n ):\n msg = DefaultMessage(\n dialogue_reference=(\"\", \"\"),\n message_id=message_id,\n target=target,\n performative=DefaultMessage.Performative.BYTES,\n content=b\"hello\",\n )\n envelope = Envelope(\n to=receiver_address,\n sender=sender_address,\n protocol_specification_id=DefaultMessage.protocol_specification_id,\n message=DefaultSerializer().encode(msg),\n )\n return envelope",
"def empty(cls) -> EnvelopeStructure:\n return _EmptyEnvelopeStructure()",
"def makeEnvelopeData(envelope):\n (event,path) = envelope.flatten()\n payload = event.getPayload()\n return simplejson.dumps([\"forward\",[path,event.getType(),event.getSource(),payload]])",
"def get_soap_vec(struct: Structure) -> NDArray:\n adaptor = AseAtomsAdaptor()\n species_ = [str(el) for el in struct.composition.elements]\n dummy_structure = struct.copy()\n for el in species_:\n dummy_structure.replace_species({str(el): DUMMY_SPECIES})\n soap_desc = SOAP(species=[DUMMY_SPECIES], r_cut=5, n_max=8, l_max=6, periodic=True)\n vecs = soap_desc.create(adaptor.get_atoms(dummy_structure))\n return vecs",
"def build_serializer(self):\n self._add_child_elements_recursive(self.get_root_element())"
]
| [
"0.6282234",
"0.5916893",
"0.580916",
"0.5804524",
"0.57287157",
"0.5636734",
"0.5617781",
"0.5475675",
"0.5451068",
"0.5409047",
"0.540611",
"0.53757817",
"0.53274316",
"0.53067744",
"0.5283993",
"0.5280287",
"0.52703387",
"0.5251362",
"0.5244601",
"0.52359813",
"0.5215179",
"0.5210486",
"0.5205354",
"0.5203453",
"0.5177332",
"0.5175277",
"0.5170954",
"0.5143063",
"0.51397604",
"0.51241386"
]
| 0.70732576 | 0 |
Look up the official Spore achievement name and text. Call before getting any achievements | def loadAchievementList():
global achievements
achievements = {}
doc = minidom.parse(urllib.urlopen(serverString + "/data/achievements.xml"))
for element in doc.getElementsByTagName("achievement"):
key = element.getElementsByTagName("id")[0].firstChild.data
name = element.getElementsByTagName("name")[0].firstChild.data
description = element.getElementsByTagName("description")[0].firstChild.data
achievements[key] = (name, description) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getInfo(self):\n self.name, self.description = achievements[self.id]",
"async def achievements(self, ctx: commands.Context):\r\n # The milestones for each achievement type\r\n milestones_dict_of_achievements = {\r\n 'times_entertained': [5, 25, 100, 250, 500, 1000, 5000, 10000, 100000, 1000000],\r\n 'times_fed': [5, 25, 25, 50, 100, 500, 1000, 10000, 100000, 1000000],\r\n 'times_cleaned': [5, 25, 100, 250, 500, 1000, 5000, 10000, 100000, 1000000],\r\n 'times_caught': [5, 25, 100, 250, 500, 1000, 5000, 10000, 100000, 1000000],\r\n 'tanks_owned': [1, 3, 5, 10],\r\n 'times_gambled': [5, 25, 100, 250, 500, 1000, 5000, 10000, 100000, 1000000],\r\n 'money_gained': [100, 500, 1000, 5000, 10000, 50000, 100000, 500000, 1000000, 10000000],\r\n }\r\n\r\n\r\n # Database variables\r\n async with self.bot.database() as db:\r\n user_achievement_milestone_data = await db(\"\"\"SELECT * FROM user_achievements_milestones WHERE user_id = $1\"\"\", ctx.author.id)\r\n user_achievement_data = await db(\"\"\"SELECT * FROM user_achievements WHERE user_id = $1\"\"\", ctx.author.id)\r\n tank_data = await db(\"\"\"SELECT tank FROM user_tank_inventory WHERE user_id = $1\"\"\", ctx.author.id)\r\n if not user_achievement_data:\r\n user_achievement_data = await db(\"\"\"INSERT INTO user_achievements (user_id) VALUES ($1) RETURNING *\"\"\", ctx.author.id)\r\n if not user_achievement_milestone_data:\r\n user_achievement_milestone_data = await db(\"\"\"INSERT INTO user_achievements_milestones (user_id) VALUES ($1) RETURNING *\"\"\", ctx.author.id)\r\n\r\n # Getting the users data into a dictionary for the embed and ease of access\r\n user_achievement_data_dict = {}\r\n for achievement_type_database, achievement_amount_database in user_achievement_data[0].items():\r\n if achievement_type_database != \"user_id\":\r\n user_achievement_data_dict[achievement_type_database] = achievement_amount_database\r\n\r\n # Getting the users amount of tanks and adding that to the user data dictionary\r\n tanks = 0\r\n if not tank_data:\r\n tanks = 0\r\n else:\r\n for tank in tank_data[0]['tank']:\r\n if tank is True:\r\n tanks += 1\r\n user_achievement_data_dict[\"tanks_owned\"] = tanks\r\n\r\n # Setting claimable to non as default\r\n Achievements_that_are_claimable = {}\r\n are_there_any_claimable_achievements_check = False\r\n\r\n # Creating the embed\r\n embed = discord.Embed(title=f\"**{ctx.author.display_name}**'s achievements\")\r\n\r\n # Set Variables for milestones, default to nonclaimable, and default stars to nothing\r\n for achievement, user_achievement_value in user_achievement_data_dict.items():\r\n milestone = f\"{achievement}_milestone\"\r\n is_achievement_claimable = \"nonclaimable\"\r\n list_of_stars_per_achievement = []\r\n # Checks what type of star to add\r\n for milestone_value in milestones_dict_of_achievements[achievement]:\r\n if user_achievement_milestone_data[0][f\"{milestone}_done\"] is True:\r\n list_of_stars_per_achievement.append(\"<:achievement_star:877646167087906816>\")\r\n elif milestone_value < user_achievement_milestone_data[0][milestone]:\r\n list_of_stars_per_achievement.append(\"<:achievement_star:877646167087906816>\")\r\n elif milestone_value <= user_achievement_value:\r\n list_of_stars_per_achievement.append(\"<:achievement_star_new:877737712046702592>\")\r\n else:\r\n list_of_stars_per_achievement.append(\"<:achievement_star_no:877646167222141008>\")\r\n # Grammar stuff and the number of stars said\r\n next_unclaimable_star = 0\r\n st_nd_rd_th_grammar = 'th'\r\n for single_star_per_star_list in list_of_stars_per_achievement:\r\n if single_star_per_star_list != 
\"<:achievement_star:877646167087906816>\":\r\n next_unclaimable_star += 1\r\n break\r\n next_unclaimable_star += 1\r\n if next_unclaimable_star == 1:\r\n st_nd_rd_th_grammar = 'st'\r\n elif next_unclaimable_star == 2:\r\n st_nd_rd_th_grammar = 'nd'\r\n elif next_unclaimable_star == 3:\r\n st_nd_rd_th_grammar = 'rd'\r\n\r\n # Sets the milestonme to be claimable if it is\r\n if user_achievement_value >= user_achievement_milestone_data[0][milestone] and user_achievement_milestone_data[0][f'{milestone}_done'] is False:\r\n if are_there_any_claimable_achievements_check is False:\r\n are_there_any_claimable_achievements_check = True\r\n Achievements_that_are_claimable[achievement] = milestones_dict_of_achievements[achievement].index(user_achievement_milestone_data[0][milestone])\r\n is_achievement_claimable = \"claimable\"\r\n if user_achievement_milestone_data[0][f'{milestone}_done'] is True:\r\n value_data = 'All achievements have been claimed!'\r\n name_data = ''\r\n else:\r\n value_data = ''\r\n value_data = f\"{(user_achievement_value/user_achievement_milestone_data[0][milestone])}% of **{next_unclaimable_star}**{st_nd_rd_th_grammar} star\"\r\n name_data = f\"{user_achievement_value:,}/{user_achievement_milestone_data[0][milestone]:,}\"\r\n embed.add_field(name=f\"{achievement.replace('_', ' ').title()} {name_data}\", value=f\"{value_data}\\n{''.join(list_of_stars_per_achievement)} \\n**{is_achievement_claimable}**\")\r\n\r\n # Adds a button to the message if there are any claimable achievements\r\n if are_there_any_claimable_achievements_check is True:\r\n components = vbu.MessageComponents(\r\n vbu.ActionRow(\r\n vbu.Button(custom_id=\"claim_all\", emoji=\"1\\N{COMBINING ENCLOSING KEYCAP}\"),\r\n ),\r\n )\r\n claim_message = await ctx.send(embed=embed, components=components)\r\n else:\r\n # Doesnt add a button if theres no claimable achievements\r\n return await ctx.send(embed=embed)\r\n\r\n # Make the button check\r\n def button_check(payload):\r\n if payload.message.id != claim_message.id:\r\n return False\r\n self.bot.loop.create_task(payload.defer_update())\r\n return payload.user.id == ctx.author.id\r\n\r\n\r\n pressed = False\r\n while True:\r\n\r\n # Wait for them to click a button\r\n try:\r\n chosen_button_payload = await self.bot.wait_for('component_interaction', timeout=60.0, check=button_check)\r\n chosen_button = chosen_button_payload.component.custom_id.lower()\r\n except asyncio.TimeoutError:\r\n await claim_message.edit(components=components.disable_components())\r\n break\r\n\r\n # Sets reward and if the button is clicked...\r\n amount_of_doubloons_earned = 0\r\n if chosen_button == \"claim_all\":\r\n pressed = True\r\n for achievement_button, user_achievement_position_button in Achievements_that_are_claimable.items():\r\n amount_per_achievement = user_achievement_position_button + 1\r\n print(achievement_button)\r\n print(user_achievement_position_button)\r\n print(amount_per_achievement)\r\n for x in range(amount_per_achievement):\r\n print(x)\r\n amount_of_doubloons_earned += x + 1\r\n print(amount_of_doubloons_earned)\r\n if achievement_button == 'tanks_owned' and user_achievement_position_button >= 3:\r\n async with self.bot.database() as db:\r\n await db(\"\"\"UPDATE user_achievements_milestones SET {0} = TRUE WHERE user_id = $1\"\"\".format(f\"{achievement_button}_milestone_done\"), ctx.author.id)\r\n elif user_achievement_position_button >= 9:\r\n async with self.bot.database() as db:\r\n await db(\"\"\"UPDATE user_achievements_milestones SET {0} = TRUE WHERE 
user_id = $1\"\"\".format(f\"{achievement_button}_milestone_done\"), ctx.author.id)\r\n else:\r\n async with self.bot.database() as db:\r\n await db(\"\"\"UPDATE user_achievements_milestones SET {0} = $1 WHERE user_id = $2\"\"\".format(f\"{achievement_button}_milestone\"), milestones_dict_of_achievements[achievement_button][user_achievement_position_button + 1], ctx.author.id)\r\n async with self.bot.database() as db:\r\n await db(\r\n \"\"\"INSERT INTO user_balance (user_id, doubloon) VALUES ($1, $2)\r\n ON CONFLICT (user_id) DO UPDATE SET doubloon = user_balance.doubloon + $2\"\"\",\r\n ctx.author.id, amount_of_doubloons_earned)\r\n components.get_component(chosen_button).disable()\r\n break\r\n if pressed is True:\r\n await ctx.send(f\"Rewards claimed, you earned {amount_of_doubloons_earned} <:doubloon:878297091057807400>!\")",
"def get_achievements(tag, platform=\"pc\", region=\"eu\"):\n #\n try:\n context = ssl._create_unverified_context()\n achievements = json.load(\n const.codec(urlopen(const.URL + platform + \"/\" + region + \"/\" + tag + \"/achievements\", context=context)))\n #\n if \"error\" in achievements:\n raise BattleTagNotFound(achievements['error'])\n exit(1)\n #\n result = a.Achievements(achievements['totalNumberOfAchievements'],\n achievements['numberOfAchievementsCompleted'],\n achievements['finishedAchievements'])\n return result\n except urllib.error.URLError as e:\n print(\"An error occurred when fetching stats\\n\" + e)\n exit(1)\n except Exception as e:\n print(\"An error occurred:\\n \" + str(e))\n exit(1)",
"def test_str_method_obtained_achievement(self):\n achievement_definition = AchievementDefinition(name={\"es\":'nombre'}, description={\"es\":'descripcion'})\n self.assertEqual(str(achievement_definition), achievement_definition.name['es'])",
"def sample_achievement(achievement='Had camera on for a full day'):\n return Achievement.objects.create(achievement=achievement)",
"def getAchievements(self) -> list:\n return self.state[ACHIEVEMENTS]",
"def inspect_achievement(achievements, entity_type, entity, dirty_fields):\n\n # Getting only achievements that have sense to be checked\n achievements = achievements.filter(\n entity_type=entity_type,\n requirements__key__in=dirty_fields.keys()\n # NOTE: Excluding unlocked achievements\n ).exclude(\n id__in=entity.achievements.values_list('achievement_id', flat=True)\n ).distinct()\n\n # Inspecting\n for achievement in achievements:\n achievement.logic.inspect(entity)",
"def parse(self, text):\n\n goal = NLUGoal()\n goal.text = str(text)\n self._nlu_client.send_goal_and_wait(goal)\n result = self._nlu_client.get_result()\n\n #no intent found, return None \n if result.intentName == \"\":\n return None, None, None\n else:\n #parse\n slot_info = json.loads(result.slot_json_string)\n return result.intentName, result.probability, slot_info",
"def get_steam_info(bot, trigger):\n\n if not trigger.group(2):\n return bot.reply(\"I need a game to look up!\")\n\n user_input = parseargs(trigger.group(2).lower())\n\n query = user_input.get(\"--query\") or user_input[\"extra_text\"]\n region = user_input.get(\"--region\") or \"US\"\n\n search_html = _fetch_search_page(query=query, region=region)\n if not search_html:\n return bot.reply(\"Something went wrong finding that game!\")\n\n fetch_price = False if region == \"US\" else True\n game_data = _parse_html(search_html, fetch_price)\n if not game_data:\n return bot.reply(\"I couldn't find that game.\")\n\n details = _fetch_game_details(game_data['id'], game_data.get('pkg'))\n\n if not details[game_data['id']]['success']:\n LOGGER.error(\"error fetching details\")\n if game_data.get('pkg'):\n # TODO: implement\n game_details = details[game_data['id']]['data']\n else:\n game_details = details[game_data['id']]['data']\n\n reviews = _fetch_game_reviews(game_data['id'], game_data.get('pkg'))\n\n reply = _parse_game(game_data, game_details, reviews)\n\n bot.say(reply, max_messages=2)",
"async def gamelookup(self, ctx, *, game_name = None):\r\n\r\n if not game_name: return await ctx.send(\"Usage: `{}gamelookup [game_name]`\".format(ctx.prefix))\r\n if not self.access_token or time.time() >= self.expire_time:\r\n if not await self._update_token():\r\n return await ctx.send(\"I couldn't update my access token :( Make sure the `igdbclientid` and `igdbsecret` are correct in my settings_dict.json!\")\r\n # Let's build our search query\r\n search_url = \"https://api.igdb.com/v4/games\"\r\n data = 'search \"{}\"; fields name,url,summary,first_release_date,platforms.*,cover.*; limit 10;'.format(game_name.replace('\"',\"\").replace(\"\\\\\",\"\"))\r\n headers = {\"Client-ID\":self.clientid,\"Authorization\":\"Bearer {}\".format(self.access_token)}\r\n try:\r\n search_data = await DL.async_post_json(search_url,data=data,headers=headers)\r\n except:\r\n return await Message.Embed(\r\n title=\"Something went wrong searching for that game :(\",\r\n color=ctx.author\r\n ).send(ctx)\r\n if not search_data:\r\n # Nothing was returned - bail.\r\n return await Message.Embed(\r\n title=\"Nothing was returned for that search!\",\r\n color=ctx.author\r\n ).send(ctx)\r\n if len(search_data)==1 and all((x in search_data[0] for x in (\"title\",\"status\",\"cause\"))):\r\n # Got an error - print it and bail\r\n return await Message.Embed(\r\n title=\"Something went wrong searching :(\",\r\n description=\"{}: {}\".format(search_data[0][\"title\"],search_data[0][\"cause\"]),\r\n color=ctx.author\r\n ).send(ctx)\r\n # Organize the search data by the closest match\r\n game = FuzzySearch.search(game_name,search_data,\"name\",1)[0][\"Item\"]\r\n # Print the results!\r\n await Message.Embed(\r\n title=game[\"name\"],\r\n thumbnail=\"http:{}\".format(game[\"cover\"][\"url\"].replace(\"/t_thumb/\",\"/t_cover_big/\")),\r\n url=game[\"url\"],\r\n color=ctx.author,\r\n description=game[\"summary\"],\r\n fields=[\r\n {\"name\":\"Release Date\", \"value\": \"<t:{}:D>\".format(game[\"first_release_date\"])},\r\n {\"name\":\"Platforms\", \"value\":\"\\n\".join(sorted([x[\"name\"] for x in game[\"platforms\"]]))}\r\n ]\r\n ).send(ctx)",
"def add_achievement(achievement_id):\r\n\r\n\tcurrent_unlocked_achievements = str(config.get(\"GAMEDATA\", \"ACHIEVEMENTS\")) # Get str from memory\r\n\tcurrent_unlocked_achievements = current_unlocked_achievements.split(\", \") # Converts str to list\r\n\r\n\t# We check if the player already have the achievement\r\n\tfor achievement in current_unlocked_achievements:\r\n\t\tif achievement_id == achievement: # If the player already have the achievement...\r\n\t\t\treturn\r\n\t\r\n\t# If we're here, this means that the player doesn't have the achievement yet, so we append it to the save.\r\n\r\n\tcurrent_unlocked_achievements.append(achievement_id) # Add item to list\r\n\tcurrent_unlocked_achievements = ', '.join(current_unlocked_achievements) # Convert list to str\r\n\r\n\tachievement_name = getstring(world.WORLD_ACHIEVEMENTS[achievement_id][\"NAME\"])\r\n\tcprint(\"You unlocked the achievement '\" + achievement_name + \"' !\", \"green\")",
"def getAchievements(self, start=None, length=5):\n if start == None:\n start = len(self.achievements)\n doc = minidom.parse(urllib.urlopen(\"%s/rest/achievements/%s/%i/%i\" % (serverString, self.name, start, length)))\n if int(doc.getElementsByTagName(\"status\")[0].firstChild.data) != 1:\n raise ServerError(doc.getElementsByTagName(\"status\")[0].firstChild.data)\n for element in doc.getElementsByTagName(\"achievement\"):\n guid = element.getElementsByTagName(\"guid\")[0].firstChild.data\n date = element.getElementsByTagName(\"date\")[0].firstChild.data\n date = datetime.datetime.strptime(date[:date.rfind(\".\")]+\".GMT\", \"%Y-%m-%d %H:%M:%S.%Z\")\n self.achievements += [Achievement(guid, date)]",
"def test_get_solved_achievement(self):\n client = Client()\n solved_achievement_1 = NumSolvedAchievementDefinition(name={\"es\":'Solo 1'},\n description={\"es\":'Acierta 1'},\n num_problems=1)\n solved_achievement_2 = NumSolvedAchievementDefinition(name={\"es\":'Solo 2'},\n description={\"es\":'Acierta 2'},\n num_problems=2)\n solved_achievement_1.save()\n solved_achievement_2.save()\n coll = create_collection('Test Solved')\n problem = create_select_problem(coll, 'Select 1')\n user = create_user('passwordmichu', 'michu')\n client.login(username='michu', password='passwordmichu')\n submit_select_url = reverse('judge:submit', args=[problem.pk])\n client.post(submit_select_url, {'code': problem.solution}, follow=True)\n client.post(submit_select_url, {'code': problem.solution}, follow=True)\n achievements_url = reverse('judge:achievements', args=[user.pk])\n response = client.get(achievements_url, follow=True)\n self.assertIn('Fecha', response.content.decode('utf-8'))\n self.assertIn('Logros pendientes', response.content.decode('utf-8'))",
"async def info(self, ctx, member: discord.Member):\r\n if ctx.guild.id == 445092370006933505:\r\n data = self.config.guild(ctx.guild)\r\n lst = await data.get_raw('neededlist')\r\n coach = await data.coachid()\r\n coach_role = ctx.guild.get_role(coach)\r\n x = ctx.author.top_role\r\n if x >= coach_role:\r\n if member.id in lst:\r\n player_data = self.config.member(member)\r\n ign_use = await player_data.get_raw('ign')\r\n tag_use = await player_data.get_raw('tag')\r\n time_use = await player_data.get_raw('time')\r\n deck_use = await player_data.get_raw('deck_type')\r\n await self.emb2(ctx, \"Discord Name\", \"In Game Name\", \"Player Tag\", \"Preferred Time\", \"Deck Type\",member.mention, ign_use, tag_use, time_use, deck_use)\r\n\r\n else:\r\n await ctx.send(\"The member has not registered for coaching\")\r\n\r\n else:\r\n await ctx.send(\"You are not allowed to do that\")\r\n else:\r\n await ctx.send(\"This command only works in the Legend eSports server, join us at: https://discord.gg/GGuCXDn\")",
"async def get_achievements_detail_item(\n self, xuid, service_config_id, achievement_id, **kwargs\n ) -> AchievementResponse:\n url = f\"{self.ACHIEVEMENTS_URL}/users/xuid({xuid})/achievements/{service_config_id}/{achievement_id}\"\n resp = await self.client.session.get(\n url, headers=self.HEADERS_GAME_PROGRESS, **kwargs\n )\n resp.raise_for_status()\n return AchievementResponse(**resp.json())",
"async def fetch_info(name, dict, type='items'):\n # First check if the skill exists\n if name not in dict:\n return False\n # Start the session\n async with aiohttp.ClientSession() as session:\n url = 'https://api.guildwars2.com/v2/' + type + '/' + dict[name]\n async with session.get(url) as response:\n a = await response.text()\n b = json.loads(a)\n # Quick check to see if the object has a description field\n try:\n print(b['description'])\n except KeyError:\n b['description'] = 'No available description.'\n print(b)\n return b",
"def get_achievement_data(self, region, namespace, id, **filters):\n filters['namespace'] = namespace\n return self.get_resource('data/wow/achievement/{0}', region, *[id], **filters)",
"def user(bot, event, *args):\n\n search = \" \".join(args)\n\n if not search:\n raise ValueError(_(\"supply search term\"))\n\n search_lower = search.strip().lower()\n search_upper = search.strip().upper()\n\n segments = [hangups.ChatMessageSegment(_('results for user named \"{}\":').format(search),\n is_bold=True),\n hangups.ChatMessageSegment('\\n', hangups.SegmentType.LINE_BREAK)]\n\n all_known_users = {}\n for chat_id in bot.memory[\"user_data\"]:\n all_known_users[chat_id] = bot.get_hangups_user(chat_id)\n\n for u in sorted(all_known_users.values(), key=lambda x: x.full_name.split()[-1]):\n fullname_lower = u.full_name.lower()\n fullname_upper = u.full_name.upper()\n unspaced_lower = re.sub(r'\\s+', '', fullname_lower)\n unspaced_upper = re.sub(r'\\s+', '', u.full_name.upper())\n\n if( search_lower in fullname_lower\n or search_lower in unspaced_lower\n # XXX: turkish alphabet special case: converstion works better when uppercase\n or search_upper in remove_accents(fullname_upper)\n or search_upper in remove_accents(unspaced_upper) ):\n\n link = 'https://plus.google.com/u/0/{}/about'.format(u.id_.chat_id)\n segments.append(hangups.ChatMessageSegment(u.full_name, hangups.SegmentType.LINK,\n link_target=link))\n if u.emails:\n segments.append(hangups.ChatMessageSegment(' ('))\n segments.append(hangups.ChatMessageSegment(u.emails[0], hangups.SegmentType.LINK,\n link_target='mailto:{}'.format(u.emails[0])))\n segments.append(hangups.ChatMessageSegment(')'))\n segments.append(hangups.ChatMessageSegment(' ... {}'.format(u.id_.chat_id)))\n segments.append(hangups.ChatMessageSegment('\\n', hangups.SegmentType.LINE_BREAK))\n\n yield from bot.coro_send_message(event.conv, segments)",
"def test_get_collection_achievement(self):\n client = Client()\n coll = create_collection('Test Solved')\n coll_achievement = NumSolvedCollectionAchievementDefinition(name={\"es\":'Solo 1'},\n description={\"es\":'Acierta 1'},\n num_problems=1, collection=coll)\n coll_achievement.save()\n problem = create_select_problem(coll, 'Select 1')\n user = create_user('passwordmichu', 'michu')\n client.login(username='michu', password='passwordmichu')\n submit_select_url = reverse('judge:submit', args=[problem.pk])\n client.post(submit_select_url, {'code': problem.solution}, follow=True)\n achievements_url = reverse('judge:achievements', args=[user.pk])\n response = client.get(achievements_url, follow=True)\n self.assertIn('Fecha', response.content.decode('utf-8'))",
"async def pending(self, ctx):\r\n if ctx.guild.id == 445092370006933505:\r\n data = self.config.guild(ctx.guild)\r\n lst = await data.get_raw('neededlist')\r\n description = \"\"\r\n coach = await data.coachid()\r\n coach_role = ctx.guild.get_role(coach)\r\n x = ctx.author.top_role\r\n if x >= coach_role:\r\n for member in lst:\r\n userobj = ctx.guild.get_member(int(member))\r\n description += (str(userobj.mention) + '\\n')\r\n embed = discord.Embed(color=0xFFFF00, title='Coaching Needed by following people', description=description)\r\n embed.set_footer(text=credit)\r\n await ctx.send(embed=embed)\r\n await ctx.send('Type \"{0}coaching done @<player name>\" if the player has been coached or type \"{0}coaching info <@playername>\" to view the details submitted by the user'.format(ctx.prefix))\r\n \r\n else:\r\n await ctx.send(\"You are not allowed to do that\")\r\n\r\n else:\r\n await ctx.send(\"This command only works in the Legend eSports server, join us at: https://discord.gg/GGuCXDn\")",
"def ga_assist_main():\n\n clear()\n pretty_print(\"Hi GAs! Hope you enjoy Calling Campaigns from now\", \"#\")\n ga_name = input(\"Enter your Name: \\n\")\n\n try:\n name = get_file()\n\n except FileNotFoundError:\n clear()\n pretty_print(\"The File Does not Exist.\", \":\")\n pretty_print(\"Make Sure your place the file in the working directory.\", \":\")\n sleep(5)\n ga_assist_main()\n\n else:\n GAassist(ga_name, name)",
"async def info(self, ctx, name: str):\n name = name.lower()\n server = ctx.message.server\n if server.id not in self.db:\n self.db[server.id] = {}\n dataIO.save_json(\"data/lootbox/servers.json\", self.db)\n if name not in self.db[server.id]:\n await self.bot.say(\"Please make sure that the name is spelled correctly and \"\n \"that you can find it in [p]box list\")\n return\n box = list(self.db[server.id][name][\"content\"].keys())\n values = list(self.db[server.id][name][\"content\"].values())\n value = sum(values)\n for x in range(len(values)):\n values[x] = values[x]/value\n box[x] = \" {:.2%} chance of getting \".format(values[x]) + box[x]\n msg = \"You can get the following items from the box:\\n\"\n msg += \"\\n\".join(box)\n for page in pagify(msg):\n await self.bot.say(page)",
"async def help_skill(self, message):\n logging.debug(\"searching for {}\".format(message.regex))\n found_skill = next(\n (\n skill\n for skill in self.opsdroid.skills\n if skill.__name__ == message.regex.group(1)\n ),\n False,\n )\n if not found_skill:\n response = \"{} skill not found\".format(message.regex.group(1))\n elif not found_skill.__doc__:\n response = \"No usage found for {}\".format(found_skill.__name__)\n else:\n response = found_skill.__doc__\n await message.respond(response)",
"async def info(self):\n # [p]info\n\n await self.bot.say(strings.info.format(\n CacheAPI.get(key='dwarf_repository'),\n CacheAPI.get(key='dwarf_invite_link')))",
"async def mana():\n acc = Account(\"travelfeed\")\n mana = acc.get_manabar()\n await bot.say(\"The voting mana of @travelfeed is **\"+str(round(mana['current_mana_pct'], 2))+\"**\")",
"async def get_action(self, game_ref): # Add other args accordingly.\r\n if(self.type==\"Leader\"):\r\n self.player.gain_summon_points()\r\n\r\n if(self.type==\"Creature\"):\r\n await self.do_auto_skills(game_ref)\r\n self.player.gain_fp()\r\n self.current_actions = self.generate_commands(game= game_ref)\r\n #self.current_actions=await self.check_effects('before', 'command_setting', self.current_actions, None) #Check effects on self.\r\n my_turn = True\r\n await game_ref.send_announcement(\"{}'s turn\".format(self.get_name()))\r\n await self.check_effects('before', 'turn', {'this_piece':self}, game_ref)\r\n while my_turn:\r\n choices = []\r\n for key, item in self.current_actions.items():\r\n print(key)\r\n if item > 0:\r\n choices.append(key)\r\n await self.player.send_embed_to_user()\r\n await game_ref.update_all()\r\n action = await self.player.select_command(choices)\r\n my_turn, completed = await self.process_option(game_ref, action)\r\n if (completed):\r\n if action in self.current_actions:\r\n self.current_actions[action] = self.current_actions[action] - 1\r\n await game_ref.update_all()\r\n\r\n #await asyncio.sleep(0.1)\r\n await game_ref.send_announcement(\"Turn end.\")\r\n await self.check_effects('after', 'turn', {'this_piece':self}, game_ref)\r\n print(\"End OF TURN.\")\r\n return None\r\n # universal options.\r",
"def get_completions(self, info):\r\n pass",
"def display_help_message():\n return lambda_response(None, {\n \"text\": \"\"\"\n/gauges list - list favorite gauges\n/gauges add USGS_SITE_NUMBER RIVER_DESCRIPTION - add gauge to list of favorite gauges\n/gauges check USGS_SITE_NUMBER - display current flow readings for gauge\n \"\"\".strip(),\n })",
"async def on_message(message):\n\n # we do not want the bot to reply to itself\n if message.author == client.user:\n return\n\n # intializes a scores object\n hiscores = Scores(message)\n\n if message.content.startswith('!hello'):\n msg = 'Hello {0.author.mention}'.format(message)\n await message.channel.send(msg)\n\n # get the command without !\n command = message.content.split()[0][1:]\n\n # retrieve the score of a player\n if message.content.startswith('!') and command in SKILLS:\n\n # retrieve the username that comes after the !level command and set underscores\n username = message.content.split()[1:]\n username = '_'.join(username)\n\n # get scores\n await hiscores.show_score(username, command)\n\n if message.content.startswith('!compare'):\n\n # get skill\n skill = message.content.split()[1]\n\n # check if the skill is valid, if not we compare based on total level and experience\n if not skill in SKILLS:\n\n # get the players\n players = ' '.join(message.content.split()[1:])\n players = players.split(' - ')\n\n for i, player in enumerate(players):\n players[i] = player.replace(' ', '_')\n\n # compare the players on total level if nothing is given\n await hiscores.compare(players, 'total')\n\n else:\n\n # get the players after the skill\n players = ' '.join(message.content.split()[2:])\n players = players.split(' - ')\n\n for i, player in enumerate(players):\n players[i] = player.replace(' ', '_')\n\n print(players)\n print(skill)\n # compare the players on total level if nothing is given\n await hiscores.compare(players, skill)\n\n\n if message.content.startswith('!pok'):\n msg = 'Heb je m al Marc?'.format(message)\n await message.channel.send(msg)",
"def test_get_podium_achievement(self):\n client = Client()\n podium_achievement = PodiumAchievementDefinition(name={\"es\":\"Top 1\"},\n description={\"es\":'Se el primero'},\n num_problems=1,\n position=1)\n podium_achievement.save()\n coll = create_collection('Test Podium')\n problem = create_select_problem(coll, 'Select 1')\n user = create_user('passwordmichu', 'michu')\n client.login(username='michu', password='passwordmichu')\n submit_select_url = reverse('judge:submit', args=[problem.pk])\n client.post(submit_select_url, {'code': problem.solution}, follow=True)\n achievements_url = reverse('judge:achievements', args=[user.pk])\n response = client.get(achievements_url, follow=True)\n self.assertIn('Fecha', response.content.decode('utf-8'))"
]
| [
"0.69373536",
"0.6452204",
"0.5869849",
"0.58177376",
"0.5670992",
"0.56469953",
"0.5529022",
"0.5513867",
"0.5397196",
"0.5384314",
"0.5366253",
"0.53638035",
"0.5319539",
"0.5274556",
"0.5223422",
"0.5214078",
"0.5207669",
"0.516536",
"0.5130748",
"0.51127976",
"0.5089797",
"0.5074661",
"0.5073113",
"0.50311834",
"0.5031053",
"0.5020087",
"0.50134486",
"0.50132567",
"0.49858108",
"0.49634472"
]
| 0.6527916 | 1 |
Store name and description (must have loadAchievementList called first) | def getInfo(self):
self.name, self.description = achievements[self.id] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def loadAchievementList():\n global achievements\n achievements = {}\n doc = minidom.parse(urllib.urlopen(serverString + \"/data/achievements.xml\"))\n for element in doc.getElementsByTagName(\"achievement\"):\n key = element.getElementsByTagName(\"id\")[0].firstChild.data\n name = element.getElementsByTagName(\"name\")[0].firstChild.data\n description = element.getElementsByTagName(\"description\")[0].firstChild.data\n achievements[key] = (name, description)",
"async def achievements(self, ctx: commands.Context):\r\n # The milestones for each achievement type\r\n milestones_dict_of_achievements = {\r\n 'times_entertained': [5, 25, 100, 250, 500, 1000, 5000, 10000, 100000, 1000000],\r\n 'times_fed': [5, 25, 25, 50, 100, 500, 1000, 10000, 100000, 1000000],\r\n 'times_cleaned': [5, 25, 100, 250, 500, 1000, 5000, 10000, 100000, 1000000],\r\n 'times_caught': [5, 25, 100, 250, 500, 1000, 5000, 10000, 100000, 1000000],\r\n 'tanks_owned': [1, 3, 5, 10],\r\n 'times_gambled': [5, 25, 100, 250, 500, 1000, 5000, 10000, 100000, 1000000],\r\n 'money_gained': [100, 500, 1000, 5000, 10000, 50000, 100000, 500000, 1000000, 10000000],\r\n }\r\n\r\n\r\n # Database variables\r\n async with self.bot.database() as db:\r\n user_achievement_milestone_data = await db(\"\"\"SELECT * FROM user_achievements_milestones WHERE user_id = $1\"\"\", ctx.author.id)\r\n user_achievement_data = await db(\"\"\"SELECT * FROM user_achievements WHERE user_id = $1\"\"\", ctx.author.id)\r\n tank_data = await db(\"\"\"SELECT tank FROM user_tank_inventory WHERE user_id = $1\"\"\", ctx.author.id)\r\n if not user_achievement_data:\r\n user_achievement_data = await db(\"\"\"INSERT INTO user_achievements (user_id) VALUES ($1) RETURNING *\"\"\", ctx.author.id)\r\n if not user_achievement_milestone_data:\r\n user_achievement_milestone_data = await db(\"\"\"INSERT INTO user_achievements_milestones (user_id) VALUES ($1) RETURNING *\"\"\", ctx.author.id)\r\n\r\n # Getting the users data into a dictionary for the embed and ease of access\r\n user_achievement_data_dict = {}\r\n for achievement_type_database, achievement_amount_database in user_achievement_data[0].items():\r\n if achievement_type_database != \"user_id\":\r\n user_achievement_data_dict[achievement_type_database] = achievement_amount_database\r\n\r\n # Getting the users amount of tanks and adding that to the user data dictionary\r\n tanks = 0\r\n if not tank_data:\r\n tanks = 0\r\n else:\r\n for tank in tank_data[0]['tank']:\r\n if tank is True:\r\n tanks += 1\r\n user_achievement_data_dict[\"tanks_owned\"] = tanks\r\n\r\n # Setting claimable to non as default\r\n Achievements_that_are_claimable = {}\r\n are_there_any_claimable_achievements_check = False\r\n\r\n # Creating the embed\r\n embed = discord.Embed(title=f\"**{ctx.author.display_name}**'s achievements\")\r\n\r\n # Set Variables for milestones, default to nonclaimable, and default stars to nothing\r\n for achievement, user_achievement_value in user_achievement_data_dict.items():\r\n milestone = f\"{achievement}_milestone\"\r\n is_achievement_claimable = \"nonclaimable\"\r\n list_of_stars_per_achievement = []\r\n # Checks what type of star to add\r\n for milestone_value in milestones_dict_of_achievements[achievement]:\r\n if user_achievement_milestone_data[0][f\"{milestone}_done\"] is True:\r\n list_of_stars_per_achievement.append(\"<:achievement_star:877646167087906816>\")\r\n elif milestone_value < user_achievement_milestone_data[0][milestone]:\r\n list_of_stars_per_achievement.append(\"<:achievement_star:877646167087906816>\")\r\n elif milestone_value <= user_achievement_value:\r\n list_of_stars_per_achievement.append(\"<:achievement_star_new:877737712046702592>\")\r\n else:\r\n list_of_stars_per_achievement.append(\"<:achievement_star_no:877646167222141008>\")\r\n # Grammar stuff and the number of stars said\r\n next_unclaimable_star = 0\r\n st_nd_rd_th_grammar = 'th'\r\n for single_star_per_star_list in list_of_stars_per_achievement:\r\n if single_star_per_star_list != 
\"<:achievement_star:877646167087906816>\":\r\n next_unclaimable_star += 1\r\n break\r\n next_unclaimable_star += 1\r\n if next_unclaimable_star == 1:\r\n st_nd_rd_th_grammar = 'st'\r\n elif next_unclaimable_star == 2:\r\n st_nd_rd_th_grammar = 'nd'\r\n elif next_unclaimable_star == 3:\r\n st_nd_rd_th_grammar = 'rd'\r\n\r\n # Sets the milestonme to be claimable if it is\r\n if user_achievement_value >= user_achievement_milestone_data[0][milestone] and user_achievement_milestone_data[0][f'{milestone}_done'] is False:\r\n if are_there_any_claimable_achievements_check is False:\r\n are_there_any_claimable_achievements_check = True\r\n Achievements_that_are_claimable[achievement] = milestones_dict_of_achievements[achievement].index(user_achievement_milestone_data[0][milestone])\r\n is_achievement_claimable = \"claimable\"\r\n if user_achievement_milestone_data[0][f'{milestone}_done'] is True:\r\n value_data = 'All achievements have been claimed!'\r\n name_data = ''\r\n else:\r\n value_data = ''\r\n value_data = f\"{(user_achievement_value/user_achievement_milestone_data[0][milestone])}% of **{next_unclaimable_star}**{st_nd_rd_th_grammar} star\"\r\n name_data = f\"{user_achievement_value:,}/{user_achievement_milestone_data[0][milestone]:,}\"\r\n embed.add_field(name=f\"{achievement.replace('_', ' ').title()} {name_data}\", value=f\"{value_data}\\n{''.join(list_of_stars_per_achievement)} \\n**{is_achievement_claimable}**\")\r\n\r\n # Adds a button to the message if there are any claimable achievements\r\n if are_there_any_claimable_achievements_check is True:\r\n components = vbu.MessageComponents(\r\n vbu.ActionRow(\r\n vbu.Button(custom_id=\"claim_all\", emoji=\"1\\N{COMBINING ENCLOSING KEYCAP}\"),\r\n ),\r\n )\r\n claim_message = await ctx.send(embed=embed, components=components)\r\n else:\r\n # Doesnt add a button if theres no claimable achievements\r\n return await ctx.send(embed=embed)\r\n\r\n # Make the button check\r\n def button_check(payload):\r\n if payload.message.id != claim_message.id:\r\n return False\r\n self.bot.loop.create_task(payload.defer_update())\r\n return payload.user.id == ctx.author.id\r\n\r\n\r\n pressed = False\r\n while True:\r\n\r\n # Wait for them to click a button\r\n try:\r\n chosen_button_payload = await self.bot.wait_for('component_interaction', timeout=60.0, check=button_check)\r\n chosen_button = chosen_button_payload.component.custom_id.lower()\r\n except asyncio.TimeoutError:\r\n await claim_message.edit(components=components.disable_components())\r\n break\r\n\r\n # Sets reward and if the button is clicked...\r\n amount_of_doubloons_earned = 0\r\n if chosen_button == \"claim_all\":\r\n pressed = True\r\n for achievement_button, user_achievement_position_button in Achievements_that_are_claimable.items():\r\n amount_per_achievement = user_achievement_position_button + 1\r\n print(achievement_button)\r\n print(user_achievement_position_button)\r\n print(amount_per_achievement)\r\n for x in range(amount_per_achievement):\r\n print(x)\r\n amount_of_doubloons_earned += x + 1\r\n print(amount_of_doubloons_earned)\r\n if achievement_button == 'tanks_owned' and user_achievement_position_button >= 3:\r\n async with self.bot.database() as db:\r\n await db(\"\"\"UPDATE user_achievements_milestones SET {0} = TRUE WHERE user_id = $1\"\"\".format(f\"{achievement_button}_milestone_done\"), ctx.author.id)\r\n elif user_achievement_position_button >= 9:\r\n async with self.bot.database() as db:\r\n await db(\"\"\"UPDATE user_achievements_milestones SET {0} = TRUE WHERE 
user_id = $1\"\"\".format(f\"{achievement_button}_milestone_done\"), ctx.author.id)\r\n else:\r\n async with self.bot.database() as db:\r\n await db(\"\"\"UPDATE user_achievements_milestones SET {0} = $1 WHERE user_id = $2\"\"\".format(f\"{achievement_button}_milestone\"), milestones_dict_of_achievements[achievement_button][user_achievement_position_button + 1], ctx.author.id)\r\n async with self.bot.database() as db:\r\n await db(\r\n \"\"\"INSERT INTO user_balance (user_id, doubloon) VALUES ($1, $2)\r\n ON CONFLICT (user_id) DO UPDATE SET doubloon = user_balance.doubloon + $2\"\"\",\r\n ctx.author.id, amount_of_doubloons_earned)\r\n components.get_component(chosen_button).disable()\r\n break\r\n if pressed is True:\r\n await ctx.send(f\"Rewards claimed, you earned {amount_of_doubloons_earned} <:doubloon:878297091057807400>!\")",
"def add_achievement(achievement_id):\r\n\r\n\tcurrent_unlocked_achievements = str(config.get(\"GAMEDATA\", \"ACHIEVEMENTS\")) # Get str from memory\r\n\tcurrent_unlocked_achievements = current_unlocked_achievements.split(\", \") # Converts str to list\r\n\r\n\t# We check if the player already have the achievement\r\n\tfor achievement in current_unlocked_achievements:\r\n\t\tif achievement_id == achievement: # If the player already have the achievement...\r\n\t\t\treturn\r\n\t\r\n\t# If we're here, this means that the player doesn't have the achievement yet, so we append it to the save.\r\n\r\n\tcurrent_unlocked_achievements.append(achievement_id) # Add item to list\r\n\tcurrent_unlocked_achievements = ', '.join(current_unlocked_achievements) # Convert list to str\r\n\r\n\tachievement_name = getstring(world.WORLD_ACHIEVEMENTS[achievement_id][\"NAME\"])\r\n\tcprint(\"You unlocked the achievement '\" + achievement_name + \"' !\", \"green\")",
"def test_str_method_obtained_achievement(self):\n achievement_definition = AchievementDefinition(name={\"es\":'nombre'}, description={\"es\":'descripcion'})\n self.assertEqual(str(achievement_definition), achievement_definition.name['es'])",
"async def profile_description(self, ctx):\n profile = await self.cache.get_profile(ctx.author.id)\n embed = self.bot.theme.embeds.primary()\n embed.set_author(name=f\"{ctx.author.name}'s Profile Description\", icon_url=ctx.author.avatar_url)\n embed.description = profile.description\n await ctx.send(embed=embed)",
"def save_aliment(self, aliment_name):\n aliment = Aliment.objects.get(name=aliment_name)\n self.aliments_pref.add(aliment)",
"def sample_achievement(achievement='Had camera on for a full day'):\n return Achievement.objects.create(achievement=achievement)",
"def infos(self):\r\n\t\tname = name\r\n\t\tlast_name = last_name",
"def autoname(self):\n\t\tself.name = self.role_profile",
"def give_item(self,item):\n self.inv[item.alias] = item.desc",
"def manage_info():",
"def save(self):\n print(self.name)\n print(self.description)",
"def addInfo(self, name, information):\r\n gamethread.delayed(0, gamethread.delayed, (0, self.setSkillInfo, (name, information))) # delay by 2 ticks to allow skills to register\r\n header = \"\\n%s\\n%s\\n\\n\" % ('*' * 50, name.center(50))\r\n footer = \"\\n%s\" % (\"*\" * 50)\r\n information = information.strip() # strip whitespace at begggining and end of lines\r\n information = (header + information + footer).replace('\\n', '\\n// ')\r\n self.text(information, False)",
"def update_profile_data(self, **kwargs):\n # TODO: double check that the following will actually check if the user is not logged in, unit test\n if not self.uprofile:\n return None\n desc = kwargs.get('description', self.uprofile.description)\n self.uprofile.description = desc\n self.uprofile.save()\n return self.uprofile",
"async def additem(self, ctx, *, name: str):\n try:\n item = dict()\n item[\"name\"] = name\n check = lambda x: x.channel is ctx.channel and x.author is ctx.author\n await ctx.send(await _(ctx, \"Describe the item (a description for the item)\"))\n response = await self.bot.wait_for(\"message\", timeout=120, check=check)\n if response.content.lower() == \"cancel\":\n await ctx.send(await _(ctx, \"Cancelling!\"))\n return\n\n item[\"description\"] = response.content\n item[\"meta\"] = dict()\n\n await ctx.send(\n await _(ctx, \"Additional information? (Attributes formatted in a list i.e `color: 400, value: 200` \"\n \"Set an image for this item with the `image` key i.e. `image: http://image.com/image.png` \"\n \"Set this item as usable by adding `used` key i.e. `used: You open the jar and the bird flies away`\"))\n while True:\n response = await self.bot.wait_for(\"message\", timeout=60, check=check)\n if response.content.lower() == \"cancel\":\n await ctx.send(await _(ctx, \"Cancelling!\"))\n return\n elif response.content.lower() == \"skip\":\n await ctx.send(await _(ctx, \"Skipping!\"))\n break\n else:\n try:\n if \"\\n\" in response.content:\n res = response.content.split(\"\\n\")\n else:\n res = response.content.split(\",\")\n for val in res:\n key, value = val.split(\": \")\n key = key.strip().casefold()\n value = value.strip()\n item[\"meta\"][key] = value\n else:\n break\n except:\n await ctx.send(await _(ctx, \"Invalid syntax, try again.\"))\n await self.bot.di.new_item(ctx.guild, ServerItem(**item))\n await ctx.send(await _(ctx, \"Item successfully created\"))\n\n except asyncio.TimeoutError:\n await ctx.send(await _(ctx, \"Timed out! Try again\"))",
"async def info(self, ctx, name: str):\n name = name.lower()\n server = ctx.message.server\n if server.id not in self.db:\n self.db[server.id] = {}\n dataIO.save_json(\"data/lootbox/servers.json\", self.db)\n if name not in self.db[server.id]:\n await self.bot.say(\"Please make sure that the name is spelled correctly and \"\n \"that you can find it in [p]box list\")\n return\n box = list(self.db[server.id][name][\"content\"].keys())\n values = list(self.db[server.id][name][\"content\"].values())\n value = sum(values)\n for x in range(len(values)):\n values[x] = values[x]/value\n box[x] = \" {:.2%} chance of getting \".format(values[x]) + box[x]\n msg = \"You can get the following items from the box:\\n\"\n msg += \"\\n\".join(box)\n for page in pagify(msg):\n await self.bot.say(page)",
"def describe(self):\r\n print( self.name + \" is here!\" )\r\n print( self.description )",
"async def info(self, ctx):\n\n level = await self.get_player_level(ctx.author)\n embed = discord.Embed()\n embed.colour = discord.Colour.blurple()\n embed.set_author(name=str(ctx.author), icon_url=ctx.author.avatar_url)\n\n embed.title = f'Your current level : {level}'\n\n embed.add_field(name='Question', value=f'{self.enigmas[level][\"question\"]}')\n\n embed.set_footer(text='I love Ducks')\n\n await ctx.send(embed=embed)",
"def Add_Profile( self, name ):\r\n self.profile_name_list.append( name )\r\n self.combobox_profile.setlist( self.profile_name_list )",
"def test_studentachievement_str(self):\n studentachievement = models.StudentAchievement.objects.create(\n student=sample_user(),\n achievement=sample_achievement(),\n notes='sample test'\n )\n\n self.assertEqual(str(studentachievement), studentachievement.achievement.achievement)",
"def test_achievement_str(self):\n achievement = models.Achievement.objects.create(\n achievement='Had computer on',\n points=3\n )\n self.assertEqual(str(achievement), achievement.achievement)",
"def __init__(self, name):\n\n self.name = name\n self.exits = {}\n self.inventory= []\n self.objects = []\n self.description = ''",
"async def iteminfo(self, ctx, *, item: str):\n items = await self.bot.di.get_guild_items(ctx.guild)\n item = items.get(item)\n if not item:\n await ctx.send(await _(ctx, \"Item doesnt exist!\"))\n return\n if hasattr(item, \"description\"):\n embed = discord.Embed(title=item.name, description=item.description, color=randint(0, 0xFFFFFF),)\n else:\n embed = discord.Embed(title=item.name, color=randint(0, 0xFFFFFF),)\n\n embed.set_author(name=ctx.guild.name, icon_url=ctx.guild.icon_url)\n embed.add_field(name=await _(ctx, \"Name\"), value=item.name)\n img = item.meta.get(\"image\")\n embed.set_thumbnail(url=str(img)) if img else None\n for key, value in item.meta.items():\n if key == \"image\":\n continue\n embed.add_field(name=key, value=value)\n\n await ctx.send(embed=embed)",
"def append(self, name, value):\n # ...but only when the context has been entered (and locks acquired etc.)\n if not self.ready:\n raise RuntimeError(\"SnapshotView is a context manager. Never use it directly!\")\n # If this is the first entry under this key - create it\n try:\n target = self.data[name]\n except KeyError:\n target = []\n self.data[name] = target\n target.append(value)",
"async def set_profile_description(self, ctx, *, description: str):\n max_words = self.plugin.data.profile.max_description_length\n if len(description) > max_words:\n res = f\"{ctx.emotes.web_emotion.xx} Sorry but profile description cannot exceed {max_words} word limit.\"\n return await ctx.send_line(res)\n profile = await self.cache.get_profile(ctx.author.id)\n await profile.set_description(description)\n embed = self.bot.theme.embeds.primary(title=\"✅ Your Profile Description has been updated to:\")\n embed.set_author(name=ctx.author.name, icon_url=ctx.author.avatar_url)\n embed.description = profile.description\n await ctx.send(\"\", embed=embed)",
"def show_myhero(self):\n description = (self.name + ' Level is: ' + str(self.level) + ' Age is: ' + str(\n self.age) + ' Rank is: ' + self.rank + ' health is: ' + str(self.health)).title()\n print(description)",
"def getAchievements(self) -> list:\n return self.state[ACHIEVEMENTS]",
"def __init__(self, name, location, health):\n self.name = name\n self.location = location\n self.inventory = []\n self.weapon = []\n self.health = health",
"def get_description():\r\n return{\"I'll never yield!\":\"Grants a shield.\",\r\n \"Stay still!\":\"Affected unit cannot act in their turn.\"\r\n }",
"def add_enemy_to_list(name):\n\n with open(\"data.json\", \"r\") as file:\n data = json.load(file)\n data[\"enemy_data\"][\"names\"].append(name)\n \n with open(\"data.json\", \"w\") as file2:\n json.dump(data, file2, indent=4)"
]
| [
"0.6578054",
"0.6022761",
"0.59934604",
"0.58607906",
"0.5454128",
"0.5444996",
"0.54276216",
"0.53814185",
"0.53466827",
"0.533663",
"0.5291898",
"0.5203",
"0.51904",
"0.51902103",
"0.51061666",
"0.50983113",
"0.50593454",
"0.5032922",
"0.5029826",
"0.502053",
"0.4989219",
"0.49849334",
"0.49812353",
"0.49174336",
"0.49107188",
"0.49070433",
"0.49003813",
"0.48999852",
"0.4877402",
"0.4872318"
]
| 0.7584774 | 0 |
Returns the URL for the official Spore achievement icon | def getIconUrl(self):
return "%s/static/war/images/achievements/%s.png" % (serverString, self.id) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def icon(self):\r\n try:\r\n return self.data['icon_url_base']+self.data['icon_url_name']\r\n except KeyError:\r\n return ''",
"def topcoat_icons_script_url():\n return static('topcoat-icons/font/icomatic.js')",
"def icon_url(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"icon_url\")",
"def getNewsIconURL(newsBrain):",
"def icon(self) -> str:\n return ICON_SERVER",
"def getIconURL(self):\n try:\n return self.getObject().getIconURL()\n except KeyError:\n return super(Favorite, self).getIconURL()",
"def topcoat_icons_script_tag():\n return u'<script type=\"text/javascript src=\"%s\"></script>' % topcoat_icons_script_url()",
"def icon(self):\n return \"mdi:hubspot\"",
"def get_player_profile_icon(profilename: str) -> str:\n from ba._enums import SpecialChar\n\n appconfig = _ba.app.config\n icon: str\n try:\n is_global = appconfig['Player Profiles'][profilename]['global']\n except KeyError:\n is_global = False\n if is_global:\n try:\n icon = appconfig['Player Profiles'][profilename]['icon']\n except KeyError:\n icon = _ba.charstr(SpecialChar.LOGO)\n else:\n icon = ''\n return icon",
"def create_icon_url(cls, name):\n return os.path.join(RESOURCE_FOLDER, name)",
"def insta_url_dialogue(update: Update, _: CallbackContext) -> int:\n\n update.message.reply_text(\n 'Send url to download',\n reply_markup=ReplyKeyboardRemove(),\n )\n return INSTAGRAM_URL",
"def inspire_url(self) -> str:\n return f\"https://inspirehep.net/literature/{self.inspirehep_id}\"",
"async def icon(self, ctx: lifesaver.Context):\n if not ctx.guild.icon:\n await ctx.send(\"This server doesn't have a custom icon.\")\n return\n\n await ctx.send(ctx.guild.icon.replace(format=\"png\"))",
"def icon(self):\n ret_icon = self._icon\n if self.player_name == \"lower\":\n ret_icon = self._icon.lower()\n if self.is_promoted:\n ret_icon = \"+\" + ret_icon\n return ret_icon",
"def url_HITRANCIA():\n url=u\"https://hitran.org/data/CIA/\"\n return url",
"def app_logo_url():\n return \"https://raw.githubusercontent.com/aiidalab/aiidalab-hello-world/master/img/logo.png\"",
"def icon(self):\n return None",
"def icon(self):\n return None",
"def icon(self):\n return self._metadata[2]",
"def downloadicon_name(self):\n return 'platform_%s.gif' % \\\n re.sub(r'\\W', '_', self.context.getPlatform()).lower()",
"def icon(self):\n\n # look for icon one level up from this hook's folder in \"icons\" folder\n return os.path.join(\n self.disk_location,\n os.pardir,\n \"icons\",\n \"review.png\"\n )",
"def api_get_icon():\n pkg_name = request.args.get('pkg')\n if pkg_name:\n pkg_files = Database().db.get_pkg_files(pkg_name)\n for src in pkg_files:\n if src.startswith(\"/usr/share/icons/hicolor/32x32/apps/\"):\n return send_file(src, as_attachment=False)\n return send_file(\"static/images/null.gif\")\n else:\n src = request.args.get('i')\n if not os.path.isfile(src):\n #abort(404)\n return send_file(\"static/images/null.gif\")\n return send_file(src, as_attachment=False)",
"def favicon_url(self):\n return self.get_url(\"favicon\", \"images/favicon.png\")",
"def getIcon():\n\treturn \"Animator.png\"",
"async def icon(self):\n if not hasattr(self, \"_icon\"):\n self._icon = await Stack.fetch_stack_value(self, \"https://www.w3.org/1999/xhtml/vocab#icon\", await self.uuid)\n return self._icon",
"def icon(self) -> str | None:\n icons = self.entity_description.on_off_icons\n return icons[0] if self.is_on else icons[1]",
"def get_icon(self):\n try:\n icon = self.icon.fa_icon\n except AttributeError:\n icon = 'fa-globe'\n\n return icon",
"def get_icon_url(name: str, size: IconSize):\n\n file_name = \"{}.jpg\".format(name)\n icon_url_fmt = \"http://media.blizzard.com/wow/icons/{}/{}\"\n return icon_url_fmt.format(str(size.value), file_name)",
"def _get_icon(icon_name):\n theme = 'Adwaita'\n size = '256x256'\n path = f'/usr/share/icons/{theme}/{size}/mimetypes/{icon_name}.png'\n return path",
"def get_bot_icon(self):\n return self.bot_data_file[\"bot_icon\"]"
]
| [
"0.68088615",
"0.62398684",
"0.6187419",
"0.614668",
"0.6083217",
"0.60764223",
"0.6037102",
"0.60287416",
"0.5939155",
"0.5892648",
"0.58391285",
"0.5833275",
"0.58286303",
"0.58108354",
"0.57967657",
"0.5791114",
"0.57819533",
"0.57819533",
"0.576867",
"0.5765713",
"0.5747395",
"0.57415646",
"0.5730572",
"0.5729301",
"0.5723644",
"0.5705059",
"0.570475",
"0.57028216",
"0.57016355",
"0.56955284"
]
| 0.78774273 | 0 |
Get profile pic, tagline, user id and creation date | def getProfileInfo(self):
doc = minidom.parse(urllib.urlopen(serverString + "/rest/user/" + self.name))
for element in doc.getElementsByTagName("user")[0].childNodes:
if element.nodeType != minidom.Node.ELEMENT_NODE:
continue
elif element.tagName == "status" and int(element.firstChild.data) != 1:
raise ServerError(element.firstChild.data)
elif element.tagName == "input":
self.name = element.firstChild.data
elif element.tagName == "id":
self.id = element.firstChild.data
elif element.tagName == "image":
self.image = element.firstChild.data
elif element.tagName == "tagline":
if element.firstChild == None:
self.tagline = None
else:
self.tagline = element.firstChild.data
elif element.tagName == "creation":
self.created = datetime.datetime.strptime(element.firstChild.data[:element.firstChild.data.rfind(".")]+".GMT", "%Y-%m-%d %H:%M:%S.%Z") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def profile_pic(request):\n if request.user.is_authenticated:\n profile_obj = CustomUser.objects.get(id__exact=request.user.id)\n pic = profile_obj.avatar\n return {'picture': pic}\n return {}",
"def getprofile(self, *args, **kwargs):\n return _image.image_getprofile(self, *args, **kwargs)",
"def user_profileImg(id):\n data = request.get_json(force=True)\n\n user = User.query.get(id)\n user.profileImg = data['profileImg']\n db.session.commit()\n return {'user': user.to_dict()}",
"def getdat(user):\r\n profile = user.profile\r\n return [user.username, user.email] + [getattr(profile, xkey, '') for xkey in profkeys]",
"def get_profile(self,fields=('id','first-name','last-name','headline','summary')):\n\n if not self._access_token:\n raise FBError(\"Authentication needed!\")\n \n token = oauth.Token(self._access_token['oauth_token'], self._access_token['oauth_token_secret'])\n client = oauth.Client(self.consumer, token)\n profile_url = self.profile_url % \",\".join(fields)\n resp, content = client.request(profile_url,headers={\"x-li-format\":'json'})\n \n if resp['status'] != '200':\n print resp\n raise FBError(\"Invalid response %s.\" % resp['status'])\n \n try:\n return json.loads(content)\n except Exception, e:\n raise FBError(\"Invalid json %s.\" % unicode(e))",
"def userProfile(userid):\n images = get_uploaded_images()\n record = UserProfile.query.filter_by(id=userid).first()\n return render_template('userProfile.html', images=images, record =record)",
"def get_facebook_user_info(access_token):\n required_data_list = []\n for per in settings.FACEBOOK_EXTENDED_PERMISSIONS:\n required_data_list.append(per.replace(\"user_\",\"\"))\n \n required_data_list.append(\"picture.type(large)\")\n required_data = (\", \").join([data for data in required_data_list])\n \n graph_url = \"https://graph.facebook.com/me?access_token=%s&fields=%s\" % (access_token,required_data)\n public_info_url = \"https://graph.facebook.com/me?access_token=%s\" % access_token\n \n profile = json.load(urllib.urlopen(graph_url))\n profile_info = json.load(urllib.urlopen(public_info_url))\n \n profile_response_dict = {}\n profile_response_dict.update(profile)\n profile_response_dict.update(profile_info)\n profile_response_json = json.dumps(profile_response_dict)\n\n return (profile_response_json, profile_response_dict)",
"def _get_user_details():\n with open(USER_DETAILS_FILE) as f:\n fitbit_user = json.load(f)\n access_token = fitbit_user['access_token']\n refresh_token = fitbit_user['refresh_token']\n expires_at = fitbit_user['expires_at']\n\n return access_token, refresh_token, expires_at",
"def profiles():\n images = get_uploaded_images()\n records = db.session.query(UserProfile).all()\n return render_template('profiles.html', images=images, records =records)",
"def get_profile_picture(user):\n url = \"https://api.dropboxapi.com/2/files/get_temporary_link\"\n key = os.environ.get('STORC_DROPBOX_KEY')\n headers = {\n \"Authorization\": f\"Bearer {key}\",\n \"Content-Type\": \"application/json\"}\n data = {\"path\": f\"/{user.profile_picture}\"}\n response = requests.post(\n url, headers=headers, data=json.dumps(data))\n return response.json()['link']",
"def get_profile_picture(user):\n b = boto_init_s3(settings.BUCKET_NAME)\n if b:\n try:\n p = ProfilePicture.objects.get(is_current=True, user_id=user)\n s3_file_path = b.get_key(p.path)\n return s3_file_path.generate_url(expires_in=600)\n except:\n return \"\"\n return \"\"",
"def get_user_profile(self):\n return self.request('get', 'id/users')",
"def profile():\n from flickrAPI import FlickrAPI\n #flickr = FlickrAPI(key=session['resource_owner_key'], secret=session['resource_owner_secret'])\n flickr = FlickrAPI(key=request.cookies.get('oauth_token'), secret=request.cookies.get('oauth_token_secret'))\n faves = flickr.favorites_getList(user_id=\"44124394781@N01\", page=1, per_page=5, extras='owner_name')\n return str(faves)",
"def get_profile_info(self):\n\n drill_path = str(Path.home())+\"/Documents/ball_e_profiles/drill_profiles/{drill_name}/{drill_name}.csv\".format(\n drill_name=self.drill_name)\n with open(drill_path) as file:\n csv_reader = csv.reader(file, delimiter=',')\n row_count = 0\n info_dict = dict()\n for row in csv_reader:\n if row_count == 0:\n row_count += 1\n else:\n info_dict[row[0]] = [row[1], row[2], row[3]]\n row_count += 1\n\n return info_dict",
"def profile(self) -> dict:\n endpoint = \"/api/users/profile/\"\n ret = self._request(endpoint=endpoint)\n return ret",
"def list_user_info(service):\n profile = service.users().getProfile(userId='me').execute()\n return profile",
"def user_details():\n url = 'https://api.github.com/orgs/facebook/repos'\n json_obj = urllib2.urlopen(url)\n userdata = json.load(json_obj)\n if 'error' in userdata:\n print 'errors are scanned in data'\n for data in userdata:\n if 'name' in data:\n if data['name'] == 'codemod':\n print 'language used'\n print data['language']\n print 'number of watchers'\n print data['watchers']\n print 'git url'\n print data['git_url']\n print 'open issues'\n print data['open_issues']\n print 'permissions for user'\n print 'push'\n print data['permissions']['push']\n print 'pull'\n print data['permissions']['pull']",
"def describe_my_user_profile():\n pass",
"def get_gallery_profile(self):\n url = (\"https://api.imgur.com/3/account/{0}/\"\n \"gallery_profile\".format(self.name))\n return self._imgur._send_request(url)",
"def profile_get(request):\n fields = [\"email\", \"token\", \"filename\"]\n\n # serializes the quert string to a dict (neeto)\n args = request.args\n auth = azure_refresh_token(args[\"token\"])\n if not auth[0]:\n return http400(\"Not Authenticated\")\n query_validation = validate_query_params(args, fields)\n # check that body validation succeeded\n if query_validation[1] != 200 or not validate_photo(args[\"filename\"]):\n return query_validation\n\n profile_storage = Storage(\"biit_profiles\")\n\n try:\n response = {\n \"data\": profile_storage.get(args[\"filename\"]),\n \"access_token\": auth[0],\n \"refresh_token\": auth[1],\n }\n\n return jsonHttp200(\"File Received\", response)\n except:\n return http400(\"File not found\")",
"def add_profile_photo():\n pass",
"def get_profile(self):\n endpoint = '/profile'\n return self.get_request(endpoint)",
"def get_profile():\n logger.debug(\"entering function get_profile\")\n response = read_user_profile()\n logger.debug(\"exiting function get_profile\")\n return jsonify(response)",
"def getAvatarInfo(self):\n return \", \".join(self._get_avatar_info())",
"def profile():\n recipes = mongo.db.recipes.find({\"created_by\": session[\"user\"]})\n return render_template(\"accounts/profile.html\", recipes=recipes)",
"def getUserProfile(request):\n user = request.user\n serializer = UserSerializer(user, many=False)\n return Response(serializer.data)",
"def getProfile(self):\n # GET /profile\n debugMain('getProfile')\n return self._genericGet('/profile')",
"def getUserInfo(UserId):\n url = f\"https://users.roblox.com/v1/users/{UserId}\"\n r = requests.get(url)\n j = json.loads(r.text)\n displayName = j['displayName']\n name = j['name']\n uid = j['id']\n isBanned = j['isBanned']\n joinDate = j['created']\n description = j['description']\n return displayName,name,uid,isBanned,joinDate,description",
"def get_profile():\n\n if request['user_id']:\n\n user = User.select().where(User.id == request['user_id']).get()\n uSchema = UserSchema()\n jsonUser = uSchema.dumps(user)\n\n del request['user_id']\n return jsonUser.data\n\n return",
"def profile_pic(self):\n raise AttributeError('profile_pic is not a readable attribute')"
]
| [
"0.6857109",
"0.66098845",
"0.6390527",
"0.636934",
"0.6362527",
"0.634142",
"0.62794304",
"0.62617004",
"0.62315524",
"0.62301964",
"0.62144315",
"0.61914563",
"0.61870426",
"0.6123204",
"0.6103439",
"0.60492516",
"0.60465515",
"0.6041822",
"0.60295165",
"0.59979457",
"0.5982579",
"0.59698814",
"0.59421414",
"0.59325343",
"0.591422",
"0.59075177",
"0.58791065",
"0.58687174",
"0.5863009",
"0.58451563"
]
| 0.66463876 | 1 |
Get number of achievements for the user and a list of achievement ids and unlock dates | def getAchievements(self, start=None, length=5):
if start == None:
start = len(self.achievements)
doc = minidom.parse(urllib.urlopen("%s/rest/achievements/%s/%i/%i" % (serverString, self.name, start, length)))
if int(doc.getElementsByTagName("status")[0].firstChild.data) != 1:
raise ServerError(doc.getElementsByTagName("status")[0].firstChild.data)
for element in doc.getElementsByTagName("achievement"):
guid = element.getElementsByTagName("guid")[0].firstChild.data
date = element.getElementsByTagName("date")[0].firstChild.data
date = datetime.datetime.strptime(date[:date.rfind(".")]+".GMT", "%Y-%m-%d %H:%M:%S.%Z")
self.achievements += [Achievement(guid, date)] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def achievements(self, ctx: commands.Context):\r\n # The milestones for each achievement type\r\n milestones_dict_of_achievements = {\r\n 'times_entertained': [5, 25, 100, 250, 500, 1000, 5000, 10000, 100000, 1000000],\r\n 'times_fed': [5, 25, 25, 50, 100, 500, 1000, 10000, 100000, 1000000],\r\n 'times_cleaned': [5, 25, 100, 250, 500, 1000, 5000, 10000, 100000, 1000000],\r\n 'times_caught': [5, 25, 100, 250, 500, 1000, 5000, 10000, 100000, 1000000],\r\n 'tanks_owned': [1, 3, 5, 10],\r\n 'times_gambled': [5, 25, 100, 250, 500, 1000, 5000, 10000, 100000, 1000000],\r\n 'money_gained': [100, 500, 1000, 5000, 10000, 50000, 100000, 500000, 1000000, 10000000],\r\n }\r\n\r\n\r\n # Database variables\r\n async with self.bot.database() as db:\r\n user_achievement_milestone_data = await db(\"\"\"SELECT * FROM user_achievements_milestones WHERE user_id = $1\"\"\", ctx.author.id)\r\n user_achievement_data = await db(\"\"\"SELECT * FROM user_achievements WHERE user_id = $1\"\"\", ctx.author.id)\r\n tank_data = await db(\"\"\"SELECT tank FROM user_tank_inventory WHERE user_id = $1\"\"\", ctx.author.id)\r\n if not user_achievement_data:\r\n user_achievement_data = await db(\"\"\"INSERT INTO user_achievements (user_id) VALUES ($1) RETURNING *\"\"\", ctx.author.id)\r\n if not user_achievement_milestone_data:\r\n user_achievement_milestone_data = await db(\"\"\"INSERT INTO user_achievements_milestones (user_id) VALUES ($1) RETURNING *\"\"\", ctx.author.id)\r\n\r\n # Getting the users data into a dictionary for the embed and ease of access\r\n user_achievement_data_dict = {}\r\n for achievement_type_database, achievement_amount_database in user_achievement_data[0].items():\r\n if achievement_type_database != \"user_id\":\r\n user_achievement_data_dict[achievement_type_database] = achievement_amount_database\r\n\r\n # Getting the users amount of tanks and adding that to the user data dictionary\r\n tanks = 0\r\n if not tank_data:\r\n tanks = 0\r\n else:\r\n for tank in tank_data[0]['tank']:\r\n if tank is True:\r\n tanks += 1\r\n user_achievement_data_dict[\"tanks_owned\"] = tanks\r\n\r\n # Setting claimable to non as default\r\n Achievements_that_are_claimable = {}\r\n are_there_any_claimable_achievements_check = False\r\n\r\n # Creating the embed\r\n embed = discord.Embed(title=f\"**{ctx.author.display_name}**'s achievements\")\r\n\r\n # Set Variables for milestones, default to nonclaimable, and default stars to nothing\r\n for achievement, user_achievement_value in user_achievement_data_dict.items():\r\n milestone = f\"{achievement}_milestone\"\r\n is_achievement_claimable = \"nonclaimable\"\r\n list_of_stars_per_achievement = []\r\n # Checks what type of star to add\r\n for milestone_value in milestones_dict_of_achievements[achievement]:\r\n if user_achievement_milestone_data[0][f\"{milestone}_done\"] is True:\r\n list_of_stars_per_achievement.append(\"<:achievement_star:877646167087906816>\")\r\n elif milestone_value < user_achievement_milestone_data[0][milestone]:\r\n list_of_stars_per_achievement.append(\"<:achievement_star:877646167087906816>\")\r\n elif milestone_value <= user_achievement_value:\r\n list_of_stars_per_achievement.append(\"<:achievement_star_new:877737712046702592>\")\r\n else:\r\n list_of_stars_per_achievement.append(\"<:achievement_star_no:877646167222141008>\")\r\n # Grammar stuff and the number of stars said\r\n next_unclaimable_star = 0\r\n st_nd_rd_th_grammar = 'th'\r\n for single_star_per_star_list in list_of_stars_per_achievement:\r\n if single_star_per_star_list != 
\"<:achievement_star:877646167087906816>\":\r\n next_unclaimable_star += 1\r\n break\r\n next_unclaimable_star += 1\r\n if next_unclaimable_star == 1:\r\n st_nd_rd_th_grammar = 'st'\r\n elif next_unclaimable_star == 2:\r\n st_nd_rd_th_grammar = 'nd'\r\n elif next_unclaimable_star == 3:\r\n st_nd_rd_th_grammar = 'rd'\r\n\r\n # Sets the milestonme to be claimable if it is\r\n if user_achievement_value >= user_achievement_milestone_data[0][milestone] and user_achievement_milestone_data[0][f'{milestone}_done'] is False:\r\n if are_there_any_claimable_achievements_check is False:\r\n are_there_any_claimable_achievements_check = True\r\n Achievements_that_are_claimable[achievement] = milestones_dict_of_achievements[achievement].index(user_achievement_milestone_data[0][milestone])\r\n is_achievement_claimable = \"claimable\"\r\n if user_achievement_milestone_data[0][f'{milestone}_done'] is True:\r\n value_data = 'All achievements have been claimed!'\r\n name_data = ''\r\n else:\r\n value_data = ''\r\n value_data = f\"{(user_achievement_value/user_achievement_milestone_data[0][milestone])}% of **{next_unclaimable_star}**{st_nd_rd_th_grammar} star\"\r\n name_data = f\"{user_achievement_value:,}/{user_achievement_milestone_data[0][milestone]:,}\"\r\n embed.add_field(name=f\"{achievement.replace('_', ' ').title()} {name_data}\", value=f\"{value_data}\\n{''.join(list_of_stars_per_achievement)} \\n**{is_achievement_claimable}**\")\r\n\r\n # Adds a button to the message if there are any claimable achievements\r\n if are_there_any_claimable_achievements_check is True:\r\n components = vbu.MessageComponents(\r\n vbu.ActionRow(\r\n vbu.Button(custom_id=\"claim_all\", emoji=\"1\\N{COMBINING ENCLOSING KEYCAP}\"),\r\n ),\r\n )\r\n claim_message = await ctx.send(embed=embed, components=components)\r\n else:\r\n # Doesnt add a button if theres no claimable achievements\r\n return await ctx.send(embed=embed)\r\n\r\n # Make the button check\r\n def button_check(payload):\r\n if payload.message.id != claim_message.id:\r\n return False\r\n self.bot.loop.create_task(payload.defer_update())\r\n return payload.user.id == ctx.author.id\r\n\r\n\r\n pressed = False\r\n while True:\r\n\r\n # Wait for them to click a button\r\n try:\r\n chosen_button_payload = await self.bot.wait_for('component_interaction', timeout=60.0, check=button_check)\r\n chosen_button = chosen_button_payload.component.custom_id.lower()\r\n except asyncio.TimeoutError:\r\n await claim_message.edit(components=components.disable_components())\r\n break\r\n\r\n # Sets reward and if the button is clicked...\r\n amount_of_doubloons_earned = 0\r\n if chosen_button == \"claim_all\":\r\n pressed = True\r\n for achievement_button, user_achievement_position_button in Achievements_that_are_claimable.items():\r\n amount_per_achievement = user_achievement_position_button + 1\r\n print(achievement_button)\r\n print(user_achievement_position_button)\r\n print(amount_per_achievement)\r\n for x in range(amount_per_achievement):\r\n print(x)\r\n amount_of_doubloons_earned += x + 1\r\n print(amount_of_doubloons_earned)\r\n if achievement_button == 'tanks_owned' and user_achievement_position_button >= 3:\r\n async with self.bot.database() as db:\r\n await db(\"\"\"UPDATE user_achievements_milestones SET {0} = TRUE WHERE user_id = $1\"\"\".format(f\"{achievement_button}_milestone_done\"), ctx.author.id)\r\n elif user_achievement_position_button >= 9:\r\n async with self.bot.database() as db:\r\n await db(\"\"\"UPDATE user_achievements_milestones SET {0} = TRUE WHERE 
user_id = $1\"\"\".format(f\"{achievement_button}_milestone_done\"), ctx.author.id)\r\n else:\r\n async with self.bot.database() as db:\r\n await db(\"\"\"UPDATE user_achievements_milestones SET {0} = $1 WHERE user_id = $2\"\"\".format(f\"{achievement_button}_milestone\"), milestones_dict_of_achievements[achievement_button][user_achievement_position_button + 1], ctx.author.id)\r\n async with self.bot.database() as db:\r\n await db(\r\n \"\"\"INSERT INTO user_balance (user_id, doubloon) VALUES ($1, $2)\r\n ON CONFLICT (user_id) DO UPDATE SET doubloon = user_balance.doubloon + $2\"\"\",\r\n ctx.author.id, amount_of_doubloons_earned)\r\n components.get_component(chosen_button).disable()\r\n break\r\n if pressed is True:\r\n await ctx.send(f\"Rewards claimed, you earned {amount_of_doubloons_earned} <:doubloon:878297091057807400>!\")",
"def getAchievements(self) -> list:\n return self.state[ACHIEVEMENTS]",
"def loadAchievementList():\n global achievements\n achievements = {}\n doc = minidom.parse(urllib.urlopen(serverString + \"/data/achievements.xml\"))\n for element in doc.getElementsByTagName(\"achievement\"):\n key = element.getElementsByTagName(\"id\")[0].firstChild.data\n name = element.getElementsByTagName(\"name\")[0].firstChild.data\n description = element.getElementsByTagName(\"description\")[0].firstChild.data\n achievements[key] = (name, description)",
"def user_stats(request):\r\n user_count = UserMgr.count()\r\n pending_activations = ActivationMgr.count()\r\n users_with_bookmarks = BmarkMgr.count(distinct_users=True)\r\n return _api_response(request, {\r\n 'count': user_count,\r\n 'activations': pending_activations,\r\n 'with_bookmarks': users_with_bookmarks\r\n })",
"def get_queryset(self):\r\n username = self.kwargs['username']\r\n return models.Achievements.objects.filter(username = username).order_by('-date')",
"async def get_achievements_detail_item(\n self, xuid, service_config_id, achievement_id, **kwargs\n ) -> AchievementResponse:\n url = f\"{self.ACHIEVEMENTS_URL}/users/xuid({xuid})/achievements/{service_config_id}/{achievement_id}\"\n resp = await self.client.session.get(\n url, headers=self.HEADERS_GAME_PROGRESS, **kwargs\n )\n resp.raise_for_status()\n return AchievementResponse(**resp.json())",
"def get_achievements(tag, platform=\"pc\", region=\"eu\"):\n #\n try:\n context = ssl._create_unverified_context()\n achievements = json.load(\n const.codec(urlopen(const.URL + platform + \"/\" + region + \"/\" + tag + \"/achievements\", context=context)))\n #\n if \"error\" in achievements:\n raise BattleTagNotFound(achievements['error'])\n exit(1)\n #\n result = a.Achievements(achievements['totalNumberOfAchievements'],\n achievements['numberOfAchievementsCompleted'],\n achievements['finishedAchievements'])\n return result\n except urllib.error.URLError as e:\n print(\"An error occurred when fetching stats\\n\" + e)\n exit(1)\n except Exception as e:\n print(\"An error occurred:\\n \" + str(e))\n exit(1)",
"def get_usage_count(request, user_id):\n board_count = Member.objects.filter(user_id=user_id, is_creator=True).count()\n return Response({\"board_count\": board_count})",
"def inspect_achievement(achievements, entity_type, entity, dirty_fields):\n\n # Getting only achievements that have sense to be checked\n achievements = achievements.filter(\n entity_type=entity_type,\n requirements__key__in=dirty_fields.keys()\n # NOTE: Excluding unlocked achievements\n ).exclude(\n id__in=entity.achievements.values_list('achievement_id', flat=True)\n ).distinct()\n\n # Inspecting\n for achievement in achievements:\n achievement.logic.inspect(entity)",
"async def get_aces_used(user_id):\n aces_used = ex.first_result(await ex.conn.fetchrow(\"SELECT acesused FROM blackjack.currentstatus WHERE userid = $1\", user_id))\n if aces_used is None:\n return []\n return aces_used.split(',')",
"def get_info(user):\n from Game.models import Ownership\n response = {}\n wallet = Wallet.objects.get(user=user)\n response['liquid'] = wallet.liquid_with_loans\n value_wallet = wallet.liquid_with_loans\n ownerships = Ownership.objects.filter(wallet=wallet, quantity__gt=0)\n assets = []\n asset_communication = ACommunication(settings.API_URL)\n for o in ownerships:\n asset = asset_communication.get_asset_quote(o.asset)\n asset.quantity = o.quantity\n value_wallet += o.quantity * asset.sell\n assets.append(asset)\n response['assets'] = assets\n response['value_wallet'] = value_wallet\n response['error'] = False\n return response",
"def getInfo(self):\n self.name, self.description = achievements[self.id]",
"def get_guild_achievements_data(self, region, namespace, realm_slug, guild_slug, **filters):\n filters['namespace'] = namespace\n params = [realm_slug, guild_slug]\n return self.get_resource('data/wow/guild/{0}/{1}/achievements', region, *params, **filters)",
"def _compute_user_stats():\n user_stats = []\n \n wmt16_group = Group.objects.filter(name='WMT16')\n wmt16_users = _get_active_users_for_group(wmt16_group)\n \n for user in wmt16_users:\n _user_stats = HIT.compute_status_for_user(user)\n _name = user.username\n _avg_time = seconds_to_timedelta(_user_stats[1])\n _total_time = seconds_to_timedelta(_user_stats[2])\n _data = (_name, _user_stats[0], _avg_time, _total_time)\n \n if _data[0] > 0:\n user_stats.append(_data)\n \n # Sort by total number of completed HITs.\n user_stats.sort(key=lambda x: x[1])\n user_stats.reverse()\n \n return user_stats",
"def get_count(username):\n return get_contributor(username)[\"count\"]",
"def assignments_count(request):\r\n count_assignments = 0\r\n # check if current user is authenticated or not.\r\n if request.user.is_authenticated:\r\n try:\r\n # get assignments count for students.\r\n student = request.user.student\r\n count_assignments = Assignment.objects.assignments_count(student)\r\n except:\r\n # get assignments count for adminhod.\r\n try: \r\n if request.user.adminhod:\r\n count_assignments = Assignment.objects.assignments_count()\r\n except:\r\n pass \r\n return {'count_assignments':count_assignments}",
"async def get_user_hw_action_list_count(\n request: Request,\n user_id: object = None,\n name=None) -> int:\n\n ret_val = 0\n query_str = get_user_hw_action_list_count_query\n try:\n\n async with request.app.pg.acquire() as connection:\n row = await connection.fetchval(query_str, user_id, name)\n if row is not None:\n ret_val = row\n except Exception as gclcerr:\n logger.error('get_user_hw_action_list_count service erred with: {}'.format(gclcerr))\n\n return ret_val",
"async def get_achievements_xbox360_earned(\n self, xuid, title_id, **kwargs\n ) -> Achievement360Response:\n url = f\"{self.ACHIEVEMENTS_URL}/users/xuid({xuid})/achievements?\"\n params = {\"titleId\": title_id}\n resp = await self.client.session.get(\n url, params=params, headers=self.HEADERS_GAME_360_PROGRESS, **kwargs\n )\n resp.raise_for_status()\n return Achievement360Response(**resp.json())",
"def get(self):\n return {'status': 'success', 'count': Announcement.query.count()}, 200",
"async def get_achievements_xbox360_all(\n self, xuid, title_id, **kwargs\n ) -> Achievement360Response:\n url = f\"{self.ACHIEVEMENTS_URL}/users/xuid({xuid})/titleachievements?\"\n params = {\"titleId\": title_id}\n resp = await self.client.session.get(\n url, params=params, headers=self.HEADERS_GAME_360_PROGRESS, **kwargs\n )\n resp.raise_for_status()\n return Achievement360Response(**resp.json())",
"def add_achievement(achievement_id):\r\n\r\n\tcurrent_unlocked_achievements = str(config.get(\"GAMEDATA\", \"ACHIEVEMENTS\")) # Get str from memory\r\n\tcurrent_unlocked_achievements = current_unlocked_achievements.split(\", \") # Converts str to list\r\n\r\n\t# We check if the player already have the achievement\r\n\tfor achievement in current_unlocked_achievements:\r\n\t\tif achievement_id == achievement: # If the player already have the achievement...\r\n\t\t\treturn\r\n\t\r\n\t# If we're here, this means that the player doesn't have the achievement yet, so we append it to the save.\r\n\r\n\tcurrent_unlocked_achievements.append(achievement_id) # Add item to list\r\n\tcurrent_unlocked_achievements = ', '.join(current_unlocked_achievements) # Convert list to str\r\n\r\n\tachievement_name = getstring(world.WORLD_ACHIEVEMENTS[achievement_id][\"NAME\"])\r\n\tcprint(\"You unlocked the achievement '\" + achievement_name + \"' !\", \"green\")",
"def get_user_profile_visits(self, username):\n cohorts = current_user.roles\n print 'cohorts'\n print cohorts\n user_visitors = GoogleAnalyticsVisitors.query.filter_by(username=username).all()\n\t\n user_visitors_dict_list = [x.as_dict() for x in user_visitors]\n user_visits = []\n for visit_dict in user_visitors_dict_list:\n date = visit_dict['date']\n count = visit_dict['visitors']\n user_visits.append([date,count])\n \tif cohorts:\n \t return make_response(dumps([{'key':\"Your visitors\", 'values':user_visits}]))\n \telse:\n \t start = user_visitors[-1].date\n \t end = user_visitors[0].date\n \t lines = []\n \t for cohort in cohorts:\n \t\tcohort_name = cohort.name\n \t\tcohort_visitors = GoogleAnalyticsVisitors.query.filter(GoogleAnalyticsVisitors.date >= start, GoogleAnalyticsVisitors.date <= end).filter_by(username=\"cohort:\"+cohort_name).all() \n \t\tvalues = []\n \t\tcohort_visitors_dict_list = [x.as_dict() for x in cohort_visitors]\n \t\n \t\tfor visit_dict in cohort_visitors_dict_list:\n \t\t date = visit_dict['date']\n \t\t count = visit_dict['visitors']\n \t\t values = [[date,count]] + values\n \t\tif values:\n \t\t lines.append({'key':cohort_name +'\\'s visitors','values':values})\n \t lines.append({'key':\"Your visitors\",'values':user_visits})\n\n \t return make_response(dumps(lines))",
"def get_activities_by_user(request, user_id):\n\n activities_info = []\n\n for activity in Activity.objects.filter(user=user_id):\n\n result = activity.result\n goal = result.goal\n ch = goal.cost_holder\n\n for record in activity.activityrecord_set.all():\n\n activities_info.append({\n 'ch': ch.name,\n 'goal': goal.name,\n 'result': result.name,\n 'activity': activity.name,\n 'start': record.start_datetime.isoformat(),\n 'end': record.end_datetime.isoformat(),\n 'delta': record.delta_seconds,\n })\n\n return HttpResponse(json.dumps(\n {'activities': activities_info}\n ))",
"def health(request):\n return HttpResponse(User.objects.count())",
"def test_get_user_assignable_assessments(self):\n api_instance = relias_api_client.AssessmentsApi(relias_api_client.ApiClient())\n result = api_instance.get_user_assignable_assessments(\"[email protected]\")\n self.assertEqual(result.total_count, 255)",
"def userstats(request):\r\n with ReqAuthorize(request):\r\n user = UserMgr.get(username=request.user.username)\r\n return {\r\n 'user': user,\r\n 'username': user.username,\r\n }",
"def get_incidents(self) -> tuple[list[Any], Any, Any | None]:\n timestamp = None\n fetch_limit = arg_to_number(self.fetch_limit)\n fetch_time = self.fetch_time\n if not fetch_limit or not fetch_time:\n raise DemistoException('Missing parameter - fetch limit or fetch time')\n last_run = demisto.getLastRun()\n if last_run and last_run.get('timestamp'):\n timestamp = last_run.get('timestamp', '')\n last_fetched_ids = last_run.get('last_fetched_ids', [])\n else:\n if last_fetch := arg_to_datetime(fetch_time, required=True):\n # convert to ISO 8601 format and add Z suffix\n timestamp = last_fetch.strftime(DATE_FORMAT)\n last_fetched_ids = []\n\n page_size = '100'\n # set the until argument to prevent duplicates\n until = get_now_time()\n response = self.list_incidents_request(page_size, '0', until, timestamp)\n if not response.get('items'):\n return [], last_fetched_ids, timestamp\n\n page_number = response.get('totalPages', 1) - 1\n total = 0\n total_items: list[dict] = []\n while total < fetch_limit and page_number >= 0:\n try:\n response = self.list_incidents_request(page_size, page_number, until, timestamp)\n except HTTPError as e:\n if e.response is not None and e.response.status_code == 429:\n raise DemistoException(\n 'Too many requests, try later or reduce the number of Fetch Limit parameter.'\n ) from e\n raise e\n\n items = response.get('items', [])\n new_items = remove_duplicates_for_fetch(items, last_fetched_ids)\n # items order is from old to new , add new items at the start of list to maintain order\n total_items = new_items + total_items\n total += len(new_items)\n page_number -= 1\n\n # bring the last 'fetch_limit' items, as order is reversed\n total_items = total_items[len(total_items) - fetch_limit:]\n return total_items, last_fetched_ids, timestamp",
"def getBadgeAwardedTime(userId, badgeId):\n url = f\"https://badges.roblox.com/v1/users/{userId}/badges/awarded-dates?badgeIds={badgeId}\"\n r = requests.get(url)\n j = json.loads(r.text)\n return j['data']",
"async def list(self, ctx, user=None, date=None):\n if not user:\n user = ctx.message.author\n else:\n user = util.GetUserFromNameStr(ctx.message.server.members, user)\n change = GetPortfolioChange(user.id)\n portfolio = GetPortfolio(user.id, util.GetTimestamp(date))\n await self.bot.say(\n '```%s\\'s portfolio:\\n'\n 'Total Value: $%s (%.2f%s) \\n'\n '%s```' % (user, portfolio.Value(), change, \"%\", portfolio.AsTable()))",
"async def list(self, ctx, user: discord.Member=None):\n\n author = ctx.message.author\n\n if not user:\n user = author\n\n game_list = get_library()\n\n if check_key(user.id) and game_list.get(user.id).get(\"games\", False):\n user_game_list = get_library(user.id)\n\n message = pagify(\", \".join(sorted(user_game_list)), [', '])\n\n await self.bot.say(\"Please check your DM for the full list of games, {}.\".format(author.mention))\n await self.bot.send_message(author, \"{}'s games:\".format(user.mention))\n\n for page in message:\n await self.bot.send_message(author, (box(page)))\n else:\n await self.bot.say(\"{}, you do not have any games. Add one using `{p}game add <game_name>` and/or link your Steam profile with `{p}game steamlink <steam_id>`.\".format(user.mention, p=ctx.prefix))"
]
| [
"0.69487524",
"0.6321113",
"0.59182847",
"0.58167964",
"0.5750584",
"0.55335814",
"0.5509203",
"0.5495019",
"0.5376555",
"0.53676915",
"0.52398944",
"0.5217144",
"0.51565796",
"0.5069992",
"0.5047279",
"0.5040224",
"0.5028878",
"0.50272846",
"0.50176144",
"0.5015113",
"0.5011064",
"0.5005756",
"0.492927",
"0.49199823",
"0.4918588",
"0.49138668",
"0.49117202",
"0.49094355",
"0.4908698",
"0.4907168"
]
| 0.6477453 | 1 |
Get various stats like height, diet, abilities etc. for a creature | def getStats(self):
if self.type != "CREATURE" and self.type != None:
return
self.stats = _xmlUrlToDict(serverString + "/rest/creature/" + self.id, float) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_character_health(character: dict):\r\n print(\"Your health is: %d\" % character['HP'])",
"def get_damage():\n\n return character['Damage']",
"def get_hp():\n\n return character['HP']",
"def stats(self):\n\n for hero in self.heroes:\n print(\"{} has {} kills and {} deaths.\".format(hero.name, hero.kills, hero.deaths))",
"def get_stats(self):\n if self.character_data is None: raise Exception('You must call get_character() first.')\n character = self.character_data\n if self._stats is not None:\n return self._stats\n\n try:\n prof_bonus = int(character.value(\"H14\"))\n except (TypeError, ValueError):\n raise MissingAttribute(\"Proficiency Bonus\")\n\n index = 15\n stat_dict = {}\n for stat in ('strength', 'dexterity', 'constitution', 'intelligence', 'wisdom', 'charisma'):\n try:\n stat_dict[stat] = int(character.value(\"C\" + str(index)))\n index += 5\n except (TypeError, ValueError):\n raise MissingAttribute(stat)\n\n stats = BaseStats(prof_bonus, **stat_dict)\n self._stats = stats\n return stats",
"def get_weapon_stats(attack_level):\n if attack_level >= 60:\n # Dragon scimitar\n return (67, 66)\n elif attack_level >= 40:\n # Rune scimitar\n return (45, 44)\n elif attack_level >= 30:\n # Adamant scimitar\n return (29, 28)\n elif attack_level >= 20:\n # Mithril scimitar\n return (21, 20)\n elif attack_level >= 10:\n # Black scimitar\n return (19, 14)\n elif attack_level >= 5:\n # Steel scimitar\n return (15, 14)\n else:\n # Iron scimitar\n return (10, 9)",
"def advancedStats():",
"def show_info(self):\n # attr[0] attr[1]\n attrs = [(self.TYP.value, 'nam'),\n ('Skill', 'skl')]\n # voeg ook alle stats en skills in deze lijst toe.\n for stat in Minimals:\n attrs.append((stat.value, stat.name))\n attrs.append(('Spell Battery', 'cur_bat'))\n for stat in StatType:\n attrs.append((stat.value, stat.name))\n for skill in SkillType:\n attrs.append((skill.value, skill.name))\n\n # nu alle mogelijkheden geladen zijn, ga dan aan de slag met diegene die van toepassing zijn\n attr_list = []\n\n import enum\n for attr in attrs:\n value_of_attr = self.get_value_of(attr[1])\n # uitzondering, 'wht' altijd gewoon weergeven\n if attr[0] == StatType.wht.value:\n # deze uitzondering geldt niet voor weapons en shields.\n if not isinstance(self.get_value_of('skl'), enum.Enum): # niet wanneer 'skl' een waarde heeft\n attr_list.append((attr[0], str(value_of_attr)))\n elif value_of_attr:\n if isinstance(value_of_attr, enum.Enum): # uitzondering alleen voor 'skl'\n value_of_attr = value_of_attr.value\n elif attr[0] == StatType.hit.value: # uitzondering alleen voor 'hit'\n value_of_attr = str(value_of_attr)+\"%\"\n attr_list.append((attr[0], str(value_of_attr)))\n\n return attr_list",
"def stats(self):",
"def get_creature_type_properties(self, name):\n return self._get_monster_class(name).PROPERTIES",
"def summary(self):\n name = 'name : ' + self.get_name()\n description = 'description : ' + self.get_description()\n agility = 'agility : ' + str(self.get_agility())\n strength = 'strength : ' + str(self.get_strength())\n health_points = 'health_points : ' + str(self.get_health_points())\n summary = '\\n'.join([name, description, agility, strength, health_points])\n if self.take_weapon():\n summary += self.take_weapon().summary()\n return summary",
"def get_general_stats() ->List[BaseStat]:\n return [PositionalTendencies(),\n SpeedTendencies(),\n ItemGoals(),\n DropshotGoals(),\n DropshotBallPhaseTimes(),\n DropshotStats()\n ]",
"async def get_stats(self, ctx, game: str, name: str):\n\n string = await self.get_demon(ctx, game, name)\n if string is not None:\n async with aiofiles.open(os.path.join(self.data, game, \"demons\", string + \".json\"), \"r\") as f:\n data = await f.read()\n data = json.loads(data)\n for item in await self.stat_table(data):\n await ctx.send(item)",
"def calculate_base_stats(life):\n\tstats = {'arms': None,\n\t\t'legs': None,\n\t\t'melee': None,\n\t\t'speed_max': LIFE_MAX_SPEED}\n\t\n\t_flags = life['flags'].split('|')\n\t\n\tfor flag in _flags:\t\t\n\t\tif flag.count('['):\n\t\t\tif not flag.count('[') == flag.count(']'):\n\t\t\t\traise Exception('No matching brace in ALife type %s: %s' % (species_type, flag))\n\t\t\t\n\t\t\tstats[flag.lower().partition('[')[0]] = flag.partition('[')[2].partition(']')[0].split(',')\n\t\t\n\t\telif flag == 'HUNGER':\n\t\t\tlife['eaten'] = []\t\n\t\n\tif not 'hands' in life:\n\t\tlife['hands'] = []\n\t\n\tlife['life_flags'] = life['flags']\n\t\n\tstats['base_speed'] = bad_numbers.clip(LIFE_MAX_SPEED-len(stats['legs']), 0, LIFE_MAX_SPEED)\n\tstats['speed_max'] = stats['base_speed']\n\t\n\tfor var in life['vars'].split('|'):\n\t\tkey,val = var.split('=')\n\t\t\n\t\ttry:\n\t\t\tlife[key] = int(val)\n\t\t\tcontinue\n\t\texcept:\n\t\t\tpass\n\t\t\n\t\ttry:\n\t\t\tlife[key] = life[val]\n\t\t\tcontinue\n\t\texcept:\n\t\t\tpass\n\t\t\n\t\ttry:\n\t\t\tlife[key] = val\n\t\t\tcontinue\n\t\texcept:\n\t\t\tpass\n\t\n\treturn stats",
"def analysis(self, game_info):\n return game_info['cards'][randint(0, 5)], TO_DROP",
"def stats(self, d_raw_materials=None):\n cm_stats = 'sugar {0} tablespoons remaining\\n'.format(d_raw_materials['sugar'])\n cm_stats += 'butter {0} teaspoons remaining\\n'.format(d_raw_materials['butter'])\n cm_stats += 'dark chocolate {0} tablespoons remaining\\n'.format(d_raw_materials['dark chocolate'])\n cm_stats += 'mint chocolate {0} tablespoons remaining\\n'.format(d_raw_materials['mint chocolate'])\n cm_stats += 'milk chocolate {0} tablespoons remaining\\n'.format(d_raw_materials['milk chocolate'])\n cm_stats += 'light corn syrup {0} teaspoons remaining\\n'.format(d_raw_materials['light corn syrup'])\n cm_stats += 'sweetened condensed milk {0} teaspoons remaining\\n'.format(d_raw_materials[\n 'sweetened condensed milk'])\n cm_stats += 'vanilla extract {0} teaspoons remaining\\n'.format(d_raw_materials['vanilla extract'])\n cm_stats += 'Reese\\'s Pieces {0} tablespoons remaining\\n'.format(d_raw_materials['Reese\\'s Pieces'])\n cm_stats += super(ChocolateMachine, self).stats()\n return cm_stats",
"def generateEnemyStats(healthRange, powerRange, smartsRating):\n\n stats = {\n 'healthRating': healthRange,\n 'powerRating': powerRange,\n 'smartsRating': smartsRating\n }\n return stats",
"def get_heroes_stats(tag, hero, platform=\"pc\", region=\"eu\", mode=\"quickplay\"):\n try:\n context = ssl._create_unverified_context()\n hero_stats = json.load(\n const.codec(\n urlopen(const.URL + platform + \"/\" + region + \"/\" + tag + \"/\" + mode + \"/hero/\" + hero + \"/\", context=context)))\n if \"error\" in hero_stats:\n raise BattleTagNotFound(hero_stats['error'])\n exit(1)\n #\n if bool(hero_stats[hero]) is False:\n raise HeroNotFound(\"An error occurred when fetching stats:\\nThis hero does not exist. Make sure you have input a valid hero name.\")\n exit(1)\n #\n result = h.Hero(\n dc.get_dic_obj(hero_stats[hero], \"Eliminations\", \"Elimination\"),\n dc.get_dic_obj(hero_stats[hero], \"FinalBlows\", \"FinalBlow\"),\n dc.get_dic_obj(hero_stats[hero], \"SoloKills\", \"SoloKill\"),\n dc.get_dic_obj(hero_stats[hero], \"ShotsFired\", \"ShotFired\"),\n dc.get_dic_obj(hero_stats[hero], \"ShotsHit\", \"ShotHit\"),\n dc.get_dic_obj(hero_stats[hero], \"CriticalHits\", \"CriticalHit\"),\n dc.get_dic_obj(hero_stats[hero], \"DamageDone\"),\n dc.get_dic_obj(hero_stats[hero], \"ObjectiveKills\", \"ObjectiveKills\"),\n dc.get_dic_obj(hero_stats[hero], \"Multikill\", \"Multikills\"),\n dc.get_dic_obj(hero_stats[hero], \"CriticalHitsperMinute\", \"CriticalHitperMinute\"),\n dc.get_dic_obj(hero_stats[hero], \"CriticalHitAccuracy\"),\n dc.get_dic_obj(hero_stats[hero], \"EliminationsperLife\", \"EliminationperLife\"),\n dc.get_dic_obj(hero_stats[hero], \"WeaponAccuracy\"),\n dc.get_dic_obj(hero_stats[hero], \"TeleporterPadsDestroyed\", \"TeleporterPadDestroyed\"),\n dc.get_dic_obj(hero_stats[hero], \"TurretsDestroyed\", \"TurretDestroyed\"),\n dc.get_dic_obj(hero_stats[hero], \"SelfHealing\"),\n dc.get_dic_obj(hero_stats[hero], \"Eliminations-MostinLife\", \"Elimination-MostinLife\"),\n dc.get_dic_obj(hero_stats[hero], \"EliminationsperLife\", \"EliminationperLife\"),\n dc.get_dic_obj(hero_stats[hero], \"DamageDone-MostinLife\"),\n dc.get_dic_obj(hero_stats[hero], \"WeaponAccuracy-BestinGame\"),\n dc.get_dic_obj(hero_stats[hero], \"KillStreak-Best\"),\n dc.get_dic_obj(hero_stats[hero], \"DamageDone-MostinGame\"),\n dc.get_dic_obj(hero_stats[hero], \"Eliminations-MostinGame\", \"Elimination-MostinGame\"),\n dc.get_dic_obj(hero_stats[hero], \"FinalBlows-MostinGame\", \"FinalBlow-MostinGame\"),\n dc.get_dic_obj(hero_stats[hero], \"ObjectiveKills-MostinGame\", \"ObjectiveKill-MostinGame\"),\n dc.get_dic_obj(hero_stats[hero], \"ObjectiveTime-MostinGame\"),\n dc.get_dic_obj(hero_stats[hero], \"SoloKills-MostinGame\", \"SoloKill-MostinGame\"),\n dc.get_dic_obj(hero_stats[hero], \"CriticalHits-MostinGame\", \"CriticalHit-MostinGame\"),\n dc.get_dic_obj(hero_stats[hero], \"CriticalHits-MostinLife\", \"CrtiticalHit-MostinLife\"),\n dc.get_dic_obj(hero_stats[hero], \"SelfHealing-Average\"),\n dc.get_dic_obj(hero_stats[hero], \"Deaths-Average\", \"Death-Average\"),\n dc.get_dic_obj(hero_stats[hero], \"SoloKills-Average\", \"SoloKill-Average\"),\n dc.get_dic_obj(hero_stats[hero], \"ObjectiveTime-Average\"),\n dc.get_dic_obj(hero_stats[hero], \"ObjectiveKills-Average\", \"ObjectiveKill-Average\"),\n dc.get_dic_obj(hero_stats[hero], \"FinalBlows-Average\", \"FinalBlow-Average\"),\n dc.get_dic_obj(hero_stats[hero], \"Eliminations-Average\", \"Elimination-Average\"),\n dc.get_dic_obj(hero_stats[hero], \"DamageDone-Average\"),\n dc.get_dic_obj(hero_stats[hero], \"Deaths\", \"Death\"),\n dc.get_dic_obj(hero_stats[hero], \"EnvironmentalDeaths\", \"EnvironmentalDeath\"),\n dc.get_dic_obj(hero_stats[hero], 
\"Medals-Bronze\", \"Medal-Bronze\"),\n dc.get_dic_obj(hero_stats[hero], \"Medals-Silver\", \"Medal-Silver\"),\n dc.get_dic_obj(hero_stats[hero], \"Medals-Gold\", \"Medal-Gold\"),\n dc.get_dic_obj(hero_stats[hero], \"Medals\", \"Medal\"),\n dc.get_dic_obj(hero_stats[hero], \"Cards\", \"Card\"),\n dc.get_dic_obj(hero_stats[hero], \"TimePlayed\"),\n dc.get_dic_obj(hero_stats[hero], \"GamesWon\", \"GameWon\"),\n dc.get_dic_obj(hero_stats[hero], \"ObjectiveTime\"),\n dc.get_dic_obj(hero_stats[hero], \"TimeSpentOnFire\"),\n dc.get_dic_obj(hero_stats[hero], \"Multikill-Best\"),\n )\n return result\n except urllib.error.URLError as e:\n print(\"An error occurred when fetching stats\\n\" + str(e))\n exit(1)\n except Exception as e:\n print(\"An error occurred:\\n \" + str(e))\n exit(1)",
"def bless_basic(unit):\n return {DAMAGE: unit.maximum_damage}",
"def stats(self):\n pass",
"def get_all_heroes_stats(tag, platform=\"pc\", region=\"eu\", mode=\"quickplay\"):\n #\n try:\n context = ssl._create_unverified_context()\n all_heroes = json.load(\n const.codec(urlopen(const.URL + platform + \"/\" + region + \"/\" + tag + \"/\" + mode + \"/allHeroes/\", context=context)))\n #\n if \"error\" in all_heroes:\n raise BattleTagNotFound(all_heroes['error'])\n exit(1)\n #\n result = ah.AllHeroes(dc.get_dic_obj(all_heroes, \"MeleeFinalBlows\", \"MeleeFinalBlow\"),\n dc.get_dic_obj(all_heroes, \"SoloKills\", \"SoloKill\"),\n dc.get_dic_obj(all_heroes, \"ObjectiveKills\", \"ObjectiveKill\"),\n dc.get_dic_obj(all_heroes, \"FinalBlows\", \"FinalBlow\"),\n dc.get_dic_obj(all_heroes, \"DamageDone\"),\n dc.get_dic_obj(all_heroes, \"Eliminations\", \"Elimination\"),\n dc.get_dic_obj(all_heroes, \"EnvironmentalKills\", \"EnvironmentalKill\"),\n dc.get_dic_obj(all_heroes, \"Multikills\", \"Multikill\"),\n dc.get_dic_obj(all_heroes, \"HealingDone\"),\n dc.get_dic_obj(all_heroes, \"Eliminations-MostinGame\", \"Elimination-MostinGame\"),\n dc.get_dic_obj(all_heroes, \"FinalBlows-MostinGame\", \"FinalBlow-MostinGame\"),\n dc.get_dic_obj(all_heroes, \"DamageDone-MostinGame\"),\n dc.get_dic_obj(all_heroes, \"HealingDone-MostinGame\"),\n dc.get_dic_obj(all_heroes, \"DefensiveAssists-MostinGame\", \"DefensiveAssist-MostinGame\"),\n dc.get_dic_obj(all_heroes, \"OffensiveAssists-MostinGame\", \"OffensiveAssist-MostinGame\"),\n dc.get_dic_obj(all_heroes, \"ObjectiveKills-MostinGame\", \"ObjectiveKill-MostinGame\"),\n dc.get_dic_obj(all_heroes, \"ObjectiveTime-MostinGame\"),\n dc.get_dic_obj(all_heroes, \"Multikill-Best\"),\n dc.get_dic_obj(all_heroes, \"SoloKills-MostinGame\", \"SoloKill-MostinGame\"),\n dc.get_dic_obj(all_heroes, \"TimeSpentonFire-MostinGame\"),\n dc.get_dic_obj(all_heroes, \"MeleeFinalBlows-Average\", \"MeleeFinalBlow-Average\"),\n dc.get_dic_obj(all_heroes, \"TimeSpentonFire-Average\"),\n dc.get_dic_obj(all_heroes, \"SoloKills-Average\", \"SoloKill-Average\"),\n dc.get_dic_obj(all_heroes, \"ObjectiveTime-Average\"),\n dc.get_dic_obj(all_heroes, \"ObjectiveKills-Average\", \"ObjectiveKill-Average\"),\n dc.get_dic_obj(all_heroes, \"HealingDone-Average\"),\n dc.get_dic_obj(all_heroes, \"FinalBlows-Average\", \"FinalBlow-Average\"),\n dc.get_dic_obj(all_heroes, \"Deaths-Average\", \"Death-Average\"),\n dc.get_dic_obj(all_heroes, \"DamageDone-Average\"),\n dc.get_dic_obj(all_heroes, \"Eliminations-Average\", \"Elimination-Average\"),\n dc.get_dic_obj(all_heroes, \"Deaths\", \"Death\"),\n dc.get_dic_obj(all_heroes, \"EnvironmentalDeaths\", \"EnvironmentalDeath\"),\n dc.get_dic_obj(all_heroes, \"Cards\", \"Card\"),\n dc.get_dic_obj(all_heroes, \"Medals\", \"Medal\"),\n dc.get_dic_obj(all_heroes, \"Medals-Gold\", \"Medal-Gold\"),\n dc.get_dic_obj(all_heroes, \"Medals-Silver\", \"Medal-Silver\"),\n dc.get_dic_obj(all_heroes, \"Medals-Bronze\", \"Medal-Bronze\"),\n dc.get_dic_obj(all_heroes, \"GamesPlayed\", \"GamePlayed\"),\n dc.get_dic_obj(all_heroes, \"GamesWon\", \"GameWon\"),\n dc.get_dic_obj(all_heroes, \"TimeSpentonFire\"),\n dc.get_dic_obj(all_heroes, \"ObjectiveTime\"),\n dc.get_dic_obj(all_heroes, \"TimePlayed\"),\n dc.get_dic_obj(all_heroes, \"MeleeFinalBlows-MostinGame\", \"MeleeFinalBlow-MostinGame\"),\n dc.get_dic_obj(all_heroes, \"GamesTied\", \"GameTied\") if mode == \"competitive\" else None,\n dc.get_dic_obj(all_heroes, \"GamesLost\", \"GameLost\") if mode == \"competitive\" else None,\n dc.get_dic_obj(all_heroes, \"DefensiveAssists\", \"DefensiveAssist\"),\n 
dc.get_dic_obj(all_heroes, \"DefensiveAssists-Average\", \"DefensiveAssist-Average\"),\n dc.get_dic_obj(all_heroes, \"OffensiveAssists\", \"OffensiveAssist\"),\n dc.get_dic_obj(all_heroes, \"OffensiveAssists-Average\", \"OffensiveAssist-Average\")\n )\n return result\n except urllib.error.URLError as e:\n print(\"An error occurred when fetching stats\\n\" + str(e))\n exit(1)\n except Exception as e:\n print(\"An error occurred:\\n \" + str(e))\n exit(1)",
"def get_health(self):\n self.__health = sum([i.get_health for i in self.__units])\n return self.__health",
"def evaulate_monster(generikmon):\r\n score = generikmon['headA']\r\n #score = generikmon['chinA']\r\n return score",
"def show_stats(self):\n print(\"\\nName: \" + self.name)\n print(\"Element Type: \" + self.element)\n print(\"Health: \" + str(self.current_health) + \" / \" + str(self.max_health))\n print(\"Speed: \" + str(self.speed))",
"def attack(health_meter):\n hit_list = 4 * ['igrac'] + 6 * ['neprijatelj']\n injured_unit = random.choice(hit_list)\n hit_points = health_meter[injured_unit]\n injury = random.randint(10, 15)\n health_meter[injured_unit] = max(hit_points - injury, 0)\n print(\"NAPAD! \", end='')\n show_health(health_meter)",
"def statistic(self):\n\t\tself.planet.tiles[self.y][self.x].set_occupant() # set occupant to the initial tile\n\t\tnum_tile = self.planet.width * self.planet.height\n\t\tsum = 0\n\t\tfor y in range(self.planet.height):\n\t\t\tfor x in range(self.planet.width):\n\t\t\t\ttile = self.planet.tiles[y][x]\n\t\t\t\tif tile.occupant == 1:\n\t\t\t\t\tsum += 1\n\t\tpercent = int((sum/num_tile)*100)\n\t\tprint(\"Explored: {}%\".format(percent))\n\t\tprint(\"Battery: {}/100\".format(self.battery))",
"def get_stats(self):\n stats = \"\\n\\nBOT STATS: This bot currently knowns \"\n if self.intents:\n categoryqty = 0\n patternqty = 0\n responseqty = 0\n\n for intent in self.intents['intents']:\n categoryqty += 1\n patternqty += len(intent['patterns'])\n responseqty += len(intent['responses'])\n \n stats += str(categoryqty)\n stats += \" Categories with in total \"\n stats += str(patternqty)\n stats += \" Input Patterns and \"\n stats += str(responseqty)\n stats += \" possible Responses\"\n stats += \"\\n\\n\"\n\n return stats",
"async def stats(self, ctx: Message):\n\t\tawait self.open_account(ctx.author.id, ctx.author.username)\n\t\tuserid = ctx.author.id\n\t\tusers = await self.get_stats_data()\n\n\t\trancmds = users[str(userid)][\"rancmd\"]\n\t\txp = users[str(userid)][\"xp\"]\n\t\tmsgs = users[str(userid)][\"sentmsgs\"]\n\t\twhisperto = [ctx.author.id]\n\t\tawait self.send(message=f\"{ctx.author.mention} Here are your stats! • Ran {rancmds} DogeBoss commands • XP: {xp} • Sent {msgs} messages\", whisper=whisperto)",
"def summary(self):\n name = 'name : ' + self.get_name()\n damage = 'damage : ' + str(self.get_damage())\n ammos = 'ammo : ' + str(self.get_ammos())\n owner = 'owner : ' + str(self.get_owner())\n return '\\n'.join([name, damage, ammos, owner])",
"def info_from_behaviors(behaviors):\n base_abilities = []\n hp_checkpoints = set()\n hp_checkpoints.add(100)\n card_checkpoints = set()\n has_enemy_remaining_branch = False\n\n for idx, es in enumerate(behaviors):\n # Extract the passives and null them out to simplify processing\n if type(es) in PASSIVE_MAP.values():\n base_abilities.append(es)\n behaviors[idx] = None\n\n # Find candidate branch HP values\n if type(es) == ESBranchHP:\n hp_checkpoints.add(es.branch_value)\n hp_checkpoints.add(es.branch_value - 1)\n\n # Find candidate action HP values\n if skill_has_condition(es):\n cond = es.condition\n if cond and cond.hp_threshold:\n hp_checkpoints.add(cond.hp_threshold)\n hp_checkpoints.add(cond.hp_threshold - 1)\n\n # Find checks for specific cards.\n if type(es) == ESBranchCard:\n card_checkpoints.update(es.branch_value)\n\n # Find checks for specific amounts of enemies.\n if type(es) == ESBranchRemainingEnemies or type(es) == ESAttackUPRemainingEnemies:\n has_enemy_remaining_branch = True\n\n return base_abilities, hp_checkpoints, card_checkpoints, has_enemy_remaining_branch"
]
| [
"0.6707282",
"0.6460665",
"0.6290449",
"0.61498976",
"0.60784054",
"0.60689604",
"0.60551375",
"0.59991527",
"0.59783566",
"0.595218",
"0.58617675",
"0.5846617",
"0.5796851",
"0.5701261",
"0.5671675",
"0.5670004",
"0.56550676",
"0.5632438",
"0.5631292",
"0.56186575",
"0.56045884",
"0.5590875",
"0.55759424",
"0.5521806",
"0.5470438",
"0.546357",
"0.5452315",
"0.5425623",
"0.5421725",
"0.5401377"
]
| 0.7294973 | 0 |
Get asset id and name for assets in a sporecast. | def sporecastAssets(sporecastId, start=0, length=20):
url = "%s/rest/assets/sporecast/%s/%i/%i" % (serverString, sporecastId, start, length)
doc = minidom.parseString(urllib.urlopen(url).read().decode("utf-8", "ignore").encode("ascii", "xmlcharrefreplace"))
if int(doc.getElementsByTagName("status")[0].firstChild.data) != 1:
raise ServerError(doc.getElementsByTagName("status")[0].firstChild.data)
assets = []
for element in doc.getElementsByTagName("asset"):
assets += [Asset()]
assets[-1]._getInfoFromNode(element)
return assets | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sid(self) -> Asset:\n return self.asset",
"def assetName(self):\n\t\treturn self._assetName",
"def asset_id(self) -> str:\n return self.__asset_id",
"def get_name_and_meta(self, asset):\n # Get the asset ID.\n asset_cache_key = asset.get_cache_key()\n name_and_meta = self._cache.get(asset_cache_key)\n if name_and_meta is None:\n # Generate the name.\n asset_hash = asset.get_hash()\n asset_ext = asset.get_save_extension()\n name = \"{prefix}/{folder}/{hash}{ext}\".format(\n prefix = self._prefix,\n folder = asset_hash[:2],\n hash = asset_hash[2:],\n ext = asset_ext,\n )\n # Save the asset's params.\n meta = asset.get_save_meta()\n # Save the file to the asset cache.\n if not self._storage.exists(name):\n asset.save(self._storage, name, meta)\n # Cache the name.\n name_and_meta = (name, meta)\n self._cache.set(asset_cache_key, name_and_meta)\n return name_and_meta",
"def name(self):\n\t\treturn self.asset.name",
"def asset_id(self) -> str:\n return self._asset_id",
"def get_asset(self, asset_id):\n endpoint = '/assets/{}'.format(asset_id)\n return self._api_call('get', endpoint)",
"def _get_asset_info(item, name):\n\n if name in item.assets:\n return item.assets[name]\n elif name.replace(\"B\", \"B0\") in item.assets:\n # Bx -> B0x\n return item.assets[name.replace(\"B\", \"B0\")]\n elif name.replace(\"B0\", \"B\") in item.assets:\n # B0x -> Bx\n return item.assets[name.replace(\"B0\", \"B\")]\n else:\n available = [key for key in item.assets.keys() if key not in [\"thumbnail\", \"overview\", \"info\", \"metadata\"]]\n raise KeyError(\"asset '%s' not found. Available assets: %s\" % (name, avaialable))",
"def get_asset(self, asset_id):\n text, code = ApiClient(self._config, 'assets/' + asset_id).get()\n return Asset.deserialize(text)",
"def get_meta(self, asset):\n return self.get_name_and_meta(asset)[1]",
"def retrieveAsset(self, assetId):\n return self.get_json('/asset/%s' % assetId)",
"def getInfo(self):\n doc = minidom.parse(urllib.urlopen(serverString + \"/rest/asset/\" + self.id))\n self._getInfoFromNode(doc.getElementsByTagName(\"asset\")[0])",
"def get_name(self, asset):\n return self.get_name_and_meta(asset)[0]",
"def get_assets_metadata(self):\n return Metadata(**settings.METADATA['asset_ids'])",
"def get(self, name: str):\n asset = Asset(\"\", name, 0, 0)\n i = bisect_left(self.asset_collection, asset)\n if i != len(self.asset_collection) and self.asset_collection[i].name == asset.name:\n return self.asset_collection[i].to_list()\n return []",
"def asset_info(self, asset_id):\n response = self._client.get('workbenches/assets/%(asset_id)s/info',\n path_params={'asset_id': asset_id})\n return AssetInfo.from_dict(loads(response.text).get('info'))",
"def get_asset_details(self):\n\t\tif self._session:\n\t\t\tresults = self._session.get_asset_details()\n\t\t\tif results.get('success'):\n\t\t\t\treturn results.get('assetDetail', {})\n\n\t\treturn {}",
"def get_asset(self, short_name):\n return self._assets[short_name]",
"def getAssetInfo(self):\n return self._AssetInfo",
"def get_asset(self, name):\n assert self.has_asset(name), \"Asset is not created yet, use has_asset for checking\"\n return self.assets[name]",
"def api_asset_get():\n names = request.args.getlist(\"name\")\n\n result = []\n for name in names:\n asset = app.bank.get(name)\n if asset:\n result.append(asset)\n\n return jsonify(sorted(result)), 200",
"def asset(self, asset_id):\n headers, items = self._get('/asset/%s' % asset_id)\n return Asset.fromdict(items[0], api=self, full=True)",
"def getAssetWithName(self, name):\n return self.__assets[name]",
"def getAssetData(self, assetId):\n return self.get_json('/asset/%s/raw' % assetId)",
"def asset_by_common_name(self) -> Dict:\n if self._assets_by_common_name is None:\n self._assets_by_common_name = OrderedDict()\n for name, a_meta in self.item.assets.items():\n bands = []\n if 'eo:bands' in a_meta.extra_fields.keys():\n bands = a_meta.extra_fields['eo:bands']\n if len(bands) == 1:\n eo_band = bands[0]\n if 'common_name' in eo_band.keys():\n common_name = eo_band['common_name']\n if not self.is_valid_cname(common_name):\n raise ValueError(f'Must be one of the accepted common names. Got \"{common_name}\".')\n else:\n self._assets_by_common_name[common_name] = {'meta': a_meta, 'name': name}\n if not self._assets_by_common_name:\n raise ValueError(f\"Common names for assets cannot be retrieved\")\n return self._assets_by_common_name",
"def get_asset_info(self, id):\n\n if not isinstance(id, six.string_types):\n msg = 'Param id must be a str|unicode.'\n logger.exception(msg)\n raise ValueError(msg)\n\n asset_info = self.stub.get_asset_info(opac_pb2.TaskId(id=id))\n\n return {\n 'url': asset_info.url,\n 'url_path': asset_info.url_path\n }",
"def bsw_getAssetDetails(rootGrpName=None):\n if not rootGrpName:\n if pm.objExists('Texture_Group'):\n rootGrpName = 'Texture_Group'\n elif pm.objExists('rig_grp'):\n rootGrpName = 'rig_grp'\n elif pm.objExists('geo'):\n rootGrpName = 'geo'\n else:\n rootGrpName = 'None'\n astDept = {'Texture_Group': 'Texture', 'rig_grp': 'Rig', 'geo': 'Model', 'None': 'None'}\n if rootGrpName == 'None':\n return 'Not Exist', 'Not Exist', 'Not Exist', 'Not Exist', 'NotExist'\n rootGrp = pm.PyNode(rootGrpName)\n # get episode if environment is series, else return \"Not Exist\" string.\n episode = 'NotExist'\n if os.environ['BSW_PROJECT_TYPE'] == 'series':\n episode = rootGrp.assetEpisode.get()\n return astDept[rootGrpName], rootGrp.assetType.get(), rootGrp.assetName.get(), rootGrp.assetUID.get(), episode",
"def get_name(self):\n return self._assets[0].get_name()",
"def asset_get():\n search_assets = request.args.getlist(\"name\")\n find_assets = []\n for asset_name in search_assets:\n if asset_name in app.bank:\n find_assets.append(app.bank[asset_name].to_list())\n find_assets = sorted(find_assets, key=lambda s: s[0])\n return jsonify(find_assets)",
"def get_asset(self, asset_id, asset_type):\n return self.asset(asset_id, asset_type=asset_type)"
]
| [
"0.6362689",
"0.6348786",
"0.6256097",
"0.62524766",
"0.6244972",
"0.6203028",
"0.61857486",
"0.6132029",
"0.6098499",
"0.6091878",
"0.6044701",
"0.60265857",
"0.6021814",
"0.5971793",
"0.5933424",
"0.589987",
"0.5830877",
"0.5795518",
"0.5786101",
"0.5748652",
"0.5727737",
"0.57193184",
"0.5709193",
"0.56966364",
"0.56874675",
"0.5664212",
"0.56627905",
"0.5658079",
"0.56567365",
"0.5610589"
]
| 0.64142394 | 0 |
The interest will be 10 percent per annum, compounded monthly, using the 30/360 US day count convention. | def interest(self, from_date, to_date):
yearfrac = findates.daycount.yearfrac(from_date,
to_date,
"30/360 US")
months = yearfrac * 12
return Decimal((1.0 + \
self.annual_interest_rate / 12.0) ** months - 1.0) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __calculate_monthly_interest(self):\n return self.__percentage_interest / 12",
"def _prorata_rate(self, days_used, days_in_month):\n return (100 * days_used // days_in_month) / 100.0",
"def calculate(self):\n self._emi_months = self._period * 12\n self._total_interest = math.ceil(self._loan_amount * self._period * self._rate / 100)\n self._total_amount_pi = float(self._loan_amount + self._total_interest)\n self._emi_amount = math.ceil(self._total_amount_pi / self._emi_months)\n return self",
"def effecticeInterestRate():\n rate = float(input(\"What is your interest rate:\\n\"))\n compound = int(input(\"How many times in a year you give interest:\\n\"))\n\n EIR = (1 + ((rate/100)/compound))**compound - 1\n eir = EIR*100\n return \"Your effective interest rate is: %.3f\" % eir",
"def interest_to_principle(self) -> float:\n return float(round(self.total_interest / self.total_principal * 100, 1))",
"def total_interest(self) -> Decimal:\n return self._quantize(self.schedule(int(self.term / self.term_multiplier * self.n_periods)).total_interest)",
"def annualized_gains(self, day='today'):\n assert day == 'today' or isinstance(day, date), 'Error! You have to pass a datetime.date istance to the day parameter.'\n if day == 'today':\n day = self.data.index[-1]\n if self.data.index[-1] >= day >= self.data.index[0]:\n day = self._first_good_date(day)\n initialValue = self.invested_amount(day)\n finalValue = self.value(day)\n numberOfDays = (day - self.data.index[0]).days\n return round(((finalValue / initialValue)**(365/numberOfDays) - 1) * 100, 2) \n else:\n return 0",
"def payment_calc(mortgage_amount: float, years: int, interest: float) -> float:\n number_months = years * 12\n\n interest_monthly = interest / 100 / 12\n\n numerator = interest_monthly * ((1 + interest_monthly) ** number_months)\n denominator = (1 + interest_monthly) ** number_months - 1\n\n payment = round(mortgage_amount * (numerator / denominator), 2)\n\n return payment",
"def calculate_compound_total(principal, interest, n):\n return principal * (1 + interest / 100) ** n",
"def pay_off_fully(balance, annualInterestRate):\n\n #variable assignment\n currentBalance = balance\n monthlyInterestRate = annualInterestRate/12",
"def cal_scaled_interest(nominal_interest_rate, installment_time_period, interest_time_period, interest_paid_on_deposit_percent):\n periods_per_year = np.array([365, 52, 26, 24, 13, 12, 4, 2, 1])\n installments_period_dict = {'days':0, 'weeks':1, 'two-weeks':2, '15 days':3, '4 weeks':4, 'months':5, 'quarters':6, 'half-years':7, 'years':8}\n interest_period_dict = {'day':0, 'week':1, 'two-weeks':2, '15 days':3, '4 weeks':4, 'month':5, 'quarter':6, 'half-year':7, 'year':8}\n\n installments_arr = 1/ (periods_per_year / 12)\n nominal_arr = 1 / installments_arr\n scaled_interest = nominal_interest_rate*installments_arr[installments_period_dict[installment_time_period]] * nominal_arr[interest_period_dict[interest_time_period]]\n security_deposit_scaled_interest = interest_paid_on_deposit_percent / periods_per_year[installments_period_dict[installment_time_period]]\n return scaled_interest, security_deposit_scaled_interest",
"def __init__(self, principle=0, interest_rate=0.0, year=0):\n\n super().__init__(principle, interest_rate, year)\n self.principle = principle\n self.interest_rate = interest_rate\n self.year = year\n # private variable\n self.__date_of_calc = datetime.datetime.now()\n self.__percentage_interest = self.interest_rate / 100\n self.__months = self.year * 12\n # assert validation for the interest rate\n assert isinstance(interest_rate, float), 'is a not a float'",
"def simple_interest(p,r,t):\n \n try:\n p, r, t = float(p), float(r), float(t)\n i = (p*r*t)/100\n A = p + i\n A = round(A, 2)\n return i, A\n except Exception as e:\n return e",
"def __init__(self):\n self.annual_interest_rate = 10.0 / 100.0\n self.initial_loan_date = date(2014, 12, 1)\n self.currency = 'HKD'\n self.total_loan_amount = Money('100000.00', 'HKD')\n self.final_payment_date = self.initial_loan_date + \\\n relativedelta(years=1)",
"def compute_interest(self) -> float:\n interest = self._balance * SavingsAccount.RATE\n self.deposit(interest)\n return interest",
"def interest(account):\n stm = shared_morphene_instance()\n if mph.rpc is not None:\n mph.rpc.rpcconnect()\n if not account:\n if \"default_account\" in mph.config:\n account = [mph.config[\"default_account\"]]\n\n t = PrettyTable([\n \"Account\", \"Last Interest Payment\", \"Next Payment\"\n ])\n t.align = \"r\"\n for a in account:\n a = Account(a, morphene_instance=stm)\n i = a.interest()\n t.add_row([\n a[\"name\"],\n i[\"last_payment\"],\n \"in %s\" % (i[\"next_payment_duration\"])\n ])\n print(t)",
"def get_interest_rates():\n\n try:\n tenure = int(request.get_json()[\"tenure\"])\n except:\n return jsonify({\"message\": \"Input is 'tenure' in months\"}), 400\n\n if tenure <= 5:\n return jsonify({\"interest_rate\": 10}), 200\n elif tenure > 5 and tenure <= 24:\n return jsonify({\"interest_rate\": 12}), 200\n else:\n return jsonify({\"interest_rate\": 15}), 200",
"def calculate_loan(self):\n # calc private method to calculate monthly interest\n monthly_interest = self.__calculate_monthly_interest()\n print(self.__months)\n print(self.principle)\n print(monthly_interest)\n # calc monthly payment.\n monthly_payment = self.principle * (monthly_interest *\n (\n 1 + monthly_interest) ** self.__months) / (\n (1 + monthly_interest) ** self.__months - 1)\n return monthly_payment",
"def monthly_payment(loan, downpayment, n, rmonth):\n f1 = rmonth * (loan - downpayment) * ( (1. + rmonth)**(n) )\n f2 = (1. + rmonth)**(n) - 1.\n return f1/f2",
"def total_interest(self):\n return sum(self.table[\"interest\"])",
"def monthly_gross_income(annual_salary):\n gross_income = annual_salary / 12\n return round_nearest_whole_dollar(gross_income)",
"def budget_problem3(balance, annualInterestRate):\r\n remaining = balance\r\n\r\n # creating the following bounds assists with bisection search\r\n lo = balance/12\r\n hi = ((balance * (annualInterestRate/12))**12)/12\r\n payment = (lo + hi)/2\r\n\r\n while remaining != 0:\r\n for month in range(12):\r\n remaining = (remaining - payment) * (1 + (annualInterestRate/12))\r\n if remaining > 0:\r\n lo = payment\r\n elif round(remaining,2) < -0.11:\r\n hi = payment\r\n else:\r\n break\r\n payment = (lo + hi)/2\r\n remaining = balance\r\n print 'Lowest Payment: ' + str(round(payment,2))\r\n return round(payment,2)",
"def monthly_net_income(gross_income, income_tax):\n return gross_income - income_tax",
"def main():\n locale.setlocale(locale.LC_MONETARY, 'en_US')\n print('Welcome to the Interest Calculator')\n calcualte_interest = 'y'\n while calcualte_interest == 'y':\n loan_amount = prompt_loan_amount()\n interest_rate = prompt_interest_rate()\n interest_amount = loan_amount * interest_rate\n\n print('-' * 50)\n print(f'Loan Amount: {locale.currency(loan_amount, grouping=True):>15}') #15 characters wide; right justified;\n print(f'Interest Rate: {interest_rate:>15.3%}') #15 characters wide; right justified; % format to 3 decimal places\n print(f'Interest Amount: {locale.currency(interest_amount, grouping=True):>15}') #15 characters wide; right justified;\n print('-' * 50)\n calcualte_interest = input('\\nContinue? (y/n): ')",
"def return_schedule(loan, downpayment, n, rmonth):\n # Prepare arrays:\n principal = np.zeros(n)\n interest = np.zeros(n)\n\n # Calculate monthly payment:\n Pmonth = monthly_payment(loan, downpayment, n, rmonth)\n\n # Fill arrays:\n for i in range(n):\n if i == 0:\n interest[i] = (loan - downpayment)*rmonth\n else:\n interest[i] = (loan - downpayment - np.sum(principal[:i]))*rmonth\n principal[i] = Pmonth - interest[i]\n print('Monthly payment is:',Pmonth,'USD')\n return principal, interest",
"def calc_annual_investment(devs, param):\n\n observation_time = param[\"observation_time\"]\n interest_rate = param[\"interest_rate\"]\n q = 1 + param[\"interest_rate\"]\n\n \n # Calculate capital recovery factor\n CRF = ((q**observation_time)*interest_rate)/((q**observation_time)-1)\n \n # Calculate annuity factor for each device\n for device in devs.keys():\n \n # Get device life time\n life_time = devs[device][\"life_time\"]\n\n # Number of required replacements\n n = int(math.floor(observation_time / life_time))\n \n # Inestment for replcaments\n invest_replacements = sum((q ** (-i * life_time)) for i in range(1, n+1))\n\n # Residual value of final replacement\n res_value = ((n+1) * life_time - observation_time) / life_time * (q ** (-observation_time))\n\n # Calculate annualized investments \n if life_time > observation_time:\n devs[device][\"ann_factor\"] = (1 - res_value) * CRF \n else:\n devs[device][\"ann_factor\"] = ( 1 + invest_replacements - res_value) * CRF \n \n\n return devs",
"def daily_incidents(df2):\n\n if (df2[\"Holiday\"] == \"Thanksgiving Day\") | (df2[\"Holiday\"] == \"Christmas Day\"):\n d_inc = df2[\"Total\"] / 18\n elif df2[\"Holiday\"] == \"Non-holidays\":\n d_inc = df2[\"Total\"] / 6712\n else:\n d_inc = df2[\"Total\"] / 19\n\n return d_inc",
"def lapserate_moist_adiabate():\n return 6.5",
"def get_NextMonthsBalance(self):\n balance = (self.principal * math.exp(self.interestRate * (1/12))) - self.actualMonthlyPayment\n if balance <= 0:\n return 0\n return balance",
"def getLoanInterest(self, credit):\n if credit >= 720:\n return 0.11\n elif credit >= 680:\n return 0.14\n elif credit >= 640:\n return 0.19\n else:\n return -1"
]
| [
"0.7366988",
"0.61610496",
"0.60701096",
"0.59977466",
"0.5990223",
"0.5928109",
"0.5894441",
"0.5854523",
"0.58104223",
"0.5760911",
"0.57510215",
"0.5710957",
"0.5657481",
"0.56404054",
"0.56403047",
"0.56179816",
"0.55730104",
"0.5527004",
"0.549311",
"0.5468215",
"0.5457764",
"0.5444441",
"0.5418662",
"0.5406826",
"0.54036576",
"0.53999454",
"0.5339693",
"0.5333005",
"0.53229433",
"0.5318251"
]
| 0.7286555 | 1 |
Any principal amounts in this loan will be paid in Hong Kong dollars. Any accrued interest shall be paid in the form of Bitcoin, with the interest rate calculated in Hong Kong dollars. | def payments(self, loan):
self.currency_interest = "XBT"
"""The lender agrees to provide the borrower half of the loan amount
on the initial loan on the initial date"""
loan.fund(on=self.initial_loan_date,
amount=self.total_loan_amount * \
Decimal(0.5))
"""The lender agrees to pledge the remaining loan amount toward
the kickstarter campaign of the borrower."""
loan.fund(on=self.kickstarter_payment_date,
amount=self.total_loan_amount * \
Decimal(0.5))
""" Standard payment schedule - The borrower intends to
payback period will be separated into 8 installments and
completed in 8 months. The payback will begin in the 5th
month. However, unless the special conditions are triggered,
the borrower is required to only pay the interest on the loan
until the final payment date."""
""" Special payment schedule - If First campaign funded over
USD 65,000, the borrower must pay back entire loan including
one year interest within the two months after Crowd Funding
Platform pay the fund."""
""" If First campaign funded over USD 58,000, will pay back 4
Installment in advance, after Crowd Funding Platform pay the
fund. The rest of the loan will keep paying followed the
standard schedule until all loan including interest is paid
back."""
if (self.kickstarter_revenue > Money(65000, "USD")):
payment_date = self.kickstarter_payment_date + \
relativedelta(months=2)
loan.add_to_balance(on=payment_date,
amount = loan.interest(payment_date,
self.final_payment_date,
loan.remaining_balance()))
loan.payment(on=payment_date,
amount = loan.remaining_balance())
else:
if (self.kickstarter_revenue > Money(58000, "USD")):
payment_date = self.kickstarter_payment_date + \
relativedelta(months=2)
loan.payment(on=payment_date,
amount = lambda : loan.remaining_principal()() * Decimal(0.5))
start_payment_date = self.initial_loan_date + \
relativedelta(months=4)
loan.amortize(on=start_payment_date,
amount = loan.remaining_balance(),
payments=8,
interval=relativedelta(months=1))
"""The borrower agrees to pay back the any remaining principal
and accrued interest one year after the loan is issued."""
loan.payment(on=self.final_payment_date,
amount= loan.remaining_balance()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self):\n self.annual_interest_rate = 10.0 / 100.0\n self.initial_loan_date = date(2014, 12, 1)\n self.currency = 'HKD'\n self.total_loan_amount = Money('100000.00', 'HKD')\n self.final_payment_date = self.initial_loan_date + \\\n relativedelta(years=1)",
"def calculate_compound_total(principal, interest, n):\n return principal * (1 + interest / 100) ** n",
"def pay_off_fully(balance, annualInterestRate):\n\n #variable assignment\n currentBalance = balance\n monthlyInterestRate = annualInterestRate/12",
"def calculate(self):\r\n if self.__calculation_type == self.__DIFFERENTIATED_PAY:\r\n for month in range(1, self.__principal_term+1):\r\n self.__differentiated_pay.append(\r\n ceil(\r\n (self.__credit_principal/self.__principal_term)\r\n + self.__credit_interest*(self.__credit_principal\r\n - (self.__credit_principal\r\n * (month-1))\r\n / self.__principal_term)\r\n )\r\n )\r\n self.__overpayment = sum(self.__differentiated_pay) - self.__credit_principal\r\n\r\n for i, dp in enumerate(self.__differentiated_pay, 1):\r\n print(f'Month {i}: paid out {dp}')\r\n print()\r\n print(f'Overpayment = {self.__overpayment}')\r\n\r\n elif self.__calculation_type == self.__ANNUITY:\r\n if self.__user_choice == self.__SEEK_ANNUITY_MONTHLY:\r\n self.__annuity_monthly = ceil(\r\n self.__credit_principal * ((self.__credit_interest\r\n * pow(1+self.__credit_interest\r\n , self.__principal_term)\r\n )\r\n / (pow(1+self.__credit_interest\r\n , self.__principal_term)\r\n - 1)\r\n )\r\n )\r\n self.__overpayment = (self.__annuity_monthly * self.__principal_term\r\n - self.__credit_principal\r\n )\r\n print(f'Your annuity payment = {self.__annuity_monthly}!')\r\n\r\n elif self.__user_choice == self.__SEEK_TERM:\r\n self.__principal_term = ceil(\r\n log(self.__annuity_monthly / (self.__annuity_monthly\r\n - (self.__credit_interest\r\n * self.__credit_principal))\r\n , 1+self.__credit_interest)\r\n )\r\n self.__overpayment = ceil(self.__annuity_monthly\r\n * self.__principal_term\r\n - self.__credit_principal\r\n )\r\n years = self.__principal_term // 12\r\n months = self.__principal_term % 12\r\n\r\n print(f'You need {years} year{\"s\" if self.__principal_term > 1 else \"\"}'\r\n f'{\" and \" + str(months) + \" months\" if months > 0 else \"\"}'\r\n f' to repay this credit!')\r\n\r\n elif self.__user_choice == self.__SEEK_CREDIT_PRINCIPAL:\r\n self.__credit_principal = ceil(\r\n self.__annuity_monthly\r\n / ((self.__credit_interest\r\n * pow(1+self.__credit_interest, self.__principal_term)\r\n )\r\n / (pow(1+self.__credit_interest, self.__principal_term)\r\n - 1)\r\n )\r\n )\r\n self.__overpayment = ceil(self.__annuity_monthly\r\n * self.__principal_term\r\n - self.__credit_principal)\r\n\r\n print(f'Your credit principal = {self.__credit_principal}!')\r\n print(f'Overpayment = {self.__overpayment}')\r\n\r\n else:\r\n print('Incorrect parameters')\r\n self.usage()",
"def payment_calc(mortgage_amount: float, years: int, interest: float) -> float:\n number_months = years * 12\n\n interest_monthly = interest / 100 / 12\n\n numerator = interest_monthly * ((1 + interest_monthly) ** number_months)\n denominator = (1 + interest_monthly) ** number_months - 1\n\n payment = round(mortgage_amount * (numerator / denominator), 2)\n\n return payment",
"def total_paid(self) -> Decimal:\n return self.total_principal + self.total_interest",
"def calculate_loan(self):\n # calc private method to calculate monthly interest\n monthly_interest = self.__calculate_monthly_interest()\n print(self.__months)\n print(self.principle)\n print(monthly_interest)\n # calc monthly payment.\n monthly_payment = self.principle * (monthly_interest *\n (\n 1 + monthly_interest) ** self.__months) / (\n (1 + monthly_interest) ** self.__months - 1)\n return monthly_payment",
"def investment(principal, interest):\r\n while True:\r\n principal *= (1 + interest)\r\n yield principal",
"def budget_problem3(balance, annualInterestRate):\r\n remaining = balance\r\n\r\n # creating the following bounds assists with bisection search\r\n lo = balance/12\r\n hi = ((balance * (annualInterestRate/12))**12)/12\r\n payment = (lo + hi)/2\r\n\r\n while remaining != 0:\r\n for month in range(12):\r\n remaining = (remaining - payment) * (1 + (annualInterestRate/12))\r\n if remaining > 0:\r\n lo = payment\r\n elif round(remaining,2) < -0.11:\r\n hi = payment\r\n else:\r\n break\r\n payment = (lo + hi)/2\r\n remaining = balance\r\n print 'Lowest Payment: ' + str(round(payment,2))\r\n return round(payment,2)",
"def compute_interest(self) -> float:\n interest = self._balance * SavingsAccount.RATE\n self.deposit(interest)\n return interest",
"def interest_to_principle(self) -> float:\n return float(round(self.total_interest / self.total_principal * 100, 1))",
"def apr(self) -> Decimal:\n new_payment = self._simple_interest(term=1)\n apr = new_payment / self.principal\n return self._quantize(apr * 100)",
"def calculate(self):\n self._emi_months = self._period * 12\n self._total_interest = math.ceil(self._loan_amount * self._period * self._rate / 100)\n self._total_amount_pi = float(self._loan_amount + self._total_interest)\n self._emi_amount = math.ceil(self._total_amount_pi / self._emi_months)\n return self",
"def findPayment (loan, r , m):\r\n\r\n return loan*((r*(l+r)**m)/((l+r)**m-1))",
"def _award_accounts(self):\n\n prize_money = 0\n for i in xrange(len(self.accounts)):\n # Each savings account has a 1% chance of quadrupling their principal. The\n # chance is independent between accounts.\n if random.randint(1, 100) == 1:\n prize_money += 3 * self.accounts[i]\n self.accounts[i] *= 4\n return prize_money",
"async def pokedollar(ctx, *args):\n author = ctx.message.author\n balance = database.get_pokedollars(author)\n return await ctx.send(\":dollar: | **{} you have ₱{}**\".format(author, balance))",
"def compute_total_paid(self):\n total = 0.0\n for line in self.loan_ids:\n if line.pay:\n total += line.amount\n self.total_paid = total",
"def payment_transaction():\n print(\"Please insert coins.\")\n payment = dict(quarters=int(input(\"How many quarters?:\")) * 0.25,\n dime=int(input(\"How many dimes?: \")) * 0.10,\n nickles=int(input(\"How many nickles?: \")) * 0.05,\n pennies=int(input(\"How many pennies?: \")) * 0.01\n )\n\n return round(sum(payment.values()), 2)",
"def calculate_half_percent_interest_on_account(list_of_all_accounts_known, ID_account_to_give_interest):\n for account in list_of_all_accounts_known:\n if ID_account_to_give_interest == account.account_id:\n account.balance += (account.balance * 0.005)",
"def interest(account):\n stm = shared_morphene_instance()\n if mph.rpc is not None:\n mph.rpc.rpcconnect()\n if not account:\n if \"default_account\" in mph.config:\n account = [mph.config[\"default_account\"]]\n\n t = PrettyTable([\n \"Account\", \"Last Interest Payment\", \"Next Payment\"\n ])\n t.align = \"r\"\n for a in account:\n a = Account(a, morphene_instance=stm)\n i = a.interest()\n t.add_row([\n a[\"name\"],\n i[\"last_payment\"],\n \"in %s\" % (i[\"next_payment_duration\"])\n ])\n print(t)",
"def loan(self):",
"def adjusted_pa(personal_allowance, salary):\n\t\tlo, hi = 100000, 120000\n\t\tif salary <= lo:\n\t\t\treturn personal_allowance\n\t\telif salary >= hi:\n\t\t\treturn 0\n\t\telse:\n\t\t\treturn (salary - 100000) / 2",
"def get_loan_info():\n\n try:\n principal = int(request.get_json()[\"amount\"])\n tenure = int(request.get_json()[\"tenure\"])\n except:\n return jsonify({\"message\": \"Input is 'amount' and 'tenure'\"}), 400\n\n interest_rate = get_interest_rate(tenure)\n emi = calculate_emi(principal, interest_rate, tenure)\n total = round(emi * tenure, 2)\n interest = round(total - principal, 2)\n\n output = {\n \"principal\": principal,\n \"tenure\": tenure,\n \"interest\": interest,\n \"interest_rate\": interest_rate,\n \"emi\": emi,\n \"total\": total,\n }\n\n return jsonify({\"loan_info\": output})",
"def turn(self):\n\n # Let \n # I = loan interest rate\n # E = awarded money as a result of certain accounts randomly quadrupling\n # A = original assets under management\n #\n # Then profit = A * I - E\n self.profits.append(self.assets_under_management * LOAN_INTEREST - \\\n self._award_accounts())\n self.assets_under_management = np.sum(self.accounts)",
"def cash_coupon(certificate, percentage):\n return sum(stake for name, stake in certificate['underlyings'].items()) * percentage",
"def pay_loan(self, loan_id):\n r = requests.put(self.base_url + f'/users/{self.username}/loans/{loan_id}', headers=self.auth_header)\n return r.text",
"def course(self, currency, sum):\n if currency == \"USD\":\n url = \"https://finance.rambler.ru/currencies/USD/\"\n elif currency == \"EUR\":\n url = \"https://finance.rambler.ru/currencies/EUR/\"\n else:\n return sum * 1000\n site = requests.get(url)\n soup = bs4.BeautifulSoup(site.text, 'html.parser')\n com = float(soup.find(\"div\", attrs={\"class\": \"finance-currency-plate__currency\"}).text.split()[0])\n return com * sum * 1000",
"def tax(bill):\r\n bill *= 1.08\r\n print(\"With tax: %f\" % bill)\r\n return bill",
"def PV_IncomePremium(t):\n if t > last_t:\n return 0\n else:\n return prj_incm_Premium(t) + PV_IncomePremium(t + 1) / (1 + DiscRate(t))",
"def effecticeInterestRate():\n rate = float(input(\"What is your interest rate:\\n\"))\n compound = int(input(\"How many times in a year you give interest:\\n\"))\n\n EIR = (1 + ((rate/100)/compound))**compound - 1\n eir = EIR*100\n return \"Your effective interest rate is: %.3f\" % eir"
]
| [
"0.65862256",
"0.6448981",
"0.6371054",
"0.6341864",
"0.62965953",
"0.62625",
"0.61843383",
"0.6176643",
"0.612621",
"0.6064576",
"0.60598963",
"0.60487306",
"0.598384",
"0.58938223",
"0.58387935",
"0.5828344",
"0.58106977",
"0.5804773",
"0.5758548",
"0.57565445",
"0.5748683",
"0.5740272",
"0.57370776",
"0.5701136",
"0.5659247",
"0.56407773",
"0.5628939",
"0.56079715",
"0.55907613",
"0.5566652"
]
| 0.676511 | 0 |
Method returns a QuerySet of orders for the matched shift. | def get_orders_with_shift(shift_id):
return Order.objects.filter(shift_id=shift_id) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def orders(self):\n return self._orders",
"def orders(self) -> List[MetatraderOrder]:\n return self._orders",
"def get_orders(self):\n return self.order_lst",
"def queryset(self, request):\n qs = self.model.all_objects.get_query_set()\n ordering = self.ordering or ()\n if ordering:\n qs = qs.order_by(*ordering)\n return qs",
"def getOrderList(self):\r\n\t\treturn self.pair.orders",
"def get_queryset(self):\n self.object = self.get_object()\n return self.object.sticker_set.all().order_by('-modification_date')",
"def getOrderList(self):\r\n\t\treturn self.orders",
"def orders(self) -> List[Order]:\n return store.orders.get_orders(self.exchange, self.symbol)",
"def contributed_orders(self):\n return (o for o in self.orders if o.ordered_by_id != self.id)",
"def get_orderings(self):\n if self._orderings is Undefined:\n self._orderings = self.normalize_orderings(self.ordering)\n return self._orderings",
"def queryset(self, ordering=None):\r\n qs = self.model._default_manager.get_query_set()\r\n if not ordering:\r\n ordering = self.ordering or () # otherwise we might try to *None, which is bad ;)\r\n if ordering:\r\n qs = qs.order_by(*ordering)\r\n return qs",
"def reversed(self):\n return QuerySet(reversed(list(self)))",
"def get_queryset(self, request):\n querys = self.model.all_objects.get_queryset()\n ordering = self.get_ordering(request)\n if ordering:\n querys = querys.order_by(*ordering)\n return querys",
"def orders(self):\n\t\tORDER_MAP = [\"Do Nothing\", \"Collect All\", \"Drop All\", \"Collect\", \"Drop\", \"Collect All But\", \"Drop All But\", \"Garrison\"]\n\t\treturn [(delay, Star(star_id, galaxy=self.galaxy), ORDER_MAP[order], num_ships)\n\t\t for delay, star_id, order, num_ships in self.data.o]",
"def getOrderHistory(self):\n return self.__orderhistory",
"def queryset(self, request):\n qs = super(AdRepOrderAdmin, self).queryset(request)\n qs = AdRepOrder.objects.select_related().filter(id__in=qs\n ).defer('ad_rep__site__envelope',\n 'ad_rep__site__geom',\n 'ad_rep__site__point')\n return qs",
"def get_update_orders(self):\n\n return self.stratgroup.get_orders_to_update_if(UPDATED)",
"def ordering(self):\n value = []\n for i in self:\n if isinstance(i, PQ):\n value.extend(i.ordering())\n else:\n value.append(i)\n\n return value",
"def get_query_set(self):\n return super(ContributorManager, self).get_query_set().order_by('position')",
"def get_queryset(self):\n rs = super(BaseQuerysetMixin, self).get_queryset()\n if self.request.GET.get(\"ordering\") is None:\n rs = rs.order_by(\"id\")\n return rs",
"def history_orders(self, **params):\n return self._get('historyOrders', signed=True, params=params)",
"def get_queryset(self):\n return Order._default_manager.filter(user=self.request.user)",
"def orders(self):\n return(self._d_orders['trades'])",
"def waypoints_ordered(self):\n return self.waypoints.select_related().order_by('localtime')",
"def history_orders(self, **params):\n return self._get('option/historyOrders', signed=True, params=params, version=None)",
"def get_all_orders():",
"def get_new_orders(self):\n\n # note we only get orders from the strategies with UPDATED =\n # True, i.e. only those which got new pricing information this\n # tick. Among other reasons, this is because some strategies\n # (e.g. MMStrategy) need to be fed new prices in order to\n # clear the order dictionary, so if we didn't use _if, we\n # could potentially place these orders many times.\n\n return self.stratgroup.get_orders_to_place_if(UPDATED)",
"def queryset(self, request):\n qs = super(SiteAdmin, self).queryset(request)\n qs = Site.admin.select_related().filter(id__in=qs)\n ordering = self.ordering or ()\n if ordering:\n qs = qs.order_by(*ordering)\n return qs",
"def get_queryset(self):\n self.object = self.get_object()\n return self.object.sticker_set.filter(sprint__isnull=True).order_by(\n '-modification_date'\n )",
"def query_orders(self):\n return self._call_txtrader_api('query_orders', {})"
]
| [
"0.6120419",
"0.6019099",
"0.5978205",
"0.5944654",
"0.58207524",
"0.5806931",
"0.57802045",
"0.57569706",
"0.56282717",
"0.55805904",
"0.5565082",
"0.5544036",
"0.55177367",
"0.55061555",
"0.5448757",
"0.5423498",
"0.5401589",
"0.5366619",
"0.53172815",
"0.5313821",
"0.53027195",
"0.52850413",
"0.52721936",
"0.52694446",
"0.5240839",
"0.52340025",
"0.52332616",
"0.5220325",
"0.52151066",
"0.5213824"
]
| 0.7151643 | 0 |
Return sales per category { | def get_context_data(self, **kwargs):
start, end = self.get_start_end_dates(self.request)
if start or end is not None:
category_list = []
misc_items = 0
discount = 0
category_sales = {}
count_test_items = 0
out = {}
total = 0
orders = self.get_orders_with_range(start, end)
for order in orders:
for item in order.items.exclude(void_status=True).all():
sales = item.price * item.quantity
# tax_total+=item.price*item.tax*item.quantity
count_test_items += item.quantity
product = item.product
# out['misc']={}
# if not product.categories.all():
# out['misc']={'categories':[]}
# else:
# out['misc'] = {'sales':True, 'categories':product.categories.all()}
# print out['misc']['categories']
for category in product.categories.all()[:1]:
if category.parent:
if category.parent in out:
if category in out[category.parent]['categories']:
sale = out[category.parent]['categories'][category]
sale[category] += sales
out[category.parent]['categories'][category] = sale
out[category.parent]['sales'] += sales
else:
out[category.parent]['categories'][category] = {category: sales}
out[category.parent]['sales'] += sales
else:
out[category.parent] = {'parent': True, 'categories': {}, 'sales': 0}
out[category.parent]['categories'][category] = {category: sales}
out[category.parent]['sales'] = sales
else:
if category in out:
# out[category]['categories'][category] = [category]
out[category]['sales'] += sales
else:
out[category] = {'parent': False, 'sales': 0}
out[category]['categories'] = [category]
out[category]['sales'] = sales
if not product.categories.all():
misc_items += item.quantity
if 'misc' in category_sales:
category_sales['misc'] += sales
else:
category_sales['misc'] = sales
else:
for category in product.categories.all()[:1]:
category_list += [category] * item.quantity
if category in category_sales:
category_sales[category] += sales
else:
category_sales[category] = sales
discount += order.discount_total
categories = Counter(category_list).items()
new_categories = list()
for category in categories:
new_categories.append((category[0], category[1], category_sales[category[0]]))
if misc_items != 0:
new_categories.append(('Misc', misc_items, category_sales['misc']))
total = misc_items
total += sum(category[1] for category in categories)
total_sales = sum(category[2] for category in new_categories)
return {'output': out, 'discount': discount, 'categories': new_categories, 'total': total,
'total_sales': total_sales}
else:
return {} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def view_total_sales():\n # Later will add the ability to sort by date and Category\n try:\n with session_scope() as db_session:\n orders = db_session.query(Order).all()\n\n if len(orders) < 1:\n return {\n 'code': 404,\n 'message': 'There are no sales'\n }, 404\n\n nmbr_itm = 0\n for order in orders:\n for items in order.order_lines:\n nmbr_itm = nmbr_itm + items.quantity\n\n except DBAPIError as db_error:\n # Returns an error in case of a integrity constraint not being followed.\n return {\n 'code': 400,\n 'message': re.search('DETAIL: (.*)', db_error.args[0]).group(1)\n }, 400\n except NoResultFound:\n # Returns an error in case of a integrity constraint not being followed.\n return {\n 'code': 400,\n 'message': \"No sales have been registered\"\n }, 400\n return {\n 'numberItems': nmbr_itm\n }, 200",
"def get_daily_product_sales(self):\n self.products['daily'] = self.products['session_start_date'].apply(lambda x: convert_dt_to_day_str(x))\n self.products = self.products.query(\"payment_amount == payment_amount\")\n self.products['payment_amount'] = self.products['payment_amount'].apply(lambda x: float(x))\n self.daily_products = self.products.reset_index().groupby([\"daily\", \"products\"]).agg(\n {\"payment_amount\": \"sum\", 'index': 'count'}).reset_index().rename(columns={\"index\": \"order_count\"})\n return self.daily_products",
"def get_all_sales(self):\n all_sales = self.dbconn.get_all_sales()\n return all_sales",
"def get_data_sales(self):\n return {\n 'search_type': SearchForm.SEARCH_TYPE_SALE,\n 'min_price': '40000',\n 'max_price': '50000',\n 'location':'Test, Test',\n 'min_bedrooms': '5',\n 'property_type': str(PropertyTypeFactory().slug)\n }",
"def monthly_sales(self):\n last_thirty_days = timezone.now() - timedelta(days=30)\n items = self.item_set.filter(status=\"sold\", updated_at__gte=last_thirty_days)\n total_sales = 0\n for item in items:\n total_sales += item.price\n return total_sales",
"def total_sales():\n data = []\n orders = Order.objects.all()\n for order in orders:\n data.append(order.get_total_cost())\n return sum(data)",
"def total_sales(self):\n total_sales = 0\n items = self.item_set.filter(status=\"sold\")\n for item in items:\n total_sales += item.price\n return total_sales",
"def returns_by_category(self):\n cate_weights = self.weights_by_category\n cate_returns = {}\n for cate in self.unique_category:\n if cate_weights[cate] == 0:\n cate_returns[cate] = 0\n else:\n cate_returns[cate] = (self.returns[self.category == cate] *\n self.weights[self.category == cate]).sum()/cate_weights[cate]\n return pd.Series(cate_returns, index=self.unique_category)",
"def view_total_sales_by_date(start_date, end_date=None):\n # Later will add the ability to sort by date and Category\n try:\n with session_scope() as db_session:\n if end_date is not None:\n if validate(start_date) and validate(end_date):\n pass\n else:\n return '', 404\n orders = db_session.query(Order).filter(Order.date.between(start_date, end_date)).all()\n else:\n if validate(start_date):\n pass\n else:\n return '', 404\n orders = db_session.query(Order).filter(Order.date == start_date).all()\n if len(orders) < 1:\n return {\n 'code': 404,\n 'message': 'There are no sales'\n }, 404\n\n nmbr_itm = 0\n for order in orders:\n for items in order.order_lines:\n nmbr_itm = nmbr_itm + items.quantity\n\n except DBAPIError as db_error:\n # Returns an error in case of a integrity constraint not being followed.\n return {\n 'code': 400,\n 'message': re.search('DETAIL: (.*)', db_error.args[0]).group(1)\n }, 400\n except NoResultFound:\n # Returns an error in case of a integrity constraint not being followed.\n return {\n 'code': 400,\n 'message': \"No sales have been registered\"\n }, 400\n return {\n 'numberItems': nmbr_itm\n }, 200",
"def get_hourly_categories_of_sales(self):\n self.hourly_product_cat = self.products.groupby([\"hours\", \"category\"]).agg(\n {\"id\": lambda x: len(np.unique(x))}).reset_index().rename(columns={\"id\": \"order_count\"})\n self.hourly_product_cat['product_cat_order_count_rank'] = self.hourly_product_cat.sort_values(\n by=[\"hours\", \"order_count\"], ascending=False).groupby(\"hours\").cumcount() + 1\n self.hourly_product_cat = pd.merge(self.hourly_product_cat,\n self.products.groupby(\"hours\").agg(\n {\"id\": lambda x: len(np.unique(x))}).reset_index().rename(\n columns={\"id\": \"hourly_total_orders\"}),\n on='hours', how='left')\n self.hourly_product_cat['hourly_order_ratio'] = self.hourly_product_cat['order_count'] / \\\n self.hourly_product_cat['hourly_total_orders']\n return self.hourly_product_cat",
"def get_sales_data(fname='TOTALSA.csv'):\n sales_months = pd.read_csv(fname)\n dates = sales_months['DATE'].tolist()\n for ix, date in enumerate(dates):\n dates[ix] = datetime.strptime(date, '%Y-%m-%d')\n dates[ix] = int(dates[ix].strftime('%Y'))\n sales_months['Year'] = dates\n result = sales_months.groupby(\n 'Year')['TOTALSA'].sum().to_frame().sort_index()\n result['TOTALSA'] = result['TOTALSA']\n result.rename(\n columns={'TOTALSA': 'Total Sales of Personal Vehicles (in millions)'}, inplace=True)\n return(result)",
"def get_all_sales_ids_for_customer_ids():\n\n # your code",
"def get_sum_of_sales_per_customer_from_table(table):\n summed_sales_per_customer = {}\n for customer in {line[CUSTOMER_ID] for line in table}:\n sum_of_sales = common.szum_list([line[PRICE] for line in table if line[CUSTOMER_ID] == customer])\n summed_sales_per_customer[customer] = sum_of_sales\n return summed_sales_per_customer",
"def get_sales_data():\n print(\"Retrieving all the sales information...\")\n data = SHEET.worksheet('sales')\n print(\"Compilation complete!\\n\")\n return data",
"def get_expenses_by_category(conn, date_from, date_to, *args):\n rows = select_transactions(conn, date_from, date_to, *args)\n expenses_by_category = {}\n for row in rows:\n if row[3] == \"£\":\n multiplier = 1\n elif row[3] == \"€\":\n multiplier = 0.9\n elif row[3] == \"$\":\n multiplier = 0.8\n try:\n expenses_by_category[f\"{row[5]}\"] += float(row[2])*multiplier\n except KeyError:\n expenses_by_category[f\"{row[5]}\"] = float(row[2])*multiplier\n return expenses_by_category",
"def yearly_sales(self):\n\n last_365_days = timezone.now() - timedelta(days=365)\n items = self.item_set.filter(status=\"sold\", updated_at__gte=last_365_days)\n total_sales = 0\n if item in items:\n total_sales += item.price\n return total_sales",
"def all_sales(self, username):\n con = dbcon()\n cur = con.cursor()\n cur.execute(\"SELECT * FROM sales;\")\n res = cur.fetchall()\n sales_records=[]\n for a_sale in res:\n record = {\n 'sales_id':a_sale[0],\n 'attendant':a_sale[1],\n 'product_name':a_sale[2],\n 'price':a_sale[3],\n 'quantity':a_sale[4]\n }\n sales_records.append(record)\n return jsonify({\"Records\": sales_records}), 200",
"def get_all_sales():\n admin = \"admin\"\n if [\"role\"] != admin:\n return jsonify({\"message\": \"Only an admin can view all sales records\"}), 401\n response = jsonify(sale_object.get_all_sales())\n response.status_code = 200\n return response",
"def add_sales_per_customer(historical, test):\n # load historical - use this in data.py\n # historical = pd.read_csv('./data/raw/train.csv')\n\n data = historical.groupby('Store').mean()\n data.loc[:, 'sales-per-customer'] = data.loc[:, 'Sales'] / data.loc[:, 'Customers']\n data = data.loc[:, ['Customers', 'sales-per-customer']]\n data.columns = ['mean-customers', 'sales-per-customer']\n data.fillna({\n 'mean-customers': np.mean(data.loc[:, 'mean-customers']),\n 'sales-per-customer': np.mean(data.loc[:, 'sales-per-customer'])\n }, inplace=True)\n test = test.merge(data, on='Store')\n return test",
"def SumCostByCategory(category):\n\n logs.logger.debug(\n \"Start to adds all amount of Cost objects based on category.\")\n try:\n searchedCostByCategoryFromDB = GetAllCostByCategoryFromDB(category)\n sumTotal = 0\n for item in searchedCostByCategoryFromDB:\n sumTotal += item.amount\n logs.logger.info(\n \"Based on the category adds all amount of Cost objects.\")\n return sumTotal\n except Exception as e:\n logs.logger.error(e, exc_info=True)",
"def get_grouped_prod(all_customers_data, trans_column, prod_l_cat):\n return all_customers_data.select(trans_column, prod_l_cat)\\\n.groupBy(prod_l_cat).agg(F.countDistinct(trans_column).alias('hhds'))",
"def get_catergories(request):\n shop_obj = Shop.objects.get(user=request.user)\n shop_categories = shop_obj.categories\n value_added = list(\n map(\n lambda x: {\n \"name\": x[\"name\"],\n \"slug\": x[\"slug\"],\n \"public_slug\": x[\"public_slug\"],\n \"product_count\": Products.objects.filter(\n shop_rel=shop_obj, genre__slug=x[\"slug\"]\n ).count(),\n },\n shop_categories,\n )\n )\n resp_payload = {\"shop_categories\": value_added}\n return Response(data=resp_payload, status=status.HTTP_200_OK)",
"def get_sale_prices():\n\n r = requests.post(settings.qv_url, data=REQUEST_DATA)\n response = r.json()\n\n data_processed = [process_property(prop) for prop in response['LocalAreaSales']]\n\n return data_processed",
"def total_seller(df):\n \n return pd.pivot_table(df, values = \"Total\", index = [\"Name\"], aggfunc = \"sum\")",
"def SumCostByDayPerCategory(day, category):\n\n logs.logger.debug(\n \"Start to adds all amount of Cost objects based on the payment date and on the category\")\n try:\n searchedCostByDayPerCategoryFromDB = session.query(\n Cost.Cost).filter(Cost.Cost.dateOfPayment == day).filter(\n Cost.Cost.category == category).all()\n sumTotal = 0\n for item in searchedCostByDayPerCategoryFromDB:\n sumTotal += item.amount\n logs.logger.info(\n \"Based on the payment date and on the category adds all amount of Cost objects.\")\n return sumTotal\n except Exception as e:\n logs.logger.error(e, exc_info=True)",
"def SumCostByMonthPerCategory(year, numberOfMonth, category):\n\n logs.logger.debug(\n \"Start to adds all amount of Cost objects based on the month of payment date and on the category.\")\n try:\n num_days = calendar.monthrange(year, numberOfMonth)[1]\n searchedCostByMonthFromDB = GetAllCostByDateOfPaymentBandFromDB(\n date(year, numberOfMonth, 1), date(year, numberOfMonth, num_days))\n sumTotal = 0\n for item in searchedCostByMonthFromDB:\n if item.category.value == category:\n sumTotal += item.amount\n logs.logger.info(\n \"Based on the month of payment date and on the category adds all amount of Cost objects.\")\n return sumTotal\n except Exception as e:\n logs.logger.error(e, exc_info=True)",
"def SumCostByYearPerCategory(year, category):\n\n logs.logger.debug(\n \"Start to adds all amount of Cost objects based on the year of payment date and on the category.\")\n try:\n first_day_of_year = date(year, 1, 31)\n last_day_of_year = date(year, 12, 31)\n searchedCostByYearFromDB = GetAllCostByDateOfPaymentBandFromDB(\n first_day_of_year, last_day_of_year)\n sumTotal = 0\n for item in searchedCostByYearFromDB:\n if item.category.value == category:\n sumTotal += item.amount\n logs.logger.info(\n \"Based on the year of payment date and on the category adds all amount of Cost objects.\")\n return sumTotal\n except Exception as e:\n logs.logger.error(e, exc_info=True)",
"def getproductbycategory(self):\n\n eight = self.geteightcategories()\n productsliste = []\n for category in eight:\n for page in tqdm(range(1, 5)):\n r2 = requests.get(\n \"https://fr.openfoodfacts.org/cgi/search.pl?action=process\"\n + \"&tagtype_0=categories&tag_contains_0=contains&tag_0={}\".format(\n category\n )\n + \"&tag_contains_1=contains&tag_1=france&page_size=500\"\n + \"&fields=url,categories_tags_fr,product_name,stores_tags\"\n + \",nutriscore_grade&tagtype_1=purchase_places&sort_by=\"\n + \"unique_scans_n&json=1&page={}\".format(page)\n )\n dataproducts = r2.json()\n if dataproducts[\"page_count\"] is None:\n continue\n else:\n for items in dataproducts[\"products\"]:\n productsliste.append(items)\n\n self.rawproductdata = productsliste",
"def SumCostByBetweenDatesPerCategory(startDate, endDate, category):\n\n logs.logger.debug(\n \"Start to adds all amount of Cost objects between two payment date and on the category\")\n try:\n searchedCostByBetweenDatesFromDB = GetAllCostByDateOfPaymentBandFromDB(\n startDate, endDate)\n sumTotal = 0\n for item in searchedCostByBetweenDatesFromDB:\n if item.category.value == category:\n sumTotal += item.amount\n logs.logger.info(\n \"Between two payment date and on the category adds all amount of Cost objects.\")\n return sumTotal\n except Exception as e:\n logs.logger.error(e, exc_info=True)",
"def get_console_game_sales(console_name: str) -> float:\n c = df.groupby('Platform')\n return c['Global_Sales'].sum()[console_name]"
]
| [
"0.62891865",
"0.603454",
"0.60087216",
"0.59924215",
"0.59762335",
"0.59438944",
"0.5927264",
"0.5879687",
"0.5754501",
"0.57487327",
"0.5735331",
"0.5724742",
"0.5700442",
"0.5695713",
"0.56687236",
"0.5666951",
"0.5654417",
"0.55597746",
"0.5549785",
"0.55187017",
"0.55146253",
"0.5504792",
"0.54505825",
"0.5426787",
"0.5372082",
"0.5343193",
"0.53244096",
"0.52530843",
"0.5252249",
"0.52475643"
]
| 0.61560917 | 1 |
Apply filters to the Datacontainer object. | def apply_filters(self, filters):
self._data = self.model.objects.filter(**filters) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def filter(self, filters):",
"def filter_data(self):\n self.data = filter_pandas(self.data, self.filters)",
"def filter_data(self, data):\n for f in self.filters:\n data = getattr(self, f)(data)\n return data",
"def _applyFilters(self) -> None:\n self._dataframe = self._source.loc[:, self._visable_columns]\n for column, value in self._filters.items():\n if value is not None:\n self._dataframe = self._dataframe[self._source[column] == value]\n else:\n self._dataframe = self._dataframe[self._source[column].isnull()]\n\n self.layoutChanged.emit()",
"def filter(self, *args, **kwargs):",
"def _initialize_data_filter(self):\n df_params = self._loading_params.copy()\n df_params[\"filter_negate\"] = True\n df_params[\"filter_upper\"] = True\n self._data_filter = LoadProcessedData(**df_params)",
"def before_each(self, dataset: pydicom.dataset.Dataset) -> None:\r\n for a_filter in self.filters:\r\n a_filter.before_each(dataset)",
"def apply_filters(self, new_filters):\n\t\tself.filters = new_filters",
"def get_filters(self):",
"def process_filters(self, filters, queryset, view):\n return filters",
"def get_data_filter(args):\n diff_data(args, \"filter\")",
"def filter(self, filter_dict):\n pass",
"def filters(self, filters):\n\n self._filters = filters",
"def update_filters(self):\n\n # Update household filter\n household_filter = [True if agent == 'household' else False for agent \\\n in self.source.data['agent_type']]\n self.household_view.filters[0] = BooleanFilter(household_filter)\n\n # Update neighbourhood filter\n neighbourhood_filter = [True if agent == 'neighbourhood' else False for\\\n agent in self.source.data['agent_type']]\n self.neighbourhood_view.filters[0] = BooleanFilter(\n neighbourhood_filter)\n\n # Update school filter\n school_filter = [True if agent == 'school' else False for agent in \\\n self.source.data['agent_type']]\n self.school_view.filters[0] = BooleanFilter(school_filter)",
"def apply_filters(filters, items):\n return scom.apply_filters(filters, items)",
"def filter():\n return get_filter_data(db, MyTable)",
"def filter_data(data,filters):\n final_filter = pd.Series(np.array([True] * data.shape[0]))\n for attribute, value in filters:\n final_filter &= data[attribute] == value\n return data[final_filter]",
"def filter(self, func):\r\n\r\n d = self.data\r\n f = []\r\n for i in d:\r\n if func(i):\r\n f.append(i)\r\n return Records(f)",
"def filter(self, data):\n self.data = pysap.Image(data=self.flt.filter(data))",
"def _base_proxies_filter(self, category: str, filters: list) -> list:\n\n data_filtered = []\n \n if category == 'country':\n data_filtered.extend(\n Froxy._filter_model(self.storage.get(), line=2, col=0, filters=filters)\n )\n \n elif category == 'anonymity':\n data_filtered.extend(\n Froxy._filter_model(self.storage.get(), line=2, col=1, filters=filters)\n )\n\n elif category == 'protocol':\n data_filtered.extend(\n Froxy._filter_model(self.storage.get(), line=2, col=2, filters=filters)\n )\n \n elif category == 'google_passed':\n data_filtered.extend(\n Froxy._filter_model(self.storage.get(), line=2, col=3, filters=filters)\n )\n\n return data_filtered",
"def run(self):\n query = self.query\n\n # count before filtering\n # self.cardinality = query.add_columns(self.columns[0].sqla_expr).count()\n\n self._set_column_filter_expressions()\n self._set_global_filter_expression()\n self._set_sort_expressions()\n self._set_yadcf_data(query)\n\n # apply filters\n query = query.filter(\n *[e for e in self.filter_expressions if e is not None])\n self.filtered_query = deepcopy(query)\n\n # self.cardinality_filtered = query.add_columns(\n # self.columns[0].sqla_expr).count()\n\n # apply sorts\n query = query.order_by(\n *[e for e in self.sort_expressions if e is not None])\n\n # add paging options\n length = int(self.params.get('length'))\n if length >= 0:\n query = query.limit(length)\n elif length == -1:\n pass\n else:\n raise(ValueError(\n 'Length should be a positive integer or -1 to disable'))\n query = query.offset(int(self.params.get('start')))\n\n # add columns to query\n query = query.add_columns(\n *[c.sqla_expr for c in self.columns])\n\n self.filtered_query = self.filtered_query.add_columns(\n *[c.sqla_expr for c in self.columns])\n\n self.query = query\n # fetch the result of the queries\n column_names = [col.mData if col.mData else str(i)\n for i, col in enumerate(self.columns)]\n # self.results = [{k: v for k, v in zip(\n # column_names, row)} for row in query.all()]",
"def filter(self, *args, **kwargs):\n clone = self._clone()\n for f in args:\n clone.filter_obj.add_filter(f)\n for key, value in kwargs.items():\n clone.filter_obj.add_filter_param(key, value)\n return clone",
"def filter(data, mask, **kwargs):\n return Component(\n \"Filter\",\n arguments={\n 'data': Component.of(data),\n 'mask': Component.of(mask)\n },\n options={\n \n },\n constraints=kwargs)",
"def _load_filter(self, *args, **kwargs):\n raise NotImplementedError",
"def filter_data(self):\n if(self.filter_classes == []):\n return\n \n filtered_idx = []\n for id in range(len(self.image_ids)):\n anns = self.load_annotations(id)\n found = False\n for ann in anns:\n if ann['label'] in self.filter_classes:\n found = True\n break\n if found:\n filtered_idx.append(id)\n \n self.filtered_ids = [self.image_ids[id] for id in filtered_idx]\n # self.image_ids = self.filtered_ids\n print(\"Number of filtered instances:\", len(self.filtered_ids))",
"def _add_filters(self, filters):\n self._env.filters['dateformat'] = dateformat\n self._env.filters.update(filters or {})",
"def filter_values(self):\n dfilter = self.args.datafilter\n self.logger.info(u'Filtering values with:{f}'.format(f=dfilter))\n data = self.outputdata\n newdata = {}\n for key, value in data.items():\n self.logger.info(u'\\nProcessing Key:{k}, value:{v}'.format(k=key,\n v=value))\n returned_data = dict_value_filter(key, value, dfilter, self.logger)\n if bool(returned_data):\n newdata[key] = returned_data\n self.logger.info(u'Data after filter:{d}'.format(d=newdata))\n\n self.outputdata = newdata",
"def build_filters(self, filters=None):\n if filters is None:\n filters = {}\n\n grouped = get_grouped_filters(filters)\n branch_filters = get_branch_filter(filters)\n orm_filters = super(StoryResource, self).build_filters(filters)\n orm_filters['grouped'] = grouped\n orm_filters['br_filter'] = branch_filters\n\n if 'content_type__in' in filters:\n orm_filters['content_type__in'] = [CONTENT_HYDRATE[f] for f in filters['content_type__in'].split(',')]\n\n return orm_filters",
"def apply_filter(self, inplace=True):\n\n if self.filter is None:\n if not inplace:\n return copy.deepcopy(self)\n else:\n return None\n\n x = copy.copy(self.__dict__)\n x['data'] = self.get_data()\n x['locs'] = self.get_locs()\n\n if self.filter == 'kurtosis':\n x['kurtosis'] = x['kurtosis'][x['kurtosis'] <= x['kurtosis_threshold']]\n\n for key in ['n_subs', 'n_elecs', 'n_sessions', 'dur', 'filter_inds', 'nifti_shape']:\n if key in x.keys():\n x.pop(key)\n\n boc = Brain(**x)\n boc.filter = None\n boc.update_info()\n if inplace:\n self.__init__(boc)\n else:\n return boc",
"def update_filters(self, **kwargs):\n self._FILTERS = kwargs"
]
| [
"0.73095095",
"0.71926576",
"0.69960827",
"0.66669726",
"0.6536904",
"0.6523246",
"0.64838815",
"0.64456093",
"0.642133",
"0.6333619",
"0.6310267",
"0.62748665",
"0.6264728",
"0.62351424",
"0.61123586",
"0.6049168",
"0.6038185",
"0.6031785",
"0.6025596",
"0.6010805",
"0.6010543",
"0.6001558",
"0.5992133",
"0.5954509",
"0.59503716",
"0.59480476",
"0.5945957",
"0.5923451",
"0.592021",
"0.5915153"
]
| 0.7357808 | 0 |
Apply annotation to datacontainer object | def annotate(self, annotation):
self._data = self._data.annotate(**annotation) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _convert_annotations(self, ast):\n self.annotations = IDLAnnotations(ast)",
"def annotate(self, **annotations):\n _check_annotations(annotations)\n self.annotations.update(annotations)",
"def annotate(self, annotation_=None):\n # Important: Need a copy, not the reference to the original object\n annotation_ = copy.deepcopy(annotation_)\n annotation_.annotate(self, from_dataset=True)\n history_record = annotation_.create_history_record()\n self.annotations.append(history_record)\n self._append_task(kind='annotation', task=history_record)",
"def annotations(self, annotations):\n self._annotations = annotations",
"def add_annotation(self, annotation):\n self.annotations.append(annotation)",
"def __init__(self, namespace, data=None, annotation_metadata=None,\n sandbox=None, time=0, duration=None):\n\n super(Annotation, self).__init__()\n\n if annotation_metadata is None:\n annotation_metadata = AnnotationMetadata()\n\n self.annotation_metadata = AnnotationMetadata(**annotation_metadata)\n\n self.namespace = namespace\n\n self.data = SortedKeyList(key=self._key)\n\n if data is not None:\n if isinstance(data, dict):\n self.append_columns(data)\n else:\n self.append_records(data)\n\n if sandbox is None:\n sandbox = Sandbox()\n\n self.sandbox = Sandbox(**sandbox)\n\n self.time = time\n self.duration = duration",
"def copy_annotations(from_data, to_data, annot_type):\n\n for annot in from_data.annotations.select_type(annot_type):\n entity = anafora.AnaforaEntity()\n entity.id = annot.id\n entity.spans = annot.spans\n entity.type = annot.type\n to_data.annotations.append(entity)",
"def add_annotation_metadata(self, value):\n\n if self._properties is None:\n self._properties = LabelProperties()\n self._properties.parameters.insert_new_element(value)",
"def fetch_annotation(self):\n annotation = self.dataset.annotation\n\n # Fetch filters for the dataset's alignment dimension from this query builder\n dataset_alignment_dimension_filters = self.fetch_query_filters(\n annotation.dataset_alignment_field_alias\n )\n\n # Update fields in filters for the dataset's alignment dimension to the annotation's alignment field\n annotation_alignment_dimension_filters = [\n dataset_alignment_filter.for_(annotation.alignment_field)\n for dataset_alignment_filter in dataset_alignment_dimension_filters\n ]\n\n annotation_alignment_field = annotation.alignment_field\n if annotation_alignment_field.data_type == DataType.date:\n dataset_alignment_dimension = self.fetch_query_dimension(\n annotation.dataset_alignment_field_alias\n )\n\n if hasattr(dataset_alignment_dimension, \"interval_key\"):\n # Use the interval key of the dataset's alignment dimension for the annotation's alignment field\n # Otherwise we would need to copy it to prevent issues from patching directly\n annotation_alignment_field = DatetimeInterval(\n annotation.alignment_field, dataset_alignment_dimension.interval_key\n )\n\n annotation_dimensions = [annotation_alignment_field, annotation.field]\n\n annotation_query = make_slicer_query(\n database=self.dataset.database,\n base_table=annotation.table,\n dimensions=annotation_dimensions,\n filters=annotation_alignment_dimension_filters,\n )\n\n annotation_df = fetch_data(\n self.dataset.database, [annotation_query], [annotation.alignment_field]\n )\n\n return annotation_df",
"def markAsNeedsAnnotationsDictionary(self):\n self.needs_annotations_dict = True",
"def annotate(self, request, pk=None):\n\n # Get the document of the detail view and deserialize data\n document = self.get_object()\n serializer = AnnotationSerializer(\n data=request.data,\n context={'request': request, 'document': document}\n )\n\n # Validate the serializer and save the annotation\n serializer.is_valid(raise_exception=True)\n serializer.save()\n\n # Return the response\n return Response(serializer.data)",
"def updateAnnot( xdata, ydata, pixels, annot, rawdata, **kwargs):\n\ty, x = pol2cart( ydata/180, xdata, pixels )\n\tannot.xy = ( xdata, ydata )\n\t# Inconsistent wrapping; plot the right variable.\n\tif xdata < 0:\n\t\txdata += 2 * np.pi\n\ttext = 'Az=' + str( round( xdata * 180 / np.pi, 1 ) )+ ', El=' + str( round( np.arccos( ydata/180 ) * 180/np.pi, 1) ) + u'\\xb0' + '\\nInt.=' + '{:.3E}'.format((rawdata[int(y),int(x)]))\n\tannot.set_text( text )\n\tannot.get_bbox_patch().set_alpha( 0.66 )\n\tannot.set_color('black')",
"def __species_annotation__(self,aggregation_so_far,annotation):\n return Survey.__species_annotation__(self,aggregation_so_far,[annotation])",
"def __call__(self, *input_data):\n \n pb_ann = Any()\n pb_ann.Pack(annotation_to_protobuf.convert_annotation(input_data))\n request = annotation_pb2.ProcessRequest(pipeline_name = self._pipeline_name, \n input_annotations = pb_ann)\n \n response = self._stub.process(request)\n return annotation_from_protobuf.convert_annotation(response.output_annotations)",
"def drawAnnotation(self,i=0):\n #print \"DRAW %s\" % i\n self._annotations[i] = [ self.annotations[i][0](n) for n in self.names ]",
"def annotations(self):\n annotations = {\"date\": self.date_trunc(\"usage_start\")}\n # { query_param: database_field_name }\n fields = self._mapper.provider_map.get(\"annotations\")\n for q_param, db_field in fields.items():\n annotations[q_param] = F(db_field)\n if (\n \"project\" in self.parameters.parameters.get(\"group_by\", {})\n or \"and:project\" in self.parameters.parameters.get(\"group_by\", {})\n or \"or:project\" in self.parameters.parameters.get(\"group_by\", {})\n ):\n annotations[\"project\"] = F(\"namespace\")\n\n return annotations",
"def analyze(self, annotation):\n for annotator in self.sequence:\n annotation = annotator.validate_and_annotate(annotation)\n return annotation",
"def _render_annotation(self, index, el):\r\n attr = {}\r\n attr.update(self._get_annotation_class_attr(index, el))\r\n attr.update(self._get_annotation_data_attr(index, el))\r\n\r\n el.tag = 'span'\r\n\r\n for key in attr.keys():\r\n el.set(key, attr[key]['value'])\r\n if '_delete' in attr[key] and attr[key]['_delete'] is not None:\r\n delete_key = attr[key]['_delete']\r\n del el.attrib[delete_key]",
"def setup_annotation(self):\n annotation = self.ax_fig.annotate(\n '', xy=(0, 0), ha='left',\n xytext=(-20, 20), textcoords='offset points', va='bottom',\n bbox=dict(\n boxstyle='round,pad=0.5', fc='yellow', alpha=0.2),\n arrowprops=dict(\n arrowstyle='->', connectionstyle='arc3,rad=0'))\n return annotation",
"def getAnnotation(self, ind):\n if not np.issubdtype(ind, np.integer):\n logger.warning(f'myMplCanvas.getAnnotation() got bad ind: {ind} {type(ind)}')\n return\n\n xStat = self.stateDict['xStat']\n yStat = self.stateDict['yStat']\n groupByColumnName = self.stateDict['groupByColumnName']\n\n analysisName = self.plotDf.at[ind, groupByColumnName]\n \n # into the master df, only if viewing raw\n index = self.plotDf.at[ind, 'index']\n \n try:\n region = self.plotDf.at[ind, 'Region'] # not all will have this\n except (KeyError) as e:\n region = 'n/a'\n try:\n _sex = self.plotDf.at[ind, 'Sex'] # not all will have this\n except (KeyError) as e:\n _sex = 'n/a'\n\n xVal = self.plotDf.at[ind, xStat]\n yVal = self.plotDf.at[ind, yStat]\n\n # oligo specific\n _masterDf = self.stateDict['masterDf']\n \n # corerect\n try:\n grandParentFolder = self.plotDf.at[ind, 'grandParentFolder']\n grandParentFolder = str(grandParentFolder)\n except (KeyError) as e:\n grandParentFolder = None\n\n parentFolder = self.plotDf.at[ind, 'parentFolder']\n file = self.plotDf.at[ind, 'file']\n\n # grandParentFolder = _masterDf.at[ind, 'grandParentFolder']\n # grandParentFolder = str(grandParentFolder)\n # parentFolder = _masterDf.at[ind, 'parentFolder']\n # file = _masterDf.at[ind, 'file']\n\n # print('xxx grandParentFolder:', grandParentFolder, type(grandParentFolder))\n # print('xxx parentFolder:', parentFolder)\n # print('xxx file:', file)\n \n if grandParentFolder is not None:\n path = os.path.join(grandParentFolder, parentFolder, file)\n else:\n parentFolder = str(parentFolder)\n path = os.path.join(parentFolder, file)\n\n returnDict = {\n 'ind': ind,\n 'index': index,\n 'analysisName': analysisName,\n 'region': region,\n 'Sex': _sex,\n 'xVal': xVal,\n 'yVal': yVal,\n #'plotDf': self.plotDf, # potentially very big\n 'path': path,\n }\n return returnDict",
"def setAnnotation(self, *args):\n return _libsbml.SBase_setAnnotation(self, *args)",
"def annotate(m, ss_seq): # -> None:\n ...",
"def _add_metadata_as_attrs(data, units, description, dtype_out_vert):\n if isinstance(data, xr.DataArray):\n return _add_metadata_as_attrs_da(data, units, description,\n dtype_out_vert)\n else:\n for name, arr in data.data_vars.items():\n _add_metadata_as_attrs_da(arr, units, description,\n dtype_out_vert)\n return data",
"def create_annotation(raw):\n annotation_pandas = pd.DataFrame(columns=[\"onset\", \"duration\", \"description\"])\n for idx, event in enumerate(raw.annotations):\n annotation_pandas.loc[idx] = [\n event[\"onset\"],\n event[\"duration\"],\n event[\"description\"],\n ]\n return annotation_pandas",
"def add_annotations(annot_tuples, ref_data, annot_type):\n\n for annot in ref_data.annotations.select_type(annot_type):\n annot_begin, annot_end = annot.spans[0]\n annot_tuples.append((annot_begin, annot_end, annot.id))",
"def update_annot(ind):\n # update text annotation\n pos = sc.get_offsets()[ind[\"ind\"][0]]\n annot.xy = pos\n idxlist = []\n for element in PC:\n idxlist.append(np.allclose(element, pos))\n idx = idxlist.index(True)\n annotation_string = f'{idx + 1}\\n'\n if display_parameter_values:\n for i, label in enumerate(parameterList):\n annotation_string += (f'{parameters[i, idx]: 10.2f} '\n f'+/- {errors[i, idx]:8.2f} '\n f'({label})\\n')\n annot.set_text(annotation_string[:-1])\n annot.get_bbox_patch().set_alpha(0.4)\n\n # update immage annotation\n label = mapp.listOfFiles[idx].split(os.sep)[-1].split('.')[0]\n image = get_image(mapp.pltdir, label)\n ab.xy = pos\n ab.offsetbox = OffsetImage(image)\n ax.add_artist(ab)\n if show_both_images:\n additional_image = get_image(additional_fitplot_folder, label)\n ac.xy = pos + shift_second_image\n ac.offsetbox = OffsetImage(additional_image)\n ax.add_artist(ac)",
"def _add_annotation(raw_fig):\n data_ax = raw_fig.mne.ax_main\n\n key_event = KeyEvent(name=\"Annotation\", canvas=raw_fig.canvas, key=\"a\")\n raw_fig.canvas.callbacks.process(\"key_press_event\", key_event)\n\n ann_fig = raw_fig.mne.fig_annotation\n for key in \"test\": # Annotation will be named: BAD_test\n key_event = KeyEvent(name=\"Bad\", canvas=ann_fig.canvas, key=key)\n ann_fig.canvas.callbacks.process(\"key_press_event\", key_event)\n\n key_event = KeyEvent(name=\"Enter\", canvas=ann_fig.canvas, key=\"enter\")\n ann_fig.canvas.callbacks.process(\"key_press_event\", key_event)\n\n # Draw a 4 second long Annotation.\n _fake_click(raw_fig, data_ax, [1.0, 1.0], xform=\"data\", button=1, kind=\"press\")\n _fake_click(raw_fig, data_ax, [5.0, 1.0], xform=\"data\", button=1, kind=\"motion\")\n _fake_click(raw_fig, data_ax, [5.0, 1.0], xform=\"data\", button=1, kind=\"release\")",
"def load_data(self, annotation_json, images_dir):\r\n # Load json from file\r\n json_file = open(annotation_json)\r\n coco_json = json.load(json_file)\r\n json_file.close()\r\n \r\n # Add the class names using the base method from utils.Dataset\r\n source_name = \"coco_like\"\r\n ids={}\r\n i=0\r\n for category in coco_json['categories']:\r\n i+=1\r\n class_id = category['id']\r\n ids[class_id]=i\r\n class_name = category['name']\r\n if class_id < 1:\r\n print('Error: Class id for \"{}\" cannot be less than one. (0 is reserved for the background)'.format(class_name))\r\n return\r\n \r\n self.add_class(source_name, class_id, class_name)\r\n for annotation in coco_json['annotations']:\r\n annotation[\"category_id\"]=ids[annotation[\"category_id\"]]\r\n \r\n # Get all annotations\r\n \r\n annotations = {}\r\n for annotation in coco_json['annotations']:\r\n image_id = annotation['image_id']\r\n if image_id not in annotations:\r\n annotations[image_id] = []\r\n annotations[image_id].append(annotation)\r\n \r\n # Get all images and add them to the dataset\r\n seen_images = {}\r\n for image in coco_json['images']:\r\n image_id = image['id']\r\n if image_id in seen_images:\r\n print(\"Warning: Skipping duplicate image id: {}\".format(image))\r\n else:\r\n seen_images[image_id] = image\r\n try:\r\n image_file_name = image['file_name']\r\n image_width = image['width']\r\n image_height = image['height']\r\n except KeyError as key:\r\n print(\"Warning: Skipping image (id: {}) with missing key: {}\".format(image_id, key))\r\n \r\n image_path = os.path.abspath(os.path.join(images_dir, image_file_name))\r\n image_annotations = annotations[image_id]\r\n \r\n # Add the image using the base method from utils.Dataset\r\n self.add_image(\r\n source=source_name,\r\n image_id=image_id,\r\n path=image_path,\r\n width=image_width,\r\n height=image_height,\r\n annotations=image_annotations\r\n )",
"def __init__(self, dictionary: dict[str, t.Any]) -> None:\n missing_keys = self.__annotations__.keys() - dictionary.keys()\n if missing_keys:\n raise KeyError(f\"Fetched object lacks expected keys: {missing_keys}\")\n for annotation in self.__annotations__:\n setattr(self, annotation, dictionary[annotation])",
"def setAnnotation(self, *args):\n return _libsbml.Model_setAnnotation(self, *args)"
]
| [
"0.6233152",
"0.60961413",
"0.60317147",
"0.5947871",
"0.5821766",
"0.57982534",
"0.5784234",
"0.57694525",
"0.5739188",
"0.56861943",
"0.56736404",
"0.56035674",
"0.55868423",
"0.55451065",
"0.55258566",
"0.55225915",
"0.5500008",
"0.54937",
"0.548274",
"0.54651",
"0.5452524",
"0.54518664",
"0.5451705",
"0.5450651",
"0.5424964",
"0.541316",
"0.5398806",
"0.5387393",
"0.5374061",
"0.53717744"
]
| 0.7512382 | 0 |
Apply aggregation to datacontainer object | def aggregate(self, aggregation):
self._data = self._data.aggregate(**aggregation) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_aggregation_data(self, payload):\n raise NotImplementedError()",
"def aggregate_query(self):\n raise NotImplementedError",
"def _aggregation_target(self):\n ...",
"def aggregate(self, arg):\n return self.agg(arg)",
"def aggregate(self, *args, **kwargs):\n raise NotImplementedError('Cannot re-aggregate an AggregateQuerySet')",
"def aggregate(\n self, axis=0, func=np.nanmean, **kwargs\n ) -> \"Dataset\":\n if axis == \"items\":\n if self.n_items <= 1:\n return self\n\n name = kwargs.pop(\"name\", func.__name__)\n data = func(self.to_numpy(), axis=0, **kwargs)\n item = self._agg_item_from_items(self.items, name)\n da = DataArray(\n data=data,\n time=self.time,\n item=item,\n geometry=self.geometry,\n dims=self.dims,\n zn=self._zn,\n )\n \n return Dataset([da], validate=False)\n else:\n res = {\n name: da.aggregate(axis=axis, func=func, **kwargs)\n for name, da in self._data_vars.items()\n }\n return Dataset(data=res, validate=False)",
"def aggregate_results(self):\n\n raise NotImplementedError",
"def aggregate(self, cls, *args, **kwargs):\n m = mapper(cls)\n return self.impl.aggregate(m.collection, *args, **kwargs)",
"def finalize_aggregated_data(aggregated_data):\n\n if aggregated_data['sum'] < aggregated_data['min']:\n aggregated_data['min'] = aggregated_data['sum']\n\n if aggregated_data['sum'] > aggregated_data['max']:\n aggregated_data['max'] = aggregated_data['sum']\n\n return aggregated_data",
"def aggregator():\n return Aggregator(\n agg_col=\"col_a\", values_col=\"col_b\", aggregates=[\"min\", \"max\", \"avg\", \"sum\"]\n )",
"def fetch_aggregation(self):\n return None",
"def aggregate(self, **aggregations):\n # Before we iterate, reset the aggregations\n for _, agg in aggregations.items():\n agg.reset()\n # Do the accumulation\n for attrs in self:\n for _, agg in aggregations.items():\n agg.accumulate(attrs)\n # Return the results\n return {name: agg.result for name, agg in aggregations.items()}",
"def aggregate(self, agpath):\n return data.Aggregate(self, agpath)",
"def agg(self, arg):\n # DataFrame{'a': [1, 1, 2], 'b': [1, 2, 3], 'c': [2, 2, 1]})\n # a.groupby('a').agg('sum') -- applied on rest\n # a.groupby('a').agg(['sum', 'min']) -- both applied on rest\n # a.groupby('a').agg({'b': ['min', 'mean']}) -- applied on\n # TODO\n # a.groupby('a').aggregate( a= me['a'].mean(), b_min =me['b'].min(), b_mean=me['c'].mean()))\n # f1 = lambda x: x.quantile(0.5); f1.__name__ = \"q0.5\"\n # f2 = lambda x: x.quantile(0.75); f2.__name__ = \"q0.75\"\n # a.groupby('a').agg([f1, f2])\n\n res = {}\n for f, c in zip(self._key_fields, self._unzip_group_keys()):\n res[f.name] = c\n for agg_name, field, op in self._normalize_agg_arg(arg):\n res[agg_name] = self._apply1(field, op)\n return self._parent._fromdata(res, None)",
"def calc_aggregate(self, dataset):\n if not self.needs_aggregate:\n logup('no aggregate calculation needed', level='warning')\n logger.warning(\"no aggregate calculation needed\")\n return # no need to calculate\n if not dataset.is_cached:\n raise HXLException(\"need a cached dataset for calculating an aggregate value\")\n if self.value == 'min':\n self.value = dataset.min(self.pattern)\n self.op = operator.eq\n elif self.value == 'max':\n self.value = dataset.max(self.pattern)\n self.op = operator.eq\n elif self.value == 'not min':\n self.value = dataset.min(self.pattern)\n self.op = operator.ne\n elif self.value == 'not max':\n self.value = dataset.max(self.pattern)\n self.op = operator.ne\n else:\n raise HXLException(\"Unrecognised aggregate: {}\".format(value))\n self.needs_aggregate = False",
"def get_results_from_aggregation_sources(self, context):",
"def aggregate(self):\n data_to_track = {}\n for possession in self.possessions_to_track_aggregate:\n data_to_track[possession] = self._haves[possession]\n\n for variable in self.variables_to_track_aggregate:\n try:\n data_to_track[variable] = self.__dict__[variable]\n except KeyError:\n pass\n self.database_connection.put([\"aggregate\",\n data_to_track,\n self.group,\n self.round])",
"def getAggregateData(self, pipeline: t.Mapping[t.Text, t.Any],\n filter: t.Mapping[t.Text, t.Any] = {},\n ) -> DatasetData:\n session = self.session_maker()\n\n mongoquery = self._mongo_query(session).query(\n filter=filter,\n aggregate=pipeline,\n ).end()\n\n schema = self.general_schema\n data_dir = self.data_dir\n\n #TODO: Make sure this is tested (above vars are unused, why?)\n\n data = [row._asdict() for row in mongoquery.all()]\n\n # close the ORM session when done\n session.close()\n\n return DatasetData(data=data)",
"def aggregate(self, *args, **kwargs):\n return AggregateQuerySet(self, args, kwargs)",
"def test_aggregate_ds(self, thredds_catalog):\n ncml_ns = \"http://www.unidata.ucar.edu/namespaces/netcdf/ncml-2.2\"\n\n top_level_ds = [el for el in thredds_catalog if el.tag == get_full_tag(\"dataset\")]\n agg_ds = None\n for el in top_level_ds[0]:\n if el.tag == get_full_tag(\"dataset\"):\n for subel in el:\n if subel.tag == get_full_tag(\"netcdf\", ns=ncml_ns):\n agg_ds = el\n break\n\n assert agg_ds is not None, \"Aggregation dataset not found\"\n assert self.has_access_method(agg_ds, \"wms\")\n assert self.has_access_method(agg_ds, \"wcs\")\n assert self.has_access_method(agg_ds, \"OpenDAPServer\")\n\n properties = agg_ds.findall(get_full_tag(\"property\"))\n assert len(properties) == 1\n assert \"name\" in properties[0].attrib\n assert \"value\" in properties[0].attrib\n assert \"jasmin.eofrom.space\" in properties[0].attrib[\"value\"]",
"def update_aggregated_data(aggregated_data, datum):\n if 'last_date' not in aggregated_data:\n aggregated_data['last_date'] = datum['date']\n\n if aggregated_data['last_date'] != datum['date']:\n \"\"\"\n We are calculating daily min, max values so only update when hit new date.\n \"\"\"\n\n if aggregated_data['sum'] < aggregated_data['min']:\n aggregated_data['min'] = aggregated_data['sum']\n\n if aggregated_data['sum'] > aggregated_data['max']:\n aggregated_data['max'] = aggregated_data['sum']\n\n aggregated_data['last_date'] = datum['date']\n \n\n sign = 1\n if datum['type'] == 'debit':\n sign = -1\n\n aggregated_data['n'] += 1\n aggregated_data['sum'] += sign * Decimal(datum['amount'])\n\n return aggregated_data",
"def apply_aggregation(t, node_data, f=lambda x, y: x+y):\n try:\n result = dict()\n for k, v in t.iteritems():\n if type(k) is not tuple:\n result[k] = node_data[k]\n else:\n result[k] = apply_tuple(k, node_data, f)\n return result\n except:\n print(t)\n raise",
"def compress(self):\n aggregation_functions = {}\n for attribute in self.__df.columns:\n aggregation_functions[attribute] = self.__aggregate\n grouped_df = self.__df.groupby(by=[self.__config.get_key_attribute()], as_index=False)\n self.__df = grouped_df.agg(aggregation_functions)\n self.__df = self.__df.astype(self.__config.get_data_types())",
"def _update_aggregate_dataset(self, formula, new_dframe, name, groups,\n agg_dataset):\n # parse aggregation and build column arguments\n aggregation, new_columns = self.make_columns(\n formula, name, new_dframe)\n\n agg = Aggregator(self.dataset, self.dframe,\n groups, aggregation, name)\n new_agg_dframe = agg.update(agg_dataset, self, formula, new_columns)\n\n # jsondict from new dframe\n new_data = new_agg_dframe.to_jsondict()\n\n for merged_dataset in agg_dataset.merged_datasets:\n # remove rows in child from this merged dataset\n merged_dataset.remove_parent_observations(\n agg_dataset.dataset_id)\n\n # calculate updates on the child\n merged_calculator = Calculator(merged_dataset)\n call_async(merged_calculator.calculate_updates, merged_calculator,\n new_data, parent_dataset_id=agg_dataset.dataset_id)",
"def aggregate(self):\n aggregations_params = self.pop_aggregations_params()\n if self.view._auth_enabled:\n self.check_aggregations_privacy(aggregations_params)\n self.stub_wrappers()\n\n params = self._query_params.copy()\n params['_aggregations_params'] = aggregations_params\n\n return ACLFilterES(self.view.Model.__name__).aggregate(\n request=self.view.request, **params)",
"def __push_aggregation_lowest_layer(self, aggregation_object, aggregation_name, table, id_name):\n id = 0\n aggregation_value = 0\n for aggregation in aggregation_object:\n id = aggregation[aggregation_name][0]\n aggregation_value = aggregation[aggregation_name][1]\n self.__postgre_db.update(table, \"aggregation=\" + str(aggregation_value), id_name + \"=\" + str(id))",
"def aggregation(self):\n aggregation_texts_snippets = self.__postgre_db.query(\"SELECT aggregate_texts_snippets()\")\n aggregation_snippet_offsets = self.__postgre_db.query(\"SELECT aggregate_snippet_offsets()\")\n\n # push 2 lowest levels of the hierarchy\n self.__push_aggregation_lowest_layer(\n aggregation_texts_snippets, str('aggregate_texts_snippets'), \"texts_snippets\", \"text_id\")\n self.__push_aggregation_lowest_layer(\n aggregation_snippet_offsets, str('aggregate_snippet_offsets'), \"snippet_offsets\", \"id\")\n\n # push rest of the hierarchy\n self.__push_aggregation(\n \"pattern_single_pattern\", \"snippet_offsets\", str('pattern_id'), str('single_pattern_id'))\n self.__push_aggregation(\"has_object\", \"pattern_single_pattern\", str('bscale_id'), str('pattern_id'))\n self.__push_aggregation(\"has_attribute\", \"has_object\", str('bsort_id'), str('bscale_id'))",
"def _aggregate(group_df):\n out = {}\n for col in data_columns:\n # The name of the error column (if it exists)\n error_col = f\"{col}_moe\"\n\n # remove any NaN rows\n subset = group_df.dropna(subset=[col], how=\"any\")\n\n # aggregat if we had any rows left\n if len(subset):\n\n # column values, margin of error (if it exists)\n args = np.column_stack(\n [subset[col], subset.get(error_col, np.zeros(len(subset)))]\n )\n\n # do the aggregation\n aggval, moe = cda.approximate_sum(*args)\n else:\n aggval = moe = np.nan\n\n # store\n out[col] = aggval\n if error_col in subset.columns:\n out[f\"{col}_moe\"] = moe\n\n out[\"geometry\"] = group_df.geometry.unary_union\n return pd.Series(out)",
"def set_aggregate_data(self, event_name, value, key=None):\n \n raise NotImplementedError()",
"def _aggregate(self, *params): \n serialized_params = np.array([self._serialize(client) for client in params])\n serialized_aggregation = self._aggregate(*serialized_params)\n aggregated_weights = self._deserialize(serialized_aggregation)\n \n return aggregated_weights"
]
| [
"0.7029448",
"0.6770916",
"0.6738702",
"0.6578427",
"0.6575661",
"0.6472465",
"0.6460138",
"0.64330184",
"0.64211506",
"0.6398619",
"0.6384194",
"0.6379172",
"0.6363482",
"0.6252356",
"0.616049",
"0.6106326",
"0.6095447",
"0.60918665",
"0.60694814",
"0.6055015",
"0.60463846",
"0.60434234",
"0.59948504",
"0.59895617",
"0.59876394",
"0.5984618",
"0.5957529",
"0.59330124",
"0.586444",
"0.5862268"
]
| 0.74370414 | 0 |
Stop all timers in a canvas. | def _stop_timers(canvas):
for attr in dir(canvas):
try:
attr_obj = getattr(canvas, attr)
except NotImplementedError:
# This try/except is needed because canvas.position raises
# an error (it is not implemented in this backend).
attr_obj = None
if isinstance(attr_obj, Timer):
attr_obj.stop() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cancel_all(self):\n for timer in self._timers:\n timer.Stop()",
"def stopAll():\n master = MasterTimer.getMasterTimer()\n\n for timer in master.timers.values():\n timer.over_start = 0 # deal with what recursion may have caused\n timer.stop()\n\n _Timer._frozen = True\n\n master.end_time = time.time()",
"def killtimers(self):\n for timer in self._timers: timer.cancel()\n self._timers = []",
"def stop_timer(self):\r\n self.countdownTimer.stop()",
"def stop_all_timers(self):\n if self.advertise_wait_entry is not None:\n self.advertise_wait_entry.deactivate()\n if self.neighbor_refresh_timer_entry is not None:\n self.neighbor_refresh_timer_entry.deactivate()",
"def cancel_multi_kill_timer(self) -> None:\n self._multi_kill_timer = None",
"def stop(self):\n self.m_stop = True\n self.m_canvas.clear()",
"def close(self):\n for runner in self.values():\n runner.stop()",
"def loop_stop(self):\n super(TimerLoop, self).loop_stop()\n self.timer.cancel()\n self.loop_confirm_stopped()",
"def stop(self):\n with self._lock:\n self._running.clear()\n if self._timer:\n self._timer.cancel()\n self._timer = None",
"def stop_all_threads_and_timer(self):\n\n try:\n # thread_manager threads\n self.thread_manager.stop_threads()\n\n # stop_services\n renderthreads_services_setup.stop_services(self)\n\n except:\n # log\n self.logger.debug('Error stopping threads for queue.')",
"def _stop_all(self):\n # LEDs\n self.cam_led.off\n self.analysis_led[0].off\n self.analysis_led[1].off\n self.error.off\n \n # motors\n self.motor.stop()\n self.wash.stop()",
"def stop(self):\n if self.running:\n self._unschedule_all()\n self.loop.stop()",
"def stop_all():\n\twhile _running:\n\t\t_running[0].stop(noerror=True)",
"def clear_events():\n while len(events) > 0:\n canvas.delete(events.pop())",
"def reset_stop_timer(self) -> None: \r\n self.stop_timer = 0",
"def kill(self):\n for tlight in self.trafficLights:\n self.trafficLights[tlight].stop()",
"def stop(self):\n for task in self._tasks:\n task.stop()",
"def stop_timer(self):\n self.end_time = datetime.now()",
"def disconnect_events(self):\n for c in self._cids:\n self.canvas.mpl_disconnect(c)",
"def stop(self) -> None:\n for instance in self.instances:\n instance.listener = None\n instance.stop()",
"def stop_all():\r\n motors.stop_all_motors()\r\n led.set_colour_solid(0)\r\n display.clear()",
"def stop(self):\n self._stopped.set()\n if self._timer:\n self._timer.cancel()\n self._timer = None",
"def stop_timer(self):\n self.log.info(\"{} timer stopped ({} seconds)\".format(self.name, self.interval))\n self.start_event.clear()\n # self.count = self.interval / self.sleep_chunk",
"def stop(self):\r\n for srv in self._servers:\r\n srv.stop()",
"def stop(self) -> None:\n with self._samplers_lock:\n for sampler in self._samplers.values():\n sampler.stop()",
"def stopall(self):\n\n for i in self.bots:\n try:\n i.stop()\n except:\n pass",
"def kill_all(self):\n self._stop_all('kill')",
"def cancel_all():\n\twhile _running:\n\t\t_running[0].cancel(noerror=True)",
"def stop():\n current_event_loop().stop()"
]
| [
"0.7255637",
"0.70239574",
"0.69405055",
"0.6344354",
"0.62706256",
"0.61834806",
"0.61712563",
"0.616413",
"0.6134287",
"0.60338205",
"0.6012264",
"0.59403634",
"0.59361386",
"0.5933978",
"0.5830889",
"0.58190876",
"0.5800669",
"0.57923174",
"0.5773049",
"0.57693774",
"0.5759693",
"0.57549804",
"0.5735152",
"0.5674505",
"0.56677353",
"0.56476986",
"0.5625832",
"0.56030476",
"0.55921537",
"0.55617565"
]
| 0.78883404 | 0 |
Logit cross-entropy loss with masking. | def masked_logit_cross_entropy(preds, labels, mask):
loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=preds, labels=labels)
loss = tf.reduce_sum(loss, axis=1)
mask = tf.cast(mask, dtype=tf.float32)
mask /= tf.maximum(tf.reduce_sum(mask), tf.constant([1.]))
loss *= mask
return tf.reduce_mean(loss) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def masked_softmax_cross_entropy(logits, labels, mask):\n loss = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels)\n mask = tf.cast(mask, dtype=tf.float32)\n mask /= tf.reduce_mean(mask)\n loss *= mask\n return tf.reduce_mean(loss)",
"def masked_softmax_cross_entropy(preds, labels, mask):\n loss = tf.nn.softmax_cross_entropy_with_logits(logits=preds, labels=labels)\n mask = tf.cast(mask, dtype=tf.float32)\n mask /= tf.reduce_mean(mask)\n loss *= mask\n return tf.reduce_mean(loss)",
"def masked_sigmoid_cross_entropy(logits, labels, mask):\n labels = tf.cast(labels, dtype=tf.float32)\n loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels)\n loss=tf.reduce_mean(loss,axis=1)\n mask = tf.cast(mask, dtype=tf.float32)\n mask /= tf.reduce_mean(mask)\n loss *= mask\n return tf.reduce_mean(loss)",
"def masked_softmax_cross_entropy(preds, labels, mask):\n loss = tf.nn.softmax_cross_entropy_with_logits(logits=preds, labels=labels)\n mask = tf.cast(mask, dtype=tf.float32)\n mask /= tf.reduce_mean(mask)\n loss *= mask\n return tf.reduce_mean(loss)",
"def masked_softmax_cross_entropy(preds, labels, mask):\n loss = tf.nn.softmax_cross_entropy_with_logits(logits=preds, labels=labels)\n mask = tf.cast(mask, dtype=tf.float32)\n mask /= tf.reduce_mean(mask)\n loss *= mask\n return tf.reduce_mean(loss)",
"def masked_softmax_cross_entropy(preds, labels, mask):\n loss = tf.nn.softmax_cross_entropy_with_logits(logits=preds, labels=labels)\n mask = tf.cast(mask, dtype=tf.float32)\n mask /= tf.reduce_mean(mask)\n loss *= mask\n return tf.reduce_mean(loss)",
"def masked_softmax_cross_entropy(preds, labels, mask):\r\n loss = tf.nn.softmax_cross_entropy_with_logits(logits=preds, labels=labels)\r\n mask = tf.cast(mask, dtype=tf.float32)\r\n mask /= tf.reduce_mean(mask)\r\n loss *= tf.transpose(mask)\r\n return tf.reduce_mean(tf.transpose(loss))",
"def masked_softmax_cross_entropy(preds, labels, mask):\n loss = tf.nn.softmax_cross_entropy_with_logits(logits=preds, labels=labels)\n mask = tf.cast(mask, dtype=tf.float32)\n mask /= tf.maximum(tf.reduce_sum(mask), tf.constant([1.]))\n loss *= mask\n return tf.reduce_mean(loss)",
"def masked_softmax_cross_entropy(self, preds, labels, mask):\n loss = tf.nn.softmax_cross_entropy_with_logits(logits=preds, labels=labels)\n mask = tf.cast(mask, dtype=tf.float32)\n mask /= tf.reduce_mean(mask)\n loss *= mask\n return tf.reduce_mean(loss)",
"def cross_entropy(input: Tensor, target: Tensor) -> Tensor:\n norm_log = log_softmax(input, 1)\n\n np_one_hot = np.eye(input.shape[1])[target.data]\n tensor_one_hot = tensor(np_one_hot, 'one-hot', False, True)\n\n mask = -norm_log * tensor_one_hot\n mask_sum = sum(mask, 1)\n loss = sum(mask_sum, 0)\n\n return loss / input.shape[0]",
"def masked_sigmoid_cross_entropy(preds, labels, mask):\n loss_all = tf.nn.sigmoid_cross_entropy_with_logits(\n logits=preds, labels=labels)\n mask = tf.cast(mask, dtype=tf.float32)\n mask /= tf.reduce_mean(mask)\n loss = tf.multiply(loss_all, mask[:, tf.newaxis])\n return tf.reduce_mean(loss)",
"def cross_entropy_loss(self, logits, labels):\n return F.cross_entropy(logits, labels)",
"def loss(self, labels, logits, mask=None):\n losses = tf.nn.softmax_cross_entropy_with_logits(tf.one_hot(labels, depth=logits.shape[-1]), logits, axis=-1)\n seq_mask = mask # logits._keras_mask# tf.sequence_mask(lengths, self.seq_len)\n loss = tf.reduce_mean(tf.boolean_mask(losses, seq_mask))\n\n return loss",
"def masked_bilinearsigmoid_cross_entropy(preds, labels, mask, negative_mask):\r\n\r\n loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=preds, labels=labels)\r\n mask += negative_mask\r\n mask = tf.cast(mask, dtype=tf.float32)\r\n # mask /= tf.reduce_mean(mask)\r\n mask = tf.reshape(mask, shape=[79924])\r\n loss *= mask\r\n return tf.reduce_mean(loss)",
"def test_with_binary_cross_entropy_loss(self):\n predict = self._get_default_predictions_tensor()\n target = torch.randn(self.batch, self.num_classes, self.img_size, self.img_size)\n mask = self._get_default_mask_tensor()\n\n loss_weigths = [1.0, 0.5]\n ce_crit = nn.BCEWithLogitsLoss(reduction=\"none\")\n mask_ce_crit = MaskAttentionLoss(criterion=ce_crit, loss_weights=loss_weigths)\n\n # expected result\n ce_loss = ce_crit(predict, target)\n _mask = mask.expand_as(ce_loss)\n mask_loss = ce_loss * _mask\n mask_loss = mask_loss[_mask == 1] # consider only mask samples for mask loss computing\n expected_loss = ce_loss.mean() * loss_weigths[0] + mask_loss.mean() * loss_weigths[1]\n\n # mask ce loss result\n loss = mask_ce_crit(predict, target, mask)\n\n self._assertion_torch_values(expected_loss, loss)",
"def classification_loss(self, logit, target):\n return F.cross_entropy(logit, target)",
"def _bce_loss_with_logits(output, labels, **kwargs):\n return F.binary_cross_entropy_with_logits(output, labels, reduction='none', **kwargs)",
"def mask_logits(x, mask):\n return x + -1e30 * (1 - mask)",
"def loss(logits, labels):\n labels = tf.to_int64(labels)\n# labels = tf.to_float(labels)\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits, name='xentropy')\n# y_conv = tf.nn.softmax(logits)\n# cross_entropy = -tf.reduce_sum(labels*tf.log(y_conv))\n return tf.reduce_mean(cross_entropy, name='xentropy_mean')",
"def test_with_cross_entropy_loss_maskless(self):\n predict = torch.randn(self.batch, self.num_classes, self.img_size, self.img_size)\n target = self._get_default_target_tensor()\n # Create a mask filled with zeros to disable the attention component\n mask = self._get_default_mask_tensor() * 0.0\n\n loss_weigths = [1.0, 0.5]\n ce_crit = nn.CrossEntropyLoss(reduction=\"none\")\n mask_ce_crit = MaskAttentionLoss(criterion=ce_crit, loss_weights=loss_weigths)\n\n # expected result - no contribution from mask\n ce_loss = ce_crit(predict, target)\n expected_loss = ce_loss.mean() * loss_weigths[0]\n\n # mask ce loss result\n loss = mask_ce_crit(predict, target, mask)\n\n self._assertion_torch_values(expected_loss, loss)",
"def cross_entropy_loss(logits, labels, label_smoothing=0., dtype=jnp.float32):\n num_classes = logits.shape[-1]\n labels = jax.nn.one_hot(labels, num_classes, dtype=dtype)\n if label_smoothing > 0:\n labels = labels * (1 - label_smoothing) + label_smoothing / num_classes\n logp = jax.nn.log_softmax(logits.astype(dtype))\n return -jnp.mean(jnp.sum(logp * labels, axis=-1))",
"def softmax_cross_entropy_loss(logit, labels):\n p = softmax(logit)\n loss_i = - labels * np.log(p + 1e-8)\n return np.mean(loss_i)",
"def cross_entropy_loss(outputs, labels): \n# works properly\n \n m = labels.shape[0]\n p = outputs\n log_likelihood = -1*torch.log(p[range(m),labels])\n loss = torch.sum(log_likelihood) / m\n return loss.item()",
"def test_with_cross_entropy_loss(self):\n predict = torch.randn(self.batch, self.num_classes, self.img_size, self.img_size)\n target = self._get_default_target_tensor()\n mask = self._get_default_mask_tensor()\n\n loss_weigths = [1.0, 0.5]\n ce_crit = nn.CrossEntropyLoss(reduction=\"none\")\n mask_ce_crit = MaskAttentionLoss(criterion=ce_crit, loss_weights=loss_weigths)\n\n # expected result\n ce_loss = ce_crit(predict, target)\n _mask = mask.view_as(ce_loss)\n mask_loss = ce_loss * _mask\n mask_loss = mask_loss[_mask == 1] # consider only mask samples for mask loss computing\n expected_loss = ce_loss.mean() * loss_weigths[0] + mask_loss.mean() * loss_weigths[1]\n\n # mask ce loss result\n loss = mask_ce_crit(predict, target, mask)\n\n self._assertion_torch_values(expected_loss, loss)",
"def pixel_bce_with_logits(input, target):\n if not (target.size() == input.size()):\n raise ValueError(\"Target size ({}) must be the same as input size ({})\".format(target.size(), input.size()))\n\n max_val = (-input).clamp(min=0)\n loss = input - input * target + max_val + ((-max_val).exp() + (-input - max_val).exp()).log()\n\n '''if weight is not None:\n loss = loss * weight\n if size_average:\n return loss.mean()\n else:\n return loss.sum()'''\n\n return loss",
"def gcn_masked_softmax_cross_entropy(preds, labels, positive_mask, negative_mask, pos_weight):\r\n loss = tf.nn.weighted_cross_entropy_with_logits(targets=labels, logits=preds, pos_weight=pos_weight)\r\n\r\n # preds = tf.cast(preds, tf.float32)\r\n # labels = tf.cast(labels, tf.float32)\r\n # loss = tf.square(preds - labels)\r\n\r\n positive_mask += negative_mask\r\n # print(mask)\r\n mask = tf.cast(positive_mask, dtype=tf.float32)\r\n # mask /= tf.reduce_mean(mask)\r\n mask = tf.reshape(mask, shape=[79924])\r\n\r\n loss *= mask\r\n return tf.reduce_mean(loss)",
"def cross_entropy_loss():\n return nn.CrossEntropyLoss()",
"def calculate_cross_entropy(self, output, flat_labels): #completed, expensive, should be compiled\n return -np.sum(np.log(np.clip(output, a_min=1E-12, a_max=1.0))[np.arange(flat_labels.shape[0]), flat_labels[:,1]])",
"def binary_crossentropy(output, target):\r\n return -(target * tensor.log(output) + (1.0 - target) * tensor.log(1.0 - output))",
"def maskNLLLoss(self, decoder_output, target, mask, dev):\n mask = mask.unsqueeze(-1)\n nTotal = mask.sum()\n crossEntropy = -torch.log(torch.gather(decoder_output.squeeze(1), 1, target.unsqueeze(-1)).squeeze(1))\n loss = crossEntropy.masked_select(mask).mean()\n loss = loss.to(dev)\n return loss, nTotal.item()"
]
| [
"0.8175898",
"0.78751826",
"0.78335226",
"0.78063595",
"0.78063595",
"0.78063595",
"0.77657765",
"0.77401185",
"0.7647151",
"0.7540406",
"0.7469373",
"0.7366661",
"0.73047715",
"0.7263361",
"0.70555925",
"0.7047556",
"0.7037855",
"0.7005814",
"0.69924366",
"0.69920903",
"0.6989983",
"0.6972032",
"0.69042915",
"0.6872337",
"0.6863052",
"0.68595326",
"0.68437576",
"0.68215686",
"0.6783512",
"0.6778606"
]
| 0.82311463 | 0 |
L2 loss with masking. | def masked_l2(preds, actuals, mask):
loss = tf.reduce_sum(tf.square(preds - actuals), axis=1)
mask = tf.cast(mask, dtype=tf.float32)
mask /= tf.reduce_mean(mask)
loss *= mask
return tf.reduce_mean(loss) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def l2_loss(input, target):\n pos_inds = torch.nonzero(target > 0.0).squeeze(1)\n if pos_inds.shape[0] > 0:\n cond = torch.abs(input[pos_inds] - target[pos_inds])\n loss = 0.5 * cond**2 / pos_inds.shape[0]\n else:\n loss = input * 0.0\n return loss.sum()",
"def masked_l1_loss(prediction, target, mask):\n abs_error = F.l1_loss(prediction, target, reduction='none')\n loss = weighted_mean(abs_error, mask)\n return loss",
"def l2_loss(inputs, reduction='mean', **kwargs):\n args = ArgHelper.parse(locals())\n args['reduction'] = reduction.upper()\n op_lib = loss_ops_lib.L2Loss\n if context.executing_eagerly():\n return op_lib \\\n .instantiate(reduction=args['reduction']) \\\n .apply(inputs)\n else:\n return op_lib.blend(**args)",
"def loss_hole(self, mask, y_true, y_pred):\n return self.l1((1-mask) * y_true, (1-mask) * y_pred)",
"def loss_valid(self, mask, y_true, y_pred):\n return self.l1(mask * y_true, mask * y_pred)",
"def l1_loss(y_true, y_pred, y_mask):\n y_shape = tf.shape(y_true)\n border = 3\n max_pixels_shifts = 2*border\n size_image = HR_SIZE\n size_croped_image = size_image - max_pixels_shifts\n clear_pixels = size_croped_image*size_croped_image\n cropped_predictions = y_pred[:, border:size_image -\n border, border:size_image-border]\n\n X = []\n for i in range(max_pixels_shifts+1): # range(7)\n for j in range(max_pixels_shifts+1): # range(7)\n cropped_labels = y_true[:, i:i+(size_image-max_pixels_shifts),\n j:j+(size_image-max_pixels_shifts)]\n cropped_y_mask = y_mask[:, i:i+(size_image-max_pixels_shifts),\n j:j+(size_image-max_pixels_shifts)]\n\n cropped_y_mask = tf.cast(cropped_y_mask, tf.float32)\n\n cropped_predictions_masked = tf.cast(\n cropped_predictions, tf.float32)*cropped_y_mask\n cropped_labels_masked = cropped_labels*cropped_y_mask\n\n total_pixels_masked = tf.reduce_sum(cropped_y_mask, axis=[1, 2])\n\n # bias brightness\n b = (1.0/total_pixels_masked)*tf.reduce_sum(\n tf.subtract(cropped_labels_masked, cropped_predictions_masked),\n axis=[1, 2])\n\n b = tf.reshape(b, [y_shape[0], 1, 1, 1])\n\n corrected_cropped_predictions = cropped_predictions_masked+b\n corrected_cropped_predictions = corrected_cropped_predictions*cropped_y_mask\n\n l1_loss = (1.0/total_pixels_masked)*tf.reduce_sum(\n tf.abs(\n tf.subtract(cropped_labels_masked,\n corrected_cropped_predictions)\n ), axis=[1, 2]\n )\n X.append(l1_loss)\n X = tf.stack(X)\n min_l1 = tf.reduce_min(X, axis=0)\n\n return min_l1",
"def l2_loss(x, y, kernel_name=\"l2_loss\"):\n shape = x.get(\"shape\")\n dtype = x.get(\"dtype\")\n\n check_shape(shape, param_name=\"x\")\n\n check_list = [\"float16\", \"float32\"]\n if not dtype.lower() in check_list:\n raise RuntimeError(\n \"l2_loss only support float16 float32\")\n\n shape, axis = util.simplify_axis_shape(shape, range(len(shape)))\n\n inp_dtype = dtype.lower()\n data_input = tvm.placeholder(shape, name=\"data_input\", dtype=inp_dtype)\n\n coeff_sqrt = tvm.const(1.0 / (2**(0.5)), dtype=inp_dtype)\n\n data_mul = te.lang.cce.vmuls(data_input, coeff_sqrt)\n data_sqr = te.lang.cce.vmul(data_mul, data_mul)\n res = te.lang.cce.sum(data_sqr, axis)\n\n with tvm.target.cce():\n sch = generic.auto_schedule(res)\n\n config = {\"name\": kernel_name,\n \"tensor_list\": [data_input, res]}\n te.lang.cce.cce_build_code(sch, config)",
"def l2_loss(obs, actual):\n # (tf.Tensor, tf.Tensor, float) -> tf.Tensor\n return tf.reduce_sum(tf.square(obs - actual), 1)",
"def _get_l2_reg(self) -> torch.Tensor:\n loss = 0\n for param in self.model.parameters():\n loss += (param ** 2).sum()\n return loss",
"def detector_loss(self, input, target, mask=None, loss_type=\"softmax\"):\n if loss_type == \"l2\":\n loss_func = nn.MSELoss(reduction=\"mean\")\n loss = loss_func(input, target)\n elif loss_type == \"softmax\":\n loss_func_BCE = nn.BCELoss(reduction='none').cuda()\n loss = loss_func_BCE(nn.functional.softmax(input, dim=1), target)\n loss = (loss.sum(dim=1) * mask).sum()\n loss = loss / (mask.sum() + 1e-10)\n return loss",
"def maskNLLLoss(self, decoder_output, target, mask, dev):\n mask = mask.unsqueeze(-1)\n nTotal = mask.sum()\n crossEntropy = -torch.log(torch.gather(decoder_output.squeeze(1), 1, target.unsqueeze(-1)).squeeze(1))\n loss = crossEntropy.masked_select(mask).mean()\n loss = loss.to(dev)\n return loss, nTotal.item()",
"def loss(self,A2,label):\r\n m = label.shape[0]\r\n\r\n log_likelihood = -np.log(A2[label,range(m)])\r\n loss = np.sum(log_likelihood) / m\r\n return loss",
"def kp_l2_loss(kp_pred, kp_gt):\n criterion = torch.nn.MSELoss()\n\n vis = (kp_gt[:, :, 2, None] > 0).float()\n\n # This always has to be (output, target), not (target, output)\n return criterion(vis * kp_pred, vis * kp_gt[:, :, :2])",
"def loss_fn(self, lbl, y):\n\n binlbl = self._to_device(lbl[:,0]>.5)\n # center = self._to_device(lbl[:,3]) \n offset = 5. * self._to_device(lbl[:,1:]) \n\n loss = self.criterion(y[:,:2], offset) \n loss2 = self.criterion2(y[:,2], binlbl)\n\n # loss3 = self.criterion(y[:,3], center)\n\n loss = loss + loss2\n return loss",
"def bits_loss(msg, output, message_length):\n return reconstruction_loss(msg, output) * message_length",
"def L2(yhat, y):\n loss = np.dot((y - yhat).T,(y - yhat))\n \n return loss",
"def tf_l2_loss(Gt, pred,_axis):\n l2diff = tf.subtract(Gt, pred)\n l2loss = tf.reduce_sum(tf.square(l2diff), axis=_axis)\n l2loss = tf.maximum(l2loss, 1e-10)\n l2loss = tf.sqrt(l2loss) # (n_batch, n_class) -> (n_batch, 1)\n\n return l2loss",
"def _flat_reconstruction_loss(self, flat_x_target, flat_rnn_output):\n pass",
"def smooth_l1_loss(y_true, y_pred):\n\n # Take absolute difference\n x = K.abs(y_true - y_pred)\n\n # Find indices of values less than 1\n mask = K.cast(K.less(x, 1.0), \"float32\")\n\n # Loss calculation for smooth l1\n loss = (mask * (0.5 * x ** 2)) + (1 - mask) * (x - 0.5)\n return loss",
"def l2_loss(self, t, use_logit: bool = False):\n c = 0\n if use_logit:\n return np.mean([(self._irf[i].interpolant(t[p]) - logit(self._x[p, i])) ** 2\n for p in range(t.shape[0]) for i in range(self._x.shape[1])])\n else:\n return np.mean([(self._irf[i].interpolant(t[p]) - self._x[p, i]) ** 2\n for p in range(t.shape[0]) for i in range(self._x.shape[1])])",
"def compute_loss(self):",
"def content_loss(content, target):\n return l2_loss(content, target)",
"def l2_loss(params):\n \"\"\" It is a vec for each branch\"\"\"\n loss_branches_vec = []\n # TODO This is hardcoded but all our cases rigth now uses four branches\n for i in range(len(params['branches']) -1):\n loss_branches_vec.append(((params['branches'][i] - params['targets']) **2\n * params['controls_mask'][i])\n * params['branch_weights'][i])\n \"\"\" The last branch is a speed branch\"\"\"\n # TODO: Activate or deactivate speed branch loss\n loss_branches_vec.append((params['branches'][-1] - params['inputs']) ** 2\n * params['branch_weights'][-1])\n return loss_branches_vec, {}",
"def localisation_loss(self, y_true, y_pred):\n mask_positives = y_true[..., 1]\n # Calculate the smooth L1 loss between every ground truth box and every default box\n loc_loss_all = tf.cast(self.smooth_L1(y_true[..., 2:6], y_pred[..., 2:6]), dtype=tf.float32)\n # Pick only the positive predicted boxes with the positive mask and sum over all coordinates of a box\n loss_positives = tf.reduce_sum(loc_loss_all * mask_positives, axis=-1)\n return loss_positives",
"def mask_iou_loss(labels, pred_maskiou, gt_maskiou, loss_weight):\n def l2_loss(input, target):\n \"\"\"\n very similar to the smooth_l1_loss from pytorch, but with\n the extra beta parameter\n \"\"\"\n pos_inds = torch.nonzero(target > 0.0).squeeze(1)\n if pos_inds.shape[0] > 0:\n cond = torch.abs(input[pos_inds] - target[pos_inds])\n loss = 0.5 * cond**2 / pos_inds.shape[0]\n else:\n loss = input * 0.0\n return loss.sum()\n\n if labels.numel() == 0:\n return pred_maskiou.sum() * 0\n \n index = torch.arange(pred_maskiou.shape[0]).to(device=pred_maskiou.device)\n maskiou_loss = l2_loss(pred_maskiou[index, labels], gt_maskiou)\n maskiou_loss = loss_weight * maskiou_loss\n \n return maskiou_loss",
"def loss(self, x, y):\n (N,D) = x.shape\n k1 = np.matmul(x,np.transpose(self.w)) + self.b\n y1 = y.reshape((N,1))\n c2 = 0\n c1 = (np.log(1+np.exp(-1*y1*k1)))\n for i in range(N):\n c2 += c1[i][0]\n l = c2 / N + (0.5 * self.l2_reg * np.dot(self.w,np.transpose(self.w)))\n l1 = l[0][0]\n return l1\n\n\n #raise NotImplementedError",
"def make_loss_mask(shapes):\n\t# TODO(student): Write code.\n\n\tmasks = tf.sequence_mask(shapes, 500)\n\tmasks.set_shape([None, 2, 500])\n\tn_sample = tf.shape(shapes)[0]\n\n\tv1 = tf.cast(tf.broadcast_to(tf.expand_dims(masks[:, 0, :], 2), (n_sample, 500, 500)), tf.float32)\n\tv2 = tf.cast(tf.broadcast_to(tf.expand_dims(masks[:, 1, :], 2), (n_sample, 500, 500)), tf.float32)\n\treturn v1*tf.transpose(v2, [0, 2, 1])",
"def loss_fn(model):\n with flax.nn.stateful(state) as new_state:\n with flax.nn.stochastic(prng_key):\n logits = model(batch['image'])\n loss = cross_entropy_loss(logits, batch['label'])\n # TODO(britefury): check if applying L2 regularization to weights but\n # *not* biases improves results\n weight_penalty_params = jax.tree_leaves(model.params)\n weight_l2 = sum([jnp.sum(x ** 2)\n for x in weight_penalty_params\n if x.ndim > 1])\n weight_penalty = l2_reg * 0.5 * weight_l2\n loss = loss + weight_penalty\n return loss, (new_state, logits)",
"def masked_bilinearsigmoid_cross_entropy(preds, labels, mask, negative_mask):\r\n\r\n loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=preds, labels=labels)\r\n mask += negative_mask\r\n mask = tf.cast(mask, dtype=tf.float32)\r\n # mask /= tf.reduce_mean(mask)\r\n mask = tf.reshape(mask, shape=[79924])\r\n loss *= mask\r\n return tf.reduce_mean(loss)",
"def _compute_unreduced_loss_impl(self, labels, logits, mask=None):\n raise NotImplementedError('Calling an abstract method.')"
]
| [
"0.7192427",
"0.7092018",
"0.6669804",
"0.6657751",
"0.65931445",
"0.6583433",
"0.6574499",
"0.6478774",
"0.6429942",
"0.63954186",
"0.63901883",
"0.63768196",
"0.6355528",
"0.6325545",
"0.63063776",
"0.6249161",
"0.6247764",
"0.6239328",
"0.6223718",
"0.6219932",
"0.62042207",
"0.61830294",
"0.6169123",
"0.61417633",
"0.61386967",
"0.6108964",
"0.6097203",
"0.6095952",
"0.6062591",
"0.60590535"
]
| 0.7907347 | 0 |
Indicator function for zero elements z. | def fun(self, x):
if np.any(x[self._z] != 0):
return np.inf
else:
return 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def step(z):\n result = np.zeros_like(z)\n result[z > 0] = 1.0\n return result",
"def closure(Z):\r\n Z = np.array(Z)\r\n Z = Z/float(np.sum(Z))\r\n if any(Z < 0):\r\n return None\r\n else:\r\n return Z",
"def alpha_where_z_not_zero(z, x, beta2):\n\n arg1 = np.sqrt(2 * np.abs(m(z, x, beta2)))\n arg2 = -2 * (m(z, x, beta2) + nu(x, beta2))\n arg3 = 2 * eta(z, x, beta2) / arg1\n \n zsign=np.sign(z)\n \n return np.real(1 / 2 * (zsign*arg1 + np.sqrt(abs(arg2 -zsign*arg3))))",
"def zero_crosser(indicator: pd.Series) -> pd.Series:\n indicator = indicator.fillna(0)\n return (((indicator.shift() * indicator) <= 0) * np.sign(indicator)).astype(int)",
"def __Vs__(self, z):\r\n return -np.array(I(z > self.psi), dtype=int)",
"def has_z(self): # -> bool:\n ...",
"def zzX_zero_of(f, d=0):\n return zzX_zero(poly_level(f)-d)",
"def is_zero(self, a):\n return not a",
"def zzx_content(f):\n cont = INT_ZERO\n\n for coeff in f:\n cont = igcd(cont, coeff)\n\n if cont == 1:\n break\n\n return cont",
"def is_zero(self):\n return float(self.coeff.nominator) / self.coeff.denominator == 0.0",
"def zzX_zero_p(f):\n if poly_univariate_p(f):\n return not f\n else:\n if len(f) == 1:\n return zzX_zero_p(f[0])\n else:\n return False",
"def pz_fn(self, z):\n pass",
"def nnz(self):",
"def z(self) -> int:",
"def zero_crossings(x):\n return np.array(np.where(np.diff(np.sign(x)))[0])",
"def __nonzero__(self):\n return not self.as_point == (0, 0)",
"def userToPlotZ(z): \n return dislin.nzposn(z)",
"def get_zeros(self):\n return self.serie.isin([0]).sum()",
"def poly_from_zeros(z):\n if len(z) == 0:\n return [1]\n p = [1, -z[0]]\n for k in range(1, len(z)):\n p = _convolve(p, [1, -z[k]])\n return p",
"def fun(self, x):\n if np.any(x > 0):\n return np.inf\n else:\n return 0",
"def z0(self):\n return self.params['z0']",
"def invert0(x):\n return 0 if x > 0 else 1",
"def sigmoid(z): \n return 1/(1 + np.e**(-z))",
"def fun(self, x):\n if np.any(x < 0):\n return np.inf\n else:\n return 0",
"def zeros_like(self):\n raise NotImplementedError",
"def alpha_where_z_equals_zero(x, beta2):\n b = nu(x,beta2)\n c = -3*(beta2 * x**2)/4/beta2/(1+x)\n root1 = (-b + np.sqrt(b**2 - 4*c))/2\n # root2 = (-b - np.sqrt(b**2 - 4*c))/2 \n # since b>0, root2 is always negative and discarded\n \n return sqrt(root1)",
"def missing_values(dataset, inverse=False):\n zero = numpy.int8(0)\n one = numpy.int8(1)\n if inverse:\n converter = numpy.vectorize(lambda x:\n zero if (x == 0.0 and math.copysign(1, x) < 0.0) else one)\n else:\n converter = numpy.vectorize(lambda x:\n one if (x == 0.0 and math.copysign(1, x) < 0.0) else zero)\n return converter(dataset)",
"def z(self) -> float:\n return self.A[3] if self.scalar_vector else self.A[2]",
"def __init__(\n self, z, scale=None, stretch=None, shift=None, linear=None, const=None,\n ):\n # stretch can be eliminated by bringing into shift\n # (since multiplying does not change zero locations)\n if stretch is not None and shift is not None:\n shift = shift/stretch\n stretch = None\n\n self._z = z\n\n # we can also eliminate scaling,\n # but this is taken care of by parent class\n super(ZerosInd, self).__init__(\n scale=scale, stretch=stretch, shift=shift,\n linear=linear, const=const,\n )",
"def I(x):\n if abs(x-L/2.0) > 0.1:\n return 0\n else:\n return 1"
]
| [
"0.6553376",
"0.6466061",
"0.63404375",
"0.62919295",
"0.62098426",
"0.61954325",
"0.61022073",
"0.6096026",
"0.60554713",
"0.6028677",
"0.60004807",
"0.600019",
"0.5950524",
"0.59340006",
"0.5925675",
"0.59205705",
"0.59179205",
"0.59094787",
"0.5909203",
"0.58988225",
"0.58760476",
"0.5871982",
"0.58521545",
"0.5843707",
"0.578385",
"0.5783703",
"0.57763565",
"0.57735384",
"0.57707906",
"0.5723644"
]
| 0.7281125 | 0 |
Takes the name of the table, the columns to display, and the query (such as a where or an inner join); this part is optional, and prints the result of the query | def make_query(table_name, cols, query):
str_query = None
if query == None:
str_query = "SELECT {} FROM {};".format(cols, table_name)
else:
str_query = "SELECT {} FROM {} {};".format(cols, table_name, query)
print(">>>ejecutando: ", str_query)
sistema.cursor.execute(str_query)
for row in sistema.cursor.fetchall():
print(row) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def construct_result_table(self, biomodels):\n select = 'SELECT DISTINCT '\n cached_alias = 'c'\n join_exps = 'FROM %s_%s AS %s' % (self.app_label, \n settings.DISBI['JOINED_TABLENAME'], \n cached_alias)\n left_join_template = '''\n LEFT JOIN (\n %s\n ) AS %s\n ON (%s = %s)\n '''\n subtables_not_null_column = []\n select_bios = []\n relations = Relations(self.app_label, model_superclass=(BiologicalModel, MetaModel))\n for biomodel in biomodels:\n # Requested experiments related to biomodel.\n req_exps_for_bio = [] \n for exp in self.req_exps:\n if exp.biomodel == biomodel:\n req_exps_for_bio.append(exp)\n \n select_bios.extend(self.get_show_columns(biomodel))\n \n\n # Get Meta models for Bio model and the show columns to the \n # SELECT clause.\n metamodels_of_biomodel = relations.get_related_metamodels(biomodel)\n #print(metamodels_of_biomodel)\n for metamodel in metamodels_of_biomodel:\n select_bios.extend(self.get_show_columns(metamodel))\n\n \n for exp in req_exps_for_bio:\n select_bios.extend(self.get_display_names(exp))\n exp_alias = 'exp%s' % exp.pk\n join_exps += left_join_template % (\n self.construct_exptable(exp),\n exp_alias,\n '%s.%s_id' % (cached_alias, biomodel.__name__.lower()),\n '%s.%s' % (exp_alias, exp.biofield.column)\n )\n subtables_not_null_column.append((self.get_notnull_column(exp), \n str(exp.id)))\n exclude_empty = 'WHERE ' + ' OR '.join(['%s_%s IS NOT NULL' % col \n for col in subtables_not_null_column])\n \n sql = '\\n'.join((select + ', '.join(select_bios), join_exps, exclude_empty))\n return re.sub(r'^\\s+', '', sql, flags=re.MULTILINE)",
"def select_form_table(conn , quary):\n cur = conn.cursor()\n cur.execute(quary)\n\n rows = cur.fetchall()\n\n for row in rows:\n print(row)",
"def determine_query():\n return query if query is not None \\\n else f\"SELECT * FROM '{table}';\"",
"def get_execute_table(self, limit=None):\n query = self.select_all()\n self.cur.execute(query)\n if limit is None:\n result = self.cur.fetchall()\n else:\n result = self.cur.fetchmany(limit)\n return to_data_frame(result)",
"def get_execute_table(self, limit=None):\n query = self.select_all()\n self.cur.execute(query)\n if limit is None:\n result = self.cur.fetchall()\n else:\n result = self.cur.fetchmany(limit)\n return to_data_frame(result)",
"def select_query(self):\n query = db.select([self.tables])\n print(query)\n ResultProxy = self.connection.execute(query)\n ResultSet = ResultProxy.fetchall()\n return ResultSet",
"def select(self, table, columns=['*'], condition='', orderby='', limit=0, isFetchAll=True):\n return True",
"def get_sql_str_select(self,table_name, field_names = ['*'],where_ind_name = None, where_ind_value = None, where_clause = None):\n\t\tif (where_ind_name!=None) & (where_ind_value!=None):\n\t\t\tsql_str = 'SELECT %s FROM %s.%s WHERE %s = %s'%(','.join([str(x) for x in field_names]),\t\t\t\t\t\t\t\t\t\t\t\t\tself.__schema_name,\t\t\t\t\t\t\t\t\t\t\t\t\t\ttable_name,\t\t\t\t\t\t\t\t\t\t\t\t\t\t where_ind_name,\t\t\t\t\t\t\t\t\t\t\t\t\t\t where_ind_value)\n\t\telse:\n\t\t\tsql_str = 'SELECT %s FROM %s.%s %s'%(','.join([str(x) for x in field_names]),\t\t\t\t\t\t\t\t\t\t\t\t\tself.__schema_name,\t\t\t\t\t\t\t\t\t\t\t\t\t\ttable_name, where_clause)\n\t\t# print(sql_str)\n\t\treturn sql_str",
"def sql_query(self, table, record_name, columns):\n cursorObj = self.db.cursor()\n cursorObj.execute('SELECT {0} FROM {1} WHERE name=\"{2}\"'.format(columns, table, record_name))\n records = cursorObj.fetchall()\n return records",
"def get_data(db, columns, table, condition=\"\"):\n cur = db.cursor()\n cur.execute(SELECT.format(columns, table) + \" \" + condition)\n return cur.fetchall()",
"def select(self, table, where=None, *args):\n\n result = None\n query = \"SELECT \"\n keys = args\n l = len(keys) - 1\n for i, key in enumerate(keys):\n query += \"`\"+key+\"`\"\n if i < l:\n query += \",\"\n query += \" FROM %s\" % table\n if where:\n query += \" WHERE %s\" % where\n\n self.__open()\n self.__cursor.execute(query)\n result = self.__cursor.fetchall()\n self.__close()\n return result",
"def _select_table(self):\n\n return self.postgres.execute(f\"SELECT * FROM {self.table_name};\")",
"def get_table_query_string(self) -> str:\n if self.database and self.table:\n return f'\"{self.database}\".\"{self.schema}\".\"{self.table}\"'\n elif self.table:\n return f'\"{self.table}\"'\n else:\n return f\"({self.query})\"",
"def select_from_table(self, table_name):\n sql_str = \"SELECT * FROM {tb}\".format(tb=table_name)\n cur = self.conn.cursor()\n cur.execute(sql_str)\n names = [description[0] for description in cur.description]\n\n rows = cur.fetchall()\n\n df = pd.DataFrame(rows, columns =names) \n\n return df",
"def _select(\n self, table=None, fields=(), where=None, order=None, limit=None\n ):\n\n sql = 'SELECT %s FROM `%s`' % (','.join(fields), table)\n\n if where and len(where) > 0:\n sql += ' WHERE %s' % where[0]\n\n if order:\n sql += ' ORDER BY %s' % order[0]\n\n if len(order) > 1:\n sql += ' %s' % order[1]\n\n if limit:\n sql += ' LIMIT %s' % limit[0]\n\n if len(limit) > 1:\n sql += ', %s' % limit[1]\n\n return self.query(sql, where[1] if where and len(where) > 1 else None)",
"def select(self, table, where=None, *args, **kwargs):\n result = None\n query = 'SELECT '\n keys = args\n values = tuple(kwargs.values())\n length = len(keys) - 1\n\n for i, key in enumerate(keys):\n query += \"`\" + key + \"`\"\n if i < length:\n query += \",\"\n\n query += ' FROM {}'.format(table)\n\n if where:\n query += \" WHERE {}\".format(where)\n\n print(query)\n\n self.__open()\n self.__session.execute(query, values)\n number_rows = self.__session.rowcount\n number_columns = len(self.__session.description)\n\n if number_rows >= 1 and number_columns > 1:\n result = [item for item in self.__session.fetchall()]\n else:\n result = [item[0] for item in self.__session.fetchall()]\n\n self.__close()\n\n return result",
"def queryTable(self, in_table_name, in_field_name, in_conditions=[]):\n fields = ','.join(in_field_name if type(in_field_name)is list else [])\n query = \"SELECT {} FROM {}\".format(fields, in_table_name)\n cond_list = []\n for c,(cond_field, cond_value) in enumerate(in_conditions):\n condition_string = ' WHERE {}=?' if c == 0 else ' AND {}=?'\n query += condition_string.format(cond_field)\n cond_list.append(cond_value)\n\n result = self.cursor.execute('{};'.format(query), tuple(cond_list))\n return result.fetchall()",
"def _assemble(self):\n selectop = self._headopt and f'{self._headopt}' or ''\n select = f'{selectop} ' + ', '.join(self._head)\n froms = 'from ' + ', '.join(self._tables)\n joins = ' '.join(self._joins)\n wheres, wkw = self._build_where()\n\n order = ''\n if self._order:\n order = f'order by {self._order[0]} {self._order[1]}'\n limit = ''\n if self._limit:\n limit = f'limit {self._limit}'\n\n kw = self._kw.copy()\n kw.update(wkw)\n return (f'select {select} '\n f'{froms} '\n f'{joins} '\n f'{wheres} '\n f'{order} '\n f'{limit}'\n ), kw",
"def get_query(self):\n columns = ','.join(['\"{}\"'.format(x) for x in self.columns])\n query = 'SELECT {} FROM \"{}\"'.format(columns, self.table)\n filter_params = []\n if self.filters:\n filter_sql, filter_params = filter_postgis(self.filters)\n query += ' WHERE {}'.format(filter_sql)\n query += ';'\n return str(text(query)), filter_params",
"def mostrar_tablero(tablero):\n numeros_de_fila = []\n for x in range(1, numero_columnas+1):\n y = str(x)\n numeros_de_fila.append(y)\n print(Fore.MAGENTA + \" \" + \" \".join(numeros_de_fila)) #fila de numeros que designa las columnas\n for w in range(numero_columnas):\n if w < 9:\n print(Fore.MAGENTA + str(w + 1) + Fore.RESET + \" \" + \" \".join(str(x) for x in tablero[w])) #imprime primero el numero de la fila y luego imprime la fila del tablero\n else: print(Fore.MAGENTA + str(w + 1) + Fore.RESET + \" \" + \" \".join(str(x) for x in tablero[w])) #si la fila es mayor que 9 se debe quitar un espacio ya que el numero 10 son dos digitos\n print(\"\\n\")",
"def select_advanced(self, sql, *args):\n od = OrderedDict(args)\n query = sql\n values = tuple(od.values())\n self.__open()\n self.__session.execute(query, values)\n number_rows = self.__session.rowcount\n number_columns = len(self.__session.description)\n\n if number_rows >= 1 and number_columns > 1:\n result = [item for item in self.__session.fetchall()]\n else:\n result = [item[0] for item in self.__session.fetchall()]\n\n self.__close()\n return result",
"def crearTabla(self):\n mensaje = self.base.createTable()\n showinfo('Resultado', mensaje)",
"def task103(self):\n self.ex(\"\"\"\nSELECT\n a.cust_id AS customer_id,\n a.product_cd AS product_cd,\n concat(i.fname, ' ', i.lname) AS individual_customer_name,\n b.name AS buisness_customer_name\nFROM account AS a\n LEFT JOIN individual AS i ON a.cust_id = i.cust_id\n LEFT JOIN business AS b ON a.cust_id = b.cust_id;\"\"\")",
"def query_generic_table(self, table_name):\n\n query = \"select * from {}\"\n try:\n self.dbCursor.execute(query.format(table_name))\n except mysql.connector.Error as err:\n ErrorMessageWindow(err)\n finally:\n return self.dbCursor.fetchall()",
"def generate_select_sql(self, condition, fields):\n return \"SELECT %s FROM %s WHERE %s\" % (fields, self.tablename, condition)",
"def get_query(self):\n columns = ','.join(['\"{}\"'.format(x) for x in self._columns])\n query = 'SELECT {} FROM \"{}\"'.format(columns, self._table)\n filter_params = []\n if self._filters:\n filter_sql, filter_params = filter_postgis(self._filters)\n query += ' WHERE {}'.format(filter_sql)\n query += ';'\n return str(text(query)), filter_params",
"def sql(self, tabtype, only_limit=False, prefix=None):\n if only_limit and self.limit < 1:\n return ''\n\n exprs = self.exprs\n if tabtype is not None:\n exprs = [(f, o) for f, o in exprs if f in tabtype._sql_fields]\n if len(exprs) == 0:\n return ''\n\n as_sql = lambda f: tabtype._as_sql(f, prefix=prefix)\n s = ' ORDER BY '\n s += ', '.join('%s %s' % (as_sql(f), o) for f, o in exprs)\n if self.limit > 0:\n s += ' LIMIT %d' % self.limit\n return s",
"def sql_dict_to_str(self, result, tables):\n table = tables[result['sql']['from'][0]]\n header_names = table['header_name'] + ['空列']\n header_ids = table['header_id'] + ['null']\n sql = result['sql']\n\n str_cond_list, sql_cond_list = [], []\n where_conds, orderby_conds = [], []\n for cond in sql['conds']:\n if cond[1] in [4, 5]:\n orderby_conds.append(cond)\n else:\n where_conds.append(cond)\n for cond in where_conds:\n header_name = header_names[cond[0]]\n if header_name == '空列':\n continue\n header_id = '`%s`.`%s`' % (table['table_id'], header_ids[cond[0]])\n op = self.cond_ops[cond[1]]\n value = cond[2]\n str_cond_list.append('( ' + header_name + ' ' + op + ' \"' + value\n + '\" )')\n sql_cond_list.append('( ' + header_id + ' ' + op + ' \"' + value\n + '\" )')\n cond_str = ' ' + self.cond_conn_ops[sql['cond_conn_op']] + ' '\n str_where_conds = cond_str.join(str_cond_list)\n sql_where_conds = cond_str.join(sql_cond_list)\n if len(orderby_conds) != 0:\n str_orderby_column = ', '.join(\n [header_names[cond[0]] for cond in orderby_conds])\n sql_orderby_column = ', '.join([\n '`%s`.`%s`' % (table['table_id'], header_ids[cond[0]])\n for cond in orderby_conds\n ])\n str_orderby_op = self.cond_ops[orderby_conds[0][1]]\n str_orderby = '%s %s' % (str_orderby_column, str_orderby_op)\n sql_orderby = '%s %s' % (sql_orderby_column, str_orderby_op)\n limit_key = orderby_conds[0][2]\n is_in, limit_num = False, -1\n for key in self.limit_dict:\n if key in limit_key:\n is_in = True\n limit_num = self.limit_dict[key]\n break\n if is_in:\n str_orderby += ' LIMIT %d' % (limit_num)\n sql_orderby += ' LIMIT %d' % (limit_num)\n # post process null column\n for idx, sel in enumerate(sql['sel']):\n if sel == len(header_ids) - 1:\n primary_sel = 0\n for index, attrib in enumerate(table['header_attribute']):\n if attrib == 'PRIMARY':\n primary_sel = index\n break\n if primary_sel not in sql['sel']:\n sql['sel'][idx] = primary_sel\n else:\n del sql['sel'][idx]\n else:\n str_orderby = ''\n\n str_sel_list, sql_sel_list = [], []\n for idx, sel in enumerate(sql['sel']):\n header_name = header_names[sel]\n header_id = '`%s`.`%s`' % (table['table_id'], header_ids[sel])\n if sql['agg'][idx] == 0:\n str_sel_list.append(header_name)\n sql_sel_list.append(header_id)\n else:\n str_sel_list.append(self.agg_ops[sql['agg'][idx]] + '('\n + header_name + ')')\n sql_sel_list.append(self.agg_ops[sql['agg'][idx]] + '('\n + header_id + ')')\n\n if len(str_cond_list) != 0 and len(str_orderby) != 0:\n final_str = 'SELECT %s FROM %s WHERE %s ORDER BY %s' % (\n ', '.join(str_sel_list), table['table_name'], str_where_conds,\n str_orderby)\n final_sql = 'SELECT %s FROM `%s` WHERE %s ORDER BY %s' % (\n ', '.join(sql_sel_list), table['table_id'], sql_where_conds,\n sql_orderby)\n elif len(str_cond_list) != 0:\n final_str = 'SELECT %s FROM %s WHERE %s' % (\n ', '.join(str_sel_list), table['table_name'], str_where_conds)\n final_sql = 'SELECT %s FROM `%s` WHERE %s' % (\n ', '.join(sql_sel_list), table['table_id'], sql_where_conds)\n elif len(str_orderby) != 0:\n final_str = 'SELECT %s FROM %s ORDER BY %s' % (\n ', '.join(str_sel_list), table['table_name'], str_orderby)\n final_sql = 'SELECT %s FROM `%s` ORDER BY %s' % (\n ', '.join(sql_sel_list), table['table_id'], sql_orderby)\n else:\n final_str = 'SELECT %s FROM %s' % (', '.join(str_sel_list),\n table['table_name'])\n final_sql = 'SELECT %s FROM `%s`' % (', '.join(sql_sel_list),\n table['table_id'])\n\n sql = SQLQuery(\n string=final_str, query=final_sql, sql_result=result['sql'])\n\n 
return sql",
"def test_select(self):\n my_conn = MySQL(*self.conn_params)\n table_name = \"inf_schema\"\n inf_schema = my_conn.get_table(table_name)\n # SELECT * FROM inf_schema\n # WHERE table_name like 'INNO%' AND avg_row_length > 100\n results = my_conn.engine.execute(select('*')\n .where(inf_schema.c.table_name\n .like('INNO%'))\n .where(inf_schema.c.avg_row_length >\n 100)\n .select_from(inf_schema)).fetchall()\n table_df = pd.DataFrame(results)\n self.assertGreaterEqual(len(table_df), 6)",
"def task101(self):\n self.ex(\"\"\"\nSELECT\n a.account_id AS account_id,\n pt.name AS product_type,\n p.name AS product_name\nFROM account AS a\n RIGHT JOIN product AS p ON a.product_cd = p.product_cd\n INNER JOIN product_type AS pt ON p.product_type_cd = pt.product_type_cd\nORDER BY account_id\"\"\")"
]
| [
"0.601532",
"0.59708345",
"0.5957253",
"0.5854742",
"0.5854742",
"0.58174795",
"0.58170044",
"0.5810371",
"0.5797387",
"0.5764381",
"0.57541776",
"0.5746789",
"0.5740096",
"0.57264084",
"0.5715317",
"0.5706358",
"0.56669486",
"0.5631278",
"0.5615111",
"0.5588208",
"0.5562837",
"0.55185884",
"0.55118763",
"0.5469637",
"0.54611003",
"0.54568875",
"0.5451652",
"0.5449126",
"0.5441274",
"0.543162"
]
| 0.64813566 | 0 |
Parses through the 'inventory' parameter in target_objs to fetch the remote inventories and store them in save_dir before recursively copying them to the output_path (relative to the inventory path). Overwrites existing inventory items if force is set | def fetch_inventories(inventory_path, target_objs, save_dir, force, pool):
git_inventories = defaultdict(list)
http_inventories = defaultdict(list)
# To ensure no duplicate output path
inv_output_path = defaultdict(set)
for target_obj in target_objs:
try:
inventories = target_obj["inventory"]
for inv in inventories:
inv_type = inv["type"]
source_uri = inv["source"]
source_hash = hashlib.sha256(source_uri.encode())
# hashing the source, subdir and ref together for git sources
# as different inventory items can have the same git uri
if "subdir" in inv:
subdir = inv["subdir"]
source_hash.update(subdir.encode())
if "ref" in inv:
ref = inv["ref"]
source_hash.update(ref.encode())
if source_hash in cached.inv_sources:
continue
# output_path is relative to inventory_path
# ./inventory by default
output_path = normalise_join_path(inventory_path, inv["output_path"])
logger.debug("Updated output_path from %s to %s", inv["output_path"], output_path)
inv["output_path"] = output_path
if output_path in inv_output_path[source_uri]:
# if the output_path is duplicated for the same source_uri
logger.debug("Skipping duplicate output path for uri %s", source_uri)
continue
else:
inv_output_path[source_uri].add(output_path)
if inv_type == "git":
git_inventories[source_uri].append(inv)
elif inv_type in ("http", "https"):
http_inventories[source_uri].append(inv)
else:
logger.warning("%s is not a valid source type", inv_type)
except KeyError:
logger.debug("Target object %s has no inventory key", target_obj["vars"]["target"])
continue
git_worker = partial(fetch_git_dependency, save_dir=save_dir, force=force, item_type="Inventory")
http_worker = partial(fetch_http_dependency, save_dir=save_dir, force=force, item_type="Inventory")
[p.get() for p in pool.imap_unordered(git_worker, git_inventories.items()) if p]
[p.get() for p in pool.imap_unordered(http_worker, http_inventories.items()) if p] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pseudo_build_inventory(self, inventory_update, private_data_dir):\n src = inventory_update.source\n\n injector = None\n if inventory_update.source in InventorySource.injectors:\n injector = InventorySource.injectors[src]()\n\n if injector is not None:\n content = injector.inventory_contents(inventory_update, private_data_dir)\n # must be a statically named file\n self.write_private_data_file(private_data_dir, injector.filename, content, sub_dir='inventory', file_permissions=0o700)\n rel_path = os.path.join('inventory', injector.filename)\n elif src == 'scm':\n rel_path = os.path.join('project', inventory_update.source_path)\n\n return rel_path",
"def prepare_artifacts(self):\n\n logger.info(\"Handling artifacts...\")\n target_dir = os.path.join(self.target, 'image')\n fetch_artifacts_url = []\n\n for image in self.images:\n for artifact in image.all_artifacts:\n logger.info(\"Preparing artifact '{}'\".format(artifact['name']))\n\n if isinstance(artifact, _PlainResource) and \\\n config.get('common', 'redhat'):\n try:\n fetch_artifacts_url.append({'md5': artifact['md5'],\n 'url': get_brew_url(artifact['md5']),\n 'target': os.path.join(artifact['target'])})\n artifact['target'] = os.path.join('artifacts', artifact['target'])\n logger.debug(\n \"Artifact '{}' added to fetch-artifacts-url.yaml\".format(artifact['name']))\n except:\n logger.warning(\"Plain artifact {} could not be found in Brew, trying to handle it using lookaside cache\".\n format(artifact['name']))\n artifact.copy(target_dir)\n # TODO: This is ugly, rewrite this!\n artifact['lookaside'] = True\n\n else:\n artifact.copy(target_dir)\n\n fetch_artifacts_file = os.path.join(self.target, 'image', 'fetch-artifacts-url.yaml')\n\n if fetch_artifacts_url:\n with open(fetch_artifacts_file, 'w') as _file:\n yaml.safe_dump(fetch_artifacts_url, _file, default_flow_style=False)\n\n logger.debug(\"Artifacts handled\")",
"def update_target(target_info, temp_dir, images_dir, inventory, args):\n target_name = target_info.get(\"target\")\n target_sha256 = target_info.get(\"sha256_hash\")\n filename = target_info.get(\"filename\")\n temp_path = os.path.join(temp_dir, filename)\n # Add a trailing slash to make sure that urljoin handles things properly\n full_url = urljoin(args.base_url+'/', target_info.get(\"url\"))\n _, downloaded_size, downloaded_sha256 = download(\n images_url=full_url,\n filename=temp_path,\n buffer_size=args.buffer_size,\n print_progress=(_LOG_LEVEL <= _LOG_LEVELS.get(\"INFO\", 3))\n )\n if downloaded_size == 0:\n log(\"INFO\", \"Skipping target: {}\".format(target_name))\n return\n log(\"TRACE\", \"{} successfully downloaded ({} Bytes)\"\n .format(temp_path, downloaded_size))\n # If the SHA256 in the manifest has the value '0', this is a special case\n # and we just skip the verification step\n if target_sha256 == '0':\n log(\"DEBUG\", \"Skipping SHA256 check for {}.\".format(full_url))\n # If the check fails, print an error and don't unzip the file\n elif downloaded_sha256 != target_sha256:\n log(\"ERROR\", \"Downloaded SHA256 does not match manifest for {}!\"\n .format(full_url))\n return\n # Note: this skips the --keep option, so we'll never keep image packages\n # that fail the SHA256 checksum\n ## Now copy the contents to the final destination (the images directory)\n delete_from_inv(target_info, inventory, images_dir)\n if os.path.splitext(temp_path)[1].lower() == '.zip':\n archive_namelist = extract(\n temp_path,\n images_dir,\n args.test)\n if args.keep:\n # If the user wants to keep the downloaded archive,\n # save it to the images directory and add it to the inventory\n shutil.copy(temp_path, images_dir)\n archive_namelist.append(filename)\n else:\n archive_namelist = []\n shutil.copy(temp_path, images_dir)\n ## Update inventory\n inventory[target_name] = {\"repo_hash\": target_info.get(\"repo_hash\"),\n \"contents\": archive_namelist,\n \"filename\": filename}",
"def main():\n args = parse_args()\n images_dir = get_images_dir(args)\n log(\"INFO\", \"Images destination: {}\".format(os.path.abspath(images_dir)))\n try:\n manifest = parse_manifest(get_manifest_raw(args))\n if args.list_targets:\n print_target_list(\n manifest,\n args\n )\n return True\n log(\"TRACE\", \"Manifest:\\n{}\".format(\n \"\\n\".join(\"{}\".format(item) for item in manifest.items())\n ))\n\n # Read the inventory into a dictionary we can perform lookups on\n if os.path.isfile(args.inventory_location):\n inventory_fn = args.inventory_location\n else:\n inventory_fn = os.path.join(images_dir, _INVENTORY_FILENAME)\n inventory = parse_inventory(inventory_fn=inventory_fn)\n log(\"TRACE\", \"Inventory: {}\\n{}\".format(\n os.path.abspath(inventory_fn),\n \"\\n\".join(\"{}\".format(item) for item in inventory.items())\n ))\n\n # Determine the URLs to download based on the input regular expressions\n if not args.types:\n types_regex_l = [_DEFAULT_TARGET_REGEX]\n else:\n types_regex_l = args.types\n\n log(\"TRACE\", \"RegExs for target selection: {}\".format(types_regex_l))\n targets_info = lookup_urls(types_regex_l, manifest, inventory, args.refetch)\n # Exit early if we don't have anything to download\n if targets_info:\n target_urls = [info.get(\"url\") for info in targets_info]\n log(\"DEBUG\", \"URLs to download:\\n{}\".format(\n \"\\n\".join(\"{}\".format(item) for item in target_urls)\n ))\n else:\n return True\n\n ## Now download all the images archives into a temp directory\n if args.dry_run:\n for target_info in targets_info:\n log(\"INFO\", \"[Dry Run] Fetch target: {}\".format(\n target_info.get(\"filename\")))\n return True\n with TemporaryDirectory() as temp_dir:\n for target_info in targets_info:\n update_target(\n target_info,\n temp_dir,\n images_dir,\n inventory,\n args\n )\n ## Update inventory with all the new content\n write_inventory(inventory, inventory_fn)\n\n except Exception as ex:\n log(\"ERROR\", \"Downloader raised an unhandled exception: {ex}\\n\"\n \"You can run this again with the '--verbose' flag to see more information\\n\"\n \"If the problem persists, please email the output to: {contact}\"\n .format(contact=_CONTACT, ex=ex))\n # Again, we wait on Windows systems because if this is executed in a\n # window, and immediately fails, the user doesn't have a way to see the\n # error message, and if they're not very savvy, they won't know how to\n # execute this in a shell.\n if not _YES and platform.system() == 'Windows':\n input('Hit Enter to continue.')\n return False\n log(\"INFO\", \"Images download complete.\")\n return True",
"def pre_combine_inventory(self, target, src_files):\n config = self.config\n\n self.stderr.write(f\"Layers detected: {self.layer_names_all}\\n\")\n if self.layer_names_all != self.layer_names_used:\n self.stderr.write(f\"Layers after filter: {self.layer_names_used}\\n\")\n\n # Convert src_files to a set to speed up\n src_files = set(src_files)\n self.target_extra_files = set()\n for (root, dirs, files) in relwalk(target, followlinks=config.follow_symlink):\n for fn in files:\n tgt_file = os.path.join(root, fn)\n if tgt_file not in src_files:\n if fn == CONTROLLED_DIR_MARKER or config.block_files.search(fn):\n continue # pragma: no cover (peephole optimization)\n self.target_extra_files.add(tgt_file)\n return src_files",
"def take(self, agent):\n\n parent_folder = self.FolderID\n\n # Check if we have inventory turned on\n if not(parent_folder and agent.settings.ENABLE_INVENTORY_MANAGEMENT):\n logger.warning(\"Inventory not available, please enable settings.ENABLE_INVENTORY_MANAGEMENT\")\n return \n\n if not(parent_folder):\n # locate Object folder\n objects_folder = [ folder for folder in agent.inventory.folders if folder.Name == 'Objects' ]\n if objects_folder:\n parent_folder = objects_folder[0].FolderID\n else:\n logger.warning(\"Unable to locate top-level Objects folder to take item into inventory.\")\n return\n\n self.derez(agent, 4, parent_folder, uuid.uuid4(), agent.ActiveGroupID)",
"def populate_local_repo(case_dict, ignore_logs, ignore_timing):\n # ---------------------------------------------------------------------\n logger.debug(\"populate_local_repo\")\n os.chdir(case_dict[\"CASEROOT\"])\n\n # loop through the archive_list and copy to the temp archive dir\n for archive in case_dict[\"archive_list\"]:\n if os.path.exists(archive):\n if os.path.isdir(archive):\n try:\n target = case_dict[\"archive_temp_dir\"] + \"/\" + archive\n shutil.copytree(\n archive,\n target,\n symlinks=False,\n ignore=shutil.ignore_patterns(*_ignore_patterns),\n )\n except OSError as error:\n msg = _copy_template.substitute(\n function=\"populate_local_repo\",\n source=archive,\n dest=case_dict[\"archive_temp_dir\"],\n error=error.errno,\n strerror=error.strerror,\n )\n logger.warning(msg)\n else:\n try:\n shutil.copy2(archive, case_dict[\"archive_temp_dir\"])\n except OSError as error:\n msg = _copy_template.substitute(\n function=\"populate_local_repo\",\n source=archive,\n dest=case_dict[\"archive_temp_dir\"],\n error=error.errno,\n strerror=error.strerror,\n )\n logger.warning(msg)\n\n # add files with .xml as the suffix\n xml_files = glob.glob(\"*.xml\")\n for xml_file in xml_files:\n if os.path.isfile(xml_file):\n try:\n shutil.copy2(xml_file, case_dict[\"archive_temp_dir\"])\n except OSError as error:\n msg = _copy_template.substitute(\n function=\"populate_local_repo\",\n source=xml_file,\n dest=case_dict[\"archive_temp_dir\"],\n error=error.errno,\n strerror=error.strerror,\n )\n logger.warning(msg)\n\n # add files with .xml as the suffix from the postprocess directory\n if os.path.isdir(\"./postprocess\"):\n pp_path = \"{0}/{1}\".format(case_dict[\"archive_temp_dir\"], \"postprocess\")\n if not os.path.exists(pp_path):\n os.mkdir(pp_path)\n xml_files = glob.glob(\"./postprocess/*.xml\")\n for xml_file in xml_files:\n if os.path.isfile(xml_file):\n try:\n shutil.copy2(xml_file, pp_path)\n except OSError as error:\n msg = _copy_template.substitute(\n function=\"populate_local_repo\",\n source=xml_file,\n dest=case_dict[\"archive_temp_dir\"],\n error=error.errno,\n strerror=error.strerror,\n )\n logger.warning(msg)\n\n # add files with user_nl_ as the prefix\n user_files = glob.glob(\"user_nl_*\")\n for user_file in user_files:\n if os.path.isfile(user_file):\n try:\n shutil.copy2(user_file, case_dict[\"archive_temp_dir\"])\n except OSError as error:\n msg = _copy_template.substitute(\n function=\"populate_local_repo\",\n source=user_file,\n dest=case_dict[\"archive_temp_dir\"],\n error=error.errno,\n strerror=error.strerror,\n )\n logger.warning(msg)\n\n # add files with Depends as the prefix\n conf_files = glob.glob(\"Depends.*\")\n for conf_file in conf_files:\n if os.path.isfile(conf_file):\n try:\n shutil.copy2(conf_file, case_dict[\"archive_temp_dir\"])\n except OSError as error:\n msg = _copy_template.substitute(\n function=\"populate_local_repo\",\n source=conf_file,\n dest=case_dict[\"archive_temp_dir\"],\n error=error.errno,\n strerror=error.strerror,\n )\n logger.warning(msg)\n\n # check if ignore_logs is specified\n if ignore_logs:\n os.chdir(case_dict[\"archive_temp_dir\"])\n if os.path.isdir(\"./logs\"):\n try:\n shutil.rmtree(\"./logs\")\n except OSError:\n logger.warning(\n 'in \"populate_local_repo\" - Unable to remove \"logs\" in archive_temp_dir.'\n )\n if os.path.isdir(\"./postprocess/logs\"):\n os.chdir(\"./postprocess\")\n try:\n shutil.rmtree(\"./logs\")\n except OSError:\n logger.warning(\n 'in \"populate_local_repo\" - '\n 'Unable to remove 
\"postprocess/logs\" in archive_temp_dir.'\n )\n os.chdir(case_dict[\"CASEROOT\"])\n\n # check if ignore_timing is specified\n if ignore_timing:\n os.chdir(case_dict[\"archive_temp_dir\"])\n if os.path.isdir(\"./timing\"):\n try:\n shutil.rmtree(\"./timing\")\n except OSError:\n logger.warning(\n 'in \"populate_local_repo\" - Unable to remove \"timing\" in archive_temp_dir.'\n )\n os.chdir(case_dict[\"CASEROOT\"])",
"def load_target_inventory(inventory_path, targets, ignore_class_notfound=False):\n target_objs = []\n inv = inventory_reclass(inventory_path, ignore_class_notfound)\n\n # if '-t' is set on compile, only loop through selected targets\n if targets:\n targets_list = targets\n else:\n targets_list = inv[\"nodes\"]\n\n for target_name in targets_list:\n try:\n inv_target = inv[\"nodes\"][target_name]\n target_obj = inv_target[\"parameters\"][\"kapitan\"]\n # check if parameters.kapitan is empty\n if not target_obj:\n raise InventoryError(\n \"InventoryError: {}: parameters.kapitan has no assignment\".format(target_name)\n )\n target_obj[\"target_full_path\"] = inv_target[\"parameters\"][\"_reclass_\"][\"name\"][\"path\"]\n require_compile = not ignore_class_notfound\n valid_target_obj(target_obj, require_compile)\n validate_matching_target_name(target_name, target_obj, inventory_path)\n logger.debug(\"load_target_inventory: found valid kapitan target %s\", target_name)\n target_objs.append(target_obj)\n except KeyError:\n logger.debug(\"load_target_inventory: target %s has no kapitan compile obj\", target_name)\n pass\n\n return target_objs",
"def fetch(args):\n storage, remote_path = split_storage(args.remote)\n\n local_path = args.local\n if local_path is None:\n _, local_path = os.path.split(remote_path)\n\n local_path_exists = os.path.exists(local_path)\n if local_path_exists and not args.force and not args.update:\n sys.exit(\"Local file %s already exists, not overwriting.\" % local_path)\n\n directory, _ = os.path.split(local_path)\n if directory:\n makedirs(directory, exist_ok=True)\n\n osf = _setup_osf(args)\n project = osf.project(args.project)\n\n store = project.storage(storage)\n for file_ in store.files:\n if norm_remote_path(file_.path) == remote_path:\n if local_path_exists and not args.force and args.update:\n if file_.hashes.get('md5') == checksum(local_path):\n print(\"Local file %s already matches remote.\" % local_path)\n break\n with open(local_path, 'wb') as fp:\n file_.write_to(fp)\n\n # only fetching one file so we are done\n break",
"def restore_inventory(self):\n if config.get(\"aws\", \"s3_bucket\"):\n loaded_archives = self.load_archives_from_s3()\n\n with glacier_shelve() as d:\n archives = {}\n for a in loaded_archives:\n print a\n archives[a[\"filename\"]] = a[\"archive_id\"]\n d[\"archives\"] = archives\n else:\n raise Exception(\"You must set s3_bucket in order to backup/restore inventory to/from S3.\")",
"def save_inventory(junos_module, inventory):\n if junos_module.conn_type == \"local\" :\n dev = junos_module.dev\n file_name = '%s-inventory.xml' % (dev.facts['hostname'])\n else:\n facts = junos_module._pyez_conn.get_facts()\n file_name = '%s-inventory.xml' % (facts['hostname'])\n if junos_module.params.get('savedir') is not None:\n save_dir = junos_module.params.get('savedir')\n file_path = os.path.normpath(os.path.join(save_dir, file_name))\n junos_module.logger.debug(\"Saving inventory to: %s.\", file_path)\n try:\n with open(file_path, 'wb') as fact_file:\n fact_file.write(to_bytes(inventory, encoding='utf-8'))\n junos_module.logger.debug(\"Inventory saved to: %s.\", file_path)\n except IOError:\n junos_module.fail_json(msg=\"Unable to save inventory. Failed to \"\n \"open the %s file.\" % (file_path))",
"def _generate_inventory(self, datapath):\n \n files = [file for file in listdir(datapath) if '.nc' in file and not 'xyz' in file]\n # file_prefixes = list(set([ file.split('_')[0] for file in files ]))\n # file_prefixes = list(set([ \"_\".join(file.split('_')[0:2]) for file in files ]))\n if self.extra_pref:\n file_prefixes = list(set([ \"_\".join(file.split('_')[0:2] + [self.extra_pref]) for file in files ]))\n else:\n file_prefixes = list(set([ \"_\".join(file.split('_')[0:2]) for file in files ]))\n \n inventory = {}\n for file_prefix in file_prefixes:\n fname = path.join(datapath,f'{file_prefix}{self.first_suffix}')\n if not self.metafile:\n self.metafile = fname\n vars = [ var for var in list(Dataset(fname).variables) if var not in self.skip_vars ]\n for var in vars:\n inventory[var] = {'files': sorted([path.join(datapath,file) \n for file in listdir(datapath) if file_prefix in file])}\n return inventory",
"def build_args(self, inventory_update, private_data_dir, passwords):\n # Get the inventory source and inventory.\n inventory_source = inventory_update.inventory_source\n inventory = inventory_source.inventory\n\n if inventory is None:\n raise RuntimeError('Inventory Source is not associated with an Inventory.')\n\n args = ['ansible-inventory', '--list', '--export']\n\n # special case for constructed inventories, we pass source inventories from database\n # these must come in order, and in order _before_ the constructed inventory itself\n if inventory_update.inventory.kind == 'constructed':\n inventory_update.log_lifecycle(\"start_job_fact_cache\")\n for input_inventory in inventory_update.inventory.input_inventories.all():\n args.append('-i')\n script_params = dict(hostvars=True, towervars=True)\n source_inv_path = self.write_inventory_file(input_inventory, private_data_dir, f'hosts_{input_inventory.id}', script_params)\n args.append(to_container_path(source_inv_path, private_data_dir))\n # Include any facts from input inventories so they can be used in filters\n start_fact_cache(\n input_inventory.hosts.only(*HOST_FACTS_FIELDS),\n os.path.join(private_data_dir, 'artifacts', str(inventory_update.id), 'fact_cache'),\n inventory_id=input_inventory.id,\n )\n\n # Add arguments for the source inventory file/script/thing\n rel_path = self.pseudo_build_inventory(inventory_update, private_data_dir)\n container_location = os.path.join(CONTAINER_ROOT, rel_path)\n source_location = os.path.join(private_data_dir, rel_path)\n\n args.append('-i')\n args.append(container_location)\n # Added this in order to allow older versions of ansible-inventory https://github.com/ansible/ansible/pull/79596\n # limit should be usable in ansible-inventory 2.15+\n if inventory_update.limit:\n args.append('--limit')\n args.append(inventory_update.limit)\n\n args.append('--output')\n args.append(os.path.join(CONTAINER_ROOT, 'artifacts', str(inventory_update.id), 'output.json'))\n\n if os.path.isdir(source_location):\n playbook_dir = container_location\n else:\n playbook_dir = os.path.dirname(container_location)\n args.extend(['--playbook-dir', playbook_dir])\n\n if inventory_update.verbosity:\n args.append('-' + 'v' * min(5, inventory_update.verbosity * 2 + 1))\n\n return args",
"def build_inventory(self):\n self.inventory = {\n 'all': {\n 'hosts': [],\n 'vars': self.group_variables\n },\n '_meta': {'hostvars': {}}\n }\n\n # add all droplets by id and name\n for droplet in self.data['droplets']:\n for net in droplet['networks']['v4']:\n if net['type'] == 'public':\n dest = net['ip_address']\n else:\n continue\n\n self.inventory['all']['hosts'].append(dest)\n\n self.add_host(droplet['id'], dest)\n\n self.add_host(droplet['name'], dest)\n\n # groups that are always present\n for group in ('digital_ocean',\n 'region_' + droplet['region']['slug'],\n 'image_' + str(droplet['image']['id']),\n 'size_' + droplet['size']['slug'],\n 'distro_' + DigitalOceanInventory.to_safe(droplet['image']['distribution']),\n 'status_' + droplet['status']):\n self.add_host(group, dest)\n\n # groups that are not always present\n for group in (droplet['image']['slug'],\n droplet['image']['name']):\n if group:\n image = 'image_' + DigitalOceanInventory.to_safe(group)\n self.add_host(image, dest)\n\n if droplet['tags']:\n for tag in droplet['tags']:\n self.add_host(tag, dest)\n\n # hostvars\n info = self.do_namespace(droplet)\n self.inventory['_meta']['hostvars'][dest] = info",
"def save_inv_cache(compile_path, targets):\n if cached.inv_cache:\n inv_cache_path = os.path.join(compile_path, \".kapitan_cache\")\n # If only some targets were selected (-t), overwride only their inventory\n if targets:\n saved_inv_cache = None\n try:\n with open(inv_cache_path, \"r\") as f:\n saved_inv_cache = yaml.safe_load(f)\n except Exception:\n pass\n\n if saved_inv_cache:\n if \"inventory\" not in saved_inv_cache:\n saved_inv_cache[\"inventory\"] = {}\n else:\n saved_inv_cache = {}\n saved_inv_cache[\"inventory\"] = {}\n\n for target in targets:\n if target not in saved_inv_cache[\"inventory\"]:\n saved_inv_cache[\"inventory\"][target] = {}\n\n saved_inv_cache[\"inventory\"][target][\"classes\"] = cached.inv_cache[\"inventory\"][target][\n \"classes\"\n ]\n saved_inv_cache[\"inventory\"][target][\"parameters\"] = cached.inv_cache[\"inventory\"][target][\n \"parameters\"\n ]\n\n with open(inv_cache_path, \"w\") as f:\n logger.debug(\"Saved .kapitan_cache for targets: %s\", targets)\n yaml.dump(saved_inv_cache, stream=f, default_flow_style=False)\n\n else:\n with open(inv_cache_path, \"w\") as f:\n logger.debug(\"Saved .kapitan_cache\")\n yaml.dump(cached.inv_cache, stream=f, default_flow_style=False)",
"def load_inventory(self):\n for item in self.items:\n self.rooms[int(item.initial_room_id) - 1].inventory.add(item)",
"def fetch(self, dest='./'):\n nice_dest = PhysicalKey.from_url(fix_url(dest))\n file_list = []\n pkg = Package()\n\n for logical_key, entry in self.walk():\n physical_key = entry.physical_key\n new_physical_key = nice_dest.join(logical_key)\n\n file_list.append((physical_key, new_physical_key, entry.size))\n\n # return a package reroot package physical keys after the copy operation succeeds\n # see GH#388 for context\n new_entry = entry.with_physical_key(new_physical_key)\n pkg._set(logical_key, new_entry)\n\n copy_file_list(file_list, message=\"Copying objects\")\n\n return pkg",
"def save_local_copy(self, adi):\r\n temp = self.from_copy\r\n self.from_copy = False\r\n products = self.get_products(adi)\r\n print(\"Saving products from {}...\".format(adi))\r\n self.rf.dump_json(products, self.products_copy.format(adi))\r\n for product in products[\"data\"][\"products\"]:\r\n print(\"Saving {}...\".format(product[\"name\"].translate({ord(c): None for c in \"\\\\/:*\\\"<>|\"})))\r\n product_detail = self.get_product_detail(adi, product[\"productId\"], product[\"name\"])\r\n self.rf.dump_json(product_detail, self.product_detail_copy.format(adi, product[\"name\"].translate({ord(c): None for c in \"\\\\/:*\\\"<>|\"})))\r\n self.from_copy = temp",
"def _get_remote_results(self):\n\n if not self._setup_has_ran:\n raise CoreError('The results object must be setup before executing!')\n\n staf_request = ('COPY DIRECTORY \"{0}\" TODIRECTORY \"{1}\" TOMACHINE \"{2}\" RECURSE '\n 'KEEPEMPTYDIRECTORIES'.format(unix_style_path(self._remote_results_path),\n unix_style_path(self._local_results_path),\n BespokeGlobals.BESPOKE_SERVER_HOSTNAME))\n\n result = self._staf_handle.submit(self._sut.network_address, 'fs', staf_request)\n\n if result.rc != 0:\n raise CoreError('Failed to copy the results directory '\n '\"{0}\" from remote machine!'.format(self._remote_results_path))",
"def delete_from_inv(target_info, inventory, images_dir):\n target = inventory.get(target_info.get(\"target\"), {})\n target_name = target.get(\"target\")\n log(\"TRACE\", \"Removing contents of {} from inventory ({})\".format(\n target, target.get(\"contents\", [])))\n dirs_to_delete = []\n # Delete all of the files\n for image_fn in target.get(\"contents\", []):\n image_path = os.path.join(images_dir, image_fn)\n if os.path.isfile(image_path):\n os.remove(image_path)\n log(\"TRACE\", \"Deleted {} from inventory\".format(image_path))\n elif os.path.isdir(image_path):\n dirs_to_delete.append(image_fn)\n else: # File doesn't exist\n log(\"WARN\", \"File {} in inventory does not exist\".format(image_path))\n # Then delete all of the (empty) directories\n for dir_path in dirs_to_delete:\n try:\n if os.path.isdir(dir_path):\n os.removedirs(dir_path)\n except os.error as ex:\n log(\"ERROR\", \"Failed to delete dir: {}\".format(ex))\n inventory.pop(target_name, None)\n return True",
"def export_inventory(self, cursor, user, ids, context):\n website_obj = self.pool.get('magento.instance.website')\n\n website_id = context.get('active_id')\n t = threading.Thread(target=website_obj.export_inventory_to_magento,\n args=(cursor, user, website_id, context, True))\n t.daemon = True\n t.start()\n\n return True#self.open_products(cursor, user, map(int, products), context)",
"def downloadToRemoteFileList(self):\n self.remoteWordList = []\n self.remoteDir = {}\n self.ftp.dir('.', self.addItemToRemoteFileList)\n # self.Remote_completerModel.setStringList(self.remoteWordList)",
"def sync_observation_files(all_targets):\n print(\"Starting to write results to files\")\n # loop over each target and check whether it hass been assigned to a fiber.\n for i_target in range(all_targets.n_targets):\n for tile_file in all_targets.tile_names[i_target]:\n results_file = tile_file.replace(\"Targets_Tile\", \"Results_Tile\")\n f = fits.open(results_file, mode='update')\n\n tmp_id = np.int_(f[1].data['TARGETID'])\n tmp_nobs = f[1].data['NOBS']\n loc = np.where(tmp_id == all_targets.id[i_target])\n if(np.size(loc)!=0):\n tmp_nobs[loc[0]] = tmp_nobs[loc[0]] + 1\n # TOWRITE: still have to make the update to ASSIGNEDTYPE and ASSIGNEDZ \n else:\n raise ValueError('The target id %d in tile was not found in local list'%(all_targets.id[i_target]))\n \n f.flush()\n f.close()\n return",
"def copy_files(self, source, target):\n\n if source == target and is_local(self.borrowed_ctx.host):\n logger.warning(\"IGNORE self-node: {}\".format(self.borrowed_ctx.host))\n return\n\n try:\n for item in os.listdir(source):\n if os.path.isfile(os.path.join(source, item)):\n logger.debug(\n \"processing {} --> {}\".format(\n os.path.join(source, item), self.borrowed_ctx.host\n )\n )\n self._sftp_channel.put(\n os.path.join(source, item), \"%s/%s\" % (target, item)\n )\n else:\n self.mkdir(\"%s/%s\" % (target, item), ignore_existing=True)\n self.copy_files(\n os.path.join(source, item), \"%s/%s\" % (target, item)\n )\n except Exception as e:\n logger.warning(\n \"Error of processing target = ({}:{}), for reason: {}\".format(\n self.borrowed_ctx.host, self.borrowed_ctx.port, e,\n )\n )\n exit(0)",
"def save_data(self, inventory):\n ## Save partitions\n LOG.info('Saving partitions.')\n\n num = self._save_partitions(inventory.partitions.itervalues())\n\n LOG.info('Saved %d partitions.', num)\n\n ## Save groups\n LOG.info('Saving groups.')\n\n num = self._save_groups(inventory.groups.itervalues())\n\n LOG.info('Saved %d groups.', num)\n\n ## Save sites\n LOG.info('Saving sites.')\n\n num = self._save_sites(inventory.sites.itervalues())\n\n LOG.info('Saved %d sites.', num)\n\n ## Save sitepartitions\n LOG.info('Saving sitepartitions.')\n\n def all_sitepartitions():\n for site in inventory.sites.itervalues():\n for partition in inventory.partitions.itervalues():\n yield site.partitions[partition]\n\n num = self._save_sitepartitions(all_sitepartitions())\n\n LOG.info('Saved %d sitepartitions.', num)\n\n ## Save datasets\n LOG.info('Saving datasets.')\n\n num = self._save_datasets(inventory.datasets.itervalues())\n\n LOG.info('Saved %d datasets.', num)\n\n ## Save blocks\n LOG.info('Saving blocks.')\n\n def all_blocks():\n for dataset in inventory.datasets.itervalues():\n for block in dataset.blocks:\n yield block\n \n num = self._save_blocks(all_blocks())\n\n LOG.info('Saved %d blocks.', num)\n\n ## Save files\n LOG.info('Saving files.')\n\n def all_files():\n for dataset in inventory.datasets.itervalues():\n for block in dataset.blocks:\n for lfile in block.files:\n yield lfile\n\n num = self._save_files(all_files())\n\n LOG.info('Saved %d files.', num)\n\n ## Save dataset replicas\n LOG.info('Saving dataset replicas.')\n\n def all_replicas():\n for site in inventory.sites.itervalues():\n for replica in site.dataset_replicas():\n yield replica\n\n num = self._save_dataset_replicas(all_replicas())\n\n LOG.info('Saved %d dataset replicas.', num)\n\n ## Save block replicas\n LOG.info('Saving block replicas.')\n\n def all_replicas():\n for site in inventory.sites.itervalues():\n for dataset_replica in site.dataset_replicas():\n for block_replica in dataset_replica.block_replicas:\n yield block_replica\n\n num = self._save_block_replicas(all_replicas())\n\n LOG.info('Saved %d block replicas.', num)",
"def update_hookup_library_rel_path(scs_hookup_inventory, hookup_library_rel_path, readonly=False):\n\n # CLEAR INVENTORY\n scs_hookup_inventory.clear()\n\n taken_hoookup_ids = {} # temp dict for identifying unique hookups and preventing creation of duplicates (same type and id)\n for abs_path in _path_utils.get_abs_paths(hookup_library_rel_path, is_dir=True):\n\n if abs_path:\n\n # READ ALL \"SII\" FILES IN INVENTORY FOLDER\n for root, dirs, files in os.walk(abs_path):\n\n lprint(\"D Going to parse hookup directory:\\n\\t %r\", (root,))\n\n # print(' root: \"%s\"\\n dirs: \"%s\"\\n files: \"%s\"' % (root, dirs, files))\n for file in files:\n if file.endswith(\".sii\"):\n filepath = os.path.join(root, file)\n # print(' filepath: \"%s\"' % str(filepath))\n hookup_container = _sii.get_data_from_file(filepath)\n\n # ADD ALL ITEMS FROM CONTAINER INTO INVENTORY\n if hookup_container:\n for item in hookup_container:\n # if item.type == 'sign_model':\n if item.id.startswith('_'):\n continue\n else:\n typeid = str(item.type + \" : \" + item.id)\n\n # ignore taken type&ids\n if typeid in taken_hoookup_ids:\n continue\n else:\n taken_hoookup_ids[typeid] = True\n\n hookup_file = scs_hookup_inventory.add()\n hookup_file.name = typeid\n hookup_file.item_id = item.id\n\n if 'model' in item.props:\n # if model is defined as array ( appears if additional lod models are defined )\n # then use first none lod model\n if isinstance(item.props['model'], type(list())):\n hookup_file.model = item.props['model'][0]\n else:\n hookup_file.model = item.props['model']\n\n if 'brand_idx' in item.props:\n try:\n hookup_file.brand_idx = int(item.props['brand_idx'])\n except:\n pass\n\n if 'dir_type' in item.props:\n hookup_file.dir_type = item.props['dir_type']\n\n if 'low_poly_only' in item.props:\n if item.props['low_poly_only'] == 'true':\n hookup_file.low_poly_only = True\n\n if '.svn' in dirs:\n dirs.remove('.svn') # ignore SVN\n\n if not readonly:\n update_item_in_file('Paths.HookupRelDirPath', hookup_library_rel_path)",
"def prepareRemote(self, client, host):\n if not os.path.exists( client.location ) or not os.path.isdir( client.location ):\n raise Exception( \"The sources of client {0} should be found in local directory '{1}', but that either doesn't exist or is not a directory.\".format( client.name, client.location ) )\n if not source.prepareRemote(self, client, host):\n return False\n if self.isInCleanup():\n return\n host.sendFiles( client.location, self.remoteLocation(client, host) )\n return True",
"def post_install(self, dest_dir):\n for obj in self.objects_used:\n obj.post_install(dest_dir)",
"def test_save_asset_data():\n\n inventory_ = copy.deepcopy(self._inventory)\n\n asset = inventory_[\"assets\"][0]\n asset.update({\n \"key\": \"value\"\n })\n\n inventory.save(\n name=self._project[\"name\"],\n config=self._config,\n inventory=inventory_\n )\n\n asset = io.find_one({\"type\": \"asset\", \"name\": asset[\"name\"]})\n print(asset)\n assert_equals(asset[\"data\"][\"key\"], \"value\")",
"def backup_inventory(self):\n if config.get(\"aws\", \"s3_bucket\"):\n archives = self.load_archives()\n\n s3_bucket = S3Backend(self.conf).bucket\n k = Key(s3_bucket)\n k.key = self.backup_key\n\n k.set_contents_from_string(json.dumps(archives))\n\n k.set_acl(\"private\")"
]
| [
"0.5402499",
"0.53651613",
"0.5304608",
"0.5192518",
"0.5163583",
"0.51302993",
"0.5085026",
"0.50551826",
"0.49963874",
"0.49099976",
"0.48465687",
"0.4845763",
"0.48405397",
"0.48283368",
"0.4792837",
"0.4774951",
"0.47671747",
"0.47575256",
"0.4754527",
"0.47445658",
"0.47164157",
"0.47147432",
"0.46999428",
"0.46855402",
"0.46646664",
"0.46472296",
"0.46452695",
"0.4625851",
"0.4614578",
"0.46022725"
]
| 0.7771995 | 0 |
load probe names from file | def load_probes(probe_file):
probes = common.read_file(probe_file)
probe_list = list(filter(None, probes))
return probe_list | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load(name):\n return []",
"def load_specs(self, filename):\n self.filename = filename\n # Add loading functionality here",
"def load_specs(self, filename):\n self.filename = filename\n # Add loading functionality here",
"def load_people(self, file_path):\n pass",
"def load_names(path):\n global taxid_names, scientific_names, synonyms, lowercase_names\n with open(path, 'r') as r:\n for line in r:\n (taxid, name, unique, kind) = re.split(r'\\s*\\|\\s*', line.strip('|\\n\\t '), 3)\n if kind == 'scientific name':\n taxid_names[taxid] = name\n scientific_names[name] = taxid\n else:\n synonyms[name] = taxid\n lowercase_names[name.lower()] = taxid",
"def load_devices_names_file(self):\n try:\n with open(self._path, \"r\") as infile:\n logger.debug(\"Loading devices names from <%s>\", self._path)\n self._devices_names = yaml.safe_load(infile) or {}\n except yaml.YAMLError as error:\n logger.error(\"In devices file %s: %s\", self._path, error)\n raise DevicesNamesConfigLoadError()\n except Exception:\n logger.error(\n \"Could not load device names config <%s>, a new one will be created after successfull start\",\n self._path,\n )",
"def loadFromFile(self, filename):\n\t\treturn []",
"def load_names(args):\n # NAMES is a json document which is just a list of names\n if os.path.isfile(args.names):\n with open(args.names, 'r') as n:\n try:\n names = json.load(n)\n except:\n sys.exit(\"ERROR: {0} is invalid JSON\".format(args.names))\n else:\n sys.exit(\"ERROR {0} file not found.\".format(args.names))\n if len(names) <= 1:\n sys.exit(\"ERROR: {0} needs to have more than 1 name in it\".format(args.names))\n return names",
"def LoadBatch(filename):",
"def gene_txt_of_load(dirname):\n str_list=[]\n list_name=[]\n print 'Yolo Debut'\n for file in os.listdir(dirname):\n if file.endswith(\".npy\"):\n str_list.append(file[:-4]+'=np.load(dirname+'+'\\\"/\\\"'+'+\\\"'+file+'\\\")')\n list_name.append(file[:-4])\n print '\\n'.join(str_list)\n print ','.join(list_name)\n return str_list",
"def loadProfile(fname):\n \n x = np.loadtxt(fname)\n return x[:,1]",
"def load(self, filename):\n\n with open(filename) as f:\n\n for val in f:\n\n val = val.strip().split(\"#\", 1)[0]\n\n if val == '':\n continue\n\n val = int(val, 2)\n self.ram[self.address] = val\n self.address += 1\n\n if len(sys.argv) != 2:\n print(\"Expected Usage: ls8.py [filename-to-run]\")\n sys.exit(1)\n\n if ValueError:\n pass",
"def load(filename):\n print(uc.load(filename))",
"def load_sample(filename):\n return open(os.path.join(SAMPLES, filename)).read()",
"def load(self, name):\n # ext = os.path.splitext(name)[1]\n # if ext == '.mat':\n # self.load_matlab(name)\n # else:\n # self.load_pkl(name)\n self.load_pkl(name)\n nhashes = sum(self.counts)\n # Report the proportion of dropped hashes (overfull table)\n dropped = nhashes - sum(np.minimum(self.depth, self.counts))\n print(\"Read fprints for\", sum(n is not None for n in self.names),\n \"files (\", nhashes, \"hashes) from\", name,\n \"(%.2f%% dropped)\" % (100.0 * dropped / max(1, nhashes)))",
"def __init__(self, file_name, load_uncertainty=False):\n if file_name[-3:] == 'npz':\n self._load_npz(file_name)\n else:\n self._load_3ddose(file_name, load_uncertainty)",
"def loadPulseData(filename, suffix = ''):\n data = np.genfromtxt(filename+'.txt', skip_header=3, names=True,\n dtype='i8,f8,S5,f8,f8,f8,f8,f8,f8')\n print \"Importing...\\n\"\n for key in data.dtype.fields.keys():\n name = key + suffix\n print name\n globals()[name] = data[key]",
"def load_names(self):\n temp_names = []\n\n with open(self.NAMES_FILE) as f:\n for line in f:\n if len(line.strip()) > 0:\n temp_names.append(line.strip())\n\n return temp_names",
"def _load_file(name):\n filename = 'ind.{}.{}'.format(dataset_name, name)\n filename = os.path.join(path, filename)\n with open(filename, 'rb') as f:\n if sys.version_info > (3, 0):\n return pickle.load(f, encoding='latin1') # pylint: disable=unexpected-keyword-arg\n else:\n return pickle.load(f)",
"def load_file(file_name):\n try:\n hdu_list = fits.open(file_name.rstrip(','))\n except OSError as e:\n print('{0} likely not found'.format(file_name))\n raise OSError(e)\n return hdu_list",
"def load_phantom(self,file_or_fname):\n pass",
"def load_signnames(path):\n data = np.genfromtxt(path, dtype = None, delimiter = \",\", skip_header = 1)\n return dict((i, v.decode(\"utf-8\")) for i, v in data)",
"def load_lookup_tables(file_names):\n\n # Check input argument type - - - - - - - - - - - - - - - - - - - - - - - - -\n #\n if (type(file_names) == types.StringType):\n file_names = [file_names] # Make a list out of a single file name\n\n if (type(file_names) != types.ListType):\n msg = ['Input argument is of wrong type: '+file_names, \\\n 'Must be either of type \"string\" or \"list\"']\n log_message(msg,'err')\n raise Exception()\n\n dict = {} # New empty dictionary\n max_len = 1 # Maximal length of the longest key sequence\n\n # Loop over file names - - - - - - - - - - - - - - - - - - - - - - - - - - -\n #\n for fn in file_names:\n\n # Open file and read all lines into a list\n #\n try:\n f = open(fn,'r')\n except:\n log_message('Cannot open file: '+fn,'err')\n raise IOError()\n\n file_data = f.readlines()\n f.close()\n\n tag = '' # Start with no tag\n\n # Now process all lines - - - - - - - - - - - - - - - - - - - - - - - - - -\n #\n for line in file_data:\n l = line.strip() # Remove line separators\n if (len(l) > 0) and (l[0] != '#'): # Not empty line and no comment line\n\n if (l[:5] == 'tag=<'): # It's a line with a new tag\n tag = l[5:7]\n if (tag not in config.name_hmm_obser) and \\\n (tag not in config.geoloc_hmm_obser): # Make sure tag is valid\n log_message('Illegal tag: '+tag,'err')\n raise Exception()\n\n else: # A line with an entry\n\n if (tag == ''): # make sure a tag is set\n log_message('No tag set in file: '+fn,'err')\n raise Exception()\n\n ll = l.split(':') # Separate key from values\n\n if (len(ll) == 2): # Line contains a key - - - - - - - - - - - - - -\n k = ll[0].strip().lower() # Get key, make lower and strip spaces\n\n k_list = k.split(' ') # Make a list of key words\n if (len(k_list) > max_len): # Update maximal key sequence length\n max_len = len(k_list)\n\n val = string.join(k_list,'_')\n key = tuple(k_list)\n this_tag = tag\n\n if (k != ''): # If key is non-empty insert it into dictionary\n if (dict.has_key(key)):\n test_val = dict[key][0] # Value without tag\n test_tag = dict[key][1]\n\n if (val == test_val): # Same values\n if (test_tag.find(this_tag) < 0): # This tag is not in tags\n this_tag = test_tag+'/'+this_tag\n else:\n msg = ['Key already in dictionary with different value', \\\n 'Key: \"'+str(key)+'\", old value: \"'+ \\\n str(dict[key][0])+'\", new value: \"'+str(val)+'\"']\n log_message(msg,'err')\n raise Exception()\n\n this_val = (val, this_tag)\n dict.update({key:this_val}) # Insert key itself into dicionary\n\n v = ll[1].lower() # Get values in a string\n\n elif (len(ll) == 1): # Line contains only values - - - - - - - - - -\n v = ll[0].lower() # Get values in a string\n\n else:\n log_message('Illegal file format in file: '+fn+', line: '+l,'err')\n raise Exception()\n\n vv = v.split(',') # Split values into a list\n\n for v in vv: # Loop over all values - - - - - - - - - - - - - - - -\n vs = v.strip()\n if (vs != ''): # Only append non-empty values\n k_list = vs.split(' ') # Make a list of key words\n if (len(k_list) > max_len): # Update maximal key sequence length\n max_len = len(k_list)\n key = tuple(k_list)\n this_tag = tag\n\n if (dict.has_key(key)):\n test_val = dict[key][0] # Value without tag\n test_tag = dict[key][1]\n\n if (val == test_val): # Same values\n if (test_tag.find(this_tag) < 0): # This tag is not in tags\n this_tag = test_tag+'/'+this_tag\n else:\n msg = ['Key already in dictionary with different value', \\\n 'Key: \"'+str(key)+'\", old value: \"'+ \\\n str(dict[key][0])+'\", new value: \"'+str(val)+'\"']\n 
log_message(msg,'err')\n raise Exception()\n\n this_val = (val, this_tag)\n dict.update({key:this_val}) # Insert key itself into dicionary\n\n return [dict, max_len]",
"def load_ref_case(fname, name):\r\n with open(fname, 'rb') as f:\r\n\r\n a = np.load(f)\r\n\r\n data = a[name]\r\n \r\n return data",
"def load(self, fname, snver=1):\n self._data = self._io.load(fname, snver=snver)",
"def load(self):\n address = 0\n if len(sys.argv) < 2:\n print(\"Please pass in a second file name: python3 ls8.py second_filename.py\")\n sys.exit()\n file_name = sys.argv[1]\n try:\n file = open(file_name, \"r\")\n except FileNotFoundError:\n print(f\"{sys.argv[0]}: {sys.argv[1]} file was not found.\")\n sys.exit()\n \n for line in file.readlines():\n instruction = line.split(\"#\")[0]\n instruction = instruction.strip() \n if len(instruction) > 0:\n self.ram_write(address, int(instruction, 2))\n address += 1 \n file.close()",
"def load_devices():",
"def loadDCData(filename):\n data = np.genfromtxt(filename+'.txt', skip_header=3, names=True)\n globals()[filename] = data",
"def load_firstnames(gender):\n return load_resource(\"resources/%s.txt\" % gender)",
"def init_from_file(self):\n self.src.load('start.00') \n self.oe1.load('start.01')\n #self.det.load('start.02')\n print('NOTE: variables loaded from start.00/start.01 files')"
]
| [
"0.64903796",
"0.6430997",
"0.6430997",
"0.6164019",
"0.61238116",
"0.59681827",
"0.58534676",
"0.5852975",
"0.5834001",
"0.58317584",
"0.58097345",
"0.5800229",
"0.57658803",
"0.5750195",
"0.5747341",
"0.5730893",
"0.5716048",
"0.56928027",
"0.56847095",
"0.5663239",
"0.5639348",
"0.5633489",
"0.5633254",
"0.5616012",
"0.56023455",
"0.55994564",
"0.55887514",
"0.55876845",
"0.55864686",
"0.55541015"
]
| 0.70129216 | 0 |
Get the virtual addresses of the probes. | def get_probe_address(elf_path, probes, section='.text'):
assert len(probes) <= 26, 'Too many probes'
    text_data = objdump_section(elf_path, section)  # use the requested section (default '.text')
name_to_addr = parse_func_names(text_data)
probe_names = list(string.ascii_uppercase)
name_idx = 0
ret = []
for probe in probes:
assert probe in name_to_addr, '{} not found'.format(probe)
ret.append('{}:0x{}'.format(probe_names[name_idx], name_to_addr[probe]))
name_idx += 1
return ret | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_vasp_address(self):\n return self.vasp_addr",
"def _get_virtual_oper_VipV6_address(self):\n return self.__virtual_oper_VipV6_address",
"def virtual_router_ip(self):\n return self._virtual_router_ip",
"def addr(self):\r\n return self._addr",
"def Vaddr_breakdown(self, addr):\n if addr[:2] == '0b':\n addr = addr[2:]\n pagenum_bits = int(math.log2(self.MaxPTEntries)) #Number of bits for page.\n offset_bits = int(math.log2(self.pagesize))\n assert len(addr) == pagenum_bits + offset_bits\n pagenum = int(addr[:pagenum_bits], 2)\n offset = int(addr[pagenum_bits:], 2)\n return pagenum, offset",
"def LocalAddress(self) -> _n_5_t_0:",
"def get_pb_visit_lookup(self) -> Dict[str, int]:\n return self.pb_visit_lookup",
"def sim_access(self, addr):\n if addr[:2] == '0b':\n addr = addr[2:]\n vpn, offset = self.Vaddr_breakdown(addr) #Break down virtual address into VPN and . \n ppn = self.searchTLB(vpn) #Search TLB for a VPN->PPN mapping. ppn = None if not found.\n phys_addr = 0\n if not ppn: #VPN->PPN mapping not found in TLB\n ppn = self.searchPT(vpn, ppn) #Next search the page table for a mapping. Either returns or finds the next available PPN in memory (LRU)\n phys_addr = str(bin(ppn)) + str(bin(offset))\n self.accesses += 1\n return phys_addr\n else:\n #Found ppn. Concatenate with offset and return as physical address bitstring. \n phys_addr = str(bin(ppn)) + str(bin(offset))\n self.accesses += 1\n return phys_addr",
"def virtual_to_physical_address(cls, addr: int) -> int:\n if addr > 0xffffffff80000000:\n return addr - 0xffffffff80000000\n return addr - 0xc0000000",
"def get_vip_address(self, vip_name):\n networks = self.nailgun_client.get_networks(self.cluster_id)\n vip = networks.get('vips').get(vip_name, {}).get('ipaddr', None)\n asserts.assert_is_not_none(\n vip, \"Failed to get the IP of {} server\".format(vip_name))\n\n logger.debug(\"VIP '{0}': {1}\".format(vip_name, vip))\n return vip",
"def get_addr(self):\n return Server.t_addresses.get(threading.get_ident())",
"def ip_addr(self):\n return self.ip_addresses[0]",
"def min_addr(self):\n return self.vaddr",
"def get_address(machine: Machine) -> str:\n default_route, _ = machine.run(\"ip route get 8.8.8.8\")\n return re.search(\" src ([0-9.]+) \", default_route).group(1)",
"def __get_probe(self, address):\r\n if address not in self.__probes:\r\n self.__probes[address] = Probe(self.__id, address)\r\n return self.__probes[address]",
"def __get_probe(self, address):\r\n if address not in self.__probes:\r\n self.__probes[address] = Probe(self.__id, address)\r\n return self.__probes[address]",
"def get_host_ip_addr():\n return nova_conf.my_ip",
"def get_vm_ip(vm_name):\n return ll_vms.wait_for_vm_ip(vm_name)[1]['ip']",
"def tunnel1_vgw_inside_address(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"tunnel1_vgw_inside_address\")",
"def get_addrs(self):\n # TODO check if server is listening\n return self.multiaddrs",
"def getAddress(self) -> int:\n ...",
"def get_v_plot_address(p): \n return hex(int('0x%s' % pacman.pacman_configuration.get('memory_addresses', 'v_hex'), 16) + (int('0x400000', 16))*(p-1))[2:]",
"def _open_stack_get_ip_(srv):\n addr_info = srv.addresses\n for net in addr_info.keys():\n for addr in addr_info[net]:\n ip = addr['addr']\n return ip",
"def ipaddr(self):\n return self.mesh.ipaddr()",
"def get_address(self):\n return logic.address(self.get_program())",
"def get_remote_addr(self):\n connection = self._open_db()\n cursor = connection.cursor()\n cursor.execute('SELECT remote_addr FROM sessions WHERE id = ?;', \\\n (self.sid,))\n remote_addr = cursor.fetchone()\n cursor.close()\n connection.close()\n return remote_addr[0]",
"def remote_addr(self):\r\n route = self.remote_route\r\n return route[0] if route else None",
"def get_entry_addr(afile):\n cmd = 'readelf -h ' + cmd_quote(afile) + ' | grep \"Entry point address:\" || true'\n output = get_shell_cmd_output(cmd)\n if output:\n tokens = output.split(\":\")\n entry_addr = tokens[1].split()[0]\n #verbose(afile + \" entry point address is: \" + entry_addr, LEVEL_2)\n return int(entry_addr, 0)\n return 0",
"def ipaddrs( host ):\n return socket.gethostbyaddr(host)[2][0]",
"def getAddressOfIndex(self) -> long:\n ..."
]
| [
"0.65414345",
"0.6157455",
"0.6135771",
"0.5897749",
"0.5837022",
"0.57624984",
"0.5688131",
"0.56795603",
"0.5670054",
"0.564151",
"0.5618105",
"0.5552885",
"0.555211",
"0.5512466",
"0.5505781",
"0.5505781",
"0.5487975",
"0.5483153",
"0.54597396",
"0.5440602",
"0.5429957",
"0.5420009",
"0.5398925",
"0.5356204",
"0.5344948",
"0.53412586",
"0.53329957",
"0.53258187",
"0.5313614",
"0.5308599"
]
| 0.638283 | 1 |
Load the COMPAS recidivism dataset. The purpose of this dataset is to predict whether a criminal will recidivate within two years of release. | def load_recidivism(return_X_y=False):
return _load_dataset(
'compas/two-year-recidivism.csv',
target='two_year_recid',
return_X_y=return_X_y
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_crime():\n\n # LOAD DATA FROM FILE.\n # filename = \"resources\\CommViolPredUnnormalizedData.csv\"\n filename = os.path.join('resources', 'CommViolPredUnnormalizedData.csv')\n data = pd.read_csv(filename, header=0, sep=';', na_values='?', skipinitialspace=True)\n data = data.sample(frac=1, random_state=42)\n\n targets = ['violentPerPop']\n pfeatures = ['race']\n\n # Drop rows with no associated attribute to be predicted.\n dataset = data.dropna(subset=targets, axis=0).reset_index(drop=True)\n\n # Keep only features that have more than 95% of points with associated value.\n features_to_drop = list()\n n_points = len(dataset)\n acc_rate = 0.95\n\n for c in dataset.columns:\n tot_values = np.sum(dataset[c].isna())\n if tot_values >= (1 - acc_rate) * n_points:\n features_to_drop.append(c)\n\n dataset = dataset.drop(features_to_drop, axis=1)\n\n # Remove features that are either correlated with the target or useless.\n feat_to_remove = [\n 'fold',\n 'communityname',\n 'state',\n 'murders',\n 'murdPerPop',\n 'rapes',\n 'rapesPerPop',\n 'robberies',\n 'robbbPerPop',\n 'assaults',\n 'assaultPerPop',\n 'burglaries',\n 'burglPerPop',\n 'larcenies',\n 'larcPerPop',\n 'autoTheft',\n 'autoTheftPerPop',\n 'arsons',\n 'arsonsPerPop',\n 'nonViolPerPop'\n ]\n\n feat_to_remove += targets + pfeatures\n\n # Prepare the feature dataset.\n features = [f for f in dataset.columns if f not in feat_to_remove]\n dataset = dataset[features + pfeatures + targets]\n\n # Last check on Nan values.\n dataset = dataset.dropna(axis=0).reset_index(drop=True)\n\n # Force all types to float.\n for c in dataset.columns:\n dataset[c] = dataset[c].astype(float)\n\n # Features selection.\n top_features = utils.get_top_features(dataset[features], dataset[targets], n=15)\n\n for pfeat in pfeatures:\n if pfeat in top_features:\n print(\"Protected feature \" + pfeat + \" in top features!\")\n\n x, xp, y = dataset[top_features].values, dataset[pfeatures].values, dataset[targets].values\n\n return x, xp, y",
"def fetch_ricci_df(preprocess=False):\n (train_X, train_y), (test_X, test_y) = lale.datasets.openml.fetch(\n \"ricci\", \"classification\", astype=\"pandas\", preprocess=preprocess\n )\n orig_X = pd.concat([train_X, test_X]).sort_index()\n orig_y = pd.concat([train_y, test_y]).sort_index()\n if preprocess:\n race = pd.Series(orig_X[\"race_W\"] == 1, dtype=np.float64)\n dropped_X = orig_X.drop(labels=[\"race_B\", \"race_H\", \"race_W\"], axis=1)\n encoded_X = dropped_X.assign(race=race)\n fairness_info = {\n \"favorable_labels\": [1],\n \"protected_attributes\": [{\"feature\": \"race\", \"reference_group\": [1]}],\n }\n return encoded_X, orig_y, fairness_info\n else:\n fairness_info = {\n \"favorable_labels\": [\"Promotion\"],\n \"protected_attributes\": [{\"feature\": \"race\", \"reference_group\": [\"W\"]}],\n }\n return orig_X, orig_y, fairness_info",
"def fetch_compas_df(preprocess=False):\n (train_X, train_y), (test_X, test_y) = lale.datasets.openml.fetch(\n \"compas\", \"classification\", astype=\"pandas\", preprocess=False\n )\n orig_X = pd.concat([train_X, test_X]).sort_index().astype(np.float64)\n orig_y = pd.concat([train_y, test_y]).sort_index().astype(np.float64)\n if preprocess:\n race = pd.Series(orig_X[\"race_caucasian\"] == 1, dtype=np.float64)\n dropped_X = orig_X.drop(\n labels=[\"race_african-american\", \"race_caucasian\"], axis=1\n )\n encoded_X = dropped_X.assign(race=race)\n fairness_info = {\n \"favorable_labels\": [1],\n \"protected_attributes\": [\n {\"feature\": \"sex\", \"reference_group\": [1]},\n {\"feature\": \"race\", \"reference_group\": [1]},\n ],\n }\n return encoded_X, orig_y, fairness_info\n else:\n fairness_info = {\n \"favorable_labels\": [1],\n \"protected_attributes\": [\n {\"feature\": \"sex\", \"reference_group\": [1]},\n {\"feature\": \"race_caucasian\", \"reference_group\": [1]},\n ],\n }\n return orig_X, orig_y, fairness_info",
"def load_ccs9():\n ccs9 = pd.read_csv(pkg_resources.resource_filename(__name__,'$dxref 2015.csv'))\n ccs9 = ccs9.reset_index()\n for col in ccs9.columns:\n ccs9.loc[:,col]=ccs9[col].str.strip('\\'')\n ccs9.columns=ccs9.iloc[0,:]\n ccs9 = ccs9.iloc[1:,:]\n ccs9 = ccs9.replace(r'^\\s*$', np.nan, regex=True)\n ccs9 = ccs9.loc[ccs9['ICD-9-CM CODE'].notnull(),:]\n ccs9.loc[:,'ICD-9-CM CODE'] = ccs9['ICD-9-CM CODE'].str.replace(' ','')\n ccs9.loc[:,'CCS CATEGORY'] = ccs9['CCS CATEGORY'].str.replace(' ','')\n ccs9 = ccs9.iloc[:,0:4] \n ccs9_labels = pd.read_csv(pkg_resources.resource_filename(__name__,'dxlabel 2015.csv'))\n ccs9 = ccs9.merge(ccs9_labels,how='left',left_on='CCS CATEGORY',right_on='CCS DIAGNOSIS CATEGORIES')\n ccs9.drop('CCS CATEGORY DESCRIPTION',axis=1,inplace=True)\n ccs9.drop('CCS DIAGNOSIS CATEGORIES',axis=1,inplace=True)\n ccs9.columns = [i.replace('CCS DIAGNOSIS CATEGORIES LABELS','CCS CATEGORY DESCRIPTION') for i in ccs9.columns]\n return ccs9",
"def remission(path):\n import pandas as pd\n path = os.path.expanduser(path)\n filename = 'remission.csv'\n if not os.path.exists(os.path.join(path, filename)):\n url = 'http://dustintran.com/data/r/boot/remission.csv'\n maybe_download_and_extract(path, url,\n save_file_name='remission.csv',\n resume=False)\n\n data = pd.read_csv(os.path.join(path, filename), index_col=0,\n parse_dates=True)\n x_train = data.values\n metadata = {'columns': data.columns}\n return x_train, metadata",
"def load_ccs10():\n ccs10 = pd.read_csv(pkg_resources.resource_filename(__name__,'ccs_dx_icd10cm_2019_1.csv'))\n ccs10.columns=[i.strip('\\'') for i in ccs10.columns]\n for col in ccs10.columns:\n ccs10.loc[:,col]=ccs10[col].str.strip('\\'')\n ccs10 = ccs10.replace(r'^\\s*$', np.nan, regex=True)\n ccs10.loc[:,'ICD-10-CM CODE'] = ccs10['ICD-10-CM CODE'].str.replace(' ','')\n ccs10=ccs10.iloc[:,0:4]\n return ccs10",
"def _init_dataset():\n global _residues\n if _residues is not None:\n # Database is already initialized\n return\n\n # Residuue data is taken from\n # ftp://ftp.wwpdb.org/pub/pdb/data/monomers/components.cif\n # (2019/01/27)\n _info_dir = dirname(realpath(__file__))\n with open(join(_info_dir, \"residues.msgpack\"), \"rb\") as file:\n _residues = msgpack.unpack(\n file, use_list=False, raw=False\n )",
"def load_data(path=\"data/cora/\", dataset=\"cora\"):\n print('Loading {} dataset...'.format(dataset))\n\n idx_features_labels = np.genfromtxt(\"{}{}.content\".format(path, dataset), dtype=np.dtype(str))\n features = sp.csr_matrix(idx_features_labels[:, 1:-1], dtype=np.float32)\n labels = encode_onehot(idx_features_labels[:, -1])\n\n # build graph\n idx = np.array(idx_features_labels[:, 0], dtype=np.int32)\n idx_map = {j: i for i, j in enumerate(idx)}\n edges_unordered = np.genfromtxt(\"{}{}.cites\".format(path, dataset), dtype=np.int32)\n edges = np.array(list(map(idx_map.get, edges_unordered.flatten())),\n dtype=np.int32).reshape(edges_unordered.shape)\n adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),\n shape=(labels.shape[0], labels.shape[0]), dtype=np.float32)\n\n # build symmetric adjacency matrix\n adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)\n\n print('Dataset has {} nodes, {} edges, {} features.'.format(adj.shape[0], edges.shape[0], features.shape[1]))\n\n return features.todense(), adj, labels",
"def load_cci10():\n \n cci10 = pd.read_csv(pkg_resources.resource_filename(__name__,'cci_icd10cm_2019_1.csv'))\n \n cci10.columns = [i.strip('\\'') for i in cci10.columns]\n \n for col in cci10.columns:\n cci10.loc[:,col] = cci10[col].str.strip('\\'')\n cci10 = cci10.replace(r'^\\s*$', np.nan, regex=True)\n cci10.columns = [i.replace('CHRONIC INDICATOR','CHRONIC') for i in cci10.columns]\n \n dict_bodysystem = [\n ('1','Infectious and parasitic disease'),\n ('2','Neoplasms'),\n ('3','Endocrine, nutritional, and metabolic diseases and immunity disorders'),\n ('4','Diseases of blood and blood-forming organs'),\n ('5','Mental disorders'),\n ('6','Diseases of the nervous system and sense organs'),\n ('7','Diseases of the circulatory system'),\n ('8','Diseases of the respiratory system'),\n ('9','Diseases of the digestive system'),\n ('10','Diseases of the genitourinary system'),\n ('11','Complications of pregnancy, childbirth, and the puerperium'),\n ('12','Diseases of the skin and subcutaneous tissue'),\n ('13','Diseases of the musculoskeletal system'),\n ('14','Congenital anomalies'),\n ('15','Certain conditions originating in the perinatal period'),\n ('16','Symptoms, signs, and ill-defined conditions'),\n ('17','Injury and poisoning'),\n ('18','Factors influencing health status and contact with health services'),\n ]\n \n cci10 = cci10.merge(pd.DataFrame(dict_bodysystem,columns=['BODY SYSTEM','BODY SYSTEM DESCRIPTION']),how='left',on='BODY SYSTEM')\n \n cci10.loc[:,'ICD-10-CM CODE'] = cci10['ICD-10-CM CODE'].str.replace(' ','')\n \n return cci10",
"def cma_bst(redownload: bool = False) -> Dataset:\n return Dataset.get(\"cma_bst\", redownload=redownload)",
"def crohn(path):\n import pandas as pd\n path = os.path.expanduser(path)\n filename = 'crohn.csv'\n if not os.path.exists(os.path.join(path, filename)):\n url = 'http://dustintran.com/data/r/gap/crohn.csv'\n maybe_download_and_extract(path, url,\n save_file_name='crohn.csv',\n resume=False)\n\n data = pd.read_csv(os.path.join(path, filename), index_col=0,\n parse_dates=True)\n x_train = data.values\n metadata = {'columns': data.columns}\n return x_train, metadata",
"def read_covid():\n return pd.read_csv(_COVID_FILE, parse_dates=[\"date\"])",
"def load_citations(data_folder: str, num_articles: int = 16980) -> np.ndarray:\n citations = np.zeros([num_articles, num_articles])\n with open(os.path.join(data_folder, \"citations.dat\")) as citations_file:\n for i, line in enumerate(citations_file.readlines()):\n for citation in line.strip().split()[1:]:\n citations[i][int(citation)] = 1\n return citations",
"def test_fetch_crime_sedf(self):\n assert isinstance(_vector.fetch_beach_access_data(f='arcgis'), \n pd.DataFrame)",
"def load_cci9():\n cci9 = pd.read_csv(pkg_resources.resource_filename(__name__,'cci2015.csv'),skiprows=1)\n cci9.columns = [i.strip('\\'') for i in cci9.columns]\n \n for col in cci9.columns:\n cci9.loc[:,col] = cci9[col].str.strip('\\'')\n cci9 = cci9.replace(r'^\\s*$', np.nan, regex=True)\n cci9.columns=[i.replace('CATEGORY DESCRIPTION','CHRONIC') for i in cci9.columns]\n\n dict_bodysystem=[\n ('1' ,'Infectious and parasitic disease'),\n ('2' ,'Neoplasms'),\n ('3' ,'Endocrine, nutritional, and metabolic diseases and immunity disorders'),\n ('4' ,'Diseases of blood and blood-forming organs'),\n ('5' ,'Mental disorders'),\n ('6' ,'Diseases of the nervous system and sense organs'),\n ('7' ,'Diseases of the circulatory system'),\n ('8' ,'Diseases of the respiratory system'),\n ('9' ,'Diseases of the digestive system'),\n ('10','Diseases of the genitourinary system'),\n ('11','Complications of pregnancy, childbirth, and the puerperium'),\n ('12','Diseases of the skin and subcutaneous tissue'),\n ('13','Diseases of the musculoskeletal system'),\n ('14','Congenital anomalies'),\n ('15','Certain conditions originating in the perinatal period'),\n ('16','Symptoms, signs, and ill-defined conditions'),\n ('17','Injury and poisoning'),\n ('18','Factors influencing health status and contact with health service'),\n ]\n \n cci9 = cci9.merge(pd.DataFrame(dict_bodysystem,columns=['BODY SYSTEM','BODY SYSTEM DESCRIPTION']),how='left',on='BODY SYSTEM')\n \n cci9.loc[:,'ICD-9-CM CODE'] = cci9['ICD-9-CM CODE'].str.replace(' ','')\n \n return cci9",
"def load_data():\n\t\t# load the data\n\t\tDATPATH = \"../data/\"\n\t\t#fnino = DATPATH + \"nino3.csv\" # 1871-2000\n\t\tfnino = DATPATH + \"tas_Amon_IPSL-CM5A-LR_past1000_r1i1p1_0850_1850_nino3_tseries.csv\" # 1871-2016\n\t\t#fnino = DATPATH + \"nino34.long.data\"\n\t\t#nc_data_nino3 = netCDF4.Dataset(fnino)\n\t\t#nino3_load = nc_data_nino3.variables['tas'][:]\n\t\t#dnino = nino3_load.flatten()\n\n\t\tdnino = np.genfromtxt(fnino, delimiter=\",\", dtype=float).flatten()\n\t\t#fismr = DATPATH + \"ismr.csv\" # 1871-2000\n\t\t#fismr = DATPATH + \"psl_Amon_IPSL-CM5A-LR_past1000_r1i1p1_0850_1850_1_india_goswami_2002_tseries.csv\" # 1871-2016\n\t\tfismr = DATPATH + \"pr_Amon_IPSL-CM5A-LR_past1000_r1i1p1_0850_1850_goswami_india_tseries.csv\" # 1871-2016\n\t\tdismr = np.genfromtxt(fismr, delimiter=\",\", dtype=float).flatten()\n\t\t#fvolc = DATPATH + \"robock.txt\" # 1871-2000\n\t\tfvolc = DATPATH + \"sigl.txt\" # 1871-2016\n\t\tdvolc = np.genfromtxt(fvolc, delimiter=\",\", dtype=float).flatten()\n\n\t\tfvolc_source = DATPATH + \"volc_source_850_1850.csv\" # 1871-2016\n\t\tdvolc_source = np.genfromtxt(fvolc_source, delimiter=\",\", dtype=float).flatten()\n\t\t# simple check for data consistency\n\t\tassert dnino.shape == dismr.shape, \"Data sets are unequal!\"\n\t\tassert int(dismr.shape[0]/12) == dvolc.shape[0], \"Data sets are unequal\"\n\t\treturn dnino, dismr, dvolc, dvolc_source",
"def load_data():\n df = pd.read_csv(\"../../Data/breast_cancer_data/data.csv\")\n\n cols = df.columns\n X = df[cols[2:-1]].to_numpy()\n y = df[cols[1]].to_numpy()\n y = (y=='M').astype(np.int) * 2 - 1\n\n train_X = X[:-150]\n train_y = y[:-150]\n\n test_X = X[-150:]\n test_y = y[-150:]\n\n return train_X, train_y, test_X, test_y",
"def load_data():\n data = pd.read_csv(\"https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data\", header=None)\n\n # utiliza somente as duas primeiras classes\n data = data[:100]\n # transforma as classes em 0 e 1\n data[4] = np.where(data.iloc[:, -1] == 'Iris-setosa', 0, 1)\n data = np.asmatrix(data, dtype='float64')\n return data",
"def load_occupancy_dataset(trainsize=500, testsize=1000):\n filename = 'datasets/numericsequence.csv'\n dataset = loadcsv(filename)\n trainset, testset = splitdataset(dataset, trainsize, testsize)\n return trainset, testset",
"def loadRes(self, resFile):\n res = COCO()\n res.dataset['images'] = [img for img in self.dataset['images']]\n\n print('Loading and preparing results...')\n tic = time.time()\n if type(resFile) == str: #or type(resFile) == unicode:\n anns = json.load(open(resFile))\n elif type(resFile) == np.ndarray:\n anns = self.loadNumpyAnnotations(resFile)\n else:\n anns = resFile\n assert type(anns) == list, 'results in not an array of objects'\n annsImgIds = [ann['image_id'] for ann in anns]\n assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \\\n 'Results do not correspond to current coco set'\n if 'caption' in anns[0]:\n imgIds = set([img['id'] for img in res.dataset['images']]) & set([ann['image_id'] for ann in anns])\n res.dataset['images'] = [img for img in res.dataset['images'] if img['id'] in imgIds]\n for id, ann in enumerate(anns):\n ann['id'] = id+1\n elif 'bbox' in anns[0] and not anns[0]['bbox'] == []:\n res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])\n for id, ann in enumerate(anns):\n bb = ann['bbox']\n x1, x2, y1, y2 = [bb[0], bb[0]+bb[2], bb[1], bb[1]+bb[3]]\n if not 'segmentation' in ann:\n ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]\n ann['area'] = bb[2]*bb[3]\n ann['id'] = id+1\n ann['iscrowd'] = 0\n elif 'segmentation' in anns[0]:\n res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])\n for id, ann in enumerate(anns):\n # now only support compressed RLE format as segmentation results\n ann['area'] = maskUtils.area(ann['segmentation'])\n if not 'bbox' in ann:\n ann['bbox'] = maskUtils.toBbox(ann['segmentation'])\n ann['id'] = id+1\n ann['iscrowd'] = 0\n elif 'keypoints' in anns[0]:\n res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])\n for id, ann in enumerate(anns):\n s = ann['keypoints']\n x = s[0::3]\n y = s[1::3]\n x0,x1,y0,y1 = np.min(x), np.max(x), np.min(y), np.max(y)\n ann['area'] = (x1-x0)*(y1-y0)\n ann['id'] = id + 1\n ann['bbox'] = [x0,y0,x1-x0,y1-y0]\n print('DONE (t={:0.2f}s)'.format(time.time()- tic))\n\n res.dataset['annotations'] = anns\n res.createIndex()\n return res",
"def _download_cxr_model(self):\n file_id = \"1KIsLmVv8jKTVG_LxchMZAvR7rugHy7uB\"\n download_from_google_drive(file_id=file_id, folder=\"data/\", name=\"covid_cxr.zip\")",
"def load_covid_cases_data(date=None):\n if not date:\n date = datetime.today()\n data = requests.get(f'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports/{date.strftime(\"%m-%d-%Y\")}.csv')\n\n f = StringIO(data.text)\n reader = csv.DictReader(f, delimiter=',')\n results = {}\n for row in reader:\n fips = row.pop('FIPS', None)\n if fips:\n results[int(fips)] = row\n print(f\"{date.strftime('%m-%d-%Y')} has {len(results.keys())} results\")\n return results",
"def reprogramming(\n subset: str = ReprogrammingSubset.FULL.s,\n path: Union[str, Path] = \"datasets/reprogramming.h5ad\",\n **kwargs: Any,\n) -> AnnData:\n subset = ReprogrammingSubset(subset)\n adata = _load_dataset_from_url(path, *_datasets[\"reprogramming\"], **kwargs)\n\n if subset == ReprogrammingSubset.FULL:\n return adata\n if subset == ReprogrammingSubset.K48:\n return adata[~adata.obs[\"cluster\"].isnull()].copy()\n if subset == ReprogrammingSubset.K85:\n return adata[~adata.obs[\"timecourse\"].isnull()].copy()\n\n raise NotImplementedError(\n f\"Subsetting option `{subset.s!r}` is not yet implemented.\"\n )",
"def load_cifar(dataset_name='cifar10'):\n dataset_name = dataset_name.strip().lower().replace(' ', '')\n\n if dataset_name.lower() not in ['cifar10', 'cifar100']:\n raise ValueError('Only cifar10 or cifar100 are valid dataset_name.')\n baseURL = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'\n if dataset_name == 'cifar100':\n baseURL = 'https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz'\n\n dirname = os.path.join(_trident_dir, dataset_name.strip())\n if not os.path.exists(dirname):\n try:\n os.makedirs(dirname)\n except OSError:\n # Except permission denied and potential race conditions\n # in multi-threaded environments.\n pass\n\n \"\"\"Load CIFAR data from `path`\"\"\"\n _,filename,ext=split_path(baseURL)\n download_file(baseURL, dirname, filename+ext, dataset_name)\n file_path = os.path.join(dirname, filename+ext)\n\n\n if '.tar' in ext:\n extract_archive(file_path, dirname, archive_format='auto')\n filelist = glob.glob(dirname + '/*/*.*')\n extract_path ,_,_= split_path(filelist[0])\n filelist = [f for f in os.listdir(extract_path) if os.path.isfile(os.path.join(extract_path, f))]\n data=[]\n label=[]\n test_data=[]\n test_label=[]\n for file_path in filelist:\n if 'data_batch' in file_path:\n with open(os.path.join(extract_path,file_path), 'rb') as f:\n entry = pickle.load(f, encoding='latin1')\n data.append(entry['data'])\n label.append(entry['labels'])\n elif 'test_batch' in file_path:\n with open(os.path.join(extract_path,file_path), 'rb') as f:\n entry = pickle.load(f, encoding='latin1')\n test_data.append(entry['data'])\n test_label.append(entry['labels'])\n data = np.concatenate(data)\n data = data.reshape((data.shape[0], 3, 32, 32))\n data = data.transpose(0, 2, 3, 1).astype(np.float32)\n\n test_data = np.concatenate(test_data)\n test_data = test_data.reshape((test_data.shape[0], 3, 32, 32))\n test_data = test_data.transpose(0, 2, 3, 1).astype(np.float32)\n\n # Prepare labels\n label = np.concatenate(label)\n test_label = np.concatenate(test_label)\n\n trainData = Iterator(data=ImageDataset(data,object_type=ObjectType.rgb), label=LabelDataset(label,object_type=ObjectType.classification_label))\n testData = Iterator(data=ImageDataset(test_data,object_type=ObjectType.rgb), label=LabelDataset(test_label,object_type=ObjectType.classification_label))\n dataset = DataProvider(dataset_name, traindata=trainData, testdata=testData)\n dataset.binding_class_names(['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship',\n 'truck'] if dataset_name == 'cifar10' else [], 'en-US')\n return dataset",
"def load_data(path=\"../data/cora/\", dataset=\"cora\"):\n print('Loading {} dataset...'.format(dataset))\n\n idx_features_labels = np.genfromtxt(\"{}{}.content\".format(path, dataset),\n dtype=np.dtype(str))\n features = sp.sparse.csr_matrix(idx_features_labels[:, 1:-1], dtype=np.float32)\n# labels = encode_onehot(idx_features_labels[:, -1])\n values = np.unique(idx_features_labels[:, -1])\n values.sort()\n labels = np.zeros(idx_features_labels.shape[0])\n for i in range(labels.shape[0]):\n labels[i] = np.where(values == idx_features_labels[i, -1])[0][0]\n labels = torch.tensor(labels).long()\n\n # build graph\n idx = np.array(idx_features_labels[:, 0], dtype=np.int32)\n idx_map = {j: i for i, j in enumerate(idx)}\n edges_unordered = np.genfromtxt(\"{}{}.cites\".format(path, dataset),\n dtype=np.int32)\n edges = np.array(list(map(idx_map.get, edges_unordered.flatten())),\n dtype=np.int32).reshape(edges_unordered.shape)\n adj = sp.sparse.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),\n shape=(labels.shape[0], labels.shape[0]),\n dtype=np.float32)\n\n # build symmetric adjacency matrix\n adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)\n\n# features = normalize(features)\n adj = normalize(adj + sp.sparse.eye(adj.shape[0]))\n\n idx_train = range(140)\n idx_val = range(200, 500)\n idx_test = range(500, 1500)\n\n features = torch.FloatTensor(np.array(features.todense()))\n# labels = torch.LongTensor(np.where(labels)[1])\n adj = sparse_mx_to_torch_sparse_tensor(adj)\n\n idx_train = torch.LongTensor(idx_train)\n idx_val = torch.LongTensor(idx_val)\n idx_test = torch.LongTensor(idx_test)\n\n return adj, features, labels, idx_train, idx_val, idx_test",
"def read_ct_data(train_start, train_count, eval_start, eval_count):\n data = pd.read_csv('/opt/train.csv')\n\n # Dropping the id column\n data.drop(['ID_code'], axis=1, inplace=True)\n\n data = data.values\n return (data[train_start:train_start + train_count],\n data[eval_start:eval_start + eval_count])",
"def load_data():\n df = pd.read_csv(\"https://raw.githubusercontent.com/Andrea-Giuliani/Python-Project/master/data/final_dataset.csv\",sep=',') \n return df",
"def load_radiography_data():\n # Load all Covid Images\n images = []\n labels = []\n for filename in os.listdir(\n os.path.join(\"COVID-19 Radiography Database\", \"COVID-19\")):\n img = cv2.imread(\n os.path.join(\"COVID-19 Radiography Database\", \"COVID-19\", filename), cv2.IMREAD_GRAYSCALE)\n if img is not None:\n images.append(img)\n labels.append(\"covid\")\n\n count_covid_images = len(images)\n\n # Load all Normal (non-covid) Images\n for filename in os.listdir(\n os.path.join(\"COVID-19 Radiography Database\", \"NORMAL\")):\n img = cv2.imread(\n os.path.join(\"COVID-19 Radiography Database\", \"NORMAL\", filename), cv2.IMREAD_GRAYSCALE)\n if img is not None and count_covid_images > 0:\n images.append(img)\n labels.append(\"normal\")\n count_covid_images = count_covid_images - 1\n\n X_train, X_test, y_train, y_test = train_test_split(images, labels, test_size=0.25, shuffle=True)\n return (np.array(X_train), np.array(y_train)), (np.array(X_test), np.array(y_test))",
"def load(self, filename):\n # XXX Hay que comprobar los datos leidos y lanzar excepcion\n f = open(filename)\n prelaciones = []\n asig = []\n rec = []\n l = f.readline()\n while l:\n # Activities and following activities\n if l[0:21] == 'PRECEDENCE RELATIONS:':\n f.readline()\n l = f.readline()\n while l[0] != '*':\n data = l.split()\n prel = (data[0], data[3:])\n prelaciones.append(prel)\n l = f.readline()\n\n # Activity duration and resource units needed\n if l[0] == '-':\n l = f.readline()\n while l[0] != '*':\n asig.append(l.split())\n l = f.readline()\n\n # Name, type and unit of resources\n if l[0:22] == 'RESOURCEAVAILABILITIES':\n l = f.readline()\n while l[0] != '*':\n rec.append(l.split())\n l = f.readline()\n\n l = f.readline()\n \n # Create data structure\n cont = 1\n activities = []\n for prelacion in prelaciones:\n activities.append([cont, prelacion[0], prelacion[1], '', '', '', '', '', ('Beta')])\n cont += 1 \n\n # Update activities duration\n for n in range(len(asig)): \n activities[n][6] = float(asig[n][2])\n\n # Update resources\n i = 1\n m = 0\n resources = []\n if len(rec) < 2:\n raise InvalidFileFormatException()\n\n for n in range(len(rec[1])):\n # Renewable\n if rec[0][m]=='R' or rec[0][m][0]=='R':\n if rec[0][m]=='R':\n row=[rec[0][m]+rec[0][i], 'Renewable', '', rec[1][n]] \n m+=2\n else:\n row=[rec[0][m], 'Renewable', '', rec[1][n]] \n m+=1 \n # Non Renewable\n elif rec[0][m]=='N' or rec[0][m][0]=='N':\n if rec[0][m]=='N':\n row=[rec[0][m]+rec[0][i], 'Non renewable', rec[1][n], '']\n m+=2\n else:\n row=[rec[0][m], 'Non renewable', rec[1][n], ''] \n m+=1\n # Double constrained\n elif rec[0][m]=='D' or rec[0][m][0]=='D':\n if rec[0][m]=='D':\n row=[rec[0][m]+rec[0][i], 'Double constrained', rec[1][n], rec[1][n]]\n m+=2\n else:\n row=[rec[0][m], 'Double constrained', rec[1][n], rec[1][n]] \n m+=1\n \n resources.append(row)\n i += 2\n # Note: Unlimited resources are not present on PSPLIB projects and so \n # not taken into account here\n\n # Resources needed per activity\n asignation = []\n for n in range(len(asig)): \n for m in range(3, 3+len(rec[1])): #len(self.rec[1]): number of resources \n if asig[n][m] != '0': #unused resources are not shown\n i = m-3\n row = [asig[n][0], resources[i][0], asig[n][m]] \n asignation.append(row)\n \n return (activities, [], resources, asignation)",
"def coco_raw_data(data_path=None):\n train= _read_chars(os.path.join(data_path, \"train_caps.txt\"))\n val = _read_chars(os.path.join(data_path, \"dev_caps.txt\"))\n test = _read_chars(os.path.join(data_path, \"test_caps.txt\"))\n chars = set(train)\n id_2_word = dict(enumerate(chars))\n word_to_id = {i: w for w, i in id_2_word.items()}\n train_data = _file_to_word_ids(train, word_to_id)\n valid_data = _file_to_word_ids(val, word_to_id)\n test_data = _file_to_word_ids(test, word_to_id)\n return train_data, valid_data, test_data, word_to_id, id_2_word"
]
| [
"0.6137884",
"0.5845019",
"0.57248294",
"0.5718205",
"0.5666465",
"0.55472314",
"0.5543531",
"0.55415714",
"0.54749215",
"0.5438623",
"0.5411372",
"0.5403246",
"0.5398308",
"0.53844136",
"0.5298849",
"0.529671",
"0.5291875",
"0.52728283",
"0.52658653",
"0.52654713",
"0.52479",
"0.5247042",
"0.52424467",
"0.5238308",
"0.5234316",
"0.5228056",
"0.52219844",
"0.52211106",
"0.5209353",
"0.5192074"
]
| 0.6738154 | 0 |
Load the GDP growth dataset (from FRED data). The purpose of this dataset is to forecast GDP growth based on macroeconomic variables. | def load_gdp(return_X_y=False):
return _load_dataset(
'gdp/GDP-growth.csv',
target='GDP_g',
return_X_y=return_X_y
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sample_fg(path):\n import pandas as pd\n path = os.path.expanduser(path)\n filename = 'sample_fg.csv'\n if not os.path.exists(os.path.join(path, filename)):\n url = 'http://dustintran.com/data/r/Stat2Data/SampleFG.csv'\n maybe_download_and_extract(path, url,\n save_file_name='sample_fg.csv',\n resume=False)\n\n data = pd.read_csv(os.path.join(path, filename), index_col=0,\n parse_dates=True)\n x_train = data.values\n metadata = {'columns': data.columns}\n return x_train, metadata",
"def gdp():\n gdp_csv = pd.read_csv(csv_path(\"UN_GDP.csv\"), index_col=0, usecols=[0, 3],\n dtype={\"Value\": np.float32})\n gdp_csv.columns = [\"GDP\"]\n return gdp_csv",
"def update_grad_data():\n t_file = 'hcapgrd1_full_data_*.fits*'\n out_dir = deposit_dir + '/Grad_save/'\n tdir = out_dir + 'Gradcap/'\n#\n#--- read grad group name\n#\n gfile = house_keeping + 'grad_list'\n grad_list = mcf.read_data_file(gfile)\n\n [tstart, tstop, year] = ecf.find_data_collecting_period(tdir, t_file)\n\n get_data(tstart, tstop, year, grad_list, out_dir)",
"def read_gdp() -> pd.DataFrame:\n\n gdp_df = pd.read_csv(\"data/API_NY.GDP.PCAP.CD_DS2_en_csv_v2_988471.csv\",\n header=4, usecols=[0,62], names=[\"Country\", \"GDP\"])\n\n index = gdp_df[gdp_df[\"Country\"]==\"Iran, Islamic Rep.\"].index.values[0]\n gdp_df.loc[index, \"Country\"] = \"Iran\"\n index = gdp_df[gdp_df[\"Country\"] == \"United States\"].index.values[0]\n gdp_df.loc[index, \"Country\"] = \"US\"\n index = gdp_df[gdp_df[\"Country\"] == \"Russian Federation\"].index.values[0]\n gdp_df.loc[index, \"Country\"] = \"Russia\"\n\n gdp_df = gdp_df.dropna()\n\n return gdp_df",
"def load_gtsf(self):\n gtsf_path = os.path.join(self.data_path, 'gtsf')\n trips = pd.read_csv(os.path.join(gtsf_path, 'trips.txt'))\n stops = pd.read_csv(os.path.join(gtsf_path, 'stops.txt'))\n stop_times = pd.read_csv(os.path.join(gtsf_path, \"stop_times.txt\"))\n routes = pd.read_csv(os.path.join(gtsf_path, 'routes.txt'))\n cal = pd.read_csv(os.path.join(gtsf_path, 'calendar.txt'))\n self.gtsf = {\"trips\": trips, 'stops': stops, \"stop_times\": stop_times, \"routes\": routes, 'calendar': cal}",
"def load_gldas_elevation_dataset(gldas_elevation_file): \n d1 = xr.open_dataset(gldas_elevation_file).load()\n return d1",
"def load_population_data():\n return pd.read_csv(os.path.join('data', 'processed', 'worldometer_data.csv'),\n usecols = ['Country/Region','Population'])",
"def load_expt_gaps():\n path = os.path.join(DATA_DIR, \"bandgap-zhuo-4604.csv\")\n df = pd.read_csv(path, index_col=False)\n return df",
"def get_eval_data() -> GraphDataset:\n _load_data_if_needed()\n return eval_data",
"def load_gdp_distribution_data(data_file):\n header = parse_data_header(data_file)\n with open(data_file, 'r') as data:\n data = np.genfromtxt(data, delimiter=' ', skip_header=6)\n # Set the areas in which there is no data to 0\n data[data == header['NODATA_value']] = 0\n return header, data",
"def geolife(redownload: bool = False) -> Dataset:\n return Dataset.get(\"geolife\", redownload=redownload)",
"def readData(path_to_dataset, train_size=0.8, validation_size=0.2):\n data = pd.read_csv(os.path.join(path_to_dataset, 'training_set_rel3.tsv'), sep='\\t', encoding='ISO-8859-1')\n # Drop columns that has null value \n data = data.dropna(axis=1)\n # Only take 4 columns of data from the dataset: essay_id, essay_set, essay, domain1_score\n data = data[['essay_id', 'essay_set', 'essay', 'domain1_score']]\n # Perform 80:20 train-test split on the training data\n train_set, test_set = train_test_split(data, train_size=train_size, random_state=0)\n # Split the 80% training set further into 60:20\n training_set, validation_set = train_test_split(train_set, test_size=validation_size, random_state=0)\n return training_set, test_set, validation_set",
"def load_data(ticker, n_steps=50, scale=True, shuffle=True, lookup_step=1, \r\n test_size=0.2, feature_columns=['open', 'high', 'low', 'close', 'adjclose', 'volume',\r\n 'adjclose_v', 'value_gas', 'value_silver', 'value_gold', 'value_usd',\r\n 'PMI', 'Production', 'New Orders', 'Backlog of Orders',\r\n 'Supplier Deliveries', 'Inventories', 'Customers Inventories',\r\n 'Employment', 'Prices', 'New Export Orders', 'Imports']):\r\n \r\n ind = []\r\n quandl.ApiConfig.api_key = 'hXxA3xSampghdgkVeSJC'\r\n pmi = pd.read_excel(\"pmi.xlsx\", index_col=0)\r\n \r\n \r\n # Cleaning, processing and transforming the gold data\r\n gold = quandl.get(\"WGC/GOLD_DAILY_USD\")\r\n gold.columns = [\"value_gold\"]\r\n \r\n gold_ind = []\r\n gold_data = []\r\n for i in range(1, len(gold.index)):\r\n if(gold.index[i-1].day != gold.index[i].day-1):\r\n for k in range(gold.index[i].day - gold.index[i-1].day):\r\n try:\r\n gold_ind.append(datetime(gold.index[i-1].year, gold.index[i-1].month, gold.index[i-1].day+k))\r\n gold_data.append(gold.value_gold[i-1])\r\n except:\r\n pass\r\n else:\r\n gold_ind.append(gold.index[i-1])\r\n gold_data.append(gold.value_gold[i-1])\r\n \r\n \r\n gold = pd.DataFrame(data = gold_data, index = gold_ind, columns = [\"value_gold\"])\r\n # Cleaning, processing and transforming the USD data \r\n gold_temp = quandl.get(\"WGC/GOLD_DAILY_USD\")\r\n gold_temp.columns = [\"value_gold\"]\r\n \r\n usd_value = quandl.get(\"FRED/TWEXB\")\r\n usd_value.columns = [\"value_usd\"]\r\n data = []\r\n for i in range(len(list(usd_value.value_usd))-1):\r\n for k in range(5):\r\n data.append(list(usd_value.value_usd)[i])\r\n data.append(list(usd_value.value_usd)[-1])\r\n new_usd = pd.DataFrame(data, index=gold_temp.index[gold_temp.index.get_loc('1995-01-04'):gold_temp.index.get_loc('2020-01-01')+1])\r\n new_usd.columns = [\"value_usd\"]\r\n \r\n new_usd_ind = []\r\n new_usd_data = []\r\n for i in range(1, len(new_usd.index)):\r\n if(new_usd.index[i-1].day != new_usd.index[i].day-1):\r\n for k in range(new_usd.index[i].day - new_usd.index[i-1].day):\r\n try:\r\n new_usd_ind.append(datetime(new_usd.index[i-1].year, new_usd.index[i-1].month, new_usd.index[i-1].day+k))\r\n new_usd_data.append(new_usd.value_usd[i-1])\r\n except:\r\n pass\r\n else:\r\n new_usd_ind.append(new_usd.index[i-1])\r\n new_usd_data.append(new_usd.value_usd[i-1])\r\n \r\n \r\n new_usd = pd.DataFrame(data = new_usd_data, index = new_usd_ind, columns = [\"value_usd\"])\r\n \r\n # Cleaning, processing and transforming the silver data\r\n silver = quandl.get(\"LBMA/SILVER\")\r\n silver.drop([\"GBP\", \"EURO\"], axis=1, inplace=True)\r\n silver.columns = [\"value_silver\"]\r\n \r\n silver_ind = []\r\n silver_data = []\r\n for i in range(1, len(silver.index)):\r\n if(silver.index[i-1].day != silver.index[i].day-1):\r\n for k in range(silver.index[i].day - silver.index[i-1].day):\r\n try:\r\n silver_ind.append(datetime(silver.index[i-1].year, silver.index[i-1].month, silver.index[i-1].day+k))\r\n silver_data.append(silver.value_silver[i-1])\r\n except:\r\n pass\r\n else:\r\n silver_ind.append(silver.index[i-1])\r\n silver_data.append(silver.value_silver[i-1])\r\n \r\n \r\n silver = pd.DataFrame(data = silver_data, index = silver_ind, columns = [\"value_silver\"])\r\n\r\n # Cleaning, processing and transforming the gas data\r\n \r\n gas = quandl.get(\"FRED/DGASUSGULF\")\r\n gas.columns = [\"value_gas\"]\r\n \r\n gas_ind = []\r\n gas_data = []\r\n for i in range(1, len(gas.index)):\r\n if(gas.index[i-1].day != gas.index[i].day-1):\r\n for k in 
range(gas.index[i].day - gas.index[i-1].day):\r\n try:\r\n gas_ind.append(datetime(gas.index[i-1].year, gas.index[i-1].month, gas.index[i-1].day+k))\r\n gas_data.append(gas.value_gas[i-1])\r\n except:\r\n pass\r\n else:\r\n gas_ind.append(gas.index[i-1])\r\n gas_data.append(gas.value_gas[i-1])\r\n \r\n \r\n gas = pd.DataFrame(data = gas_data, index = gas_ind, columns = [\"value_gas\"])\r\n # Cleaning, processing and transforming the Volatility data\r\n v_data = si.get_data(\"^VIX\")\r\n v_data.drop([\"ticker\", \"volume\", 'open', 'high', 'low', 'close'], axis=1, inplace=True)\r\n v_data.columns = [\"adjclose_v\"]\r\n \r\n v_data_ind = []\r\n v_data_data = []\r\n for i in range(1, len(v_data.index)):\r\n if(v_data.index[i-1].day != v_data.index[i].day-1):\r\n for k in range(v_data.index[i].day - v_data.index[i-1].day):\r\n try:\r\n v_data_ind.append(datetime(v_data.index[i-1].year, v_data.index[i-1].month, v_data.index[i-1].day+k))\r\n v_data_data.append(v_data.adjclose_v[i-1])\r\n except:\r\n pass\r\n else:\r\n v_data_ind.append(v_data.index[i-1])\r\n v_data_data.append(v_data.adjclose_v[i-1])\r\n \r\n \r\n v_data = pd.DataFrame(data = v_data_data, index = v_data_ind, columns = [\"adjclose_v\"])\r\n \r\n # see if ticker is already a loaded stock from yahoo finance\r\n if isinstance(ticker, str):\r\n # load it from yahoo_fin library\r\n df = si.get_data(ticker)\r\n elif isinstance(ticker, pd.DataFrame):\r\n # already loaded, use it directly\r\n df = ticker\r\n else:\r\n raise TypeError(\"ticker can be either a str or a `pd.DataFrame` instances\")\r\n df_ind = []\r\n df_data = {\"open\":[], \"high\":[], \"low\":[], \"close\":[], \"adjclose\":[], \"volume\":[], \"ticker\":[]}\r\n for i in range(1, len(df.index)):\r\n if(df.index[i-1].day != df.index[i].day-1):\r\n for k in range(df.index[i].day - df.index[i-1].day):\r\n try:\r\n df_ind.append(datetime(df.index[i-1].year, df.index[i-1].month, df.index[i-1].day+k))\r\n df_data[\"open\"].append(df.open[i-1])\r\n df_data[\"high\"].append(df.high[i-1])\r\n df_data[\"low\"].append(df.low[i-1])\r\n df_data[\"close\"].append(df.close[i-1])\r\n df_data[\"adjclose\"].append(df.adjclose[i-1])\r\n df_data[\"volume\"].append(df.volume[i-1])\r\n df_data[\"ticker\"].append(df.ticker[i-1])\r\n except:\r\n pass\r\n else:\r\n df_ind.append(df.index[i-1])\r\n df_data[\"open\"].append(df.open[i-1])\r\n df_data[\"high\"].append(df.high[i-1])\r\n df_data[\"low\"].append(df.low[i-1])\r\n df_data[\"close\"].append(df.close[i-1])\r\n df_data[\"adjclose\"].append(df.adjclose[i-1])\r\n df_data[\"volume\"].append(df.volume[i-1])\r\n df_data[\"ticker\"].append(df.ticker[i-1])\r\n df = pd.DataFrame(data = df_data, index = df_ind) \r\n final_df = pd.concat([df, v_data, gas, silver, gold, new_usd, pmi], axis=1, join=\"inner\")\r\n print(final_df.columns)\r\n # this will contain all the elements we want to return from this function\r\n result = {}\r\n # we will also return the original dataframe itself\r\n result['final_df'] = final_df.copy()\r\n # make sure that the passed feature_columns exist in the dataframe\r\n for col in feature_columns:\r\n assert col in final_df.columns, f\"'{col}' does not exist in the dataframe.\"\r\n\r\n if scale:\r\n column_scaler = {}\r\n # scale the data (prices) from 0 to 1\r\n for column in feature_columns:\r\n scaler = preprocessing.MinMaxScaler()\r\n final_df[column] = scaler.fit_transform(np.expand_dims(final_df[column].values, axis=1))\r\n column_scaler[column] = scaler\r\n\r\n # add the MinMaxScaler instances to the result returned\r\n 
result[\"column_scaler\"] = column_scaler\r\n\r\n # add the target column (label) by shifting by `lookup_step`\r\n final_df['future'] = final_df['adjclose'].shift(-lookup_step)\r\n\r\n # last `lookup_step` columns contains NaN in future column\r\n # get them before droping NaNs\r\n last_sequence = np.array(final_df[feature_columns].tail(lookup_step))\r\n \r\n # drop NaNs\r\n final_df.dropna(inplace=True)\r\n\r\n sequence_data = []\r\n sequences = deque(maxlen=n_steps)\r\n\r\n for entry, target in zip(final_df[feature_columns].values, final_df['future'].values):\r\n sequences.append(entry)\r\n if len(sequences) == n_steps:\r\n sequence_data.append([np.array(sequences), target])\r\n\r\n # get the last sequence by appending the last `n_step` sequence with `lookup_step` sequence\r\n # for instance, if n_steps=50 and lookup_step=10, last_sequence should be of 59 (that is 50+10-1) length\r\n # this last_sequence will be used to predict in future dates that are not available in the dataset\r\n last_sequence = list(sequences) + list(last_sequence)\r\n # shift the last sequence by -1\r\n last_sequence = np.array(pd.DataFrame(last_sequence).shift(-1).dropna())\r\n # add to result\r\n result['last_sequence'] = last_sequence\r\n \r\n # construct the X's and y's\r\n X, y = [], []\r\n for seq, target in sequence_data:\r\n X.append(seq)\r\n y.append(target)\r\n\r\n # convert to numpy arrays\r\n X = np.array(X)\r\n y = np.array(y)\r\n\r\n # reshape X to fit the neural network\r\n X = X.reshape((X.shape[0], X.shape[2], X.shape[1]))\r\n \r\n # split the dataset\r\n result[\"X_train\"], result[\"X_test\"], result[\"y_train\"], result[\"y_test\"] = train_test_split(X, y, \r\n test_size=test_size, shuffle=shuffle)\r\n # return the result\r\n return result",
"def get_gs_as_dataset(self, fname):\n return pd.read_csv(f\"{self.gs_base_url}/{fname}\", sep=\"\\t\")",
"def gdf(self) -> gpd.GeoDataFrame:\n path = str(get_path(\"geojson/FeatureCollection02.json\"))\n df = gpd.read_file(path)\n return df",
"def load_demographic_data(self):\n\n # load household size distribution and age distribution\n self.hh_comp = load_probs(os.path.join(self.params['resource_prefix'],\n self.params['hh_composition']), False)\n self.params['age_cutoffs'] = [int(x) for x in self.hh_comp[0][1:][0]] # yuk!\n self.params['adult_age'] = int(float(self.params['adult_age']))\n self.age_dist = load_probs(os.path.join(self.params['resource_prefix'],\n self.params['age_distribution']))\n\n annual_factor = self.params['t_dur']/365.0\n\n # load and scale MORTALITY rates\n self.death_rates = {}\n self.death_rates[0] = self.parse_age_rates(os.path.join(\n self.params['resource_prefix'],\n self.params['death_rates_m']), annual_factor, True)\n self.death_rates[1] = self.parse_age_rates(os.path.join(\n self.params['resource_prefix'],\n self.params['death_rates_f']), annual_factor, True)\n\n ### load FERTILITY age probs (don't require scaling) for closed pops\n self.fertility_age_probs = load_prob_tables(os.path.join(\n self.params['resource_prefix'],\n self.params['fertility_age_probs']))\n self.fertility_parity_probs = None\n #load_probs_new(os.path.join(\n # self.params['resource_prefix'],\n # self.params['fertility_parity_probs']))\n self.fertility_age_rates = self.parse_age_rates(os.path.join(\n self.params['resource_prefix'],\n self.params['fertility_age_rates']), annual_factor, False)\n self.new_marriage_years = float(self.params['new_marriage_years'])\n self.new_marriage_fertility = float(self.params['marriage_based_fertility'])\n self.single_mother_fertility = float(self.params['single_mother_fertility'])\n self.params['partner_age_diff'] = float(self.params['partner_age_diff'])\n self.params['partner_age_sd'] = float(self.params['partner_age_sd'])\n\n ### load and scale leave/couple/divorce and growth rates\n if self.params['dyn_rates']:\n # rates will be a list of annual values\n self.params['leaving_probs'] = load_prob_list(os.path.join(\n self.params['resource_prefix'], self.params['leaving_prob_file']))\n self.params['couple_probs'] = load_prob_list(os.path.join(\n self.params['resource_prefix'], self.params['couple_prob_file']))\n self.params['divorce_probs'] = load_prob_list(os.path.join(\n self.params['resource_prefix'], self.params['divorce_prob_file']))\n self.params['growth_rates'] = load_prob_list(os.path.join(\n self.params['resource_prefix'], self.params['growth_rate_file']))\n self.params['imm_rates'] = load_prob_list(os.path.join(\n self.params['resource_prefix'], self.params['imm_rate_file']))\n\n self.params_adj['leaving_probs'] = [adjust_prob(x, self.params['t_dur'])\n for x in self.params['leaving_probs']]\n self.params_adj['couple_probs'] = [adjust_prob(x, self.params['t_dur'])\n for x in self.params['couple_probs']]\n self.params_adj['divorce_probs'] = [adjust_prob(x, self.params['t_dur'])\n for x in self.params['divorce_probs']]\n self.params_adj['growth_rates'] = [adjust_prob(x, self.params['t_dur'])\n for x in self.params['growth_rates']]\n self.params_adj['imm_rates'] = [adjust_prob(x, self.params['t_dur'])\n for x in self.params['imm_rates']]\n\n self.dyn_years = min(len(self.death_rates[0][0])-1, len(self.fertility_age_probs)-1,\n len(self.params_adj['leaving_probs'])-1, len(self.params_adj['couple_probs'])-1,\n len(self.params_adj['divorce_probs'])-1, len(self.params_adj['growth_rates'])-1)\n\n else:\n # adjust demographic event probabilities according to time step\n self.params_adj['couple_probs'] = [adjust_prob(\n self.params['couple_prob'], self.params['t_dur'])]\n 
self.params_adj['leaving_probs'] = [adjust_prob(\n self.params['leaving_prob'], self.params['t_dur'])]\n self.params_adj['divorce_probs'] = [adjust_prob(\n self.params['divorce_prob'], self.params['t_dur'])]\n self.params_adj['growth_rates'] = [adjust_prob(\n self.params['growth_rate'], self.params['t_dur'])]\n self.params_adj['imm_rates'] = [adjust_prob(\n self.params['imm_rate'], self.params['t_dur'])]",
"def load_data_from_fold(data_path):\r\n print(\"\\nLoading data from json folder {}\".format(data_path))\r\n\r\n SAMPLES_TO_CONSIDER = 22050\r\n\r\n data = preprocess_dataset(data_path, SAMPLES_TO_CONSIDER)\r\n\r\n X = np.array(data[\"MFCCs\"])\r\n y = np.array(data[\"labels\"])\r\n print(\"Training sets loaded!\")\r\n print(\"data size :\", X.shape, \"labels size: \", y.shape)\r\n print(\"release the 'data' for memories\")\r\n del data\r\n\r\n return X, y",
"def _load_training_data(self):\n self._save_training_data()",
"def gc_data(dataset, dirname, train_ratio=0.8):\n \n # Define path where dataset should be saved\n data_path = \"data/{}.pth\".format(dataset)\n\n # If already created, do not recreate\n if os.path.exists(data_path):\n data = torch.load(data_path)\n else:\n if dataset == 'syn6':\n #G = gengraph.gen_syn6()\n data = SimpleNamespace()\n with open('data/BA-2motif.pkl', 'rb') as fin:\n data.edge_index, data.x, data.y = pkl.load(fin)\n data.x = np.ones_like(data.x)\n else:\n # MUTAG\n data = SimpleNamespace()\n with open('data/Mutagenicity.pkl', 'rb') as fin:\n data.edge_index, data.x, data.y = pkl.load(fin)\n\n # Define NumSpace dataset\n data.x = torch.FloatTensor(data.x)\n data.edge_index = torch.FloatTensor(data.edge_index)\n data.y = torch.LongTensor(data.y)\n _, data.y = data.y.max(dim=1)\n data.num_classes = 2\n data.num_features = data.x.shape[-1]\n data.num_nodes = data.edge_index.shape[1]\n data.num_graphs = data.x.shape[0]\n data.name = dataset\n\n # Shuffle graphs \n p = torch.randperm(data.num_graphs)\n data.x = data.x[p]\n data.y = data.y[p]\n data.edge_index = data.edge_index[p]\n \n # Train / Val / Test split\n data.train_mask, data.val_mask, data.test_mask = split_function(\n data.y, train_ratio)\n # Save data\n torch.save(data, data_path)\n return data",
"def get_datadfs(self, recal=True):\n\n # those files contain score >1\n trn_fname = os.path.join(self.save_dir, 'reg_trn_set_df.csv')\n val_fname = os.path.join(self.save_dir, 'reg_val_set_df.csv')\n tst_fname = os.path.join(self.save_dir, 'reg_tst_set_df.csv')\n\n if os.path.exists(trn_fname) and os.path.exists(val_fname) and os.path.exists(tst_fname) and not recal:\n logging.info('reading...\\n{}\\n{}\\n{}'.format(trn_fname, val_fname, tst_fname))\n trn_set_df = pd.read_csv(trn_fname, sep='\\t')\n val_set_df = pd.read_csv(val_fname, sep='\\t')\n tst_set_df = pd.read_csv(tst_fname, sep='\\t')\n else:\n # read phage and bact relation file\n data_meta_info_f = self.config['data_meta_info']\n if not os.path.exists(data_meta_info_f):\n logging.error('data meta information file not Found. {}'.format(data_meta_info_f))\n raise FileNotFoundError('data meta information file not Found')\n data_info = pd.read_csv(data_meta_info_f)\n logging.info('total samples: {}'.format(len(data_info)))\n\n # select specific columns\n sel_cols = 'hostID,phageID,b_Score,phage_bac_name,bac_len,phage_len'.split(',')\n data_info_df = data_info[sel_cols]\n\n # select rows with score > 1\n data_info_sel = data_info_df[data_info_df['b_Score'] > 1].copy()\n data_info_sel.reset_index(drop=True, inplace=True)\n\n b_scores = data_info_sel['b_Score'].values\n b_scores = b_scores.reshape(-1, 1)\n\n # MinMaxScaler()\n mm_scaler = MinMaxScaler()\n mm_scaler.fit(b_scores)\n mm_scaler_fname = os.path.join(self.save_dir, 'minmax_scaler.joblib')\n if os.path.exists(mm_scaler_fname):\n os.remove(mm_scaler_fname)\n dump(mm_scaler, mm_scaler_fname)\n data_info_sel['minmax_bscore'] = np.squeeze(mm_scaler.transform(b_scores))\n\n # StandardScaler()\n ss_scaler = StandardScaler()\n ss_scaler.fit(b_scores)\n ss_scaler_fname = os.path.join(self.save_dir, 'standard_scaler.joblib')\n if os.path.exists(ss_scaler_fname):\n os.remove(ss_scaler_fname)\n dump(ss_scaler, ss_scaler_fname)\n data_info_sel['norm_bscore'] = np.squeeze(ss_scaler.transform(b_scores))\n\n # MaxAbsScaler()\n ma_scaler = MaxAbsScaler()\n ma_scaler.fit(b_scores)\n ma_scaler_fname = os.path.join(self.save_dir, 'maxabs_scaler.joblib')\n if os.path.exists(ma_scaler_fname):\n os.remove(ma_scaler_fname)\n dump(ma_scaler, ma_scaler_fname)\n data_info_sel['maxabs_bscore'] = np.squeeze(ma_scaler.transform(b_scores))\n\n logging.info('number of samples with score > 1 : {}'.format(len(data_info_sel)))\n\n # creat train, validation and test dataset\n # split dataframe, 80% for train , 10% for validation and 10% for test\n trn_set_df, val_set_df, tst_set_df = np.split(data_info_sel.sample(frac=1, random_state=1),\n [int(.7*len(data_info_sel)), int(.85*len(data_info_sel))])\n\n trn_set_df.reset_index(drop=True, inplace=True)\n val_set_df.reset_index(drop=True, inplace=True)\n tst_set_df.reset_index(drop=True, inplace=True)\n\n if os.path.exists(trn_fname):\n os.remove(trn_fname)\n trn_set_df.to_csv(trn_fname, sep='\\t', index=False)\n if os.path.exists(val_fname):\n os.remove(val_fname)\n val_set_df.to_csv(val_fname, sep='\\t', index=False)\n if os.path.exists(tst_fname):\n os.remove(tst_fname)\n tst_set_df.to_csv(tst_fname, sep='\\t', index=False)\n\n logging.info('train sample: {}'.format(len(trn_set_df)))\n logging.info('valid sample: {}'.format(len(val_set_df)))\n logging.info('test sample: {}'.format(len(tst_set_df)))\n\n return trn_set_df, val_set_df, tst_set_df",
"def load_dataset():\n try:\n data_path = ROOT_PATH.joinpath('data', 'Complete_TAVG_Daily_LatLong1_1880.nc')\n ds = xarray.open_dataset(data_path)\n return ds\n except FileNotFoundError:\n raise",
"def load():\n filepath = dirname(abspath(__file__))\n##### EDIT THE FOLLOWING TO POINT TO DatasetName.csv #####\n data = recfromtxt(open(filepath + '/spector.csv',\"rb\"), delimiter=\" \",\n names=True, dtype=float, usecols=(1,2,3,4))\n names = list(data.dtype.names)\n endog = array(data[names[3]], dtype=float)\n endog_name = names[3]\n exog = column_stack(data[i] for i in names[:3]).astype(float)\n exog_name = names[:3]\n dataset = Dataset(data=data, names=names, endog=endog, exog=exog,\n endog_name = endog_name, exog_name=exog_name)\n return dataset",
"def load_data(data_path):\n print(\"RNN Language MODEL: Loading gigaword corpus\")\n return data.CorpusGigaword(data_path)",
"def load_G(self, G_checkpoint):\n checkpoint = torch.load(G_checkpoint)\n self.G.load_state_dict(checkpoint['gnet'])\n self.gen_optim.load_state_dict(checkpoint['gopt'])",
"def load_wmt_en_fr_dataset(path='data'):\n path = os.path.join(path, 'wmt_en_fr')\n # URLs for WMT data.\n _WMT_ENFR_TRAIN_URL = \"http://www.statmt.org/wmt10/\"\n _WMT_ENFR_DEV_URL = \"http://www.statmt.org/wmt15/\"\n\n def gunzip_file(gz_path, new_path):\n \"\"\"Unzips from gz_path into new_path.\"\"\"\n logging.info(\"Unpacking %s to %s\" % (gz_path, new_path))\n with gzip.open(gz_path, \"rb\") as gz_file:\n with open(new_path, \"wb\") as new_file:\n for line in gz_file:\n new_file.write(line)\n\n def get_wmt_enfr_train_set(path):\n \"\"\"Download the WMT en-fr training corpus to directory unless it's there.\"\"\"\n filename = \"training-giga-fren.tar\"\n maybe_download_and_extract(filename, path, _WMT_ENFR_TRAIN_URL, extract=True)\n train_path = os.path.join(path, \"giga-fren.release2.fixed\")\n gunzip_file(train_path + \".fr.gz\", train_path + \".fr\")\n gunzip_file(train_path + \".en.gz\", train_path + \".en\")\n return train_path\n\n def get_wmt_enfr_dev_set(path):\n \"\"\"Download the WMT en-fr training corpus to directory unless it's there.\"\"\"\n filename = \"dev-v2.tgz\"\n dev_file = maybe_download_and_extract(filename, path, _WMT_ENFR_DEV_URL, extract=False)\n dev_name = \"newstest2013\"\n dev_path = os.path.join(path, \"newstest2013\")\n if not (gfile.Exists(dev_path + \".fr\") and gfile.Exists(dev_path + \".en\")):\n logging.info(\"Extracting tgz file %s\" % dev_file)\n with tarfile.open(dev_file, \"r:gz\") as dev_tar:\n fr_dev_file = dev_tar.getmember(\"dev/\" + dev_name + \".fr\")\n en_dev_file = dev_tar.getmember(\"dev/\" + dev_name + \".en\")\n fr_dev_file.name = dev_name + \".fr\" # Extract without \"dev/\" prefix.\n en_dev_file.name = dev_name + \".en\"\n dev_tar.extract(fr_dev_file, path)\n dev_tar.extract(en_dev_file, path)\n return dev_path\n\n logging.info(\"Load or Download WMT English-to-French translation > {}\".format(path))\n\n train_path = get_wmt_enfr_train_set(path)\n dev_path = get_wmt_enfr_dev_set(path)\n\n return train_path, dev_path",
"def load():\n filepath = dirname(abspath(__file__))\n data = recfromtxt(filepath + '/scotvote.csv', delimiter=\",\",\n names=True, dtype=float, usecols=(1,2,3,4,5,6,7,8))\n names = list(data.dtype.names)\n endog = array(data[names[0]], dtype=float)\n endog_name = names[0]\n exog = column_stack(data[i] for i in names[1:]).astype(float)\n exog_name = names[1:]\n dataset = Dataset(data=data, names=names, endog=endog, exog=exog,\n endog_name = endog_name, exog_name=exog_name)\n return dataset",
"def load_gltf(self):\n with open(str(self.path)) as fd:\n self.gltf = GLTFMeta(self.path, json.load(fd), self.meta)",
"def test_GFD_import_from_constructor(self):\n filepath = '7.txt'\n original_gfd = flow_processing_input.GroundFlowData()\n original_gfd.detector_flow_data = createGFDDataset(10).dataset\n original_gfd.export_to_file(filepath)\n new_gfd = flow_processing_input.GroundFlowData(filepath)\n os.remove(filepath)\n # Check if new_gfd contains the same attributes as the original_gfd\n self.assertTrue(new_gfd == original_gfd)",
"def import_gpa_data(filename):\n\tgpa_df = pd.read_csv(filename, index_col='STUDENT ID')\n\tgpa_df = gpa_df.rename(index=int, columns={\"GRADE LEVEL\": \"grade\", \n\t\t\"LAST NAME\": \"last_name\", \"FIRST NAME\": \"first_name\", \n\t\t\"AVG GPA\":\"weekly_gpa\"})\n\tgpa_df.index.names = ['ID']\n\t# round values to 2 decimal places\n\tgpa_df = gpa_df.round(2)\n\n\treturn gpa_df",
"def load_dataset(label, max_deltaR=None):\n\n # Files should be located in the datasets directory\n particles = np.load(\"datasets/PARTICLES_\"+label+\".npy\", allow_pickle=True)\n\n with open(\"datasets/DROP_\"+label, 'rb') as pickle_file:\n water = pickle.load(pickle_file)\n\n with open(\"datasets/INTERACTIONS_\"+label, 'rb') as pickle_file:\n interactions = pickle.load(pickle_file)\n\n samples = particles_to_samples(particles)\n\n if max_deltaR is not None:\n whr = water.deltaR.flatten() < max_deltaR\n samples = samples[whr]\n water.deltaR = water.deltaR[whr]\n water.theta = water.theta[whr]\n\n return samples, water, interactions"
]
| [
"0.5956029",
"0.5645633",
"0.55883247",
"0.55594856",
"0.5555773",
"0.54849714",
"0.5457794",
"0.5436919",
"0.5369149",
"0.5354158",
"0.52888817",
"0.52768093",
"0.5245818",
"0.52390873",
"0.5233721",
"0.5226974",
"0.52218026",
"0.52175444",
"0.51985455",
"0.51948434",
"0.5190641",
"0.51821",
"0.515934",
"0.51473796",
"0.5129557",
"0.5112603",
"0.5098027",
"0.5092504",
"0.5082062",
"0.5072126"
]
| 0.72741246 | 0 |
Sets the nickname of this GetSesameResponse. | def nickname(self, nickname):
if nickname is None:
raise ValueError("Invalid value for `nickname`, must not be `None`") # noqa: E501
self._nickname = nickname | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_nickname(self, nickname):\n \n if len(nickname) > globals.MAX_NICKNAME_LENGTH:\n nick = nickname[0:globals.MAX_NICKNAME_LENGTH-3]+\"...\"\n else:\n nick = nickname\n \n self._nickname.set_message(nick)",
"def nickname(self, new_nickname):\r\n self.set({\"nickname\": new_nickname})",
"def set_nick(self, nick):\n raise NotImplementedError",
"async def nickname(self, ctx, *, nickname=\"\"):\n # [p]set nickname <nickname>\n\n nickname = nickname.strip()\n if nickname == \"\":\n nickname = None\n try:\n await self.bot.change_nickname(ctx.message.server.me, nickname)\n await self.bot.say(\"Done.\")\n except discord.Forbidden:\n await self.bot.say(\"I cannot do that, I lack the \"\n \"\\\"Change Nickname\\\" permission.\")",
"def set_nickname(self, nickname):\n self.nickname = nickname\n self.tweets_list = TweetsLinkedList(nickname)\n self.tweets_list.create_linked()",
"def nickname(self):\r\n if \"nickname\" in self.data:\r\n return self.data[\"nickname\"]\r\n return None",
"def setName(self, newName):\n self.__username = newName",
"async def _nick(self, nick: str) -> str:\n\n logger.debug(f\"Setting nick to {nick!r}\")\n\n self._target_nick = nick\n\n reply = await self._connection.send(\"nick\", {\"name\": nick})\n data = self._extract_data(reply)\n\n new_nick = data[\"to\"]\n self._target_nick = new_nick\n\n if self._session is not None:\n self._session = self._session.with_nick(new_nick)\n\n logger.debug(f\"Set nick to {new_nick!r}\")\n\n return new_nick",
"async def nick(self, ctx, *, nickname):\n if len(nickname) > 32:\n await ctx.send(\"Nickname must be 32 characters or fewer\")\n return\n await ctx.me.edit(nick=nickname)\n await ctx.send(f\"Nickname changed to {nickname}\")",
"def add_nickname(self, nickname):\n if 'Nicknames' not in self.properties:\n self.properties['Nicknames'] = []\n if (len(self.properties['Nicknames']) == 1 and self.properties['Nicknames'][0].startswith('Temp')):\n self.properties['Nicknames'][0] = nickname.title()\n else:\n self.properties['Nicknames'].append(nickname.title())",
"def get_nickname(self):\n return self._nick",
"def set_username(self, value):\n self.username = value",
"def change_username(self, name):\n self.username = name",
"def add_nickname(self, name):\n if not(name in self.nicknames):\n self.nicknames.append(name)",
"def _switch_nick(self):\n self.nickname = self.firstnick + str(random.randint(1000, 9999))\n self._log(self.botlog, 'Switching to nick %s' % self.nickname)\n self._send('NICK %s' % self.nickname)",
"def nickname():\n return jsonify(name=getRandomLine(nickNamesFile))",
"def get_black_player_nickname(self, obj):\n return obj.black_player.nickname",
"async def set_nick(\n client,\n event,\n user: ('user', 'Who\\'s?'),\n nick: P(str, 'Their new nick', min_length = 1, max_length = 32) = None,\n):\n yield\n await client.user_guild_profile_edit(event.guild, user, nick=nick)\n yield f'{user:f}\\'s nick has been updated'",
"def received_NAME(self, message=None):\n\n\t\tself.player_client.send_message(self.player_model.player.name)",
"def Network_displayName(self, data):\n self.name = data['name']\n self._server.Send_publicInfo()",
"def __str__(self):\n return self.nickname",
"def get_white_player_nickname(self, obj):\n return obj.white_player.nickname",
"def set_username(self, value):\n raise NotImplementedError('set_username')",
"def sendnick(self):\n self._send(\"NICK %s\" % (CONFIG[\"nick\"]))",
"def on_nicknameinuse(self, conn, event) -> None:\n self._nickname += '_'\n conn.nick(self._nickname)",
"async def nick(self, context: SlashContext, user: discord.User, nickname: str = None):\n author = await context.guild.fetch_member(context.author_id)\n if not author.guild_permissions.manage_nicknames:\n embed = discord.Embed(\n title=\"Error!\",\n description=\"You don't have enough permissions to change the nickname of this user.\",\n color=0xE02B2B\n )\n return await context.send(embed=embed)\n member = await context.guild.fetch_member(user.id)\n try:\n await member.edit(nick=nickname)\n embed = discord.Embed(\n title=\"Changed Nickname!\",\n description=f\"**{member}'s** new nickname is **{nickname}**!\",\n color=0x42F56C\n )\n await context.send(embed=embed)\n except:\n embed = discord.Embed(\n title=\"Error!\",\n description=\"An error occurred while trying to change the nickname of the user. Make sure my role is above the role of the user you want to change the nickname.\",\n color=0xE02B2B\n )\n await context.message.channel.send(embed=embed)",
"def update_nick(self, nick):\n if self.nick == nick:\n return\n\n # Update the nick hashmap\n if self.nick:\n self.users.nick_hashmap[self.nick].remove(self)\n self.users.nick_hashmap[nick].append(self)\n\n LOG.info(\"Updating user nick: {} -> {}\".format(self.nick, nick))\n\n self.nick = nick\n\n self.users.modified_callback()",
"def rename(self,newName):\n self.userName = newName",
"async def nick(\n self, context: Context, user: discord.User, *, nickname: str = None\n ) -> None:\n member = context.guild.get_member(user.id) or await context.guild.fetch_member(\n user.id\n )\n try:\n await member.edit(nick=nickname)\n embed = discord.Embed(\n description=f\"**{member}'s** new nickname is **{nickname}**!\",\n color=0x9C84EF,\n )\n await context.send(embed=embed)\n except:\n embed = discord.Embed(\n description=\"An error occurred while trying to change the nickname of the user. Make sure my role is above the role of the user you want to change the nickname.\",\n color=0xE02B2B,\n )\n await context.send(embed=embed)",
"def change_client_name(self, name, client):\n if self.name_is_unique(name):\n client.set_name(name)\n self.send_message('Usuario actualizado exitosamente.', client.get_socket())\n else:\n self.send_message('Nombre repetido.', client.get_socket())"
]
| [
"0.6738245",
"0.6678954",
"0.6415821",
"0.6341075",
"0.6067638",
"0.5979994",
"0.5887139",
"0.5860738",
"0.577463",
"0.57002896",
"0.55578184",
"0.5484942",
"0.54815024",
"0.54040253",
"0.540214",
"0.5382869",
"0.53730917",
"0.53687507",
"0.53608084",
"0.5348472",
"0.53451854",
"0.53218573",
"0.52987385",
"0.5296937",
"0.5293087",
"0.5280001",
"0.5265115",
"0.52576",
"0.51707846",
"0.5148062"
]
| 0.69570684 | 0 |
returns true if password is 10-30 characters long and consists of letters and numbers | def validate_password(password):
if re.match(r"^[a-zA-Z0-9]{10,30}$", password):
return True
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def validate_password(password):\n return isinstance(password, str) and len(password) >= 8 and \\\n re.search(r'[A-Z]', password) and re.search(r'[0-9]', password)",
"def password_validates(password):\n if any(char.isdigit() for char in password) \\\n and any(char.isalpha() for char in password) \\\n and len(password) > 5:\n return True\n else:\n return False",
"def valid_password(password):\n password_regex = re.compile(r\"^.{8,20}$\")\n return password and password_regex.match(password)",
"def is_valid_password(password):\n if len(password) < MIN_LENGTH:\n return False\n return True",
"def valid_password(password):\n val = True\n\n if len(password) < 8:\n val = False\n return val\n\n if not any(char.isdigit() for char in password):\n val = False\n return val\n\n if not any(char.isupper() for char in password):\n val = False\n return val\n\n if not any(char.islower() for char in password):\n val = False\n return val\n\n if val:\n return val",
"def is_valid_password(variable):\n if re.match(r'[A-Za-z0-9@#$%^&+=]{8,}', variable):\n return True\n return False",
"def password_validator(password: str) -> bool:\n uppercase_regex = re.compile(r\"[A-Z]+\")\n lowercase_regex = re.compile(r\"[a-z]+\")\n digit_regex = re.compile(r\"\\d+\")\n\n if len(password) < 8:\n return False\n if not uppercase_regex.findall(password):\n return False\n if not lowercase_regex.findall(password):\n return False\n if not digit_regex.findall(password):\n return False\n else:\n return True",
"def check_length(string):\n if 6 < len(string) < 12:\n return True\n\n print(\"Your password is not between 6 and 12 characters\")\n return False",
"def valid_pass(password):\r\n if len(password) >= 8 and len(password) <= 25 and password != \"forgot\":\r\n return True\r\n\r\n else:\r\n print(\"This is not a vaid password\")\r\n print(\"Password should be between 8 and 25 letters\")\r\n\r\n return False",
"def password_validator(username, password):\n digits = re.search(r'\\d+', password)\n capital_letters = re.search(r'[A-Z]+', password)\n lenght = len(password) > PASSWORD_MIN_LENGTH\n special_symbol = re.search(r'[\\-\\/\\@\\?\\!\\,\\.\\#\\&\\*]+', password)\n\n statement = digits and capital_letters and lenght and special_symbol\n\n if statement:\n return True\n return False",
"def verify_password(password):\n password_reg_exp = re.compile(r\"^.{3,20}$\")\n return password and password_reg_exp.match(password)",
"def is_valid_password(password):\n assert password is not None\n password = str(password)\n return len(password) >= 8 and any(s.islower() for s in password) \\\n and any(s.isupper() for s in password) \\\n and any(s.isdigit() for s in password)",
"def acceptable_password(password):\r\n LOG.debug(\"PASS\")\r\n LOG.debug(password)\r\n\r\n if password is not None:\r\n LOG.debug(len(password))\r\n\r\n if password is None:\r\n return False\r\n\r\n if len(password) < 3:\r\n return False\r\n\r\n return True",
"def check_pwd(password: str) -> bool:\n # if len(password) > 0 and password[0].isdigit():\n # upper: List[Any] = [letter for letter in password if letter.isupper()]\n # lower: List[Any] = [letter for letter in password if letter.islower()]\n # return len(upper) > 1 and len(lower) > 0\n # else:\n # return False\n # Professor's solution\n return len(password) >= 4 \\\n and sum([1 for c in password if c.isupper()]) >= 2 \\\n and sum([1 for c in password if c.islower()]) >= 1 \\\n and password[0].isdigit()",
"def test01_password_length(self):\n self.set_complexity(length=13, numeric=0, upper=0, lower=0, special=0)\n\n invalid = (\n \"A\",\n \"Tr0ub4dor&3\",\n \"!!!!!!!!!!!!\",\n \"Password\",\n \"Password123!\",\n \"admin\",\n )\n self.assertPasswordsInvalid(invalid)\n\n valid = (\n \"correcthorsebatterystaple\",\n \"abcdefghijklmopqrstuvwxyz\",\n \"12345678901234567890\",\n \"!!!!!!!!!!!!!\" \"Password123!___\",\n )\n self.set_passwords(valid)",
"def is_complex(password):\n if len(password) >= 12:\n if any(c.isupper() for c in password):\n if any(c.islower() for c in password):\n if any(c.isdigit() for c in password):\n if any(c in punctuation for c in password):\n return True\n return False",
"def is_valid_password(self, password):\n rex = \"^[a-zA-Z0-9@_+-.]{3,}$\"\n return re.match(rex, password)",
"def validate_password(data):\n\n if \"password\" not in data:\n return False\n password = data[\"password\"]\n if len(password) < 4 or len(password) > 16:\n return False\n\n return True",
"def check_password_strength():\r\n\r\n password_regex = re.compile(r'''(\r\n (?=.*[A-Z]{2})\r\n (?=.*[a-z]{3})\r\n (?=.*[/!@#$%^&_*+'\\\"-?.:;<>,])\r\n (?=.*[0-9])\r\n .{8,}\r\n )''', re.VERBOSE)\r\n\r\n get_password(password_regex)",
"def check(self, password):\n\n if len(password) < self.min_length:\n return False\n\n digits = len(findall(r\"\\d\", password))\n if digits < self.min_digits:\n return False\n\n special_chars = sum(v for k, v in Counter(password).items() if k in punctuation)\n if special_chars < self.min_special:\n return False\n\n alpha_chars = sum(v for k, v in Counter(password).items() if k in ascii_letters)\n if alpha_chars < self.min_alpha:\n return False\n\n upper_chars = sum(\n v for k, v in Counter(password).items() if k in ascii_uppercase\n )\n if upper_chars < self.min_upper:\n return False\n\n lower_chars = sum(\n v for k, v in Counter(password).items() if k in ascii_lowercase\n )\n if lower_chars < self.min_lower:\n return False\n\n if self.check_breaches and check_password(password):\n return False\n\n if self.func and not self.func(password):\n return False\n\n return True",
"def test_valid_password(self):\n pass_field = Field(\"\".join(['x' * (int(self.app.config['MAX_PWD_LEN']))]))\n\n valid_password(None, pass_field)",
"def valid_password(password):\n if password is None: # SQLite integrity check\n return False\n if len(password) < 8: # Arbitrary length minimum\n return False\n return True",
"def check_len(password_length, alphabet_length, numb_length, symb_length):\r\n return (symb_length + alphabet_length + numb_length) == password_length",
"def invalid_password(password):\n special_characters = ['$', '#', '@']\n password = password.replace(\" \", \"\")\n test_conditions = [\n (len(password) >= 8 and len(password) <= 12),\n (any(x.isupper() for x in password) and any(x.islower()\n for x in password)),\n (any(y in password for y in special_characters)\n and any(y.isdigit() for y in password))\n ]\n if all(condition is True for condition in test_conditions):\n return False\n return True",
"def check_pass(text):\r\n\r\n upperRegex = re.compile(r'[A-Z]')\r\n lowerRegex = re.compile(r'[a-z]')\r\n lengthRegex = re.compile(r'.{8,}')\r\n digitRegex = re.compile(r'\\d')\r\n\r\n if not upperRegex.search(text):\r\n return False\r\n elif not lowerRegex.search(text):\r\n return False\r\n elif not lengthRegex.search(text):\r\n return False\r\n elif not digitRegex.search(text):\r\n return False\r\n else:\r\n return True",
"def is_valid_password_v1(password):\n letter_count = sum([x == password[\"letter\"] for x in list(password[\"password\"])])\n return password[\"low\"] <= letter_count <= password[\"high\"]",
"def is_strong(password):\n # at least eight characters long\n if len(password) < 8:\n return False\n\n # contains both uppercase and lowercase characters\n upper = re.compile(r\"[A-Z]\")\n up = upper.search(password)\n lower = re.compile(r\"[a-z]\")\n low = lower.search(password)\n\n # has at least one digit\n digit = re.compile(r'[0-9]')\n dig = digit.search(password)\n\n if up is None or low is None or dig is None:\n return False\n\n return True",
"def secure_passphrase(val: str) -> bool:\n if len(val) < 15:\n return False\n if len([v for v in val if v not in string.ascii_letters]) < 5:\n return False\n\n return True",
"def username_is_valid(username):\n\n if len(username) < MINIMUM_PASSWORD_LENGTH:\n return False\n else:\n return True",
"def password_is_valid(password: str) -> bool:\n pattern = re.compile(\n r'^(?=.{8,32}$)(?=.*[A-Z])(?=.*[a-z])(?=.*[0-9])(?=.*[!\"#$%&\\'()*+-./:;<=>?@[\\]^_`{|} ~,\\\\]).*')\n return pattern.match(password)"
]
| [
"0.8082899",
"0.8026214",
"0.7925359",
"0.7862876",
"0.7859197",
"0.78590196",
"0.780382",
"0.774973",
"0.773781",
"0.77359647",
"0.76997024",
"0.7637889",
"0.76283514",
"0.76140875",
"0.7501108",
"0.74899054",
"0.7432814",
"0.74295545",
"0.74141884",
"0.7379572",
"0.7365518",
"0.7359909",
"0.735625",
"0.72812474",
"0.72786087",
"0.7273086",
"0.726535",
"0.7248264",
"0.7239257",
"0.7230095"
]
| 0.8334488 | 0 |
Iterates over the pages of an API operation's results. Paginators act as an abstraction over the process of iterating over an entire result set of a truncated API operation. Yields an iterable with the response obtained from applying `method`. | def paginate(
client: client, method: str, **kwargs: Dict[str, Any]
) -> Generator[Dict[str, Any], None, None]:
paginator = client.get_paginator(operation_name=method)
for page in paginator.paginate(**kwargs):
yield page | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __iter__(self):\n while self.has_next_page():\n response = self.get_next_page_response()\n for item in self.get_items_from_response(response):\n yield item",
"def iterateAllPaginated(self, resource, func=dict):\n\n res = self.getRequest(resource)\n page = vsdModels.Pagination(**res)\n for item in self.iteratePageItems(page, func):\n yield item",
"def iterate_by_page(response):\n response = response\n while True:\n yield response\n next_page = response.get('paging', {}).get('next', '')\n if not next_page:\n break\n response = json.load(urllib2.urlopen(next_page))",
"def iteratePageItems(self, page, func=dict):\n\n for item in page.items:\n yield func(**item)\n\n if page.nextPageUrl:\n res = self.getRequest(page.nextPageUrl)\n nextPage = vsdModels.Pagination(**res)\n for nextItem in self.iteratePageItems(nextPage, func=func):\n yield nextItem",
"def __iter__(self):\n\t\tif self.is_paginated:\n\t\t\treturn self\n\n\t\traise PaginationError(\"The response is not paginated.\")",
"def __iter__(self):\n return self.paged()",
"def __iter__(self):\n invalid_options = ('limit', )\n if any(x in invalid_options for x in self.options):\n raise ResultException(103, invalid_options, self.options)\n\n try:\n self._page_size = int(self._page_size)\n if self._page_size <= 0:\n raise ResultException(104, self._page_size)\n except ValueError:\n raise ResultException(104, self._page_size)\n\n init_opts = {\n 'skip': self.options.pop('skip', None),\n 'startkey': self.options.pop('startkey', None)\n }\n\n self._call = partial(self._ref, #pylint: disable=attribute-defined-outside-init\n limit=self._real_page_size,\n **self.options)\n\n response = self._call(**{k: v\n for k, v\n in init_opts.items()\n if v is not None})\n\n return self._iterator(response)",
"def iter_call(self, service, method,\r\n chunk=100, limit=None, offset=0, *args, **kwargs):\r\n if chunk <= 0:\r\n raise AttributeError(\"Chunk size should be greater than zero.\")\r\n\r\n if limit:\r\n chunk = min(chunk, limit)\r\n\r\n result_count = 0\r\n kwargs['iter'] = False\r\n while True:\r\n if limit:\r\n # We've reached the end of the results\r\n if result_count >= limit:\r\n break\r\n\r\n # Don't over-fetch past the given limit\r\n if chunk + result_count > limit:\r\n chunk = limit - result_count\r\n results = self.call(service, method,\r\n offset=offset, limit=chunk, *args, **kwargs)\r\n\r\n # It looks like we ran out results\r\n if not results:\r\n break\r\n\r\n # Apparently this method doesn't return a list.\r\n # Why are you even iterating over this?\r\n if not isinstance(results, list):\r\n yield results\r\n break\r\n\r\n for item in results:\r\n yield item\r\n result_count += 1\r\n\r\n offset += chunk\r\n\r\n if len(results) < chunk:\r\n break",
"async def paginate(action: callable, *args, **kwargs) -> dict:\n kwargs.pop(\"cursor\", None); await asyncio.sleep(1)\n\n response = (await action(*args, **kwargs)).data\n\n try:\n kwargs[\"cursor\"] = response[\"response_metadata\"][\"next_cursor\"]\n except KeyError:\n kwargs[\"cursor\"] = str()\n\n yield response\n\n while kwargs[\"cursor\"]:\n\n response = (await action(*args, **kwargs)).data; await asyncio.sleep(1)\n\n try:\n kwargs[\"cursor\"] = response[\"response_metadata\"][\"next_cursor\"]\n except KeyError:\n kwargs[\"cursor\"] = str()\n\n yield response",
"def yield_pages_from_operation(\n exclusive_start_path: KeyPath,\n last_evaluated_path: KeyPath,\n limit_path: KeyPath,\n items_path: KeyPath,\n # whether or not limiting _happens_ is controlled by whether you set a limit in your request dict\n # but if you provide limit_path you must provide items_path and vice-versa,\n # or we won't be able figure out how to create the new limit for each paged request.\n operation: ty.Callable[..., dict],\n # the thing that turns a request into the next page of a response\n request: dict,\n # your basic request\n last_evaluated_callback: LastEvaluatedCallback = None,\n) -> ty.Iterable[dict]:\n assert all((limit_path, items_path)) or not any((limit_path, items_path))\n request = deepcopy(request)\n # we make a copy of your request because we're going to modify it\n # as we paginate but you shouldn't have to deal with that.\n\n get_le = partial(get_at_path, last_evaluated_path)\n set_es = partial(set_at_path, exclusive_start_path)\n get_limit = partial(get_at_path, limit_path)\n set_limit = partial(set_at_path, limit_path)\n get_items = partial(get_at_path, items_path)\n\n # the limiting logic is an add-on and does not have to be used\n starting_limit = 0\n if limit_path:\n assert items_path\n starting_limit = get_limit(request)\n\n limit = starting_limit\n ExclusiveStart: ty.Any = get_le(request) or \"\"\n\n while ExclusiveStart is not None:\n assert limit is None or limit >= 0\n if ExclusiveStart:\n set_es(request, ExclusiveStart)\n if limit:\n set_limit(request, limit)\n page_response = operation(**request)\n last_evaluated = get_le(page_response)\n if last_evaluated_callback:\n # we call your callback for every page, not just the last one.\n last_evaluated_callback(last_evaluated)\n yield page_response # we yield the entire response\n ExclusiveStart = last_evaluated or None\n if starting_limit:\n # a limit was requested\n limit = limit - len(get_items(page_response))\n if limit <= 0:\n # we're done\n ExclusiveStart = None",
"def __paginate_call(client, method, output_key, params=None):\n def is_response_success(response):\n return response['ResponseMetadata']['HTTPStatusCode'] == 200\n\n params = dict() if params is None else params\n params['PaginationConfig'] = dict(PageSize=AWS_PAGE_SIZE)\n\n paginator = client.get_paginator(method)\n responses = list(paginator.paginate(**params))\n\n if not all([is_response_success(r) for r in responses]):\n raise Exception('Error during execution of method {method}'.format(method=method))\n\n responses = [r[output_key] for r in responses]\n return reduce(lambda x, y: x + y, responses)",
"def by_page(self) -> global___Snippet.PaginatedResponseHandling.ByPage:",
"def by_page(self) -> global___Snippet.PaginatedResponseHandling.ByPage:",
"def iter_pages(self) -> Generator[Tuple[Optional[List[dict]], int], None, None]:\n # retrieves the data for the given url\n data_list, response, result = self.retrieve_data(self.url)\n\n if result != GithubApiResult.SUCCESS:\n self.logger.debug(\"Failed to retrieve the data even though 10 attempts were given\")\n yield None, None\n return\n\n # this retrieves the page for the given url\n page_number = get_url_page_number(self.url)\n\n # yields the first page of data and its page number\n yield data_list, page_number\n\n while 'next' in response.links.keys():\n\n # gets the next page from the last responses header\n next_page = response.links['next']['url']\n\n # Here we don't need to pass in params with the page, or the default params because the url from the headers already has those values\n data_list, response, result = self.retrieve_data(next_page)\n\n if result != GithubApiResult.SUCCESS:\n self.logger.debug(f\"Failed to retrieve the data for even though 10 attempts were given. Url: {next_page}\")\n return\n\n page_number = get_url_page_number(next_page)\n\n # if either the data or response is None then yield None and return\n if data_list is None or response is None:\n return\n\n # yield the data from the page and its number\n yield data_list, page_number",
"def _paginated_generator(self, request_args):\n while request_args:\n resp = self._api._session.request(**request_args)\n if not resp.ok:\n raise Basecamp3Error(response=resp)\n link_header = resp.headers.get(\"Link\")\n if link_header:\n next_page_url = self._LINK_HEADER_URL_REGEX.findall(link_header)[0]\n request_args = {'url': next_page_url, 'method': 'GET'} # get ready to call the next page\n else:\n request_args = None # clear it so we break the loop\n items_json = resp.json()\n for jdict in items_json:\n item = self.OBJECT_CLASS(jdict, self) # convert JSON dict into a BasecampObject\n yield item",
"def paginate(client_fun, *args, **kwargs):\n resp = client_fun(*args, **kwargs)\n yield from resp['content']\n total_elements = resp['totalElements']\n page_size = resp['pageSize']\n page_number = resp['pageNumber'] + 1\n if 'page_number' in kwargs:\n kwargs.pop('page_number')\n while page_number * page_size < total_elements:\n resp = client_fun(*args, page_number=page_number, **kwargs)\n yield from resp['content']\n page_number = resp['pageNumber'] + 1",
"def iterResponsePages(service, payload, verbose, slow_down):\n token = 0\n next_page = True\n data = {'reports': []}\n\n\n while next_page:\n if verbose:\n print(f'Fetching rows starting at position: {token}')\n if slow_down > 0:\n time.sleep(slow_down)\n \n data_tmp = service.reports().batchGet(body=payload).execute()\n token = data_tmp.get('reports')[0].get('nextPageToken')\n\n if token != None:\n payload.get('reportRequests')[0].update({'pageToken': token})\n else:\n next_page = False\n payload.get('reportRequests')[0].update({'pageToken': '0'})\n\n for report in data_tmp.get('reports'):\n data.get('reports').append(report)\n\n return data",
"def yield_with_paginator(self, func_command, return_string, filters_req=None):\n if filters_req is None:\n filters_req = {}\n\n for _page in self.client.get_paginator(func_command.__name__).paginate(**filters_req):\n Boto3Client.EXEC_COUNT += 1\n print(\"Executed API Calls Count:\".format(Boto3Client.EXEC_COUNT))\n for response_obj in _page[return_string]:\n yield response_obj",
"def _collect_results(self, request_method, request_args, request_kwargs={}, request_params={}):\n results = []\n cursor = None\n page_params = copy.copy(request_params)\n\n while True:\n if cursor:\n page_params['cursor'] = cursor\n response = request_method(\n *request_args,\n **request_kwargs,\n params=page_params\n )\n _raise_on_error(response)\n response_json = response.json()\n results.extend(response_json['results'])\n if response_json['next']:\n cursor = get_cursor_from_url(response_json['next'])\n else:\n return results",
"def _get_iter(self, url, params):\n for current_page_index in itertools.count():\n result_dict = self._get_page(url, params, current_page_index)\n for document in result_dict['entries']:\n yield document\n if not result_dict['isNextPageAvailable']:\n break",
"def _all_pages(self, page_function, **kwargs) -> Iterator[Iterable]:\n\n next_token = None\n is_truncated = True\n while is_truncated:\n page = page_function(token=next_token, **kwargs)\n next_token = page.next_token\n is_truncated = page.is_truncated and next_token is not None\n for task in page.page_data:\n yield task",
"def iterate_by_item(response):\n response = response\n while True:\n for r in response.get('data', []):\n yield r\n next_page = response.get('paging', {}).get('next', '')\n if not next_page:\n break\n response = json.load(urllib2.urlopen(next_page))",
"def test_pagination(self):\n for page in range(1, 5):\n self._test_one_page(page=page)",
"def _paginatedRequest(allPages, *args):\n data = []\n currentPage = 0\n while True:\n newData = Gw2Spidy._request(*(args + (str(currentPage),)))\n if not allPages:\n return newData['results']\n data.extend(newData['results'])\n currentPage = currentPage + 1\n if newData['page'] == newData['last_page']:\n break\n return data",
"def __iter__(self) -> Generator[Optional[dict], None, None]:\n data_list, response, result = self.retrieve_data(self.url)\n\n if result != GithubApiResult.SUCCESS:\n self.logger.debug(\"Failed to retrieve the data even though 10 attempts were given\")\n yield None\n return\n\n # yield the first page data\n for data in data_list:\n yield data\n\n while 'next' in response.links.keys():\n next_page = response.links['next']['url']\n\n # Here we don't need to pass in params with the page, or the default params because the url from the headers already has those values\n data_list, response, result = self.retrieve_data(next_page)\n\n if result != GithubApiResult.SUCCESS:\n self.logger.debug(\"Failed to retrieve the data even though 10 attempts were given\")\n return\n\n for data in data_list:\n yield data",
"def test_iter_paging(self):\n ref = mock.Mock()\n ref.side_effect = [\n {'rows': [x for x in range(100)]},\n {'rows': []}\n ]\n rslt = Result(ref, page_size=10)\n collection = [x for x in rslt]\n self.assertEqual(len(collection), 100)",
"def _all_offset_pages(self, page_function, **kwargs) -> Iterator[Iterable]:\n\n next_offset = 0\n is_truncated = True\n while is_truncated:\n page = page_function(offset=next_offset, **kwargs)\n next_offset = page.offset + page.limit\n is_truncated = page.total > next_offset\n for data in page.page_data:\n yield data",
"def iter_pages(self):\n for num in range(1, self.pages + 1):\n yield Page(num)",
"def get_paged(self, path, params=None):\n if params:\n params = params.copy()\n else:\n params = dict()\n\n max_page = 0\n page = 1\n while page <= max_page or not max_page:\n results, headers = self.get(path, params=params)\n for result in results:\n yield result\n\n page += 1\n params[\"page\"] = page\n\n if max_page == 0:\n try:\n links = headers[\"Link\"]\n except KeyError:\n # If not present, there is only one page.\n break\n max_page = self._parse_link_header(links)",
"def paginated_call(self) -> global___Snippet.ClientCall:"
]
| [
"0.71955776",
"0.711028",
"0.70803607",
"0.6967728",
"0.694704",
"0.69221467",
"0.69088256",
"0.6870713",
"0.6851433",
"0.681792",
"0.67492384",
"0.6684067",
"0.6684067",
"0.6591044",
"0.6528769",
"0.64971346",
"0.6483189",
"0.64801604",
"0.6460868",
"0.6440734",
"0.64176244",
"0.63793015",
"0.6364898",
"0.6359608",
"0.63491285",
"0.63263124",
"0.6310482",
"0.6307823",
"0.62951887",
"0.6268901"
]
| 0.7691072 | 0 |
Function adds a new paragraph in two files (main + answers) | def input_file_docx(str_write, str_answer):
paragraph = dti.add_paragraph(str_write)
paragraph_format = paragraph.paragraph_format
paragraph_format.space_after = Pt(1.0)
paragraph = dti1.add_paragraph(str_answer)
paragraph_format = paragraph.paragraph_format
paragraph_format.space_after = Pt(1.0) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def m_func1(p1,p2):\r\n if p2==1 :\r\n f=open(\"Mehjabin_Diet.text\",\"a\")\r\n f.write(str(p1))\r\n f.write(\"\\n\")\r\n f.write(input(\" \"))\r\n f.write(\"\\n\")\r\n f.write(\"*********************************************\\n\")\r\n f.close()\r\n else :\r\n\r\n f = open(\"Mehjabin_Exercise.txt\", \"a\")\r\n f.write(str(p1))\r\n f.write(\"\\n\")\r\n f.write(input(\" \"))\r\n f.write(\"\\n\")\r\n f.write(\"*********************************************\\n\")\r\n\r\n f.close()\r\n print(\"You'r daily Report is here...\\n\")\r\n report = int(input(\"which report you want...\\n 1 for Diet \\n 2 for Exercise\\n->>\"))\r\n\r\n if report == 1:\r\n f = open(\"Mehjabin_Diet.text\", \"rt\")\r\n for i in f:\r\n print(i, end=\" \")\r\n f.close()\r\n else:\r\n f = open(\"Mehjabin_Exercise.txt\", \"rt\")\r\n for i in f:\r\n print(i, end=\" \")\r\n f.close()",
"def h_func1(p1,p2):\r\n if p2==1 :\r\n f=open(\"Harry_Diet.text\",\"a\")\r\n f.write(str(p1))\r\n f.write(\"\\n\")\r\n f.write(input(\" \"))\r\n f.write(\"\\n\")\r\n f.write(\"*********************************************\\n\")\r\n\r\n f.close()\r\n else:\r\n f = open(\"Harry_Exercise.txt\", \"a\")\r\n f.write(str(p1))\r\n f.write(\"\\n\")\r\n f.write(input(\" \"))\r\n f.write(\"\\n\")\r\n f.write(\"*********************************************\\n\")\r\n\r\n f.close()\r\n print(\"You'r daily Report is here...\\n\")\r\n report = int(input(\"which report you want...\\n 1 for Diet \\n 2 for Exercise\\n->>\"))\r\n\r\n if report==1 :\r\n f=open(\"Harry_Diet.text\",\"rt\")\r\n for i in f:\r\n print(i,end=\" \")\r\n f.close()\r\n else:\r\n f = open(\"Harry_Exercise.txt\", \"rt\")\r\n for i in f:\r\n print(i, end=\" \")\r\n f.close()",
"def __add(files):\n # Open master.tex and read in lines\n master = open('master.tex', 'r')\n lines = master.readlines()\n master.close()\n\n # Find end notes tag\n try:\n start = lines.index(\"\\\\documentclass{article}\\n\") + 1\n end = lines.index(\"%END NOTES\\n\")\n except:\n cli.log.error(f'Incorrect {emph(\"master.tex\")} file, aborting')\n exit(1)\n\n # Insert files into master.tex\n for file in files:\n line_to_insert = '\\\\input{' \\\n f'{file}' \\\n '}\\n'\n if '.tex' not in file:\n cli.log.warning(f'{emph(file)} isn\\'t a .tex file, copying anyway')\n if lines.__contains__(line_to_insert):\n if not milc.questions.yesno(f'{emph(file)} already exists in {emph(\"master.tex\")}, '\n f'do you want to insert again?'):\n continue\n cli.log.info(f'Inserting {emph(file)} into {emph(\"master.tex\")}')\n if 'preamble.tex' in file:\n lines.insert(start, line_to_insert)\n else:\n lines.insert(end, line_to_insert)\n end += 1\n\n # Write new master.tex\n master = open('master.tex', 'w')\n master.writelines(lines)\n master.close()",
"def s_func1(p1,p2):\r\n if p2==1 :\r\n f=open(\"Sameer_Diet.text\",\"a\")\r\n f.write(str(p1))\r\n f.write(\"\\n\")\r\n f.write(input(\" \"))\r\n f.write(\"\\n\")\r\n f.write(\"*********************************************\\n\")\r\n f.close()\r\n elif p2==2:\r\n\r\n f = open(\"Sameer_Exercise.txt\", \"a\")\r\n f.write(str(p1))\r\n f.write(\"\\n\")\r\n f.write(input(\" \"))\r\n f.write(\"\\n\")\r\n f.write(\"*********************************************\\n\")\r\n\r\n f.close()\r\n else:\r\n print(\"Wrong Input\\try again..\")\r\n\r\n\r\n print(\"You'r daily Report is here...\\n\")\r\n report = int(input(\"which report you want...\\n 1 for Diet \\n 2 for Exercise\\n->>\"))\r\n\r\n if report == 1:\r\n f = open(\"Sameer_Diet.text\", \"rt\")\r\n for i in f:\r\n print(i, end=\" \")\r\n f.close()\r\n elif report==2 :\r\n f = open(\"Sameer_Exercise.txt\", \"rt\")\r\n for i in f:\r\n print(i, end=\" \")\r\n f.close()\r\n else:\r\n print(\" Unacceptable report request...\")",
"def saveline(filename, fi, p1_CG, p2_CG, append=True):\n\n fid = open(filename, 'a' if append else 'w')\n snippet = ' '.join(map(str, [fi, p1_CG, p2_CG]))\n fid.write(snippet + '\\n')\n fid.close()",
"def addContent(text):",
"def test_extend_to_paragraph(self):\n before_b = \"\"\"\\\n Americans live in the most severe weather-prone country on Earth. Each year,\n Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000\n tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly\n weather impacts every American. Communities can now rely on the National Weather\n Service’s StormReady program to help them guard against the ravages of Mother\n Nature.\n\n Some 90% of all presidentially declared disasters are weather related, leading\n to around 500 deaths per year and nearly $14 billion in damage. StormReady, a\n program started in 1999 in Tulsa, OK, helps arm America's communities with the\n communication and safety skills needed to save lives and property– before and\n during the event. StormReady helps community leaders and emergency managers\n strengthen local safety programs.\n\n StormReady communities are better prepared to save lives from the onslaught of\n severe weather through better planning, education, and awareness. No community\n is storm proof, but StormReady can help communities save lives. Does StormReady\n make a difference?\n \"\"\"\n after_b = \"\"\"\\\n Americans live in the most severe weather-prone country on Earth. Each year,\n Americans cope with an average of 10,000 thunderstorms, 2,500 floods, 1,000\n tornadoes, as well as an average of 6 deadly hurricanes. Potentially deadly\n weather impacts every American. Communities can now rely on the National Weather\n Service’s StormReady program to help them guard against the ravages of Mother\n Nature.\n\n Some 90% of all presidentially declared disasters are weather related, leading\n to around 500 deaths per year and nearly $14 billion in damage. StormReady, a\n program started in 1999 in Tulsa, OK, helps arm America's communities with the\n communication and safety skills needed to save lives and property– before and\n during the event. StormReady helps community leaders and emergency managers\n strengthen local safety programs.\n\n StormReady communities are better prepared to save lives from the onslaught of\n severe weather through better planning, education, and awareness. No community\n is storm proof, but StormReady can help communities save lives. Does StormReady\n make a difference?\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"9.0\", \"9.0\"),\n after_sel=(\"8.0\", \"13.33\"),\n command_name=\"extend-to-paragraph\",\n )",
"def test_add_1(self):\n contents = testdata.get_words()\n d = testdata.create_dir()\n ts = {\n \"foo.txt\": [contents],\n }\n ds = d.add(ts)\n path = ds[0]\n self.assertTrue(os.path.isfile(path), \"{} does not exist\".format(path))\n self.assertEqual(contents, path.read_text())",
"def add_paragraph_option():\n extra_content_lst = []\n paragraph = input('Do you want to add another paragraph to your website? [yes]')\n while paragraph == 'yes' or paragraph == '':\n title = input('Title of your paragraph?')\n content = input('Content of your paragraph (single line)')\n images = image_option()\n extra_content_lst.append((ContentInfo(title, content, images, None)))\n paragraph = input('Do you want to add another paragraph to your website? [yes]')\n\n return extra_content_lst",
"def add_experiment(experiment_file, exp_method_template, experiment):\n with open(exp_method_template, 'r') as f:\n exp_text = f.readlines()\n for idx, l in enumerate(exp_text):\n exp_text[idx] = exp_text[idx].replace('EDIT', experiment)\n exp_text[idx] = exp_text[idx].replace('RANDALPHA', experiment)\n with open(experiment_file, 'r') as f:\n text = f.readlines()\n text += exp_text\n with open(experiment_file, 'w') as f:\n f.writelines(text)",
"def make_source_text(input_one, input_two, input_three, output_text):\n # clear out the previous file contents\n open(output_text, 'w').close()\n # copy from three input files based on question answers\n copy_text(input_one, output_text)\n copy_text(input_two, output_text)\n copy_text(input_three, output_text)\n return output_text",
"def add_from_file(writer):\n with open(\"to_test_db\", \"rb\") as reader:\n lines = reader.readlines()\n place = \"out\"\n codes = []\n code = []\n for i, line in enumerate(lines):\n if \"<--NEW CODE-->\" in line:\n if \"lang\" in place:\n codes += [(q_id, ans_id, \"\".join(code), lang)]\n place = \"q_id\"\n elif \"<--language\" in line:\n place = \"lang\"\n lang = line.split(\"<--language=\")[1].split(\"-->\")[0]\n elif \"q_id\" in place:\n q_id = line.split(\"<--question_id=\")[1].split(\"-->\")[0]\n place = \"a_id\"\n elif \"a_id\" in place:\n ans_id = line.split(\"<--ans_id=\")[1].split(\"-->\")[0]\n place = \"code\"\n code = []\n elif \"code\" in place:\n code += [line]\n codes += [(q_id, ans_id, \"\".join(code), lang)]\n for next_id, entry in enumerate(codes):\n (q_id, ans_id, code, lang) = entry\n print next_id, entry\n writer.add_document(question_id=return_unicode(int(q_id)), answer_id=return_unicode(int(ans_id)), code=return_unicode(code), language=return_unicode(lang),code_id=return_unicode(next_id))\n CR_DOCS_DB.insert({\"question_id\": return_unicode(int(q_id)), \"answer_id\": return_unicode(int(ans_id)), \"code\": return_unicode(code), \"language\": return_unicode(lang), \"code_id\": return_unicode(next_id)})\n return len(codes)",
"def main():\n # Get the date and time in a formatted string.\n today = datetime.datetime.today()\n date_formatted = today.strftime(\"%Y-%m-%d\")\n time_formatted = today.strftime(\"%H:%M:%S\")\n\n # Form the file name and path.\n file_name = date_formatted+\"-post.md\"\n file_path = os.path.join(POSTS_DIR, file_name)\n\n # Make the new header.\n header = HEADER_TEMPLATE.format(date_formatted, time_formatted)\n\n with open(file_path, 'w') as f:\n f.write(header)\n \n os.system(EDITOR+\" \"+file_path)",
"def test_add_word_in_file(self):\n pass",
"def generateEndOfQuiz(filename):\n\n with codecs.open(os.path.join(target,filename), 'a', \"utf-8\") as testTemplate:\n testTemplate.write(\"++\")",
"def add(cli):\n __check_in_autonotes_dir()\n\n # File args\n files = cli.config.add.file\n\n # Add the files to master.tex\n __add(files)",
"def main():\n\n args = get_args()\n text = args.text\n mutations = args.mutations\n random.seed(args.seed)\n alpha = ''.join(sorted(string.ascii_letters + string.punctuation))\n\n new_text_line = []\n if os.path.isfile(text):\n print('You said: ', end='')\n for line in open(text, 'rt'):\n new_text_line = list(line.rstrip())\n print(f'\"{line.rstrip()}\"')\n len_text = len(line.rstrip())\n num_mutations = round(len_text * mutations)\n for i in random.sample(range(len_text), num_mutations):\n # print(f'i = {i}, char = {line[i]}, index = {alpha.find(line[i])}')\n # list.index 함수로 색인 위치 찾으면 없을때 error 발생. 있는지 확인하고 쓰던가 아니면 find 함수 쓸 것!!\n # find 반환값 : 색인 위치. 찾을 수 없는 경우 -1 반환\n new_text_line[i] = random.choice(alpha.replace(line[i], '')) # replace 함수는 line[i] 에 해당하는 문자가 alpha에 '있으면' 해당 문자를 치환한다. 없어도 error 안 남.\n print(f'I heard : \"' + ''.join(new_text_line)+ '\"')\n else:\n print(f'You said: \"{text}\"')\n new_text_line = list(text)\n len_text = len(text)\n num_mutations = round(len_text * mutations)\n for i in random.sample(range(len_text), num_mutations):\n new_text_line[i] = random.choice(alpha.replace(text[i], ''))\n print(f'I heard : \"' + ''.join(new_text_line)+ '\"')",
"def link(ctx, note1, note2):\n directory = ctx.obj[\"config\"][\"owner\"][\"dir\"]\n\n note1, note2 = Note(directory, note1), Note(directory, note2)\n\n if note1.filename == note2.filename:\n Utils.display_error(\n \"Cannot create a link between a note and itself.\", \"yellow\")\n\n with open(note1.path, \"a\") as file:\n file.write(\"[{}]({})\\n\".format(note2.filename, note2.filename))\n\n with open(note2.path, \"a\") as file:\n file.write(\"[{}]({})\\n\".format(note1.filename, note1.filename))\n\n click.secho(\"Success! {} <-> {}\".format(note1.filename,\n note2.filename), fg=\"green\")",
"def edit(ctx, name, header):\n\n # create local copies of ctx vaiables for easy access\n path = ctx.obj[\"path\"]\n gitCommand = ctx.obj[\"gitCommand\"]\n\n if(not isdir(path)):\n print(\"No notes directory found at \" + path)\n return\n\n if(header != ''):\n system('touch ' + path + '/' + name)\n system('echo ' + header + \" > \" + path + '/' + name)\n\n system('$EDITOR ' + path + \"/\" + name)\n system(gitCommand + \"add .\")\n system(gitCommand + \"commit -m 'edited \" + name + \"'\")",
"def new(cli):\n __check_in_autonotes_dir()\n\n # Filename\n filename = cli.config.new.filename\n file_date = datetime.now().strftime(\"%Y%m%d\")\n today = datetime.now().strftime(\"%b %d\")\n if not cli.config.new.no_date:\n filename += f'-{file_date}'\n filename += '.tex'\n if os.path.exists(filename):\n cli.log.error(f'File {emph(filename)} already exists.')\n exit(1)\n\n # Note title\n title = cli.config.new.title\n if title == '':\n title = f'Untitled {cli.config.new.filename}'\n\n # Open file for writing\n file = open(filename, 'w')\n cli.log.info(f'Created {emph(filename)}')\n new_note_str = f'% {filename}\\n\\n' \\\n '\\\\section{' \\\n f'{title.replace(\"_\", \" \")}' \\\n '}\\n' \\\n '\\\\marginnote{' \\\n f'{today}' \\\n '\\\\index{' \\\n f'{today}' \\\n '}}[.2cm]\\n'\n file.write(new_note_str)\n file.close()\n\n # Add the new file to master.tex\n __add([filename])",
"def test_add(self):\n self._create_db()\n self._create_config()\n self._create_dmsdir()\n doc = os.path.join(testDataPath, \"simplepdf.pdf\")\n self.io.clear()\n self.io.addinput('2015')\n self.io.addinput('12')\n self.io.addinput('31')\n sys.argv = [\"prog\", \"add\", doc]\n easydms.cli.main()\n\n self.io.clear()\n self.io.addinput('2015')\n self.io.addinput('11')\n self.io.addinput('15')\n self.io.addinput('')\n self.io.addinput('')\n self.io.addinput('')\n easydms.cli.main()",
"def file(self, sentence):\n req = open(\"python.py\" , 'w+')\n req.write(sentence)\n req.close()",
"def add_note():\n pass",
"def add_paragraph(self, text: str) -> None:\n\n tag = r'\\par ' + text\n self.doc = self.doc + tag\n # self.add_space()",
"def add():\n\ttry:\n\t task = sys.argv[2]\n\t file = open(\"todo.txt\", \"a\")\n\t file.write(task + \"\\n\")\n\t print('Added todo: \"{}\"'.format(task))\n\texcept IndexError:\n\t print(\"Error: Missing todo string. Nothing added!\")",
"def write_to_txt(self):\r\n file = open(self.output_path, 'w')\r\n for question_id in self.question_ids:\r\n file.write(self.questions[question_id].question_string+str(self.questions[question_id].answer)+'\\n')\r\n file.close()",
"def append_write(filename=\"\", text=\"\"):\n with open(filename, \"a\", encoding=\"utf-8\") as file1:\n return file1.write(text)",
"def add_blogpost(manuscript, subject, url):\n line_number = 0\n with open(manuscript, \"r\") as file:\n lines = file.readlines()\n for line in lines:\n if (\"## ブロマガ全集\" in line):\n lines.insert(line_number + 2, f\"- [{subject}]({url})\\n\")\n with open(manuscript, \"w\") as file:\n file.writelines(lines)\n print(\"Add:\", subject)\n return 0\n line_number += 1",
"def add_file_to_case(self, file_):\n\n cur = self.app.conn.cursor()\n text_len = 0\n if file_[2] is not None:\n text_len = len(file_[2]) - 1\n link = {'caseid': self.case['caseid'], 'fid': file_[0], 'pos0': 0,\n 'pos1': text_len, 'owner': self.app.settings['codername'],\n 'date': datetime.datetime.now().astimezone().strftime(\"%Y-%m-%d %H:%M:%S\"), 'memo': \"\"}\n\n # Check for an existing duplicated linked file first\n cur.execute(\"select * from case_text where caseid = ? and fid=? and pos0=? and pos1=?\",\n (link['caseid'], link['fid'], link['pos0'], link['pos1']))\n result = cur.fetchall()\n if len(result) > 0:\n msg = _(\"This file has already been linked to this case \") + file_[1] + \"\\n\"\n return msg\n # Even non-text files can be assigned to the case here\n sql = \"insert into case_text (caseid, fid, pos0, pos1, owner, date, memo) values(?,?,?,?,?,?,?)\"\n cur.execute(sql, (link['caseid'], link['fid'], link['pos0'], link['pos1'],\n link['owner'], link['date'], link['memo']))\n self.app.conn.commit()\n msg = file_[1] + _(\" added to case.\") + \"\\n\"\n\n # Update table entry assigned to Yes\n rows = self.ui.tableWidget.rowCount()\n for row in range(0, rows):\n fid = int(self.ui.tableWidget.item(row, 0).text())\n if fid == file_[0]: # file_[0] is fid\n item = QtWidgets.QTableWidgetItem(_(\"Yes\"))\n item.setFlags(QtCore.Qt.ItemFlag.ItemIsSelectable | QtCore.Qt.ItemFlag.ItemIsEnabled)\n self.ui.tableWidget.setItem(row, 2, item)\n return msg",
"def add_doc(writer, entry):\n m = 0\n q_id = entry['Id']\n snippets = get_possible_snippets(q_id, False)\n for code, lang, ans_id in snippets:\n writer.add_document(question_id=q_id, answer_id=ans_id, code=return_unicode(code), language=return_unicode(lang))\n m += 1\n return m"
]
| [
"0.6011007",
"0.59650946",
"0.59082407",
"0.5866411",
"0.57768893",
"0.5753639",
"0.5731659",
"0.5672916",
"0.56459105",
"0.5576199",
"0.54995733",
"0.5464282",
"0.5458846",
"0.545788",
"0.5384909",
"0.5362748",
"0.5355174",
"0.5349958",
"0.53219575",
"0.53206825",
"0.53050077",
"0.5302342",
"0.52893203",
"0.5286487",
"0.52643573",
"0.52633655",
"0.52617633",
"0.5250744",
"0.5240691",
"0.52241504"
]
| 0.6053899 | 0 |
Authorizes the outbrain object. If a token exists in the yaml, the outbrain object gets the attribute "token" with the token string. If the token was generated more than 28 days ago, another token is requested and added to the outbrain object as the new token attribute. If the token is still valid, no new token is requested. | def authorize(outb, creds):
try:
outb.token = creds["token"]
token_gen_date = datetime.datetime.strptime(creds["token_generated_on"], "%Y-%m-%d__%H_%M_%S")
# Convert token generated date to datetime object for comparison
if (datetime.datetime.now() - datetime.timedelta(days=28)) > token_gen_date:
print("Token was created more than 28 days ago, re-authorizing...")
outb.token = outb.get_token(outb.user, outb.password)
creds["token"] = outb.token
creds["token_generated_on"] = datetime.datetime.now().strftime("%Y-%m-%d__%H_%M_%S")
with open("outbrain.yml", "w") as f:
yaml.dump(creds, f, default_flow_style=False)
else:
print("Token was created less than 28 days ago, no authorization needed. Continuing...")
except KeyError:
outb.token = outb.get_token(outb.user, outb.password)
creds["token"] = outb.token
creds["token_generated_on"] = datetime.datetime.now().strftime("%Y-%m-%d__%H_%M_%S")
with open("outbrain.yml", "w") as f:
yaml.dump(creds, f, default_flow_style=False) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def UserToken(self) -> object:",
"def auth_token(self):",
"def authorize(event, context):\n token = event['authorizationToken']\n log.debug(\"Token: {}\".format(token))\n principalId = token\n context = {\n 'simpleAuth': True,\n }\n\n table = dynamodb.Table(os.environ['ACCESSTOKENS_TABLE'])\n dbresponse = table.scan(\n FilterExpression=Attr('token').eq(token)\n )\n if len(dbresponse['Items']) == 1:\n if dbresponse['Items'][0]['enabled'] == True:\n policy = generatePolicy('allow', event['methodArn'])\n context['user'] = dbresponse['Items'][0]['name']\n else:\n policy = generatePolicy('deny', event['methodArn'])\n else:\n # Check if metasmoke has a new token matching this one\n url = \"https://metasmoke.erwaysoftware.com/smoke_detector/check_token/{}\".format(token)\n with urlopen(url) as response:\n ms_response = json.load(response)\n if ms_response[\"exists\"]:\n # Add the token to our table\n \n item = {\n 'token': token,\n 'name': ms_response[\"location\"],\n 'created_at': ms_response[\"created_at\"],\n 'modified_by': ms_response[\"owner_name\"],\n 'modified_at': ms_response[\"updated_at\"],\n 'enabled': True\n }\n\n table.put_item(Item=item)\n\n # Allow the requests\n policy = generatePolicy('allow', event['methodArn'])\n context['user'] = item['name']\n else:\n # No token matches. Deny the request\n policy = generatePolicy('deny', event['methodArn'])\n\n response = {\n 'principalId': principalId,\n 'policyDocument': policy,\n 'context': context\n }\n log.debug(response)\n return response",
"def look_up_a_token():\n try:\n data = request.get_json(force=True)\n except Exception:\n data = None\n if data:\n tok = data['token']\n else:\n tok = request.headers.get('TOK_ID')\n request.data\n\n try:\n creation_time = int(round(datetime.timestamp(tokens[tok]), 0))\n issue_time = tokens[tok].isoformat()\n except Exception:\n _now = datetime.now(UTC)\n creation_time = int(round(datetime.timestamp(_now)))\n issue_time = _now.isoformat()\n tokens[tok] = _now\n expire_time = datetime.fromtimestamp(creation_time + 2764790)\n\n return jsonify({\n \"data\": {\n \"accessor\": \"8609694a-cdbc-db9b-d345-e782dbb562ed\",\n \"creation_time\": creation_time,\n \"creation_ttl\": 2764800,\n \"display_name\": \"fooname\",\n \"entity_id\": \"7d2e3179-f69b-450c-7179-ac8ee8bd8ca9\",\n \"expire_time\": expire_time.isoformat(),\n \"explicit_max_ttl\": 0,\n \"id\": tok,\n \"identity_policies\": [\n \"dev-group-policy\"\n ],\n \"issue_time\": issue_time,\n \"meta\": {\n \"username\": \"tesla\"\n },\n \"num_uses\": 0,\n \"orphan\": True,\n \"path\": \"auth/kubernetes/login\",\n \"policies\": [\n \"default\"\n ],\n \"renewable\": True,\n \"ttl\": 2764790\n }\n })",
"def __init__(self, token):\n self.token = token\n self.time_of_blacklisting = datetime.now()",
"def authenticate( self ):\n\n print(\"Getting new token\")\n self.getFrob()\n self.getAuthKey()\n self.getToken()\n self.cacheToken()",
"def __init__(self, token):\n self.token = token",
"def __init__(self, token):\n self.token = token",
"def __init__(self, token):\n self.token = token",
"def __init__(self, token):\n\n self.token = token",
"def for_user(self, a_token, a_secret):\n\t\tself.a_token = a_token\n\t\tself.a_secret = a_secret",
"def token_updater(token):\n try:\n with open(self.OAUTH_TOKEN_PATH, 'w') as f:\n json.dump(token, f)\n except Exception as err:\n log.Error('Could not save the OAuth2 token to %s. This means '\n 'you may need to do the OAuth2 authorization '\n 'process again soon. Original error: %s' % (\n self.OAUTH_TOKEN_PATH, err))",
"def get_new_token(self):\n # Save result of this API call into self instance __token\n self.__token = apidnac.ApiDNAC.api_get_token()\n # Save result to the defined parameter (\"token\") in file cache_config\n self.save_param('token', self.__token)\n # Return self instance __token\n return self.__token",
"def save_token(self):\n if self.token is None:\n raise ValueError(\n \"You must set the \\\"token\\\" first.\"\n )\n\n config.update(\n outlook_token=self.token,\n )\n\n return True",
"async def validate_token(self, token):",
"def check_token(token: str, secret: str | List[str], max_age_seconds: int = 60 * 60 * 24) -> Any:\n return URLSafeTimedSerializer(secret).loads(token, max_age=max_age_seconds, salt=\"token\")",
"def test_replace_o_auth_authorize_token(self):\n pass",
"def _authorize(self, token=None, store_token=False, reenter_token=False): # pragma: no cover\n\n if token is None and \"MAST_API_TOKEN\" in os.environ:\n token = os.environ[\"MAST_API_TOKEN\"]\n\n if token is None:\n token = keyring.get_password(\"astroquery:mast.stsci.edu.token\", \"masttoken\")\n\n if token is None or reenter_token:\n auth_server = conf.server.replace(\"mast\", \"auth.mast\")\n auth_link = auth_server + \"/token?suggested_name=Astroquery&suggested_scope=mast:exclusive_access\"\n info_msg = \"If you do not have an API token already, visit the following link to create one: \"\n log.info(info_msg + auth_link)\n token = getpass(\"Enter MAST API Token: \")\n\n # store password if desired\n if store_token:\n keyring.set_password(\"astroquery:mast.stsci.edu.token\", \"masttoken\", token)\n\n self._session.headers[\"Accept\"] = \"application/json\"\n self._session.cookies[\"mast_token\"] = token\n info = self.session_info(silent=True)\n\n if not info[\"anon\"]:\n log.info(\"MAST API token accepted, welcome %s\" % info[\"attrib\"].get(\"display_name\"))\n else:\n log.warn(\"MAST API token invalid!\")\n\n return not info[\"anon\"]",
"def __init__(self, token):\r\n self.apiroot = 'https://api-ssl.bitly.com/v3'\r\n\r\n self.access_token = token\r\n self.add_filter(self.add_authorization)",
"def test_read_o_auth_authorize_token(self):\n pass",
"def test_create_o_auth_authorize_token(self):\n pass",
"def validate_token():\n try:\n token = validate_auth()\n except Unauthorized:\n return jsonify(valid=False, expires_in=0)\n expires = oidc.user_getfield('exp')\n delta = expires - datetime.now().timestamp()\n return jsonify(valid=True, expires_in=delta)",
"def load_token(self):\n token = None\n\n if config.outlook_token:\n token = self.token_constructor(config.outlook_token)\n\n return token",
"def __init__(self):\n self.token = None\n self.login()",
"def __call__(self, context, callback):\r\n\r\n callback((('authorization', 'Bearer ' + self.token_hash ),), None)",
"def save_bearer_token(self, token, request, *args, **kwargs):\n log.debug('Save bearer token %r', token)\n self._tokensetter(token, request, *args, **kwargs)\n return request.client.default_redirect_uri",
"def __init__(self, authenticator, access_token, expires_in, scope):\n super(ImplicitAuthorizer, self).__init__(authenticator)\n self._expiration_timestamp = time.time() + expires_in\n self.access_token = access_token\n self.scopes = set(scope.split(' '))",
"def _set_token(self) -> None:\n if 'token' in self.params['user'].keys():\n logger.debug('user token already set')\n else:\n logger.debug('setting user token')\n\n elems = [e.get_attribute('href') for e in self._driver.find_elements_by_xpath(\"//a[@href]\")]\n elems = [e for e in elems if 'editwh.php' in e]\n try:\n match = re.search(pattern=r'(?<=ee\\=)\\d*(?=\\&e)', string=elems[0])\n except IndexError:\n raise IndexError('source of html page has no href with token')\n\n if match:\n self.params['user']['token'] = match[0]\n else:\n raise ValueError('did not extract token from %s', elems[0])",
"def save(self, *args, **kwargs):\n if not self.id:\n self.api_key = self.__generate_key(self.__api_key_length)\n self.api_secret = self.__generate_key(self.__api_secret_length)\n super(Token, self).save(*args, **kwargs)",
"def authenticate(self):\n # Check if we already have access token and secret\n if not os.path.exists(self.sTOKEN_FILE):\n # 1) Obtain Request token\n oauth = OAuth1(self.apiKey, client_secret=self.apiKeySecret, callback_uri='oob')\n r = requests.post(url=self.sREQUEST_TOKEN_URL, auth=oauth)\n credentials = parse_qs(r.content)\n resource_owner_key = credentials.get('oauth_token')[0]\n resource_owner_secret = credentials.get('oauth_token_secret')[0]\n\n # 2) Obtain authorization for the user to access resources\n # Redirect the user to /authorize and get the callback\n authorize_url = self.sAUTHORIZE_URL + '?oauth_token=' + resource_owner_key + \\\n '&oauth_consumer_key=' + self.apiKey + \\\n '&Access=Full&Permissions=Modify'\n\n print 'Please go here and authorize,', authorize_url\n verifier = raw_input('Please enter the six-digit PIN code: ')\n\n # 3) Obtain final access token\n oauth = OAuth1(self.apiKey, client_secret = self.apiKeySecret,\n resource_owner_key = resource_owner_key,\n resource_owner_secret = resource_owner_secret,\n verifier=verifier)\n r = requests.post(url=self.sACCESS_TOKEN_URL, auth=oauth)\n\n credentials = parse_qs(r.content)\n access_token = credentials.get('oauth_token')[0]\n access_token_secret = credentials.get('oauth_token_secret')[0]\n\n # Store access token so we can use it later\n with open(self.sTOKEN_FILE, 'w') as f:\n json.dump({'access_token': access_token,\n 'access_token_secret': access_token_secret}, f)\n\n else:\n with open(self.sTOKEN_FILE, 'r') as f:\n tokens = json.load(f)\n access_token = tokens.get('access_token')\n access_token_secret = tokens.get('access_token_secret')\n\n # store the file access token details for use in other methods\n self.accessToken = access_token\n self.accessTokenSecret = access_token_secret"
]
| [
"0.61070186",
"0.59754103",
"0.5947583",
"0.5901249",
"0.5759543",
"0.55899185",
"0.5571527",
"0.5571527",
"0.5571527",
"0.55064756",
"0.5453319",
"0.5414617",
"0.5396474",
"0.53617793",
"0.5358859",
"0.5350986",
"0.53467685",
"0.53420526",
"0.53314936",
"0.53241503",
"0.53237593",
"0.53147334",
"0.5298698",
"0.52960515",
"0.529068",
"0.52732205",
"0.52713645",
"0.5271229",
"0.52633226",
"0.52518326"
]
| 0.75723505 | 0 |
Returns a list of dicts with all the campaign ids and names for the marketer_id | def get_camp_ids_names_containing_str(marketer_id, string):
all_campaigns = outb.get_campaigns_per_marketer(marketer_id).get(marketer_id[0])
return [{"id": x.get("id"), "name": x.get("name")} for x in all_campaigns if string in x["name"]] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_campaign_name_list(self):\n campaigns = self.find('campaigns', {})\n campaign_names = []\n for campaign in campaigns:\n if 'name' in campaign:\n campaign_names.append(campaign['name'])\n return campaign_names",
"def get_camp_ids_containing_str(marketer_id, string):\n all_campaigns = outb.get_campaigns_per_marketer(marketer_id).get(marketer_id[0])\n return [x.get(\"id\") for x in all_campaigns if string in x[\"name\"]]",
"def get_drip_campaigns(self):\n return list(DripCampaign.objects(user_id=self.user_id))",
"def get_all_camapaign_stats_data(campaign_id):\n all_campaign_stats_data = []\n all_campaign_stats = Contribution.query.filter_by(campaign_id=campaign_id).all()\n for campaign_stat in all_campaign_stats:\n campaign_stat_data = {}\n campaign_stat_data['username'] = campaign_stat.username\n campaign_stat_data['file'] = campaign_stat.file\n campaign_stat_data['edit_type'] = campaign_stat.edit_type\n campaign_stat_data['edit_action'] = campaign_stat.edit_action\n campaign_stat_data['country'] = campaign_stat.country\n campaign_stat_data['depict_item'] = campaign_stat.depict_item\n campaign_stat_data['depict_prominent'] = campaign_stat.depict_prominent\n campaign_stat_data['caption_text'] = campaign_stat.caption_text\n campaign_stat_data['caption_language'] = campaign_stat.caption_language\n campaign_stat_data['date'] = campaign_stat.date\n all_campaign_stats_data.append(campaign_stat_data)\n return all_campaign_stats_data",
"def getAllCampaigns(service):\n # Using AWQL to retrieve campaigns.\n query = (adwords.ServiceQueryBuilder()\n .Select('Id', 'Name', 'Status', 'StartDate', 'EndDate',\n 'BudgetId', 'BudgetStatus', 'BudgetName', 'Amount',\n 'BudgetReferenceCount', 'IsBudgetExplicitlyShared')\n .Limit(0, pageSize)\n .Build())\n campaigns = []\n for page in query.Pager(service):\n if page['entries']:\n for campaign in page['entries']:\n campaigns.append(campaign)\n else:\n pass\n return campaigns",
"def _get_campaigns(self, params):\n return self._api.account.get_campaigns(params={**params, **self._state_filter()}, fields=[self.state_pk])",
"def get_campaign_stats(self,campaign_id):\n campaign = Campaign(campaign_id)\n fields = ['account_name',\n 'campaign_name',\n 'clicks',\n 'cpc',\n 'reach',\n 'ctr',\n 'frequency',\n 'impressions',\n 'cpm',\n 'relevance_score']\n #Need to create a new list for data\n data = []\n \n #Grab lifetime campaign stats to find start and end date of campaign and convert to datetime formate\n insights = campaign.get_insights(fields=fields, params={'date_preset':'lifetime'})\n date_begin = datetime.datetime.strptime(insights[0]['date_start'], \"%Y-%m-%d\")\n date_end = datetime.datetime.strptime(insights[0]['date_stop'], \"%Y-%m-%d\")\n date_diff = datetime.timedelta(days=25)\n new_date = date_begin + date_diff\n\n #Pass in these values to the api\n api_date_first = str(date_begin).split()[0]\n api_date_last = str(new_date).split()[0]\n\n #Strange API limitation where you can only grab 25 values at a time. \n while date_begin < date_end:\n insights = campaign.get_insights(fields=fields, params={ 'time_range':{'since':api_date_first, 'until':api_date_last}, 'time_increment':1})\n insights = list(insights)\n date_begin = new_date \n new_date = date_begin + date_diff\n api_date_first = api_date_last\n api_date_last = str(new_date).split()[0]\n data += insights\n\n return data",
"def find_campaigns_as_caller(caller):\n\n \"\"\"Get Campaigns for Caller\"\"\"\n campaigns_as_caller = caller.campaigns_as_caller.filter(\n status__in=[x.value[0] for x in call_campaign_statuses_for_caller],\n ).order_by('-date_created')\n\n \"\"\"Check Call Tool Feature Access for Campaigns\"\"\"\n campaigns = [x for x in campaigns_as_caller if has_call_feature_access_for_local_group(\n x.local_group\n )]\n\n return campaigns",
"def get_campaign_data(ad_account: adaccount.AdAccount) -> {}:\n logging.info('get campaign data for account {}'.format(ad_account['account_id']))\n campaigns = ad_account.get_campaigns(\n fields=['id',\n 'name',\n 'adlabels'],\n params={'limit': 1000,\n 'status': ['ACTIVE',\n 'PAUSED',\n 'ARCHIVED']})\n result = {}\n\n for campaign in campaigns:\n result[campaign['id']] = {'name': campaign['name'],\n 'attributes': parse_labels(\n campaign.get('adlabels', []))}\n return result",
"def get_market_info(\n self, market_id: str\n ) -> Tuple[str, datetime, Dict[int, str]]:\n market_filter_ = market_filter(market_ids=[market_id])\n\n market = (\n self._client\n .betting\n .list_market_catalogue(\n filter=market_filter_,\n market_projection=['MARKET_START_TIME', 'RUNNER_DESCRIPTION']\n )[0]\n )\n\n market_name = market.market_name\n market_start_time = market.market_start_time\n\n selections = {}\n for runner in market.runners:\n selections[runner.selection_id] = runner.runner_name\n\n return market_name, market_start_time, selections",
"def list_campaigns(self, **kwargs) -> ApiResponse:\n return self._request(kwargs.pop('path'), params=kwargs)",
"def get_adcampaign_detail(self, account_id, campaign_id, date_preset):\n campaign_fields = [\n 'name', 'campaign_status', 'daily_budget', 'lifetime_budget',\n 'start_time', 'end_time']\n campaign_data_columns = [\n 'campaign_name', 'reach', 'frequency', 'clicks',\n 'actions', 'total_actions', 'ctr', 'spend']\n adgroup_data_columns = [\n 'campaign_id', 'campaign_name', 'adgroup_id', 'adgroup_name',\n 'reach', 'frequency', 'clicks', 'ctr', 'actions', 'cpm', 'cpc',\n 'spend']\n demographic_data_columns = [\n 'campaign_id', 'reach', 'frequency', 'clicks', 'actions', 'spend',\n 'cpc', 'cpm', 'ctr', 'cost_per_total_action', 'age', 'gender']\n placement_data_columns = [\n 'campaign_id', 'reach', 'frequency', 'clicks', 'actions', 'spend',\n 'cpc', 'cpm', 'ctr', 'cost_per_total_action', 'placement']\n campaign_filters = [{\n 'field': 'campaign_id', 'type': 'in', 'value': [campaign_id]}]\n batch = [\n self.get_adaccount(account_id, ['currency'], batch=True),\n self.get_adcampaign(campaign_id, campaign_fields, batch=True),\n self.get_adreport_stats(\n account_id, date_preset, 'all_days', campaign_data_columns,\n campaign_filters, ['action_type'], True),\n self.get_adreport_stats(\n account_id, date_preset, 1, campaign_data_columns,\n campaign_filters, None, True),\n self.get_adreport_stats(\n account_id, date_preset, 'all_days', adgroup_data_columns,\n campaign_filters, None, True),\n self.get_adreport_stats(\n account_id, date_preset, 'all_days', demographic_data_columns,\n campaign_filters, None, True),\n self.get_adreport_stats(\n account_id, date_preset, 'all_days', placement_data_columns,\n campaign_filters, None, True),\n ]\n return self.make_batch_request(batch)",
"def get_targeted_campaign_ids(client, customer_id, resource_name):\n ga_service = client.get_service(\"GoogleAdsService\")\n\n query = \"\"\"\n SELECT\n campaign.id,\n campaign_extension_setting.extension_feed_items\n FROM campaign_extension_setting\n WHERE\n campaign_extension_setting.extension_type = 'PROMOTION'\n AND campaign.status != 'REMOVED'\"\"\"\n\n stream = ga_service.search_stream(customer_id=customer_id, query=query)\n\n campaign_ids = []\n\n for batch in stream:\n for row in batch.results:\n feed_items = row.campaign_extension_setting.extension_feed_items\n if resource_name in feed_items:\n print(f\"Found matching campaign with ID: '{row.campaign.id}'\")\n campaign_ids.append(row.campaign.id)\n\n return campaign_ids",
"def list_suppliers_with_id(self):\n data = {}\n with Transaction().start(DBNAME, 1):\n partylist = self.Party.search([('categories', '=', self.category)])\n for i in partylist:\n data[i.pan] = i.name\n return data",
"def test_get_existent_campaigns_returns_campaigns_list(self):\n test_campaign = return_canned_campaign()\n test_campaign.create()\n response = self.client.get(self.endpoint_url)\n response_body = response.get_json()\n self.assertEqual(response.status_code, 200)\n self.assertEqual(\n response_body, {\"campaigns\": [{\"id\": 1, \"name\": \"Test Campaign\"}]}\n )",
"def get_all_companies_and_people():",
"def get_available_companies(team):",
"def get_adcampaign_list(self, account_id):\n fields = 'id, name, campaign_status, start_time, end_time, ' \\\n 'daily_budget, lifetime_budget, budget_remaining'\n batch = [\n self.get_adaccount(account_id, ['currency'], batch=True),\n self.get_adcampaigns(account_id, fields, batch=True),\n self.get_stats_by_adcampaign(account_id, batch=True),\n ]\n return self.make_batch_request(batch)",
"def as_dict(self):\n d = {}\n for name, competition, sid in self.get_queryset().values_list('name', 'competition', 'id'):\n d[(name, competition)] = sid\n return d",
"def get_available_companies_and_people(team):",
"def get_campaign(self, campaign_id: str) -> dict:\n return self.http_request(\"GET\", f'/campaign/{campaign_id}')",
"def get_campaign_ids(self, file):\n pass",
"def get_campaign_info(self, id):\n logger.info(\"Function call: get_campaign_info from: {}\".format(id, ))\n return self.__handle_error(\"Empty campaign id\") if not id else self.__handle_result(self.__send_request('campaigns/{}'.format(id, )))",
"def clubs(self):\n catalog = getToolByName(self.context, 'portal_catalog')\n\n return [dict(url=club.getURL(), title=club.Title, sport=club.Sport,\n address=club.Description) for club in\n catalog({'object_provides': IClub.__identifier__,\n 'path': dict(query='/'.join(self.context.getPhysicalPath()),\n depth=1), 'sort_on': 'sortable_title'})]",
"def list_campaigns(self, interval: str, page: str = None, limit: str = None) -> dict:\n params = remove_empty_elements({\"interval\": interval,\n \"page\": page,\n \"size\": limit,\n \"format\": \"json\"})\n return self.http_request(\"GET\", '/campaign/ids', params=params)",
"def get_companies(self):\n url = 'companies'\n result = self.get(url)\n return result['companies']",
"def get_companies_and_people(team):",
"def get_campaign_country_data(campaign_id):\n # Holds contribution countries for campaign with id: campaign_id\n contribution_countries = []\n\n # Holds all countries and images imaproved per country\n all_country_statistics_data = []\n\n # We get all the campaign contributions\n campaign_contributions = Contribution.query.filter_by(campaign_id=campaign_id).all()\n # We then iterate to get the countries\n for contribution in campaign_contributions:\n if contribution.country != \"\":\n contribution_countries.append(contribution.country)\n contribution_countries = set(contribution_countries)\n\n for country in contribution_countries:\n country_stats_data = {\n 'country': country,\n 'images_improved': get_country_improved_file_count(campaign_contributions, country)\n }\n all_country_statistics_data.append(country_stats_data)\n all_country_statistics_data = sorted(all_country_statistics_data,\n key=itemgetter('images_improved'), reverse=True)\n\n for country_stats_data in all_country_statistics_data:\n country_stats_data['rank'] = get_country_ranking(all_country_statistics_data, country_stats_data['country'])\n return all_country_statistics_data",
"def get_campaigns(self, uuid=None):\n params = self._build_params(uuid=uuid)\n return self._get_query('campaigns', params, Campaign)",
"def get_buyer_emails():\n sales_data = data_manager.get_table_from_file(\"sales/sales.csv\")\n return {(crm.get_name_by_id(row[CUSTOMER_ID]), crm.get_email_by_id(row[CUSTOMER_ID])) for row in sales_data}"
]
| [
"0.6336228",
"0.5713501",
"0.54453236",
"0.5416744",
"0.5313876",
"0.5302328",
"0.527812",
"0.52746725",
"0.52378",
"0.52117795",
"0.51517314",
"0.5144151",
"0.5099462",
"0.50789464",
"0.5026485",
"0.50255877",
"0.5013553",
"0.4979689",
"0.49762148",
"0.49588078",
"0.48971602",
"0.4890019",
"0.4886197",
"0.4877456",
"0.487181",
"0.48579037",
"0.4844011",
"0.4840046",
"0.483385",
"0.4830362"
]
| 0.60787266 | 1 |
Returns a list of campaign IDs which contain a given string | def get_camp_ids_containing_str(marketer_id, string):
all_campaigns = outb.get_campaigns_per_marketer(marketer_id).get(marketer_id[0])
return [x.get("id") for x in all_campaigns if string in x["name"]] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_camp_ids_names_containing_str(marketer_id, string):\n all_campaigns = outb.get_campaigns_per_marketer(marketer_id).get(marketer_id[0])\n return [{\"id\": x.get(\"id\"), \"name\": x.get(\"name\")} for x in all_campaigns if string in x[\"name\"]]",
"def get_targeted_campaign_ids(client, customer_id, resource_name):\n ga_service = client.get_service(\"GoogleAdsService\")\n\n query = \"\"\"\n SELECT\n campaign.id,\n campaign_extension_setting.extension_feed_items\n FROM campaign_extension_setting\n WHERE\n campaign_extension_setting.extension_type = 'PROMOTION'\n AND campaign.status != 'REMOVED'\"\"\"\n\n stream = ga_service.search_stream(customer_id=customer_id, query=query)\n\n campaign_ids = []\n\n for batch in stream:\n for row in batch.results:\n feed_items = row.campaign_extension_setting.extension_feed_items\n if resource_name in feed_items:\n print(f\"Found matching campaign with ID: '{row.campaign.id}'\")\n campaign_ids.append(row.campaign.id)\n\n return campaign_ids",
"def doFindAll(self, str):\n matches = []\n for value in self.doId2do.values():\n if repr(value).find(str) >= 0:\n matches.append(value)\n return matches",
"def doFindAllMatching(self, str):\n matches = []\n for value in self.doId2do.values():\n if re.search(str,repr(value)):\n matches.append(value)\n return matches",
"def get_cpd_ids(string):\n return [x for x in string.split(\" \") if x.startswith(\"C\")]",
"def check_word_in_list_in_string(list, string):\n stuff = [string for word in list if(word in string)]\n return stuff",
"def contains(self, searchstr: str):\n for x in self.sa:\n if searchstr in x:\n return True\n pass",
"def get_campaign_ids(self, file):\n pass",
"def get_all_matching_category_ids(category_name):\n category_list = Category.objects.filter(\n base_name__icontains=category_name\n ).all()\n return [category.id for category in category_list]",
"def _find_guids(guid_string):\n guids = []\n for found_guid in re.finditer(GUID_REGEX, guid_string):\n if found_guid.groups():\n guids.append(found_guid.group(0).strip(\"{}\"))\n return sorted(list(set(guids)))",
"def search_by_contains(self, tl):\n print(\"Search by string\")\n string = input(\"Please enter search string: \")\n return tl.findall_contains(string)",
"def getAssetsWithIdentifier(self, idstring, **kwargs):\n assetcollection = []\n try:\n tempassetcol = kwargs[\"assetcol\"]\n except KeyError:\n tempassetcol = self.__assets\n for i in tempassetcol:\n if idstring in i:\n assetcollection.append(tempassetcol[i])\n return assetcollection",
"def specificWordList(catsString):\n cats = catsStringToArray(catsString)\n wordList = []\n for i in cats:\n for word in Word.objects.all().filter(category=i):\n wordList.append(word)\n return wordList",
"def get_ids(self, sentence):\n return [self.get_id(word) for word in sentence.strip().split(' ')]",
"def __get_issue_ids(self, string):\n import re\n matches = re.findall(r\"#(\\d{1,20})\", string, re.MULTILINE)\n return matches",
"def parse_sample_ids(clazz, sample_id_string):\n sample_uids_or_names = sample_id_string.split(',')\n sample_uids_or_names = [s.strip() for s in sample_uids_or_names]\n sample_ids = [ExperimentSample.objects.get(uid=uid).id for uid\n in sample_uids_or_names]\n return sample_ids",
"def bulk_has(self, ids_):\n with self._db_connection() as connection:\n existing = set(connection.datasets_intersection(ids_))\n\n return [x in existing for x in\n map((lambda x: UUID(x) if isinstance(x, str) else x), ids_)]",
"def get_campaign_name_list(self):\n campaigns = self.find('campaigns', {})\n campaign_names = []\n for campaign in campaigns:\n if 'name' in campaign:\n campaign_names.append(campaign['name'])\n return campaign_names",
"def search(self, query_string):\n terms = query_string.lower().split()\n result = set(self.wordDict[terms[0]])\n if len(result) == 0:\n return list()\n else:\n for t in terms[2:]:\n records_containing_t = self.wordDict[t]\n result = result.intersection(records_containing_t)\n return [self.get_record_dict(id).getTuple() for id in result]",
"def get_targeted_ad_group_ids(client, customer_id, resource_name):\n ga_service = client.get_service(\"GoogleAdsService\")\n\n query = \"\"\"\n SELECT\n ad_group.id,\n ad_group_extension_setting.extension_feed_items\n FROM ad_group_extension_setting\n WHERE\n ad_group_extension_setting.extension_type = 'PROMOTION'\n AND ad_group.status != 'REMOVED'\"\"\"\n\n stream = ga_service.search_stream(customer_id=customer_id, query=query)\n\n ad_group_ids = []\n\n for batch in stream:\n for row in batch.results:\n feed_items = row.ad_group_extension_setting.extension_feed_items\n if resource_name in feed_items:\n print(f\"Found matching ad group with ID: '{row.ad_group.id}'\")\n ad_group_ids.append(row.ad_group.id)\n\n return ad_group_ids",
"def filter_by_comid(record, filterset=[]):\n return record['properties']['comid'] in filterset",
"def get_ids(self) -> List[str]:",
"def scan_individual_identifiers(text: str, cpf: bool = True) -> List[str]:\n if cpf:\n regex = re.compile(r\"\\w{3}\\.\\w{3}\\.\\w{3}\\-\\w{2}\")\n else:\n regex = re.compile(r\"\\w{2}\\.\\w{3}\\.\\w{3}/\\w{4}\\-\\w{2}\")\n\n identifiers = re.findall(regex, text)\n return identifiers",
"def get_incident_ids() -> list:\n incidents = demisto.get(demisto.context(), \"EmailCampaign.incidents\")\n return [incident['id'] for incident in incidents]",
"def grep(string,list):\n import re\n expr = re.compile(string)\n return filter(expr.search,list)",
"def containsAll(str, set):\n return 0 not in [c in str for c in set]",
"def containsAll(str, set):\n return 0 not in [c in str for c in set]",
"def searchString(self, instring, maxMatches=_MAX_INT):\n return list(self.scanString(instring))",
"def get_matching_emails(all_the_email,addrlist):\n l_addrlist = map(unicode.lower,addrlist)\n return [ e for e in all_the_email if e.l_address in l_addrlist ]",
"def find_possible(search_string):\n codes = []; names = []\n search_string = search_string.lower()\n for c,n in name_given_code.items():\n\n if (search_string in n):\n codes.append(c)\n names.append(n)\n\n return codes, names"
]
| [
"0.74002934",
"0.6311458",
"0.6043576",
"0.59890044",
"0.5851982",
"0.5620958",
"0.55856144",
"0.54602367",
"0.5436344",
"0.53730243",
"0.5317115",
"0.5297271",
"0.5277343",
"0.52691776",
"0.5260588",
"0.5248432",
"0.52466613",
"0.5192293",
"0.5143308",
"0.5125682",
"0.51151454",
"0.51140183",
"0.5108984",
"0.5106378",
"0.50877386",
"0.5081225",
"0.5081225",
"0.50595826",
"0.50529855",
"0.49998695"
]
| 0.8058569 | 0 |
Transforms the result of get_campaign_performance_per_period() function, and only includes campaign IDs that are in the list of camp_ids_to_filter | def transform_and_filter_result(result,camp_ids_to_filter):
final_result = list()
for x in result[0][0]:
if x["campaignId"] in camp_ids_to_filter:
result_per_id = list()
for result in x["results"]:
result_per_id_per_day = dict()
# The resulting dict can be modified
# if you need different items in it for your reporting
result_per_id_per_day["campaign_id"] = x["campaignId"]
result_per_id_per_day["date_from"] = result.get("metadata").get("fromDate")
result_per_id_per_day["date_to"] = result.get("metadata").get("toDate")
result_per_id_per_day["impressions"] = result.get("metrics").get("impressions")
result_per_id_per_day["clicks"] = result.get("metrics").get("clicks")
result_per_id_per_day["conversions"] = result.get("metrics").get("conversions")
result_per_id_per_day["spend"] = result.get("metrics").get("spend")
result_per_id.append(result_per_id_per_day)
final_result.append(result_per_id)
return final_result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_campaigns_in_interval(self, start_time, end_time, **kwargs):\n \n if 'campaign_filter' in kwargs:\n campaign_filter = kwargs['campaign_filter']\n if not(isinstance(campaign_filter, str)):\n campaign_filter = ''\n else:\n campaign_filter = MySQLdb._mysql.escape_string(str(campaign_filter))\n \n \"\"\" Escape parameters \"\"\"\n start_time = MySQLdb._mysql.escape_string(str(start_time).strip())\n end_time = MySQLdb._mysql.escape_string(str(end_time).strip())\n \n sql = \"select utm_campaign \" + \\\n \"from drupal.contribution_tracking left join civicrm.civicrm_contribution on (drupal.contribution_tracking.contribution_id = civicrm.civicrm_contribution.id) \" + \\\n \"where ts >= '%s' and ts < '%s' and utm_campaign regexp '%s' group by 1\" % (start_time, end_time, campaign_filter)\n \n results = self.execute_SQL(sql)\n \n campaigns = list()\n for row in results:\n campaigns.append(str(row[0]))\n \n return campaigns",
"def get_campaign_stats(self,campaign_id):\n campaign = Campaign(campaign_id)\n fields = ['account_name',\n 'campaign_name',\n 'clicks',\n 'cpc',\n 'reach',\n 'ctr',\n 'frequency',\n 'impressions',\n 'cpm',\n 'relevance_score']\n #Need to create a new list for data\n data = []\n \n #Grab lifetime campaign stats to find start and end date of campaign and convert to datetime formate\n insights = campaign.get_insights(fields=fields, params={'date_preset':'lifetime'})\n date_begin = datetime.datetime.strptime(insights[0]['date_start'], \"%Y-%m-%d\")\n date_end = datetime.datetime.strptime(insights[0]['date_stop'], \"%Y-%m-%d\")\n date_diff = datetime.timedelta(days=25)\n new_date = date_begin + date_diff\n\n #Pass in these values to the api\n api_date_first = str(date_begin).split()[0]\n api_date_last = str(new_date).split()[0]\n\n #Strange API limitation where you can only grab 25 values at a time. \n while date_begin < date_end:\n insights = campaign.get_insights(fields=fields, params={ 'time_range':{'since':api_date_first, 'until':api_date_last}, 'time_increment':1})\n insights = list(insights)\n date_begin = new_date \n new_date = date_begin + date_diff\n api_date_first = api_date_last\n api_date_last = str(new_date).split()[0]\n data += insights\n\n return data",
"def _get_campaigns(self, params):\n return self._api.account.get_campaigns(params={**params, **self._state_filter()}, fields=[self.state_pk])",
"def get_list_of_campaigns(self, limit=0, offset=0):\n logger.info(\"Function call: get_list_of_campaigns\")\n return self.__handle_result(self.__send_request('campaigns', 'GET', {'limit': limit or 0, 'offset': offset or 0}))",
"def get_spend_by_campaign_custom(self, budget_id, aw_account_id):\n try:\n budget = Budget.objects.get(id=budget_id)\n google_ads_account = DependentAccount.objects.get(id=aw_account_id)\n except (Budget.DoesNotExist, DependentAccount.DoesNotExist):\n return\n\n client = get_client()\n client.client_customer_id = google_ads_account.dependent_account_id\n\n aw_campaigns = budget.aw_campaigns.filter(account=google_ads_account)\n aw_campaign_ids = list(set([aw_campaign.campaign_id for aw_campaign in aw_campaigns]))\n\n report_downloader = client.GetReportDownloader(version=settings.API_VERSION)\n\n campaign_report_selector = {\n 'fields': ['Cost', 'CampaignId', 'CampaignStatus', 'CampaignName', 'Labels', 'Impressions'],\n 'predicates': [\n {\n 'field': 'Cost',\n 'operator': 'GREATER_THAN',\n 'values': '0'\n },\n {\n 'field': 'CampaignId',\n 'operator': 'IN',\n 'values': aw_campaign_ids\n }\n ]\n }\n\n campaign_report_query = {\n 'reportName': 'CAMPAIGN_PERFORMANCE_REPORT',\n 'dateRangeType': 'CUSTOM_DATE',\n 'reportType': 'CAMPAIGN_PERFORMANCE_REPORT',\n 'downloadFormat': 'CSV',\n 'selector': campaign_report_selector\n }\n\n start_date = budget.start_date\n end_date = budget.end_date\n\n campaign_report_selector['dateRange'] = {\n 'min': start_date.strftime('%Y%m%d'),\n 'max': end_date.strftime('%Y%m%d')\n }\n\n campaign_report = Reporting.parse_report_csv_new(report_downloader.DownloadReportAsString(campaign_report_query))\n for campaign_row in campaign_report:\n print(campaign_row)\n campaign_id = campaign_row['campaign_id']\n campaign, created = Campaign.objects.get_or_create(campaign_id=campaign_id, account=google_ads_account)\n campaign.campaign_name = campaign_row['campaign']\n campaign.save()\n campaign_spend_object, created = CampaignSpendDateRange.objects.get_or_create(campaign=campaign,\n start_date=start_date,\n end_date=end_date)\n\n campaign_spend_object.spend = int(campaign_row['cost']) / 1000000\n campaign_spend_object.save()\n\n yest_campaign_report_selector = {\n 'fields': ['Cost', 'CampaignId', 'CampaignStatus', 'CampaignName', 'Labels', 'Impressions'],\n 'predicates': [\n {\n 'field': 'Cost',\n 'operator': 'GREATER_THAN',\n 'values': '0'\n },\n {\n 'field': 'CampaignId',\n 'operator': 'IN',\n 'values': aw_campaign_ids\n }\n ]\n }\n\n yest_campaign_report_query = {\n 'reportName': 'CAMPAIGN_PERFORMANCE_REPORT',\n 'dateRangeType': 'CUSTOM_DATE',\n 'reportType': 'CAMPAIGN_PERFORMANCE_REPORT',\n 'downloadFormat': 'CSV',\n 'selector': yest_campaign_report_selector\n }\n\n start_date = budget.start_date\n yest_end_date = datetime.datetime.now() - datetime.timedelta(1)\n\n yest_campaign_report_selector['dateRange'] = {\n 'min': start_date.strftime('%Y%m%d'),\n 'max': yest_end_date.strftime('%Y%m%d')\n }\n\n campaign_report = Reporting.parse_report_csv_new(\n report_downloader.DownloadReportAsString(yest_campaign_report_query))\n for campaign_row in campaign_report:\n campaign_id = campaign_row['campaign_id']\n campaign, created = Campaign.objects.get_or_create(campaign_id=campaign_id, account=google_ads_account)\n campaign.campaign_name = campaign_row['campaign']\n campaign.save()\n campaign_spend_object, created = CampaignSpendDateRange.objects.get_or_create(campaign=campaign,\n start_date=start_date,\n end_date=end_date)\n\n campaign_spend_object.spend_until_yesterday = int(campaign_row['cost']) / 1000000\n campaign_spend_object.save()\n\n # try:\n # campaign_report = \\\n # 
Reporting.parse_report_csv_new(report_downloader.DownloadReportAsString(yest_campaign_report_query))[0]\n # except IndexError:\n # return\n #\n # campaign_spend_object, created = CampaignSpendDateRange.objects.get_or_create(campaign=campaign,\n # start_date=budget.start_date,\n # end_date=budget.end_date)\n #\n # campaign_spend_object.spend_until_yesterday = int(campaign_report['cost']) / 1000000\n # campaign_spend_object.save()\n\n return 'get_spend_by_campaign_custom'",
"def find_campaigns_as_caller(caller):\n\n \"\"\"Get Campaigns for Caller\"\"\"\n campaigns_as_caller = caller.campaigns_as_caller.filter(\n status__in=[x.value[0] for x in call_campaign_statuses_for_caller],\n ).order_by('-date_created')\n\n \"\"\"Check Call Tool Feature Access for Campaigns\"\"\"\n campaigns = [x for x in campaigns_as_caller if has_call_feature_access_for_local_group(\n x.local_group\n )]\n\n return campaigns",
"def list_campaigns(self, **kwargs) -> ApiResponse:\n return self._request(kwargs.pop('path'), params=kwargs)",
"def get_conversion_stats_by_adcampaign(\n self, account_id, campaign_ids=None, include_deleted=False,\n start_time=None, end_time=None, aggregate_days=None,\n by_impression_time=True, batch=False):\n path = 'act_%s/adcampaignconversions' % account_id\n args = {}\n if campaign_ids is not None:\n args['campaign_ids'] = json.dumps(campaign_ids)\n if include_deleted is not None:\n args['include_deleted'] = include_deleted\n if start_time is not None:\n args['start_time'] = start_time\n if end_time is not None:\n args['end_time'] = end_time\n if aggregate_days is not None:\n args['aggregate_days'] = aggregate_days\n if not by_impression_time:\n args['by_impression_time'] = 'false'\n return self.make_request(path, 'GET', args, batch=batch)",
"def get_all_spend_by_campaign_custom(self):\n budgets = Budget.objects.filter(has_adwords=True, is_monthly=False)\n for budget in budgets:\n for aw_account in budget.account.adwords.all():\n if settings.DEBUG:\n get_spend_by_campaign_custom(budget.id, aw_account.id)\n else:\n get_spend_by_campaign_custom.delay(budget.id, aw_account.id)\n\n return 'get_all_spend_by_campaign_custom'",
"def list_campaigns(self, interval: str, page: str = None, limit: str = None) -> dict:\n params = remove_empty_elements({\"interval\": interval,\n \"page\": page,\n \"size\": limit,\n \"format\": \"json\"})\n return self.http_request(\"GET\", '/campaign/ids', params=params)",
"def get_adcampaigns_of_account(self, account_id, fields, batch=False):\n path = 'act_%s/adcampaigns' % account_id\n args = {\n 'fields': fields,\n 'limit': self.DATA_LIMIT\n }\n return self.make_request(path, 'GET', args, batch=batch)",
"def get_targeted_campaign_ids(client, customer_id, resource_name):\n ga_service = client.get_service(\"GoogleAdsService\")\n\n query = \"\"\"\n SELECT\n campaign.id,\n campaign_extension_setting.extension_feed_items\n FROM campaign_extension_setting\n WHERE\n campaign_extension_setting.extension_type = 'PROMOTION'\n AND campaign.status != 'REMOVED'\"\"\"\n\n stream = ga_service.search_stream(customer_id=customer_id, query=query)\n\n campaign_ids = []\n\n for batch in stream:\n for row in batch.results:\n feed_items = row.campaign_extension_setting.extension_feed_items\n if resource_name in feed_items:\n print(f\"Found matching campaign with ID: '{row.campaign.id}'\")\n campaign_ids.append(row.campaign.id)\n\n return campaign_ids",
"def get_running_campaign(self):\n kwargs = {}\n kwargs['status'] = 1\n tday = datetime.utcnow().replace(tzinfo=utc)\n kwargs['startingdate__lte'] = datetime(tday.year, tday.month, tday.day,\n tday.hour, tday.minute, tday.second, tday.microsecond).replace(tzinfo=utc)\n kwargs['expirationdate__gte'] = datetime(tday.year, tday.month, tday.day,\n tday.hour, tday.minute, tday.second, tday.microsecond).replace(tzinfo=utc)\n\n s_time = \"%s:%s:%s\" % (\n str(tday.hour), str(tday.minute), str(tday.second))\n kwargs['daily_start_time__lte'] = datetime.strptime(s_time, '%H:%M:%S')\n kwargs['daily_stop_time__gte'] = datetime.strptime(s_time, '%H:%M:%S')\n\n # weekday status 1 - YES\n # self.model._meta.get_field(tday.strftime(\"%A\").lower()).value()\n kwargs[tday.strftime(\"%A\").lower()] = 1\n\n return Campaign.objects.filter(**kwargs)",
"def get_whole_related_campaign_data(save2csv=True, campaign='ARNA-1',\n resample_data=False, debug=False):\n # Which data to use?\n RunRoot = get_local_folder('RunRoot')\n BASE_str = 'geosfp_4x5_standard.v12.9.0.BASE'\n ARNA1Var = BASE_str+'.2019.2020.ARNA1.Nest.repeat.JVALS/'\n ARNA1SurVar = BASE_str+'.2019.2020.ARNA1.Nest.repeat.JVALS.CVAO.PF/'\n CVAO2015 = BASE_str+'.2015.Aug.Nest.repeat.JVALS.CVAO.PF/'\n run_dict = {\n 'ARNA-1': '{}{}'.format(RunRoot, ARNA1Var),\n 'ARNA-1-surface': '{}{}'.format(RunRoot, ARNA1SurVar),\n 'CVAO-2015-surface': '{}{}'.format(RunRoot, CVAO2015),\n }\n # Name of file to save?\n SaveName = 'GC_model_output_{}'.format(campaign)\n SaveName = AC.rm_spaces_and_chars_from_str(SaveName)\n # NetCDF directory\n sanity_check_model_runs = False\n if sanity_check_model_runs:\n for key in run_dict.keys():\n run_dict[key] = run_dict[key]+'/OutputDir/'\n # check generic stats\n RunStr = '/geosfp_4x5_standard.v12.9.0.BASE.2019.2020.ARNA.BCs.repeat/'\n RunStr += 'OutputDir/'\n REF_wd = '{}{}'.format(RunRoot, RunStr)\n use_REF_wd4Met = True\n df = AC.get_general_stats4run_dict_as_df(run_dict=run_dict,\n use_REF_wd4Met=use_REF_wd4Met,\n REF_wd=REF_wd,\n )\n # Extract the planeflight files\n folder = run_dict[campaign]\n files2use = list(sorted(glob.glob(os.path.join(folder, '*plane.log*'))))\n file2use = files2use[0]\n # Get Header information from first file\n vars, sites = AC.get_pf_headers(file2use, debug=debug)\n # Extract all points from file\n dfs = []\n for file2use in files2use:\n df, vars = AC.pf_csv2pandas(file=file2use, vars=vars, epoch=True,\n r_vars=True)\n\n # Add a datetime index\n df = AC.DF_YYYYMMDD_HHMM_2_dt(df, rmvars=None, epoch=False)\n df.index.name = None\n # Set the out of (nested) box values to NaNs\n OutOfBoxValue = -1000.0\n for col in df.columns:\n try:\n df.loc[df[col].values == OutOfBoxValue, col] = np.NaN\n except TypeError:\n print('{} - TypeError found for {} '.format(campaign, col))\n # Update the variable names\n d = PF_TRAXXX_2TracerName(None, folder=folder, RTN_dict=True)\n d = dict([('TRA_{:0>3}'.format(i), d[i]) for i in d.keys()])\n df = df.rename(columns=d)\n # Add derived (GEOSchem) variables to df\n df = add_derived_GEOSChem_specs2df(df)\n # Resample the data?\n if ('surface' not in campaign) or resample_data:\n df = df.resample('1T').mean()\n dfs += [df]\n # Concat the data frames\n df2 = pd.concat(dfs, axis=0)\n # Save to disk\n if save2csv:\n if ('surface' not in campaign):\n df2.to_csv(SaveName+'.csv')\n else:\n TYPES = list(set(df2['TYPE'].values))\n print(TYPES)\n for TYPE in TYPES:\n df2save = df2.loc[df2['TYPE'] == TYPE, :]\n df2save.to_csv('{}_{}.csv'.format(SaveName, TYPE))\n return df2",
"def get_adcampaigns(self, account_id, fields=None, batch=False):\n return self.get_adcampaigns_of_account(account_id, fields, batch=batch)",
"def get_stats_by_adcampaign(self, account_id, campaign_ids=None,\n batch=False, start_time=None, end_time=None):\n args = {}\n if campaign_ids is not None:\n args['campaign_ids'] = json.dumps(campaign_ids)\n if start_time:\n args['start_time'] = self.__parse_time(start_time)\n if end_time:\n args['end_time'] = self.__parse_time(end_time)\n path = 'act_%s/adcampaignstats' % account_id\n return self.make_request(path, 'GET', args, batch=batch)",
"def show_campaigns(request, utm_campaign, **kwargs):\n \n err_msg = ''\n try:\n err_msg = str(kwargs['kwargs']['err_msg'])\n except:\n pass\n \n test_type_override = ''\n try:\n test_type_override = MySQLdb._mysql.escape_string(request.POST['test_type_override'])\n \n if test_type_override == 'Banner':\n test_type_var = FDH._TESTTYPE_BANNER_\n elif test_type_override == 'Landing Page':\n test_type_var = FDH._TESTTYPE_LP_\n elif test_type_override == 'Banner and LP':\n test_type_var = FDH._TESTTYPE_BANNER_LP_\n \n except:\n test_type_var = ''\n pass\n \n try:\n \"\"\" Find the earliest and latest page views for a given campaign \"\"\"\n lptl = DL.LandingPageTableLoader()\n ccrml = DL.CiviCRMLoader()\n \n start_time = ccrml.get_earliest_donation(utm_campaign)\n end_time = ccrml.get_latest_donation(utm_campaign)\n \n one_step = lptl.is_one_step(start_time, end_time, utm_campaign) \n \n if not(one_step): \n start_time = lptl.get_earliest_campaign_view(utm_campaign)\n end_time = lptl.get_latest_campaign_view(utm_campaign) \n\n interval = 1\n \n \"\"\" Create reporting object to retrieve campaign data and write plots to image repo on disk \"\"\"\n ir = DR.IntervalReporting(was_run=False, use_labels=False, font_size=20, plot_type='line', query_type='campaign', file_path=projSet.__web_home__ + 'campaigns/static/images/')\n \n \"\"\" Produce analysis on the campaign view data \"\"\" \n ir.run(start_time, end_time, interval, 'views', utm_campaign, {}, one_step=one_step)\n \n \"\"\" \n ESTIMATE THE START AND END TIME OF THE CAMPAIGN\n ===============================================\n \n Search for the first instance when more than 10 views are observed over a sampling period\n \"\"\"\n \n col_names = ir._data_loader_.get_column_names()\n \n views_index = col_names.index('views')\n ts_index = col_names.index('ts')\n \n row_list = list(ir._data_loader_._results_) # copy the query results\n for row in row_list:\n if row[views_index] > 100:\n start_time_est = row[ts_index]\n break\n row_list.reverse()\n for row in row_list:\n if row[views_index] > 100:\n end_time_est = row[ts_index]\n break\n \n \n \"\"\"\n BUILD THE VISUALIZATION FOR THE TEST VIEWS OF THIS CAMAPAIGN\n ============================================================ \n \"\"\"\n \n \"\"\" Read the test name \"\"\"\n ttl = DL.TestTableLoader()\n row = ttl.get_test_row(utm_campaign)\n test_name = ttl.get_test_field(row ,'test_name')\n \n \"\"\" Regenerate the data using the estimated start and end times \"\"\"\n ir = DR.IntervalReporting(was_run=False, use_labels=False, font_size=20, plot_type='line', query_type='campaign', file_path=projSet.__web_home__ + 'campaigns/static/images/')\n ir.run(start_time_est, end_time_est, interval, 'views', utm_campaign, {}, one_step=one_step)\n \n \"\"\" Determine the type of test (if not overridden) and retrieve the artifacts \"\"\"\n test_type, artifact_name_list = FDH.get_test_type(utm_campaign, start_time, end_time, DL.CampaignReportingLoader(query_type=''), test_type_var)\n \n return render_to_response('campaigns/show_campaigns.html', {'utm_campaign' : utm_campaign, 'test_name' : test_name, 'start_time' : start_time_est, 'end_time' : end_time_est, 'one_step' : one_step, \\\n 'artifacts' : artifact_name_list, 'test_type' : test_type, 'err_msg' : err_msg}, context_instance=RequestContext(request)) \n\n except Exception as inst:\n \n logging.error('Failed to correctly produce campaign diagnostics.')\n logging.error(type(inst))\n logging.error(inst.args)\n logging.error(inst)\n \n \"\"\" Return to the index 
page with an error \"\"\"\n err_msg = 'There is insufficient data to analyze this campaign: %s. Check to see if the <a href=\"/LML/\">impressions have been loaded</a>. <br><br>ERROR:<br><br>%s' % (utm_campaign, inst.__str__())\n \n return index(request, kwargs={'err_msg' : err_msg})",
"def get_campaign_ids(self, file):\n pass",
"def get_adcampaign_list(self, account_id):\n fields = 'id, name, campaign_status, start_time, end_time, ' \\\n 'daily_budget, lifetime_budget, budget_remaining'\n batch = [\n self.get_adaccount(account_id, ['currency'], batch=True),\n self.get_adcampaigns(account_id, fields, batch=True),\n self.get_stats_by_adcampaign(account_id, batch=True),\n ]\n return self.make_batch_request(batch)",
"def testGetCampaignsByCriteria(self):\n criteria = {\n 'archiveFilter': {\n 'inactiveOnly': 'true'\n }\n }\n self.assert_(isinstance(self.__class__.service.GetCampaignsByCriteria(\n criteria), tuple))",
"def find_campaigns_as_admin(call_profile):\n\n \"\"\"Check Feature Access and Local Group Permissions\"\"\"\n user = call_profile.user\n local_group = find_local_group_by_user(user)\n if local_group is not None and has_call_permission_for_local_group(\n user,\n local_group,\n 'calls.change_callcampaign'\n ):\n return local_group.callcampaign_set.all().order_by(\n '-date_created'\n )\n\n \"\"\"Otherwise return empty list\"\"\"\n return CallCampaign.objects.none()",
"def _filter_cid(self, cids):\n return [cid for cid in cids if cid is not None]",
"def get_all_camapaign_stats_data(campaign_id):\n all_campaign_stats_data = []\n all_campaign_stats = Contribution.query.filter_by(campaign_id=campaign_id).all()\n for campaign_stat in all_campaign_stats:\n campaign_stat_data = {}\n campaign_stat_data['username'] = campaign_stat.username\n campaign_stat_data['file'] = campaign_stat.file\n campaign_stat_data['edit_type'] = campaign_stat.edit_type\n campaign_stat_data['edit_action'] = campaign_stat.edit_action\n campaign_stat_data['country'] = campaign_stat.country\n campaign_stat_data['depict_item'] = campaign_stat.depict_item\n campaign_stat_data['depict_prominent'] = campaign_stat.depict_prominent\n campaign_stat_data['caption_text'] = campaign_stat.caption_text\n campaign_stat_data['caption_language'] = campaign_stat.caption_language\n campaign_stat_data['date'] = campaign_stat.date\n all_campaign_stats_data.append(campaign_stat_data)\n return all_campaign_stats_data",
"def all(self, campaign_id, **queryparams):\n self.campaign_id = campaign_id\n self.report_id = None\n return self._mc_client._get(url=self._build_path(campaign_id, 'abuse-reports'), **queryparams)",
"def get_adcampaign(self, campaign_id, fields, batch=False):\n path = '%s' % campaign_id\n args = {'fields': fields}\n return self.make_request(path, 'GET', args, batch=batch)",
"def get_campaigns(self, uuid=None):\n params = self._build_params(uuid=uuid)\n return self._get_query('campaigns', params, Campaign)",
"def test_get_existent_campaigns_returns_campaigns_list(self):\n test_campaign = return_canned_campaign()\n test_campaign.create()\n response = self.client.get(self.endpoint_url)\n response_body = response.get_json()\n self.assertEqual(response.status_code, 200)\n self.assertEqual(\n response_body, {\"campaigns\": [{\"id\": 1, \"name\": \"Test Campaign\"}]}\n )",
"def main():\n\n authorize(outb, creds)\n\n string = input(\"Which campaigns do you want to include? >>> \")\n\n while True:\n date_from = input(\"From which date? Use the format 'YYYY-MM-DD please. >>> \")\n try:\n date_from = datetime.datetime.strptime(date_from, \"%Y-%m-%d\") \n # Input string will only be converted if the user gives the correct format...\n break\n except ValueError:\n print(\"Please input the date in the correct format!\")\n # ... else it keeps asking for the correct format.\n\n while True:\n date_to = input(\"To which date? Use the format 'YYYY-MM-DD please. >>> \")\n try:\n date_to = datetime.datetime.strptime(date_to, \"%Y-%m-%d\")\n break\n except ValueError:\n print(\"Please input the date in the correct format!\")\n\n while True:\n breakdown = input(\"What should be the breakdown? Type 'daily' or 'monthly' >>> \")\n if breakdown in (\"daily\", \"monthly\"):\n break\n else:\n print(\"Please input only 'daily' or 'monthly'\")\n\n filename = input(\"What should be the filename? >>> \")\n\n result = outb.get_campaign_performance_per_period(marketer_id, date_from, date_to, breakdown)\n #Get the report object with the given params\n filtered_camp_ids = get_camp_ids_containing_str(marketer_id, string)\n #Filter out campaign IDs containing the given string\n tf = merge(transform_and_filter_result(result, filtered_camp_ids))\n #Transform and merge the filtered results to a dict for pandas\n dataframe = pd.DataFrame(tf, columns=[\n \"campaign_id\",\n \"date_from\",\n \"date_to\",\n \"impressions\",\n \"clicks\",\n \"conversions\",\n \"spend\"\n ])\n dataframe.set_index(\"date_from\", inplace=True)\n final_pivot_df = dataframe.groupby(\"date_from\").sum().reindex([\"impressions\", \"clicks\", \"spend\", \"conversions\"], axis=1)\n #I only need these metrics for my final export, can be changed if necessary\n date_now = datetime.datetime.now().strftime(\"%Y-%m-%d__%H_%M_%S\")\n #For the date in the filename\n writer = pd.ExcelWriter(f\"{filename}_{date_now}.xlsx\")\n #Pandas excel writer object\n final_pivot_df.to_excel(writer, \"Sheet1\")\n #Write the dataframe to excel Sheet 1\n writer.save()\n print(f\"Finished!, your report is saved as {filename}_{date_now}.xlsx\")",
"def filter_patients(self):\n\n if self.dataset is None:\n self.dataset = h5py.File(self.filename, 'r')['dataset']\n \n # Find feature indices belonging to specific criteria\n inclusion_info = self.filter_params['inclusion']\n # exclusion_info = self.filter_params['exclusion']\n case_control_info = self.filter_params['case_control']\n\n inclusion_inds = self.check_criteria(inclusion_info, case_control=False)\n # exclusion_inds = self.check_criteria(exclusion_info, case_control=False)\n case_inds, control_inds = self.check_criteria(case_control_info, case_control=True)\n\n filtered_inds = {}\n # inclusion_exclusion_inds = np.setdiff1d(inclusion_inds, exclusion_inds)\n filtered_inds['case'] = np.intersect1d(inclusion_inds, case_inds)\n filtered_inds['control'] = np.intersect1d(inclusion_inds, control_inds)\n\n return filtered_inds",
"def test_filter_by_ids(self, original_list, ids_to_filter, expected_result):\n result = helpers.filter_by_ids(original_list, ids_to_filter)\n\n self.assertEqual(result, expected_result)"
]
| [
"0.6190147",
"0.5855874",
"0.5680206",
"0.5596478",
"0.53977025",
"0.53867006",
"0.5180743",
"0.51532555",
"0.5137389",
"0.5134193",
"0.51274616",
"0.5112135",
"0.5054796",
"0.5053313",
"0.5045526",
"0.5037804",
"0.50288117",
"0.49338433",
"0.49244148",
"0.49209902",
"0.4909127",
"0.49083385",
"0.4905905",
"0.4890381",
"0.48768264",
"0.48757634",
"0.48315063",
"0.48141584",
"0.48051652",
"0.47499356"
]
| 0.71021146 | 0 |
Given a lookup_type of 'year', 'month', 'day' or 'week_day', returns the SQL that extracts a value from the given date field field_name. | def date_extract_sql(self, lookup_type, field_name):
if lookup_type == 'week_day':
# DAYOFWEEK() returns an integer, 1-7, Sunday=1.
# Note: WEEKDAY() returns 0-6, Monday=0.
return "DAYOFWEEK(%s)" % field_name
elif lookup_type == 'week':
return "WEEK(%s)" % field_name
elif lookup_type == 'quarter':
return "QUARTER(%s)" % field_name
elif lookup_type == 'month':
return "MONTH(%s)" % field_name
elif lookup_type == 'day':
return "DAYOFMONTH(%s)" % field_name
elif lookup_type == 'hour':
return "HOUR(%s)" % field_name
elif lookup_type == 'minute':
return "MINUTE(%s)" % field_name
else:
return "SECOND(%s)" % field_name | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def date_extract_sql(self, lookup_type, field_name):\n return \"EXTRACT(%s FROM %s)\" % (lookup_type, field_name)",
"def get_prep_lookup(self, lookup_type, value):\n\n if hasattr(value, 'prepare'):\n return value.prepare()\n if hasattr(value, '_prepare'):\n return value._prepare()\n\n if lookup_type in ('indexexact', 'distinct', 'slice',\\\n 'contains', 'containedby', 'overlap', 'exact', \\\n 'gt','lt','gte', 'lte'):\n return self.get_prep_value(value)\n raise TypeError(\"Field has invalid lookup: %s\" % lookup_type)",
"def picker(field_name):\n return lambda row: row[field_name]",
"def picker(field_name):\n return lambda row: row[field_name]",
"def get_prep_lookup(self, lookup_type, value):\n \n if value:\n if isinstance(value, (list, tuple)):\n if hasattr(value[0], 'prepare'):\n return [v.prepare() for v in values]\n if hasattr(value[0], '_prepare'):\n return [v._prepare() for v in values]\n else:\n if hasattr(value, 'prepare'):\n return [v.prepare() for v in values]\n if hasattr(value, '_prepare'):\n return [v._prepare() for v in values]\n \n \n # TODO clean valid lookups\n #if lookup_type in (\n # 'regex', 'iregex', 'month', 'day', 'week_day', 'search',\n # 'contains', 'icontains', 'iexact', 'startswith', 'istartswith',\n # 'endswith', 'iendswith', 'isnull',\n # 'exact', 'gt', 'gte', 'lt', 'lte',\n # 'range', 'in',\n # 'year'\n # ):\n # return value\n #else:\n # e = _(u\"%s is not a valid lookup for array field\" % lookup_type)\n # raise ValueError(e)\n \n return self.get_prep_value(value)",
"def get_db_prep_lookup(self, lookup_type, value, connection=None, prepared=None):\n if isinstance(value, types.ListType):\n for i, val in enumerate(value):\n value[i] = self._add_tz(val)\n else:\n value = self._add_tz(value)\n\n return super(LocalizedDateTimeField, self).get_db_prep_lookup(lookup_type, value, connection=connection, prepared=prepared)",
"def get_dtval(record, field_name):\n val = recordval(record, field_name)\n if (val != \"\" and not re.match(r'\\d\\d?/\\d\\d?/\\d\\d\\d\\d', val)):\n parser_error(\"bad value in \"+field_name+\": '\"+val+\"'-- try MM/DD/YYYY\")\n return val",
"def lookup(model, field_name):\n return getattr(model, field_name)",
"def transform_field_to_query(self, field, window=None):\n f_alias = alias_selector(field.alias)\n\n if window and isinstance(field, DatetimeInterval):\n return window(field.definition, field.interval_key).as_(f_alias)\n\n return field.definition.as_(f_alias)",
"def make_date_extractor(md_field: str) -> Callable:\n\n def extract(props: Dict[str, str]) -> str:\n ds = ''\n v = props.get(md_field, '')\n try:\n d = datetime.strptime(v, '%Y/%m/%d')\n ds = d.strftime('%Y%m%d')\n except Exception:\n pass\n return ds\n\n return extract",
"def dates(self, field_name, kind, order=\"ASC\"):\n if kind not in (\"year\", \"month\", \"week\", \"day\"):\n raise ValueError(\"'kind' must be one of 'year', 'month', 'week', or 'day'.\")\n if order not in (\"ASC\", \"DESC\"):\n raise ValueError(\"'order' must be either 'ASC' or 'DESC'.\")\n return (\n self.annotate(\n datefield=Trunc(field_name, kind, output_field=DateField()),\n plain_field=F(field_name),\n )\n .values_list(\"datefield\", flat=True)\n .distinct()\n .filter(plain_field__isnull=False)\n .order_by((\"-\" if order == \"DESC\" else \"\") + \"datefield\")\n )",
"def make_datable(dict, type):\n if type not in date_value: return None\n if date_value[type] not in dict: return None\n (year, month, day) = (None, None, None)\n for datecomponents in map(lambda s: map(int,s.split(\"-\")), dict[date_value[type]]):\n if len(datecomponents) < 3: continue\n (year, month, day) = datecomponents[:3]\n if year is None: return None\n return Datable(dict, (year, month, day))",
"def index_field_from_django_field(f, default=CharField):\n result = default\n\n if f.get_internal_type() in (\"DateField\", \"DateTimeField\"):\n result = DateTimeField\n elif f.get_internal_type() in (\"BooleanField\", \"NullBooleanField\"):\n result = BooleanField\n elif f.get_internal_type() in (\"CommaSeparatedIntegerField\",):\n result = MultiValueField\n elif f.get_internal_type() in (\"DecimalField\", \"FloatField\"):\n result = FloatField\n elif f.get_internal_type() in (\n \"IntegerField\",\n \"PositiveIntegerField\",\n \"PositiveSmallIntegerField\",\n \"SmallIntegerField\",\n ):\n result = IntegerField\n\n return result",
"def make_field(field):\n\n if \"time\" in field:\n return TimeField(field)\n if \"zd\" in field:\n return RadianField(field)\n else:\n return SimpleField(field)",
"def get_prep_lookup(self, lookup_type, value):\n if value is None:\n return super(self.__class__, self).get_prep_lookup(lookup_type, value)\n if lookup_type == 'in':\n value = [v.value for v in value]\n return super(self.__class__, self).get_prep_lookup(lookup_type, value)\n if lookup_type == 'exact':\n return super(self.__class__, self).get_prep_lookup(lookup_type, value.value)\n raise TypeError('Lookup type {} is not supported.'.format(lookup_type))",
"def lookup_daily(self, **kwargs):\n return self.lookup(period=self.PERIOD_DAILY, **kwargs)",
"def get_day(x):\n return x[\"SALE DATE\"].day",
"def lookup(self, name):\n return self.fieldDict[name]",
"def _parse_date(\n value_expr: str, target_expr: str, ref_parts: List[str],\n a_type: mapry.Date, auto_id: mapry.py.generate.AutoID) -> str:\n uid = auto_id.next_identifier()\n\n return _PARSE_DATE_TPL.render(\n uid=uid,\n value_expr=value_expr,\n ref_parts=ref_parts,\n target_expr=target_expr,\n a_type=a_type).rstrip(\"\\n\")",
"def convert_values(self, value, field):\n if value is None:\n return None\n if field and field.get_internal_type() == 'DateTimeField':\n if isinstance(value, string_types) and value:\n value = parse_datetime(value)\n return value\n elif field and field.get_internal_type() == 'DateField':\n if isinstance(value, datetime.datetime):\n value = value.date() # extract date\n elif isinstance(value, string_types):\n value = parse_date(value)\n elif field and field.get_internal_type() == 'TimeField':\n if (isinstance(value, datetime.datetime) and value.year == 1900 and value.month == value.day == 1):\n value = value.time() # extract time\n elif isinstance(value, string_types):\n # If the value is a string, parse it using parse_time.\n value = parse_time(value)\n # Some cases (for example when select_related() is used) aren't\n # caught by the DateField case above and date fields arrive from\n # the DB as datetime instances.\n # Implement a workaround stealing the idea from the Oracle\n # backend. It's not perfect so the same warning applies (i.e. if a\n # query results in valid date+time values with the time part set\n # to midnight, this workaround can surprise us by converting them\n # to the datetime.date Python type).\n elif isinstance(value, datetime.datetime) and value.hour == value.minute == value.second == value.microsecond == 0:\n value = value.date()\n # Force floats to the correct type\n elif value is not None and field and field.get_internal_type() == 'FloatField':\n value = float(value)\n return value",
"def field_value(field_name, bushfire=None, request=None, url_type=\"auto\",is_upper=None,external_email=False):\r\n if bushfire:\r\n try:\r\n if field_name == \"origin_point_geo\":\r\n return bushfire.origin_geo\r\n elif field_name == \"region\":\r\n if is_upper == True:\r\n return bushfire.region.name.upper()\r\n else:\r\n return bushfire.region.name\r\n elif field_name == \"district\":\r\n if is_upper == True:\r\n return bushfire.district.name.upper()\r\n else:\r\n return bushfire.district.name\r\n elif field_name == \"fire_number\":\r\n if request and not external_email:\r\n return mark_safe(\"<a href='{}'>{}</a>\".format(utils.get_bushfire_url(request,bushfire,url_type),bushfire.fire_number))\r\n else:\r\n return bushfire.fire_number\r\n elif field_name == \"url_link\":\r\n return mark_safe(\"<a href='{0}'>{0}</a>\".format(utils.get_bushfire_url(request,bushfire,url_type)))\r\n elif field_name == \"url\":\r\n return utils.get_bushfire_url(request,bushfire,url_type)\r\n elif field_name == \"report_status\":\r\n return bushfire.report_status_name\r\n elif field_name == \"latitude_degree\":\r\n return LatLon.Latitude(bushfire.origin_point.get_y()).degree\r\n elif field_name == \"latitude_minute\":\r\n return LatLon.Latitude(bushfire.origin_point.get_y()).minute\r\n elif field_name == \"latitude_second\":\r\n return LatLon.Latitude(bushfire.origin_point.get_y()).second\r\n elif field_name == \"longitude_degree\":\r\n return LatLon.Longitude(bushfire.origin_point.get_x()).degree\r\n elif field_name == \"longitude_minute\":\r\n return LatLon.Longitude(bushfire.origin_point.get_x()).minute\r\n elif field_name == \"longitude_second\":\r\n return LatLon.Longitude(bushfire.origin_point.get_x()).second\r\n \r\n\r\n value = getattr(bushfire, FIELD_MAPPING.get(field_name) or field_name)\r\n if field_name == \"dfes_incident_no\":\r\n return value or \"Not available\"\r\n elif value is None:\r\n return \"-\"\r\n elif type(value) == type(True):\r\n return \"Yes\" if value else \"No\"\r\n elif field_name == \"dispatch_pw\":\r\n return \"Yes\" if value == 1 else \"No\"\r\n elif isinstance(value,datetime.datetime):\r\n return value.astimezone(tz.gettz(settings.TIME_ZONE)).strftime('%Y-%m-%d %H:%M')\r\n else:\r\n value = str(value).strip()\r\n return value or \"-\"\r\n except:\r\n return \"-\"\r\n else:\r\n return \"-\"",
"def parse_fieldtype(value, fieldtype):\n\ttype_mapper = {\n\t\t\"int\": int,\n\t\t\"float\": float,\n\t\t\"basestring\": str,\n\t\t\"dict\": json.loads\n\t}\n\n\ttry:\n\t\tif fieldtype in type_mapper.keys():\n\t\t\treturn type_mapper[fieldtype](value)\n\t\telif fieldtype == \"list\":\n\t\t\traise Exception(\"Can't parse value to list type\")\n\t\telif fieldtype == \"date\":\n\t\t\treturn value\n\t\t# elif fieldtype == \"float\":\n\t\t# \treturn float(value)\n\t\t# elif fieldtype == \"basestring\":\n\t\t# \treturn str(value)\n\t\t# elif fieldtype == \"dict\":\n\t\t# \treturn json.loads(value)\n\texcept Exception, e:\n\t\traise e",
"def day(sym, date):\n return get(sym, date, date)[0][1]",
"def lookup_sqlacolumn_for_field(cls, fieldid):\n for field in cls.fieldlist:\n if (field.id==fieldid):\n return field.get_sqlacolumn()\n raise Exception(\"Could not find field {0}\".format(fieldid))\n #return None",
"def GetField(dobj, fieldname, fielddef, raw=False, addroffset=0):\n\n if isinstance(dobj, str):\n dobj = bytearray(dobj)\n\n valuemapping = None\n\n # get field definition\n format_, baseaddr, strindex, arraydef, group = GetFieldDef(fielddef, fields='format_, baseaddr, strindex, arraydef, group')\n\n # filter groups\n if not IsFilterGroup(group):\n return valuemapping\n\n # <arraydef> contains a integer list\n if isinstance(arraydef, list) and len(arraydef) > 0:\n valuemapping = []\n offset = 0\n for i in range(0, arraydef[0]):\n subfielddef = GetSubfieldDef(fielddef)\n length = GetFieldLength(subfielddef)\n if length != 0:\n if strindex is not None:\n value = GetField(dobj, fieldname, subfielddef, raw=raw, addroffset=i)\n else:\n value = GetField(dobj, fieldname, subfielddef, raw=raw, addroffset=addroffset+offset)\n valuemapping.append(value)\n offset += length\n\n # <format> contains a dict\n elif isinstance(format_, dict):\n mapping_value = {}\n # -> iterate through format\n for name in format_:\n value = None\n value = GetField(dobj, name, format_[name], raw=raw, addroffset=addroffset)\n if value is not None:\n mapping_value[name] = value\n # copy complete returned mapping\n valuemapping = copy.deepcopy(mapping_value)\n\n # a simple value\n elif isinstance(format_, (str, bool, int, float)):\n if GetFieldLength(fielddef) != 0:\n if strindex is not None:\n value = GetFieldValue(fielddef, dobj, baseaddr, addroffset)\n else:\n value = GetFieldValue(fielddef, dobj, baseaddr+addroffset)\n valuemapping = ReadWriteConverter(value, fielddef, read=True, raw=raw)\n\n else:\n exit(ExitCode.INTERNAL_ERROR, \"Wrong mapping format definition: '{}'\".format(format_), type_=LogType.WARNING, doexit=not args.ignorewarning, line=inspect.getlineno(inspect.currentframe()))\n\n return valuemapping",
"def lookup(input_field, input_val, output_field):\n l = list(filter(lambda x : x[input_field] == input_val, data))\n if len(l) != 0:\n return l[0][output_field]\n print(\"No entry found for \" + input_field + \": \" + input_val)\n return \"\"",
"def select(iso_code, date_, records):\n for record in records:\n if record[\"iso_code\"] == iso_code and record[\"date\"] == date_:\n return record\n return None",
"def dateFieldValidator(field):\n if not (field[\"type\"] == \"datetime\" or field[\"type\"] == \"date\"):\n raise ValueError(\"DateFieldValidator error: field type \" + field[\"type\"])\n if \"format\" in field:\n format_string = field[\"format\"]\n # The following is borrowed from datapackage.py...\n\n # Order of the replacements is important since month and minutes\n # can be denoted in a similar fashion\n replacement_order = [('hh', '%m'), (':mm', ':%M'), ('ss', '%S'),\n ('yyyy', '%Y'), ('yy', '%y'), ('mm', '%m'),\n ('dd', '%d')]\n\n # For each replacement we substitute (and ignore the case)\n for (old, new) in replacement_order:\n format_string = re.sub(\"(?i)%s\" % old, new, format_string)\n if field[\"type\"] == \"datetime\":\n return lambda x: datetime.datetime.strptime(x, format_string)\n else:\n return lambda x: datetime.datetime.strptime(x, format_string).date()\n else:\n if field[\"type\"] == \"datetime\":\n return lambda x: datetime.datetime.strptime(x, '%Y-%m-%dT%H:%M:%S%Z')\n else:\n return lambda x: datetime.datetime.strptime(x, '%Y-%m-%d').date()",
"def get_day_of_data(self, column_name, day):\n self.check_for_column(column_name)\n dt = pd.Timestamp(day)\n column = self.data[column_name]\n return column[column.index.date == dt.date()]",
"def api_field_from_mongo_field(cls, f, default=tastypie_fields.CharField):\r\n\r\n result = default\r\n\r\n if isinstance(f, (mongoengine.ComplexDateTimeField, mongoengine.DateTimeField)):\r\n result = tastypie_fields.DateTimeField\r\n elif isinstance(f, mongoengine.BooleanField):\r\n result = tastypie_fields.BooleanField\r\n elif isinstance(f, mongoengine.FloatField):\r\n result = tastypie_fields.FloatField\r\n elif isinstance(f, mongoengine.DecimalField):\r\n result = tastypie_fields.DecimalField\r\n elif isinstance(f, mongoengine.IntField):\r\n result = tastypie_fields.IntegerField\r\n elif isinstance(f, (mongoengine.FileField, mongoengine.BinaryField)):\r\n result = tastypie_fields.FileField\r\n elif isinstance(f, mongoengine.DictField):\r\n result = tastypie_fields.DictField\r\n elif isinstance(f, mongoengine.ListField):\r\n result = tastypie_fields.ListField\r\n elif isinstance(f, mongoengine.GeoPointField):\r\n result = tastypie_fields.ListField\r\n elif isinstance(f, mongoengine.ObjectIdField):\r\n result = tastypie_mongoengine_fields.ObjectId\r\n\r\n return result"
]
| [
"0.78950274",
"0.5761999",
"0.5541615",
"0.5541615",
"0.52907443",
"0.52887756",
"0.5265657",
"0.5236816",
"0.5213454",
"0.5026723",
"0.50044376",
"0.49397483",
"0.48385435",
"0.47717372",
"0.4744608",
"0.4740956",
"0.47128138",
"0.47058564",
"0.46687573",
"0.46597868",
"0.46553326",
"0.46483496",
"0.46401498",
"0.46338525",
"0.46105587",
"0.46051925",
"0.45936942",
"0.45847648",
"0.45767176",
"0.45654297"
]
| 0.836405 | 0 |
Given a column type (e.g. 'BLOB', 'VARCHAR'), returns the SQL necessary to cast it before using it in a WHERE statement. Note that the resulting string should contain a '%s' placeholder for the column being searched against. | def field_cast_sql(self, db_type, internal_type=None):
if db_type and db_type.lower() == 'blob':
return 'CAST(%s as nvarchar)'
return '%s' | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def python_type_to_sql_type(_python_type):\n if _python_type == str:\n return 'string'\n elif _python_type == bytes:\n return \"blob\"\n elif _python_type == float:\n return \"float\"\n elif _python_type == int:\n return \"integer\"\n elif _python_type == datetime:\n return \"datetime\"\n elif _python_type == bool:\n return \"boolean\"\n else:\n raise Exception(\"python_type_to_sql_type: _type_code \\\"\" + str(_python_type) + \"\\\"not supported\")",
"def sql_to_python_type(sql_type):\n if sql_type.startswith(\"CHAR(\"):\n return str\n\n if sql_type.startswith(\"INTERVAL\"):\n # Calcite will always convert to milliseconds\n # no matter what the actual interval is\n # I am not sure if this breaks somewhere,\n # but so far it works\n return lambda x: timedelta(milliseconds=int(x))\n\n if sql_type.startswith(\"DECIMAL(\"):\n # We use np.float64 always\n return np.float64\n\n try:\n return _SQL_TO_PYTHON[sql_type]\n except KeyError: # pragma: no cover\n raise NotImplementedError(f\"The SQL type {sql_type} is not implemented (yet)\")",
"def _column_type(t):\n return 'bigint' if datastore_type[t].numeric else 'text'",
"def python_to_sql_type(python_type):\n\n try:\n return _PYTHON_TO_SQL[python_type.type]\n except KeyError: # pragma: no cover\n raise NotImplementedError(\n f\"The python type {python_type} is not implemented (yet)\"\n )",
"def sql_type(dtype):\n if dtype.kind in (\"i\",\"u\",\"f\"):\n # It's a numeric type\n if dtype == np.int32:\n return \"integer\"\n elif dtype == np.int64:\n return \"bigint\"\n elif dtype == np.float32:\n return \"real\"\n elif dtype == np.float64:\n return \"float\"\n else:\n raise ValueError(\"Unsupported data type \"+str(dtype))\n elif dtype.kind == \"S\":\n # It's a string\n # Note: this assumes 1 byte = 1 character!\n return (\"char(%d)\" % dtype.itemsize)\n else:\n # Not numeric or string, don't know what to do with this!\n raise ValueError(\"Unsupported data type \"+str(dtype))",
"def mysql_type_to_sql_type(_type_code):\n if _type_code in (\n MySQL_VARCHAR,\n MySQL_VAR_STRING,\n MySQL_STRING\n ):\n return 'string'\n elif _type_code in (\n MySQL_TINY_BLOB,\n MySQL_MEDIUM_BLOB,\n MySQL_LONG_BLOB,\n ):\n return \"blob\"\n elif _type_code in (\n MySQL_BLOB,\n ):\n \"\"\"TODO: This is obviously not correct, but PyMySQL seems to return MySQL_BLOB for common strings.\"\"\"\n return \"string\"\n elif _type_code in (\n MySQL_DECIMAL,\n MySQL_FLOAT,\n MySQL_DOUBLE,\n MySQL_NEWDECIMAL\n ):\n return \"float\"\n elif _type_code in (\n MySQL_TINY,\n MySQL_SHORT,\n MySQL_LONG,\n MySQL_LONGLONG,\n MySQL_INT24,\n MySQL_BIT\n ):\n return \"integer\"\n elif _type_code in (\n MySQL_DATE,\n MySQL_TIME,\n MySQL_DATETIME,\n MySQL_YEAR,\n MySQL_NEWDATE,\n MySQL_TIMESTAMP):\n return \"timestamp\"\n else:\n raise Exception(\"mysql_type_to_sql_type: _type_code \\\"\" + str(_type_code) + \"\\\"not supported\")",
"def get_column_type_from_whitelist(column_name):\n for key, whitelist in mappings.COLUMN_NAMES.items():\n if column_name.strip().lower() in whitelist:\n return key\n return None",
"def get_postgres_column_type(\n self, tablename: str, column_name: str\n ) -> str:\n return self.get_postgres_column_definition(\n tablename=tablename, column_name=column_name\n ).data_type.upper()",
"def get_column_type(self, table, column):\n self._check_connection()\n cols = self.get_table_columns_list(table)\n for c in cols:\n if c[0] == column:\n return c[1]\n raise DBException(\n \"column %s were not found in table %s\" %\n (column, table))",
"def get_column_type(cls, **kwargs: Any) -> Any:\n return sqlalchemy.Text()",
"def _patched_cast(self, db_type):\n # see https://code.djangoproject.com/ticket/11580\n if db_type and db_type.endswith('LOB'):\n return \"DBMS_LOB.SUBSTR(%s,2000,1)\"\n else:\n return \"%s\"",
"def _quoter(self, col) :\n\n j = self.cols.index(col)\n if self.types[j] == 'TEXT' :\n return '\"%s\"'\n else :\n return '%s'",
"def get_column_type(\n self,\n table: exp.Table | str,\n column: exp.Column,\n dialect: DialectType = None,\n normalize: t.Optional[bool] = None,\n ) -> exp.DataType:",
"def get_column_type(type_name: str) -> object:\n raise NotImplementedError",
"def _get_sql_column(column_attr):\n col_def_sql = ' \"%s\"' % column_attr['name']\n if column_attr['extra'] == 'auto_increment':\n col_def_sql += ' SERIAL'\n else:\n col_def_sql += ' %s' % column_attr['type'].upper()\n\n if column_attr['size'] and column_attr['type'] not in ['text', 'bytea', 'smallint', 'decimal', 'set']:\n col_def_sql += '(' + str(column_attr['size']) + ')'\n if not column_attr['nullable']:\n col_def_sql += ' NOT NULL'\n if column_attr['isPk']:\n col_def_sql += ' PRIMARY KEY'\n if column_attr['default'] is not None:\n if column_attr['default'].replace(\".\", \"\", 1).isdigit():\n if column_attr['type'] == 'boolean':\n col_def_sql += ' DEFAULT ' + ('true' if column_attr['default'] != '0' else 'false')\n else:\n col_def_sql += ' DEFAULT ' + column_attr['default']\n elif column_attr['default'] == 'current_timestamp':\n col_def_sql += ' DEFAULT ' + column_attr['default']\n elif column_attr['default'].lower() == \"true\" or column_attr['default'].lower() == \"false\":\n col_def_sql += ' DEFAULT ' + column_attr['default'].upper()\n else:\n col_def_sql += \" DEFAULT U&'%s'\" % column_attr['default']\n\n if 'isPkC' in column_attr:\n col_def_sql = 'PRIMARY KEY (' + ','.join(column_attr['isPkC']) + ')'\n\n return col_def_sql",
"def cast_type(cdm_column_type, value):\n if cdm_column_type in ('integer', 'int64'):\n # Regex check only relevant if submission dtype is 'object'\n if not re.match(SCIENTIFIC_NOTATION_REGEX, str(value)):\n return int(value)\n if cdm_column_type in ('character varying', 'text', 'string'):\n return str(value)\n if cdm_column_type == 'numeric':\n return float(value)\n if cdm_column_type == 'float' and isinstance(value, float):\n return value\n if cdm_column_type == 'date' and isinstance(value, datetime.date):\n return value\n if cdm_column_type == 'timestamp' and isinstance(\n value, datetime.datetime): # do not do datetime.datetime\n return value",
"def _valid_column(column_name):\n return str(column_name)",
"def dtype_to_db_type(dtype):\n\n candidates = [dtype]\n\n # if we get a single character code we should normalize to a NumPy type\n if dtype in np.typeDict:\n dtype = np.typeDict[dtype]\n candidates.append(dtype.__name__)\n\n #if we get a dtype object i.e. dtype('int16'), then pull out its name\n if hasattr(dtype, 'name'):\n candidates.append(dtype.name)\n\n # for a dtype like dtype('S3') need to access dtype.type.__name__ to get 'string_'\n if hasattr(dtype, 'type'):\n candidates.append(dtype.type.__name__)\n\n # convert Python types by adding their type's name\n if hasattr(dtype, '__name__'):\n candidates.append(dtype.__name__)\n\n candidates.append(str(dtype))\n\n for candidate_key in candidates:\n if candidate_key in _dtype_to_db_type_dict:\n return _dtype_to_db_type_dict[candidate_key]\n\n assert False, \"Failed to find sqlite3 column type for %s\" % dtype",
"def py_type_to_sql_column(model, member, cls, **kwargs):\n if issubclass(cls, JSONModel):\n return sa.JSON(**kwargs)\n elif issubclass(cls, Model):\n name = f'{cls.__model__}.{cls.__pk__}'\n cls.__backrefs__.add((model, member))\n\n # Determine the type of the foreign key\n column = create_table_column(cls, cls._id)\n return (column.type, sa.ForeignKey(name, **kwargs))\n elif issubclass(cls, str):\n return sa.String(**kwargs)\n elif issubclass(cls, int):\n return sa.Integer(**kwargs)\n elif issubclass(cls, float):\n return sa.Float(**kwargs)\n elif issubclass(cls, dict):\n return sa.JSON(**kwargs)\n elif issubclass(cls, (tuple, list)):\n return sa.ARRAY(**kwargs)\n elif issubclass(cls, datetime.datetime):\n return sa.DateTime(**kwargs)\n elif issubclass(cls, datetime.date):\n return sa.Date(**kwargs)\n elif issubclass(cls, datetime.time):\n return sa.Time(**kwargs)\n elif issubclass(cls, datetime.timedelta):\n return sa.Interval(**kwargs)\n elif issubclass(cls, (bytes, bytearray)):\n return sa.LargeBinary(**kwargs)\n elif issubclass(cls, Decimal):\n return sa.Numeric(**kwargs)\n raise NotImplementedError(\n f\"A type for {member.name} of {model} ({cls}) could not be \"\n f\"determined automatically, please specify it manually by tagging it \"\n f\"with .tag(column=<sqlalchemy column>) or set `store=False`\")",
"def _normalize_column(column):\n if not isinstance(column, str):\n msg = \"expected column of type 'str', got {0!r} instead\"\n raise TypeError(msg.format(column.__class__.__name__))\n column = column.strip()\n column = column.replace('\"', '\"\"') # Escape quotes.\n if column == '':\n column = '_empty_'\n return '\"' + column + '\"'",
"def _normalize_column(column):\n if not isinstance(column, str):\n msg = \"expected column of type 'str', got {0!r} instead\"\n raise TypeError(msg.format(column.__class__.__name__))\n column = column.strip()\n column = column.replace('\"', '\"\"') # Escape quotes.\n if column == '':\n column = '_empty_'\n return '\"' + column + '\"'",
"def _expected_type(column_definition):\n try:\n expected_type = column_definition.type.python_type\n except NotImplementedError:\n # Custom column definitions can lack a type.\n # We use custom column definitions for primary keys of type int.\n expected_type = int\n if issubclass(expected_type, Enum):\n # This is an Enum type column, I'm making the simplifying assumption\n # that those will always be string type\n expected_type = str\n return expected_type",
"def get_column_type(cls, **kwargs: Any) -> Any: # pragma no cover\n return None",
"def get_column_type(cls, **kwargs: Any) -> Any:\n return sqlalchemy.BigInteger()",
"def _handle_sql_types(value):\n if type(value) is datetime:\n return value.isoformat()\n return str(value)",
"def escapeinput_data_for_sql(self, value, sql_type):\n\t\t# print value\n\t\tvalue = value.replace('\\'', '\"')\n\t\tvalue = value.replace(',', '_')\n\t\t\n\t\tif len(value) == 0:\n\t\t\tif sql_type in ('BIGINT', 'INTEGER', 'FLOAT', 'DOUBLE'):\n\t\t\t\treturn '0'\n\t\t\tif sql_type == 'NVARCHAR':\n\t\t\t\treturn '\\'\\''\n\t\telse:\n\t\t\tif sql_type in ('BIGINT', 'INTEGER', 'FLOAT', 'DOUBLE'):\n\t\t\t\t# return value\n\t\t\t\treturn '\\'' + value + '\\''\n\t\t\tif sql_type == 'NVARCHAR':\n\t\t\t\treturn '\\'' + value + '\\''\n\n\t\treturn '\\'' + value + '\\''",
"def sql_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"sql_type\")",
"def sql_type(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"sql_type\")",
"def cast(elem, psql_type):\n if psql_type == 'real':\n return float(format(elem, '.6g'))\n elif psql_type == 'double precision':\n return float(format(elem, '.15g'))\n elif psql_type == 'timestamp':\n if isinstance(elem, pd.Timestamp):\n return elem.to_pydatetime()\n else:\n return elem\n elif psql_type == 'text':\n if type(elem) == float:\n return \"NaN\"\n return str(elem)\n else:\n return elem",
"def get_type_name(column_data: Dict) -> Text:\n column_type = str(column_data[\"db_type\"]).lower()\n if \"text\" in column_type:\n return \"text\"\n elif \"json\" in column_type:\n return \"json\"\n elif not isinstance(column_data[\"type\"], tuple):\n type_ = column_data[\"type\"].__name__\n else:\n type_ = f'array of {column_data[\"type\"][1]}'\n return type_"
]
| [
"0.6239577",
"0.6223429",
"0.61100966",
"0.60024476",
"0.5870205",
"0.58654475",
"0.57926273",
"0.5771992",
"0.5766482",
"0.5717607",
"0.56727433",
"0.56491125",
"0.5609501",
"0.55463326",
"0.5472974",
"0.5444695",
"0.5438208",
"0.5438205",
"0.54337585",
"0.5425272",
"0.5425272",
"0.5408477",
"0.5388643",
"0.5304204",
"0.529244",
"0.5275298",
"0.5265924",
"0.5265924",
"0.52572435",
"0.5244941"
]
| 0.6790866 | 0 |
Given a cursor object that has just performed an INSERT/OUTPUT statement into a table that has an autoincrementing ID, returns the newly created ID. | def fetch_returned_insert_id(self, cursor):
return cursor.fetchone()[0] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def returnInsertID(self):\n try:\n return self.cursor.lastrowid\n except Exception as e:\n print \"Error getting insert id %s \" % e",
"def returnInsertID(self):\n try:\n return self.db.insert_id()\n except MySQLdb.Error as e:\n print \"Error getting insert id %s \" % e",
"def insert_get_last_id(self, sql: str) -> int:\n with self.connection.cursor() as cursor:\n self.connection.ping(reconnect=True)\n cursor.execute(sql)\n last_id = cursor.lastrowid\n self.connection.commit()\n return last_id",
"def _get_event_id(oracle_cursor): # -> (int)\n\n oracle_cursor.execute(\"\"\"select event_seq.NEXTVAL from dual\"\"\")\n row = oracle_cursor.fetchone()\n\n event_id = int(row[0])\n\n oracle_cursor.execute(\"\"\"\n INSERT INTO event (event_id, event_type_cd, event_timestamp)\n VALUES (:event_id, 'SYST', sysdate)\n \"\"\",\n event_id=event_id\n )\n\n return event_id",
"def _get_event_id(oracle_cursor): # -> (int)\r\n\r\n oracle_cursor.execute(\"\"\"select event_seq.NEXTVAL from dual\"\"\")\r\n row = oracle_cursor.fetchone()\r\n\r\n event_id = int(row[0])\r\n\r\n oracle_cursor.execute(\"\"\"\r\n INSERT INTO event (event_id, event_type_cd, event_timestamp)\r\n VALUES (:event_id, 'SYST', sysdate)\r\n \"\"\",\r\n event_id=event_id\r\n )\r\n\r\n return event_id",
"def insert(self,connector):\n c= connector.cursor()\n # print str_cmd\n snew = buildInsert(self,\"SETUP\")\n# print snew\n # print type(snew)\n #print type(str_cmd)\n c.execute(snew)\n c.execute(\"select MAX(ID) from SETUP\")\n for row in c:\n lastid=row[0]\n break\n self.ID=lastid\n connector.commit()\n return lastid",
"def insert(self,connector):\n c= connector.cursor()\n # print str_cmd\n snew = buildInsert(self,\"DCC\")\n# print snew\n # print type(snew)\n #print type(str_cmd)\n c.execute(snew)\n c.execute(\"select MAX(ID) from DCC\")\n for row in c:\n lastid=row[0]\n break\n connector.commit()\n return lastid",
"def insert(self,connector):\n c= connector.cursor()\n # print str_cmd\n snew = buildInsert(self,\"DIF\")\n# print snew\n # print type(snew)\n #print type(str_cmd)\n c.execute(snew)\n c.execute(\"select MAX(ID) from DIF\")\n for row in c:\n lastid=row[0]\n break\n connector.commit()\n return lastid",
"def post_exec(self):\n\n if self.compiled.isinsert:\n tbl = self.compiled.statement.table\n if not hasattr(tbl, 'has_sequence'):\n tbl.has_sequence = None\n for column in tbl.c:\n if getattr(column, 'sequence', False) or self._has_implicit_sequence(column):\n tbl.has_sequence = column\n break\n\n if bool(tbl.has_sequence):\n # TBD: for some reason _last_inserted_ids doesn't exist here\n # (but it does at corresponding point in mssql???)\n #if not len(self._last_inserted_ids) or self._last_inserted_ids[0] is None:\n self.cursor.execute(\"SELECT @@identity AS lastrowid\")\n row = self.cursor.fetchone()\n self._last_inserted_ids = [int(row[0])] #+ self._last_inserted_ids[1:]\n # print \"LAST ROW ID\", self._last_inserted_ids\n\n super(AccessExecutionContext, self).post_exec()",
"def insert(self,connector):\n c= connector.cursor()\n # print str_cmd\n snew = buildInsert(self,\"HR2\")\n# print snew\n # print type(snew)\n #print type(str_cmd)\n c.execute(snew)\n c.execute(\"select MAX(ID) from HR2\")\n for row in c:\n lastid=row[0]\n break\n self.ID=lastid\n connector.commit()\n return lastid",
"def insert(self,connector):\n c= connector.cursor()\n # print str_cmd\n snew = buildInsert(self,\"LDA\")\n# print snew\n # print type(snew)\n #print type(str_cmd)\n c.execute(snew)\n c.execute(\"select MAX(ID) from LDA\")\n for row in c:\n lastid=row[0]\n break\n self.ID=lastid\n connector.commit()\n return lastid",
"def getLastId(self,table):\n\tif self.dbType==\"sqlite\":\n\t query = \"SELECT LAST_INSERT_ROWID() FROM %s LIMIT 1\"%table\n\telse:\n\t query = \"SELECT LAST_INSERT_ID() FROM %s\"%table\n\tlocaltime= \"%s \"%time.strftime(\"%H:%M:%S\",time.localtime())\n\tpid = \"%s \"%os.getpid()\n self.log.write(pid+localtime+query+'\\n')\n\t# since SQLite locks a whole table we use separate cursor to get\n\t# information while transaction still in progress\n\tcur = self.db.cursor()\n\tcur.execute(query)\n\ttup = cur.fetchone()\n\tid = tup[0]\n\tcur.close()\n# tup = self.fetchOne(query)\n\tid = tup[0]\n return id",
"def insert_null(self, table_name: str) -> int:\n sql = 'INSERT INTO ' + table_name + '(ID) VALUES(NULL)'\n self.cursor.execute(sql)\n self.connection.commit()\n\n return self.cursor.lastrowid",
"def __getCursorID(self):\n self.__cursor_lock.acquire()\n self.__cursor_id += 1\n cursor_id = self.__cursor_id\n self.__cursor_lock.release()\n return cursor_id",
"def _get_next_event_id(oracle_cursor, transaction_type): # -> (int)\n\n oracle_cursor.execute(\"\"\"select event_seq.NEXTVAL from dual\"\"\")\n row = oracle_cursor.fetchone()\n\n event_id = int(row[0])\n\n oracle_cursor.execute(\"\"\"\n INSERT INTO event (event_id, event_type_cd, event_timestamp)\n VALUES (:event_id, :transaction_type, sysdate)\n \"\"\",\n event_id=event_id,\n transaction_type=transaction_type\n )\n\n return event_id",
"def insert_status(conn, table, sql_param):\r\n header_list = get_header(conn, table)\r\n header_list.pop(0)\r\n insert = f\"INSERT INTO {table} ({header_list[0]}, {header_list[1]}, {header_list[2]}) VALUES (%s, %s, %s) RETURNING id\"\r\n cursor = conn.cursor()\r\n cursor.execute(insert, sql_param)\r\n conn.commit()\r\n returned_id = cursor.fetchone()\r\n cursor.close()\r\n return returned_id",
"def before_insert(mapper, conn, target):\n if target.sequence_id is None:\n sql = text(\n '''SELECT max(t_sequence_id)+1 FROM tables WHERE t_d_id = :did''')\n\n max_id, = conn.execute(sql, did=target.d_id).fetchone()\n\n if not max_id:\n max_id = 1\n\n target.sequence_id = max_id\n\n Table.before_update(mapper, conn, target)",
"def get_entry_id(self, table, field, value):\n cur = self.con.execute(\"select rowid from %s where %s='%s'\" % (table, field, value))\n res = cur.fetchone()\n # Check if id exists\n if res is None:\n # Add to table if id does not exist\n cur = self.con.execute(\"insert into %s (%s) values ('%s')\" % (table, field, value))\n return cur.lastrowid\n return res[0]",
"def get_next_id(self):\n con = self.c._connect()\n last_id = self.c.get_last_id(con.cursor())\n con.close()\n return last_id + 1",
"def create_person(conn, person):\n sql = ''' INSERT INTO person(firstname,lastname)\n VALUES(?,?) '''\n cur = conn.cursor() # cursor object\n cur.execute(sql, person)\n return cur.lastrowid # returns the row ID of the cursor object, person ID",
"def last_id(self):\n rows = self.db.query(\"\"\"\n SELECT LAST_INSERT_ID() AS id\n \"\"\")\n for row in rows:\n return row['id']",
"def insert_registryRecord(cursor, transaction_id):\n last_row_id = -1;\n username = read_db_conf(DB_CONF_FILE, DB_CONF_SECTION)['user']\n query = \"\"\"\n INSERT INTO registry(\n transaction_id,\n username\n )\n VALUES( %s, %s )\n \"\"\"\n \n try:\n cursor.execute(query, (transaction_id, username))\n last_row_id = cursor.lastrowid\n except DBError as err:\n print(err)\n finally:\n return last_row_id",
"def _id_maybe_add(self, get_sql, add_sql, params=list()):\n try:\n self.cur.execute(get_sql, params)\n return self.cur.fetchone()[0]\n except TypeError:\n self.cur.execute(add_sql, params)\n return self.cur.lastrowid",
"def insert_record(self):\n insert_resource = self.client.resource(api_path=\"/table/{insert_table}\".format(insert_table=self.insert_table))\n result = insert_resource.create(payload=self.new_record_payload)\n sys_id = result['sys_id']\n return sys_id",
"def create_student(conn, student):\n\n sql = ('''INSERT INTO students(last_name, first_name, student_id) VALUES(?,?,?)''')\n c = conn.cursor()\n c.execute(sql, student)\n conn.commit()\n return c.lastrowid",
"def insert(q, *params):\n db = Database()\n db.cur.execute(q, *params)\n ret_id = db.cur.lastrowid\n db.con.close()\n return ret_id",
"def before_insert(mapper, conn, target):\n\n #from identity import ObjectNumber\n #assert not target.fk_vid or not ObjectNumber.parse(target.fk_vid).revision\n\n if target.sequence_id is None:\n # In case this happens in multi-process mode\n conn.execute(\"BEGIN IMMEDIATE\")\n sql = text(\n '''SELECT max(c_sequence_id)+1 FROM columns WHERE c_t_id = :tid''')\n\n max_id, = conn.execute(sql, tid=target.t_id).fetchone()\n\n if not max_id:\n max_id = 1\n\n target.sequence_id = max_id\n\n Column.before_update(mapper, conn, target)",
"def _execute_insert(self, insertQuery, insertValues):\n with self as plasticDB:\n cursor = plasticDB.connection.cursor()\n cursor.execute(insertQuery, insertValues)\n return cursor.lastrowid",
"def create_result(conn, result):\n sql = ''' INSERT INTO results(url,title) VALUES(?,?) '''\n cur = conn.cursor()\n cur.execute(sql, result)\n rowid = cur.lastrowid\n return rowid",
"def get_lastid(table, conn):\n s = select([table.c.id])\n result = conn.execute(s)\n allids = result.fetchall()\n idlist = []\n for idx in allids:\n if isinstance(idx.values()[0], int):\n idlist.append(idx.values()[0])\n lastid = max(idlist)\n return lastid + 1"
]
| [
"0.72980887",
"0.6936921",
"0.6831432",
"0.6586946",
"0.65387934",
"0.6439575",
"0.6336456",
"0.63056076",
"0.627088",
"0.6255386",
"0.6252701",
"0.62023705",
"0.6180922",
"0.6173137",
"0.6164245",
"0.6139721",
"0.61370486",
"0.6100954",
"0.60723114",
"0.6048889",
"0.6044164",
"0.59689295",
"0.58997446",
"0.5857971",
"0.578211",
"0.5767087",
"0.5762646",
"0.5746683",
"0.5730416",
"0.57229674"
]
| 0.7289031 | 1 |
Returns a quoted version of the given table, index or column name. Does not quote the given name if it's already been quoted. | def quote_name(self, name):
if name.startswith(self.left_sql_quote) and name.endswith(self.right_sql_quote):
return name # Quoting once is enough.
return '%s%s%s' % (self.left_sql_quote, name, self.right_sql_quote) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def quote_dotted(\n name: Union[\"quoted_name\", str], quote: functools.partial\n) -> Union[\"quoted_name\", str]:\n\n if isinstance(name, quoted_name):\n return quote(name)\n result = \".\".join([quote(x) for x in name.split(\".\")])\n return result",
"def sql_for_tablespace(self, tablespace, inline=False):\n return \"ON %s\" % self.quote_name(tablespace)",
"def quote_name(self, name):\n name = re.sub('-', '', name)\n if name.startswith('\"') and name.endswith('\"'):\n return name\n return '\"%s\"' % (name,)",
"def format_column(self, column, use_table=False, name=None, table_name=None):\n if name is None:\n name = column.name\n if not getattr(column, 'is_literal', False):\n if use_table:\n return self.format_table(column.table, use_schema=False, name=table_name) + \".\" + self.__generic_obj_format(column, name)\n else:\n return self.__generic_obj_format(column, name)\n else:\n # literal textual elements get stuck into ColumnClause alot, which shouldnt get quoted\n if use_table:\n return self.format_table(column.table, use_schema=False, name=table_name) + \".\" + name\n else:\n return name",
"def _quoter(self, col) :\n\n j = self.cols.index(col)\n if self.types[j] == 'TEXT' :\n return '\"%s\"'\n else :\n return '%s'",
"def getquoted(self): # real signature unknown; restored from __doc__\n pass",
"def quote(value):\n return DoubleQuotedScalarString(value)",
"def col_name(col):\n\n if isinstance(col, str):\n return col\n return col.__name__",
"def column_name(name):\n # Only needs exceptions to standard token cleanup\n column_map = {\n \"line#\" : \"ignore\",\n \"date\" : \"timestamp\",\n \"rh\" : \"humidity\",\n \"par\" : \"par_ue\"\n }\n\n if name in column_map:\n return column_map[name]\n \n return name",
"def _quote(v):\n return '\"' + v + '\"' if ' ' in v else v",
"def encodeColumnName(self, column):\r\n return '\"{}\"'.format(column)",
"def _table_name(self, name: AnyStr) -> bytes:\n name = ensure_bytes(name)\n if self.table_prefix is None:\n return name\n return self.table_prefix + self.table_prefix_separator + name",
"def quoted_column_names_reading_template(self, specify_column_names):\n self.prepare()\n self.session.execute(\"\"\"\n CREATE TABLE testquoted (\n \"IdNumber\" int PRIMARY KEY,\n \"select\" text\n )\"\"\")\n\n data = [[1, 'no'], [2, 'Yes'],\n [3, 'True'], [4, 'false']]\n\n tempfile = self.get_temp_file()\n write_rows_to_csv(tempfile.name, data)\n\n stmt = (\"\"\"COPY ks.testquoted (\"IdNumber\", \"select\") FROM '{name}'\"\"\"\n if specify_column_names else\n \"\"\"COPY ks.testquoted FROM '{name}'\"\"\").format(name=tempfile.name)\n\n self.run_cqlsh(stmt)\n\n out, err, _ = self.run_cqlsh(cmds=\"SELECT * FROM ks.testquoted\")\n results = self.parse_cqlsh_query(out=out, num_cols=2)\n\n self.assertCsvResultEqual(tempfile.name, results, 'testquoted')",
"def csv_quoting_examples():\n name_table = read_csv_file(\"name_table.csv\", \",\")\n name_table.append([1, 2, 3])\n write_csv_file(name_table, \"name_table_minimal.csv\", \",\", csv.QUOTE_MINIMAL)\n write_csv_file(name_table, \"name_table_all.csv\", \",\", csv.QUOTE_ALL)\n write_csv_file(name_table, \"name_table_nonnumeric.csv\", \",\", csv.QUOTE_NONNUMERIC)\n #write_csv_file(name_table, \"name_table_none.csv\", \",\", csv.QUOTE_NONE) # no escapechar is set for lots of quotes",
"def csv_quoting_examples():\n name_table = read_csv_file(\"name_table.csv\", \",\")\n name_table.append([1, 2, 3])\n write_csv_file(name_table, \"name_table_minimal.csv\", \",\", csv.QUOTE_MINIMAL)\n write_csv_file(name_table, \"name_table_all.csv\", \",\", csv.QUOTE_ALL)\n write_csv_file(name_table, \"name_table_nonnumeric.csv\", \",\", csv.QUOTE_NONNUMERIC)\n #write_csv_file(name_table, \"name_table_none.csv\", \",\", csv.QUOTE_NONE) # no escapechar is set for lots of quotes",
"def test_quoted_column_names_writing(self):\n self.prepare()\n self.session.execute(\"\"\"\n CREATE TABLE testquoted (\n \"IdNumber\" int PRIMARY KEY,\n \"select\" text\n )\"\"\")\n\n data = [[1, 'no'], [2, 'Yes'],\n [3, 'True'], [4, 'false']]\n\n insert_statement = self.session.prepare(\"\"\"INSERT INTO testquoted (\"IdNumber\", \"select\") VALUES (?, ?)\"\"\")\n execute_concurrent_with_args(self.session, insert_statement, data)\n\n for specify_column_names in (True, False):\n tempfile = self.get_temp_file()\n stmt = (\"\"\"COPY ks.testquoted (\"IdNumber\", \"select\") TO '{name}'\"\"\"\n if specify_column_names else\n \"\"\"COPY ks.testquoted TO '{name}'\"\"\").format(name=tempfile.name)\n self.run_cqlsh(stmt)\n\n reference_file = self.get_temp_file()\n write_rows_to_csv(reference_file.name, data)\n\n assert_csvs_items_equal(tempfile.name, reference_file.name)",
"def get_table_query_string(self) -> str:\n if self.database and self.table:\n return f'\"{self.database}\".\"{self.schema}\".\"{self.table}\"'\n elif self.table:\n return f'\"{self.table}\"'\n else:\n return f\"({self.query})\"",
"def _qualify(table, cols):\n return ', '.join(['{}.{}'.format(table, c) for c in cols])",
"def quoted(val: str) -> str:\n return f'\"{val}\"' if ' ' in val else val",
"def format_colname(name):\n colnames = [\n \"AV\",\n \"RV\",\n \"EBV\",\n \"CAV1\",\n \"CAV2\",\n \"CAV3\",\n \"CAV4\",\n \"C1\",\n \"C2\",\n \"C3\",\n \"C4\",\n \"x_o\",\n \"gamma\",\n \"bump_area\",\n \"fh2\",\n \"nhtot\",\n \"nh2\",\n \"nhi\",\n \"NH_AV\",\n \"NH_EBV\",\n ]\n plotnames = [\n \"$A(V)$\",\n \"$R(V)$\",\n \"$E(B-V)$\",\n \"$C^{A(V)}_1$\",\n \"$C^{A(V)}_2$\",\n \"$C^{A(V)}_3$\",\n \"$C^{A(V)}_4$\",\n \"$C_1$\",\n \"$C_2$\",\n \"$C_3$\",\n \"$C_4$\",\n \"$x_o$\",\n r\"$\\gamma$\",\n r\"$\\pi C^{A(V)}_3 / 2 \\gamma$\",\n \"$f(H_2)$\",\n \"$N(H)$\",\n \"$N(H_2)$\",\n \"$N(HI)$\",\n \"$N(H)/A(V)$\",\n \"$N(H)/E(B-V)$\",\n ]\n dic_pairs = dict(zip(colnames, plotnames))\n\n out_name = name\n if name[:3] == \"log\":\n out_name = r\"$\\log (\" + name[3:].upper() + \")$\"\n elif name in dic_pairs.keys():\n out_name = dic_pairs[name]\n\n return out_name",
"def sql(self, quoted=True):\n if quoted:\n return '\"%s\"' % MySQLdb.escape_string(str(self.data))\n else:\n return '%s' % MySQLdb.escape_string(str(self.data))",
"def create_delete_code(name_of_table, where_col = None, where_equals_value = None):\n sql_str = ''\n try:\n if where_col is None or where_equals_value is None:\n raise Exception('You must provide a where column and an equals value')\n else:\n sql_str = 'DELETE FROM ' + str(name_of_table).strip()\n sql_str += '\\nWHERE ' + where_col + \" = \" + str(where_equals_value).strip()\n except Exception as e:\n raise Exception('Error in create_delete_code(): ' + e.__str__())\n return sql_str",
"def quote(*a, **kw):\n return quote(*a, **kw)",
"def colNames_string(self):\n # SELECT column_name FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'some_table';\n return \"SELECT column_name FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = \"",
"def aliased_for_cypher(self):\n return '{} AS {}'.format(self.for_cypher(), self.alias_for_cypher)",
"def key(name):\n return (\n Literal(name) ^\n (sep('\\'') + Literal(name) + sep('\\'')) ^\n (sep('\"') + Literal(name) + sep('\"')))",
"def test_quoted_column_names_reading_specify_names(self):\n self.quoted_column_names_reading_template(specify_column_names=True)",
"def quote_spaces(arg):\n if ' ' in arg or '\\t' in arg:\n return '\"%s\"' % arg\n else:\n return str(arg)",
"def get_sql(self, table_name):\n\t\tcolNames,sql = self._generate_sql_parts(table_name)\n\t\treturn get_query(table_name, colNames, sql)",
"def test_quoted_column_names_reading_dont_specify_names(self):\n self.quoted_column_names_reading_template(specify_column_names=False)"
]
| [
"0.60627484",
"0.5968109",
"0.5906213",
"0.5800245",
"0.5722166",
"0.5646786",
"0.55683017",
"0.5497559",
"0.54963726",
"0.54942673",
"0.54909176",
"0.5466382",
"0.5427123",
"0.54071015",
"0.54071015",
"0.5401644",
"0.53815466",
"0.5361875",
"0.53616196",
"0.5352527",
"0.5338262",
"0.53378344",
"0.532958",
"0.53041476",
"0.5269125",
"0.522711",
"0.5220684",
"0.521612",
"0.52059877",
"0.515493"
]
| 0.66145134 | 0 |
Returns a SQL expression that returns a random value. | def random_function_sql(self):
return "RAND()" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_random_query() -> str:\n\n # Generate a list of random columns.\n columns = get_random_columns()\n arithmetic_exprs = [get_random_arithmetic_expr()\n for _ in range(random.randint(0, 2))]\n arithmetic_exprs = [\n f\"{expr[0]}{expr[1]}{expr[2]}\" for expr in arithmetic_exprs]\n columns.extend(arithmetic_exprs)\n columns = [f\"\\\"{column}\\\"\" for column in columns]\n\n # Generate a nested query?\n if get_random_bool():\n inner_q, new_columns = _get_random_query(columns, \"t\")\n q, _ = _get_random_query(new_columns, inner_q)\n else:\n q, _ = _get_random_query(columns, \"t\")\n\n return f\"{q};\"",
"def random():\n return constant(1)",
"def random():\r\n return R.NextDouble()",
"def _random_function(self, random_state):\n return random_state.rand",
"def mock_sql_rand(spark):\n\n def mock_rand():\n return 1\n\n spark.udf.register(\"rand\", mock_rand, IntegerType())\n yield\n # Afterwards, reset all temporary catalog items to undo the patching.\n spark.catalog._reset()",
"def func_randomint(exp_parameter=3, root_parameter=1):\n sup = Word.objects.all().order_by(\"-id\")[0].id - 1\n randfloat = random.uniform(0,sup)\n randint = math.floor(exp_parameter*(sup**(sup/randfloat)+1) + root_parameter(-1*math.sqrt(sup*randfloat)+sup)/2)\n return randint",
"def random() -> float:\n ...",
"def _get_random_value(self):\r\n return random.randint(1, 10)",
"def get_random_where_clause(columns: List[str]) -> str:\n\n random_columns = choose_random_columns(columns)\n conditions = [get_random_condition(column) for column in random_columns]\n conditions = [\n f\"{condition[0]} {condition[1]} {condition[2]}\" for condition in conditions]\n return \"WHERE \" + f\" {random.choice(LOGICAL_OPERATORS)} \".join(conditions)",
"def random_operation():\r\n operation = random.choice([\"add\",\"subtract\",\"multiply\",\"divide\"])\r\n return operation",
"def get_random_2(number):\n return ''.join(random.sample(field, number))",
"def get_random_user():\n return random.choice(User.query.all())",
"def getRandom( self ):\n import random \n count = Mysql.ex( \"SELECT count(*) AS c FROM `%s`.`people`;\" % self.db_name )\n the_id = random.randint( 1, count[0]['c'] )\n people = self.getByID( the_id )\n return people",
"def acquisition_function_random(gp_reward_model: BasicGPRewardModel) -> int:\n return np.random.randint(0, len(gp_reward_model.candidate_queries))",
"def RandomFunction(self):\r\n return self._random_fn",
"def get_random(number):\n s = ''\n for i in xrange(number):\n s = s + str(random.choice(field))\n return s",
"def random(self):\r\n return random.randint(1, 4)",
"def quasi_rand(values, feature, parent):\r\n seed = values[0]\r\n base = values[1]\r\n min = values[2]\r\n max = values[3]\r\n \r\n return math.floor(halton(seed, base) * (max - min + 1) + min)",
"def random_sample(self) -> Union[DeclarativeMeta, AliasedClass]:\n if self._profile_sample_query:\n return self._fetch_sample_data_with_query_object()\n\n if not self.profile_sample:\n if self._partition_details:\n return self._random_sample_for_partitioned_tables()\n\n return self.table\n\n # Add new RandomNumFn column\n rnd = self.get_sample_query()\n session_query = self.session.query(rnd)\n\n # Prepare sampled CTE\n sampled = session_query.where(rnd.c.random <= self.profile_sample).cte(\n f\"{self.table.__tablename__}_sample\"\n )\n # Assign as an alias\n return aliased(self.table, sampled)",
"def get_random_male_name ():\n return db_random_pop_default(DB_FIRST_MALE, \"John\")",
"def random_float():\n return (random() - 0.5) * 2",
"def _get_random_number_code(self):\r\n return \"str(random.randint(0, 1e9))\"",
"def randomize_value(self) -> None:",
"def get_random(self):\n base_genom = \"1\" * sum(self._size_var)\n return utils.randomise_a_string(base_genom)",
"def getRandom(self):\n return random.choice(self.table.keys())",
"def rand(self) -> ZqValue:\n\n return self(randbelow(int(self.q)))",
"def random(self):\n return self._randomize()",
"def rand(self):\n return self.State.rand()",
"def male_first():\r\n cursor.execute('SELECT name FROM male order by RANDOM() limit 1')\r\n return cursor.fetchone()[0]",
"def getRandom(self) -> int:"
]
| [
"0.71786726",
"0.6782727",
"0.656452",
"0.63139075",
"0.6294503",
"0.60930663",
"0.60634965",
"0.60624295",
"0.6041429",
"0.60379696",
"0.603706",
"0.6012389",
"0.6007451",
"0.5995923",
"0.5967113",
"0.59594595",
"0.5940236",
"0.5913576",
"0.5902262",
"0.5893886",
"0.5883036",
"0.5881327",
"0.58597875",
"0.5800406",
"0.5795525",
"0.57508963",
"0.573371",
"0.5732502",
"0.57320905",
"0.57275283"
]
| 0.8177851 | 0 |
Returns a string of the query last executed by the given cursor, with placeholders replaced with actual values. `sql` is the raw query containing placeholders, and `params` is the sequence of parameters. These are used by default, but this method exists for database backends to provide a better implementation according to their own quoting schemes. | def last_executed_query(self, cursor, sql, params):
return super(DatabaseOperations, self).last_executed_query(cursor, cursor.last_sql, cursor.last_params) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def execute(self, sql, params=None):\n if params is None:\n for statement in sql.split(';'):\n self.cursor.execute(statement)\n return\n print('string parameters get escaped to guard against sql injection')\n print(\"resulting sql is \" + \\\n sql.replace(\"?\", \"'\" + params[0].replace(\"'\", \"''\") + \"'\"))\n self.cursor.execute(sql, params)",
"def execute(self, sql, params=None):\n if params and not isinstance(params, Mapping):\n raise TypeError(\"Expected dict or other mapping object\")\n\n cursor = self.cursor()\n sql, params = utils.change_param_style(self.driver.paramstyle, sql, params)\n cursor.execute(sql, params)\n return cursor",
"def replace_params(self):\n raw_sql = self.raw_sql\n for placeholder in self.to_replace:\n newreg = re.compile(placeholder)\n repl = self.get_replacement_value(placeholder)\n if repl:\n raw_sql = newreg.sub(str(repl), raw_sql)\n self.sql = raw_sql",
"def format_sql_in_context(sql_template, param_dict, conn):\n if conn is not None:\n # Postgres, secure\n query = format_sql_postgres(sql_template, param_dict)\n return query.as_string(conn)\n\n # sqlite, may not be perfectly secure, far better than nothing\n return format_query_check_chars(sql_template, param_dict)",
"def query(sql, parameters=()):\n\tif not isinstance(parameters, (tuple, list)):\n\t\traise TypeError(parameters) # must be tuple or list\n\tconn = get_connection()\n\trows = conn.execute(sql, parameters)\n\tconn.commit()\n\treturn rows.fetchall()",
"def _sql_to_string(self, psql):\n pcon = self.__engine.raw_connection()\n try:\n pcur = pcon.cursor()\n xxx = psql.as_string(pcur)\n finally:\n pcon.close()\n return xxx",
"def mogrify_sql_statement(self, content):\n sql = content[0]\n args = content[1]\n\n if self.dbmi.__name__ == \"psycopg2\":\n if len(args) == 0:\n return sql\n else:\n if self.connected:\n try:\n return self.cursor.mogrify(sql, args)\n except Exception as exc:\n print(sql, args)\n raise exc\n else:\n self.connect()\n statement = self.cursor.mogrify(sql, args)\n self.close()\n return statement\n\n elif self.dbmi.__name__ == \"sqlite3\":\n if len(args) == 0:\n return sql\n else:\n # Unfortunately as sqlite does not support\n # the transformation of sql strings and qmarked or\n # named arguments we must make our hands dirty\n # and do it by ourself. :(\n # Doors are open for SQL injection because of the\n # limited python sqlite3 implementation!!!\n pos = 0\n count = 0\n maxcount = 100\n statement = sql\n\n while count < maxcount:\n pos = statement.find(\"?\", pos + 1)\n if pos == -1:\n break\n\n if args[count] is None:\n statement = \"%sNULL%s\" % (statement[0:pos],\n statement[pos + 1:])\n elif isinstance(args[count], (int, long)):\n statement = \"%s%d%s\" % (statement[0:pos], args[count],\n statement[pos + 1:])\n elif isinstance(args[count], float):\n statement = \"%s%f%s\" % (statement[0:pos], args[count],\n statement[pos + 1:])\n elif isinstance(args[count], datetime):\n statement = \"%s\\'%s\\'%s\" % (statement[0:pos], str(args[count]),\n statement[pos + 1:])\n else:\n # Default is a string, this works for datetime\n # objects too\n statement = \"%s\\'%s\\'%s\" % (statement[0:pos],\n str(args[count]),\n statement[pos + 1:])\n count += 1\n\n return statement",
"def query(self, paramstyle=None):\n s = []\n for x in self.items:\n if isinstance(x, SQLParam):\n x = x.get_marker(paramstyle)\n s.append(safestr(x))\n else:\n x = safestr(x)\n # automatically escape % characters in the query\n # For backward compatability, ignore escaping when the query looks already escaped\n if paramstyle in ['format', 'pyformat']:\n if '%' in x and '%%' not in x:\n x = x.replace('%', '%%')\n s.append(x)\n return \"\".join(s)",
"def execute(self, sql):\n with self.connection.cursor() as dbc:\n if sql[-1] != ';':\n sql += ';'\n dbc.execute(sql)\n self.last_row = dbc.lastrowid\n try:\n return dbc.fetchall()\n except:\n return",
"def execute(self, stmt, params=(), **kwargs):\n if kwargs:\n params = kwargs\n # do any substitutions\n if params:\n if isinstance(params, (tuple, list, set)):\n newpar = []\n for par in params:\n newpar.append(convert_PROCEDURES(par))\n params = newpar\n elif isinstance(params, dict):\n for k, val in params.items():\n params[k] = convert_PROCEDURES(val)\n # if the statement was given do any conversions\n if stmt:\n if stmt.startswith('commit'):\n return None\n exstmt = self.replacevals(stmt)\n if exstmt is None:\n return None\n\n return super(MockCursor, self).execute(convert_PROCEDURES(exstmt), params)\n return super(MockCursor, self).execute(self._stmt, params)",
"def cur_exec(cur, req, params):\n temp_binds = None\n try:\n cur.prepare(req)\n temp_binds = dict((i, params[i]) for i in cur.bindnames())\n prepare_strings(cur, temp_binds)\n cur.execute(req, **temp_binds)\n except Exception as err:\n log.warn('error req = %s', req)\n log.warn('params = %s', params)\n log.warn('temp_binds = %s', temp_binds)\n raise err",
"def sql(self):\n if not self._selects:\n raise ValueError('No SELECT statements are specified')\n\n sql = []\n param_values = []\n\n # MySQL SELECT syntax as of 5.7:\n #\n # SELECT ...\n # UNION [ALL | DISTINCT] SELECT ...\n # [UNION [ALL | DISTINCT] SELECT ...]\n\n if self.query_options:\n sql.extend(self.query_options)\n\n for stmt in self._selects:\n if isinstance(stmt, mysqlstmt.Select):\n select_sql, select_params = stmt.sql()\n stmtsql = select_sql\n if select_params is not None:\n param_values.extend(select_params)\n else:\n stmtsql = stmt\n\n if sql:\n if self._distinct is False:\n sql.append('UNION ALL')\n else:\n sql.append('UNION')\n\n sql.append(u'({0})'.format(stmtsql))\n\n if self._orderby_conds:\n sql.append('ORDER BY')\n sql.append(', '.join(self._orderby_conds))\n\n if self._limit is not None:\n row_count, offset = self._limit\n if offset > 0:\n sql.append('LIMIT {0},{1}'.format(offset, row_count))\n else:\n sql.append('LIMIT {0}'.format(row_count))\n\n if self.placeholder:\n return ' '.join(sql), param_values if param_values else None\n assert not param_values\n return ' '.join(sql)",
"def mysql(cursor, query, print_query=False):\n if isinstance(query, basestring):\n query = [query]\n for q in query:\n cursor.execute(q)\n q.replace('\\n', ' ')\n if print_query:\n print('Executed: {}'.format(q))",
"def sql_query(sql):\n cur = c.cursor()\n cur.execute(sql)\n c.commit()",
"def query(self, sql, params=None):\n\n # check if connection is alive, else reconnect\n try:\n self.cur.execute(sql, params)\n except pymysql.OperationalError:\n # todo fix timeout and reconnect\n self.connect()\n self.cur.execute(sql, params)\n except IOError:\n print('%s\\nQuery Failed!' % sql)\n raise\n\n return self.cur",
"def execute_param(cursor, query, param):\n while True:\n try:\n cursor.execute(query, param)\n break\n except Exception as e:\n print(\"Database query: {} {} {}\".format(cursor, query, param))\n print(\"Database retry reason: {}\".format(e))\n time.sleep(random.random())\n return cursor",
"def _db_execute(self, cur, sql_query):\n self.ctx.dbq_count += 1\n \n try:\n a = time.time()\n query, params = self._process_query(sql_query)\n out = cur.execute(query, params)\n b = time.time()\n except:\n if self.printing:\n print >> debug, 'ERR:', str(sql_query)\n if self.ctx.transactions:\n self.ctx.transactions[-1].rollback()\n else:\n self.ctx.rollback()\n raise\n\n if self.printing:\n print >> debug, '%s (%s): %s' % (round(b-a, 2), self.ctx.dbq_count, str(sql_query))\n return out",
"def execSqlWithNoParams(self, sql_str):\n try:\n conn = MySQLdb.connect(**self.connect)\n cursor = conn.cursor()\n result = cursor.execute(sql_str)\n conn.commit()\n conn.close()\n return result\n except Exception, e:\n logger.error(\"mysql util error:\" + str(e))",
"def PlaceHolders(sql_args):\n return ','.join('%s' for _ in sql_args)",
"def execute(cls, sql):\n cursor = cls.get_conn().cursor()\n cursor.execute(sql)\n return cursor",
"def sql_query(self, query, params=(), plain_query=False, dict_cursor=False,\n cursor_class=None, show_table=False, buffsize=1024):\n if not cursor_class:\n if dict_cursor:\n cursor_class = oursql.DictCursor\n else:\n cursor_class = oursql.Cursor\n klass = cursor_class\n\n with self._sql_lock:\n if not self._sql_conn:\n self._sql_connect()\n with self._sql_conn.cursor(klass, show_table=show_table) as cur:\n cur.execute(query, params, plain_query)\n if buffsize:\n while True:\n group = cur.fetchmany(buffsize)\n if not group:\n return\n for result in group:\n yield result\n for result in cur.fetchall():\n yield result",
"def sql_log(cls, sql_query, data=None):\n\t\t# if data exists , I replace them into `complete_sql_query`\n\t\tif data:\n\t\t\tfor key, value in data.items():\n\t\t\t\tsearch = ':{}'.format(key)\n\t\t\t\treplace = '`{}`'.format(value)\n\t\t\t\tsql_query = sql_query.replace(search, replace)\n\n\t\tprint('\\t{}'.format(sql_query))",
"def execute_sql(self, sql, params=None, commit=None):\n try:\n cursor = super().execute_sql(sql, params, commit)\n except pw.OperationalError:\n # If we're in a transaction or the database isn't\n # set to autoconnect, there's not much we can do,\n # so just continue to crash\n if not self.autoconnect or self.in_transaction():\n raise\n\n # Close the broken connector, if it's still open\n if not self.is_closed():\n self.close()\n\n # And then retry. This will re-open the DB because\n # we've just closed it and autoconnect is True.\n cursor = super().execute_sql(sql, params, commit)\n return cursor",
"def __execute_query(self, query: str, params: Union[None, tuple] = None) -> sqlite3.Cursor:\n if params is None:\n params = ()\n\n with self.conn:\n return self.conn.execute(query, params)",
"def execute_fetchall(self, sql, sql_args=None, dictcursor=False):\n # Check that sql arguments have the correct type\n self._check_sql_args(sql_args)\n # Execute the query\n try:\n pgcursor = self.get_postgres_cursor(dictcursor)\n pgcursor.execute(sql, sql_args)\n result = pgcursor.fetchall()\n self._connection.commit()\n except PostgresError, e:\n self._connection.rollback()\n raise RuntimeError(\"Error running SQL query: %s\", str(e))\n finally:\n pgcursor.close()\n return result",
"def execute(self,sql):\n # self.results = self.execute_silent(sql)\n # return self.results\n # sql = self.format_sql(sql, **kwargs)\n sql_list = sql.split(';')\n for stmt in sql_list:\n if stmt:\n stmt = stmt.strip()\n if len(stmt) < 10:\n break\n result = self.execute_silent(stmt)\n #if result is not None,It's select stmt.\n if result:\n return result",
"def make_query_string(query, params):\n query_string = query\n\n index = 1\n for param in params:\n if param:\n to_replace = \"%%param%d%%\" % index\n query_string = query_string.replace(to_replace, param)\n index += 1\n\n return query_string",
"def execute(query):\n print query\n cursor.execute(query)",
"def gen_q_stmt(name, query):\n return \"query {} `{}`;\\n\".format(name, query)",
"def execute_query(cur, conn, query):\n try:\n cur.execute(query)\n rows = cur.fetchall()\n for row in rows:\n print(row)\n conn.commit()\n except Exception as e:\n print(e)"
]
| [
"0.6624555",
"0.62363774",
"0.5814773",
"0.58146745",
"0.5813312",
"0.57900816",
"0.57399",
"0.56984067",
"0.5682154",
"0.5672558",
"0.5627463",
"0.56166804",
"0.5593889",
"0.556798",
"0.5483629",
"0.5471066",
"0.54700255",
"0.5422969",
"0.5385744",
"0.5356096",
"0.53469515",
"0.53451943",
"0.52987695",
"0.52885073",
"0.52694565",
"0.52622885",
"0.520771",
"0.5188328",
"0.5161901",
"0.5160402"
]
| 0.63921547 | 1 |
Returns the SQL for committing the given savepoint. | def savepoint_commit_sql(self, sid):
return "REMOVE SAVEPOINT %s" % self.quote_name(sid) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _do_commit(self):",
"def commit():\n get_db().commit()",
"def quickSqlWrite(self,s,v):\n self.conn()\n self.execute(s,v)\n self.commit()\n self.close()",
"def commit(cls, sql, **kwargs):\n conn = kwargs['conn']\n\n cursor = conn.cursor(dictionary=True, buffered=False)\n if CHECKS_OFF:\n sql = TURN_CHECKS_OFF + sql\n\n for _ in cursor.execute(sql, kwargs.get('args'), multi=True):\n pass\n\n cls.close(conn, cursor)",
"def save_query(self):\r\n self.conn.commit()",
"def commit(self):\n self._connection.execute_nonquery(\"sql\", \"COMMIT\", True)",
"def _do_commit(self):\n self.backend.commit()",
"def save(self):\r\n debug.write(\"[SourceRPG] Handling SQL Save\", 1)\r\n if self.path != \":memory:\":\r\n debug.write(\"Path is not in memory\", 2, False)\r\n if currentTurboMode is False:\r\n debug.write(\"We are not in turbo mode\", 2, False)\r\n self.connection.commit()\r\n debug.write(\"[SourceRPG] SQL Save handled\", 1)",
"def insert_statement() -> str:\n pass",
"def sql(self):\n return ';\\n'.join([x.sql() for x in self._statements]) + ';'",
"def commit(self):\n self.sql_session.commit()",
"def commitToDatabase(self, tiltseriesdata):\n\t\tapDisplay.printError(\"you did not create a 'commitToDatabase' function in your script\")\n\t\traise NotImplementedError()",
"def savepoint(self, id):\n self.execute(\"SAVEPOINT {}\".format(id))",
"def commit(self):",
"def Save(self) -> None:\n self.__conn.commit()",
"def commit(self):\n self.execute_sql(sql.commit)\n self.under_transaction = False",
"def _commit_now(self):\n self._database.commit()",
"def save_and_exit():\n con.commit()\n con.close()\n quit()",
"def _sql_to_string(self, psql):\n pcon = self.__engine.raw_connection()\n try:\n pcur = pcon.cursor()\n xxx = psql.as_string(pcur)\n finally:\n pcon.close()\n return xxx",
"def start_transaction_sql(self):\n return \"BEGIN TRANSACTION\"",
"def prepare_for_commit(self):",
"def commit(self):\n datastore_pre_commit.send(session=self.db.session)\n super().commit()\n datastore_post_commit.send(session=self.db.session)",
"def execute_and_commit_sql(db, sql):\n conn_string = return_connection(db)\n with pg2.connect(conn_string) as conn:\n with conn.cursor() as curs:\n curs.execute(sql)\n conn.commit()",
"def to_sql(self):\n\n if not self._action:\n self.set_action(\"select\")\n for scope in self._global_scopes.get(self.owner, {}).get(self._action, []):\n if not scope:\n continue\n\n scope(self.owner, self)\n\n grammar = self.get_grammar()\n sql = grammar.compile(self._action).to_sql()\n self.boot()\n return sql",
"def set_savepoint(self, name=None):\n if name is None:\n name = f\"{uuid.uuid1()}\"\n elif not isinstance(name, str) or len(name.strip()) == 0:\n raise ProgrammingError(\"Invalid SAVEPOINT name\")\n self._connection.execute_nonquery(\n \"sql\", f\"SAVEPOINT {quote_identifier(name)}\", True\n )\n return name",
"def savepoint(self):\n\n if not self.is_active:\n raise errors.InactiveTransaction()\n\n return Savepoint(self)",
"def asSQL(self) -> str:\n # To modify this method, pay attentin to SQL injection.\n # For example, if `self.x` is assumed to be integer\n # but is not guaranteed to be,\n # `:d` must always be specified in format strings:\n # `expressions.append(f\"x > {self.x:d}\")`\n expressions = []\n if self.date_start is not None:\n if self.date_start.tzinfo is None:\n datestr = self.date_start.isoformat()\n else:\n datestr = self.date_start.astimezone(datetime.timezone.utc).replace(tzinfo=None).isoformat()\n expressions.append(f\"pfs_visit.issued_at >= '{datestr}'\")\n if self.date_end is not None:\n if self.date_end.tzinfo is None:\n datestr = self.date_end.isoformat()\n else:\n datestr = self.date_end.astimezone(datetime.timezone.utc).replace(tzinfo=None).isoformat()\n expressions.append(f\"pfs_visit.issued_at < '{datestr}'\")\n if self.visit_start is not None:\n expressions.append(f\"pfs_visit.pfs_visit_id >= '{self.visit_start:d}'\")\n if self.visit_end is not None:\n expressions.append(f\"pfs_visit.pfs_visit_id < '{self.visit_end:d}'\")\n\n if expressions:\n return \"(\" + \" AND \".join(expressions) + \")\"\n else:\n return \"TRUE\"",
"def commit(self):\n return self.conn.commit()",
"def commitToDatabase(self, tiltseriesdata):\n\t\treturn",
"def query_commit(self, q, param=None):\r\n try:\r\n c = self.connection.cursor()\r\n if param is None:\r\n c.execute(q)\r\n else:\r\n c.execute(q, param)\r\n self.logger.log(logger.LogLevel.DEBUG, 'database.query_commit: %s | %s' % (q, param), True)\r\n self.connection.commit()\r\n return True\r\n except Exception as e:\r\n self.logger.log(logger.LogLevel.ERROR, 'database.query_commit: %s' % e)\r\n return False"
]
| [
"0.59211785",
"0.5635704",
"0.5527037",
"0.5473119",
"0.543951",
"0.5430877",
"0.5415368",
"0.53850543",
"0.53623307",
"0.5356949",
"0.5353716",
"0.5352036",
"0.53104126",
"0.5298429",
"0.52913463",
"0.52817005",
"0.52312577",
"0.5219302",
"0.52135074",
"0.5188443",
"0.51828486",
"0.51638573",
"0.51622444",
"0.51582503",
"0.51502126",
"0.5120803",
"0.51180667",
"0.51119",
"0.5101707",
"0.5096297"
]
| 0.71807504 | 0 |
Returns the SQL statement required to start a transaction. | def start_transaction_sql(self):
return "BEGIN TRANSACTION" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def start_transaction_sql(self):\n return 'START TRANSACTION;'",
"def start_transaction(self):\n self._connection.execute_nonquery(\"sql\", \"START TRANSACTION\", True)",
"def wrap_transaction(self):\n new_script = self.__class__()\n new_script.append(\n [BeginStatement()] + self.statements + [CommitStatement()])\n\n return new_script",
"def startTxn(self,msg=\"\"):\n\tif self.verbose and msg:\n\t message =\"Start transaction: \"+msg\n\t message+=\", commit flag=%s\"%self.commitFlag\n\t if self.dbType=='sqlite':\n\t message+=\", %s\"%self.db.isolation_level\n\t print message\n\t sys.__stdout__.flush()\n\tif self.dbType==\"mysql\":\n\t if self.commitFlag:\n\t self.updateDBAndLog(\"BEGIN\")\n\telif self.dbType==\"sqlite\":\n\t if not self.commitFlag:\n\t return\n\t if self.db.isolation_level:\n\t return\n\t counter=0\n\t query = \"BEGIN IMMEDIATE\"\n\t cu = self.db.cursor()\n\t while 1:\n\t if counter>10: break\n\t try:\n\t\t cu.execute(query)\n# self.cursor.execute(query)\n\t self.updateLog(query)\n\t break\n\t except:\n\t print \"Wait for BEGIN\"\n\t\t gen_util.printExcept()\n\t\t pass\n\t counter+=1\n\t cu.close()\n\t return",
"def get_sql_statement(self, start_time: datetime, end_time: datetime) -> str:\n return self.sql_stmt.format(\n start_time=start_time,\n end_time=end_time,\n result_limit=self.config.sourceConfig.config.resultLimit,\n filters=self.filters, # pylint: disable=no-member\n )",
"def sql(self):\n return ';\\n'.join([x.sql() for x in self._statements]) + ';'",
"def start_transaction(self): # type: ignore[no-untyped-def]\n return self._database.start_transaction",
"def insert_statement() -> str:\n pass",
"def to_sql(self):\n if self._action == \"create\":\n return self.platform().compile_create_sql(self.table)\n else:\n if not self._dry:\n # get current table schema\n table = self.platform().get_current_schema(\n self.connection, \"table_schema\"\n )\n self.table.from_table = table\n\n return self.platform().compile_alter_sql(self.table)",
"def create_statement(self):\n query = self.commands.get_table_create_statement(self.name)\n if self.db.table_exists(self.name):\n statement = self.execute(query)[0][0]\n statement = re.sub('\\s+', ' ', statement)\n return statement\n raise ValueError('Table does not exist, no create statement')",
"def test_transaction_management_statements(self):\n for script_pattern in (\n \"BEGIN TRANSACTION; %s; COMMIT;\",\n \"BEGIN; %s; END TRANSACTION;\",\n \"/* comment */BEGIN TRANSACTION; %s; /* comment */COMMIT;\",\n \"/* comment */ BEGIN TRANSACTION; %s; /* comment */ COMMIT;\",\n \"\"\"\n-- comment\nBEGIN TRANSACTION;\n\n%s;\n\n-- comment\nCOMMIT;\"\"\",\n ):\n\n test_statement = (\"CREATE TABLE TEST1 (field1 int); \"\n \"DROP TABLE TEST1\")\n script = script_pattern % test_statement\n src = self.tmp()\n\n with open(src, 'wt') as f:\n f.write(script)\n\n sqls = SqlScript(src)\n sqls.run(self.engine)",
"def create_table_statements() -> [str]:\n pass",
"def runSqlNoTransaction(self, sql):\r\n self.c.autocommit = True\r\n cursor = self.c.cursor()\r\n cursor.execute(sql)\r\n self.c.commit()\r\n cursor.close()\r\n self.c.autocommit = False\r\n return True",
"def _Dynamic_BeginTransaction(self, request, transaction, request_id=None):\n request.set_app(self.project_id)\n self._RemoteSend(request, transaction, \"BeginTransaction\", request_id)\n self.__tx_actions[transaction.handle()] = []\n return transaction",
"def begin(self):\n self._in_transaction = True\n self.execute(\"BEGIN\")",
"def start_transaction(self):\n raise Unsupported()",
"def start_transaction(self) -> None:\n pass",
"def get_sql(self, table_name):\n\t\tcolNames,sql = self._generate_sql_parts(table_name)\n\t\treturn get_query(table_name, colNames, sql)",
"def startTransaction(self) -> int:\n ...",
"def execute_transaction(self, statement, mapset=None):\n connected = False\n if not self.connected:\n self.connect()\n connected = True\n\n sql_script = \"\"\n sql_script += \"BEGIN TRANSACTION;\\n\"\n sql_script += statement\n sql_script += \"END TRANSACTION;\"\n\n try:\n if self.dbmi.__name__ == \"sqlite3\":\n self.cursor.executescript(statement)\n else:\n self.cursor.execute(statement)\n self.connection.commit()\n except:\n if connected:\n self.close()\n self.msgr.error(_(\"Unable to execute transaction:\\n %(sql)s\" %\n {\"sql\": statement}))\n raise\n\n if connected:\n self.close()",
"def create_db_statement(self):\n return Engine.create_db_statement(self).replace(\"DATABASE\", \"SCHEMA\")",
"def get_sql_statement_create_table_for_datatable(datatable: ModelDatatable):\n if datatable is None:\n return None\n\n # sql_create_projects_table = \"\"\" CREATE TABLE IF NOT EXISTS projects (\n # begin_date text,\n # end_date text\n # ); \"\"\"\n # begin of statement\n sql_statement = \"\"\" CREATE TABLE IF NOT EXISTS \"\"\" + \\\n datatable.get_datatable_name() + \"\"\"(\"\"\"\n\n # add column names\n column_names_string = get_column_names_as_string_incl_comma_using_datatable(datatable)\n sql_statement = sql_statement + column_names_string\n\n # add end of statement\n sql_statement = sql_statement + \"\"\");\"\"\"\n logger_log(\"datatable name: \"\n + datatable.get_datatable_name()\n + \" - result: \"\n + sql_statement)\n return sql_statement",
"def to_sql(self):\n\n if not self._action:\n self.set_action(\"select\")\n for scope in self._global_scopes.get(self.owner, {}).get(self._action, []):\n if not scope:\n continue\n\n scope(self.owner, self)\n\n grammar = self.get_grammar()\n sql = grammar.compile(self._action).to_sql()\n self.boot()\n return sql",
"def construct_statement(*args):\n\n INPUT_STATEMENT = \"\"\n for statement in args:\n INPUT_STATEMENT += statement\n \n\n return INPUT_STATEMENT",
"def get_sql_template(table):\n if table == 'strategy_state':\n sql = \"insert into strategy_state values ('%s', '%s', '%s')\"\n\n elif table == 'strategy_parameter':\n sql = \"insert into strategy_parameter values ('%s', '%s', '%s')\"\n\n else:\n platform_logger.error(\"input wrong table '%s'\" % table)\n return ''\n\n return sql",
"def create_tx(\n self,\n query: str,\n query_params: Optional[Mapping[str, Any]] = None,\n ):\n tx = self.get_session().begin_transaction()\n try:\n # logger.info(query)\n tx.run(query, parameters=query_params)\n tx.commit()\n except Exception as e:\n logger.error(e)\n finally:\n tx.close()",
"def sql_statement(self, operation, sql):\n if type(sql) != str:\n raise Exception(\n \"Invalid argument: sql of type {} should be: <class 'str'>\".format(type(sql)))\n if operation == 0 or operation == 2 or operation == 3:\n self.__database__.execute(sql)\n elif operation == 1:\n return self.__database__.execute(sql)\n else:\n raise Exception(\"Invalid SQL operation code\")",
"def get_sql_session(self):\n session_maker_obj = sessionmaker(bind=self._engine,\n expire_on_commit=False)\n session = session_maker_obj()\n return session",
"def construct_query(self):\n reader = QueryReader(filepath=self.filepath, filename=self.filename, raw_sql=self.raw_sql, params=self.params)\n return reader.sql",
"def start_transaction(self,):\n\n if self.tx is not None:\n raise OverlappedTransaction(str(self.tx.xid))\n\n modlogger.debug(\"start tx\")\n opid = self.new_opid()\n xaction = StartTxOperation(opid,opid)\n self.tx = Transaction(opid,self.home, track_state = self.track_state)\n self._add_operation(opid,xaction)\n return opid"
]
| [
"0.8120522",
"0.6544149",
"0.63907224",
"0.62939125",
"0.62703323",
"0.62328506",
"0.61006147",
"0.606897",
"0.60182077",
"0.6007495",
"0.59822136",
"0.5977059",
"0.582756",
"0.5786009",
"0.5709212",
"0.5700747",
"0.56912994",
"0.5642326",
"0.56331635",
"0.5619323",
"0.56181574",
"0.5538353",
"0.55153185",
"0.5504861",
"0.5478654",
"0.54775274",
"0.5472405",
"0.5468527",
"0.5456999",
"0.5428314"
]
| 0.83221465 | 0 |
Returns the SQL that will be appended to tables or rows to define a tablespace. Returns '' if the backend doesn't use tablespaces. | def sql_for_tablespace(self, tablespace, inline=False):
return "ON %s" % self.quote_name(tablespace) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_table_query_string(self) -> str:\n if self.database and self.table:\n return f'\"{self.database}\".\"{self.schema}\".\"{self.table}\"'\n elif self.table:\n return f'\"{self.table}\"'\n else:\n return f\"({self.query})\"",
"def schema(self):\n attrs = self.attrs.copy()\n parts = ['CREATE', 'TABLE', self.name, '(%s,' % self.hash_key.schema]\n del attrs[self.hash_key.name]\n if self.range_key:\n parts.append(self.range_key.schema + ',')\n del attrs[self.range_key.name]\n if attrs:\n attr_def = ', '.join([attr.schema for attr in six.itervalues(attrs)])\n parts.append(attr_def + ',')\n\n parts.append(\"THROUGHPUT (%d, %d))\" % (self.read_throughput,\n self.write_throughput))\n parts.extend([g.schema for g in six.itervalues(self.global_indexes)])\n return ' '.join(parts) + ';'",
"def _create_query(self, if_not_exists: bool = False) -> str:\n\n column_qry = ['CREATE TABLE']\n\n if if_not_exists:\n column_qry.append('IF NOT EXISTS')\n\n column_qry += [self.name, '(',\n ', '.join([column._create()\n for column in self.columns]),\n ')'\n ]\n return ' '.join(column_qry)",
"def create_table_statements() -> [str]:\n pass",
"def show_tablespaces(self):\n sql = \"SELECT TABLESPACE_NAME FROM DBA_TABLESPACES WHERE CONTENTS <> 'TEMPORARY' ORDER BY 1\"\n self.cur.execute(sql)\n res = self.cur.fetchall()\n key = ['{#TABLESPACE}']\n lst = []\n for i in res:\n d = dict(zip(key, i))\n lst.append(d)\n print(json.dumps({'data': lst}))",
"def to_sql(self):\n if self._action == \"create\":\n return self.platform().compile_create_sql(self.table)\n else:\n if not self._dry:\n # get current table schema\n table = self.platform().get_current_schema(\n self.connection, \"table_schema\"\n )\n self.table.from_table = table\n\n return self.platform().compile_alter_sql(self.table)",
"def as_sql(self):\n distinct = 'DISTINCT ' if self._distinct else ''\n final = ' FINAL' if self._final else ''\n table_name = '`%s`' % self._model_cls.table_name()\n if self._model_cls.is_system_model():\n table_name = '`system`.' + table_name\n params = (distinct, self.select_fields_as_sql(), table_name, final)\n sql = u'SELECT %s%s\\nFROM %s%s' % params\n\n if self._prewhere_q and not self._prewhere_q.is_empty:\n sql += '\\nPREWHERE ' + self.conditions_as_sql(prewhere=True)\n\n if self._where_q and not self._where_q.is_empty:\n sql += '\\nWHERE ' + self.conditions_as_sql(prewhere=False)\n\n if self._grouping_fields:\n sql += '\\nGROUP BY %s' % comma_join('`%s`' % field for field in self._grouping_fields)\n\n if self._grouping_with_totals:\n sql += ' WITH TOTALS'\n\n if self._order_by:\n sql += '\\nORDER BY ' + self.order_by_as_sql()\n\n if self._limit_by:\n sql += '\\nLIMIT %d, %d' % self._limit_by\n sql += ' BY %s' % comma_join(string_or_func(field) for field in self._limit_by_fields)\n\n if self._limits:\n sql += '\\nLIMIT %d, %d' % self._limits\n\n return sql",
"def table_name() -> str:\n pass",
"def build(self):\n return self._sql.strip()",
"def get_sql_template(table):\n if table == 'account_position':\n sql = \"insert into account_position values \" \\\n \"('%s', '%s', '%s', '0', '%f', '%f', '%f', '0', '0', '%f', '%f', '%f', '0', '0'\" \\\n \",'0', '0', '0', null, null, null, null, null, '0', '0', \" \\\n \"'0', '0', '%f', '0', '0', '%s');\"\n\n elif table == 'pf_position':\n sql = \"insert into pf_position values \" \\\n \"('%s', '%s', '%s', '0', '%f', '%f', '%f', '0', '0', '%f', \" \\\n \"'%f', '%f', '0', '0', '0', '0', null, '1',\" \\\n \" '0', '0', '0', '0', '0', '0', '0', '0', '%f', '0', '0', '0', null);\"\n\n elif table == 'account_trade_restrictions':\n sql = \"insert into account_trade_restrictions values \" \\\n \"('%s', '%s', '18', '0', '1000', '0', '1000', '0', '2000', \" \\\n \"'0', '3000', '0', '1000', '0', '1000', '0','1000', '1000', '0.9',\" \\\n \" '1000', '0.2', '0.1','100000000', '0', '0', '0', \" \\\n \"'0', '0','0' )\"\n elif table == 'instrument':\n sql = \"select ticker, pre_price from instrument where ticker= '%s'\"\n\n elif table == 'pf_account':\n sql = \"insert into pf_account values\" \\\n \"('%s', '%s', '%s', '%s', null, '');\"\n else:\n platform_logger.error(\"input wrong table '%s'\" % table)\n return ''\n return sql",
"def sql(self):\n return ';\\n'.join([x.sql() for x in self._statements]) + ';'",
"def get_sql(self, table_name):\n\t\tcolNames,sql = self._generate_sql_parts(table_name)\n\t\treturn get_query(table_name, colNames, sql)",
"def get_sql_template(table):\n if table == 'strategy_state':\n sql = \"insert into strategy_state values ('%s', '%s', '%s')\"\n\n elif table == 'strategy_parameter':\n sql = \"insert into strategy_parameter values ('%s', '%s', '%s')\"\n\n else:\n platform_logger.error(\"input wrong table '%s'\" % table)\n return ''\n\n return sql",
"def determine_query():\n return query if query is not None \\\n else f\"SELECT * FROM '{table}';\"",
"def get_query(self):\n columns = ','.join(['\"{}\"'.format(x) for x in self.columns])\n query = 'SELECT {} FROM \"{}\"'.format(columns, self.table)\n filter_params = []\n if self.filters:\n filter_sql, filter_params = filter_postgis(self.filters)\n query += ' WHERE {}'.format(filter_sql)\n query += ';'\n return str(text(query)), filter_params",
"def table_name(self) -> str:\n return \"OLTP\"",
"def _get_table_sql(object_id, tab_name, columns=[], rerun='s16a_wide2', save_sql=False, fn_sql='hsc_sql.txt', ):\n\tlocalpath = _get_local_path()\n\n\tfn = localpath + fn_table_template_sql\n\tsql_columns = _get_table_sql_columns(columns=columns)\n\n\twith open(fn, 'r') as f:\n\t\tsql_template = f.read()\n\tsql = sql_template.format(object_id=object_id, tab_name=tab_name, sql_columns=sql_columns, rerun=rerun, )\n\n\tif save_sql:\n\t\twith open(fn_sql, \"w\") as text_file:\n\t\t\ttext_file.write(sql)\n\n\treturn sql",
"def _get_sql_create_table(self, table_attr):\n template = 'CREATE TABLE IF NOT EXISTS \"%s\" (\\n %s );'\n columns_pri, columns_ref, columns, columns_ignore = \\\n PsqlParser._get_categorized_columns(table_attr['columns'])\n v2_columns = []\n for columnName, columnAttr in merge_dicts(columns_pri, columns_ref, columns).iteritems():\n v2_columns.append(PsqlParser._get_sql_column(columnAttr))\n return template % (table_attr['name'], \", \\n \".join(v2_columns))",
"def insert_tables_docstring(cur, conn):",
"def table(self):\n return self._table_name",
"def create(self):\n sql = []\n\n if self.kind == \"PRIMARY\":\n sql.append(\"ADD PRIMARY KEY\")\n elif self.kind == \"UNIQUE\":\n sql.append(\"ADD UNIQUE INDEX `%s`\" % self.name)\n elif self.kind in ('FULLTEXT', 'SPATIAL'):\n sql.append(\"ADD %s INDEX `%s`\" % (self.kind, self.name))\n else:\n sql.append(\"ADD INDEX `%s`\" % self.name)\n\n sql.append(\"(%s)\" % \", \".join([self.format_sub_part(f, l) for f, l in self.fields]))\n\n if self.type in ('BTREE', 'HASH', 'RTREE'):\n sql.append(\"USING %s\" % self.type)\n\n return ' '.join(sql)",
"def get_query(self):\n columns = ','.join(['\"{}\"'.format(x) for x in self._columns])\n query = 'SELECT {} FROM \"{}\"'.format(columns, self._table)\n filter_params = []\n if self._filters:\n filter_sql, filter_params = filter_postgis(self._filters)\n query += ' WHERE {}'.format(filter_sql)\n query += ';'\n return str(text(query)), filter_params",
"def get_tablename(self):\n return self.ds_table",
"def create_statement(self):\n query = self.commands.get_table_create_statement(self.name)\n if self.db.table_exists(self.name):\n statement = self.execute(query)[0][0]\n statement = re.sub('\\s+', ' ', statement)\n return statement\n raise ValueError('Table does not exist, no create statement')",
"def to_sql(self):\n\n if not self._action:\n self.set_action(\"select\")\n for scope in self._global_scopes.get(self.owner, {}).get(self._action, []):\n if not scope:\n continue\n\n scope(self.owner, self)\n\n grammar = self.get_grammar()\n sql = grammar.compile(self._action).to_sql()\n self.boot()\n return sql",
"def create_db_statement(self):\n return Engine.create_db_statement(self).replace(\"DATABASE\", \"SCHEMA\")",
"def table_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"table_name\")",
"def table_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"table_name\")",
"def table_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"table_name\")",
"def can_sql(self):\n self.SQL = {} #Dict of canned SQL \n self.SQL[\"User Tables\"] = '''\n SELECT table_schema,table_name\n FROM information_schema.tables\n WHERE table_type = 'BASE TABLE' \n AND table_schema = 'public' \n ORDER BY table_schema,table_name;\n '''\n \n self.SQL[\"All Tables\"] = '''\n SELECT table_schema,table_name\n FROM information_schema.tables\n ORDER BY table_schema,table_name;\n '''\n \n self.SQL[\"All Databases\"] = ''' \n SELECT datname FROM pg_database\n WHERE datistemplate = false;\n '''"
]
| [
"0.6469759",
"0.59934837",
"0.58655435",
"0.583523",
"0.58219546",
"0.58166826",
"0.5785589",
"0.57622534",
"0.56823945",
"0.55773354",
"0.5572355",
"0.55675316",
"0.55593735",
"0.55440897",
"0.55301553",
"0.5515544",
"0.5511684",
"0.54842204",
"0.5446456",
"0.542847",
"0.54197186",
"0.54133713",
"0.5392606",
"0.537065",
"0.53659934",
"0.53448635",
"0.53229207",
"0.53229207",
"0.53229207",
"0.53198206"
]
| 0.6807127 | 0 |
Prepares a value for use in a LIKE query. | def prep_for_like_query(self, x):
# http://msdn2.microsoft.com/en-us/library/ms179859.aspx
return smart_text(x).replace('%', '\%').replace('_', '\_') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_prep_lookup(self, lookup_type, value):\n\n if hasattr(value, 'prepare'):\n return value.prepare()\n if hasattr(value, '_prepare'):\n return value._prepare()\n\n if lookup_type in ('indexexact', 'distinct', 'slice',\\\n 'contains', 'containedby', 'overlap', 'exact', \\\n 'gt','lt','gte', 'lte'):\n return self.get_prep_value(value)\n raise TypeError(\"Field has invalid lookup: %s\" % lookup_type)",
"def prepare_value(self, value):\n if value is None:\n return value\n value = value.replace(\" \", \"\").replace(\".\", \"\")\n if value:\n return \"%s.%s.%s.%s\" % (value[0:3], value[3:7], value[7:11], value[11:])\n return value",
"def __prepare_value(self, val, none_symbol=\"-\"):\n\t\t# If type is a tuple, then it is a \n\t\t# (func_unit_label, replica_id) pair. \n\t\t# Concatenate then using \"_\" symbol.\n\t\tif type(val) == type(()):\n\t\t\tval = \"_\".join(map(str, val))\n\n\t\t# Cast value to string/character type,\n\t\t# if it is not a None value\n\t\tif val is not None:\n\t\t\tval = str(val)\n\t\telse:\n\t\t\tval = none_symbol\n\n\t\t# Value processing finished\n\t\treturn val",
"def _prepare_cache(self, value):\n\n return value",
"def get_prep_value(self, value):\n return str(value)",
"def prep_for_iexact_query(self, x):\n return x",
"def get_prep_value(self, value):\n if (value is UNKNOWN) or (value is ''):\n # If Django tries to save an empty string, send the db None (NULL).\n return None\n else:\n # Otherwise, just pass the value.\n return value",
"def get_prep_value(self, value):\n return encrypt(value)",
"def get_prep_lookup(self, lookup_type, value):\n if value is None:\n return super(self.__class__, self).get_prep_lookup(lookup_type, value)\n if lookup_type == 'in':\n value = [v.value for v in value]\n return super(self.__class__, self).get_prep_lookup(lookup_type, value)\n if lookup_type == 'exact':\n return super(self.__class__, self).get_prep_lookup(lookup_type, value.value)\n raise TypeError('Lookup type {} is not supported.'.format(lookup_type))",
"def prepare_query_value(self, op, value):\n if op in UPDATE_OPERATORS:\n self.validate(value)\n return value",
"def _wrap_initial(initial, query):\n\t\trepl = query.replace('initial = ?', 'initial is ?')\n\t\treturn repl if initial is None else query",
"def lower_replace(value):\n return value.lower().replace(\" \",\"_\")",
"def _assignValue(value):\n if value == \"\":\n return None\n else:\n return value",
"def cleanfieldlower(value):\n if not value:\n return None\n value = str(value)\n value = value.strip()\n value = value.lower()\n return value",
"def clean_value(self, value):\n return value",
"def prepare_terms(terms, search_mode):\n if search_mode in (\"contains\", \"starts_with\"):\n terms = terms.replace(\"_\", \"\\_\").replace(\"%\", \"\\%\")\n\n if search_mode == \"contains\":\n terms = \"%\" + terms + \"%\"\n elif search_mode == \"starts_with\":\n terms = terms + \"%\"\n return terms",
"def test_prepare_value_string(self):\n field = FractionField()\n result = field.prepare_value(\"1/4\")\n self.assertEqual(\"1/4\", result)\n\n result = field.prepare_value(\"1 1/4\")\n self.assertEqual(\"1 1/4\", result)",
"def loope(value,arg):\r\n return value.replace(arg,'')",
"def _wildcardformat(regxval):\n if regxval == None:\n return None\n else:\n try:\n return regxval.replace(\"*\",\"%\").replace(\"?\",\"_\")\n except AttributeError:\n return regxval",
"def _rewrite_wrt(self, var):\n if var == \"\":\n return \"\"\n for pred_rd, pred_wrt, inst in reversed(self.all_rw_list):\n if pred_wrt == \"\" or var == pred_wrt:\n continue\n # exact matching\n if var.find(pred_wrt) != -1:\n var = var.replace(pred_wrt, pred_rd)\n break\n return var",
"def url_filter(val):\n if isinstance(val, Undefined):\n return UNDEFINED_LABEL\n return quote(str(val))",
"def _match_filter(self, meta, field):\r\n val = meta[field]\r\n if field in self.ignored_values:\r\n for pattern in self.ignored_values[field]:\r\n val = val.replace(pattern, '')\r\n return val",
"def normalizeFilter(_filter):\n _filter = '' if _filter == None else _filter\n\n _filter = _filter.rstrip(' ')\n _filter = '%' if _filter == '' else _filter\n _filter = _filter.replace('*','%')\n _filter = _filter.replace('?','_')\n\n return _filter",
"def get_name_query_cond(column: str, val: str, query_params: dict):\n if val is not None and column is not None:\n query_params[column] = '%' + val + '%'\n return 'AHJ.' + column + ' LIKE %(' + column + ')s AND '\n return ''",
"def get_prep_value(self, value):\n if value is None:\n return value\n if isinstance(value, self.enum_class):\n return super(self.__class__, self).get_prep_value(value.value)\n if isinstance(value, Enum):\n raise ValueError('{} is of the wrong Enum type.'.format(value))\n return super(self.__class__, self).get_prep_value(value)",
"def search(self, value):\n pass",
"def sanitize(cls, value):\n return value",
"def cutting(value,arg):\n return value.replace(arg,'working')",
"def pre_set(self, value):\r\n return value",
"def sanitize_name(self, value):\n if self.sanitize_names:\n new_value = re.sub('[^a-zA-Z0-9_]', '_', value[:127])\n else:\n new_value = value\n return new_value"
]
| [
"0.62387824",
"0.62335867",
"0.60099083",
"0.57378113",
"0.5714425",
"0.5617409",
"0.5498217",
"0.5473074",
"0.5430203",
"0.5371442",
"0.5293164",
"0.52836585",
"0.5271929",
"0.52164465",
"0.51479703",
"0.5127178",
"0.5091327",
"0.5089108",
"0.50791",
"0.50724685",
"0.5052547",
"0.5051434",
"0.5047706",
"0.5036393",
"0.50360805",
"0.50204587",
"0.5010905",
"0.5003025",
"0.500135",
"0.49995393"
]
| 0.6611588 | 0 |
Transform a datetime value to an object compatible with what is expected by the backend driver for datetime columns. | def adapt_datetimefield_value(self, value):
if value is None:
return None
# Expression values are adapted by the database.
if hasattr(value, 'resolve_expression'):
return value
if settings.USE_TZ and timezone.is_aware(value):
            # pyodbc doesn't support datetimeoffset
value = value.astimezone(self.connection.timezone).replace(tzinfo=None)
return value | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def to_mongo(self, value):\n\n try:\n if not isinstance(value, datetime) or not value:\n pdate = value.toPyDate()\n return datetime(pdate.year, pdate.month, pdate.day)\n else:\n return value\n except AttributeError:\n return value\n\n # pyValue = value.toPyDate()\n # return datetime(pyValue.year, pyValue.month, pyValue.day)",
"def _coerce_datetime(\n value: Any, annotation: Type[Union[datetime.date, datetime.datetime]]\n ) -> Union[datetime.datetime, datetime.date]:\n if isinstance(value, datetime.date) and annotation == datetime.datetime:\n value = datetime.datetime(value.year, value.month, value.day)\n elif isinstance(value, datetime.datetime) and annotation == datetime.date:\n value = value.date()\n elif isinstance(value, (int, float)):\n value = annotation.fromtimestamp(value)\n else:\n value = dateutil.parser.parse(value)\n\n return value",
"def force_to_datetime(val):\n if not val:\n return val\n elif isinstance(val, datetime.datetime):\n return val\n elif isinstance(val, datetime.date):\n return datetime.datetime.combine(val, datetime.time())\n elif isinstance(val, str):\n return string_to_datetime(val)\n else:\n raise ValueError(\"object must be date or datetime!\")",
"def python_cast(self, v):\n\n if self.type_is_time():\n import dateutil.parser\n dt = dateutil.parser.parse(v)\n\n if self.datatype == Column.DATATYPE_TIME:\n dt = dt.time()\n if not isinstance(dt, self.python_type):\n raise TypeError(\n '{} was parsed to {}, expected {}'.format(\n v,\n type(dt),\n self.python_type))\n\n return dt\n else:\n # This isn't calling the python_type method -- it's getting a python type, then instantialting it,\n # such as \"int(v)\"\n return self.python_type(v)",
"def dt_to_pydatetime(self):\n return DateTimeDefault.register(pandas.Series.dt.to_pydatetime)(self)",
"def _date_to_datetime(value):\r\n assert isinstance(value, datetime.date)\r\n return datetime.datetime(value.year, value.month, value.day)",
"def date_to_python(self, value):\r\n # this throws away fractions of a second\r\n return datetime(*strptime(value[:-5], \"%Y-%m-%dT%H:%M:%S\")[0:6])",
"def init_value(self, value, strict: bool = True):\n if isinstance(value, str):\n value = datetime.datetime.fromisoformat(value)\n elif isinstance(value, float):\n value = datetime.datetime.fromtimestamp(value)\n return super().init_value(value, strict)",
"def dehydrate_datetime(value):\n\n def seconds_and_nanoseconds(dt):\n if isinstance(dt, datetime):\n dt = DateTime.from_native(dt)\n zone_epoch = DateTime(1970, 1, 1, tzinfo=dt.tzinfo)\n t = dt.to_clock_time() - zone_epoch.to_clock_time()\n return t.seconds, t.nanoseconds\n\n tz = value.tzinfo\n if tz is None:\n # without time zone\n value = utc.localize(value)\n seconds, nanoseconds = seconds_and_nanoseconds(value)\n return Structure(ord(b\"d\"), seconds, nanoseconds)\n elif hasattr(tz, \"zone\") and tz.zone:\n # with named time zone\n seconds, nanoseconds = seconds_and_nanoseconds(value)\n return Structure(ord(b\"f\"), seconds, nanoseconds, tz.zone)\n else:\n # with time offset\n seconds, nanoseconds = seconds_and_nanoseconds(value)\n return Structure(ord(b\"F\"), seconds, nanoseconds, tz.utcoffset(value).seconds)",
"def convert_values(self, value, field):\n if value is None:\n return None\n if field and field.get_internal_type() == 'DateTimeField':\n if isinstance(value, string_types) and value:\n value = parse_datetime(value)\n return value\n elif field and field.get_internal_type() == 'DateField':\n if isinstance(value, datetime.datetime):\n value = value.date() # extract date\n elif isinstance(value, string_types):\n value = parse_date(value)\n elif field and field.get_internal_type() == 'TimeField':\n if (isinstance(value, datetime.datetime) and value.year == 1900 and value.month == value.day == 1):\n value = value.time() # extract time\n elif isinstance(value, string_types):\n # If the value is a string, parse it using parse_time.\n value = parse_time(value)\n # Some cases (for example when select_related() is used) aren't\n # caught by the DateField case above and date fields arrive from\n # the DB as datetime instances.\n # Implement a workaround stealing the idea from the Oracle\n # backend. It's not perfect so the same warning applies (i.e. if a\n # query results in valid date+time values with the time part set\n # to midnight, this workaround can surprise us by converting them\n # to the datetime.date Python type).\n elif isinstance(value, datetime.datetime) and value.hour == value.minute == value.second == value.microsecond == 0:\n value = value.date()\n # Force floats to the correct type\n elif value is not None and field and field.get_internal_type() == 'FloatField':\n value = float(value)\n return value",
"def _time_to_datetime(value):\r\n assert isinstance(value, datetime.time)\r\n return datetime.datetime(1970, 1, 1,\r\n value.hour, value.minute, value.second,\r\n value.microsecond)",
"def make_datetime(value):\n if value:\n return value\n return None",
"def orm2datetime(value, tformat=ORM_DATETIME_FORMAT, default=None):\n if not value:\n return default\n return datetime.strptime(value, tformat)",
"def _serialize_datetime(val):\n return datetime_to_iso8601(val)",
"def convert_timestamp_to_object(data):\n for k, value in data.items():\n value_type = value.split(\"::\", 1)[0]\n if value_type == \"datetime\":\n timestamp = int(value.split(\"::\", 1)[1])\n value = datetime.fromtimestamp(timestamp)\n elif value_type == \"date\":\n timestamp = int(value.split(\"::\", 1)[1])\n value = date.fromtimestamp(timestamp)\n data[k] = value\n return data",
"def to_datetime(obj: \"Any\") -> datetime:\n if isinstance(obj, datetime):\n # trivially return datetime objects untouched\n return obj\n elif isinstance(obj, Timestamp):\n # straight from a Google Timestamp\n dt = obj.ToDatetime()\n return dt.replace(tzinfo=timezone.utc)\n elif isinstance(obj, (int, float)):\n # from the gRPC Ledger API; interpret as microseconds from the GMT epoch\n return datetime.utcfromtimestamp(obj / 1e6).replace(tzinfo=timezone.utc)\n elif isinstance(obj, str):\n # from the REST Ledger API or an end user; give preference to the unambiguous wire\n # format ISO8601, but otherwise try to parse in a variety of formats\n for fmt in DATETIME_FORMATS:\n try:\n return fmt(obj)\n except ValueError:\n pass\n\n raise ValueError(f\"Could not parse as a datetime: {obj!r}\")",
"def convert_types(cls, value):\n if type(value) in (datetime, date):\n return time.mktime(value.timetuple())\n elif isinstance(value, Decimal):\n return float(value)\n else:\n return value",
"def convert_date(self, dt: datetime) -> Union[datetime, Function]:\n return dt",
"def ensure_datetime(ob: AnyDatetime) -> datetime.datetime:\n if isinstance(ob, datetime.datetime):\n return ob\n date = cast(datetime.date, ob)\n time = cast(datetime.time, ob)\n if isinstance(ob, datetime.date):\n time = datetime.time()\n if isinstance(ob, datetime.time):\n date = datetime.date(1900, 1, 1)\n return datetime.datetime.combine(date, time)",
"def to_pydatetime(self) -> npt.NDArray[np.object_]:\n return ints_to_pydatetime(self.asi8, tz=self.tz, reso=self._creso)",
"def to_datetime(date: Union[dt.datetime, dt.date]) -> dt.datetime:\n if isinstance(date, dt.datetime):\n return dt.datetime(date.year, date.month, date.day, date.hour, date.minute, date.second)\n elif isinstance(date, dt.date):\n return dt.datetime(date.year, date.month, date.day)\n else:\n raise ValueError(\"<{0}>'s type is not recognized. Its type is <{1}>\".format(date, type(date)))",
"def test_type_conversion(registry: AdapterLoader) -> None:\n registry.add(\"dummy\", FakeAdapterWithDateTime)\n\n connection = connect(\":memory:\", [\"dummy\"], isolation_level=\"IMMEDIATE\")\n cursor = connection.cursor()\n\n cursor.execute('SELECT * FROM \"dummy://\"')\n assert cursor.fetchall() == []\n\n cursor.execute(\n 'INSERT INTO \"dummy://\" (birthday) VALUES (?)',\n (datetime(2021, 1, 1, 0, 0),),\n )\n cursor.execute('SELECT * FROM \"dummy://\"')\n assert cursor.fetchall() == [\n (\n None,\n datetime(2021, 1, 1, 0, 0),\n None,\n None,\n ),\n ]\n\n # make sure datetime is stored as a datetime\n assert FakeAdapterWithDateTime.data == [\n {\n \"age\": None,\n \"birthday\": datetime(2021, 1, 1, 0, 0),\n \"name\": None,\n \"pets\": None,\n \"rowid\": 1,\n },\n ]\n assert isinstance(FakeAdapterWithDateTime.data[0][\"birthday\"], datetime)\n\n cursor.execute(\n 'SELECT * FROM \"dummy://\" WHERE birthday > ?',\n (datetime(2020, 12, 31, 0, 0),),\n )\n assert cursor.fetchall() == [\n (None, datetime(2021, 1, 1, 0, 0), None, None),\n ]",
"def _to_date(self, x):\n if isinstance(x, datetime.datetime):\n return x.date()\n return x",
"def datetime_to_sql(connection, obj):\n return connection.string_literal(datetime_to_str(obj))",
"def _get_datetime(dt_value):\n result = None\n if result is None:\n result = _get_datetime_from_format(dt_value, \"%Y-%m-%d %H:%M:%S.%f %z\")\n if result is None:\n result = _get_datetime_from_format(dt_value, \"%Y-%m-%d %H:%M:%S.%f\")\n if result is None:\n result = _get_datetime_from_format(dt_value, \"%Y-%m-%d %H:%M:%S\")\n if result is None:\n raise RuntimeError(\n \"Failed to convert '{}' into datetime object\".format(dt_value))\n return result",
"def ensure_datetime(value: Union[Date, DateTime, str], **kwargs: int) -> DateTime:\n ## Check the type of the value and act accordingly.\n if isinstance(value, DateTime):\n ## It is a datetime instance. Nothing to be done. Just return with replacement:\n return value.replace(**kwargs) # type: ignore\n elif isinstance(value, Date):\n ## It is a date instance. Set to morning and return with replacement:\n return DateTime.combine(value, DateTime.min.time()).replace(**kwargs) # type: ignore\n elif isinstance(value, str):\n ## We have a string. Attempt to parse and return with replacement:\n try:\n return parse(value).replace(**kwargs) # type: ignore\n except ParserError:\n raise ValueError(\"Can not parse value into a date/time object: {}\".format(value))\n\n ## We have a problem here: Don't know how to convert other\n ## object. Raise a value error:\n raise ValueError(\"Don't know how to convert value to date/time object: {}\".format(value))",
"def convert_datetime_to_iso(datetime_obj):\r\n return Date().to_json(datetime_obj)",
"def create_datetime_column(df):\n df[\"datetime\"] = pd.to_datetime(df[[\"year\", \"month\", \"day\"]])\n return df.drop([\"year\", \"month\", \"day\"], axis=1)",
"def serialize_dt(value):\n return value.isoformat() if hasattr(value, 'isoformat') else value",
"def adapt_timefield_value(self, value):\n if value is None:\n return None\n \n # Expression values are adapted by the database.\n if hasattr(value, 'resolve_expression'):\n return value\n # SQL Server doesn't support microseconds\n if isinstance(value, string_types):\n return datetime.datetime(*(time.strptime(value, '%H:%M:%S')[:6]))\n if timezone.is_aware(value):\n raise ValueError(\"DBMaker backend does not support timezone-aware times.\")\n return datetime.time(value.hour, value.minute, value.second)"
]
| [
"0.70304805",
"0.6863592",
"0.6858909",
"0.6828734",
"0.67776716",
"0.6712351",
"0.66223943",
"0.6560922",
"0.6497349",
"0.64821446",
"0.64643747",
"0.6452288",
"0.64504373",
"0.6442613",
"0.6383555",
"0.63835424",
"0.6320463",
"0.6273228",
"0.6247605",
"0.62223506",
"0.62175107",
"0.62047285",
"0.61966145",
"0.6194032",
"0.61829126",
"0.61759645",
"0.61425006",
"0.6120858",
"0.6114316",
"0.61136174"
]
| 0.69163877 | 1 |
Transform a time value to an object compatible with what is expected by the backend driver for time columns. | def adapt_timefield_value(self, value):
if value is None:
return None
# Expression values are adapted by the database.
if hasattr(value, 'resolve_expression'):
return value
# SQL Server doesn't support microseconds
if isinstance(value, string_types):
return datetime.datetime(*(time.strptime(value, '%H:%M:%S')[:6]))
if timezone.is_aware(value):
raise ValueError("DBMaker backend does not support timezone-aware times.")
return datetime.time(value.hour, value.minute, value.second) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_to_time(value):\n if isinstance(value, datetime.time):\n return value\n elif isinstance(value, str):\n return datetime.time.fromisoformat(value)\n else:\n return datetime.time(value)",
"def dehydrate_time(value):\n if isinstance(value, Time):\n nanoseconds = int(value.ticks * 1000000000)\n elif isinstance(value, time):\n nanoseconds = (3600000000000 * value.hour + 60000000000 * value.minute +\n 1000000000 * value.second + 1000 * value.microsecond)\n else:\n raise TypeError(\"Value must be a neotime.Time or a datetime.time\")\n if value.tzinfo:\n return Structure(ord(b\"T\"), nanoseconds, value.tzinfo.utcoffset(value).seconds)\n else:\n return Structure(ord(b\"t\"), nanoseconds)",
"def _time_to_datetime(value):\r\n assert isinstance(value, datetime.time)\r\n return datetime.datetime(1970, 1, 1,\r\n value.hour, value.minute, value.second,\r\n value.microsecond)",
"def _astropy_time(time):\n return time if isinstance(time, astropy.time.Time) else astropy.time.Time(parse_time(time))",
"def python_cast(self, v):\n\n if self.type_is_time():\n import dateutil.parser\n dt = dateutil.parser.parse(v)\n\n if self.datatype == Column.DATATYPE_TIME:\n dt = dt.time()\n if not isinstance(dt, self.python_type):\n raise TypeError(\n '{} was parsed to {}, expected {}'.format(\n v,\n type(dt),\n self.python_type))\n\n return dt\n else:\n # This isn't calling the python_type method -- it's getting a python type, then instantialting it,\n # such as \"int(v)\"\n return self.python_type(v)",
"def fix_time_fields(self):\n time_fields = {\"Time of day\": lambda time: time.hour, \"Time of year (month)\": lambda time: time.month}\n for time_field in time_fields.keys():\n for i in range(self.df.shape[0]):\n value = self.df[time_field][i]\n if type(value) is datetime.time or type(value) is datetime.datetime:\n self.df[time_field].loc[i] = time_fields[time_field](value)",
"def convert_timestamp_to_object(data):\n for k, value in data.items():\n value_type = value.split(\"::\", 1)[0]\n if value_type == \"datetime\":\n timestamp = int(value.split(\"::\", 1)[1])\n value = datetime.fromtimestamp(timestamp)\n elif value_type == \"date\":\n timestamp = int(value.split(\"::\", 1)[1])\n value = date.fromtimestamp(timestamp)\n data[k] = value\n return data",
"def construct_obstime(self, row):\n return time.Time(self['mjd'][row], format='mjd')",
"def unmarshall_time(tyme):\r\n return datetime.datetime(day=tyme['day'],\r\n month=tyme['month'],\r\n year=tyme['year'],\r\n hour=tyme['hour'],\r\n minute=tyme['minute'],\r\n second=tyme['second'],\r\n microsecond=tyme['microsecond'])",
"def get_datetime_from_time(value: datetime | time) -> datetime:\n if isinstance(value, time):\n value = datetime.combine(dt_util.now().date(), value, dt_util.DEFAULT_TIME_ZONE)\n if isinstance(value, datetime):\n value = value.replace(tzinfo=dt_util.DEFAULT_TIME_ZONE)\n if value > dt_util.now():\n raise ValidationError(\"Time cannot be in the future.\")\n return value",
"def convert_time(t):\n return datetime.fromtimestamp(t / 1e7 - 11644473600)",
"def time(self, value: typing.Union[str, _datetime.datetime, _datetime.date]):\n if isinstance(value, _datetime.datetime):\n value = value.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n elif isinstance(value, _datetime.date):\n value = value.strftime(\"%Y-%m-%dT00:00:00Z\")\n self._properties[\"time\"] = value",
"def _get_time(self) -> None:\n self.data[\"time\"] = np.zeros(len(self.data[\"yyyymmdd\"]), dtype=object)\n \n for idx, (yyyymmdd, hhmmss) in enumerate(zip(self.data[\"yyyymmdd\"], self.data[\"hhmmss\"])):\n year, month, day = yyyymmdd.split(\"/\")\n hour, minute, second = hhmmss.split(\":\")\n self.data[\"time\"][idx] = datetime(int(year), int(month), int(day), int(hour), int(minute), int(second))\n \n del self.data[\"yyyymmdd\"]\n del self.data[\"hhmmss\"]",
"def parse_tql_time(self, data, pid, label):\n field = self.parse_tql_field(data, pid, label)\n if field:\n hour, minute, second = [int(i) for i in field.split(':')[0:3]]\n field = datetime.time(hour, minute, second)\n return field",
"def datetime_from_time(time: datetime.time, date: datetime.date = datetime.date.today()):\n if type(time) == datetime.time:\n return datetime.datetime.combine(date, time)\n else:\n return time",
"def time(self):\n return self[self.time_columns]",
"def time(self):\n return self[self.time_columns]",
"def _serialize_time(val):\n return val.isoformat()",
"def to_db(time):\n return int(time.timestamp())",
"def ConvertTime( self ) :\n \n # modules:\n import logging\n import datetime\n import netCDF4\n import numpy\n \n #\n # Original 'Time' units and description:\n #\n # title = \"Time at Start of Scan (s, UNIX time / POSIX time), number of seconds that have elapsed since midnight Coordinated Universal Time (UTC), 1 January 1970.\"\n # units = \"s\"\n #\n # Create new field 'Datetime' field with units:\n # units = \"Seconds since 1970-01-01 00:00'\n #\n # select:\n varid = self.swaths[self.component]['Geolocation Fields']['Time']\n # values:\n tvalues = varid['data']\n # extract description:\n long_name = varid['long_name'].decode('latin-1')\n # check ...\n key = 'Time at Start of Scan (s, UNIX time / POSIX time), number of seconds that have elapsed since midnight Coordinated Universal Time (UTC),'\n if long_name.startswith(key) :\n # remove leading description:\n time0 = long_name.replace(key,'').replace('.','').strip()\n # extract datetime object:\n t0 = datetime.datetime.strptime(time0,'%d %B %Y')\n # convert:\n var = {}\n var['units' ] = t0.strftime('seconds since %Y-%m-%d %H:%M:%H')\n var['long_name'] = long_name\n if 'mask' in dir(tvalues) :\n values1d = netCDF4.num2date( tvalues.data, var['units'] )\n else :\n values1d = netCDF4.num2date( tvalues , var['units'] )\n #endif\n # alternative:\n # \"Time at Start of Scan (s, TAI93)\"\n elif 'TAI' in long_name :\n # find start:\n i0 = long_name.index('TAI')\n # extract:\n year = int(long_name[i0+3:].replace(')',''))\n # convert to 4-digits if necessary:\n if year < 100 :\n if year > 50 :\n year = 1900 + year\n else :\n year = 2000 + year\n #endif\n #endif\n # reference time:\n t0 = datetime.datetime(year,1,1,0,0,0)\n # convert:\n var = {}\n var['units' ] = t0.strftime('seconds since %Y-%m-%d %H:%M:%H')\n var['long_name'] = long_name\n values1d = netCDF4.num2date( tvalues, var['units'] )\n else :\n self.logger.error( 'could not convert time units \"%s\"' % long_name )\n self.logger.error( 'first value : %f' % tvalues[0] )\n raise Exception\n #endif\n \n # expand to 2D:\n var['data'] = numpy.zeros( (self.ntime,self.np), values1d.dtype )\n for ip in range(self.np) :\n var['data'][:,ip] = values1d\n #endfor\n \n # set dim names:\n var['dimnames'] = ('time','pixel')\n \n # store:\n self.swaths[self.component]['Geolocation Fields']['Datetime'] = var",
"def int_to_time(seconds):\n time = Time()\n minutes, time.second = divmod(seconds, 60)\n time.hour, time.minute = divmod(minutes, 60)\n return time",
"def convert_types(cls, value):\n if type(value) in (datetime, date):\n return time.mktime(value.timetuple())\n elif isinstance(value, Decimal):\n return float(value)\n else:\n return value",
"def time_struct_to_datetime(struct_time_object):\n return datetime.datetime(*struct_time_object[:6])",
"def test_as_time(self):\n self.assertEqual(\n time_display.as_time(\n datetime(2020, 7, 31, 23, 59, 30, 357921),\n with_msec=True),\n '23:59:30.357')",
"def _add_time_field(self) -> None:\n self.data[\"time\"] = [datetime(int(yyyy), int(mm), int(dd)) + timedelta(hours=hh) for yyyy, mm, dd, hh in zip(self.data[\"year\"], self.data[\"month\"], self.data[\"day\"], self.data[\"hour\"])]\n for key in [\"year\", \"doy\", \"month\", \"day\", \"hour\"]:\n del self.data[key]",
"def time(self):\n return Time(self.hour, self.minute, self.second)",
"def test_is_time_druid_time_col(self):\n col = TableColumn(column_name=\"__time\", type=\"INTEGER\")\n self.assertEquals(col.is_dttm, None)\n DruidEngineSpec.alter_new_orm_column(col)\n self.assertEquals(col.is_dttm, True)\n\n col = TableColumn(column_name=\"__not_time\", type=\"INTEGER\")\n self.assertEquals(col.is_time, False)",
"def scale_time_to(recs, unit):\n\n for r in recs:\n if unit == 'd':\n r.t = [t / 3600 / 24 for t in r.time]\n elif unit == 'hours':\n r.t = [t / 3600 for t in r.time]\n elif unit == 'min':\n r.t = [t / 60 for t in r.time]\n elif unit in ('s', 'sec'):\n r.t = r.time\n else:\n Exception('Wrong time unit')\n\n Records.time_unit = unit\n Records.time_label = 'Time (' + unit + ')'",
"def __parse_time(self, time_obj):\n if time_obj:\n resp = ''\n if isinstance(time_obj, int) or isinstance(time_obj, str):\n resp = time_obj\n elif isinstance(time_obj, datetime.datetime):\n resp = calendar.timegm(time_obj.timetuple())\n else:\n raise Exception(\"Unknown __parse_time format for {0}\".format(time_obj))\n return str(resp)\n return None",
"def add_time(data, t):\n data['year'] = t.year\n data['month'] = t.month\n data['day'] = t.day\n data['hour'] = t.hour\n data['minute'] = t.minute\n data['second'] = t.second"
]
| [
"0.7205994",
"0.7059694",
"0.7051511",
"0.67561054",
"0.6677156",
"0.6417766",
"0.63727784",
"0.63563097",
"0.6295769",
"0.6213761",
"0.6160596",
"0.6135675",
"0.6091793",
"0.60568434",
"0.60233325",
"0.5982329",
"0.5982329",
"0.59743375",
"0.5955505",
"0.5954497",
"0.5911527",
"0.5906954",
"0.58608985",
"0.5857448",
"0.5828965",
"0.5817017",
"0.58006823",
"0.57914406",
"0.5785946",
"0.5754254"
]
| 0.7745255 | 0 |
Returns a two-elements list with the lower and upper bound to be used with a BETWEEN operator to query a field value using a year lookup. `value` is an int, containing the looked-up year. | def year_lookup_bounds(self, value):
first = '%s-01-01 00:00:00'
# SQL Server doesn't support microseconds
last = '%s-12-31 23:59:59'
return [first % value, last % value] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_range( value ):\n return list(range(value))",
"def get_range(value):\n return list(range(value))",
"def year_text_range_filter(self, queryset, name, value):\n if value:\n if value[0] and value[1]:\n queryset = queryset.filter(**{name+'__regex': r'^[0-9]{3,4}$'}) \\\n .annotate(**{name+'_int': Cast(name, IntegerField())})\\\n .filter(**{name+'_int__range': (value[0], value[1])})\n else:\n if value[0]:\n queryset = queryset.filter(**{name+'__regex': r'^[0-9]{3,4}$'}) \\\n .annotate(**{name+'_int': Cast(name, IntegerField())})\\\n .filter(**{name+'_int__gte': value[0]})\n if value[1]:\n queryset = queryset.filter(**{name+'__regex': r'^[0-9]{3,4}$'}) \\\n .annotate(**{name+'_int': Cast(name, IntegerField())})\\\n .filter(**{name+'_int__lte': value[1]})\n\n return queryset",
"def _year_range(m):\n return (m.group(1), m.group(2))",
"def translate_years(val):\n if val.find(\"-\") > 0:\n tokens = re.findall(\"[0-9]+\", val)\n one = int(tokens[0])\n two = int(tokens[1])\n one = (1900 + one) if one > 50 else (2000 + one)\n two = (1900 + two) if two > 50 else (2000 + two)\n return range(one, two + 1)\n tokens = re.findall(\"[0-9]+\", val)\n return [int(f\"{'19' if int(t) > 50 else '20'}{t}\") for t in tokens]",
"def grab_by_year(dataframe, year_col, value_col, year_start=None,\n year_end=None):\n\n if year_start is None and year_end is None:\n print(\"year_start and year_end are not defined. Insert a value \\\n for at least one year variable.\")\n\n if year_end is None:\n grab_years = dataframe[(dataframe[year_col] >= year_start)\n ][value_col]\n if year_start is None:\n grab_years = dataframe[(dataframe[year_col] <= year_end)\n ][value_col]\n\n if year_start is not None and year_end is not None:\n grab_years = dataframe[(dataframe[year_col] >= year_start) &\n (dataframe[year_col] <= year_end)\n ][value_col]\n\n return grab_years",
"def get_models_between(start_year, end_year):\n\n results = db.session.query(Model).filter(Model.year >= start_year, Model.year < end_year).all()\n\n return results",
"def get_models_between(start_year, end_year):\n\n models = Model.query.filter(Model.year >= start_year,\n Model.year < end_year).all()\n return models",
"def t_range_years(t_range):\r\n start_year = int(t_range[0].split(\"-\")[0])\r\n end_year = int(t_range[1].split(\"-\")[0])\r\n end_month = int(t_range[1].split(\"-\")[1])\r\n end_day = int(t_range[1].split(\"-\")[2])\r\n if end_month == 1 and end_day == 1:\r\n year_range_list = np.arange(start_year, end_year)\r\n else:\r\n year_range_list = np.arange(start_year, end_year + 1)\r\n return year_range_list",
"def get_models_between(start_year, end_year):\n\n return Model.query.filter(Model.year > start_year, Model.year < end_year)",
"def dbf_years(self):\n return [year for year in self.years if year <= 2020]",
"def _builtin_between(low, high, value, **k):\n mode = check_mode((low, high, value), ['iii', 'iiv'], functor='between', **k)\n low_v = int(low)\n high_v = int(high)\n if mode == 0: # Check\n value_v = int(value)\n if low_v <= value_v <= high_v:\n return [(low, high, value)]\n else: # Enumerate\n results = []\n for value_v in range(low_v, high_v + 1):\n results.append((low, high, Constant(value_v)))\n return results",
"def get_ranges(self, tchain, kw):\n (lo, hi) = (\"min\", \"max\")\n ran = None\n for t in tchain:\n rstmt = t.search_one(kw)\n if rstmt is None: continue\n ran = [ i.split(\"..\") for i in rstmt.arg.split(\"|\") ]\n if ran[0][0] != 'min': lo = ran[0][0]\n if ran[-1][-1] != 'max': hi = ran[-1][-1]\n if ran is None: return None\n if len(ran) == 1:\n return [(lo, hi)]\n else:\n return [(lo, ran[0][-1])] + ran[1:-1] + [(ran[-1][0], hi)]",
"def get_range(value):\n\n raw = value\n\n # If we find a '@' at the beginning of the range, we should invert\n # the match.\n\n invert = False\n\n if value.find('@') == 0:\n invert = True\n value = value.lstrip('@')\n\n # The : separates a max/min range. If it exists, there is at least\n # a minimum. We'll start our ranges at zero and infinity so we don't\n # have to worry about complex testing logic.\n\n bottom = 0\n top = float('infinity')\n\n if value.find(':') > 0:\n (bottom, top) = value.split(':')\n if top == '':\n top = float('infinity')\n else:\n top = float(top)\n\n if bottom == '':\n bottom = 0\n elif bottom == '~':\n bottom = -float('infinity')\n else:\n bottom = float(bottom)\n else:\n top = float(value)\n\n return (bottom, top, invert, raw)",
"def constraint_clause_in_range_validator(field, presentation, context):\n\n field.default_validate(presentation, context)\n\n values = getattr(presentation, field.name)\n if isinstance(values, list):\n # Make sure list has exactly two elements\n if len(values) == 2:\n lower, upper = values\n the_type = presentation._get_type(context)\n\n # Lower bound must be coercible\n lower = coerce_value(context, presentation, the_type, None, None, lower, field.name)\n\n if upper != 'UNBOUNDED':\n # Upper bound be coercible\n upper = coerce_value(context, presentation, the_type, None, None, upper, field.name)\n\n # Second \"in_range\" value must be greater or equal than first\n if (lower is not None) and (upper is not None) and (lower >= upper):\n context.validation.report(\n u'upper bound of \"in_range\" constraint is not greater than the lower bound'\n u' in \"{0}\": {1} <= {2}'\n .format(presentation._container._fullname, safe_repr(lower),\n safe_repr(upper)),\n locator=presentation._locator, level=Issue.FIELD)\n else:\n context.validation.report(\n u'constraint \"{0}\" is not a list of exactly 2 elements in \"{1}\": {2}'\n .format(field.name, presentation._fullname, safe_repr(values)),\n locator=presentation._get_child_locator(field.name), level=Issue.FIELD)",
"def ex_range(data):\n a, b, step = _cleanse_range_args(data)\n return list(range(a, b+sign(step), step))",
"def range(self, value):\n self.value_range = tuple([float(x) for x in value.split(':')])",
"def year_range(df):\n\n if not isinstance(df, pd.DataFrame):\n print(\"year_range was not passed a pandas DataFrame.\")\n return\n\n df['year_start'] = df['year'].min()\n df['year_end'] = df['year'].max()\n df.drop('year' , axis = 1, inplace = True)\n return df",
"def year_cv_split(X, year_range):\n return [\n ((X[\"year\"] < year).to_numpy(), (X[\"year\"] == year).to_numpy())\n for year in range(*year_range)\n ]",
"def multiple_years(our_data, start, end):\n count = start\n album_list = []\n while count <= end:\n album_list.append(find_by_year(our_data,count))\n count += 1",
"def _date_match_to_int_or_tuple(self, m):\n\n years = set()\n try:\n if m[0] != '':\n # yyyy-dd-mm\n years = int(m[0][0:4])\n elif m[1] != '':\n # year-range derived from a century\n century = int(re.match(re.compile('\\\\d+'), m[1]).group(0))\n\n match = re.compile(self.regexes['capture']['century-plus-suffix']).match(m[1])\n suffix = match.group(2)\n if suffix:\n if re.compile(self.regexes['match']['suffix-bce']).match(suffix) is not None:\n years = (100 * -century, 100 * -century + 99)\n else:\n years = (100 * (century - 1), 100 * (century - 1) + 99)\n else:\n years = (100 * (century - 1), 100 * (century - 1) + 99)\n elif m[2] != '':\n # explicit year-range\n\n # FIXME: spaghetti code, but it works!\n range_of_stuff = []\n i = 0\n first_none = None\n for y in re.sub(self.regexes['substitution']['year-year-splitter'], r'\\1>|<\\2', m[2]).split('>|<'):\n # get rid of whitespace\n y = y.strip()\n match = re.compile(self.regexes['capture']['year']).match(y)\n\n # if there is a suffix, one of these will not be None\n suffix = match.group(2) or match.group(4)\n if suffix:\n if i == 0:\n first_none = False\n\n if re.compile(self.regexes['match']['suffix-bce']).match(suffix) is not None:\n range_of_stuff.append(-1 * int(match.group(1) or match.group(3)))\n else:\n range_of_stuff.append(int(match.group(1) or match.group(3)))\n else:\n if i == 0:\n first_none = True\n range_of_stuff.append(int(match.group(1) or match.group(3)))\n\n i += 1\n if first_none:\n if range_of_stuff[1] <= 0:\n range_of_stuff[0] = -1 * range_of_stuff[0]\n\n years = (range_of_stuff[0], range_of_stuff[1])\n elif m[3] != '':\n # extract single year\n prep = re.sub(self.regexes['substitution']['dd-mon-year-time'], r'\\1', m[3]).strip()\n years = int(prep)\n elif m[4] != '':\n # year with unknown ones\n y = m[4].strip()\n match = re.compile(r'[1-9]\\d{3}').match(y)\n if match is None:\n years = int(self._resolve_unknown_ones(y))\n\n else:\n years = int(match.group(0))\n\n elif m[5] != '':\n # plain old year\n match = re.compile(self.regexes['capture']['year']).match(m[5])\n suffix = match.group(2) or match.group(4)\n if suffix:\n if re.compile(self.regexes['match']['suffix-bce']).match(suffix) is not None:\n years = -1 * int(match.group(1) or match.group(3))\n else:\n years = int(match.group(1) or match.group(3))\n else:\n years = int(match.group(1) or match.group(3))\n else:\n raise Error\n\n except ValueError as e:\n #logger.error('An error occurred while trying to match \"{}\": {}'.format(m, e))\n pass\n\n #logger.debug('Mapping match to years: {} -> {}'.format(m, years))\n return years",
"def get_start_end_years(df: pd.DataFrame) -> Tuple[int, int]:\n return df.iloc[0].year, df.iloc[-1].year",
"def xbrl_years(self):\n return [year for year in self.years if year >= 2021]",
"def _read_range(range: str) -> Tuple[str, List[Tuple[Union[int, None], Union[int, None]]]]:\n format, split_on_pairs = range.split('=', 1)\n split_on_pairs = split_on_pairs.split(',')\n pairs = []\n for pair_str in split_on_pairs:\n split_on_range = pair_str.split('-', 1)\n start = int(split_on_range[0]) if len(split_on_range[0]) > 0 else None\n stop = int(split_on_range[1]) if len(split_on_range[1]) > 0 else None\n pairs.append((start, stop))\n return format, pairs",
"def calcrange(a4lim,data):\r\n a4range=N.intersect1d(N.where(data>a4lim[0])[0],N.where(data<a4lim[1])[0])\r\n return a4range",
"def extractRangeTable(database: str, table: str, columnNumber: int, lower: any, upper: any) -> list:\n\n bd = _database(database)\n\n if bd:\n\n tb = _table(database, table)\n\n if tb:\n\n mode = tb[\"modo\"]\n\n val = -1\n\n if mode == \"avl\":\n val = avl.extractRangeTable(database, table, columnNumber, lower, upper)\n\n elif mode == \"b\":\n val = b.extractRangeTable(database, table, columnNumber, lower, upper)\n\n elif mode == \"bplus\":\n val = bplus.extractRangeTable(database, table, columnNumber, lower, upper)\n\n elif mode == \"hash\":\n val = hash.extractRangeTable(database, table, columnNumber, lower, upper)\n\n elif mode == \"isam\":\n val = isam.extractRangeTable(database, table, columnNumber, lower, upper)\n\n elif mode == \"json\":\n val = json.extractRangeTable(database, table, lower, upper)\n\n elif mode == \"dict\":\n val = dict.extractRangeTable(database, table, columnNumber, lower, upper)\n\n return val\n\n else:\n return 3\n\n else:\n return 2",
"def get_range(self) -> tuple[int, int]:\n return self.range_from, self.range_to",
"def value_range(self, rng):\n start, end = rng.split(':')\n (row_offset, column_offset) = a1_to_rowcol(start)\n (last_row, last_column) = a1_to_rowcol(end)\n\n out = []\n for col in self.values[row_offset - 1:last_row]:\n out.extend(col[column_offset - 1:last_column])\n return out",
"def parse_range_once(value, expand=True):\n value = value.split(',')\n subvalues = []\n for subvalue in value:\n if '-' in subvalue:\n low, high = [int(v) for v in subvalue.split('-')]\n if expand:\n subvalues += list(range(low, high + 1))\n elif low == high - 1:\n subvalues += [low, high]\n else:\n subvalues += [(low, high)]\n else:\n subvalues += [int(subvalue)]\n return subvalues",
"def _select_by_range(self, disc_low, disc_high):\n sqlstmt = \"SELECT h FROM %s WHERE d>=? and d<=?\" % self.VIEW\n pickup = self.cursor.execute(sqlstmt, (disc_low, disc_high,))\n return [h[0] for h in pickup]"
]
| [
"0.6570659",
"0.65618116",
"0.6470824",
"0.63468575",
"0.6273404",
"0.6227882",
"0.6117337",
"0.60214007",
"0.6008347",
"0.60038984",
"0.60016304",
"0.5972599",
"0.56596154",
"0.5565629",
"0.5526359",
"0.55116224",
"0.5479264",
"0.546489",
"0.54628104",
"0.54439676",
"0.5379518",
"0.53714883",
"0.53443915",
"0.5307761",
"0.5307479",
"0.53036976",
"0.52847874",
"0.5277892",
"0.5259428",
"0.5255398"
]
| 0.7706698 | 0 |
Coerce the value returned by the database backend into a consistent type that is compatible with the field type. In our case, cater for the fact that SQL Server < 2008 has no separate Date and Time data types. | def convert_values(self, value, field):
if value is None:
return None
if field and field.get_internal_type() == 'DateTimeField':
if isinstance(value, string_types) and value:
value = parse_datetime(value)
return value
elif field and field.get_internal_type() == 'DateField':
if isinstance(value, datetime.datetime):
value = value.date() # extract date
elif isinstance(value, string_types):
value = parse_date(value)
elif field and field.get_internal_type() == 'TimeField':
if (isinstance(value, datetime.datetime) and value.year == 1900 and value.month == value.day == 1):
value = value.time() # extract time
elif isinstance(value, string_types):
# If the value is a string, parse it using parse_time.
value = parse_time(value)
# Some cases (for example when select_related() is used) aren't
# caught by the DateField case above and date fields arrive from
# the DB as datetime instances.
# Implement a workaround stealing the idea from the Oracle
# backend. It's not perfect so the same warning applies (i.e. if a
# query results in valid date+time values with the time part set
# to midnight, this workaround can surprise us by converting them
# to the datetime.date Python type).
elif isinstance(value, datetime.datetime) and value.hour == value.minute == value.second == value.microsecond == 0:
value = value.date()
# Force floats to the correct type
elif value is not None and field and field.get_internal_type() == 'FloatField':
value = float(value)
return value | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_types(cls, value):\n if type(value) in (datetime, date):\n return time.mktime(value.timetuple())\n elif isinstance(value, Decimal):\n return float(value)\n else:\n return value",
"def normalise_field_value(value):\n if isinstance(value, datetime):\n return make_timezone_naive(value)\n elif isinstance(value, Decimal):\n return decimal_to_string(value)\n return value",
"def _type_convert(self, value):\n if value is None:\n return value\n\n try:\n return datetime.datetime.strptime(value, \"%Y-%m-%d %H:%M:%S\")\n except ValueError:\n pass\n\n try:\n return int(value)\n except ValueError:\n pass\n\n try:\n if _parser(value.strip().replace(\"_\", \"\")):\n return decimal.Decimal(value)\n except decimal.InvalidOperation:\n pass\n\n return value",
"def _handle_sql_types(value):\n if type(value) is datetime:\n return value.isoformat()\n return str(value)",
"def python_cast(self, v):\n\n if self.type_is_time():\n import dateutil.parser\n dt = dateutil.parser.parse(v)\n\n if self.datatype == Column.DATATYPE_TIME:\n dt = dt.time()\n if not isinstance(dt, self.python_type):\n raise TypeError(\n '{} was parsed to {}, expected {}'.format(\n v,\n type(dt),\n self.python_type))\n\n return dt\n else:\n # This isn't calling the python_type method -- it's getting a python type, then instantialting it,\n # such as \"int(v)\"\n return self.python_type(v)",
"def _check_value(value, field):\n if not value:\n return False\n\n if field.get('date', False):\n # Get date format\n date_format = field.get('date_format', False) or json_pattern_part.get('date_format', False) or self.master_json_pattern.get('date_format', False)\n if date_format:\n value = datetime.strptime(value, date_format)\n\n if field.get('name'):\n field_name = field.get('name')\n # Get the type of the column and cast if necessary\n field_type = model_obj._columns[field_name]._type\n if field_type == 'integer':\n try:\n value = int(value)\n except (TypeError, ValueError), e:\n logger.warning(\"Cannot convert value of integer field to int : %s for field %s\" % (value, field_name))\n logger.warning(e)\n logger.warn(\"Cannot convert value of integer field to int : %s for field %s\" % (value, field_name))\n elif field_type == 'float':\n try:\n value = float(value)\n except (TypeError, ValueError), e:\n logger.warning(\"Cannot convert value of float field to float : %s for field %s\" % (value, field_name))\n logger.warning(e)\n logger.warn(\"Cannot convert value of float field to float : %s for field %s\" % (value, field_name))\n return value",
"def _get_value_as_type(self, forced_type=None):\r\n type = forced_type or self.type\r\n try:\r\n converted_value = self.value\r\n if not type:\r\n try:\r\n converted_value = ast.literal_eval(self.value)\r\n except (ValueError, SyntaxError):\r\n # Unable to convert the metadata value automatically\r\n # let it default to self.value\r\n pass\r\n else:\r\n if type not in self._supported_types:\r\n # Types must be explicitly declared so the\r\n # correct type converter may be used. Subclasses\r\n # of Query may define _supported_types and\r\n # _type_converters to define their own types.\r\n raise TypeError()\r\n converted_value = self._type_converters[type](self.value)\r\n except ValueError:\r\n msg = (_('Unable to convert the value %(value)s'\r\n ' to the expected data type %(type)s.') %\r\n {'value': self.value, 'type': type})\r\n raise ClientSideError(msg)\r\n except TypeError:\r\n msg = (_('The data type %(type)s is not supported. The supported'\r\n ' data type list is: %(supported)s') %\r\n {'type': type, 'supported': self._supported_types})\r\n raise ClientSideError(msg)\r\n except Exception:\r\n msg = (_('Unexpected exception converting %(value)s to'\r\n ' the expected data type %(type)s.') %\r\n {'value': self.value, 'type': type})\r\n raise ClientSideError(msg)\r\n return converted_value",
"def field_type_converter(self, old_type):\n\n if old_type == 'String':\n new_type = 'Text'\n elif old_type == 'Integer':\n new_type = 'Short'\n elif old_type == 'Date':\n new_type = 'Date'\n elif old_type == 'GlobalID':\n new_type = 'GUID'\n else:\n new_type = 'Double'\n return new_type",
"def _cast_field(self, cast_to, value):\n if cast_to in (int, long, str):\n return cast_to(value)\n elif cast_to == unicode:\n try:\n value = value.decode(self.charset, self.errors)\n except UnicodeEncodeError, e:\n raise InvalidData(\"Error encoding unicode value '%s': %s\" % (repr(value), e))\n\n return value\n elif cast_to in (any, bytes):\n return value\n else:\n raise TypeError(\"Invalid field type %s\" % (cast_to))",
"def cast_type(cdm_column_type, value):\n if cdm_column_type in ('integer', 'int64'):\n # Regex check only relevant if submission dtype is 'object'\n if not re.match(SCIENTIFIC_NOTATION_REGEX, str(value)):\n return int(value)\n if cdm_column_type in ('character varying', 'text', 'string'):\n return str(value)\n if cdm_column_type == 'numeric':\n return float(value)\n if cdm_column_type == 'float' and isinstance(value, float):\n return value\n if cdm_column_type == 'date' and isinstance(value, datetime.date):\n return value\n if cdm_column_type == 'timestamp' and isinstance(\n value, datetime.datetime): # do not do datetime.datetime\n return value",
"def _convert_field_type(row):\n return row",
"def to_python(self, value):\n if isinstance(value, models.CharField):\n # If an instance, just return the instance.\n return value\n if value is None:\n # If db has NULL, convert it to UNKNOWN.\n return UNKNOWN\n\n # Otherwise, just return the value.\n return value",
"def convert_type(self, value, schema_type, **kwargs):",
"def init_value(self, value, strict: bool = True):\n if isinstance(value, str):\n value = datetime.datetime.fromisoformat(value)\n elif isinstance(value, float):\n value = datetime.datetime.fromtimestamp(value)\n return super().init_value(value, strict)",
"def _to_python(self, value):\n if isinstance(value, (int, float, long, complex)):\n return value\n\n if isinstance(value, (list, tuple)):\n result = [self._to_python(v) for v in value]\n if isinstance(value, tuple):\n result = tuple(result)\n return result\n\n if value == \"true\":\n return True\n elif value == \"false\":\n return False\n\n is_string = False\n\n if IS_PY3:\n if isinstance(value, bytes):\n value = force_unicode(value)\n\n if isinstance(value, str):\n is_string = True\n else:\n if isinstance(value, str):\n value = force_unicode(value)\n\n if isinstance(value, basestring): # NOQA: F821\n is_string = True\n\n if is_string:\n possible_datetime = DATETIME_REGEX.search(value)\n\n if possible_datetime:\n date_values = possible_datetime.groupdict()\n\n for dk, dv in date_values.items():\n date_values[dk] = int(dv)\n\n return datetime.datetime(\n date_values[\"year\"],\n date_values[\"month\"],\n date_values[\"day\"],\n date_values[\"hour\"],\n date_values[\"minute\"],\n date_values[\"second\"],\n )\n\n try:\n # This is slightly gross but it's hard to tell otherwise what the\n # string's original type might have been.\n return ast.literal_eval(value)\n except (ValueError, SyntaxError):\n # If it fails, continue on.\n pass\n\n return value",
"def enforce_param_datatype(cls, name, value, dtype: DataType):\n if value is None:\n return\n\n if dtype == DataType.datetime:\n try:\n datetime_value = np.datetime64(value).item()\n if isinstance(datetime_value, int):\n raise MlflowException.invalid_parameter_value(\n f\"Invalid value for param {name}, it should \"\n f\"be convertible to datetime.date/datetime, got {value}\"\n )\n return datetime_value\n except ValueError as e:\n raise MlflowException.invalid_parameter_value(\n f\"Failed to convert value {value} from type {type(value).__name__} \"\n f\"to {dtype} for param {name}\"\n ) from e\n\n # Note that np.isscalar(datetime.date(...)) is False\n if not np.isscalar(value):\n raise MlflowException.invalid_parameter_value(\n f\"Value should be a scalar for param {name}, got {value}\"\n )\n\n # Always convert to python native type for params\n if getattr(DataType, f\"is_{dtype.name}\")(value):\n return DataType[dtype.name].to_python()(value)\n\n if (\n (\n DataType.is_integer(value)\n and dtype in (DataType.long, DataType.float, DataType.double)\n )\n or (DataType.is_long(value) and dtype in (DataType.float, DataType.double))\n or (DataType.is_float(value) and dtype == DataType.double)\n ):\n try:\n return DataType[dtype.name].to_python()(value)\n except ValueError as e:\n raise MlflowException.invalid_parameter_value(\n f\"Failed to convert value {value} from type {type(value).__name__} \"\n f\"to {dtype} for param {name}\"\n ) from e\n\n raise MlflowException.invalid_parameter_value(\n f\"Incompatible types for param {name}. Can not safely convert {type(value).__name__} \"\n f\"to {dtype}.\",\n )",
"def _make_serializable(self, field):\n if isinstance(field, datetime):\n return str(field)\n elif isinstance(field, Decimal):\n return float(field)\n else:\n return field",
"def cast(self, value):\n if value is None:\n return None\n return self.type(value)",
"def cast(self, value):\n\n return value",
"def _value_from_db(self, value, field, field_kind, db_type):\r\n\r\n # We could have stored None for a null field.\r\n if value is None:\r\n return None\r\n\r\n # All keys were converted to the Key class.\r\n if db_type == 'key':\r\n assert isinstance(value, Key), \\\r\n \"GAE db.Key expected! Try changing to old storage, \" \\\r\n \"dumping data, changing to new storage and reloading.\"\r\n assert value.parent() is None, \"Parents are not yet supported!\"\r\n value = value.id_or_name()\r\n# value = self._value_from_db_key(value, field_kind)\r\n\r\n # Always retrieve strings as unicode (old datasets may\r\n # contain non-unicode strings).\r\n elif db_type == 'string' or db_type == 'text':\r\n if isinstance(value, str):\r\n value = value.decode('utf-8')\r\n else:\r\n value = unicode(value)\r\n\r\n # Dates and times are stored as datetimes, drop the added part.\r\n elif db_type == 'date':\r\n value = value.date()\r\n elif db_type == 'time':\r\n value = value.time()\r\n\r\n # Convert GAE Blobs to plain strings for Django.\r\n elif db_type == 'bytes':\r\n value = str(value)\r\n\r\n # Revert the decimal-to-string encoding.\r\n if field_kind == 'DecimalField':\r\n value = decimal.Decimal(value)\r\n\r\n return super(DatabaseOperations, self)._value_from_db(\r\n value, field, field_kind, db_type)",
"def adapt_datetimefield_value(self, value):\n if value is None:\n return None\n # Expression values are adapted by the database.\n if hasattr(value, 'resolve_expression'):\n return value\n \n if settings.USE_TZ and timezone.is_aware(value):\n # pyodbc donesn't support datetimeoffset\n value = value.astimezone(self.connection.timezone).replace(tzinfo=None)\n \n return value",
"def to_mongo(self, value):\n\n try:\n if not isinstance(value, datetime) or not value:\n pdate = value.toPyDate()\n return datetime(pdate.year, pdate.month, pdate.day)\n else:\n return value\n except AttributeError:\n return value\n\n # pyValue = value.toPyDate()\n # return datetime(pyValue.year, pyValue.month, pyValue.day)",
"def mongo_to_python_type(field, data):\n if isinstance(field, ObjectIdField):\n return str(data)\n elif isinstance(field, DecimalField):\n return data\n elif isinstance(field, BooleanField):\n return data\n else:\n return str(data)",
"def _parse_dtypes(data, table_meta):\n for name, field in table_meta['fields'].items():\n field_type = field['type']\n if field_type == 'datetime':\n datetime_format = field.get('format')\n data[name] = pd.to_datetime(data[name], format=datetime_format, exact=False)\n elif field_type == 'numerical' and field.get('subtype') == 'integer':\n data[name] = data[name].dropna().astype(np.int64)\n elif field_type == 'id' and field.get('subtype', 'integer') == 'integer':\n data[name] = data[name].dropna().astype(np.int64)\n\n return data",
"def __convToTyped(index, value, dtypes):\n\t#print(index, value)\n\tdtype = dtypes[index]\n\ttvalue = value\n\tif dtype == \"int\":\n\t\ttvalue = int(value)\n\telif dtype == \"float\":\n\t\ttvalue = float(value)\n\treturn tvalue",
"def castType(self, valueType, value):\n try:\n return valueType(value)\n except (ValueError, TypeError):\n self.error('incorrect type \"{}\"'.format(value))",
"def db_cast(self):\n if self.is_int:\n return 'BIGINT'\n return 'TEXT'",
"def type_cast(self, value, data_type):\n if isinstance(data_type, BaseArg):\n value = data_type(value)\n elif isinstance(value, data_type) is False:\n if self.auto_type_cast and isinstance(value, str) and data_type in (int, bool, float):\n if data_type is bool:\n value = value.lower()\n if value not in {\"true\", \"false\"}:\n raise Exception()\n value = True if value == \"true\" else False\n else:\n value = data_type(value)\n else:\n raise Exception()\n return value",
"def _cast(d):\n if d[u\"type\"] in (u\"uri\", u\"bnode\", u\"literal\"):\n return d[u\"value\"]\n elif d[u\"type\"] == u\"typed-literal\":\n if d[u\"datatype\"] == u\"http://www.w3.org/2001/XMLSchema#integer\":\n return int(d[u\"value\"])\n elif d[u\"datatype\"] == u\"http://www.w3.org/2001/XMLSchema#float\":\n return float(d[u\"value\"])\n elif d[u\"datatype\"] == u\"http://www.w3.org/2001/XMLSchema#double\":\n return float(d[u\"value\"])\n elif d[u\"datatype\"] == u\"http://www.w3.org/2001/XMLSchema#integer\":\n return d[u\"value\"]\n raise NotImplementedError(\"can not cast '{}'\".format(d.items()))",
"def field_to_object(value):\n mapping = {\n str: StringField,\n int: IntField,\n list: ListField,\n dict: DictField,\n datetime.datetime: DateField,\n }\n return mapping.get(type(value), AnyField)(value)"
]
| [
"0.7383127",
"0.71478885",
"0.7121961",
"0.69921225",
"0.69495225",
"0.6938221",
"0.6927089",
"0.6910447",
"0.68615055",
"0.682171",
"0.6807103",
"0.6564544",
"0.6458779",
"0.63955134",
"0.6381338",
"0.6371626",
"0.6342293",
"0.63253826",
"0.6269357",
"0.62493396",
"0.62393546",
"0.61977553",
"0.6192072",
"0.6188446",
"0.6188315",
"0.61100996",
"0.6077092",
"0.60716367",
"0.6070876",
"0.6016535"
]
| 0.7590033 | 0 |
Return all the sites that are in FILENAME. | def get_sites():
sites_file = open(FILENAME)
sites = []
for site in sites_file:
sites.append("http://" + site.strip())
return sites | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def dorkScanner():\n pysearch.PySearch()\n openfile = open(\"sites.txt\", 'r')\n urls = openfile.read()\n openfile.close()\n return urls",
"def get_sites():\n #sites = sys.argv[1:] # Accept sites from command line input\n\n # Read in additional sites to monitor from sites.txt file\n try:\n sites = [site.strip() for site in io.open('sites.txt', mode='r').readlines()]\n except IOError:\n print (colorize(\"No sites.txt file found\", \"red\"))\n\n # Add protocol if missing in URL\n for site in range(len(sites)):\n if sites[site][:7] != \"http://\" and sites[site][:8] != \"https://\":\n sites[site] = \"http://\" + sites[site]\n\n # Eliminate exact duplicates in sites\n sites = list(set(sites))\n\n return sites",
"def list_sites():\n result = []\n querystring = 'select sitename from {};'.format(TABLES[0]))\n res = execute_query(querystring)\n if res:\n result = [x[0] for x in res]\n return result",
"def load_captured_urls_local(filename):\n if os.path.isfile(filename) == False:\n return []\n \n with open(filename, 'r') as f:\n return f.read().splitlines()",
"def get_website_URLs():\n\tfilepath = os.path.dirname(os.path.realpath(__file__)) +\"/web_sources\"\n\tf = open(filepath, 'r')\n\twebsites = []\n\tfor line in f:\n\t\tif line != \"\\n\":\n\t\t\tendl_index = line.index('\\n')\n\t\t\tclean_line = line[:endl_index]\n\t\t\tnew_list = clean_line.split(' ', 1)\n\t\t\twebsites.append(new_list)\n\tf.close()\n\treturn websites",
"def extract_URLs(self, input_file_name):\n file = open(input_file_name, 'r')\n lines = []\n for line in file.readlines():\n # Don't add empty lines.\n if len(line.strip()) > 0:\n lines.append(line.strip())\n return lines",
"def read_urls(file):\r\n with open(file, \"r+\") as url_file:\r\n url_list = url_file.readlines()\r\n return url_list",
"def getListOfSites(self):\n with self.config.TaskWorker.envForCMSWEB:\n sites = self.resourceCatalog.getAllPSNs()\n filteredSites = [site for site in sites if not site.startswith(\"T1_\")]\n\n return filteredSites",
"def read_urls(filename):\n # +++your code here+++\n\n res=utility(filename)\n for i in res:\n \tprint i",
"def read_file_names(self):\n files_BIDMC = os.listdir(self.root_dir_BIDMC)\n masks_BIDMC = os.listdir(self.seg_dir_BIDMC)\n files_HK = os.listdir(self.root_dir_HK)\n masks_HK = os.listdir(self.seg_dir_HK)\n files_I2CVB = os.listdir(self.root_dir_I2CVB)\n masks_I2CVB = os.listdir(self.seg_dir_I2CVB)\n files_ISBI = os.listdir(self.root_dir_ISBI)\n masks_ISBI = os.listdir(self.seg_dir_ISBI)\n files_ISBI_15 = os.listdir(self.root_dir_ISBI_15)\n masks_ISBI_15 = os.listdir(self.seg_dir_ISBI_15)\n files_UCL = os.listdir(self.root_dir_UCL)\n masks_UCL = os.listdir(self.seg_dir_UCL)\n site_files = [files_BIDMC, files_HK, files_I2CVB, files_ISBI, files_ISBI_15, files_UCL]\n site_masks = [masks_BIDMC, masks_HK, masks_I2CVB, masks_ISBI, masks_ISBI_15, masks_UCL]\n return site_files, site_masks",
"def fileReader(filename):\n try:\n openfile = open(filename, 'r')\n urls = openfile.read()\n openfile.close()\n return urls\n except IOError:\n print \"File tidak ada\"\n exit()",
"def read_urls(filename):\n # +++your code here+++\n result = []\n if not path_exists(filename):\n print 'Path ' + filename + ' doesn\\'t exist!'\n sys.exit(1)\n \n # get base url from the filename\n match = re.search(r'\\S*_(\\S*)', filename)\n host = 'http://' + match.group(1)\n \n # read file for urls\n file = open(filename, 'rU')\n for line in file:\n match = re.search(r'\\S*puzzle\\S*.jpg', line)\n if match:\n result.append(host + match.group())\n file.close()\n # sort the list and remove duplicates (-> set)\n return sorted(set(result), key=sortedFn)\n #return sorted(set(result))",
"def get_site_to_check(self, filename):\n csv_file = csv.reader(open(filename, 'r'), delimiter=\";\")\n \n sites={}\n for row in csv_file:\n if len(row) == 0:\n continue\n site_url = row[0]\n sites[site_url] = {\"url\":site_url}\n \n sites[site_url][\"checks\"] = []\n for check in row[1:]:\n sites[site_url][\"checks\"].append(check)\n return sites",
"def get_domains(filename):\n with open(filename, 'r') as file:\n result = []\n for line in file.readlines():\n domain = line.strip()[1:]\n result.append(domain)\n return result",
"def get_list_of_sites(self):\n\n return self.site_db_obj.get_list_of_sites()",
"def get_list_of_sites(self) -> list:\n ah_write = self.get_iis_object()\n section = ah_write.GetAdminSection(\"system.applicationHost/sites\", \"MACHINE/WEBROOT/APPHOST\")\n collection = section.Collection\n result = []\n\n for i in range(collection.Count):\n\n site = collection[i]\n prop = site.Properties\n # site_id = prop[\"id\"].Value\n name = prop[\"name\"].Value\n default_app = self.get_default_app(site)\n bindings = self.get_site_bindings(site.ChildElements)\n applications = self.get_applications(site)\n if default_app and not os.path.exists(self.core.expandvars(default_app[\"physicalPath\"])):\n # не показывать сайты для которых нет физ. директории для иис экспреса\n continue\n site = Site(name, bindings, default_app, applications)\n if hasattr(site, 'port') and site.port != 0:\n result.append(site)\n\n return result",
"def get_urls(self):\r\n if self.mod.filename:\r\n return [x + self.mod.filename for x in self.mod.service.get_mirrors()]",
"def get_sites():\n sites = [ x.get('siteid') for x in Schedconfig.objects.values('siteid').distinct() ]\n locale.setlocale(locale.LC_ALL, '')\n sites = sorted(sites, key=locale.strxfrm)\n return sites",
"def sites(self):\n return self.data.sites.values",
"def __read_file(file_path):\n assert os.path.exists(file_path), 'FILE \"{}\" NOT FOUND,' \\\n ' PLEASE GIVE THE CORRECT FILE PATH.'.format(file_path)\n url_list = []\n if file_path == '':\n return url_list\n else:\n my_file = open(file_path, 'r')\n for line in my_file.readlines():\n url_list.append(''.join(line.split('\\n')))\n return url_list",
"def find_all(client):\n return list(map(lambda s: Site(s), client.get_api_resource(\"self/sites\")))",
"def sites(self):\n return self._sites",
"def sites(self):\n return self._sites",
"def get_all_site_names(_current_parser=None):\n parser = _get_parser(_current_parser)\n return [site for site in parser if site != \"DEFAULT\"]",
"def load_mapping():\n return [l.strip() for l in open(ALL_URL_LIST)]",
"def get_vhosts( self ):\n self.sites_available = []\n self.created_RE = re.compile( '#VirtualHost created by localsite.py' )\n vlogging.ret_logger.debug( 'listing localsites now' )\n available_dir = '/etc/apache2/sites-available'\n sites_available = os.listdir( available_dir )\n vlogging.ret_logger.info( sites_available )\n sites_enabled = os.listdir( '/etc/apache2/sites-enabled' ) \n for item in sites_available:\n file_path = os.path.join( available_dir, item )\n if os.path.isfile( file_path ):\n f = open( file_path, 'rb' )\n # Check to see if the site was created by vhosts.py\n if self.created_RE.match( f.read( 36 ) ):\n if item in sites_enabled:\n enabled = True\n else:\n enabled = False\n vhost = VirtualHost( item, enabled )\n self.sites_available.append( vhost )\n f.close()\n return self.sites_available",
"def sites(self):\n return self.properties.get('sites',\n SiteCollection(self.context, ResourcePath(\"sites\", self.resource_path)))",
"def load_sites(self, site_list: list = None):\n try:\n sites = self.api.get(host=self.host, endpoint=f\"/api/v1/orgs/{self.oid}/sites\")\n except Exception as e:\n logger.error(f\"{TextColors.FAIL}Error getting org sites:{TextColors.ENDC} {e}\")\n raise e\n if site_list:\n sites = [s for s in sites if s['name'] in site_list]\n self.sites = sites",
"def get_links_from_file( filename ):\n\twith open( filename, 'r') as f:\n\t\tfor url in f:\n\t\t\tyield url",
"def loadServerList(inputFile='servers.csv', httpsonly=True):\n servers = []\n with open('servers.csv', 'r') as csvfile:\n serverreader = csv.reader(csvfile, delimiter=',', quotechar='|')\n for row in serverreader:\n name, url = row\n if url:\n host = urlparse(url)\n if not httpsonly or host.scheme == 'https':\n servers += [host.netloc]\n return servers"
]
| [
"0.63554835",
"0.63197654",
"0.6251028",
"0.6009859",
"0.59881043",
"0.5953247",
"0.5951535",
"0.592165",
"0.5893977",
"0.5885239",
"0.58738947",
"0.5866899",
"0.5862455",
"0.5860901",
"0.5763758",
"0.57449645",
"0.5721816",
"0.56856227",
"0.56841946",
"0.5682537",
"0.5669579",
"0.5659974",
"0.5659974",
"0.5654441",
"0.5647867",
"0.5637065",
"0.56112856",
"0.5579861",
"0.5573804",
"0.55621386"
]
| 0.77288526 | 1 |
Attempt to read the given site. Return the text of the site if successful, otherwise returns False. | def read_site(site):
try:
connection = urlopen(site)
html = connection.read()
connection.close()
except:
return False
parser = HtmlTextParser()
parser.parse(html)
return parser.get_text() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read_text(self, url: str) -> str:\n response = self._session().get(url)\n if not response.ok:\n response.raise_for_status()\n return response.text",
"def read_text(link, from_file = False):\n\tif from_file:\n\t\ttext = read_from_file(link)\n\telse:\n\t\t_, text = read_from_web(link)\n\n\treturn text",
"def read_url(url):\n return requests.get(url).text",
"def read_url(url):\n response = requests.get(url)\n return response.text",
"def zero_day_read(url: Url) -> str:\n logging.info(f\"Reading URL: {url}\")\n with WebDriver() as driver:\n page_text = read_url_to_string(url, driver, cacher=None)\n logging.info(\"Finished pulling URL.\")\n return page_text",
"def get_page(self, url):\n\n lynx = True\n\n if lynx:\n try:\n lynxcmd = \"lynx -dump -source -useragent='Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)' %s\" % url\n content = os.popen(lynxcmd).read()\n except IOError, (errno, strerror):\n return False\n else:\n try:\n location = urllib2.urlopen(url)\n except IOError, (errno, strerror):\n return False\n content = location.read()\n\n # Clear out all troublesome whitespace\n content = content.replace(\"\\n\", \"\")\n content = content.replace(\"\\r\", \"\")\n content = content.replace(\"\\t\", \"\")\n content = content.replace(\"> \", \">\")\n content = content.replace(\" \", \" \")\n content = self.latin1_to_ascii(content)\n\n if not lynx:\n location.close()\n return content",
"def get_content_from_url(link):\n # sleep time before making web request\n sleep(SCRAPING_REQUEST_STAGGER)\n response = requests.get(link)\n if response.status_code != 200:\n return False\n return response.content",
"def read_page(url):\n\n return urllib.request.urlopen(url).read()",
"def load_page(url: str) -> str:\n try:\n response = urlopen(url)\n\n if response.status == 200:\n body_text = str(response.read())\n return body_text\n return \"\"\n except URLError:\n return \"\"",
"def one_day_read(url: Url) -> str:\n logging.info(f\"Reading URL: {url}\")\n raw_html_cacher = TimedReadWriteCacher(ttl=23)\n with WebDriver() as driver:\n page_text = read_url_to_string(url, driver, cacher=raw_html_cacher)\n logging.info(\"Finished pulling URL.\")\n return page_text",
"def get_news_text(news_url: str):\n print(\"Getting Article Content..\")\n if news_url is None:\n print(\"URL is missing\")\n return None\n else:\n article = Article(news_url)\n article.download()\n article.parse()\n return article.text",
"def check_link(url):\n try:\n\n r = requests.get(url)\n r.raise_for_status()\n r.encoding = r.apparent_encoding\n return r.text\n except:\n print('Connection Failed!!!')",
"def get_text(url):\n try:\n result = requests.get(url, headers=DEFAULT_REQUEST_HEADERS)\n result.raise_for_status()\n except requests.HTTPError as err:\n raise URLGetTextError(err)\n\n return result.text",
"def page_read(url):\n LOGGER.debug('Reading %s', url)\n return urlopen(url).read().decode('utf-8')",
"def _get_plain_text(self, url, soup, site):\n print('Get plaint text: ' + url)\n title = str(soup.find(class_=self._title_tags[site]))\n content = str(soup.find(class_=self._content_tags[site]))\n # h = html2text.HTML2Text() # uncomment this segment of code\n # h.ignore_links = True # if you want to get plain text\n # h.ignore_images = True\n # title = h.handle(title)\n # content = h.handle(content)\n if title == None or content == None:\n print('Different website structure: ' + url)\n return ''\n return self._clean(title + content, no_punc=True) # with symbols\n # return title + content # without symbols",
"def get_from_net(self, url):\n print 'opening', url\n ty = urlopen(url)\n print 'reading...'\n s = ty.read()\n print 'done'\n return s",
"def read_web(url):\n f = urllib.request.urlopen(url)\n contents = f.read()\n return contents",
"def url_read(self, url):\n if 'raise' in url:\n raise urllib.error.HTTPError(None, None, None, None, None)\n else:\n return self.contents",
"def load_url_content(url):\n try:\n r = requests.get(url)\n if r.ok:\n return r.text\n else:\n return None\n except Exception:\n return None",
"def call_website(link: str) -> str:\n r = requests.get(link)\n\n if r.status_code != 200:\n sys.exit(1)\n\n return r.text",
"def read_from_web(url):\n\tpage = urlopen(url)\n\tsoup = BeautifulSoup(page, \"lxml\")\n\ttext = ' '.join(map(lambda p: p.text, soup.find_all('p')))\n\treturn soup.title.text, text",
"def fetch(url):\n content = requests.get(url).text\n if \"Error\" in content:\n raise ValueError(f\"Cannot read from: {url}\")\n return content",
"def get_text_content(url: str) -> str:\n\n url = _fix_url(url)\n\n return get(url).text",
"def read_url(self, url: str) -> str:\n return requests.get(url, headers=self.headers).text",
"def get_page(url):\n try:\n return urlopen(url).read()\n except:\n return None\n return None",
"def get_remote_content(self, path):\n if path.startswith(\"http\"):\n page_path = path\n elif path.startswith(\"www\"):\n page_path = \"https://\" + path\n else:\n page_path = self.source + path\n \n print(\"Getting \" + page_path)\n \n try:\n resp = requests.get(page_path)\n except:\n print(\"Unable to get \" + page_path)\n return None\n \n if resp.status_code == 200:\n return resp.content\n else:\n print(\"Unable to get \" + page_path + \" Response = \" + str(resp.status_code))\n return None",
"def getContent(link):\n\n if WebNavigator.TIMING:\n WebNavigator.TIMER = time.time()\n\n pageSource = \"\"\n try:\n response = urllib.request.urlopen(link, timeout=10)\n try:\n pageSource = response.read().decode(response.headers.get_content_charset())\n except (TypeError, UnicodeDecodeError) as e:\n print(e)\n pass\n except (urllib.error.HTTPError, urllib.error.URLError, TimeoutError) as e:\n print(e)\n pass\n\n if WebNavigator.TIMING:\n print(\"WEBNAVIGATOR: Time to get content from\", link, \":\", time.time() - WebNavigator.TIMER)\n\n return pageSource",
"def pageText(self, url):\n try:\n request = urllib2.Request(url)\n request.add_header(\"User-Agent\", \"siruta_postcodes.py 1.0\")\n response = urllib2.urlopen(request)\n text = response.read()\n response.close()\n # When you load to many users, urllib2 can give this error.\n except urllib2.HTTPError, urllib2.URLError:\n self.loge(u\"Server or connection error. Pausing for 10 seconds... \" + time.strftime(\"%d %b %Y %H:%M:%S (UTC)\", time.gmtime()) )\n response.close()\n time.sleep(10)\n return pageText(url)\n return text",
"def load_text_from_url(url, **data):\n\n timeout = data.get(\"timeout\", 20)\n\n results = []\n\n try:\n # print(\"Extracting HTML from: {}\".format(url))\n response = requests.get(\n url,\n headers={\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:68.0) Gecko/20100101 Firefox/68.0\"\n },\n timeout=timeout,\n )\n\n text = response.text\n status = response.status_code\n\n if status == 200 and len(text) > 0:\n return text\n else:\n print(\"Incorrect status returned: \", status)\n\n return None\n\n except Exception as e:\n print(\"Problem with url: {0}.\".format(url))\n return None",
"def get_cont(self,url):\r\n\t\trequest = mechanize.Request(url)\r\n\t\ttry:\r\n\t\t\tresponse = mechanize.urlopen(request)\r\n\t\t\tres = response.read()\r\n\t\t\treturn res\r\n\t\texcept urllib2.HTTPError, error:\r\n\t\t\tres = error.read()\r\n\t\t\treturn res"
]
| [
"0.65649706",
"0.6558723",
"0.63546216",
"0.62799954",
"0.6234996",
"0.6133159",
"0.61123294",
"0.6106079",
"0.60595316",
"0.6043561",
"0.60159254",
"0.6012336",
"0.6000874",
"0.59981036",
"0.5962465",
"0.593739",
"0.5926055",
"0.591335",
"0.59035695",
"0.5810522",
"0.5785607",
"0.5767409",
"0.5757652",
"0.5716762",
"0.57134914",
"0.57113415",
"0.5683847",
"0.5657252",
"0.5624859",
"0.56085825"
]
| 0.82702225 | 1 |
Index the given site with the given text. | def index_site(site, text):
# YOUR CODE HERE #
pass # delete this when you write your code | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_page_to_index(index, url, content):\n words = content.split()\n for word in words:\n add_to_index(index, word, url)\n return index",
"def index_site(site, text):\n words = text.lower().split()\n for word in words:\n if word not in index: # case 1: haven't seen word anywhere\n index[word] = {site:1} # make a new entry for the word\n elif site not in index[word]: # case 2: haven't seen word on this site\n index[word][site] = 1 # make a new entry for this site\n else: # case 3: seen this word on this site\n index[word][site] += 1 # increment the frequency by 1",
"def index_site() -> None:\n app = create_web_app()\n app.app_context().push()\n index.create_index()\n\n indexable: List[IndexablePage] = []\n for path in site.list_paths():\n page: Page = site.load_page(path)\n content = bleach.clean(page.markdown, strip=True, tags=[])\n indexable.append(IndexablePage(\n title=page.title,\n path=page.path,\n content=content\n ))\n index.add_documents(*indexable)",
"def add_page_to_index(index,url,content):\n\tkeywords = split_string(content,\".,-!<>/=\\\"\")\n\tfor keyword in keywords:\n\t\tadd_to_index(index,keyword,url)",
"def add_to_index(self, url, soup):\n # Check if its been indexed\n if self.is_indexed(url):\n return\n print('Indexing ' + url)\n\n # Get the individual words\n text = self.get_text_only(soup)\n words = separate_words(text)\n\n # Get the URL id\n urlid = self.get_entry_id('urllist', 'url', url)\n\n # Link each word to this url\n for i, _ in enumerate(words):\n word = words[i]\n if word in IGNORE_WORDS:\n continue\n wordid = self.get_entry_id('wordlist', 'word', word)\n # insert word location and url into table\n self.con.execute(\"insert into wordlocation(urlid,wordid,location) values\" \\\n \"(%d,%d,%d)\" % (urlid, wordid, i))",
"def build_index():\n for site in get_sites():\n text = read_site(site)\n while text == False:\n text = read_site(site) # keep attempting to read until successful\n index_site(site, text)",
"def build_index():\n for site in get_sites():\n text = read_site(site)\n while text == False:\n text = read_site(site) # keep attempting to read until successful\n index_site(site, text)",
"def reindex_page(self, page, title, writer, text=None):\n\n if text is None:\n get_text = getattr(page, 'plain_text', lambda: u'')\n try:\n text = get_text()\n except error.NotFoundErr:\n text = None\n\n extract_links = getattr(page, 'extract_links', None)\n links = []\n wanted = []\n if extract_links and text:\n for link, label in extract_links(text):\n qlink = link.replace(u' ', u'%20')\n label = label.replace(u' ', u'%20')\n links.append(u'%s:%s' % (qlink, label))\n if link[0] != '+' and link not in wanted and link not in self.storage:\n wanted.append(qlink)\n else:\n links = []\n doc = {'title': str(title)}\n if links:\n doc['links'] = u' '.join(links)\n doc['has_links'] = True\n if wanted:\n doc['wanted'] = u' '.join(wanted)\n if text:\n doc['content'] = text\n writer.add_document(**doc)\n else:\n writer.delete_by_term('title', title)",
"async def wikipedia(self, ctx, *args):\n if args[0] == 'random':\n search_string = wp.random()\n else:\n search_string = ' '.join(args)\n try:\n page = wp.page(search_string)\n await ctx.send(page.url)\n self.logger.info(misolog.format_log(ctx, f\"\"))\n except wp.exceptions.DisambiguationError as error:\n await ctx.send(f\"```{str(error)}```\")\n self.logger.info(misolog.format_log(ctx, f\"Disambiguation page\"))",
"def index_page(data):\n main_index = [html.H1(\"INDEX\"),\n html.Div([html.A(\"Characters\", href=\"/character\"),\n \" | \", html.A(\"Search\", href=\"/search\")]),\n dcc.Loading(html.Img(src=create_wordcloud(data[\"tokens\"]))),\n get_random_quotes(data, 5)]\n\n return main_index",
"def test_navigates_to_index_page_about_page_index_page(w_driver):\n #Index Page\n w_driver.get('localhost:8000')\n results=w_driver.page_source\n text_found1=re.search(r'Welcome to the Kasner Micro Search Engine', results)\n\n #About Page\n w_driver.get('localhost:8000/about')\n results=w_driver.page_source\n text_found2=re.search(r'About the Kasner Search Engine',results)\n\n #Index Page\n w_driver.get('localhost:8000')\n results=w_driver.page_source\n text_found3=re.search(r'Welcome to the Kasner Micro Search Engine', results)\n\n assert(text_found1 != None)\n assert(text_found2 != None)\n assert(text_found3 != None)",
"def get_index_text(self, crate, module, impl, name):\n raise NotImplementedError",
"def execute_task(self, *args):\n item, key = args\n from flankers.textsemantics import TextSemantics\n if not (item.title == '' and item.abstract == ''):\n # if item is not a media or a link from Twitter\n # it is or a feed or a tweet\n text = item.abstract if len(item.abstract) != 0 else item.title\n text = text[:1799] if len(text) >= 1800 else text\n if Indexer.query().filter(Indexer.webres == key).count() == 0:\n semantics = TextSemantics(text)\n labels = semantics.find_related_concepts()\n for l in labels:\n index = Indexer(keyword=l.strip(), webres=key)\n index.put()\n print \"indexing stored: \" + item.url + \">\" + l\n else:\n raise Exception(\"storeIndexer(): Resource already indexed\")",
"def search(text):\n s = Search()\n result = _search(s, text)\n _print_results(result)\n return result",
"async def wiki(self, ctx, *, parse: str):\n parse = parse.split(' ', 1)\n anti = [\"antibirth\", \"anti\"]\n rev = [\"revelations\", \"rev\"]\n subdomain = \"antibirth\" if parse[0] in anti else \"tboirevelations\" if parse[0] in rev \\\n else \"bindingofisaacrebirth\"\n parse = ' '.join(parse) if subdomain == \"bindingofisaacrebirth\" else parse[1]\n page = requests.get(f\"https://{subdomain}.gamepedia.com/index.php?search={parse}\")\n if \"search\" in page.url:\n soup = BeautifulSoup(page.content, 'html.parser')\n if re.sub(r'\\W+', '', parse.lower()) == \\\n re.sub(r'\\W+', '', soup.find(class_=\"unified-search__result__title\").get(\"data-title\").lower()):\n await ctx.send(soup.find(class_=\"unified-search__result__title\").get(\"href\"))\n else:\n await ctx.send(f\"I couldn't find an exact match. Here is a link to this query's search page. {page.url}\")\n else: await ctx.send(page.url)",
"def checkForIndexPage(r):\n soup = BeautifulSoup(r.text, 'lxml')\n head = soup.find('h1')\n if head != None and head.string != None and (\"Index of \" in head.string):\n return \"Shows 'Index Of' page ✘\" \n else:\n return \"Displays properly ✓\"",
"def search_postman(text):\n result = _search_blog('postman', text)\n _print_results(result)\n return result",
"def score(self, link_text, page_title, body_text):\n\t\tdoc = metapy.index.Document()\n\t\tdoc.content(link_text + page_title + body_text)\n\t\tdocvec = self.fwdIndex.tokenize(doc)\n\t\tlabel = self.classifier.classify(docvec)\n\t\tif label == \"NewHome\":\n\t\t\treturn 1.0\n\t\telif label == \"NotNewHome\":\n\t\t\treturn 0.0\n\t\telse:\n\t\t\treturn 0.5",
"async def jisho(self, ctx, *, text):\n await ctx.message.delete()\n await hf.safe_send(ctx,\n f\"Try finding the meaning to the word you're looking for here: https://jisho.org/search/{text}\")",
"def handle_text_search(self, text):\n log.debug(\"Handling text search: %s\", text)\n\n self.current_selected = 0\n self._refresh()",
"def index_file(self, file_name):\n self.contents = []\n article_text = \"\"\n article_annots = [] # for annot-only index\n\n f = open(file_name, \"r\")\n for line in f:\n line = line.replace(\"#redirect\", \"\")\n # ------ Reaches the end tag for an article ---------\n if re.search(r'</doc>', line):\n # ignores null titles\n if wiki_uri is None:\n print \"\\tINFO: Null Wikipedia title!\"\n # ignores disambiguation pages\n elif (wiki_uri.endswith(\"(disambiguation)>\")) or \\\n ((len(article_text) < 200) and (\"may refer to:\" in article_text)):\n print \"\\tINFO: disambiguation page \" + wiki_uri + \" ignored!\"\n # ignores list pages\n elif (wiki_uri.startswith(\"<wikipedia:List_of\")) or (wiki_uri.startswith(\"<wikipedia:Table_of\")):\n print \"\\tINFO: List page \" + wiki_uri + \" ignored!\"\n # adds the document to the index\n else:\n self.__add_to_contents(Lucene.FIELDNAME_ID, wiki_uri, Lucene.FIELDTYPE_ID)\n if self.annot_only:\n self.__add_to_contents(Lucene.FIELDNAME_CONTENTS, article_annots, Lucene.FIELDTYPE_ID_TV)\n else:\n self.__add_to_contents(Lucene.FIELDNAME_CONTENTS, article_text, Lucene.FIELDTYPE_TEXT_TVP)\n self.lucene.add_document(self.contents)\n self.contents = []\n article_text = \"\"\n article_annots = []\n\n # ------ Process other lines of article ---------\n tag_iter = list(self.tagRE.finditer(line))\n # adds line to content if there is no annotation\n if len(tag_iter) == 0:\n article_text += line\n continue\n # A tag is detected in the line\n for t in tag_iter:\n tag = t.group(3)\n if tag == \"doc\":\n doc_title = self.titleRE.search(t.group(2))\n wiki_uri = WikipediaUtils.wiki_title_to_uri(doc_title.group(1)) if doc_title else None\n if tag == \"a\":\n article_text += t.group(1) + t.group(4) # resolves annotations and replace them with mention\n # extracts only annotations\n if self.annot_only:\n link_title = self.linkRE.search(t.group(2))\n link_uri = WikipediaUtils.wiki_title_to_uri(unquote(link_title.group(1))) if link_title else None\n if link_uri is not None:\n article_annots.append(link_uri)\n else:\n print \"\\nINFO: link to the annotation not found in \" + file_name\n last_span = tag_iter[-1].span()\n article_text += line[last_span[1]:]\n f.close()",
"def es_index(project=None):\n if project is not None:\n project = Project.by_slug(project)\n if project is None:\n raise ValueError(\"Project not found.\")\n script_indexer.index_project(project=project)",
"def search_content_with_text_get(self, ctype, currentpage, head, locale, searchtext, source, tag):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/Content/Search/{locale}/\"))",
"def test_navigates_to_index_page_link_about_page(w_driver):\n w_driver.get('localhost:8000')\n \n element=w_driver.find_element_by_link_text('About our team').click()\n results=w_driver.page_source\n text_found=re.search(r'About the Kasner Search Engine',results)\n\n assert(text_found != None)",
"def es_index(data):\n doc_type = data.get('service')\n es.index(index=INDEX, doc_type=doc_type, body=data)",
"def webtext(analysis):\n global current_file\n try:\n if request.form[\"url\"] == \"\":\n flash(\"No URL given\")\n return redirect(url_for('index'))\n url = request.form['url']\n current_file = main.Analyser(url)\n analysed_texts = current_file.analysed_texts\n text_facts = current_file.stats\n with Database() as database:\n categories = database.loadCategories()\n keywords = ''\n for word in text_facts['Key Words']:\n keywords += word[0] + \", \"\n keywords = keywords[:-2]\n return render_template('textdisplay.html',\n title=current_file.title,\n texts=analysed_texts,\n text=analysed_texts[analysis],\n ext=current_file.text.ext,\n keywords=keywords,\n categories=categories,\n facts=text_facts,\n upload=True)\n\n except:\n flash(\"Web address not found!\")\n return redirect(url_for('index'))",
"def populate_index(db):\n\tfor url in URL:\n\t\tprint url\n\t\trequest = urllib2.Request(url)\n\t\ttry :\n\t\t\tresponse = urllib2.urlopen(request)\n\t\texcept urllib2.URLError:\n\t\t\tprint \"Network Unreachable \"\n\t\t\tsys.exit()\t\n\t\ttext = html2text(response.read())\n\t\tdb.generate_index(text,url)",
"def test_navigates_to_about_page_link_index_page(w_driver):\n w_driver.get('localhost:8000/about')\n\n element=w_driver.find_element_by_link_text('back to Kasner').click()\n results=w_driver.page_source\n text_found=re.search(r'Welcome to the Kasner Micro Search Engine',results)\n\n assert(text_found != None)",
"def search_against_url(request, url):\n\n (scheme, _1, _2, _3, _4, _5) = urlparse(url)\n if scheme not in ('http', 'https'):\n return search_page(request, error='The URL must begin with either http or https.')\n\n sfm = from_django_conf('sidebyside')\n try:\n (title, text) = fetch_and_clean(url)\n except requests.exceptions.Timeout:\n return search_page(request, error=\"Sorry, that news article couldn't be retrieved.\")\n\n try:\n sfm_results = sfm.search(text=text, title=title, url=url)\n drop_silly_results(sfm_results)\n sort_by_coverage(sfm_results)\n\n\n #if they submit a url, don't return the exact same url in the results\n for r in sfm_results['documents']['rows']:\n if r.get('url') == url:\n sfm_results['documents']['rows'].remove(r)\n\n if sfm_results.has_key('text'): text = sfm_results['text']\n else: text = ''\n\n if sfm_results.has_key('title'): title = sfm_results['title']\n else: title='No Title'\n\n return search_result_page(request, sfm_results, text,\n source_title=title, source_url=url)\n except superfastmatch.SuperFastMatchError, e:\n if e.status == httplib.NOT_FOUND:\n raise HttpResponse('No such article {0}'.format(url))\n elif settings.DEBUG == True:\n return HttpResponse(e.response[1], status=e.response[0])\n else:\n raise",
"def main(url, ip):\n\n doc = {'url' : url}\n #get the whois and domain info\n print('[*] Getting whois')\n doc = get_whois(doc)\n\n #only continue checking stuff if we can actually resolve the address\n if doc['ip'] != '':\n #get ssl information if available\n print('[*] Get certificate information')\n doc = get_certinfo(doc)\n #browse to the site and get metrics\n print('[*] Interrogating homepage')\n doc = interrogate_homepage(doc)\n\n #now it is time to parse the ids logs\n doc = get_ids_logs(doc)\n\n #strip out ip if we don't have one\n if doc['ip'] == '':\n doc.pop('ip')\n \n\n try:\n print('[*] Adding information to elastiseach as %s:9200' % ip)\n es = Elasticsearch([ip])\n res = es.index(index='flurb', doc_type='site', body=doc)\n #if res == 'OK then return some sort of success response\n\n #there is no explicit close so we are going to delete\n #our es object to trigger the socket cleanup\n del(es)\n\n except:\n print('[*] Failed to add document to elasticsearch at %s' % ip)\n del(es)\n return doc\n\n return doc"
]
| [
"0.6791117",
"0.65517974",
"0.62685835",
"0.6252186",
"0.6025351",
"0.6015429",
"0.6015429",
"0.54486924",
"0.5442465",
"0.5423652",
"0.54173946",
"0.53921217",
"0.5385489",
"0.528756",
"0.52750754",
"0.52612174",
"0.5211718",
"0.51874846",
"0.5158359",
"0.5149468",
"0.5147078",
"0.5137462",
"0.51232153",
"0.5106557",
"0.5103946",
"0.51028556",
"0.5065986",
"0.50536275",
"0.50513643",
"0.5047046"
]
| 0.769407 | 0 |
Build the index by reading and indexing each site. | def build_index():
for site in get_sites():
text = read_site(site)
while text == False:
text = read_site(site) # keep attempting to read until successful
index_site(site, text) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def build_index(self):\n url = self.start_url\n\n # Search from last available date if not rebuilding and index is not empty\n if not self.rebuild > 0:\n recent_filings = self.get_most_recent_filings()\n pdt = recent_filings[0].date_filing\n # Reformat date to SEC format MM/DD/YYYY\n formatted_date = f\"{pdt:02}/{pdt:02}/{pdt.year}\"\n url = self.url_str.format(domain=self.domain_name, start=formatted_date, end=defaults['end_date'])\n\n page_counter = 0\n entries_counter = 0\n\n print(f\"{ats()} Starting index build...\" if self.rebuild else f\"{ats()} Starting index update...\")\n # Iterate through search results pages until no Next button found\n while True:\n page = self.load_page(url)\n # Scrape, parse and record into database current search results page\n entries_counter += self.scrape_page(page)\n page_counter += 1\n print(f\"{ats()} Scraped results page {page_counter}, {entries_counter} entries...\")\n # Get url of next search results page\n url = self.get_next(page)\n if url is None:\n # Exit loop if no more search results\n break\n if self.n_limit and entries_counter >= self.n_limit:\n # Exit if reached user-specified limit\n break\n\n # Do some reporting\n if self.rebuild:\n print(f'{ats()} Index built! Total {page_counter} search result pages scraped. '\n f'{entries_counter} index entries created.')\n else:\n print(f'{ats()} Index updated! Total {page_counter} search result page(s) scraped. '\n f'{entries_counter} index entries (re)added.')",
"def build(self):\n\t\tself.documents = self.get_items_to_index()\n\t\tself.build_index()",
"def build_index(self):\n self.rebuild_index()",
"def populate_index(db):\n\tfor url in URL:\n\t\tprint url\n\t\trequest = urllib2.Request(url)\n\t\ttry :\n\t\t\tresponse = urllib2.urlopen(request)\n\t\texcept urllib2.URLError:\n\t\t\tprint \"Network Unreachable \"\n\t\t\tsys.exit()\t\n\t\ttext = html2text(response.read())\n\t\tdb.generate_index(text,url)",
"def build_index():\n pass",
"def build_index(self):\n self.create_index()\n logger.debug(f\"Building index with {self.n_trees} trees.\")\n\n for i in range(len(self.corpus_embeddings)):\n self.index.add_item(i, self.corpus_embeddings[i])\n self.index.build(self.n_trees)",
"def start_index(self, stem):\n with open(\n self.posting_and_dictionary_path + \"/docsStem\" if stem else self.posting_and_dictionary_path + \"/docs.txt\",\n \"w+\") as out:\n out.write(\"Number City NumOfUniqeTerms maxTf Date\\n\")\n out.close()\n\n stop_words = {}\n try:\n with open(self.corpus_path + \"/stop_words.txt\", \"r\") as sw:\n lines = sw.readlines()\n for line in lines:\n stop_words[line[:len(line) - 1]] = \"\"\n sw.close()\n\n except Exception:\n raise FileNotFoundError(\"the file stop_words.txt didn't found\")\n\n files_number = len(\n [word for word in os.listdir(self.corpus_path) if os.path.isdir(self.corpus_path + \"/\" + word)])\n s = files_number / 46\n tasks = []\n i = 0\n while i < int(s):\n index_element = IndexElement(i, self.corpus_path, self.posting_and_dictionary_path, stem, 46, stop_words)\n tasks.append(index_element)\n i += 1\n if files_number % 46 > 0:\n tasks.append(IndexElement(i, self.corpus_path, self.posting_and_dictionary_path, stem, files_number % 46,\n stop_words))\n starttime = time.time()\n pool = Pool(processes=(multiprocessing.cpu_count()))\n pool.map(self.index, tasks)\n print(time.time() - starttime)\n self.start_merge(stem)",
"def build_index(self):\n # Init the HNSWLIB index\n self.create_index()\n logger.info(f\"Building HNSWLIB index, max_elements: {len(self.corpus)}\")\n logger.debug(f\"Parameters Required: M: {self.M}\")\n logger.debug(f\"Parameters Required: ef_construction: {self.ef_construction}\")\n logger.debug(f\"Parameters Required: ef(>topn): {self.ef}\")\n\n # Then we train the index to find a suitable clustering\n self.index.add_items(self.corpus_embeddings, list(range(len(self.corpus_embeddings))))",
"def index_site() -> None:\n app = create_web_app()\n app.app_context().push()\n index.create_index()\n\n indexable: List[IndexablePage] = []\n for path in site.list_paths():\n page: Page = site.load_page(path)\n content = bleach.clean(page.markdown, strip=True, tags=[])\n indexable.append(IndexablePage(\n title=page.title,\n path=page.path,\n content=content\n ))\n index.add_documents(*indexable)",
"def build_index(self):\n\t\tix = self.create_index()\n\t\twriter = AsyncWriter(ix)\n\n\t\tfor i, document in enumerate(self.documents):\n\t\t\tif document:\n\t\t\t\twriter.add_document(**document)\n\t\t\tupdate_progress_bar(\"Building Index\", i, len(self.documents))\n\n\t\twriter.commit(optimize=True)",
"def build_index(self):\n \n \n geoids = self.partitions.find_or_new(table='facilities_geoids')\n addresses = self.partitions.find_or_new(table='facilities_addresses')\n facilities = self.partitions.find(table='facilities')\n \n facilities.attach(addresses,'addresses')\n facilities.attach(geoids,'geoids')\n \n q = \"\"\"\n SELECT year, type, oshpd_id, facility_name, dba_city, dba_zip_code, blockgroup_gvid, tract_gvid, county_gvid\n FROM facilities\n JOIN geoids.facilities_geoids AS geoids ON geoids.facilities_id = facilities.id\n JOIN addresses.facilities_addresses AS addresses ON addresses.facilities_id = facilities.id\n \"\"\"\n \n p = self.partitions.find_or_new(table='facilities_index')\n p.clean()\n lr = self.init_log_rate()\n \n with p.inserter() as ins:\n for row in facilities.query(q):\n ins.insert(row)\n lr(str(p.identity))",
"def build_index(path, limit=None):\n\n documents = {}\n doc_lengths = {}\n index = {}\n j = 0 # Counter for articles\n for i in range(0, 22):\n if i >= 10:\n file = open(path + \"reut2-0\" + str(i) + \".sgm\", encoding='latin-1')\n else:\n file = open(path + \"reut2-00\" + str(i) + \".sgm\", encoding='latin-1')\n\n # Parsing html pages and getting reuters tagged once\n soup = BeautifulSoup(file, \"html.parser\")\n articles = soup.find_all('reuters')\n\n for article in articles:\n\n body = \"\"\n title = \"\"\n newid = int(article['newid'])\n\n try:\n body = article.body.get_text()\n except AttributeError:\n pass\n\n try:\n title = article.title.get_text()\n except AttributeError:\n pass\n\n words_list = title + \"\\n\" + body\n\n # Adding title+body to documents dictionary\n documents[newid] = words_list\n\n # Processing document and adding document lengths to dictionary\n processed_doc = preprocess(documents[newid])\n doc_lengths[newid] = len(processed_doc)\n\n # Adding word to index\n for term in processed_doc:\n if term in index:\n term_freq, docs_dict = index[term]\n\n term_freq += 1\n if newid in docs_dict:\n docs_dict[newid] += 1\n else:\n docs_dict[newid] = 1\n\n index[term] = (term_freq, docs_dict)\n else:\n docs_dict = {newid: 1}\n index[term] = (1, docs_dict)\n j += 1\n # Checking limit on articles\n if limit is not None:\n if j == limit:\n break\n\n # Checking limit on articles\n if limit is not None:\n if j == limit:\n break\n\n for term in index:\n term_freq, docs_dict = index[term]\n index[term] = [term_freq] + list(docs_dict.items())\n\n if limit is None:\n save_obj(index, \"reuters_index\")\n save_obj(documents, \"reuters_documents\")\n save_obj(doc_lengths, \"reuters_doc_length\")\n\n return index",
"def _es_push_indexes(self, content):\n for c in self.es_clients:\n c.create_index(content)",
"def index(self):\n for block_dir_relative in sorted(next(os.walk(self.data_dir))[1]):\n td_pairs = self.parse_block(block_dir_relative)\n index_id = 'index_'+block_dir_relative\n self.intermediate_indices.append(index_id)\n with ii.InvertedIndexWriter(index_id, directory=self.output_dir, \n postings_encoding=\n self.postings_encoding) as index:\n self.invert_write(td_pairs, index)\n td_pairs = None\n self.save()\n with ii.InvertedIndexWriter(self.index_name, directory=self.output_dir, \n postings_encoding=\n self.postings_encoding) as merged_index:\n with contextlib.ExitStack() as stack:\n indices = [stack.enter_context(\n ii.InvertedIndexIterator(index_id, \n directory=self.output_dir, \n postings_encoding=\n self.postings_encoding)) \n for index_id in self.intermediate_indices]\n self.merge(indices, merged_index)",
"def build(self):\n allow_bare = AllowBareCityName(blocklist=self.bare_name_blocklist)\n\n iter_keys = CityKeyIter(allow_bare)\n\n # Deduped cities.\n cities = WOFLocality.clean_us_cities()\n\n logger.info('Indexing US cities.')\n\n for row in tqdm(cities):\n\n # Key -> id(s)\n for key in map(keyify, iter_keys(row)):\n self.add_key(key, row.wof_id)\n\n # ID -> city\n self.add_location(row.wof_id, CityMatch(row))",
"def createIndex(pages): \n index = defaultdict(list)\n for url, content, links in pages:\n counts = getNumberTerms(content)\n for term, count in counts.items():\n index[term].append((url, count))\n return index",
"def rebuild_index():\n print('Building indexes...')\n print(data_fldr)\n ndx = []\n for root, _, files in os.walk(data_fldr):\n for f in files:\n if f[-3:].upper() in ['CSV','TXT']:\n ndx.extend(get_index_terms(root + os.sep + f))\n with open(ndx_file, 'w') as fio:\n for i in ndx:\n fio.write(i + '\\n')",
"def build_index(in_dir, out_dict, out_postings):\n print('indexing...')\n\n maxInt = sys.maxsize\n\n while True:\n # decrease the maxInt value by factor 10 \n # as long as the OverflowError occurs.\n try:\n csv.field_size_limit(maxInt)\n break\n except OverflowError:\n maxInt = int(maxInt/10)\n\n #Dicitionary for saving the normalized weights for document vector\n lengths = dict()\n\n #Number of docs read from csv\n total_docs = 1\n max_docs = 1000\n\n #Data stored in csv read file line by line and save columns data\n with open(os.path.join(in_dir), 'r', encoding=\"utf8\") as data_csv:\n reader = csv.DictReader(data_csv)\n #each line corresponds to a document\n for doc in reader:\n\n #if(total_docs > max_docs):\n # break\n\n #If line is blank, just skip\n if doc is None:\n continue\n \n #save the different columns of the doc\n doc_id = int(doc[\"document_id\"])\n #Remove punctuation in title and content\n doc_title = re.sub(r\"[,;@#?!&$()%\\[\\]°~^_.+=\\\"><`|}{*':/]+ *\", \" \", doc[\"title\"])\n doc_content = re.sub(r\"[,;@#?!&$()%\\[\\]°~^_.+=\\\"><`|}{*':/]+ *\", \" \", doc[\"content\"])\n doc_date = doc[\"date_posted\"]\n doc_year = doc_date[0:4]\n doc_court = doc[\"court\"]\n\n #The dictionaryies are updated, postings lists are updated or new terms added\n update_terms_zones_dictionary(doc_id, doc_title, \".title\")\n update_terms_zones_dictionary(doc_id, doc_content, \".content\")\n update_date_field_dictionary(doc_id, doc_year)\n update_court_field_dictionary(doc_id, doc_court)\n\n total_docs += 1\n\n data_csv.close()\n\n #This section stores the Log TF using the word counts in the postings in the dictionary\n #It saves the Log TF in an auxiliary dictionary named lengths\n for word in dictionary:\n #Get postings list for the word\n postings_list = dictionary[word]\n\n for docID_termF in postings_list:\n #Get the vector for the doc, where the docId is docID_termF[0]\n #If there is no vector for this doc, then create a new dict\n #I am using dictionaries as the vector for the word only for the calculations\n doc_vector = lengths.get(docID_termF[0], dict())\n #I add the logarithmic term frequency to that document vector\n doc_vector[word] = 1 + math.log(docID_termF[1], 10)\n #Save that to its corresponding doc\n lengths[docID_termF[0]] = doc_vector\n\n #This section normalizes the Log TFs \n for doc_vector in lengths.values():\n #We store each of the values in a list and then use:\n #np.linalg.norm to do the normalization = sqrt(sum(values^2))\n weights = doc_vector.values()\n #We get the vectors magnitude\n magnitude = np.linalg.norm(np.array(list(weights)))\n for word in doc_vector.keys():\n #For every word entry in the vector \n #normalize by dividing the weight by the magnitude\n doc_vector[word] = doc_vector[word] / magnitude\n\n #This section replaces the word count in the tuple of the dictionary with the Normalized Log TF\n #It also sorts the postings list by doc ID\n for word in dictionary:\n #Get postings list for the word\n postings_list = dictionary[word]\n new_postings_list = list()\n for docID_termF in postings_list:\n docID_termF = ( docID_termF[0], lengths[docID_termF[0]][word] )\n new_postings_list.append(docID_termF)\n new_postings_list.sort()\n dictionary[word] = new_postings_list\n\n ''' \n with open('ugly_dictionary.txt', 'w') as fp:\n json.dump(dictionary, fp)\n '''\n #Determine the relevance of each doc by the court that it has in its court field\n #Save the relevant docs and their relevance\n relevant_courts_dict = { \"SG Court of Appeal\":2, \"SG Privy 
Council\":2, \"UK House of Lords\":2, \"UK Supreme Court\":2,\n \"High Court of Australia\":2, \"CA Supreme Court\":2, \"SG High Court\":1.5, \"Singapore International Commercial Court\":1.5,\n \"HK High Court\": 1.5, \"HK Court of First Instance\": 1.5, \"UK Crown Court\": 1.5, \"UK Court of Appeal\": 1.5, \"UK High Court\": 1.5, \n \"Federal Court of Australia\": 1.5, \"NSW Court of Appeal\": 1.5, \"NSW Court of Criminal Appeal\": 1.5, \"NSW Supreme Court\": 1.5}\n\n relevant_docs = dict()\n \n for court_name in relevant_courts_dict:\n court_postings_list = court_dictionary.get(court_name, -1)\n if(court_postings_list != -1):\n for docid in court_postings_list:\n #save a dictionary of docID and its relevance (2 or 1.5) according to its court\n relevant_docs[docid] = relevant_courts_dict[court_name]\n\n #This section traverse each word (key) in the dictionary, get its postings list and save it in a different file \n postings_list_file = open(out_postings, \"wb\") \n for word in dictionary:\n #Get postings list for the word\n postings_list = dictionary[word]\n #Get the document frequency\n document_frequency = len(postings_list)\n #Know the starting position for the pointer\n postings_list_position = postings_list_file.tell()\n # Writing to file \n pickle.dump(postings_list, postings_list_file)\n #Replace postings list with reference to the position\n dictionary[word] = (document_frequency, postings_list_position)\n for date in date_dictionary:\n #Get postings list for the date\n postings_list = date_dictionary[date]\n #Get the document frequency\n document_frequency = len(postings_list)\n #Know the starting position for the pointer\n postings_list_position = postings_list_file.tell()\n # Writing to file \n pickle.dump(postings_list, postings_list_file)\n #Replace postings list with reference to the position\n date_dictionary[date] = (document_frequency, postings_list_position)\n for court in court_dictionary:\n #Get postings list for the date\n postings_list = court_dictionary[court]\n #Get the document frequency\n document_frequency = len(postings_list)\n #Know the starting position for the pointer\n postings_list_position = postings_list_file.tell()\n # Writing to file \n pickle.dump(postings_list, postings_list_file)\n #Replace postings list with reference to the position\n court_dictionary[court] = (document_frequency, postings_list_position)\n #Close the postings lists file\n postings_list_file.close() \n\n #Now open the dictionary file and save the three dictionaries\n with open(out_dict, 'wb') as dictionary_file:\n pickle.dump(total_docs, dictionary_file, protocol=pickle.HIGHEST_PROTOCOL)\n pickle.dump(dictionary, dictionary_file, protocol=pickle.HIGHEST_PROTOCOL)\n pickle.dump(date_dictionary, dictionary_file, protocol=pickle.HIGHEST_PROTOCOL)\n pickle.dump(court_dictionary, dictionary_file, protocol=pickle.HIGHEST_PROTOCOL)\n pickle.dump(relevant_docs, dictionary_file, protocol=pickle.HIGHEST_PROTOCOL)\n \n '''\n The structure we have is:\n\n dictionary.txt: Has three dictionaries\n {word.zone : [doc_freq, pointer], word.zone: [doc_freq, pointer], ...}\n {date : [doc_freq, pointer], date: [doc_freq, pointer], ...}\n {court : [doc_freq, pointer], court: [doc_freq, pointer], ...}\n\n postings.txt: Has the postings for the three dictionaries\n For the dictionary postings:\n [[docID,termFrequency],[docID,termFrequency]]\n [[docID,termFrequency]] ...\n For the date_dictionary postings:\n [docId, docId, docId, docId]\n For the court_dictionary postings:\n [docId, docId, docId, docId]\n 
...\n\n Both documents together would be:\n { word.zone: [doc_freq, [[docID,termFrequency], ... ]], \n word.zone: [doc_freq, [[docID,termFrequency].}, ...]] }\n { date: [doc_freq, [docID, docID, ... ]], date: [doc_freq, [docID, docID, ... ]] }\n { court: [doc_freq, [docID, docID, ... ]], date: [doc_freq, [docID, docID, ... ]] }\n\n lengths.txt\n [document: [word: weight, word: weight, ...], document: [word: weight, word: weight, ...]]\n Decided to make it like this to keep control of which weights correspond to which words\n Although for a document I will traverse all the weights to get the score\n If the word is not in the document vector [which in my case is a dictionary], then its weight is 0\n This way I am no using a sparse matrix\n\n '''",
"def build_index(self, folder):\n self.__start_indexing()\n for chunk in sorted(os.listdir(folder)):\n path = folder + \"/\" + chunk\n if os.path.isdir(path):\n for dir in sorted(os.listdir(path)):\n filedir = path + \"/\" + dir\n for anns_file in sorted(os.listdir(filedir)):\n self.index_file(filedir + \"/\" + anns_file)\n self.__end_indexing()",
"def build_sites(self):\n for i in range(self.shape[0]):\n for j in range(self.shape[1]):\n for k in range(self.shape[2]):\n for s,site in enumerate(self.cell.sites):\n newsite = copy.deepcopy(site)\n coordinate = self.cell.a1*i+\\\n self.cell.a2*j+\\\n self.cell.a3*k\n newsite.coordinate += coordinate\n self.sites[i,j,k,s] = newsite",
"def on_new_site(self, files):\n init_index()",
"def _populate_index(self):\n os.makedirs(self.cache_dir, exist_ok=True)\n local_files = glob('{}/*'.format(self.cache_dir))\n for file in local_files:\n self._add_to_index(os.path.basename(file), os.path.getsize(file))",
"def createindexes():\n index = [{}, {}, {}, {}]\n readcorpus(index)\n buildindex4(index[2], index[3])\n writeindextofile(index)\n return index",
"def _initIndexes(self):\n class Record:\n \"\"\" a moron simple object for carrying the 'extra'-payload to index\n constructors\n \"\"\"\n def __init__(self, **kw):\n self.__dict__.update(kw)\n\n addIndex = self.addIndex\n addColumn = self.addColumn\n\n # Content indexes\n self._catalog.indexes.clear()\n for (index_name, index_type, extra) in self.enumerateIndexes():\n if extra is None:\n addIndex( index_name, index_type)\n else:\n if isinstance(extra, StringTypes):\n p = Record(indexed_attrs=extra)\n elif isinstance(extra, DictType):\n p = Record(**extra)\n else:\n p = Record()\n addIndex( index_name, index_type, extra=p )\n\n # Cached metadata\n self._catalog.names = ()\n self._catalog.schema.clear()\n for column_name in self.enumerateColumns():\n addColumn( column_name )",
"def create_index():",
"def build_index(self):\n # format output and input\n ref_file = f'{self.genome_database}BSB_ref.fa'\n # collect external command\n index_command = [f'{self.bwa_path}', 'index', '-a', 'bwtsw', '-b', f'{self.block_size}', ref_file]\n # run external command\n subprocess.run(args=index_command)",
"def run_indexer(self, corpus):\n with open(corpus, 'r') as fp:\n for line in tqdm(fp.readlines()):\n doc_id, document = self.preprocessor.get_doc_id(line)\n tokenized_document = self.preprocessor.tokenizer(document)\n self.indexer.generate_inverted_index(doc_id, tokenized_document)\n self.indexer.sort_terms()\n self.indexer.add_skip_connections()\n self.indexer.calculate_tf_idf()",
"def build(self):\n states = WOFRegion.query.filter(WOFRegion.country_iso=='US')\n\n logger.info('Indexing US states.')\n\n for row in tqdm(states):\n\n # Key -> id(s)\n for key in map(keyify, state_key_iter(row)):\n self.add_key(key, row.wof_id)\n\n # ID -> state\n self.add_location(row.wof_id, StateMatch(row))",
"def _index_group_with_subgroup(self, **kwargs):\n\n log.setLevel(self.log_level)\n # get a list of all the uri to index\n uri_list = kwargs.get('uri_list', self.get_uri_list())\n if not uri_list:\n log.info(\"0 items to index\")\n return\n # results = results[:100]\n # Start processing through uri\n batch_file = os.path.join(CFG.dirs.logs, \"batch_list.txt\")\n # with open(batch_file, \"w\") as fo:\n # fo.write(\"{\")\n log.info(\"'%s' items to index\", len(uri_list))\n self.time_start = datetime.datetime.now()\n batch_size = kwargs.get(\"batch_size\", 12000)\n if len(uri_list) > batch_size:\n batch_end = batch_size\n else:\n batch_end = len(uri_list)\n batch_start = 0\n batch_num = 1\n self.batch_data = {}\n self.batch_data[batch_num] = {}\n self.batch_data[batch_num]['main'] = []\n self.batch_uris = {}\n self.batch_uris[batch_num] = []\n for name, indexer in self.other_indexers.items():\n self.batch_data[batch_num][name] = []\n end = False\n last = False\n final_list = []\n expand_index = kwargs.get(\"expand_index\", True)\n while not end:\n log.debug(\"batch %s: %s-%s\", batch_num, batch_start, batch_end)\n sub_batch = []\n j = 0\n for i in range(batch_start, batch_end):\n # for i, subj in enumerate(uri_list[batch_start:batch_end]):\n qry_size = kwargs.get(\"qry_size\", 1000)\n if j < qry_size:\n try:\n sub_batch.append(uri_list.pop()) #subj)\n except IndexError:\n pass\n if j == qry_size -1 or i == batch_end - 1:\n try:\n sub_batch.append(uri_list.pop()) #subj)\n except IndexError:\n pass\n # with open(batch_file, \"a\") as fo:\n # fo.write(json.dumps({str('%s-%s' % (batch_num, i+1)):\n # [item[0].sparql\n # for item in sub_batch]})[1:-1]+\",\\n\")\n if not kwargs.get(\"no_threading\", False):\n th = threading.Thread(name=batch_start + i + 1,\n target=self._index_sub,\n args=(sub_batch,\n i+1,\n batch_num,))\n th.start()\n else:\n self._index_sub(sub_batch, i+1, batch_num)\n j = 0\n final_list += sub_batch\n sub_batch = []\n else:\n j += 1\n log.debug(datetime.datetime.now() - self.time_start)\n if not kwargs.get(\"no_threading\", False):\n main_thread = threading.main_thread()\n for t in threading.enumerate():\n if t is main_thread:\n continue\n t.join()\n action_list = []\n for key, items in self.batch_data[batch_num].items():\n if key == 'main':\n es_worker = self.es_worker\n else:\n es_worker = self.other_indexers[key]\n action_list += es_worker.make_action_list(items)\n result = self.es_worker.bulk_save(action_list)\n final_list += self.batch_uris[batch_num]\n self._update_triplestore(result, action_list)\n del action_list\n del self.batch_uris[batch_num]\n del self.batch_data[batch_num]\n try:\n del pyrdf.memorized\n pyrdf.memorized = {}\n except AttributeError:\n pass\n while gc.collect() > 0:\n pass\n # pdb.set_trace()\n batch_end += batch_size\n batch_start += batch_size\n if last:\n end = True\n if len(uri_list) <= batch_size:\n batch_end = len(uri_list)\n last = True\n batch_num += 1\n self.batch_uris[batch_num] = []\n self.batch_data[batch_num] = {}\n self.batch_data[batch_num]['main'] = []\n for name, indexer in self.other_indexers.items():\n self.batch_data[batch_num][name] = []\n log.debug(datetime.datetime.now() - self.time_start)\n # with open(batch_file, 'rb+') as fo:\n # fo.seek(-2, os.SEEK_END)\n # fo.truncate()\n # # fo.close()\n # fo.write(\"}\".encode())",
"def build_index(self):\r\n date_time('Building indexes in citations table')\r\n self.cursor.execute('DROP INDEX IF EXISTS IDX_citations ;')\r\n self.cursor.execute('CREATE INDEX IDX_citations ON citations (citation);')\r\n self.conn.commit()\r\n gc.collect()"
]
| [
"0.7400163",
"0.71896327",
"0.69972724",
"0.6935306",
"0.6830705",
"0.6573556",
"0.65437955",
"0.643226",
"0.6414859",
"0.6355487",
"0.6307958",
"0.62730974",
"0.6251326",
"0.62469614",
"0.62241423",
"0.61707556",
"0.61366165",
"0.6097477",
"0.60937256",
"0.6085822",
"0.6083899",
"0.60673434",
"0.604971",
"0.60466087",
"0.603719",
"0.6028207",
"0.60252595",
"0.5999693",
"0.5996224",
"0.59902877"
]
| 0.89319247 | 1 |
This method tries to read notes from a sound wave file into a list of dicts of start_time, pitch and duration | def read_note_from_sound_file(filename: str, samplerate: int = DEFAULT_SAMPLE_RATE):
print("====> reading notes from sound file")
win_s = 512 // DOWN_SAMPLE # fft size
hop_s = 256 // DOWN_SAMPLE # hop size
# adjust sample rate
s = source(filename, samplerate, hop_s)
samplerate = s.samplerate
notes_o = notes("default", win_s, hop_s, samplerate)
result = []
total_frames = 0
while True:
samples, read = s()
new_note = notes_o(samples)
        # skip empty detections; pitches above 120 are treated as noise
        if new_note[0] != 0 and new_note[0] <= 120:
note_klass = Note(time=total_frames / float(samplerate), pitch=new_note[0], volume=new_note[1] - 20,
duration=new_note[2])
result.append(note_klass)
total_frames += read
if read < hop_s:
break
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generateNotes():\r\n fs = 44100 # hertz\r\n seconds = 3 # Note duration of 3 seconds\r\n noteNames = [\"C4\", \"D4\", \"E4\", \"F4\", \"G4\", \"A4\", \"B4\"]\r\n for noteName in noteNames:\r\n myNote = music21.note.Note(noteName)\r\n noteFrequency = myNote.pitch.frequency\r\n # Generate array with seconds*sample_rate steps, ranging between 0 and seconds\r\n t = np.linspace(0, seconds, seconds * fs, False)\r\n\r\n # Generate a 440 Hz sine wave\r\n sound = np.sin(noteFrequency * t * 2 * np.pi)\r\n\r\n # Ensure that highest value is in 16-bit range\r\n audio = sound * (2**15 - 1) / np.max(np.abs(sound))\r\n # Convert to 16-bit data\r\n audio = audio.astype(np.int16)\r\n\r\n # Start playback\r\n play_obj = sa.play_buffer(audio, 1, 2, fs)\r\n\r\n # Wait for playback to finish before exiting\r\n play_obj.wait_done()\r\n\r\n #Write sound to file\r\n sf.write('assets/patterns/'+noteName+'.wav', audio, fs)",
"def notes_to_sound(instrument, track, duration_multiplier=1, srate=None):\n srate = get_srate(srate)\n duration = 1.0\n note_duration = 1.0\n note_on_velocity = 0.7\n note_off_velocity = 0.5\n t = 0.0\n result = []\n for item in track:\n if isinstance(item, Note):\n if item.duration is None:\n item.duration = duration\n else:\n duration = item.duration\n if hasattr(duration, '__getitem__'):\n if len(duration) == 1:\n duration = duration[0]\n else:\n duration, note_duration = duration\n item.duration = note_duration\n else:\n note_duration = duration\n if item.note_on_velocity is None:\n item.note_on_velocity = note_on_velocity\n else:\n note_on_velocity = item.note_on_velocity\n if item.note_off_velocity is None:\n item.note_off_velocity = note_off_velocity\n else:\n note_off_velocity = item.note_off_velocity\n item.multiply_duration(duration_multiplier)\n instrument_sound = instrument(item)\n elif isinstance(item, Rest):\n if item.duration is not None:\n duration = item.duration\n instrument_sound = []\n else:\n if hasattr(item, '__getitem__'):\n pitch = item[0]\n if len(item) > 1:\n duration = item[1]\n if hasattr(duration, '__getitem__'):\n if len(duration) == 1:\n duration = duration[0]\n else:\n duration, note_duration = duration\n else:\n note_duration = duration\n if len(item) > 2:\n note_on_velocity = item[2]\n if len(item) > 3:\n note_off_velocity = item[3]\n else:\n pitch = item\n instrument_sound = instrument(Note(pitch=pitch, duration=note_duration * duration_multiplier, note_on_velocity=note_on_velocity, note_off_velocity=note_off_velocity, srate=srate))\n result = merge(result, instrument_sound, int(t * srate))\n t += duration * duration_multiplier\n return result",
"def get_notes():\n notes = []\n durs = []\n\n for file in glob.glob(\"D:\\\\anthems\\\\data\\\\*.mid\"):\n midi = converter.parse(file)\n\n print(\"Parsing %s\" % file)\n\n notes_to_parse = None\n\n try:\n s2 = instrument.partitionByInstrument(midi)\n notes_to_parse = s2.parts[0].recurse() \n except:\n notes_to_parse = midi.flat.notes\n \n for element in notes_to_parse:\n if isinstance(element, note.Note):\n notes.append(str(element.pitch))\n durs.append(element.duration.quarterLength)\n elif isinstance(element, chord.Chord):\n notes.append('.'.join(str(n) for n in element.normalOrder))\n durs.append(element.duration.quarterLength)\n\n return notes, durs",
"def silence_intervals(file_path,file_name):\r\n nsil_start_time=[]\r\n nsil_end_time=[]\r\n sil_start_time=[]\r\n sil_end_time=[]\r\n #read file \r\n audio, sample_rate = librosa.load(os.path.join(file_path,file_name))\r\n \r\n #silence extraction using librosa\r\n nsil_intv=librosa.effects.split(audio, top_db=30).astype('float32') / sample_rate\r\n \r\n #silence extraction using pyAudioanalysis\r\n # [Fs, x] = aIO.readAudioFile(os.path.join(file_path,file_name))\r\n # nsil_intv = np.array(aS.silenceRemoval(x, Fs, 0.020, 0.020, smoothWindow = 0.7, Weight = 0.3, plot = False))\r\n # print \"non-sil segments=\"+str(nsil_intv)\r\n\r\n #silence detection using webrtcvad (voice activity detection)\r\n #nsil_intv=np.array(vad_webrtcvad(file_path,file_name))\r\n\r\n\r\n dur=librosa.get_duration(y=audio, sr=sample_rate)\r\n print nsil_intv\r\n print dur\r\n print sample_rate\r\n curr_sil_start=0.0\r\n curr_sil_end=0.0\r\n for i in range(nsil_intv.shape[0]):\r\n nsil_start_time.append(nsil_intv[i][0])\r\n #sil_start_time=list(np.array(sil_start_time)/sample_rate)\r\n\r\n nsil_end_time.append(nsil_intv[i][1])\r\n #sil_end_time=list(np.array(sil_end_time)/sample_rate)\r\n\r\n for i in range(len(nsil_start_time)):\r\n curr_sil_end=nsil_start_time[i]\r\n sil_start_time.append(str(curr_sil_start))\r\n sil_end_time.append(str(curr_sil_end))\r\n curr_sil_start=nsil_end_time[i]\r\n\r\n print sil_start_time\r\n print sil_end_time\r\n return sil_start_time,sil_end_time",
"def process_notes_in_song(dict_time_notes, seq_len = 50):\n list_of_dict_keys_time = []\n \n for key in dict_time_notes:\n sample = dict_time_notes[key]\n times = np.unique(np.where(sample > 0)[1])\n index = np.where(sample > 0)\n dict_keys_time = {}\n\n for time in times:\n index_where = np.where(index[1] == time)\n notes = index[0][index_where]\n dict_keys_time[time] = notes\n list_of_dict_keys_time.append(dict_keys_time)\n return list_of_dict_keys_time",
"def load_data(self):\r\n if not os.path.exists(self.origin_dir):\r\n raise ValueError(f\"Folder {self.origin_dir} not exists!\")\r\n\r\n # loop folders\r\n listglobs = glob.glob(os.path.join(self.origin_dir)+r\"[0-9]*\")\r\n count = 0\r\n temp = []\r\n for x in listglobs:\r\n\r\n # step1, get speaker id md5\r\n user_id = x.rsplit(\"\\\\\")[-1]\r\n speaker_id = hashlib.md5(user_id.encode(\"utf-8\")).hexdigest()\r\n self.wav_desc[\"speaker_id\"] = speaker_id\r\n print(\"1=>\", x)\r\n\r\n for k in [\"你好小顺\", \"小顺小顺\"]:\r\n paths = os.path.join(x, k)\r\n print(\"2=>\", paths)\r\n # step2, parse speaker info\r\n with open(os.path.join(paths, \"spearker_info.txt\"), 'r', encoding=\"utf-8\") as f:\r\n line = f.readline()\r\n arrs = line.strip().split(\"\\\\t\")\r\n if len(arrs) != 3:\r\n raise ValueError(\"Required three field in speaker_info<id>\\t<gender>\\t<age>\")\r\n self.wav_desc[\"gender\"] = arrs[1].strip(\"<\").rstrip(\">\")\r\n self.wav_desc[\"age\"] = arrs[-1].strip(\"<\").rstrip(\">\")\r\n\r\n # step3, parse wav detailed information\r\n # key: wav_id, value: info_list, [keyword, noise_type, distance, speed,user_id, equipment]\r\n wav_infos_dict = {}\r\n with open(os.path.join(paths, \"wav_desc.txt\"), \"r\", encoding=\"utf-8\") as f:\r\n for line in f.readlines():\r\n arrs = line.strip().split(\"\\\\t\")\r\n wav_infos_dict[arrs[0].strip(\"<\").rstrip(\">\")] = [x.strip(\"<\").rstrip(\">\") for\r\n x in arrs[1:]]\r\n\r\n print(f\"Parse wav info finished find {len(wav_infos_dict)} infos.\")\r\n\r\n # Step4, audio with background noise and without nose, which was back_wav and wav_data folder\r\n for wav_folder in [\"back_wav\", \"wav_data\"]:\r\n audio_lists = glob.glob(os.path.join(paths + f\"\\\\{wav_folder}\", \"*.wav\"))\r\n for xa in audio_lists:\r\n # copy data to\r\n wav_id, user_id = get_wav_name(xa)\r\n # print(wav_id, user_id)\r\n # create md5 id\r\n utt_id = hashlib.md5(xa.encode(\"utf-8\")).hexdigest()\r\n # speaker_id = hashlib.md5(user_id.encode(\"utf-8\")).hexdigest()\r\n # print(utt_id, speaker_id)\r\n # collect all info for an audio\r\n self.wav_desc[\"utt_id\"] = utt_id\r\n infos = wav_infos_dict[wav_id]\r\n if len(infos) != 6:\r\n print(\"==>\", infos)\r\n self.wav_desc[\"keyword_id\"] = self.keywords_dict[infos[0]]\r\n self.wav_desc[\"noise_type\"] = infos[1]\r\n self.wav_desc[\"distance\"] = infos[2]\r\n self.wav_desc[\"record_speed\"] = infos[3]\r\n self.wav_desc[\"speaker_id\"] = speaker_id\r\n self.wav_desc[\"record_equipment\"] = infos[5]\r\n\r\n # record wav information\r\n t_infos = copy.deepcopy(self.wav_desc)\r\n self.all_wavs.append(t_infos)\r\n count += 1\r\n temp.append(utt_id)\r\n\r\n # copy data to resource folder\r\n dest = shutil.copy2(xa, os.path.join(self.dest_dir, f\"audios/{utt_id}.wav\"))\r\n set_index = which_set(dest, 20, 30)\r\n self.data_index[set_index].append(t_infos)\r\n\r\n # write wav information into json file\r\n with open(os.path.join(self.dest_dir, \"resources/wav_desc.json\"), \"w\", encoding=\"utf-8\") as f:\r\n json.dump(self.all_wavs, f, ensure_ascii=False, indent=True)\r\n print(f\"total wavs:{count}, total ids:{len(temp)}\")\r\n for set_index in self.data_index.keys():\r\n with open(os.path.join(self.dest_dir, f\"resources/p_{set_index}.json\"), \"w\", encoding=\"utf-8\") as f:\r\n json.dump(self.data_index[set_index], f, ensure_ascii=False, indent=True)\r\n print(f\"Collect {set_index} data total {len(self.data_index[set_index])} samples.\")",
"def readMidi(filename):\n\n global offset, midifile, beatDivision, istart, iend, firstNote, ignorePC\n\n try:\n inpath = file(filename, \"rb\")\n except:\n error(\"Unable to open MIDI file %s for reading\" % filename)\n\n midifile=inpath.read()\n inpath.close()\n\n # Create our storage:\n # A dic with the channels 0-15 as keys for the midi note events\n # 2 lists for lyrics and text events. These have tuples for (time, text)\n\n events={}\n for c in range(0,16):\n events[c]=[]\n\n textEvs=[]\n lyricEvs=[]\n\n # Ensure this is valid header\n\n hd=midifile[0:4]\n if hd != 'MThd':\n error(\"Expecting 'MThd', %s not a standard midi file\" % filename)\n\n offset = 4\n a = m32i()\n\n if a != 6:\n error(\"Expecting a 32 bit value of 6 in header\")\n\n format=m16i()\n\n if format not in (0,1):\n error(\"MIDI file format %s not recognized\" % format)\n\n ntracks=m16i()\n beatDivision=m16i()\n\n if beatDivision != gbl.BperQ:\n warning(\"MIDI file '%s' tick/beat of %s differs from MMA's \"\n \"%s. Will try to compensate\" %\n (filename, beatDivision, gbl.BperQ))\n\n # Adjust start/end to the file's tick\n\n istart *= beatDivision\n iend *= beatDivision\n\n midievents={}\n firstNote = 0xffffff\n\n for tr in range(ntracks):\n tm=0\n\n hdr = midifile[offset:offset+4]\n offset+=4\n\n if hdr != 'MTrk':\n error(\"Malformed MIDI file in track header\")\n trlen = m32i() # track length, not used?\n\n lastevent = None\n\n \"\"\" Parse the midi file. We have to parse off each event, even\n though many will just be thrown away. You can't just skip around\n in a midi file :) In the future we might decide to include meta\n stuff, etc. Or, we may not :) For now, we keep:\n - note on\n - note off\n - key pressure\n - control change\n - program change\n - channel pressure\n - pitch blend\n - text event\n - lyric event\n \"\"\"\n\n while 1:\n tm += mvarlen() # adjust total offset by delta\n\n ev=m1i()\n\n if ev < 0x80:\n if not lastevent:\n error(\"Illegal running status in %s at %s\" % (midifile, offset))\n offset -= 1\n ev=lastevent\n\n\n sValue = ev>>4 # Shift MSBs to get a 4 bit value\n channel = ev & 0x0f\n\n if sValue == 0x8: # note off event\n\n note=m1i()\n vel=m1i()\n\n if octAdjust and channel != 10:\n note += octAdjust\n while note < 0: note += 12\n while note >127: note -= 12\n events[channel].append([tm, ev & 0xf0, chr(note)+chr(vel)])\n\n elif sValue == 0x9: # note on event\n if tm < firstNote:\n firstNote = tm\n note=m1i()\n vel=m1i()\n\n if octAdjust and channel != 10:\n note += octAdjust\n while note < 0: note += 12\n while note >127: note -= 12\n\n if volAdjust != 100:\n vel = int( (vel*volAdjust)/100)\n if vel<0: vel=1\n if vel>127: vel=127\n\n events[ev & 0xf].append([tm, ev & 0xf0, chr(note)+chr(vel)])\n\n elif sValue == 0xa: # key pressure\n events[ev & 0xf].append([tm, ev & 0xf0, chars(2)])\n\n elif sValue == 0xb: # control change\n events[ev & 0xf].append([tm, ev & 0xf0, chars(2)])\n \n elif sValue == 0xc: # program change\n if ignorePC: # default is to ignore these\n offset += 1 \n else: # set with option IgnorePC=1\n events[ev & 0xf].append([tm, ev & 0xf0, chars(1)])\n\n elif sValue == 0xd: # channel pressure\n events[ev & 0xf].append([tm, ev & 0xf0, chars(1)])\n\n elif sValue == 0xe: # pitch blend\n events[ev & 0xf].append([tm, ev & 0xf0, chars(2)])\n\n elif sValue == 0xf: # system, mostly ignored\n if ev == 0xff: # meta events\n a=m1i()\n\n if a == 0x00: # sequence number\n l=mvarlen()\n offset += l\n\n elif a == 0x01: # text (could be lyrics)\n textEvs.append((tm, chars(mvarlen())))\n\n 
elif a == 0x02: # copyright\n l=mvarlen()\n offset += l\n\n elif a == 0x03: # seq/track name\n l=mvarlen()\n offset += l\n\n elif a == 0x04: # instrument name\n l=mvarlen()\n offset += l\n\n elif a == 0x05: # lyric\n lyricEvs.append((tm, chars(mvarlen())))\n\n elif a == 0x06: # marker\n l=mvarlen()\n offset += l\n\n elif a == 0x07: # cue point\n l=mvarlen()\n offset += l\n\n elif a == 0x21: # midi port\n l=mvarlen()\n offset += l\n\n elif a == 0x2f: # end of track\n l=mvarlen()\n offset += l\n break\n\n elif a == 0x51: #tempo\n l=mvarlen()\n offset += l\n\n elif a == 0x54: # SMPTE offset\n l=mvarlen()\n offset += l\n\n elif a == 0x58: # time sig\n l=mvarlen()\n offset += l\n\n elif a == 0x59: # key sig\n l=mvarlen()\n offset += l\n\n else: # probably 0x7f, proprietary event\n l=mvarlen()\n offset += l\n\n\n elif ev == 0xf0: # system exclusive\n l=mvarlen()\n offset += l\n\n elif ev == 0xf2: # song position pointer, 2 bytes\n offset += 2\n\n elif ev == 0xf3: # song select, 1 byte\n offset += 1\n\n else: # all others are single byte commands\n pass\n\n if ev >= 0x80 and ev <= 0xef:\n lastevent = ev\n\n return (events, textEvs, lyricEvs)",
"def generate_dict_time_notes(list_all_midi, batch_song = 16, start_index=0, fs=30, use_tqdm=True):\n assert len(list_all_midi) >= batch_song\n \n dict_time_notes = {}\n process_tqdm_midi = tqdm_notebook(range(start_index, min(start_index + batch_song, len(list_all_midi)))) if use_tqdm else range(start_index, min(start_index + batch_song, len(list_all_midi)))\n for i in process_tqdm_midi:\n midi_file_name = list_all_midi[i]\n if use_tqdm:\n process_tqdm_midi.set_description(\"Processing {}\".format(midi_file_name))\n try: # Handle exception on malformat MIDI files\n midi_pretty_format = pretty_midi.PrettyMIDI(midi_file_name)\n piano_midi = midi_pretty_format.instruments[0] # Get the piano channels\n piano_roll = piano_midi.get_piano_roll(fs=fs)\n dict_time_notes[i] = piano_roll\n except Exception as e:\n print(e)\n print(\"broken file : {}\".format(midi_file_name))\n pass\n return dict_time_notes",
"def parse(cls, file: Keyvalues) -> Dict[str, 'Sound']:\n sounds = {}\n for snd_prop in file:\n volume = split_float(\n snd_prop, 'volume',\n VOLUME.__getitem__,\n 1.0,\n )\n pitch = split_float(\n snd_prop, 'pitch',\n Pitch.__getitem__,\n 100.0,\n )\n\n if 'soundlevel' in snd_prop:\n level = split_float(\n snd_prop, 'soundlevel',\n Level.__getitem__,\n Level.SNDLVL_NORM,\n )\n elif 'attenuation' in snd_prop:\n atten_min, atten_max = split_float(\n snd_prop, 'attenuation',\n ATTENUATION.__getitem__,\n ATTENUATION['ATTN_IDLE'],\n )\n # Convert to a soundlevel.\n # See source_sdk/public/soundflags.h:ATTN_TO_SNDLVL()\n level = (\n (50.0 + 20.0 / atten_min) if atten_min else 0.0,\n (50.0 + 20.0 / atten_max) if atten_max else 0.0,\n )\n else:\n level = (Level.SNDLVL_NORM, Level.SNDLVL_NORM)\n\n # Either 1 \"wave\", or multiple in \"rndwave\".\n wavs: List[str] = []\n for prop in snd_prop:\n if prop.name == 'wave':\n wavs.append(prop.value)\n elif prop.name == 'rndwave':\n for subprop in prop:\n wavs.append(subprop.value)\n\n channel_str = snd_prop['channel', 'CHAN_AUTO'].upper()\n channel: Union[int, Channel]\n if channel_str.startswith('CHAN_'):\n channel = Channel(channel_str)\n else:\n channel = int(channel_str)\n\n sound_version = snd_prop.int('soundentry_version', 1)\n\n if 'operator_stacks' in snd_prop:\n if sound_version == 1:\n raise ValueError(\n 'Operator stacks used with version '\n f'less than 2 in \"{snd_prop.real_name}\"!'\n )\n\n start_stack, update_stack, stop_stack = (\n Keyvalues(stack_name, [\n prop.copy()\n for prop in\n snd_prop.find_children('operator_stacks', stack_name)\n ])\n for stack_name in\n ['start_stack', 'update_stack', 'stop_stack']\n )\n else:\n start_stack, update_stack, stop_stack = [None, None, None]\n\n sounds[snd_prop.name] = Sound(\n snd_prop.real_name,\n wavs,\n volume,\n channel,\n level,\n pitch,\n start_stack,\n update_stack,\n stop_stack,\n sound_version == 2,\n )\n return sounds",
"def get_notes():\n song_index_to_notes = {}\n\n for file in glob.glob(\"../data/midi/*.mid\"):\n midi = converter.parse(file)\n song_index = int(os.path.splitext(os.path.basename(file))[0])\n #print(\"Parsing %s with an index %d\" % (file, song_index))\n\n notes_to_parse = None\n notes = []\n try: # file has instrument parts\n s2 = instrument.partitionByInstrument(midi)\n notes_to_parse = s2.parts[0].recurse()\n except: # file has notes in a flat structure\n notes_to_parse = midi.flat.notes\n\n for element in notes_to_parse:\n if isinstance(element, note.Note):\n notes.append(str(element.pitch))\n elif isinstance(element, chord.Chord):\n notes.append('.'.join(str(n) for n in element.normalOrder))\n\n song_index_to_notes[song_index] = notes\n\n return song_index_to_notes",
"def _load_audio_list(self, path):\n\n result = {}\n\n for entry in textfile.read_separated_lines_generator(path, separator='\\t', max_columns=4):\n for index, _ in enumerate(entry):\n if entry[index] == '\\\\N':\n entry[index] = None\n\n if len(entry) < 4:\n entry.extend([None] * (4 - len(entry)))\n\n if not self.include_empty_licence and entry[2] is None:\n continue\n\n if self.include_licenses is not None and entry[2] not in self.include_licenses:\n continue\n\n result[entry[0]] = entry[1:]\n\n return result",
"def read_wav_data(timestamps, wavfile, snapint=[-0.3, -0.2, -0.1, 0, 0.1, 0.2, 0.3], fft_size=1024):\n sig, samplerate = librosa.load(wavfile, sr=None, mono=True)\n data = list()\n\n # normalize sound wave\n # sig = sig / np.sqrt(np.mean(sig**2, axis=0));\n # sig = sig / np.max(np.max(np.abs(sig), axis=0));\n sig = sig / np.max(np.abs(sig))\n\n # calc a length array\n tmpts = np.array(timestamps)\n timestamp_interval = tmpts[1:] - tmpts[:-1]\n timestamp_interval = np.append(timestamp_interval, timestamp_interval[-1])\n\n for sz in snapint:\n data_r = np.array([get_wav_data_at(max(0, min(len(sig) - fft_size, coord + timestamp_interval[i] * sz)),\n sig, samplerate, fft_size=fft_size, freq_high=samplerate//4) for i, coord in enumerate(timestamps)])\n data.append(data_r)\n\n raw_data = np.array(data)\n norm_data = np.tile(np.expand_dims(\n np.mean(raw_data, axis=1), 1), (1, raw_data.shape[1], 1, 1))\n std_data = np.tile(np.expand_dims(\n np.std(raw_data, axis=1), 1), (1, raw_data.shape[1], 1, 1))\n return (raw_data - norm_data) / std_data",
"def read_pitch_from_sound_file(filename: str, samplerate: int = DEFAULT_SAMPLE_RATE):\n if os.path.isfile(filename) is False:\n raise Exception('File not found with filename = %s' % filename)\n\n print(\"====> reading pitch from sound file\")\n win_s = 4096 // DOWN_SAMPLE # fft size\n hop_s = 512 // DOWN_SAMPLE # hop size\n\n s = source(filename, samplerate, hop_s)\n samplerate = s.samplerate\n\n tolerance = 0.8\n\n pitch_o = pitch(\"yin\", win_s, hop_s, samplerate)\n pitch_o.set_unit(\"midi\")\n pitch_o.set_tolerance(tolerance)\n\n result = []\n\n # total number of frames read\n total_frames = 0\n while True:\n samples, read = s()\n # the pitch value is not rounded and many zeroes occur\n that_pitch = pitch_o(samples)[0]\n confidence = pitch_o.get_confidence()\n result.append(dict(time=total_frames / float(samplerate), pitch=that_pitch, confidence=confidence))\n total_frames += read\n if read < hop_s:\n break\n\n group_result_with_log_density = compute_density_from_pitch_result(result)\n density_level_list = compute_density_level(group_result_with_log_density, result[len(result) - 1]['time'])\n print(\"====> density level list length %s\" % len(density_level_list))\n proportion_list = get_emphasis_start_times(group_result_with_log_density, result[len(result) - 1]['time'])\n print(\"====> emphasis proportion list length = %d\" % len(proportion_list))\n return dict(pitch_result=result, emphasis_proportion_list=proportion_list, density_level_list=density_level_list)",
"def createMelody(song, outputSongFileName, timing=4):\n wavInput = (())\n wavInput1 = (())\n wavInput2 = (())\n wavInput3 = (())\n\n # Remove the beginning and end portions of the canvas that are blank\n while song[0] == ['R','R','R','R']:\n del song[0]\n while song[-1] == ['R','R','R','R']:\n del song[-1]\n\n for notesList in song:\n\n remove_dup(notesList)\n\n notesNum = []\n for i in range(len(notesList)):\n if (notesList[i].upper() == 'R'):\n notesNum.append('')\n elif (notesList[i].upper() == 'A' or notesList[i].upper() == 'B'):\n notesNum.append('3')\n else:\n notesNum.append('4')\n\n wavInput = ((notesList[0].lower() + str(notesNum[0]), timing),) + wavInput\n wavInput1 = ((notesList[1].lower() + str(notesNum[1]), timing),) + wavInput1\n wavInput2 = ((notesList[2].lower() + str(notesNum[2]), timing),) + wavInput2\n wavInput3 = ((notesList[3].lower() + str(notesNum[3]), timing),) + wavInput3\n\n\n wavInput = wavInput[::-1]\n wavInput1 = wavInput1[::-1]\n wavInput2 = wavInput2[::-1]\n wavInput3 = wavInput3[::-1]\n\n wavNames = [\".wav1.wav\",\".wav2.wav\",\".wav3.wav\",\".wav4.wav\"]\n wavInputs = [wavInput,wavInput1,wavInput2,wavInput3]\n\n validWavInputs = []\n\n for i in range(len(wavInputs)):\n if isAllRests(wavInputs[i]) == False:\n validWavInputs.append(wavInputs[i])\n\n validWavNames = wavNames[:len(validWavInputs)]\n\n call(['python','GenerateWavFiles.py',str(validWavNames) + \"@\" + str(validWavInputs)])\n\n sounds = []\n for i in range(len(validWavNames)):\n sounds.append(AudioSegment.from_wav(validWavNames[i]))\n\n combined = sounds[0]\n for i in range(1, len(sounds)):\n combined = combined.overlay(sounds[i])\n\n combined.export(outputSongFileName, format='wav')",
"def from_notes(self, notes, bpm=120.0, gap=16, ignore_polyphonic_notes=False):\n self._clear()\n\n # Compute quantization steps per second.\n steps_per_second = bpm / 60.0 * self.steps_per_bar / BEATS_PER_BAR\n\n quantize = lambda x: int(math.ceil(x - QUANTIZE_CUTOFF))\n\n # Sort track by note start times.\n notes.sort(key=lambda note: note.start_time)\n for note in notes:\n # Ignore 0 velocity notes.\n if not note.velocity:\n continue\n\n # Quantize the start and end times of the note.\n start_step = quantize(note.start_time * steps_per_second)\n end_step = quantize(note.end_time * steps_per_second)\n if end_step == start_step:\n end_step += 1\n\n # Do not allow notes to start or end in negative time.\n if start_step < 0 or end_step < 0:\n raise BadNoteException(\n 'Got negative note time: start_time = %s, end_time = %s'\n % (note.start_time, note.end_time))\n\n # If start_step comes before or lands on an already added note's start\n # step, we cannot add it. Discard the melody because it is not monophonic.\n if not self._can_add_note(start_step):\n if ignore_polyphonic_notes:\n continue\n else:\n self._clear()\n raise PolyphonicMelodyException()\n\n # If a gap of `gap` or more steps is found, end the melody.\n if (len(self) and\n self._distance_to_last_event(start_step) >= gap):\n break\n\n # Add the note-on and off events to the melody.\n self._add_note(note.pitch, start_step, end_step)\n\n self._write_all_notes()",
"def specpolfinalstokes(infile_list,polcal='polcal.txt',logfile='salt.log',debug=False):\n\n patternlist = open(datadir+'wppaterns.txt','r').readlines()\n patternpairs = dict(); patternstokes = dict()\n for p in patternlist:\n if p.split()[0] == '#': continue\n patternpairs[p.split()[0]]=(len(p.split())-3)/2\n patternstokes[p.split()[0]]=int(p.split()[1])\n wav_l,heff_l,hpa_l,qeff_l = np.loadtxt(datadir+polcal,dtype=float,unpack=True)\n calversion = open(datadir+polcal, 'r').readlines()[1][2:].rstrip()\n\n with logging(logfile, debug) as log:\n \n # organize data using names\n files = len(infile_list)\n allrawlist = []\n for i in range(files):\n object,config,wvplt,count = os.path.basename(infile_list[i]).split('.')[0].rsplit('_',4)\n if (config[0]!='c')|(wvplt[0]!='h')|(not count.isdigit()):\n log.message('File '+infile_list[i]+' is not a raw stokes file.' , with_header=False) \n continue\n allrawlist.append([i,object,config,wvplt,count])\n configlist = sorted(list(set(ele[2] for ele in allrawlist))) # unique configs\n\n # correct raw stokes for track (TBS)\n\n # do one config at a time, since different configs may have different number of wavelengths\n for conf in configlist:\n log.message(\"\\nConfiguration: %s\" % conf, with_header=False) \n rawlist = [entry for entry in allrawlist if entry[2]==conf]\n for col in (4,3,1,2): rawlist = sorted(rawlist,key=operator.itemgetter(col)) \n rawstokes = len(rawlist)\n cols = pyfits.open(infile_list[rawlist[0][0]])['SCI'].data.shape[-1]\n stokes_jsw = np.zeros((rawstokes,2,cols)); \n var_jsw = np.zeros_like(stokes_jsw); bpm_jsw = np.zeros_like(stokes_jsw).astype(int)\n wav_jw = np.zeros((rawstokes,cols))\n comblist = []\n # get data\n for j in range(rawstokes):\n i,object,config,wvplt,count = rawlist[j]\n if j==0:\n lampid = pyfits.getheader(infile_list[i],0)['LAMPID'].strip().upper()\n telpa = float(pyfits.getheader(infile_list[i],0)['TELPA'])\n if lampid==\"NONE\":\n pacaltype = \"Equatorial\"\n hpa_l -= (telpa % 180)\n else:\n pacaltype =\"Instrumental\"\n calinfo = (pacaltype+' '+calversion)\n log.message(' Calibration: '+calinfo, with_header=False) \n \n wppat = pyfits.getheader(infile_list[i],0)['WPPATERN']\n wav0 = pyfits.getheader(infile_list[i],'SCI')['CRVAL1']\n dwav = pyfits.getheader(infile_list[i],'SCI')['CDELT1']\n stokes_jsw[j] = pyfits.open(infile_list[i])['SCI'].data.reshape((2,-1))\n var_jsw[j] = pyfits.open(infile_list[i])['VAR'].data.reshape((2,-1))\n bpm_jsw[j] = pyfits.open(infile_list[i])['BPM'].data.reshape((2,-1))\n wav_jw[j] = np.mgrid[wav0:(wav0+cols*dwav):dwav]\n if int(count)==1:\n comblist.append((j,object,config,wvplt,count,wppat))\n else:\n comblist[-1] = (j,object,config,wvplt,count,wppat)\n\n # combine multiple instances (count > 1)\n combstokes = len(comblist)\n stokes_ksw = np.zeros((combstokes,2,cols)); \n var_ksw = np.zeros_like(stokes_ksw)\n bpm_ksw = np.zeros_like(stokes_ksw).astype(int)\n wav_kw = np.zeros((combstokes,cols))\n chisqstokes_kw = np.zeros_like(wav_kw)\n obslist = []\n obsobject = ''\n obsconfig = ''\n chisqlist = [[]]\n for k in range(combstokes):\n j,object,config,wvplt,count,wppat = comblist[k]\n stokes_ksw[k] = stokes_jsw[j-int(count)+1:j+1].sum(axis=0)\n var_ksw[k] = var_jsw[j-int(count)+1:j+1].sum(axis=0) \n bpm_ksw[k] = (bpm_jsw[j-int(count)+1:j+1].sum(axis=0) > 0).astype(int)\n wav_kw[k] = wav_jw[j]\n\n # compute chisq/dof for multiple instances\n if int(count) > 1:\n combstokes_w = np.zeros(cols)\n bok = (bpm_ksw[k,1] == 0) \n combstokes_w[bok] = 
stokes_ksw[k,1,bok]/stokes_ksw[k,0,bok]\n for jj in range(j-int(count)+1,j+1):\n stokes_w = np.zeros(cols); errstokes_w = np.zeros_like(stokes_w)\n stokes_w[bok] = stokes_jsw[jj,1,bok]/stokes_jsw[jj,0,bok]\n errstokes_w[bok] = np.sqrt(var_jsw[jj,1,bok]/(stokes_jsw[jj,0,bok])**2)\n chisqstokes_kw[k,bok] += ((stokes_w[bok]-combstokes_w[bok])/errstokes_w[bok])**2\n chisqstokes_kw[k] /= int(count)-1\n chisqstokes = chisqstokes_kw[k].sum()/bok.sum()\n chisqlist[-1].append(chisqstokes)\n log.message(\" Chisq/dof Filter Pair %s: %7.2f\" % (wvplt,chisqstokes), with_header=False)\n if ((object != obsobject) | (config != obsconfig)):\n obslist.append([k,object,config,wppat,1])\n chisqlist.append([])\n obsobject = object; obsconfig = config\n else:\n obslist[-1][4] +=1\n \n # for each obs combine stokes, apply efficiency and PA calibration as appropriate for pattern, and save\n obss = len(obslist)\n for obs in range(obss):\n k,object,config,wppat,pairs = obslist[obs]\n obsname = object+\"_\"+config\n log.message(\"\\n Observation: %s\" % obsname, with_header=False)\n# print k,object,config,wppat,pairs\n finstokes = patternstokes[wppat]\n if pairs != patternpairs[wppat]:\n log.message(' Not a complete pattern, skipping observation', with_header=False) \n continue\n stokes_fw = np.zeros((finstokes,cols))\n var_fw = np.zeros_like(stokes_fw)\n ok_fw = bpm_ksw[k:k+pairs,:].sum(axis=0) == 0\n ok_w = ok_fw.all(axis=0)\n bpm_fw = np.repeat((np.logical_not(ok_w))[None,:],finstokes,axis=0)\n stokes_fw[0] = stokes_ksw[k:k+pairs,0].sum(axis=0)/pairs\n var_fw[0] = var_ksw[k:k+pairs,0].sum(axis=0)/pairs**2 \n\n if wppat.count('Linear'):\n var_fw = np.vstack((var_fw,np.zeros(cols))) # add QU covariance\n if wppat=='Linear':\n stokes_fw[1:,ok_w] = stokes_ksw[k:k+2,1,ok_w]*(stokes_fw[0,ok_w]/stokes_ksw[k:k+2,0,ok_w])\n var_fw[1:3,ok_w] = var_ksw[k:k+2,1,ok_w]*(stokes_fw[0,ok_w]/stokes_ksw[k:k+2,0,ok_w])**2\n elif wppat=='Linear-Hi':\n # for Linear-Hi, must go to normalized stokes in order for the pair combination to cancel systematic errors\n nstokes_pw = np.zeros((pairs,cols)); nvar_pw = np.zeros((pairs,cols))\n nstokes_fw = np.zeros((finstokes,cols)); nvar_fw = np.zeros((finstokes+1,cols))\n nstokes_pw[:,ok_w] = stokes_ksw[k:k+pairs,1,ok_w]/stokes_ksw[k:k+pairs,0,ok_w]\n nvar_pw[:,ok_w] = var_ksw[k:k+pairs,1,ok_w]/(stokes_ksw[k:k+pairs,0,ok_w])**2\n if debug: \n np.savetxt(obsname+\"_nstokes.txt\",np.vstack((ok_w.astype(int),nstokes_pw)).T,fmt=\"%3i \"+4*\"%10.6f \")\n np.savetxt(obsname+\"_nvar.txt\",np.vstack((ok_w.astype(int),nvar_pw)).T,fmt=\"%3i \"+4*\"%14.9f \")\n nstokes_fw[1] = 0.5*(nstokes_pw[0] + (nstokes_pw[1]-nstokes_pw[3])/np.sqrt(2.))\n nstokes_fw[2] = 0.5*(nstokes_pw[2] + (nstokes_pw[1]+nstokes_pw[3])/np.sqrt(2.))\n nvar_fw[1] = 0.25*(nvar_pw[0] + (nvar_pw[1]+nvar_pw[3])/2.)\n nvar_fw[2] = 0.25*(nvar_pw[2] + (nvar_pw[1]+nvar_pw[3])/2.)\n nvar_fw[3] = 0.25*((nvar_pw[1] - nvar_pw[3])/2.)\n stokes_fw[1:] = nstokes_fw[1:]*stokes_fw[0]\n var_fw[1:] = nvar_fw[1:]*stokes_fw[0]**2\n chisqq = ((nstokes_pw[0,ok_w] - nstokes_fw[1,ok_w])**2/nvar_fw[1,ok_w]).sum()/ok_w.sum() \n chisqu = ((nstokes_pw[2,ok_w] - nstokes_fw[2,ok_w])**2/nvar_fw[2,ok_w]).sum()/ok_w.sum()\n chisqlist[obs].append(chisqq)\n chisqlist[obs].append(chisqu)\n log.message(\" Chisq/dof Linear-Hi Q,U: %7.2f %7.2f\" % (chisqq,chisqu), with_header=False) \n\n # calculate, print estimated systematic error from chisq mean\n if len(chisqlist[obs]):\n chisqdof = np.array(chisqlist[obs]).mean()\n dofs = float(ok_fw[0].sum())\n chisqdoferr = 
np.sqrt(2./dofs)\n syserr = 0. # estimate systematic error using noncentral chisq distribution\n if (chisqdof - 1.) > 3.*chisqdoferr:\n nvar_fw = np.zeros_like(var_fw)\n nvar_fw[:,ok_fw[0]] = var_fw[:,ok_fw[0]]/stokes_fw[0,ok_fw[0]]**2\n syserr = np.sqrt(dofs*(chisqdof - 1.)/(1./nvar_fw[1,ok_fw[1]]).sum())\n print syserr \n \n log.message((\" Mean chisq/dof: %5.2f Estimated sys %%error: %5.2f\") % \\\n (chisqdof,100.*syserr), with_header=False)\n\n heff_w = interp1d(wav_l,heff_l,kind='cubic')(wav_kw[k])\n par_w = -interp1d(wav_l,hpa_l,kind='cubic')(wav_kw[k])\n c_w = np.cos(2.*np.radians(par_w)); s_w = np.sin(2.*np.radians(par_w))\n stokes_fw[1:] /= heff_w\n var_fw[1:] /= heff_w**2\n stokes_fw[1:] = stokes_fw[1]*c_w - stokes_fw[2]*s_w , \\\n stokes_fw[1]*s_w + stokes_fw[2]*c_w\n var_fw[1:3] = var_fw[1]*c_w**2 + var_fw[2]*s_w**2 , \\\n var_fw[1]*s_w**2 + var_fw[2]*c_w**2\n var_fw[3] = c_w*s_w*(var_fw[1] - var_fw[2]) + (c_w**2-s_w**2)*var_fw[3]\n\n # save final stokes fits file\n infile = infile_list[rawlist[comblist[k][0]][0]]\n hduout = pyfits.open(infile)\n hduout['SCI'].data = stokes_fw.astype('float32').reshape((3,1,-1))\n hduout['SCI'].header.update('CTYPE3','I,Q,U')\n hduout['VAR'].data = var_fw.astype('float32').reshape((4,1,-1))\n hduout['VAR'].header.update('CTYPE3','I,Q,U,QU')\n\n hduout['BPM'].data = bpm_fw.astype('uint8').reshape((3,1,-1))\n hduout['BPM'].header.update('CTYPE3','I,Q,U')\n hduout[0].header.update('POLCAL',calinfo)\n if len(chisqlist[obs]): \n hduout[0].header.update('SYSERR',100.*syserr, \\\n 'estimated % systematic error')\n outfile = object+'_'+config+'_stokes.fits'\n hduout.writeto(outfile,clobber=True,output_verify='warn')\n log.message('\\n '+outfile+' Stokes I,Q,U', with_header=False)\n \n# elif wppat.count('Circular'): TBS \n\n# elif wppat=='All-Stokes': TBS\n\n return",
"def get_notes(n_notes=3):\n notes = []\n \n for ii, file in enumerate(glob.glob(\"data/maestro-v2.0.0/2004/*.midi\")):\n if ii >= n_notes:\n break\n pickle_file_name = file[:-4] + 'pkl'\n\n if os.path.isfile(pickle_file_name):\n print(f'Reading parsed file: {pickle_file_name}')\n with open(pickle_file_name, 'rb') as handle:\n midi = pickle.load(handle)\n else:\n midi = converter.parse(file)\n\n with open(pickle_file_name, 'wb') as handle:\n print(f'writing parsed file: {pickle_file_name}')\n unserialized_data = pickle.dump(midi, \n handle, \n protocol=pickle.HIGHEST_PROTOCOL\n )\n\n\n print(\"Parsing %s\" % file)\n\n notes_to_parse = None\n try: # file has instrument parts\n s2 = instrument.partitionByInstrument(midi)\n notes_to_parse = s2.parts[0].recurse() \n except: # file has notes in a flat structure\n notes_to_parse = midi.flat.notes\n \n for element in notes_to_parse:\n if isinstance(element, note.Note):\n notes.append(str(element.pitch))\n elif isinstance(element, chord.Chord):\n notes.append('.'.join(str(n) for n in element.normalOrder))\n \n return notes",
"def load_clip_times_FPTL_format(clip_list_file, \n path_to_data, \n start_times_file, \n output_dir,\n clip_window=None, \n clip_window_origin=None,\n plot_fluor_around_peaks=False,\n delay = 0.0933492779732,\n print_peak_vals=True,\n print_peak_times = True ):\n\n print \"clip_list_file\", clip_list_file\n pkl_file = open(clip_list_file, 'rb')\n data = pickle.load(pkl_file)\n print 'data', data\n\n movie_info_dict = dict()\n all_start_times_dict = load_start_times(start_times_file)\n\n all_peak_vals = dict()\n\n for key in data:\n if key != 'labels':\n movie_info = dict()\n trial = data[key]\n print 'data[labels]', data['labels']\n print 'key', key\n label = data['labels'][key]\n print label.split('_', 3)\n animal_id, date, exp_type, mouse_type = label.split('_', 3)\n name = animal_id + '_' + date + '_' + exp_type\n print name\n print mouse_type\n\n movie_info['movie_file'] = get_movie_file(name, path_to_data)\n movie_info['output_file'] = get_output_file(name, output_dir)\n peak_inds = data[key]['peak_indices']\n if 'fluor_data' in trial:\n time_stamps = trial['time_stamps']\n fluor_data = trial['fluor_data']\n else:\n print \"using decimated time series\"\n time_stamps = trial['time_stamps_decimated']\n fluor_data = trial['fluor_data_decimated']\n time_stamps = time_stamps - delay\n print \"INCLUDING DELAY from filter: \", delay\n\n\n movie_info['peak_times'] = time_stamps[peak_inds]\n movie_info['peak_vals'] = fluor_data[peak_inds]\n movie_info['name'] = name\n movie_info['start_time'] = all_start_times_dict[name]\n movie_info['mouse_type'] = mouse_type\n movie_info['interaction_start'] = None\n movie_info['interaction_end'] = None\n movie_info['mouse_type'] = mouse_type\n\n movie_info_dict[name] = movie_info\n\n all_peak_vals[name] = movie_info['peak_vals']\n\n if plot_fluor_around_peaks:\n if clip_window is not None and clip_window_origin is not None:\n plotFluorAroundPeaks(fluor_data, time_stamps, peak_inds,\n clip_window, clip_window_origin,\n output_dir, name, movie_info['start_time'])\n\n if print_peak_vals:\n output_folder = output_dir + '/peak_vals/'\n if not os.path.exists(output_folder):\n os.makedirs(output_folder)\n w = csv.writer(open(output_folder+'peak_vals.csv', \"w\"), delimiter=',')\n for key, val in all_peak_vals.items():\n w.writerow([key] + [', '.join([str(x) for x in val])])\n\n pickle.dump( all_peak_vals, open( output_folder + 'peak_vals.pkl', \"wb\" ) )\n\n\n if print_peak_times:\n output_folder = output_dir + '/peak_times/'\n if not os.path.exists(output_folder):\n os.makedirs(output_folder)\n filename = output_folder+'peak_times.txt'\n f = open(filename, 'w')\n for trial in movie_info_dict.keys():\n movie_info = movie_info_dict[trial]\n peak_times = movie_info['peak_times']\n name = movie_info['name']\n f.write(\"%s\\n\" % name)\n for val in peak_times:\n f.write(\"%f\\n\" %val)\n f.close()\n print filename\n\n\n ## Print for debugging, and to check that labels match up with blind data\n # print 'peak_inds', peak_inds, np.max(peak_inds)\n # print \"movie_info['peak_times'] \", movie_info['peak_times'] \n # print \"movie_info['peak_vals']\", movie_info['peak_vals']\n # pp = pprint.PrettyPrinter(indent=4)\n # pp.pprint( trial['labels'])\n\n return movie_info_dict",
"def parse_raw(data):\n for sample in data:\n assert \"src\" in sample\n json_line = sample[\"src\"]\n obj = json.loads(json_line)\n assert \"key\" in obj\n assert \"wav\" in obj\n assert \"txt\" in obj\n key = AishellKeyMapper.encode(obj[\"key\"])\n wav_file = obj[\"wav\"]\n txt = obj[\"txt\"]\n try:\n if \"start\" in obj:\n assert \"end\" in obj\n sample_rate = torchaudio.backend.sox_io_backend.info(wav_file).sample_rate\n start_frame = int(obj[\"start\"] * sample_rate)\n end_frame = int(obj[\"end\"] * sample_rate)\n waveform, _ = torchaudio.backend.sox_io_backend.load(\n filepath=wav_file, num_frames=end_frame - start_frame, frame_offset=start_frame\n )\n else:\n waveform, sample_rate = torchaudio.load(wav_file)\n example = dict(key=key, txt=txt, wav=waveform, sample_rate=sample_rate)\n yield example\n except Exception as ex:\n logging.warning(\"Failed to read {}\".format(wav_file))",
"def play_notes(self, show_map=False):\n # print(self.programs)\n pianorolls = []\n for track in self.notes:\n pianoroll = np.zeros((track.shape[1] * 3, 128))\n\n for i in range(track.shape[0]):\n note_track = np.array(track[i, :])\n note_track += 1\n notes_pos = np.nonzero(note_track)\n f = 3\n for pos in notes_pos[0]:\n # print(\"Error\", f * pos, f * pos + self.dura_to_timecell(note_track[pos] - 1),\n # self.dura_to_timecell(note_track[pos] - 1))\n pianoroll[f * pos:f * pos + self.dura_to_timecell(note_track[pos] - 1) + 1, 83 - i] = 90\n pianorolls.append(pianoroll)\n\n # print(\"pianoroll\")\n print(self.notes_index)\n tracks = []\n for i in range(len(pianorolls)):\n tracker = Track(pianoroll=pianorolls[i], program=self.programs[i])\n tracks.append(tracker)\n multitrack = Multitrack(tracks=tracks)\n multitrack.write(\"create1.mid\")\n\n if show_map:\n print(\"Show map will not work\")\n # tracker.plot()\n # plt.show()",
"def read(self):\n # open the .SPE file\n with open(self._input_file_path, 'rb') as f:\n lines = f.readlines()\n # Create an empty dictionary for the metadata\n metadata_dictionary = {}\n\n # Search through the file for the needed metadata\n metadata_dictionary['date_acquired'] = re.search(b'date=\"(.*?)\"', lines[1])[1].decode('ANSI') \n metadata_dictionary['width'] = int(re.search(b'width=\"(.*?)\"', lines[1])[1])\n metadata_dictionary['height'] = int(re.search(b'height=\"(.*?)\"', lines[1])[1])\n metadata_dictionary['size'] = metadata_dictionary['width']*metadata_dictionary['height']\n metadata_dictionary['exposure_time'] = int(re.search(b'<ExposureTime type=\"Double\">(.*?)</ExposureTime>', lines[1])[1])\n metadata_dictionary['excitation_wavelength'] = float(re.search(b'laserLine=\"(.*?)\"',lines[1])[1])\n metadata_dictionary['center_wavelength'] = float(re.search(b'<CenterWavelength type=\"Double\">(.*?)</CenterWavelength>',lines[1])[1])\n metadata_dictionary['orientation'] = re.search(b'orientation=\"(.*?)\"',lines[1])[1].decode('ANSI')\n\n # Get the wavelength and intensity\n wavelength_string = re.search(b'<Wavelength xml:space=\"preserve\">(.*?)</Wavelength>',lines[1])[1].decode('utf-8')\n wavelength = np.array(wavelength_string.split(','), dtype=np.float64)\n\n f.seek(4100)\n intensity = np.fromfile(f,dtype=np.float32,count=metadata_dictionary['size'])\n\n raman_shift_wavenumbers = 1e7*(1/metadata_dictionary['excitation_wavelength'] - 1/wavelength)\n\n f.close()\n \n # create the sidpy dataset\n data_set = Dataset.from_array(intensity, name='Raman Spectra')\n\n data_set.data_type = 'spectrum'\n data_set.units = 'counts'\n data_set.quantity = 'Intensity'\n\n # set dimensions\n data_set.set_dimension(0, Dimension(raman_shift_wavenumbers, name='Raman Shift',\n units = 'cm-1',\n quantity='Raman shift',\n dimension_type='spectral'))\n data_set.set_dimension(1, Dimension(intensity, name='Intensity',\n units = 'counts',\n quantity='intensity',\n dimension_type='spectral')) \n\n data_set.metadata = metadata_dictionary\n\n return data_set",
"def read_transcription_file(file_path, audio_file_path):\n with open(file_path) as in_file:\n last_timestamp = 0\n res = []\n transcription = \"\"\n for line in in_file:\n time_stamp_match = re.match(\"\\[([0-9\\]+\\.[0-9]+)\\]\", line)\n #if this regex matched then the line is a timestamp\n if time_stamp_match:\n timestamp = float(time_stamp_match.group(1))\n if transcription and transcription.strip() not in ['(())', \"<no-speech>\"]:\n single_instance = {\"start_time\": last_timestamp, \n \"end_time\": timestamp,\n \"transcription\": transcription,\n \"audio_file\" : audio_file_path}\n res.append(single_instance)\n last_timestamp = timestamp\n else:\n last_timestamp = timestamp # this handles silence at beginning\n else:\n transcription = line.strip()\n \n return res",
"def MTread(fn,slMode='s',leng=0,start=0, wav_out=None, outpath='Default Folder',header=None):\n #check variables\n try:\n fn\n except NameError:\n raise Warning('Filename fn needs to be defined!')\n \n try:\n slMode\n except NameError:\n warnings.warn('slMode - the start and length mode was not defined...defaulting to s for seconds')\n slMode = 's'\n if slMode.upper() not in ['S','P']:\n warnings.warn('slMode - the start and length mode has to be either s for seconds or p for points...defaulting to s for seconds')\n slMode = 's'\n \n try:\n leng\n except NameError:\n warnings.warn('leng - the length of the data to be read in was not defined...defaulting to leng = 0, reading in all data')\n leng = 0\n if type(leng) != int:\n warnings.warn('leng - the length of the data has to be an integer...defaulting to leng = 0, reading in all data')\n leng = 0\n \n try:\n start\n except NameError:\n warnings.warn('start - the starting point or time was not defined...defaulting to start = 0, reading from the start')\n start = 0\n if type(leng) != int:\n warnings.warn('start - the starting point or time was not defined...defaulting to start = 0, reading from the start')\n start = 0\n \n # Create empty dictionaries\n HEADER = {}\n INFO = {}\n \n if leng==0: leng = np.inf\n \n #check if auxiliary data\n vcode = path.basename(fn)[2]\n aux = True if vcode in ['I','J','K','P','T','X','Y','Z'] else False\n \n #open the binary file and start reading\n with open(fn, \"rb\") as f:\n magicstring = f.read(8).decode('ascii').strip().strip('\\x00')\n if magicstring == 'DATA':\n print(datetime.now().strftime(\"%H:%M:%S\") + ' - Found Data...')\n print(datetime.now().strftime(\"%H:%M:%S\") + ' - Getting Header information...')\n HEADER['totalhdrs'] = int(f.read(3).decode('ascii').strip().strip('\\x00'))\n HEADER['abbrev '] = f.read(8).decode('ascii').strip().strip('\\x00')\n HEADER['stationcode'] = f.read(3).decode('ascii').strip().strip('\\x00')\n HEADER['title'] = f.read(82).decode('ascii').strip().strip('\\x00')\n HEADER['month'] = (f.read(3).decode('ascii').strip().strip('\\x00'))\n HEADER['day'] = (f.read(3).decode('ascii').strip().strip('\\x00'))\n HEADER['year'] = (f.read(5).decode('ascii').strip().strip('\\x00'))\n HEADER['hours'] = (f.read(3).decode('ascii').strip().strip('\\x00'))\n HEADER['minutes'] = (f.read(3).decode('ascii').strip().strip('\\x00'))\n HEADER['seconds'] = (f.read(3).decode('ascii').strip().strip('\\x00'))\n HEADER['msec'] = (f.read(4).decode('ascii').strip().strip('\\x00'))\n HEADER['sampling_period'] = float(f.read(15).decode('ascii').strip().strip('\\x00'))\n HEADER['samplebits'] = int(f.read(3).decode('ascii').strip().strip('\\x00'))\n HEADER['wordsize'] = int(f.read(2).decode('ascii').strip().strip('\\x00'))\n \n #if HEADER['wordsize'] < HEADER['samplebits']/8:\n #warnings.warn('The samplebits field Does not fit the wordsize field. --- This file may be bad. 
')\n HEADER['typemark'] = f.read(1).decode('ascii').strip().strip('\\x00')\n \n HEADER['swapping'] = f.read(1).decode('ascii').strip().strip('\\x00')\n \n HEADER['signing'] = f.read(1).decode('ascii').strip().strip('\\x00')\n HEADER['caltype'] = f.read(1).decode('ascii').strip().strip('\\x00')\n HEADER['calmin'] = float(f.read(15).decode('ascii').strip().strip('\\x00'))\n HEADER['calmax'] = float(f.read(15).decode('ascii').strip().strip('\\x00'))\n HEADER['calunits'] = f.read(40).decode('ascii').strip().strip('\\x00')\n HEADER['recordsize'] = int(f.read(6).decode('ascii').strip().strip('\\x00'))\n HEADER['sourcevers'] = f.read(9).decode('ascii').strip().strip('\\x00')\n HEADER['sourcesn'] = f.read(16).decode('ascii').strip().strip('\\x00')\n print(HEADER)\n \n print(datetime.now().strftime(\"%H:%M:%S\") + ' - Getting Meta data...')\n INFO['filename'] = fn\n INFO['filesize'] = path.getsize(fn)\n INFO['srate'] = 1/HEADER['sampling_period']\n INFO['when'] = datetime.strptime(HEADER['year'] + '/' + HEADER['month'] + '/' + HEADER['day'] + ' ' + HEADER['hours'] + ':' + HEADER['minutes'] + ':' + HEADER['seconds'] + '.' + HEADER['msec'],'%Y/%m/%d %H:%M:%S.%f')\n INFO['datenumber'] = date.toordinal(INFO['when'])\n \n print(datetime.now().strftime(\"%H:%M:%S\") + ' - Reading Data...')\n if slMode.upper() == 'P': # Start & Length specified in # Points (samples)\n INFO['whenC'] = INFO['when'] + timedelta(seconds=start/INFO['srate'])\n INFO['datenumber'] = INFO['datenumber'] + (start/INFO['srate']/24/3600)\n else:\n INFO['whenC'] = INFO['when'] + timedelta(seconds=start) # Corrected start time (with offset)\n INFO['datenumber'] = INFO['datenumber'] + start/24/3600\n \n if 'wordsize' in HEADER:\n if HEADER['wordsize'] == '':\n HEADER['wordsize'] = 2\n else:\n HEADER['wordsize'] = 2\n \n INFO['nsamp'] = int((INFO['filesize'] - 512 * HEADER['totalhdrs']) / HEADER['wordsize'])\n INFO['seconds'] = INFO['nsamp'] / INFO['srate']\n \n if leng > 0: # Only load data if it's been asked for.\n if any(x in HEADER['swapping'] for x in ['S','L','s','l']):\n mode = '<'\n else:\n mode = '>'\n \n status = 0\n if slMode.upper() == 'P': # specified start time in sample 'P'oints rather than time\n try:\n f.seek(int(512 * HEADER['totalhdrs']) + int(start) * HEADER['wordsize']) # Skip by samples/points\n except:\n status = 1\n else:\n try:\n f.seek(int(512 * HEADER['totalhdrs']) + round(start * INFO['srate'] * HEADER['wordsize'])) # skip by time (seconds)\n except:\n status = 1\n \n if status == 0: # If status is nonzero, we probably went past the end of the file.\n if HEADER['caltype'].upper() == 'F':\n if not any(x == HEADER['wordsize'] for x in [4,8]):\n f.close(f)\n #raise Warning('Invalid word size! 
Only valid Float sizes are four or eight bytes.')\n binType = 'float' + str(HEADER['wordsize'] * 8)\n else:\n binType = 'bit' + str(HEADER['wordsize'] * 8)\n if any(x in HEADER['signing'] for x in ['U','u']):\n binType = 'u' + binType\n \n \n if slMode.upper() == 'P':\n if leng == np.inf:\n fi = f.read()\n else:\n fi = f.read(leng)\n \n else:\n if leng == np.inf:\n fi = f.read()\n else:\n fi = f.read(int(leng*INFO['srate'])*2)\n if aux:\n fmt = '%c%iH' %(mode,len(fi)/2)\n else:\n fmt = '%c%ih' %(mode,len(fi)/2)\n p = unpack(fmt,fi)\n \n calmax = HEADER['calmax']\n calmin = HEADER['calmin']\n \n if (type(calmin) == float and type(calmax) == float and ((calmin + np.spacing(1)) < calmax) and HEADER['caltype'].upper() != 'F'):\n calmax = HEADER['calmax']\n calmin = HEADER['calmin']\n if HEADER['signing'].upper() == 'U':\n bitmin = 0\n bitmax = 2**HEADER['samplebits'] - 1\n else:\n bitmin = -(2**(HEADER['samplebits']-1))\n bitmax = (2**(HEADER['samplebits']-1)) - 1\n \n \n multiplier = (calmax - calmin) / (bitmax - bitmin)\n p = (np.array(p) - bitmin) * multiplier + calmin\n else:\n p = []# Output an empty matrix if requested data is beyond the length of the current file\n \n else:\n p = [] # Also output an empty matrix of zero length LENGTH input is requested (ie, only return header/info values)\n INFO['count'] = 0\n print(datetime.now().strftime(\"%H:%M:%S\") + ' - Returning data...')\n \n #check if it is a data or aux file\n \n if aux:\n p = pd.DataFrame({'Value':p})\n p['VarCode'] = vcode\n p['mission'] = HEADER['title'].split('-')[0] \n p['sampling_rate'] = HEADER['sampling_period']\n p['nSample'] = np.arange(1,p.shape[0]+1)\n p['start_time'] = pd.to_datetime(HEADER[\"year\"] + \"-\" + HEADER[\"month\"] + \"-\" + HEADER[\"day\"] + \" \" + HEADER[\"hours\"] + \":\" +\\\n HEADER[\"minutes\"] + \":\" + HEADER[\"seconds\"] + \".\" + HEADER[\"msec\"])\n p['sec_since_start'] = p['nSample'] * p['sampling_rate']\n p['Time'] = p['start_time'] + pd.to_timedelta(p['sec_since_start'], unit='s')\n return(p,HEADER,'aux')\n else:\n if wav_out != None:\n print(datetime.now().strftime(\"%H:%M:%S\") + ' - Saving wav file...' + HEADER['title'].split('-')[0] )\n if 'p':\n if outpath=='Default Folder':\n outpath = path.dirname(fn)\n outfn = outpath +'\\\\' + INFO['when'].strftime('D%m%d%YT%H%M%S') + '_' + path.basename(fn)[:-3] + '.wav'\n sr = int(INFO['srate'])\n data = p\n write(outfn,int(sr), np.int16(data/(abs(data).max())*np.iinfo(np.int16).max))\n \n if header != None:\n if outpath=='Default Folder':\n outpath = path.dirname(fn)\n hh = pd.DataFrame.from_dict(HEADER, orient='index')\n hh.to_csv( outpath +'\\\\' + INFO['when'].strftime('D%m%d%YT%H%M%S') + '_' + path.basename(fn)[:-3] + '.csv')\n if 'p':\n return p,HEADER,INFO",
"def read(files, save):\n\t# NOTE all soundings are size obs long, they must be filled in with zeros for this data format...\n\t# create the HDF5 document\n\tdoc = h5(save)\n\tsize = 450 # this hopefully exceeds the size of the arrays # CPIN Files are much shorter...\n\tdoc.create(pres=size, temp=size, dewpt=size, rh=size, r=size, u=size, v=size, z=size, lat=1, lon=1, theta=size, thte=size,\n\t\twspd=size, wdir=size, gamma=size, stab=size, N=size, rich=size, thtdef=size, cpin=size)\n\t# those last two do not have to be included...\n\t# Z=geopotenital height\n\n\t# now read the files!\n\tfor f in sorted(files):\n\t\tfname = f.split('/')[-1]\n\t\t# if 'smth' not in fname and NCAR not in fname: continue\n\t\tl.info('reading ' + fname)\n\t\t# launch time comes from line 2 of the file, the last element\n\t\tdf = open(f, 'r')\n\t\ttxt = df.read(2000).split('\\n') # way more than we need\n\t\tdf.close()\n\t\tlatln = txt[0].split() # keys 1,2 will be what we want\n\t\ttry:\n\t\t\ttm = s2t(txt[1].split()[-1] + 'UTC', '%Y%m%d%H%M%Z')\n\t\texcept:\n\t\t\t# drat.\n\t\t\tprint txt.split('\\n')[1]\n\t\t\tcontinue\n\t\ttry:\n\t\t\tif 'cpin' in fname:\n\t\t\t\tz, p, t, td, rh, r, wb, tv, tht, thte, thtw, ws, wd, u, v, vflg, gamma, stab, N, rich, thtdef, cpin = np.loadtxt(f, skiprows=4, unpack=True)\n\t\t\t\t# r is mixing ratio\n\t\t\telse:\n\t\t\t\tz, p, t, td, rh, r, wb, tv, tht, thte, thtw, ws, wd, u, v, vflg, gamma, stab, N, rich = np.loadtxt(f, skiprows=4, unpack=True)\n\t\t\t\t# r is mixing ratio\n\t\texcept:\n\t\t\tl.warning('This file could not be read')\n\t\t\tcontinue\n\n\t\t# and append this data! I will trust the time seconds, instead of recomputing the time\n\t\t# but, before that, we have to make them all the same size - size long\n\t\tnl = np.zeros(size - t.shape[0]) - 999.00 # -999 array to fluff the end\n\t\tp = np.concatenate((p, nl))\n\t\tt = np.concatenate((t, nl))\n\t\ttd = np.concatenate((td, nl))\n\t\trh = np.concatenate((rh, nl))\n\t\tr = np.concatenate((r, nl))\n\t\ttv = np.concatenate((tv, nl))\n\t\ttht = np.concatenate((tht, nl))\n\t\tthte = np.concatenate((thte, nl))\n\t\tws = np.concatenate((ws, nl))\n\t\twd = np.concatenate((wd, nl))\n\t\tgamma = np.concatenate((gamma, nl))\n\t\tstab = np.concatenate((stab, nl))\n\t\tN = np.concatenate((N, nl))\n\t\trich = np.concatenate((rich, nl))\n\t\tu = np.concatenate((u, nl))\n\t\tv = np.concatenate((v, nl))\n\t\tz = np.concatenate((z, nl))\n\t\tif 'cpin' in fname:\n\t\t\tcpin = np.concatenate((cpin, nl))\n\t\t\tthtdef = np.concatenate((thtdef, nl))\n\t\t\tdoc.append(tm, persist=True, pres=p, temp=t, dewpt=td, rh=rh, r=r, u=u, v=v, z=z, lat=[latln[1]], lon=[latln[2]],\n\t\t\t\ttheta=tht, thte=thte, wspd=ws, wdir=wd, gamma=gamma, stab=stab, N=N, rich=rich, cpin=cpin, thtdef=thtdef)\n\t\telse:\n\t\t\tdoc.append(tm, persist=True, pres=p, temp=t, dewpt=td, rh=rh, r=r, u=u, v=v, z=z, lat=[latln[1]], lon=[latln[2]],\n\t\t\t\ttheta=tht, thte=thte, wspd=ws, wdir=wd, gamma=gamma, stab=stab, N=N, rich=rich)\n\tdoc.close()",
"def loadMusic(dirName):\n midiDir = os.path.dirname(os.path.abspath(__file__)) + \"/midi/\"\n platformDir = os.path.join(midiDir, dirName) + \"/\"\n\n if not os.path.isdir(platformDir):\n print \"No platform named\", platformDir, \"in directory\", midiDir\n return None\n\n midiFiles = os.listdir(platformDir)\n midiFiles = [platformDir + \"/\" + midiFile for midiFile in midiFiles]\n\n songs = []\n\n for midiFile in midiFiles:\n with open(midiFile, \"r\") as f:\n lines = f.readlines()\n\n song = []\n for line in lines:\n line = line.split()\n\n # extract pitch and duration from .txt song data, convert\n # those values to pysynth format, and add the\n # (pitch, duration) tuple to the song list\n if \"TR\" in line and line[line.index(\"TR\") + 1] == \"1\" \\\n and \"NT\" in line:\n noteIndex = line.index(\"NT\")\n pitch = line[noteIndex + 1]\n pitch = formatPitch(pitch)\n\n duration = line[noteIndex + 2]\n duration = formatDuration(duration)\n\n pysynthTuple = (pitch, duration)\n song.append(pysynthTuple)\n\n if song:\n songs.append(song)\n return songs",
"def audioRead(path):\n data, samplerate = sf.read(path)\n frames = data.shape[0]\n channels = len(data.shape)\n duration = 1/samplerate*frames\n return data, samplerate, path, duration, frames, channels",
"def read(self, path):\n pbase = os.path.splitext(path)[0]\n gsid = pbase.split('/')[-2]\n gender, sid = gsid[0], gsid[1:]\n assert sid in self._spkr_table\n phoneseq = phnread(pbase+'.PHN')\n wrdseq = phnread(pbase+'.WRD')\n transcrpt = txtread(pbase+'.TXT')\n sample = TIMITSpeech(\n *audioread(path), speaker=sid, gender=gender,\n transcript=transcrpt, phonemeseq=phoneseq,\n wordseq=wrdseq\n )\n #sample.phonemeseq = [\n # (t, PHONETABLE[p]) for t, p in sample.phonemeseq]\n return sample",
"def slice_from_reading(reading_path, waveforms_path, slice_duration=5, archive_definitions=[], output_level=0):\n if output_level >= 5:\n logging.info('Reading file: ' + reading_path)\n\n try:\n events = nordic_reader.read_nordic(reading_path, True) # Events tuple: (event.Catalog, [waveforms file names])\n except nordic_reader.NordicParsingError as error:\n if output_level >= 2:\n logging.warning('In ' + reading_path + ': ' + str(error))\n return -1\n except ValueError as error:\n if output_level >= 2:\n logging.warning('In ' + reading_path + ': ' + str(error))\n return -1\n except AttributeError as error:\n if output_level >= 2:\n logging.warning('In ' + reading_path + ': ' + str(error))\n return -1\n\n index = -1\n slices = []\n picks_line = \"STAT SP IPHASW\"\n for event in events[0].events:\n index += 1\n\n f = open(reading_path)\n l = [line.strip() for line in f]\n\n id = None\n picks_started = False\n picks_amount = len(event.picks)\n picks_read = 0\n picks_distance = []\n if config.seconds_high_precision:\n start_seconds = []\n for line in l:\n if picks_started and picks_read < picks_amount and len(line) >= 74:\n try:\n dist = float(line[70:74])\n except ValueError as e:\n dist = None\n picks_distance.append(dist)\n\n if config.seconds_high_precision:\n try:\n seconds = float(line[21:27])\n except ValueError as e:\n seconds = None\n start_seconds.append(seconds)\n\n if len(line) > 73:\n title = line[0:6]\n if title == \"ACTION\":\n id_title = line[56:59]\n if id_title == \"ID:\":\n id_str = line[59:73]\n id = int(id_str)\n\n if len(line) > 25:\n if line[0:len(picks_line)] == picks_line:\n picks_started = True\n\n # Min magnitude check\n if len(event.magnitudes) > 0:\n if event.magnitudes[0].mag < config.min_magnitude:\n continue\n\n # Max depth check\n if len(event.origins) > 0:\n if event.origins[0].depth is None:\n continue\n if event.origins[0].depth > config.max_depth:\n continue\n\n try:\n if len(event.picks) > 0: # Only for files with picks\n if output_level >= 3:\n logging.info('File: ' + reading_path + ' Event #' + str(index) + ' Picks: ' + str(len(event.picks)))\n\n picks_index = -1\n for pick in event.picks:\n if output_level >= 3:\n logging.info('\\t' + str(pick))\n\n picks_index += 1\n if config.seconds_high_precision:\n if picks_index < len(start_seconds):\n start_seconds_pick = start_seconds[picks_index]\n else:\n start_seconds_pick = pick.time.second\n print(\"OUT OF BOUNDS START SECONDS PICK\")\n print(\"FILE: \" + reading_path)\n print(\"PICKS: \")\n for pick_print in event.picks:\n print(str(pick_print))\n else:\n start_seconds_pick = pick.time.seconds\n pick_time = UTCDateTime(pick.time.year, pick.time.month, pick.time.day, pick.time.hour,\n pick.time.minute, start_seconds_pick)\n\n if picks_index < len(picks_distance) and picks_distance[picks_index] is not None:\n if picks_distance[picks_index] > config.max_dist:\n continue\n\n # Check phase\n if pick.phase_hint != 'S' and pick.phase_hint != 'P':\n logging.info('\\t' + 'Neither P nor S phase. 
Skipping.')\n continue\n\n if output_level >= 3:\n logging.info('\\t' + 'Slices:')\n\n # Checking archives\n found_archive = False\n if len(archive_definitions) > 0:\n station = pick.waveform_id.station_code\n station_archives = seisan.station_archives(archive_definitions, station)\n\n channel_slices = []\n for x in station_archives:\n if x[4] <= pick_time:\n if x[5] is not None and pick_time > x[5]:\n continue\n else:\n archive_file_path = seisan.archive_path(x, pick_time.year, pick_time.julday,\n config.archives_path, output_level)\n\n if os.path.isfile(archive_file_path):\n try:\n arch_st = read(archive_file_path)\n except TypeError as error:\n if output_level >= 2:\n logging.warning('In ' + archive_file_path + ': ' + str(error))\n return -1\n\n # arch_st.normalize(global_max=config.global_max_normalizing) # remove that\n # arch_st.filter(\"highpass\", freq=config.highpass_filter_df) # remove that\n # line later\n for trace in arch_st:\n pick_start_time = pick_time\n if trace.stats.starttime > pick_time or pick_time + slice_duration >= trace.stats.endtime:\n logging.info('\\t\\tArchive ' + archive_file_path +\n ' does not cover required slice interval')\n continue\n\n shifted_time = pick_time - config.static_slice_offset\n end_time = shifted_time + slice_duration\n\n found_archive = True\n\n trace_slice = trace.slice(shifted_time, end_time)\n if output_level >= 3:\n logging.info('\\t\\t' + str(trace_slice))\n\n trace_file = x[0] + str(x[4].year) + str(x[4].julday) + x[1] + x[2] + x[3]\n event_id = x[0] + str(x[4].year) + str(x[4].julday) + x[2] + x[3]\n slice_name_station_channel = (trace_slice, trace_file, x[0], x[1], event_id,\n pick.phase_hint, id_str)\n\n # print(\"ID \" + str(id_str))\n # if id_str == '20140413140958':\n # print(x[0])\n # if True:#x[0] == 'NKL':\n # trace.integrate()\n # trace_slice.integrate()\n # trace.normalize()\n # trace_slice.normalize()\n # print('FOUND ID! NORMALIZED')\n # print('ARCHIVE: ' + archive_file_path)\n # print('FILE: ' + trace_file)\n # print('SLICE: ' + str(trace_slice))\n # print('TIME: ' + str(shifted_time) + ' till ' + str(end_time))\n # print('TRACE: ' + str(trace))\n # print('DATA: ' + str(trace_slice.data))\n\n # trace_slice.filter(\"highpass\", freq=config.highpass_filter_df)\n # patho = \"/seismo/seisan/WOR/chernykh/plots/part/\"\n # patho2 = \"/seismo/seisan/WOR/chernykh/plots/whole/\"\n\n # plt.plot(trace_slice.data)\n # plt.ylabel('Amplitude')\n # plt.savefig(patho + trace_file)\n # plt.figure()\n\n # plt.plot(trace.data)\n # plt.ylabel('Amplitude')\n # plt.savefig(patho2 + trace_file)\n # plt.figure()\n\n if len(trace_slice.data) >= 400:\n channel_slices.append(slice_name_station_channel)\n\n # Read and slice waveform\n if found_archive:\n if len(channel_slices) > 0:\n slices.append(channel_slices)\n continue\n\n except ValueError as error:\n if output_level >= 2:\n logging.warning('In ' + reading_path + ': ' + str(error))\n continue\n\n return sort_slices(slices)",
"def audition(self, notes):\n from picomusic.note import Note\n from picomusic.part import Part\n from picomusic.phrase import Phrase\n from picomusic.stagemanager import StageManager\n if isinstance(notes, (Note, str)):\n notes = [notes]\n pitches = self.default_tuning.pitches\n notes = [\n Note(pitches[note]) if isinstance(note, str) else note\n for note in notes\n ]\n manager = StageManager()\n stage = manager.audition\n part = Part(self)\n phrase = Phrase()\n for note in notes:\n phrase.place(0, 0, part, note)\n stage.timeline.clear()\n stage.timeline.place(phrase)\n stage.play_once()",
"def process(self, scale, track, note_list, t_start, slot_duration):\n\n from .note import Note\n\n self._mod.scale = scale\n self._mod.track = track\n\n # notes is like: [n1, n2, n3], [n4], [], [n5, n6]\n # for each slot, we divide it by _divide_\n # record the first note start time and first note end time\n # get the delta between start and end, divide by _divide_\n # tick through the start to end times incrementing by delta/_divide_ (new_note_width)\n # at each step, adjust by the values in slots, as a mod expression\n # compute the new note list for this particular slot\n # move to the next slot\n\n new_note_list = []\n applies_to = self.applies_to\n\n # TODO: consider a roller option that does not reset at the pattern boundary, but survives between patterns?\n # could be musically interesting for odd lengths\n\n #slot_modifications = roller(self.slots)\n\n #slot_duration = clip.\n\n start_time = t_start\n\n for notes2 in note_list:\n\n (actual_notes, is_chord) = _expand_notes(notes2)\n\n divide = self.divide\n\n if divide is None:\n # constructs an arp like transform that plays every note within the given slot time\n divide = len(actual_notes)\n\n skip = False\n chord_skip = False\n if is_chord:\n # leave chords unaffected if requested\n if applies_to not in [ BOTH, CHORDS ]:\n skip = True\n chord_skip = True\n divide = 1\n else:\n # leave notes unaffected if requested\n if applies_to not in [ BOTH, NOTES ]:\n skip = True\n divide = 1\n\n\n\n if chord_skip:\n new_note_list.append(actual_notes)\n\n else:\n\n notes = actual_notes\n\n #new_notes = []\n\n\n if not notes: # None + len == 0:\n # we don't attempt to transform rests\n new_note_list.append([])\n start_time = start_time + slot_duration\n continue\n\n # compute the new time information for the divided notes\n\n\n new_delta = round(slot_duration / divide)\n\n # roll_notes picks values off the incoming note/chord list, it happens once each time a 'divide'\n # is looped through\n roll_notes = roller(notes)\n\n i_ct = 0\n\n for _ in range(0, divide):\n\n i_ct = i_ct + 1\n\n # grab a note that is playing from all notes that are playing\n which_note = next(roll_notes) # .copy()\n\n # apply the new time information\n\n which_slot = next(self._slot_mods)\n\n # calculate the new note using the mod expression\n\n if not skip:\n final_note = self._mod.do(which_note, which_slot)\n if final_note is None:\n continue\n else:\n # this handles if the transform was set to skip chords or skip individual notes\n final_note = which_note.copy()\n\n adjust_notes = [ final_note ]\n\n if type(final_note) == Note:\n final_note.start_time = start_time\n final_note.end_time = round(start_time + (i_ct * new_delta))\n final_note.length = new_delta\n else:\n # the transform *produced* a new chord\n for x in final_note.notes:\n x.start_time = start_time\n x.end_time = round(start_time + (i_ct * new_delta))\n x.length = new_delta\n\n start_time = start_time + new_delta\n\n # we return an array here based on further code in the pipeline expecting one\n # this could probably use some cleanup for consistency. The final_note object\n # can technically be a Chord and not a note.\n\n if type(final_note) == Note:\n new_note_list.append([final_note])\n else:\n new_note_list.append(final_note.notes)\n\n # the new note list is the result of applying the transform. 
Because of the divides, the new\n # note list can be longer than the incoming note list, but each note in the list has time information.\n #\n # [[n1],[n2],[n3],[n4],[n5]]\n\n #print(\"------> NNL=%s\" % new_note_list)\n return new_note_list"
]
| [
"0.6513883",
"0.64199954",
"0.63684255",
"0.6252008",
"0.6139959",
"0.6108248",
"0.6106231",
"0.6080406",
"0.6077858",
"0.59736735",
"0.5935756",
"0.5932142",
"0.58901197",
"0.588345",
"0.5838785",
"0.5838774",
"0.583514",
"0.581392",
"0.58096606",
"0.57892066",
"0.5775122",
"0.57748866",
"0.5771851",
"0.57359034",
"0.57208943",
"0.5718384",
"0.5694328",
"0.5674219",
"0.56693745",
"0.5641896"
]
| 0.6661406 | 0 |
this method tries to read pitches from a sound wave file, producing a list of dicts of pitch and confidence | def read_pitch_from_sound_file(filename: str, samplerate: int = DEFAULT_SAMPLE_RATE):
    if not os.path.isfile(filename):
raise Exception('File not found with filename = %s' % filename)
print("====> reading pitch from sound file")
win_s = 4096 // DOWN_SAMPLE # fft size
hop_s = 512 // DOWN_SAMPLE # hop size
s = source(filename, samplerate, hop_s)
samplerate = s.samplerate
tolerance = 0.8
pitch_o = pitch("yin", win_s, hop_s, samplerate)
pitch_o.set_unit("midi")
pitch_o.set_tolerance(tolerance)
result = []
# total number of frames read
total_frames = 0
while True:
samples, read = s()
        # the pitch value is not rounded, and zero values occur when no pitch is detected
that_pitch = pitch_o(samples)[0]
confidence = pitch_o.get_confidence()
result.append(dict(time=total_frames / float(samplerate), pitch=that_pitch, confidence=confidence))
total_frames += read
if read < hop_s:
break
group_result_with_log_density = compute_density_from_pitch_result(result)
density_level_list = compute_density_level(group_result_with_log_density, result[len(result) - 1]['time'])
print("====> density level list length %s" % len(density_level_list))
proportion_list = get_emphasis_start_times(group_result_with_log_density, result[len(result) - 1]['time'])
print("====> emphasis proportion list length = %d" % len(proportion_list))
return dict(pitch_result=result, emphasis_proportion_list=proportion_list, density_level_list=density_level_list) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse(cls, file: Keyvalues) -> Dict[str, 'Sound']:\n sounds = {}\n for snd_prop in file:\n volume = split_float(\n snd_prop, 'volume',\n VOLUME.__getitem__,\n 1.0,\n )\n pitch = split_float(\n snd_prop, 'pitch',\n Pitch.__getitem__,\n 100.0,\n )\n\n if 'soundlevel' in snd_prop:\n level = split_float(\n snd_prop, 'soundlevel',\n Level.__getitem__,\n Level.SNDLVL_NORM,\n )\n elif 'attenuation' in snd_prop:\n atten_min, atten_max = split_float(\n snd_prop, 'attenuation',\n ATTENUATION.__getitem__,\n ATTENUATION['ATTN_IDLE'],\n )\n # Convert to a soundlevel.\n # See source_sdk/public/soundflags.h:ATTN_TO_SNDLVL()\n level = (\n (50.0 + 20.0 / atten_min) if atten_min else 0.0,\n (50.0 + 20.0 / atten_max) if atten_max else 0.0,\n )\n else:\n level = (Level.SNDLVL_NORM, Level.SNDLVL_NORM)\n\n # Either 1 \"wave\", or multiple in \"rndwave\".\n wavs: List[str] = []\n for prop in snd_prop:\n if prop.name == 'wave':\n wavs.append(prop.value)\n elif prop.name == 'rndwave':\n for subprop in prop:\n wavs.append(subprop.value)\n\n channel_str = snd_prop['channel', 'CHAN_AUTO'].upper()\n channel: Union[int, Channel]\n if channel_str.startswith('CHAN_'):\n channel = Channel(channel_str)\n else:\n channel = int(channel_str)\n\n sound_version = snd_prop.int('soundentry_version', 1)\n\n if 'operator_stacks' in snd_prop:\n if sound_version == 1:\n raise ValueError(\n 'Operator stacks used with version '\n f'less than 2 in \"{snd_prop.real_name}\"!'\n )\n\n start_stack, update_stack, stop_stack = (\n Keyvalues(stack_name, [\n prop.copy()\n for prop in\n snd_prop.find_children('operator_stacks', stack_name)\n ])\n for stack_name in\n ['start_stack', 'update_stack', 'stop_stack']\n )\n else:\n start_stack, update_stack, stop_stack = [None, None, None]\n\n sounds[snd_prop.name] = Sound(\n snd_prop.real_name,\n wavs,\n volume,\n channel,\n level,\n pitch,\n start_stack,\n update_stack,\n stop_stack,\n sound_version == 2,\n )\n return sounds",
"def load_wav(file_path):\n sample_rate, data = wavfile.read(file_path)\n return data, sample_rate",
"def analyzeWAV(inputFile):\n data, fs, nbits = audiolab.wavread(inputFile)\n samplingRate = fs\n return [data, samplingRate]",
"def specpolfinalstokes(infile_list,polcal='polcal.txt',logfile='salt.log',debug=False):\n\n patternlist = open(datadir+'wppaterns.txt','r').readlines()\n patternpairs = dict(); patternstokes = dict()\n for p in patternlist:\n if p.split()[0] == '#': continue\n patternpairs[p.split()[0]]=(len(p.split())-3)/2\n patternstokes[p.split()[0]]=int(p.split()[1])\n wav_l,heff_l,hpa_l,qeff_l = np.loadtxt(datadir+polcal,dtype=float,unpack=True)\n calversion = open(datadir+polcal, 'r').readlines()[1][2:].rstrip()\n\n with logging(logfile, debug) as log:\n \n # organize data using names\n files = len(infile_list)\n allrawlist = []\n for i in range(files):\n object,config,wvplt,count = os.path.basename(infile_list[i]).split('.')[0].rsplit('_',4)\n if (config[0]!='c')|(wvplt[0]!='h')|(not count.isdigit()):\n log.message('File '+infile_list[i]+' is not a raw stokes file.' , with_header=False) \n continue\n allrawlist.append([i,object,config,wvplt,count])\n configlist = sorted(list(set(ele[2] for ele in allrawlist))) # unique configs\n\n # correct raw stokes for track (TBS)\n\n # do one config at a time, since different configs may have different number of wavelengths\n for conf in configlist:\n log.message(\"\\nConfiguration: %s\" % conf, with_header=False) \n rawlist = [entry for entry in allrawlist if entry[2]==conf]\n for col in (4,3,1,2): rawlist = sorted(rawlist,key=operator.itemgetter(col)) \n rawstokes = len(rawlist)\n cols = pyfits.open(infile_list[rawlist[0][0]])['SCI'].data.shape[-1]\n stokes_jsw = np.zeros((rawstokes,2,cols)); \n var_jsw = np.zeros_like(stokes_jsw); bpm_jsw = np.zeros_like(stokes_jsw).astype(int)\n wav_jw = np.zeros((rawstokes,cols))\n comblist = []\n # get data\n for j in range(rawstokes):\n i,object,config,wvplt,count = rawlist[j]\n if j==0:\n lampid = pyfits.getheader(infile_list[i],0)['LAMPID'].strip().upper()\n telpa = float(pyfits.getheader(infile_list[i],0)['TELPA'])\n if lampid==\"NONE\":\n pacaltype = \"Equatorial\"\n hpa_l -= (telpa % 180)\n else:\n pacaltype =\"Instrumental\"\n calinfo = (pacaltype+' '+calversion)\n log.message(' Calibration: '+calinfo, with_header=False) \n \n wppat = pyfits.getheader(infile_list[i],0)['WPPATERN']\n wav0 = pyfits.getheader(infile_list[i],'SCI')['CRVAL1']\n dwav = pyfits.getheader(infile_list[i],'SCI')['CDELT1']\n stokes_jsw[j] = pyfits.open(infile_list[i])['SCI'].data.reshape((2,-1))\n var_jsw[j] = pyfits.open(infile_list[i])['VAR'].data.reshape((2,-1))\n bpm_jsw[j] = pyfits.open(infile_list[i])['BPM'].data.reshape((2,-1))\n wav_jw[j] = np.mgrid[wav0:(wav0+cols*dwav):dwav]\n if int(count)==1:\n comblist.append((j,object,config,wvplt,count,wppat))\n else:\n comblist[-1] = (j,object,config,wvplt,count,wppat)\n\n # combine multiple instances (count > 1)\n combstokes = len(comblist)\n stokes_ksw = np.zeros((combstokes,2,cols)); \n var_ksw = np.zeros_like(stokes_ksw)\n bpm_ksw = np.zeros_like(stokes_ksw).astype(int)\n wav_kw = np.zeros((combstokes,cols))\n chisqstokes_kw = np.zeros_like(wav_kw)\n obslist = []\n obsobject = ''\n obsconfig = ''\n chisqlist = [[]]\n for k in range(combstokes):\n j,object,config,wvplt,count,wppat = comblist[k]\n stokes_ksw[k] = stokes_jsw[j-int(count)+1:j+1].sum(axis=0)\n var_ksw[k] = var_jsw[j-int(count)+1:j+1].sum(axis=0) \n bpm_ksw[k] = (bpm_jsw[j-int(count)+1:j+1].sum(axis=0) > 0).astype(int)\n wav_kw[k] = wav_jw[j]\n\n # compute chisq/dof for multiple instances\n if int(count) > 1:\n combstokes_w = np.zeros(cols)\n bok = (bpm_ksw[k,1] == 0) \n combstokes_w[bok] = 
stokes_ksw[k,1,bok]/stokes_ksw[k,0,bok]\n for jj in range(j-int(count)+1,j+1):\n stokes_w = np.zeros(cols); errstokes_w = np.zeros_like(stokes_w)\n stokes_w[bok] = stokes_jsw[jj,1,bok]/stokes_jsw[jj,0,bok]\n errstokes_w[bok] = np.sqrt(var_jsw[jj,1,bok]/(stokes_jsw[jj,0,bok])**2)\n chisqstokes_kw[k,bok] += ((stokes_w[bok]-combstokes_w[bok])/errstokes_w[bok])**2\n chisqstokes_kw[k] /= int(count)-1\n chisqstokes = chisqstokes_kw[k].sum()/bok.sum()\n chisqlist[-1].append(chisqstokes)\n log.message(\" Chisq/dof Filter Pair %s: %7.2f\" % (wvplt,chisqstokes), with_header=False)\n if ((object != obsobject) | (config != obsconfig)):\n obslist.append([k,object,config,wppat,1])\n chisqlist.append([])\n obsobject = object; obsconfig = config\n else:\n obslist[-1][4] +=1\n \n # for each obs combine stokes, apply efficiency and PA calibration as appropriate for pattern, and save\n obss = len(obslist)\n for obs in range(obss):\n k,object,config,wppat,pairs = obslist[obs]\n obsname = object+\"_\"+config\n log.message(\"\\n Observation: %s\" % obsname, with_header=False)\n# print k,object,config,wppat,pairs\n finstokes = patternstokes[wppat]\n if pairs != patternpairs[wppat]:\n log.message(' Not a complete pattern, skipping observation', with_header=False) \n continue\n stokes_fw = np.zeros((finstokes,cols))\n var_fw = np.zeros_like(stokes_fw)\n ok_fw = bpm_ksw[k:k+pairs,:].sum(axis=0) == 0\n ok_w = ok_fw.all(axis=0)\n bpm_fw = np.repeat((np.logical_not(ok_w))[None,:],finstokes,axis=0)\n stokes_fw[0] = stokes_ksw[k:k+pairs,0].sum(axis=0)/pairs\n var_fw[0] = var_ksw[k:k+pairs,0].sum(axis=0)/pairs**2 \n\n if wppat.count('Linear'):\n var_fw = np.vstack((var_fw,np.zeros(cols))) # add QU covariance\n if wppat=='Linear':\n stokes_fw[1:,ok_w] = stokes_ksw[k:k+2,1,ok_w]*(stokes_fw[0,ok_w]/stokes_ksw[k:k+2,0,ok_w])\n var_fw[1:3,ok_w] = var_ksw[k:k+2,1,ok_w]*(stokes_fw[0,ok_w]/stokes_ksw[k:k+2,0,ok_w])**2\n elif wppat=='Linear-Hi':\n # for Linear-Hi, must go to normalized stokes in order for the pair combination to cancel systematic errors\n nstokes_pw = np.zeros((pairs,cols)); nvar_pw = np.zeros((pairs,cols))\n nstokes_fw = np.zeros((finstokes,cols)); nvar_fw = np.zeros((finstokes+1,cols))\n nstokes_pw[:,ok_w] = stokes_ksw[k:k+pairs,1,ok_w]/stokes_ksw[k:k+pairs,0,ok_w]\n nvar_pw[:,ok_w] = var_ksw[k:k+pairs,1,ok_w]/(stokes_ksw[k:k+pairs,0,ok_w])**2\n if debug: \n np.savetxt(obsname+\"_nstokes.txt\",np.vstack((ok_w.astype(int),nstokes_pw)).T,fmt=\"%3i \"+4*\"%10.6f \")\n np.savetxt(obsname+\"_nvar.txt\",np.vstack((ok_w.astype(int),nvar_pw)).T,fmt=\"%3i \"+4*\"%14.9f \")\n nstokes_fw[1] = 0.5*(nstokes_pw[0] + (nstokes_pw[1]-nstokes_pw[3])/np.sqrt(2.))\n nstokes_fw[2] = 0.5*(nstokes_pw[2] + (nstokes_pw[1]+nstokes_pw[3])/np.sqrt(2.))\n nvar_fw[1] = 0.25*(nvar_pw[0] + (nvar_pw[1]+nvar_pw[3])/2.)\n nvar_fw[2] = 0.25*(nvar_pw[2] + (nvar_pw[1]+nvar_pw[3])/2.)\n nvar_fw[3] = 0.25*((nvar_pw[1] - nvar_pw[3])/2.)\n stokes_fw[1:] = nstokes_fw[1:]*stokes_fw[0]\n var_fw[1:] = nvar_fw[1:]*stokes_fw[0]**2\n chisqq = ((nstokes_pw[0,ok_w] - nstokes_fw[1,ok_w])**2/nvar_fw[1,ok_w]).sum()/ok_w.sum() \n chisqu = ((nstokes_pw[2,ok_w] - nstokes_fw[2,ok_w])**2/nvar_fw[2,ok_w]).sum()/ok_w.sum()\n chisqlist[obs].append(chisqq)\n chisqlist[obs].append(chisqu)\n log.message(\" Chisq/dof Linear-Hi Q,U: %7.2f %7.2f\" % (chisqq,chisqu), with_header=False) \n\n # calculate, print estimated systematic error from chisq mean\n if len(chisqlist[obs]):\n chisqdof = np.array(chisqlist[obs]).mean()\n dofs = float(ok_fw[0].sum())\n chisqdoferr = 
np.sqrt(2./dofs)\n syserr = 0. # estimate systematic error using noncentral chisq distribution\n if (chisqdof - 1.) > 3.*chisqdoferr:\n nvar_fw = np.zeros_like(var_fw)\n nvar_fw[:,ok_fw[0]] = var_fw[:,ok_fw[0]]/stokes_fw[0,ok_fw[0]]**2\n syserr = np.sqrt(dofs*(chisqdof - 1.)/(1./nvar_fw[1,ok_fw[1]]).sum())\n print syserr \n \n log.message((\" Mean chisq/dof: %5.2f Estimated sys %%error: %5.2f\") % \\\n (chisqdof,100.*syserr), with_header=False)\n\n heff_w = interp1d(wav_l,heff_l,kind='cubic')(wav_kw[k])\n par_w = -interp1d(wav_l,hpa_l,kind='cubic')(wav_kw[k])\n c_w = np.cos(2.*np.radians(par_w)); s_w = np.sin(2.*np.radians(par_w))\n stokes_fw[1:] /= heff_w\n var_fw[1:] /= heff_w**2\n stokes_fw[1:] = stokes_fw[1]*c_w - stokes_fw[2]*s_w , \\\n stokes_fw[1]*s_w + stokes_fw[2]*c_w\n var_fw[1:3] = var_fw[1]*c_w**2 + var_fw[2]*s_w**2 , \\\n var_fw[1]*s_w**2 + var_fw[2]*c_w**2\n var_fw[3] = c_w*s_w*(var_fw[1] - var_fw[2]) + (c_w**2-s_w**2)*var_fw[3]\n\n # save final stokes fits file\n infile = infile_list[rawlist[comblist[k][0]][0]]\n hduout = pyfits.open(infile)\n hduout['SCI'].data = stokes_fw.astype('float32').reshape((3,1,-1))\n hduout['SCI'].header.update('CTYPE3','I,Q,U')\n hduout['VAR'].data = var_fw.astype('float32').reshape((4,1,-1))\n hduout['VAR'].header.update('CTYPE3','I,Q,U,QU')\n\n hduout['BPM'].data = bpm_fw.astype('uint8').reshape((3,1,-1))\n hduout['BPM'].header.update('CTYPE3','I,Q,U')\n hduout[0].header.update('POLCAL',calinfo)\n if len(chisqlist[obs]): \n hduout[0].header.update('SYSERR',100.*syserr, \\\n 'estimated % systematic error')\n outfile = object+'_'+config+'_stokes.fits'\n hduout.writeto(outfile,clobber=True,output_verify='warn')\n log.message('\\n '+outfile+' Stokes I,Q,U', with_header=False)\n \n# elif wppat.count('Circular'): TBS \n\n# elif wppat=='All-Stokes': TBS\n\n return",
"def load_wav(wav_file):\n rate, data = wavfile.read(wav_file)\n return rate, data",
"def load_wav_file(file_path: str):\n rate, data = wavfile.read(file_path)\n return rate, data",
"def praat_analyze_pitch(audio_file):\n\n\tpraatpath = path.abspath('Praat.app/Contents/MacOS/Praat')\t# locate Praat executable\n\tpl = PraatLoader(praatpath=praatpath)\t# create instance of PraatLoader object\n\t\n\tpraat_output = pl.run_script('pitch.praat', audio_file)\t# run pitch script in Praat\n\tpitch_data = pl.read_praat_out(praat_output) # turn Praat's output into Python dict\n\n\treturn pitch_data",
"def read(filename):\n\n fileName, fileExtension = os.path.splitext(filename)\n wav_filename = filename\n rate, data = scipy.io.wavfile.read(str(wav_filename)) # the data is read in its native format\n if data.dtype =='int16':\n data = numpy.cast['float'](data)\n return [rate,data]",
"def load_data(self):\r\n if not os.path.exists(self.origin_dir):\r\n raise ValueError(f\"Folder {self.origin_dir} not exists!\")\r\n\r\n # loop folders\r\n listglobs = glob.glob(os.path.join(self.origin_dir)+r\"[0-9]*\")\r\n count = 0\r\n temp = []\r\n for x in listglobs:\r\n\r\n # step1, get speaker id md5\r\n user_id = x.rsplit(\"\\\\\")[-1]\r\n speaker_id = hashlib.md5(user_id.encode(\"utf-8\")).hexdigest()\r\n self.wav_desc[\"speaker_id\"] = speaker_id\r\n print(\"1=>\", x)\r\n\r\n for k in [\"你好小顺\", \"小顺小顺\"]:\r\n paths = os.path.join(x, k)\r\n print(\"2=>\", paths)\r\n # step2, parse speaker info\r\n with open(os.path.join(paths, \"spearker_info.txt\"), 'r', encoding=\"utf-8\") as f:\r\n line = f.readline()\r\n arrs = line.strip().split(\"\\\\t\")\r\n if len(arrs) != 3:\r\n raise ValueError(\"Required three field in speaker_info<id>\\t<gender>\\t<age>\")\r\n self.wav_desc[\"gender\"] = arrs[1].strip(\"<\").rstrip(\">\")\r\n self.wav_desc[\"age\"] = arrs[-1].strip(\"<\").rstrip(\">\")\r\n\r\n # step3, parse wav detailed information\r\n # key: wav_id, value: info_list, [keyword, noise_type, distance, speed,user_id, equipment]\r\n wav_infos_dict = {}\r\n with open(os.path.join(paths, \"wav_desc.txt\"), \"r\", encoding=\"utf-8\") as f:\r\n for line in f.readlines():\r\n arrs = line.strip().split(\"\\\\t\")\r\n wav_infos_dict[arrs[0].strip(\"<\").rstrip(\">\")] = [x.strip(\"<\").rstrip(\">\") for\r\n x in arrs[1:]]\r\n\r\n print(f\"Parse wav info finished find {len(wav_infos_dict)} infos.\")\r\n\r\n # Step4, audio with background noise and without nose, which was back_wav and wav_data folder\r\n for wav_folder in [\"back_wav\", \"wav_data\"]:\r\n audio_lists = glob.glob(os.path.join(paths + f\"\\\\{wav_folder}\", \"*.wav\"))\r\n for xa in audio_lists:\r\n # copy data to\r\n wav_id, user_id = get_wav_name(xa)\r\n # print(wav_id, user_id)\r\n # create md5 id\r\n utt_id = hashlib.md5(xa.encode(\"utf-8\")).hexdigest()\r\n # speaker_id = hashlib.md5(user_id.encode(\"utf-8\")).hexdigest()\r\n # print(utt_id, speaker_id)\r\n # collect all info for an audio\r\n self.wav_desc[\"utt_id\"] = utt_id\r\n infos = wav_infos_dict[wav_id]\r\n if len(infos) != 6:\r\n print(\"==>\", infos)\r\n self.wav_desc[\"keyword_id\"] = self.keywords_dict[infos[0]]\r\n self.wav_desc[\"noise_type\"] = infos[1]\r\n self.wav_desc[\"distance\"] = infos[2]\r\n self.wav_desc[\"record_speed\"] = infos[3]\r\n self.wav_desc[\"speaker_id\"] = speaker_id\r\n self.wav_desc[\"record_equipment\"] = infos[5]\r\n\r\n # record wav information\r\n t_infos = copy.deepcopy(self.wav_desc)\r\n self.all_wavs.append(t_infos)\r\n count += 1\r\n temp.append(utt_id)\r\n\r\n # copy data to resource folder\r\n dest = shutil.copy2(xa, os.path.join(self.dest_dir, f\"audios/{utt_id}.wav\"))\r\n set_index = which_set(dest, 20, 30)\r\n self.data_index[set_index].append(t_infos)\r\n\r\n # write wav information into json file\r\n with open(os.path.join(self.dest_dir, \"resources/wav_desc.json\"), \"w\", encoding=\"utf-8\") as f:\r\n json.dump(self.all_wavs, f, ensure_ascii=False, indent=True)\r\n print(f\"total wavs:{count}, total ids:{len(temp)}\")\r\n for set_index in self.data_index.keys():\r\n with open(os.path.join(self.dest_dir, f\"resources/p_{set_index}.json\"), \"w\", encoding=\"utf-8\") as f:\r\n json.dump(self.data_index[set_index], f, ensure_ascii=False, indent=True)\r\n print(f\"Collect {set_index} data total {len(self.data_index[set_index])} samples.\")",
"def inputwav(filename):\n data, sr = sf.read(filename)\n print('Decoding \"'+filename+'\"...')\n print('Sample rate is '+str(sr)+'...')\n try:\n ch=len(data[0,])\n except:\n ch=1\n print('File contains '+str(ch)+' audio channel(s)...')\n #Reshape the data so other functions can interpret the array if mono.\n #basically transposing the data\n if ch==1:\n data=data.reshape(-1,1)\n n=len(data)\n #This prevents log(data) producing nan when data is 0\n data[np.where(data==0)]=0.00001\n #convert to dB\n data_dB=20*np.log10(abs(data))\n return n, data,data_dB,sr, ch",
"def read(self, path):\n pbase = os.path.splitext(path)[0]\n gsid = pbase.split('/')[-2]\n gender, sid = gsid[0], gsid[1:]\n assert sid in self._spkr_table\n phoneseq = phnread(pbase+'.PHN')\n wrdseq = phnread(pbase+'.WRD')\n transcrpt = txtread(pbase+'.TXT')\n sample = TIMITSpeech(\n *audioread(path), speaker=sid, gender=gender,\n transcript=transcrpt, phonemeseq=phoneseq,\n wordseq=wrdseq\n )\n #sample.phonemeseq = [\n # (t, PHONETABLE[p]) for t, p in sample.phonemeseq]\n return sample",
"def load_wav_dic(wav_dic):\n noisy_path, clean_path = wav_dic[\"noisy\"], wav_dic[\"clean\"]\n noisy, fs = sf.read(noisy_path, dtype=\"float32\")\n clean, fs = sf.read(clean_path, dtype=\"float32\")\n return noisy, clean, fs",
"def detect_pitch(\n source,\n method=\"default\",\n tolerance=0.8, # got value from aubio Python demos\n silence=-70.0,\n unit=\"Hz\",\n buf_size=1024,\n hop_size=256,\n samplerate=0,\n channels=0,\n):\n if not isinstance(source, aubio.source):\n source = aubio.source(\n source, hop_size=hop_size, samplerate=samplerate, channels=channels\n )\n\n with source:\n pitchdetect = aubio.pitch(\n method=method,\n buf_size=buf_size,\n hop_size=hop_size,\n samplerate=source.samplerate,\n )\n pitchdetect.set_tolerance(tolerance)\n pitchdetect.set_silence(silence)\n pitchdetect.set_unit(unit)\n\n results = []\n nframes = 0\n\n while True:\n block, read = source()\n confidence = pitchdetect.get_confidence()\n\n results.append((nframes, pitchdetect(block)[0], confidence))\n\n nframes += read\n if read < source.hop_size:\n break\n\n return results",
"def read_audio(self, path_to_wav):\n y, sr = librosa.load(path_to_wav, sr=None)\n return (y, sr)",
"def get_line_wavelengths():\n line_wavelengths = OrderedDict() ; line_ratios = OrderedDict()\n \n line_wavelengths['PaB'] = [12821]\n line_ratios['PaB'] = [1.]\n line_wavelengths['Ha'] = [6564.61]\n line_ratios['Ha'] = [1.]\n line_wavelengths['Hb'] = [4862.68]\n line_ratios['Hb'] = [1.]\n line_wavelengths['Hg'] = [4341.68]\n line_ratios['Hg'] = [1.]\n line_wavelengths['Hd'] = [4102.892]\n line_ratios['Hd'] = [1.]\n \n line_wavelengths['OIII-4363'] = [4364.436]\n line_ratios['OIII-4363'] = [1.]\n line_wavelengths['OIII'] = [5008.240, 4960.295]\n line_ratios['OIII'] = [2.98, 1]\n \n # Split doublet, if needed\n line_wavelengths['OIII4959'] = [4960.295]\n line_ratios['OIII4959'] = [1]\n line_wavelengths['OIII5007'] = [5008.240]\n line_ratios['OIII5007'] = [1]\n \n line_wavelengths['OII'] = [3727.092, 3729.875]\n line_ratios['OII'] = [1, 1.] \n \n line_wavelengths['OI-6302'] = [6302.046, 6363.67]\n line_ratios['OI-6302'] = [1, 0.33]\n\n line_wavelengths['NeIII'] = [3869]\n line_ratios['NeIII'] = [1.]\n line_wavelengths['NeV'] = [3346.8]\n line_ratios['NeV'] = [1.]\n line_wavelengths['NeVI'] = [3426.85]\n line_ratios['NeVI'] = [1.]\n \n line_wavelengths['SIII'] = [9068.6, 9530.6][::-1]\n line_ratios['SIII'] = [1, 2.44][::-1]\n \n # Split doublet, if needed\n line_wavelengths['SIII9068'] = [9068.6]\n line_ratios['SIII9068'] = [1]\n line_wavelengths['SIII9531'] = [9530.6]\n line_ratios['SIII9531'] = [1]\n \n line_wavelengths['SII'] = [6718.29, 6732.67]\n line_ratios['SII'] = [1., 1.] \n \n line_wavelengths['HeII'] = [4687.5]\n line_ratios['HeII'] = [1.]\n line_wavelengths['HeI-5877'] = [5877.2]\n line_ratios['HeI-5877'] = [1.]\n line_wavelengths['HeI-3889'] = [3889.5]\n line_ratios['HeI-3889'] = [1.]\n line_wavelengths['HeI-1083'] = [10830.]\n line_ratios['HeI-1083'] = [1.]\n \n line_wavelengths['MgII'] = [2799.117]\n line_ratios['MgII'] = [1.]\n \n line_wavelengths['CIV-1549'] = [1549.480]\n line_ratios['CIV-1549'] = [1.]\n line_wavelengths['CIII-1908'] = [1908.734]\n line_ratios['CIII-1908'] = [1.]\n line_wavelengths['OIII-1663'] = [1665.85]\n line_ratios['OIII-1663'] = [1.]\n line_wavelengths['HeII-1640'] = [1640.4]\n line_ratios['HeII-1640'] = [1.]\n \n line_wavelengths['NII'] = [6549.86, 6585.27]\n line_ratios['NII'] = [1., 3]\n line_wavelengths['NIII-1750'] = [1750.]\n line_ratios['NIII-1750'] = [1.]\n line_wavelengths['NIV-1487'] = [1487.]\n line_ratios['NIV-1487'] = [1.]\n line_wavelengths['NV-1240'] = [1240.81]\n line_ratios['NV-1240'] = [1.]\n\n line_wavelengths['Lya'] = [1215.4]\n line_ratios['Lya'] = [1.]\n \n line_wavelengths['Lya+CIV'] = [1215.4, 1549.49]\n line_ratios['Lya+CIV'] = [1., 0.1]\n \n line_wavelengths['Ha+SII'] = [6564.61, 6718.29, 6732.67]\n line_ratios['Ha+SII'] = [1., 1./10, 1./10]\n line_wavelengths['Ha+SII+SIII+He'] = [6564.61, 6718.29, 6732.67, 9068.6, 9530.6, 10830.]\n line_ratios['Ha+SII+SIII+He'] = [1., 1./10, 1./10, 1./20, 2.44/20, 1./25.]\n\n line_wavelengths['Ha+NII+SII+SIII+He'] = [6564.61, 6549.86, 6585.27, 6718.29, 6732.67, 9068.6, 9530.6, 10830.]\n line_ratios['Ha+NII+SII+SIII+He'] = [1., 1./(4.*4), 3./(4*4), 1./10, 1./10, 1./20, 2.44/20, 1./25.]\n \n line_wavelengths['OIII+Hb'] = [5008.240, 4960.295, 4862.68]\n line_ratios['OIII+Hb'] = [2.98, 1, 3.98/6.]\n \n line_wavelengths['OIII+Hb+Ha'] = [5008.240, 4960.295, 4862.68, 6564.61]\n line_ratios['OIII+Hb+Ha'] = [2.98, 1, 3.98/10., 3.98/10.*2.86]\n\n line_wavelengths['OIII+Hb+Ha+SII'] = [5008.240, 4960.295, 4862.68, 6564.61, 6718.29, 6732.67]\n line_ratios['OIII+Hb+Ha+SII'] = [2.98, 1, 3.98/10., 
3.98/10.*2.86*4, 3.98/10.*2.86/10.*4, 3.98/10.*2.86/10.*4]\n\n line_wavelengths['OIII+OII'] = [5008.240, 4960.295, 3729.875]\n line_ratios['OIII+OII'] = [2.98, 1, 3.98/4.]\n \n line_wavelengths['OII+Ne'] = [3729.875, 3869]\n line_ratios['OII+Ne'] = [1, 1./5]\n \n return line_wavelengths, line_ratios",
"def test_random_note_pitch(common_scales, all_pitches):\n\n for key, scales in common_scales.items():\n note = random_note_pitch(scales)\n assert note in all_pitches\n assert isinstance(note, (int, str)) is True",
"def preprocess(self, file_list):\n for strain_name, chromosomes in self.parse_csvs(file_list).iteritems():\n # TODO: remove stuff about \"bad\" (residual heterozygosity)\n bad = False\n for intervals in chromosomes.itervalues():\n for ss, _ in intervals:\n if ss == -999:\n bad = True\n if not bad:\n print 'good', strain_name\n self.sample_dict[strain_name] = self.intervals_and_sources(chromosomes)\n else:\n print 'bad', strain_name\n self.save_sample_dict()",
"def read(self):\n # open the .SPE file\n with open(self._input_file_path, 'rb') as f:\n lines = f.readlines()\n # Create an empty dictionary for the metadata\n metadata_dictionary = {}\n\n # Search through the file for the needed metadata\n metadata_dictionary['date_acquired'] = re.search(b'date=\"(.*?)\"', lines[1])[1].decode('ANSI') \n metadata_dictionary['width'] = int(re.search(b'width=\"(.*?)\"', lines[1])[1])\n metadata_dictionary['height'] = int(re.search(b'height=\"(.*?)\"', lines[1])[1])\n metadata_dictionary['size'] = metadata_dictionary['width']*metadata_dictionary['height']\n metadata_dictionary['exposure_time'] = int(re.search(b'<ExposureTime type=\"Double\">(.*?)</ExposureTime>', lines[1])[1])\n metadata_dictionary['excitation_wavelength'] = float(re.search(b'laserLine=\"(.*?)\"',lines[1])[1])\n metadata_dictionary['center_wavelength'] = float(re.search(b'<CenterWavelength type=\"Double\">(.*?)</CenterWavelength>',lines[1])[1])\n metadata_dictionary['orientation'] = re.search(b'orientation=\"(.*?)\"',lines[1])[1].decode('ANSI')\n\n # Get the wavelength and intensity\n wavelength_string = re.search(b'<Wavelength xml:space=\"preserve\">(.*?)</Wavelength>',lines[1])[1].decode('utf-8')\n wavelength = np.array(wavelength_string.split(','), dtype=np.float64)\n\n f.seek(4100)\n intensity = np.fromfile(f,dtype=np.float32,count=metadata_dictionary['size'])\n\n raman_shift_wavenumbers = 1e7*(1/metadata_dictionary['excitation_wavelength'] - 1/wavelength)\n\n f.close()\n \n # create the sidpy dataset\n data_set = Dataset.from_array(intensity, name='Raman Spectra')\n\n data_set.data_type = 'spectrum'\n data_set.units = 'counts'\n data_set.quantity = 'Intensity'\n\n # set dimensions\n data_set.set_dimension(0, Dimension(raman_shift_wavenumbers, name='Raman Shift',\n units = 'cm-1',\n quantity='Raman shift',\n dimension_type='spectral'))\n data_set.set_dimension(1, Dimension(intensity, name='Intensity',\n units = 'counts',\n quantity='intensity',\n dimension_type='spectral')) \n\n data_set.metadata = metadata_dictionary\n\n return data_set",
"def lookup(self, prevPitches):\n col = self.convertPitches.index(prevPitches)\n pitchList = []\n for pitch in range(len(self.freqTable[col])):\n pitchList.append((self.PITCH_TYPES[pitch], self.freqTable[col][pitch]))\n return pitchList",
"def _read_pha(file):\n with fits.open(file) as hdul:\n data = hdul[1].data\n header_for_livetime = hdul[0].header\n\n return data['channel'], data['counts'], header_for_livetime['LIVETIME']",
"def wav_to_prosodic(path, sr=16000, offset=10):\n sound = parselmouth.Sound(path)\n pitch = sound.to_pitch() #timestep, pitch_floor, pitch_ceiling\n intensity = sound.to_intensity()\n\n features = []\n\n max_time = sound.get_total_duration()\n\n for time in np.arange(0, max_time, 0.001):\n f0 = pitch.get_value_at_time(time)\n f0_nan = 0\n if np.isnan(f0):\n f0 = 0\n f0_nan = 1\n int_db = intensity.get_value(time)\n if np.isnan(int_db):\n int_db = 0\n\n features.append([f0, f0_nan, int_db])\n\n array_feats = np.array(features).T\n\n print(\"SHAPE OF THE FEATURES:\", array_feats.shape)\n assert(not np.any(np.isnan(array_feats)))\n\n return array_feats, max_time",
"def hyou_reader():\n with open(HYOU_FILE_PATH, 'r') as voc_file:\n\n voc_list = []\n lesson_list = []\n\n voc_match = [\n re.compile(r\"^(\\S+)\\s*((\\S*))\\s*〔(\\S*)〕\\s*(\\S+)\"),\n re.compile(r\"^(\\S+)\\s*((\\S*))\\s*(\\S+)\"),\n re.compile(r\"^(\\S+)\\s*〔(\\S+)〕\\s*(\\S+)\"),\n re.compile(r\"^(\\S+)\\s*(\\S+)\")\n ]\n\n voc_key = [\n {\"Voc\": 1, \"Ext\": 2, \"Type\": 3, \"Meaning\": 4},\n {\"Voc\": 1, \"Ext\": 2, \"Type\": 0, \"Meaning\": 3},\n {\"Voc\": 1, \"Ext\": 0, \"Type\": 2, \"Meaning\": 3},\n {\"Voc\": 1, \"Ext\": 0, \"Type\": 0, \"Meaning\": 2},\n ]\n\n match_count = len(voc_match)\n voc_count = 0\n lesson_count = 0\n\n for voc_line in voc_file:\n if voc_line.find(\"第\") != -1 and voc_line.find(\"课\") != -1:\n voc_len = len(voc_list)\n if voc_len > 0:\n lesson_list.append(voc_list)\n\n voc_list = []\n voc_count = 0\n lesson_count = lesson_count + 1\n sound_list = sound_reader(lesson_count)\n elif not voc_line.find(\"----\") != -1 and voc_line != \"\\n\":\n voc_line.strip()\n\n voc_dict = {}\n for i in range(0, match_count):\n voc_group = voc_match[i].match(voc_line)\n if voc_group:\n for key, value in voc_key[i].items():\n if value != 0:\n voc_dict[key] = voc_group.group(value)\n else:\n voc_dict[key] = \"\"\n break\n\n if not voc_dict.has_key(\"Voc\"):\n print voc_line\n continue\n\n voc_dict[\"Time\"] = sound_list[voc_count]\n voc_count = voc_count + 1\n voc_list.append(voc_dict)\n\n voc_len = len(voc_list)\n if voc_len > 0:\n lesson_list.append(voc_list)\n\n return lesson_list",
"def silence_intervals(file_path,file_name):\r\n nsil_start_time=[]\r\n nsil_end_time=[]\r\n sil_start_time=[]\r\n sil_end_time=[]\r\n #read file \r\n audio, sample_rate = librosa.load(os.path.join(file_path,file_name))\r\n \r\n #silence extraction using librosa\r\n nsil_intv=librosa.effects.split(audio, top_db=30).astype('float32') / sample_rate\r\n \r\n #silence extraction using pyAudioanalysis\r\n # [Fs, x] = aIO.readAudioFile(os.path.join(file_path,file_name))\r\n # nsil_intv = np.array(aS.silenceRemoval(x, Fs, 0.020, 0.020, smoothWindow = 0.7, Weight = 0.3, plot = False))\r\n # print \"non-sil segments=\"+str(nsil_intv)\r\n\r\n #silence detection using webrtcvad (voice activity detection)\r\n #nsil_intv=np.array(vad_webrtcvad(file_path,file_name))\r\n\r\n\r\n dur=librosa.get_duration(y=audio, sr=sample_rate)\r\n print nsil_intv\r\n print dur\r\n print sample_rate\r\n curr_sil_start=0.0\r\n curr_sil_end=0.0\r\n for i in range(nsil_intv.shape[0]):\r\n nsil_start_time.append(nsil_intv[i][0])\r\n #sil_start_time=list(np.array(sil_start_time)/sample_rate)\r\n\r\n nsil_end_time.append(nsil_intv[i][1])\r\n #sil_end_time=list(np.array(sil_end_time)/sample_rate)\r\n\r\n for i in range(len(nsil_start_time)):\r\n curr_sil_end=nsil_start_time[i]\r\n sil_start_time.append(str(curr_sil_start))\r\n sil_end_time.append(str(curr_sil_end))\r\n curr_sil_start=nsil_end_time[i]\r\n\r\n print sil_start_time\r\n print sil_end_time\r\n return sil_start_time,sil_end_time",
"def read_process_song(path, window=1, overlap=0, debug=True):\n\n arr_features = []\n\n signal, sr = librosa.load(path)\n signal = signal[:660000]\n\n # Debug process\n if debug:\n print(\"Reading file: {}\".format(path))\n\n # Split songs:\n samples = split_songs(signal, window, overlap)\n\n # Append the result to the data structure\n for s in samples:\n features = get_features(s, sr)\n arr_features.append(features)\n return arr_features",
"def get_pitches(midi, part_id=None):\n try: # file has instrument parts\n s2 = instrument.partitionByInstrument(midi)\n print(len(s2))\n if part_id is not None:\n notes = s2.parts[part_id].recurse()\n pitches = [c.pitches for c in notes]\n pitch_arr = np.array(pitches)[None]\n ps_arr = np.array([pitch.ps for pitch in pitches])\n else:\n pitch_arr = []\n ps_arr = []\n for part in s2:\n notes = part.recurse()\n pitches = [c.pitches for c in notes]\n pss = [pitch.ps for pitch in pitches]\n pitch_arr.append(pitches)\n ps_arr.append(pss)\n pitch_arr = np.array(pitch_arr)\n ps_arr = np.array(ps_arr)\n except: # file has notes in a flat structure\n notes = midi.flat.notes\n pitch_arr = np.array([c.pitches for c in notes])\n ps_arr = np.array([pitch.ps for pitch in pitches])\n return pitch_arr, ps_arr",
"def parse_raw(data):\n for sample in data:\n assert \"src\" in sample\n json_line = sample[\"src\"]\n obj = json.loads(json_line)\n assert \"key\" in obj\n assert \"wav\" in obj\n assert \"txt\" in obj\n key = AishellKeyMapper.encode(obj[\"key\"])\n wav_file = obj[\"wav\"]\n txt = obj[\"txt\"]\n try:\n if \"start\" in obj:\n assert \"end\" in obj\n sample_rate = torchaudio.backend.sox_io_backend.info(wav_file).sample_rate\n start_frame = int(obj[\"start\"] * sample_rate)\n end_frame = int(obj[\"end\"] * sample_rate)\n waveform, _ = torchaudio.backend.sox_io_backend.load(\n filepath=wav_file, num_frames=end_frame - start_frame, frame_offset=start_frame\n )\n else:\n waveform, sample_rate = torchaudio.load(wav_file)\n example = dict(key=key, txt=txt, wav=waveform, sample_rate=sample_rate)\n yield example\n except Exception as ex:\n logging.warning(\"Failed to read {}\".format(wav_file))",
"def read_wave(path):\n with contextlib.closing(wave.open(path, 'rb')) as wf:\n num_channels = wf.getnchannels()\n assert num_channels == 1\n sample_width = wf.getsampwidth()\n assert sample_width == 2\n sample_rate = wf.getframerate()\n assert sample_rate in (8000, 16000, 32000)\n pcm_data = wf.readframes(wf.getnframes())\n return pcm_data, sample_rate",
"def sp_audio_pipeline(wav):\n sig = sb.dataio.dataio.read_audio(wav)\n sig = sig.unsqueeze(0)\n sig = hparams[\"speed_perturb\"](sig)\n sig = sig.squeeze(0)\n return sig",
"def get_note_probabilities(self, peaks):\n notes = []\n \n for p in peaks:\n harmonics = self.get_harmonics(p[0])\n probability = self.test_harmonics(peaks, harmonics)\n \n if probability > self.probability_threshold:\n notes.append(tuple([p[0], probability]))\n\n return notes",
"def getSamples(self, section, pitch, target=\"beats\"):\n sample_list = audio.AudioQuantumList()\n if target == \"beats\":\n sample_list.extend([b for x in section.children() for b in x.children()]);\n elif target == \"bars\":\n sample_list.extend(section.children())\n return sample_list.that(overlap_ends_of(self.original.analysis.segments.that(have_pitch_max(pitch)).that(overlap_starts_of(sample_list))))"
]
| [
"0.60571706",
"0.5847298",
"0.582534",
"0.5804961",
"0.57598567",
"0.5719537",
"0.5706302",
"0.5703949",
"0.56977487",
"0.5689279",
"0.5596877",
"0.55241215",
"0.55162257",
"0.5496917",
"0.54842895",
"0.5484168",
"0.54683024",
"0.5467565",
"0.5427731",
"0.54150516",
"0.5402274",
"0.53967094",
"0.5386984",
"0.5382557",
"0.53659576",
"0.5358054",
"0.53117996",
"0.5309214",
"0.53019106",
"0.5301148"
]
| 0.6643086 | 0 |
Given a defined bpm, generates one bar of heartbeat sound. Because each beat in the heartbeat track has a fixed length, the bpm cannot be too high; the exact upper limit is still undetermined. | def get_one_bar_heart_beat(filename: str, bpm: int):
heart_beat_track = AudioSegment.from_file(file=filename, format='mp3')
heart_beat_1 = heart_beat_track[70:180]
heart_beat_2 = heart_beat_track[380:490]
# AudioSegment.export(part, 'single_heartbeat1.mp3')
    # note: despite its name, this is the duration of one beat (a quarter note) in milliseconds
    tick_per_sec = 60 * 1000 / bpm
    # lay out the beats as quarter notes: each pair of heartbeats covers half a bar,
    # so one full bar contains two sets of heartbeats
result_track = AudioSegment.empty()
# first set
result_track += heart_beat_1
gap = tick_per_sec / 2 - len(result_track)
result_track += AudioSegment.silent(gap)
result_track += heart_beat_2
# fill the gap
gap = tick_per_sec * 2 - len(result_track)
result_track += AudioSegment.silent(gap)
    # second set
result_track += heart_beat_1
gap = tick_per_sec * 2.5 - len(result_track)
result_track += AudioSegment.silent(gap)
result_track += heart_beat_2
    # fill the end gap
gap = tick_per_sec * 4 - len(result_track)
result_track += AudioSegment.silent(gap)
return result_track | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_bpm(duration, numbeats):\n dur = duration / 60\n bpm = numbeats / dur\n print('bpm calculated')\n\n if bpm > 300:\n raise TypeError(\"TOOOOOOO HIGH\")\n\n # except ValueError:\n # pass\n # else:\n # logging.info('Calculated BPM: %s', bpm)\n\n return bpm",
"def get_bpm(self):\n\t\t\n\t\tfft = self.get_fft()\n\t\t\n\t\t# Get the bin numbers of the bounds of the possible allowed heart-rates in\n\t\t# the FFT\n\t\tmin_bin = self.bpm_to_bin(self.min_bpm)\n\t\tmax_bin = self.bpm_to_bin(self.max_bpm)\n\t\t\n\t\t# Find the bin with the highest intensity (the heartbeat)\n\t\tif min_bin == max_bin:\n\t\t\tbest_bin = min_bin\n\t\telse:\n\t\t\tbest_bin = max(range(min_bin, max_bin),\n\t\t\t key=(lambda i: fft[i-1][0]))\n\t\theartrate = self.bin_to_bpm(best_bin)\n\t\tphase = fft[best_bin-1][1]\n\t\t\n\t\t# Produce the FFT data in the format described above\n\t\tfft_data = zip((self.bin_to_bpm(b) for b in range(min_bin, max_bin+1)),\n\t\t fft[min_bin-1:max_bin])\n\t\t\n\t\treturn heartrate, phase, fft_data",
"def beats_to_bpm(beats):\n # if enough beats are found, convert to periods then to bpm\n bpms = 60. / numpy.diff(beats)\n return numpy.median(bpms)",
"def ms_from_bpm(bpm: float) -> float:\n return 240000 / bpm",
"def get_file_bpm(path, params = {}):\n try:\n win_s = params['win_s']\n samplerate = params['samplerate']\n hop_s = params['hop_s']\n except:\n \"\"\"\n # super fast\n samplerate, win_s, hop_s = 4000, 128, 64 \n # fast\n samplerate, win_s, hop_s = 8000, 512, 128\n \"\"\"\n # default:\n samplerate, win_s, hop_s = 44100, 1024, 512\n\n s = source(path, samplerate, hop_s)\n samplerate = s.samplerate\n o = tempo(\"specdiff\", win_s, hop_s, samplerate)\n # List of beats, in samples\n beats = []\n # Total number of frames read\n total_frames = 0\n\n while True:\n samples, read = s()\n is_beat = o(samples)\n if is_beat:\n this_beat = o.get_last_s()\n beats.append(this_beat)\n #if o.get_confidence() > .2 and len(beats) > 2.:\n # break\n total_frames += read\n if read < hop_s:\n break\n\n # Convert to periods and to bpm \n bpms = 60./diff(beats)\n b = median(bpms)\n return b",
"def bpm_to_bin(self, bpm):\n\t\t\n\t\treturn int(float(len(self.buf) * bpm) / float(60.0 * self.fps))",
"def bpm(self, bp: int) -> None:\n if bp > BPM_MAX:\n self._bpm = BPM_MAX\n elif bp < BPM_MIN:\n self._bpm = BPM_MIN\n else:\n self._bpm = bp",
"def bin_to_bpm(self, bin):\n\t\t\n\t\treturn (60.0 * bin * self.fps) / float(len(self.buf))",
"def normalize_bpm(bpm: int):\n return abs(REF_BPM - bpm) + BOTTOM_BPM",
"def estimate_bpm(D):\n if len(D) < 2*ignore:\n return 0\n else:\n return 1/np.mean(np.diff(D))*60",
"def _get_bpm_from_soundstretch(output):\n \n output = output.split(\"\\n\")\n for line in output:\n if 'Detected BPM rate ' in line:\n bpm = line[18:]\n return float(bpm)\n return None # Could not parse output",
"def onetwo_beep_gen(numbeep, interval, finalstim_tc, finalstim_nb):\n##### if one beep:#############################################################\n\n # 8ms + 50ms + 8ms = 195 + 1220 + 195 which is a total of 1610 samples\n interval = 0.050\n gap = np.zeros(24414. * interval, float)\n onebeep_tc = np.concatenate((finalstim_tc, gap), axis=1)\n onebeep_nb = np.concatenate((finalstim_nb, gap), axis=1)\n return onebeep_tc, onebeep_nb\n##### if two beep:#############################################################\n\n twobeep_tc = np.append(onebeep_tc, finalstim_tc, axis=1)\n twobeep_nb = np.append(onebeep_nb, finalstim_tc, axis=1)\n return twobeep_tc, twobeep_nb",
"def fm_bell_note(sr, note, duration):\n ## TODO: Fill this in\n return None # This is a dummy value",
"def ring_bells():\n # Need to get the pattern for this time slot and apply it.\n curTime = time.strftime(\"%H:%M\")\n if curTime not in jsonConfig[\"schedules\"][curSchedule]:\n logging.error(\"Couldn't find time record for time \" + curTime + \" in schedule \" + curSchedule)\n return\n\n # Obtain the pattern to use.\n pattern = jsonConfig[\"schedules\"][curSchedule][curTime]\n if pattern not in jsonConfig[\"patterns\"]:\n logging.error(\"Could not find pattern '\" + pattern + \"'.\")\n return\n\n # Play the pattern.\n logging.debug(\"Playing bell: \" + pattern)\n bellRings = jsonConfig[\"patterns\"][pattern][\"rings\"]\n bellDuration = jsonConfig[\"patterns\"][pattern][\"duration\"]\n bellSpacing = jsonConfig[\"patterns\"][pattern][\"spacing\"]\n for _ in range(bellRings):\n power_bells(True)\n time.sleep(bellDuration)\n power_bells(False)\n time.sleep(bellSpacing)",
"def butter_hpf(highcut, fs, order):\n nyq = 0.5 * fs\n high = highcut / nyq\n b, a = signal.butter(order, high, btype='highpass')\n w, h = signal.freqz(b, a, worN=1000)\n# plt.figure()\n# plt.plot((fs * 0.5 / np.pi) * w, abs(h))\n return b, a",
"def _dBmTomW(dBm):\n return math.pow(10.0, dBm / 10.0)",
"def hern_bulge_mass(r,b):\n rb = r/b\n return ((rb*rb)/(2*(1+rb)**2.))",
"def make_bpm_plot(self):\n plotXY([[self.processor.times,\n self.processor.samples],\n [self.processor.freqs,\n self.processor.fft]],\n labels=[False, True],\n showmax=[False, \"bpm\"],\n label_ndigits=[0, 0],\n showmax_digits=[0, 1],\n skip=[3, 3],\n name='plot')",
"def send_pitch_bend(self, value=8192, ch=None):\n self.send_channel_message(PITCH_BEND, value & 0x7f,\n (value >> 7) & 0x7f, ch=ch)",
"def bruno_mes(self):\n MI = -125\n MA = 125\n INCR = 19\n####################################################################\n ### CHANNEL 1\n self.write('CHN 1')\n self.write('CHN?')\n print 'Acting on channel:',self.read()\n self.write('WAVE ARB')\n self.write('ARBLOAD ARB1')\n self.write('FREQ 100')\n self.write('DCOFFS 0.05')\n self.write('AMPL 0.1')\n \n l =(125,-125,125)#arange(MI,MA,INCR) # the ramp\n# lll = copy(l)[::-1][1:-1]\n# l = concatenate((l,lll))\n self.write_array_to_byte(l,1)",
"def bblo(wave,bstar,airlimit,fig):\n import matplotlib.pyplot as plt\n import logging\n from scipy.interpolate import splrep,splev\n import tmath.wombat.womconfig as womconfig\n from tmath.wombat.womwaverange import womwaverange\n from tmath.wombat.womget_element import womget_element\n from tmath.wombat.inputter_single import inputter_single\n from tmath.wombat.onclick import onclick\n from tmath.pydux.waveparse import waveparse\n from tmath.wombat.yesno import yesno\n# global nsplinepoints, tmpsplptsx, tmpsplptsy, pflag\n print('Select regions to blotch')\n done=False\n while (not done):\n plt.cla()\n plt.plot(wave,bstar, drawstyle='steps-mid')\n plt.xlabel('Wavelength')\n plt.ylabel('Flux')\n plt.pause(0.01)\n wavesub,fluxsub,mode=womwaverange(wave,bstar,'none')\n wavebind=womget_element(wave,wavesub[0])\n waverind=womget_element(wave,wavesub[-1])\n plt.cla()\n plt.plot(wave[wavebind:waverind+1],bstar[wavebind:waverind+1], \\\n drawstyle='steps-mid')\n plt.xlabel('Wavelength')\n plt.ylabel('Flux')\n plt.pause(0.01)\n print('Do you want to enter blotch wavelengths by hand (w),')\n print('mark points (m), fit a spline (s), or quit (q)?')\n choice=inputter_single('(w/m/s/q): ','wmsq')\n if (choice == 'w') or (choice == 'm'):\n blotchgood=False\n while (not blotchgood):\n wavechoicedone=False\n while (not wavechoicedone):\n if (choice == 'w'):\n waveselb,waveselr=waveparse()\n else:\n print('Mark the two endpoints of the blotch region')\n endpoints=plt.ginput(2, timeout=-1)\n waveselb=endpoints[0][0]\n waveselr=endpoints[1][0]\n if (waveselb > waveselr):\n waveselb,waveselr=waveselr,waveselb\n waveselbind=womget_element(wave,waveselb)\n waveselrind=womget_element(wave,waveselr)\n print(waveselb, waveselr,waveselbind,waveselrind)\n if (waveselbind == 0) or (waveselrind == (len(wave)-1)):\n print('Wavelengths incorrect--too close to endpoints')\n else:\n wavechoicedone=True\n contblue=bstar[waveselbind-1]\n contred=bstar[waveselrind+1]\n delta=(contred-contblue)/(waveselrind-waveselbind+1)\n bstarcor=bstar.copy()\n for i in range(waveselbind,waveselrind+1):\n bstarcor[i]=contblue+ (i-waveselbind+1)*delta\n plt.plot(wave[wavebind:waverind+1],bstarcor[wavebind:waverind+1], \\\n drawstyle='steps-mid')\n plt.pause(0.01)\n print('Is this acceptable')\n answer=yesno('y')\n if (answer == 'y'):\n bstar=bstarcor.copy()\n blotchgood=True\n logging.info('File {} blotched from {} to {}'.format('bstar', wave[waveselbind], wave[waveselrind]))\n elif (choice == 's'):\n xmin,xmax=plt.xlim()\n ymin,ymax=plt.ylim()\n plt.xlim([xmin,xmax])\n plt.ylim([ymin,ymax])\n womconfig.nsplinepoints=0\n womconfig.tmpsplptsx=[]\n womconfig.tmpsplptsy=[]\n\n spldone=False\n while (not spldone):\n plt.cla()\n plt.plot(wave[wavebind:waverind+1],bstar[wavebind:waverind+1], \\\n drawstyle='steps-mid')\n if (len(womconfig.tmpsplptsx) > 0):\n plt.plot(womconfig.tmpsplptsx,womconfig.tmpsplptsy,'ro')\n plt.xlabel('Wavelength')\n plt.ylabel('Flux')\n plt.xlim([xmin,xmax])\n plt.ylim([ymin,ymax])\n cid = fig.canvas.mpl_connect('button_press_event', onclick)\n print('\\nClick on continuum points for spline fit.')\n print('Spline will replace values between first and last point')\n print('Left button = add point')\n print('Middle button = delete point')\n print('Right button = done\\n')\n womconfig.pflag=''\n while (womconfig.pflag != 'done'):\n plt.pause(0.01)\n fig.canvas.mpl_disconnect(cid)\n\n splptsy=[z for _,z in sorted(zip(womconfig.tmpsplptsx,womconfig.tmpsplptsy))]\n splptsx=sorted(womconfig.tmpsplptsx)\n 
spline=splrep(splptsx,splptsy,k=3)\n splblueindex=womget_element(wave,splptsx[0])\n splredindex=womget_element(wave,splptsx[-1])\n splwave=wave[splblueindex:splredindex+1].copy()\n splineresult=splev(splwave,spline)\n bstarcor=bstar.copy()\n bstarcor[splblueindex:splredindex+1]=splineresult.copy()\n plt.plot(splwave,splineresult,drawstyle='steps-mid')\n print('Is this acceptable')\n answer=yesno('y')\n if (answer == 'y'):\n bstar=bstarcor.copy()\n spldone=True\n logging.info('File {} blotched with spline from {} to {}'.format('bstar', wave[splblueindex], wave[splredindex]))\n else:\n done=True \n print('Do another region?')\n another=yesno('n')\n if (another == 'n'):\n done=True\n \n return bstar",
"def slice_heartbeats(self):\n sampling_rate = self.additional_fields['fs'] # 360 samples per second\n logging.info(\"Sampling rate: {}\".format(sampling_rate))\n assert sampling_rate == 360\n before = 0.2 # 0.2 seconds == 0.2 * 10^3 miliseconds == 200 ms\n after = 0.4 # --> 400 ms\n\n #\n # Find lead 2 position:\n #\n lead_pos = None\n for i, lead in enumerate(self.additional_fields['sig_name']):\n if lead == 'MLII':\n lead_pos = i\n if lead_pos is None:\n raise AssertionError(\"Didn't find lead 2 position. LEADS: {}\".format(self.additional_fields['sig_name']))\n logging.info(\"LEAD 2 position: {}\".format(lead_pos))\n ecg_signal = self.signals[:, lead_pos]\n r_peak_locations = self.labels_locations\n\n # convert seconds to samples\n before = int(before * sampling_rate) # Number of samples per 200 ms.\n after = int(after * sampling_rate) # number of samples per 400 ms.\n\n len_of_signal = len(ecg_signal)\n\n heart_beats = []\n\n for ind, r_peak in enumerate(r_peak_locations):\n start = r_peak - before\n if start < 0:\n logging.info(\"Skipping beat {}\".format(ind))\n continue\n end = r_peak + after\n if end > len_of_signal - 1:\n logging.info(\"Skipping beat {}\".format(ind))\n break\n heart_beats_dict = {}\n heart_beat = np.array(ecg_signal[start:end])\n heart_beats_dict['patient_number'] = self.patient_number\n heart_beats_dict['cardiac_cycle'] = heart_beat\n aami_label_str = heartbeat_types.convert_heartbeat_mit_bih_to_aami(self.mit_bih_labels_str[ind])\n aami_label_ind = heartbeat_types.convert_heartbeat_mit_bih_to_aami_index_class(self.mit_bih_labels_str[ind])\n heart_beats_dict['mit_bih_label_str'] = self.mit_bih_labels_str[ind]\n heart_beats_dict['aami_label_str'] = aami_label_str\n heart_beats_dict['aami_label_ind'] = aami_label_ind\n heart_beats_dict['aami_label_one_hot'] = heartbeat_types.convert_to_one_hot(aami_label_ind)\n heart_beats_dict['beat_ind'] = ind\n heart_beats_dict['lead'] = 'MLII'\n heart_beats.append(heart_beats_dict)\n return heart_beats",
"def eval_beat(individual):\n # compile the individual\n routine = gp.compile(individual, pset)\n # generate some test output\n try:\n test_output = gen_beat_output(routine)\n except:\n return 0.0,\n ## do some stats on the beat\n sd = np.std(np.array(test_output))\n bpm, correl = bpm_detector(test_output,24000)\n bpm_score = 1 - abs((bpm/120.0)-1)\n sd_score = sd / 128.0\n del test_output\n # return the score\n return float(bpm_score * sd_score),",
"def to_ms(self, bpm):\n raise NotImplementedError",
"def buzz(self, duration):\n self._io.buzz(duration)",
"def __init__(self, power, FWHM_ps, center_wavelength_nm,\n time_window_ps = 10., frep_MHz = 100., NPTS = 2**10, \n GDD = 0, TOD = 0, chirp2 = 0, chirp3 = 0,\n power_is_avg = False):\n Pulse.__init__(self, frep_MHz = frep_MHz, n = NPTS)\n # make sure we weren't passed mks units \n assert (center_wavelength_nm > 1.0) \n assert (time_window_ps > 1.0 ) \n self.set_center_wavelength_nm(center_wavelength_nm) \n self.set_time_window_ps(time_window_ps)\n\n T0_ps = FWHM_ps/3.7909885\n ### Generate pulse\n if not power_is_avg:\n # numpy.sinc is sin(pi*x)/(pi*x), so we divide by pi\n self.set_AT( np.sqrt(power) * np.sinc(self.T_ps/(T0_ps*np.pi)) ) \n else:\n self.set_AT( 1 / np.sinc(np.pi * self.T_ps/(T0_ps*np.pi)) )\n self.set_AT(self.AT * np.sqrt( power / ( frep_MHz*1.0e6 * self.calc_epp()) ))\n \n self.chirp_pulse_W(GDD, TOD)\n self.chirp_pulse_T(chirp2, chirp3, T0_ps)",
"def error_tone():\n\ttones.beep(880,250)\n\ttime.sleep(0.15)",
"def parseBeatLength(length):\n return [int(x) for x in \"{:07b}\".format(int(length*64))]",
"async def alarm(ctx, on_time:float=1, off_time:float=0.6, n:int=5):\n buzzer.beep(on_time, off_time, n)\n await ctx.send(f\"Alarme acionado\")",
"def freq2erb(freq_hz):\n return 9.265 * np.log(1 + freq_hz / (24.7 * 9.265))"
]
| [
"0.67328125",
"0.63008034",
"0.6223014",
"0.6183094",
"0.6152251",
"0.6092498",
"0.59835935",
"0.59532374",
"0.5835243",
"0.5692283",
"0.56689316",
"0.5542624",
"0.5461691",
"0.54379505",
"0.53047675",
"0.5275785",
"0.5267846",
"0.5249896",
"0.5233501",
"0.52271456",
"0.5215382",
"0.520575",
"0.51520467",
"0.5120971",
"0.5111614",
"0.5095029",
"0.5066741",
"0.50626683",
"0.5043771",
"0.5041628"
]
| 0.7568481 | 0 |
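The get_one_bar_heart_beat record above produces exactly one bar of audio. A minimal usage sketch follows, assuming pydub is installed and that the input mp3 actually has beats at the hard-coded offsets the function slices; the file name, helper name and export call are illustrative only and are not part of the dataset row.

from pydub import AudioSegment  # the record above relies on this import being in scope

def build_heartbeat_track(filename: str, bpm: int, bars: int) -> AudioSegment:
    # chain several one-bar segments into a longer heartbeat track (hypothetical helper)
    track = AudioSegment.empty()
    for _ in range(bars):
        track += get_one_bar_heart_beat(filename, bpm)
    return track

# e.g. four bars at 60 bpm, exported to an illustrative output path
# build_heartbeat_track('heartbeat.mp3', 60, 4).export('heartbeat_4bars.mp3', format='mp3')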
Command line entry point. "$ python runsim.py" shows the animation on screen; "$ python runsim.py --file=video.mp4" saves the animation to video; "$ python runsim.py --plot" shows the plot on screen; "$ python runsim.py --plot --file=plot.pdf" saves the plot to pdf. | def main(*args):
#
# Use argparse to handle parsing the command line arguments.
# https://docs.python.org/3/library/argparse.html
#
parser = argparse.ArgumentParser(description='Animate an epidemic')
parser.add_argument('--size', metavar='N', type=int, default=50,
help='Use a N x N simulation grid')
parser.add_argument('--duration', metavar='T', type=int, default=100,
help='Simulate for T days')
parser.add_argument('--recovery', metavar='P', type=float, default=0.1,
help='Probability of recovery (per day)')
parser.add_argument('--infection', metavar='P', type=float, default=0.1,
help='Probability of infecting a neighbour (per day)')
parser.add_argument('--death', metavar='P', type=float, default=0.005,
help='Probability of dying when infected (per day)')
parser.add_argument('--cases', metavar='N', type=int, default=2,
help='Number of initial infected people')
parser.add_argument('--plot', action='store_true',
help='Generate plots instead of an animation')
parser.add_argument('--file', metavar='N', type=str, default=None,
help='Filename to save to instead of showing on screen')
args = parser.parse_args(args)
# Set up the simulation
simulation = Simulation(args.size, args.size,
args.recovery, args.infection, args.death)
simulation.infect_randomly(args.cases)
# Plot or animation?
if args.plot:
fig = plot_simulation(simulation, args.duration)
if args.file is None:
# python runsim.py --plot
plt.show()
else:
# python runsim.py --plot --file=plot.pdf
fig.savefig(args.file)
else:
animation = Animation(simulation, args.duration)
if args.file is None:
# python runsim.py
animation.show()
else:
# python runsim.py --file=animation.mp4
#
# NOTE: this needs ffmpeg to be installed.
animation.save(args.file) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def main():\n\tplt.clf()\n\taxes = setup_axes()\n\tplot_output(axes, \"../../simulations/default\", \"black\")\n\tplot_output(axes, \"../../simulations/yccsr_zero\", \"crimson\")\n\tplot_output(axes, \"../../simulations/yccsr_linear\", \"lime\")\n\tplot_output(axes, \"../../simulations/yccsr_1-exp\", \"deepskyblue\")\n\tvisuals.plot_track_points_intervals(axes[0],\n\t\tvice.history(\"../../simulations/default\"), element = \"Sr\",\n\t\treference = \"Fe\")\n\tplot_legend(axes[1])\n\tplt.tight_layout()\n\tvisuals.yticklabel_formatter(axes[1])\n\tplt.savefig(sys.argv[1])\n\tplt.clf()",
"def play(out_dir, subprocesses, simulation_viz):\r\n simulator = Simulator()\r\n if simulation_viz is SimulationViz.FRONT:\r\n simulator.simulate_front_simulation()\r\n else:\r\n simulator.simulate_background_simulations(out_dir, subprocesses)",
"def main():\n\n parser = ArgumentParser()\n parser.add_argument('--config', '-c', type=str, required=True, help='Path to config file')\n parser.add_argument('--input', '-i', type=str, required=True, help='Path to video')\n parser.add_argument('--snapshot_path', '-s', type=str, required=False, default='', help='Path to snapshot')\n parser.add_argument('--out_scale', type=float, default=1.0, help='Output frame scale')\n parser.add_argument('--deploy', '-d', action='store_true', help='Execute in deploy mode')\n args = parser.parse_args()\n\n assert exists(args.config)\n assert exists(args.input)\n assert exists(args.snapshot_path + '.index')\n assert args.out_scale > 0.0\n\n task_monitor = get_monitor(args.config, snapshot_path=args.snapshot_path)\n task_monitor.demo(args.input, args.out_scale, args.deploy)",
"def main():\n run_simulation(spectral=False, ml=False, num_procs=1)\n run_simulation(spectral=True, ml=False, num_procs=1)\n run_simulation(spectral=False, ml=True, num_procs=1)\n run_simulation(spectral=True, ml=True, num_procs=1)\n run_simulation(spectral=False, ml=True, num_procs=10)\n run_simulation(spectral=True, ml=True, num_procs=10)",
"def main():\n parser = ArgumentParser()\n parser.add_argument('pose_config', help='Config file for pose')\n parser.add_argument('pose_checkpoint', help='Checkpoint file for pose')\n parser.add_argument('--video-path', type=str, help='Video path')\n parser.add_argument('--show', type=str2bool, nargs='?',\n default=False, help=\"show results.\")\n parser.add_argument('--device', default='cpu',\n help='Device used for inference')\n parser.add_argument('--box-thr', type=float, default=0.1,\n help='Bounding box score threshold')\n parser.add_argument('--kpt-thr', type=float, default=0.1,\n help='Keypoint score threshold')\n parser.add_argument('--folder_box', type=str, default='')\n parser.add_argument('--save_pixels', type=str2bool, nargs='?',\n const=True, default=False,\n help='saveposes as pixels or ratio of im')\n parser.add_argument('--skip_rate', type=int, default=1)\n parser.add_argument('--flip', type=str2bool, default=False)\n parser.add_argument('--save_vid', type=str2bool, default=False)\n\n args = parser.parse_args()\n\n start(args)",
"def main(argv):\n parser = argparse.ArgumentParser()\n \n parser.add_argument(\"sim_name\", type=str, help=\"name of simulation folder\")\n args = parser.parse_args() \n \n sim_path = '/'.join([current_dir, args.sim_name]) \n \n sim_input(sim_path) # write the fortran input files\n runmodel(sim_path) # compile and run fortran code\n sim_read(sim_path)",
"def run_simulator(scene, output_dir):\n with tempfile.TemporaryDirectory() as tmpdir:\n status = subprocess.run([\n SIMULATOR_BIN, '--no-cache', '--no-gui', '--no-initial-pause',\n '--output-dir', output_dir, scene\n ])",
"def main():\n # Goal is to model the OSSOS resonance detections given a file with parameters for those resonances.\n # e.g. from Crompvoets et al. (2021)\n\n # now run a survey simulation.\n params = sys.argv[1]\n H_max = float(sys.argv[2])\n outfile=f\"{os.path.splitext(params)[0]}_Model.dat\"\n print(f\"Saving results to {outfile}\")\n if not os.access(outfile, os.R_OK):\n run(outfile, params, 123456789, H_max=H_max)\n\n # confirm this looks like the OSSOS detections using rose plot.\n face_down_plot(outfile)",
"def main():\n options = docopt(main.__doc__)\n mfile = MusicalCodeFile(options['--file'])\n if not options['--output']:\n mfile.play()\n else:\n mfile.save(options['--output'])\n if options['--output-ly']:\n mfile.save_lilypond(options['--output-ly'])",
"def cli_simulate(model_file, output_dir, exporter, overwrite, compression,\n confirm, progress: int, progress_tag, output_same,\n simtime_total, simtime_lims, max_sweeps, max_residual, fipy_solver,\n snapshot_interval,\n plot, video, frames, budget, resume, show_eqns):\n\n click.secho('Starting MicroBenthos simulation', fg='green')\n from microbenthos.utils import yaml\n\n click.echo('Loading model from {}'.format(model_file))\n with open(model_file, 'r') as fp:\n defs = yaml.unsafe_load(fp)\n\n if 'model' not in defs and 'domain' in defs:\n # model is not under a separate key, so insert it under \"model\"\n defs = dict(model=defs)\n\n if 'simulation' not in defs:\n defs['simulation'] = {}\n\n # we want to override the keys in the loaded simulation dictionary,\n # so that when it is created the definition stored on the instance and\n # eventually exported to file includes these user overrides\n\n sim_kwargs = dict(\n simtime_total=simtime_total,\n fipy_solver=fipy_solver,\n max_sweeps=max_sweeps,\n simtime_lims=simtime_lims,\n max_residual=max_residual,\n snapshot_interval=snapshot_interval,\n )\n for k, v in sim_kwargs.items():\n if v is None:\n continue\n else:\n defs['simulation'][k] = v\n\n if output_same:\n output_dir = str(Path(model_file).parent)\n click.secho(f'Output directory set to: {output_dir}')\n\n from microbenthos.runners import SimulationRunner\n runner = SimulationRunner(output_dir=output_dir,\n model=defs['model'],\n simulation=defs['simulation'],\n resume=resume,\n overwrite=overwrite,\n confirm=confirm,\n progress=progress,\n progress_tag=progress_tag,\n plot=plot,\n video=video,\n frames=frames,\n budget=budget,\n exporters=exporter,\n show_eqns=show_eqns)\n\n if not runner.get_data_exporters():\n click.secho('No data exporters defined. Adding with compression={}'.format(\n compression), fg='red')\n runner.add_exporter('model_data', output_dir=runner.output_dir,\n compression=compression)\n\n runner.run()",
"def run(args):\n parser = argparse.ArgumentParser()\n subparsers = parser.add_subparsers()\n # create a plot subcommand\n parser_plot = subparsers.add_parser('plot', aliases=[\"plt\", \"p\"],\n help='plot a set of discrete signals')\n parser_plot.add_argument(\n 'files', nargs='+', help='audio files to be plotted requiers >=1')\n parser_plot.set_defaults(func=plot)\n\n # create a interpolation subcommand\n parser_interpolation = subparsers.add_parser('interpolate', aliases=[\"inp\", \"interp\"],\n help='interpolate a signal by a factor a')\n parser_interpolation.add_argument(\n 'ipath', help='path of the file which contains the signal')\n parser_interpolation.add_argument('factor', help='factor of interpolation')\n parser_interpolation.add_argument(\n '-opath', '-o', help=\"Path to store the resulted signal\", default='output.wav')\n parser_interpolation.add_argument(\n '-plot', '-p', help=\"plot the resulting signal\", default=False)\n parser_interpolation.set_defaults(func=interpolate)\n\n # create a decimation subcommand\n parser_decimation = subparsers.add_parser('decimate', aliases=[\"dec\", \"d\"],\n help='downsample a signal by a factor a')\n parser_decimation.add_argument(\n 'ipath', help='path of the file which contains the signal')\n parser_decimation.add_argument('factor', help='factor of downsampling')\n parser_decimation.add_argument(\n '-opath', '-o', help=\"Path to store the resulted signal\", default='output.wav')\n parser_decimation.add_argument(\n '-plot', '-p', help=\"plot the resulting signal\", default=False)\n parser_decimation.set_defaults(func=decimate)\n\n # create a shift subcommand\n parser_shift = subparsers.add_parser('shift', aliases=[\"s\", \"sh\"],\n help='shifts a signal n times in time')\n parser_shift.add_argument(\n 'ipath', help='path of the file which contains the signal')\n parser_shift.add_argument('factor', help='shift amount')\n parser_shift.add_argument(\n '-opath', '-o', help=\"Path to store the resulted signal\", default='output.wav')\n parser_shift.add_argument(\n '-plot', '-p', help=\"plot the resulting signal\", default=False)\n parser_shift.set_defaults(func=shift)\n\n # create a reflect subcommand\n parser_reflect = subparsers.add_parser('reflect', aliases=[\"r\", \"rf\"],\n help='reflecrts a signal in time')\n parser_reflect.add_argument(\n 'ipath', help='path of the file which contains the signal')\n parser_reflect.add_argument(\n '-opath', '-o', help=\"Path to store the resulted signal\", default='output.wav')\n parser_reflect.add_argument(\n '-plot', '-p', help=\"plot the resulting signal\", default=False)\n parser_reflect.set_defaults(func=reflect)\n\n # create a mamplitude subcommand\n parser_mamplitude = subparsers.add_parser('mamplitude', aliases=[\"ma\", \"mamp\"],\n help='modifies the amplitude of a signal')\n parser_mamplitude.add_argument(\n 'ipath', help='path of the file which contains the signal')\n parser_mamplitude.add_argument('factor', help='amplitude amount')\n parser_mamplitude.add_argument(\n '-opath', '-o', help=\"Path to store the resulted signal\", default='output.wav')\n parser_mamplitude.add_argument(\n '-plot', '-p', help=\"plot the resulting signal\", default=False)\n parser_mamplitude.set_defaults(func=mamplitude)\n\n # create a record subcommand\n parser_record = subparsers.add_parser('record', aliases=[\"rec\", \"r\"],\n help='records audio from computer\\'s built in mic')\n parser_record.add_argument(\n '-opath', '-o', help=\"Path to store the resulted signal\", default='record.wav')\n 
parser_record.add_argument(\n '-secs', '-s', help=\"seconds of audio to be recorded\", default=4)\n parser_record.add_argument(\n '-plot', '-p', help=\"plot the resulting signal\", default=False)\n parser_record.set_defaults(func=record)\n\n # create a play subcommand\n parser_play = subparsers.add_parser('play', aliases=[\"pl\", \"reproduce\"],\n help='reproduces audio from file')\n parser_play.add_argument(\n 'ipath', help=\"Path of the audio file\")\n parser_play.add_argument(\n '-plot', '-p', help=\"plot the audio signal\", default=False)\n parser_play.set_defaults(func=play)\n\n # create a gui subcommand\n parser_gui = subparsers.add_parser('gui', aliases=[\"g\", \"interface\"],\n help='launches the program in gui mode')\n parser_gui.set_defaults(func=guiMode)\n\n # parse arguments from shell\n if len(sys.argv) <= 1:\n sys.argv.append('--help')\n options = parser.parse_args()\n options.func(options)",
"def main():\n parser = argparse.ArgumentParser(description=\"Tracks adult fish\")\n # add options for argument parser\n parser.add_argument(\"in_path\",\n help=\"Path to the video directory.\")\n parser.add_argument(\"out_path\",\n help=\"Directory for results. Should be empty.\")\n parser.add_argument(\"-x\", \"--keep_temp\", action=\"store_true\",\n help=\"Keep temporary folder after execution.\")\n parser.add_argument(\"--visual\", action=\"store_true\",\n help=\"shows a visual representation of the tracking progress.\")\n\n # parse arguments from command line\n args = parser.parse_args()\n # get all file names and directories ready\n out_dir, temp_dir, video_bases, videos = housekeeping(args)\n borders = []\n for i in range(len(videos)):\n v = videos[i]\n get_borders(borders, temp_dir, v)\n\n for i in range(len(videos)):\n vbn = video_bases[i]\n v = videos[i]\n scaled_video = \"scaled_\" + vbn + \".avi\"\n ffmpeg = Ffmpeg(v, os.path.join(temp_dir, scaled_video))\n ffmpeg.f = \"avi\"\n ffmpeg.vcodec = \"libx264rgb\"\n ffmpeg.width = 480\n ffmpeg.run()\n\n for i in range(len(videos)):\n vbn = video_bases[i]\n pts = tracker(args, temp_dir, vbn)\n border = borders[i]\n tracks_lower, tracks_upper = split_tracks(border, pts)\n analysis = Analysis(tracks_lower, tracks_upper, px_size=0.06)\n analysis.analyze(os.path.join(out_dir, 'stats.txt'), vbn, vel=True)\n\n if not args.keep_temp:\n shutil.rmtree(temp_dir)",
"def start_sim(self):\n self.anim = animation.FuncAnimation(self.fig, self.anim_func, frames = self.timesteps, interval = 1, blit=True)\n plt.show()",
"def main(args: argparse.Namespace | None = None, argsraw: t.Sequence[str] | None = None, **kwargs: t.Any) -> None:\n if args is None:\n parser = argparse.ArgumentParser(\n formatter_class=at.CustomArgHelpFormatter, description=\"Plot ARTIS macroatom transitions.\"\n )\n addargs(parser)\n parser.set_defaults(**kwargs)\n args = parser.parse_args(argsraw)\n\n if Path(args.outputfile).is_dir():\n args.outputfile = str(Path(args.outputfile, defaultoutputfile))\n\n atomic_number = at.get_atomic_number(args.element.lower())\n if atomic_number < 1:\n print(f\"Could not find element '{args.element}'\")\n raise AssertionError\n\n timestepmin = args.timestep\n\n timestepmax = timestepmin if not args.timestepmax or args.timestepmax < 0 else args.timestepmax\n\n input_files = list(Path(args.modelpath).glob(\"**/macroatom_????.out*\"))\n\n if not input_files:\n print(\"No macroatom files found\")\n raise FileNotFoundError\n\n dfall = read_files(input_files, args.modelgridindex, timestepmin, timestepmax, atomic_number)\n\n specfilename = Path(args.modelpath, \"spec.out\")\n\n if not specfilename.is_file():\n print(f\"Could not find {specfilename}\")\n raise FileNotFoundError\n\n outputfile = args.outputfile.format(args.modelgridindex, timestepmin, timestepmax)\n make_plot(\n dfall,\n args.modelpath,\n str(specfilename),\n timestepmin,\n timestepmax,\n outputfile,\n xmin=args.xmin,\n xmax=args.xmax,\n modelgridindex=args.modelgridindex,\n )",
"def run(inputs, outdir, paramsfile, model, simulated, beta, diag,\n maxiter, tol, seed, tail, initgamma, initx, max_func, separate, save, prog):\n simulatehuman.cmd_run(inputs, outdir, paramsfile=paramsfile, modeltype=model,\n is_simulation=simulated, savemat=save, saveprog=prog, beta=beta, diag=diag,\n maxiter=maxiter, tol=tol, seed=seed, tail=tail, initgamma=initgamma, initx=initx, smooth=False, h=1,\n max_func=max_func, separate=separate)",
"def main():\n\n if args.sims[0].lower() == 'all':\n args.sims = xl.get_all_sims(args.base_dir)\n have_full_sim_dir = True\n else:\n have_full_sim_dir = False\n \n for isim in args.sims:\n\n if have_full_sim_dir:\n wdir = isim\n else:\n wdir = xl.get_sim_dir(args.base_dir, isim)\n \n print(\"\")\n print(\"====================================================================\")\n print(f\"=== Processing {wdir} ===\")\n print(\"====================================================================\") \n print(\"\")\n \n for iisnap, isnap in enumerate(args.snaps):\n\n # Account for possibly different VR numbering than (desired) output\n if args.vr_snaps is None:\n ivsnap = isnap\n else:\n ivsnap = args.vr_snaps[iisnap]\n \n process_snap(wdir, args.out_file, isnap, ivsnap)",
"def main():\n data_file = 'shrec_timer.json'\n\n if len(sys.argv) == 2:\n generate_data(data_file, sys.argv[1])\n\n plot_data(data_file)",
"def run_sequence(seq: Sequence, tracker: Tracker, debug=False, visdom_info=None):\n\n visdom_info = {} if visdom_info is None else visdom_info\n\n base_results_path = '{}/{}'.format(tracker.results_dir, seq.name)\n results_path = '{}.txt'.format(base_results_path)\n times_path = '{}_time.txt'.format(base_results_path)\n base_visual_path = '{}/{}'.format(tracker.visual_dir, seq.name)\n if not os.path.exists(base_visual_path):\n os.makedirs(base_visual_path)\n\n if os.path.isfile(results_path) and not debug:\n return\n\n print('Tracker: {} {} {} , Sequence: {}'.format(tracker.name, tracker.parameter_name, tracker.run_id, seq.name))\n\n if debug:\n output = tracker.run(seq, base_visual_path, debug=debug, visdom_info=visdom_info)\n # output = tracker.run(seq, debug=debug, visdom_info=visdom_info)\n else:\n try:\n # output = tracker.run(seq, debug=debug, visdom_info=visdom_info)\n output = tracker.run(seq, base_visual_path, debug=debug, visdom_info=visdom_info)\n except Exception as e:\n print(e)\n return\n\n tracked_bb = np.array(output['target_bbox']).astype(int)\n exec_times = np.array(output['time']).astype(float)\n\n print('FPS: {}'.format(len(exec_times) / exec_times.sum()))\n # if not debug:\n # np.savetxt(results_path, tracked_bb, delimiter='\\t', fmt='%d')\n # np.savetxt(times_path, exec_times, delimiter='\\t', fmt='%f')\n np.savetxt(results_path, tracked_bb, delimiter='\\t', fmt='%d')\n np.savetxt(times_path, exec_times, delimiter='\\t', fmt='%f')",
"def run(sim_attr_generator):\n#TODO: clean\n#TODO: integrate analyses\n def analyze_and_save(simulation,simulation_attributes):\n#? Ugly conf file analyses integration.\n if simulation_attributes.analyses and Args.output_file != None:\n verbose_print(\"Saving analyses for {0}.\".format(simulation_attributes.id_name),2)\n results = analyze_datas(\n simulation.result,\n simulation_attributes.analyses\n )\n plotables = ana_results_to_plotables(\n results,\n simulation_attributes.analyses\n )\n#TODO error handling for save\n analysis_save_dm(\n results,\n plotables,\n simulation_attributes.analyses,\n simulation_attributes.id_name\n )\n\n def save_simulation(simulation,simulation_attributes):\n if not simulation_attributes.analyses and Args.output_file != None:\n verbose_print(\"Saving simulation datas of {0}.\".format(\n simulation_attributes.id_name\n ),2) \n try:\n np.save(\n simulation_attributes.id_name,\n simulation.result\n )\n except:\n raise EnvironmentError(\"Can't save data to {}.\".format(\n simulation_attributes.id_name\n ))\n\n verbose_print(\"Starting simulation run.\",1)\n for i,simulation_attributes in enumerate(sim_attr_generator):\n verbose_print(\"Starting simulation number {0}: {1}\".format(\n i,\n simulation_attributes.id_name\n ),2)\n simulation = Simulation(\n SimulationVariables(simulation_attributes)\n )\n simulation.start()\n save_simulation(simulation,simulation_attributes)\n analyze_and_save(simulation,simulation_attributes)",
"def main():\n input_video = sys.argv[1]\n input_audio = sys.argv[2]\n output_video = sys.argv[3]\n set_audio(input_video, input_audio, output_video)",
"def main():\n start = 1554994269 # unix timestamp, fixed for reproducability\n stop = start + 850 * 61 # number of acqs * time between acqs\n sampling_rate = 512. # Hz\n\n # Nyquist freq needs to be larger than frequency of J-peaks\n nyquist = sampling_rate / 2 + 1\n assert nyquist > 250\n\n # Test single mass for now\n mass = 2e-15\n result = run_sim(mass, start, stop, sampling_rate)\n\n sim_name = 'sim_mass_{:g}_rate_{:g}.npz'.format(mass, sampling_rate)\n np.savez(sim_name, times=result[0], amplitudes=result[1])\n print('saved: {}'.format(sim_name))",
"def figures(z=None, name='MC', vext='.mp4', ext='.png', do_movie=True, do_figs=True, recompute=False,\n seed=None, impulse=False, events=None, verbose=False, masking=False,\n do_amp=False, do_mask=False, figpath=figpath, **kwargs):\n if not(os.path.isdir(figpath)): os.mkdir(figpath)\n\n if do_figs:\n if recompute or check_if_anim_exist(name, ext, figpath):\n try:\n visualize(z, filename=os.path.join(figpath, name + ext), **kwargs) # Visualize the Fourier Spectrum\n except Exception as e:\n print('Failed to generate the visualisation:', e)\n\n if recompute or check_if_anim_exist(name + '_cube', ext, figpath):\n try:\n movie = rectif(random_cloud(z, seed=seed, impulse=impulse, events=events, do_amp=do_amp, do_mask=do_mask), verbose=verbose)\n cube(movie, filename=os.path.join(figpath, name + '_cube' + ext), **kwargs) # Visualize the Stimulus cube\n except Exception as e:\n print('Failed to generate the cube:', e)\n\n if do_movie:\n if recompute or check_if_anim_exist(name, vext, figpath):\n try:\n movie = rectif(random_cloud(z, seed=seed, impulse=impulse, events=events, do_amp=do_amp, do_mask=do_mask), verbose=verbose)\n anim_save(movie, filename=os.path.join(figpath, name), display=False, vext=vext, **kwargs)\n except Exception as e:\n print('Failed to generate the movie:', e)",
"def Main():\n EnigmaSim = simulation() #Creates the simulation object\n EnigmaSim.Run() #Runs the simulation",
"def Run(argv=None):\n if argv is None:\n argv = sys.argv[1:]\n\n s_opts = \"hdi\"\n l_opts = (\n \"help\",\n \"duplicates=\",\n \"ignore-comments\",\n \"ignore-imports\",\n \"ignore-docstrings\",\n )\n min_lines = 4\n ignore_comments = False\n ignore_docstrings = False\n ignore_imports = False\n opts, args = getopt(argv, s_opts, l_opts)\n for opt, val in opts:\n if opt in (\"-d\", \"--duplicates\"):\n min_lines = int(val)\n elif opt in (\"-h\", \"--help\"):\n usage()\n elif opt in (\"-i\", \"--ignore-comments\"):\n ignore_comments = True\n elif opt in (\"--ignore-docstrings\",):\n ignore_docstrings = True\n elif opt in (\"--ignore-imports\",):\n ignore_imports = True\n if not args:\n usage(1)\n sim = Similar(min_lines, ignore_comments, ignore_docstrings, ignore_imports)\n for filename in args:\n with open(filename) as stream:\n sim.append_stream(filename, stream)\n sim.run()\n sys.exit(0)",
"def run_simulation(run):\n # Write the argument file used by metrosim.\n simulation = run.simulation\n metrosim_dir = settings.BASE_DIR + '/metrosim_files/'\n metrosim_file = '{0}execs/metrosim'.format(metrosim_dir)\n arg_file = (\n '{0}arg_files/simulation_{1!s}_run_{2!s}.txt'.format(metrosim_dir,\n simulation.id,\n run.id)\n )\n with open(arg_file, 'w') as f:\n database = settings.DATABASES['default']\n db_host = database['HOST']\n db_name = database['NAME']\n db_user = database['USER']\n db_pass = database['PASSWORD']\n log = metrosim_dir + 'logs/run_{}.txt'.format(run.id)\n tmp = metrosim_dir + 'output'\n stop = metrosim_dir + 'stop_files/run_{}.stop'.format(run.id)\n arguments = ('-dbHost \"{0}\" -dbName \"{1}\" -dbUser \"{2}\" '\n + '-dbPass \"{3}\" -logFile \"{4}\" -tmpDir \"{5}\" '\n + '-stopFile \"{6}\" -simId \"{7!s}\" -runId \"{8!s}\"'\n ).format(db_host, db_name, db_user, db_pass, log, tmp,\n stop, simulation.id, run.id)\n f.write(arguments)\n\n # Run the script 'prepare_run.py' then run metrosim then run the script \n # 'run_end.py'.\n # The two scripts are run with the run.id as an argument.\n prepare_run_file = settings.BASE_DIR + '/metro_app/prepare_run.py'\n build_results_file = settings.BASE_DIR + '/metro_app/build_results.py'\n log_file = (\n '{0}/website_files/script_logs/run_{1}.txt'.format(\n settings.BASE_DIR, run.id\n )\n )\n # Command looks like: \n #\n # python3 ./metro_app/prepare_results.py y\n # 2>&1 | tee ./website_files/script_logs/run_y.txt\n # && ./metrosim_files/execs/metrosim\n # ./metrosim_files/arg_files/simulation_x_run_y.txt \n # && python3 ./metro_app/build_results.py y \n # 2>&1 | tee ./website_files/script_logs/run_y.txt\n #\n # 2>&1 | tee is used to redirect output and errors to file.\n command = ('python3 {first_script} {run_id} 2>&1 | tee {log} && '\n + '{metrosim} {argfile} && '\n + 'python3 {second_script} {run_id} 2>&1 | tee {log}')\n command = command.format(first_script=prepare_run_file, run_id=run.id,\n log=log_file, metrosim=metrosim_file,\n argfile=arg_file,\n second_script=build_results_file)\n subprocess.Popen(command, shell=True)",
"def video_files():\n p = parse_cmdline(get_parser=get_parser_files)\n log.setup_main_handler(\n mods=(\"fogtools\", \"typhon\", \"fogpy\", \"sattools\", \"fcitools\", \"satpy\",\n \"pyresample\"),\n level=logging.INFO)\n vis.show_video_abi_glm(\n files=p.files,\n img_out=p.filename_pattern_image,\n vid_out=p.filename_pattern_video,\n out_dir=p.outdir)\n print(\"Files written to:\", p.outdir)",
"def Main():\n numberOfPopulation = 350\n numberOfDays = 60\n \n simulation = Simulation(Covid19(), numberOfPopulation, numberOfDays, \"Covid 19 Simulation\")\n simulation.run() \n simulation = Simulation(Ebola(), numberOfPopulation, numberOfDays, \"Ebola Simulation\")\n simulation.run()",
"def run_simulator(self):\n\n self.update_settings()\n\n # Pass in the progress bar and the master so that the simulator can\n # update the progress bar and then refresh the screen when the progress\n # checkpoints are hit\n\n self.sim_results = self.sim.run(self.progress_bar, self.master)\n self.graph_results()",
"def main():\n # Parameters\n opt = get_args()\n\n assert os.path.exists(opt.path_video), \"Video file does not exist\"\n try:\n os.makedirs(opt.path_images)\n except Exception:\n print(\"Folder already exists. Overwriting it\")\n pass\n\n assert opt.size is None or opt.size is not None and len(opt.size) <= 2, \"Make sure the size indicated contains at maximum two numbers [none, max_dimension or width and height]\"\n\n # Get base path\n base_path = os.path.join(opt.path_images, opt.basename)\n\n # Load video from file\n try:\n cap = cv2.VideoCapture(opt.path_video)\n except Exception as e:\n print('Video failed to be loaded:', e)\n sys.exit(0)\n\n # Parse video\n parse_video(cap, base_path, opt.step, opt.size)\n\n # Release capture\n cap.release()\n cv2.destroyAllWindows()\n \n return 0",
"def simul_and_export(file, config, i):\n\n simulate_UVSPEC(file, config)\n\n load_skymap(config)\n\n sim = files_sim(config)[i]\n export_sim_rad(sim, config)"
]
| [
"0.6634457",
"0.663252",
"0.66184664",
"0.65506244",
"0.64640737",
"0.6457846",
"0.6301039",
"0.6249109",
"0.6241222",
"0.61924034",
"0.61819977",
"0.6164607",
"0.61598307",
"0.6156952",
"0.61398953",
"0.6134857",
"0.61104107",
"0.6092299",
"0.6074707",
"0.6071585",
"0.606645",
"0.60631496",
"0.60361093",
"0.6033212",
"0.60204935",
"0.6017205",
"0.6016089",
"0.5996944",
"0.59676844",
"0.5957861"
]
| 0.78607196 | 0 |
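The main() record above references a Simulation class, a plot_simulation helper and an Animation class that are defined elsewhere in its project and are not part of the record. A hedged sketch of how such an entry point is typically wired to the command line is shown below; everything except the flag names (which appear in the argparse setup above) is an assumption.

import sys

if __name__ == '__main__':
    # forward the real command-line arguments to the entry point,
    # e.g. `python runsim.py --plot --file=plot.pdf`
    main(*sys.argv[1:])

# the same flags can also be passed programmatically:
# main('--size', '100', '--duration', '200')   # animation on screen
# main('--plot', '--file=plot.pdf')            # plot saved to pdf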
Returns a triplet with a 1-bit sign, an 11-bit unsigned integer exponent and a 53-bit unsigned integer mantissa. The returned triplet can be used to fully reconstruct the fp64 value passed as an argument. | def to_sign_exponent_mantissa(value, exponent_bits=exponent_bits, mantissa_bits=mantissa_bits):
float_mantissa, float_exponent = math.frexp(value)
if (float_mantissa >= 0):
sign = 0
else:
sign = 1
exponent = int(float_exponent + 2**(exponent_bits - 1))
mantissa = int(abs(float_mantissa) * 2**mantissa_bits)
return sign, exponent, mantissa | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def DequantizeFP(scale, mantissa, nScaleBits=3, nMantBits=5):\n\n ### YOUR CODE STARTS HERE ###\n R = 2**nScaleBits - 1 + nMantBits\n aNum = 0\n s = mantissa & (1 << (nMantBits - 1))\n if s > 0:\n s = 1 << (R - 1)\n code = mantissa & (2**(nMantBits - 1) - 1)\n aNum += s\n aNum += code << max(R - scale - nMantBits - 1, 0)\n if scale != (2**nScaleBits - 1):\n aNum += 1 << (R - scale - 2)\n shift = R - scale - nMantBits - 2\n if shift > 0:\n aNum += 1 << shift\n aNum = DequantizeUniform(aNum, R)\n ### YOUR CODE ENDS HERE ###\n\n return aNum",
"def ts_float32(val):\n return np.float64(val)",
"def _eight_byte_real(value):\n if value == 0:\n return b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00'\n if value < 0:\n byte1 = 0x80\n value = -value\n else:\n byte1 = 0x00\n fexp = numpy.log2(value) / 4\n exponent = int(numpy.ceil(fexp))\n if fexp == exponent:\n exponent += 1\n mantissa = int(value * 16.**(14 - exponent))\n byte1 += exponent + 64\n byte2 = (mantissa // 281474976710656)\n short3 = (mantissa % 281474976710656) // 4294967296\n long4 = mantissa % 4294967296\n return struct.pack(\">HHL\", byte1 * 256 + byte2, short3, long4)",
"def compress(self,float32):\n\n F16_EXPONENT_BITS = 0x1F\n F16_EXPONENT_SHIFT = 10\n F16_EXPONENT_BIAS = 15\n F16_MANTISSA_BITS = 0x3ff\n F16_MANTISSA_SHIFT = (23 - F16_EXPONENT_SHIFT)\n F16_MAX_EXPONENT = (F16_EXPONENT_BITS << F16_EXPONENT_SHIFT)\n\n if type(float32) == float:\n f32 = self.unpack(float32)\n else:\n f32 = float32\n f16 = 0\n sign = (f32 >> 16) & 0x8000\n exponent = ((f32 >> 23) & 0xff) - 127\n mantissa = f32 & 0x007fffff\n \n if exponent == 128:\n f16 = sign | F16_MAX_EXPONENT\n if mantissa:\n f16 |= (mantissa & F16_MANTISSA_BITS)\n elif exponent > 15:\n f16 = sign | F16_MAX_EXPONENT\n elif exponent > -15:\n exponent += F16_EXPONENT_BIAS\n mantissa >>= F16_MANTISSA_SHIFT\n f16 = sign | exponent << F16_EXPONENT_SHIFT | mantissa\n else:\n f16 = sign\n return f16",
"def _eight_byte_real(value):\n if value == 0:\n return b\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"\n if value < 0:\n byte1 = 0x80\n value = -value\n else:\n byte1 = 0x00\n fexp = numpy.log2(value) / 4\n exponent = int(numpy.ceil(fexp))\n if fexp == exponent:\n exponent += 1\n mantissa = int(value * 16.0 ** (14 - exponent))\n byte1 += exponent + 64\n byte2 = mantissa // 281474976710656\n short3 = (mantissa % 281474976710656) // 4294967296\n long4 = mantissa % 4294967296\n return struct.pack(\">HHL\", byte1 * 256 + byte2, short3, long4)",
"def MantissaFP(aNum, scale, nScaleBits=3, nMantBits=5):\n\n ### YOUR CODE STARTS HERE ###\n R = 2**nScaleBits - 1 + nMantBits\n s_code = QuantizeUniform(aNum, R)\n s = s_code & (1 << (R - 1))\n if s > 0:\n s = 1 << (nMantBits - 1)\n code = s_code & (2**(R - 1) - 1)\n if scale == (2**nScaleBits - 1):\n mantissa = s + (code & (2**(nMantBits - 1) - 1))\n else:\n mantissa = s + ((code >> (R - scale - nMantBits - 1)) &\n ((2**(nMantBits - 1) - 1)))\n ### YOUR CODE ENDS HERE ###\n\n return int(mantissa)",
"def _eight_byte_real_to_float(value):\n short1, short2, long3 = struct.unpack(\">HHL\", value)\n exponent = (short1 & 0x7F00) // 256 - 64\n mantissa = (\n ((short1 & 0x00FF) * 65536 + short2) * 4294967296 + long3\n ) / 72057594037927936.0\n if short1 & 0x8000:\n return -mantissa * 16.0 ** exponent\n return mantissa * 16.0 ** exponent",
"def rop64(*args):\n\tpacked = \"\"\n\tfor x in args:\n\t\tif type(x) == int or type(x) == long:\n\t\t\tpacked += pack64(x)\n\t\telse:\n\t\t\tpacked += x\n\treturn packed",
"def _eight_byte_real_to_float(value):\n short1, short2, long3 = struct.unpack('>HHL', value)\n exponent = (short1 & 0x7f00) // 256 - 64\n mantissa = (((short1 & 0x00ff) * 65536 + short2) * 4294967296 +\n long3) / 72057594037927936.0\n if short1 & 0x8000:\n return -mantissa * 16.**exponent\n return mantissa * 16.**exponent",
"def from_sign_exponent_mantissa(sign, exponent, mantissa, exponent_bits=exponent_bits, mantissa_bits=mantissa_bits):\n if (sign):\n signed_mantissa = - mantissa\n else:\n signed_mantissa = mantissa\n signed_exponent = exponent - 2**(exponent_bits - 1)\n norm_signed_mantissa = float(signed_mantissa) / float(2**mantissa_bits)\n return math.ldexp(norm_signed_mantissa, signed_exponent)",
"def frac11(self,lx,ly,lz):\n return str(self.coord[0]/lx*2)+'\\t'+str(self.coord[1]/ly*2)+'\\t'+str(self.coord[2]/lz*2)",
"def _float64_feature(value):\n if not isinstance(value, list):\n value = [value]\n return tf.train.Feature(float_list=tf.train.FloatList(value=value))",
"def _int64_feature(value: int) -> tf.train.Features.FeatureEntry:\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))",
"def float_to_bin64(value):\n [d] = struct.unpack(\">Q\", struct.pack(\">d\", value))\n return \"{:064b}\".format(d)",
"def p64(d):\n return pack('<Q', d)",
"def Dequantize(scale, mantissa, nScaleBits=3, nMantBits=5):\n\n aNum = 0.0 # REMOVE THIS LINE WHEN YOUR FUNCTION IS DONE\n\n ### YOUR CODE STARTS HERE ###\n R = 2**nScaleBits - 1 + nMantBits\n aNum = 0\n s = mantissa & (1 << (nMantBits - 1))\n if s > 0:\n s = 1 << (R - 1)\n code = mantissa & (2**(nMantBits - 1) - 1)\n aNum += s\n aNum += code << max(R - scale - nMantBits, 0)\n\n if scale < (2**nScaleBits - 1):\n if code > 0:\n shift = R - scale - nMantBits - 1\n aNum += 1 << shift\n aNum = DequantizeUniform(aNum, R)\n ### YOUR CODE ENDS HERE ###\n\n return aNum",
"def _ltz(self):\n shift = torch.iinfo(torch.long).bits - 1\n precision = 0 if self.encoder.scale == 1 else None\n\n result = self._to_ptype(Ptype.binary)\n result.share >>= shift\n result = result._to_ptype(Ptype.arithmetic, precision=precision, bits=1)\n result.encoder._scale = 1\n return result",
"def _int64_feature(value):\r\n if type(value) is tuple:\r\n return tf.train.Feature(int64_list=tf.train.Int64List(value=value))\r\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))",
"def _int64_feature(value):\n return tf.train.Feature(\n int64_list=tf.train.Int64List(value=[int(v) for v in value]))",
"def _int64_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(\n value=[int(v) for v in value]))",
"def _int64_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))",
"def _int64_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))",
"def extract_64timestamp_fraction(bits: str) -> str:\n assert len(bits) == 64\n bits = bits[32:64]\n # __log.info(bits)\n ints = int(bits, 2)\n result = ints / _max_32bit\n result = int(result * 1000000000)\n result = str(result)\n while len(result) < 9:\n result = '0' + result\n return result",
"def _int64_feature(value):\n\treturn tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))",
"def bin_to_float64(b):\n bf = int_to_bytes(int(b, 2), 8) # 8 bytes needed for IEEE 754 binary64.\n return struct.unpack(\">d\", bf)[0]",
"def int64_feature(values):\n if not isinstance(values, (tuple, list)):\n values = [values]\n return tf.train.Feature(int64_list=tf.train.Int64List(value=values))",
"def _int64_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))",
"def _int64_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))",
"def _int64_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))",
"def _int64_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))"
]
| [
"0.5286657",
"0.5091577",
"0.50912625",
"0.5062717",
"0.5032826",
"0.50214833",
"0.50192976",
"0.5013269",
"0.4999337",
"0.49722776",
"0.49537775",
"0.49217296",
"0.49116477",
"0.4890735",
"0.48767433",
"0.48740077",
"0.48563376",
"0.48374486",
"0.48224583",
"0.48199913",
"0.48074186",
"0.48074186",
"0.480052",
"0.47957742",
"0.47828797",
"0.47618484",
"0.4761483",
"0.4761483",
"0.4761483",
"0.4761483"
]
| 0.5590061 | 0 |
Returns an fp64 from a 1 bit sign, an 11 bit unsigned integer exponent and a 53 bit unsigned integer mantissa. | def from_sign_exponent_mantissa(sign, exponent, mantissa, exponent_bits=exponent_bits, mantissa_bits=mantissa_bits):
if (sign):
signed_mantissa = - mantissa
else:
signed_mantissa = mantissa
signed_exponent = exponent - 2**(exponent_bits - 1)
norm_signed_mantissa = float(signed_mantissa) / float(2**mantissa_bits)
return math.ldexp(norm_signed_mantissa, signed_exponent) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def to_sign_exponent_mantissa(value, exponent_bits=exponent_bits, mantissa_bits=mantissa_bits):\n float_mantissa, float_exponent = math.frexp(value)\n if (float_mantissa >= 0):\n sign = 0\n else:\n sign = 1\n exponent = int(float_exponent + 2**(exponent_bits - 1))\n mantissa = int(abs(float_mantissa) * 2**mantissa_bits)\n return sign, exponent, mantissa",
"def MantissaFP(aNum, scale, nScaleBits=3, nMantBits=5):\n\n ### YOUR CODE STARTS HERE ###\n R = 2**nScaleBits - 1 + nMantBits\n s_code = QuantizeUniform(aNum, R)\n s = s_code & (1 << (R - 1))\n if s > 0:\n s = 1 << (nMantBits - 1)\n code = s_code & (2**(R - 1) - 1)\n if scale == (2**nScaleBits - 1):\n mantissa = s + (code & (2**(nMantBits - 1) - 1))\n else:\n mantissa = s + ((code >> (R - scale - nMantBits - 1)) &\n ((2**(nMantBits - 1) - 1)))\n ### YOUR CODE ENDS HERE ###\n\n return int(mantissa)",
"def DequantizeFP(scale, mantissa, nScaleBits=3, nMantBits=5):\n\n ### YOUR CODE STARTS HERE ###\n R = 2**nScaleBits - 1 + nMantBits\n aNum = 0\n s = mantissa & (1 << (nMantBits - 1))\n if s > 0:\n s = 1 << (R - 1)\n code = mantissa & (2**(nMantBits - 1) - 1)\n aNum += s\n aNum += code << max(R - scale - nMantBits - 1, 0)\n if scale != (2**nScaleBits - 1):\n aNum += 1 << (R - scale - 2)\n shift = R - scale - nMantBits - 2\n if shift > 0:\n aNum += 1 << shift\n aNum = DequantizeUniform(aNum, R)\n ### YOUR CODE ENDS HERE ###\n\n return aNum",
"def bin_to_float64(b):\n bf = int_to_bytes(int(b, 2), 8) # 8 bytes needed for IEEE 754 binary64.\n return struct.unpack(\">d\", bf)[0]",
"def _eight_byte_real_to_float(value):\n short1, short2, long3 = struct.unpack(\">HHL\", value)\n exponent = (short1 & 0x7F00) // 256 - 64\n mantissa = (\n ((short1 & 0x00FF) * 65536 + short2) * 4294967296 + long3\n ) / 72057594037927936.0\n if short1 & 0x8000:\n return -mantissa * 16.0 ** exponent\n return mantissa * 16.0 ** exponent",
"def _eight_byte_real_to_float(value):\n short1, short2, long3 = struct.unpack('>HHL', value)\n exponent = (short1 & 0x7f00) // 256 - 64\n mantissa = (((short1 & 0x00ff) * 65536 + short2) * 4294967296 +\n long3) / 72057594037927936.0\n if short1 & 0x8000:\n return -mantissa * 16.**exponent\n return mantissa * 16.**exponent",
"def Mantissa(aNum, scale, nScaleBits=3, nMantBits=5):\n\n ### YOUR CODE STARTS HERE ###\n R = 2**nScaleBits - 1 + nMantBits\n s_code = QuantizeUniform(aNum, R)\n s = s_code & (1 << (R - 1))\n if s > 0:\n s = 1 << (nMantBits - 1)\n code = s_code & (2**(R - 1) - 1)\n if scale == (2**nScaleBits - 1):\n mantissa = s + (code & (2**(nMantBits - 1) - 1))\n else:\n mantissa = s + ((code >>\n (R - scale - nMantBits)) & ((2**(nMantBits - 1) - 1)))\n ### YOUR CODE ENDS HERE ###\n\n return int(mantissa)",
"def float32_to_float8e5m2( # pylint: disable=too-many-statements\n fval: float,\n scale: float = 1.0,\n fn: bool = False,\n uz: bool = False,\n saturate: bool = True,\n) -> int:\n x = fval / scale\n b = int.from_bytes(struct.pack(\"<f\", np.float32(x)), \"little\")\n ret = (b & 0x80000000) >> 24 # sign\n\n if fn and uz:\n if (b & 0x7FC00000) == 0x7FC00000:\n return 0x80\n if (b & 0x7FFFFFFF) == 0x7F800000:\n # inf\n if saturate:\n return ret | 0x7F\n return 0x80\n e = (b & 0x7F800000) >> 23 # exponent\n m = b & 0x007FFFFF # mantissa\n\n if e != 0:\n if e < 109:\n pass\n elif e < 112:\n # denormalized number\n ex = e - 111\n if ex >= -1:\n ret |= 1 << (1 + ex)\n ret |= m >> (22 - ex)\n elif m > 0:\n ret |= 1\n mask = 1 << (21 - ex)\n if m & mask and ( # pylint: disable=too-many-boolean-expressions\n ret & 1\n or m & (mask - 1) > 0\n or (m & mask and m & (mask << 1) and m & (mask - 1) == 0)\n ):\n # rounding\n ret += 1\n elif e < 143:\n # normalized number\n ex = e - 111\n ret |= ex << 2\n ret |= m >> 21\n if m & 0x100000 and ((m & 0xFFFFF) or (m & 0x200000)):\n if (ret & 0x7F) < 0x7F:\n # rounding\n ret += 1\n elif not saturate:\n ret = 0x80\n elif e == 255 and m == 0: # inf\n ret = 0x80\n elif saturate:\n ret |= 0x7F # last possible number\n else:\n ret = 0x80\n elif m == 0:\n # -0\n ret = 0\n return int(ret)\n elif not fn and not uz:\n if (b & 0x7FC00000) == 0x7FC00000:\n return 0x7F | ret\n if np.isinf(x):\n if saturate:\n return 0x7B | ret\n return 0x7C | ret\n e = (b & 0x7F800000) >> 23 # exponent\n m = b & 0x007FFFFF # mantissa\n\n if e != 0:\n if e < 110:\n pass\n elif e < 113:\n # denormalized number\n ex = e - 112\n if ex >= -1:\n ret |= 1 << (1 + ex)\n ret |= m >> (22 - ex)\n elif m > 0:\n ret |= 1\n mask = 1 << (21 - ex)\n if m & mask and ( # pylint: disable=too-many-boolean-expressions\n ret & 1\n or m & (mask - 1) > 0\n or (m & mask and m & (mask << 1) and m & (mask - 1) == 0)\n ):\n # rounding\n ret += 1\n elif e < 143:\n # normalized number\n ex = e - 112\n ret |= ex << 2\n ret |= m >> 21\n if m & 0x100000 and ((m & 0xFFFFF) or (m & 0x200000)):\n if (ret & 0x7F) < 0x7B:\n # rounding\n ret += 1\n elif saturate:\n ret |= 0x7B\n else:\n ret |= 0x7C\n elif saturate:\n ret |= 0x7B\n else:\n ret |= 0x7C\n return int(ret)\n else:\n raise NotImplementedError(\"fn and uz must be both False or True.\")",
"def _decode_double(fp):\n return struct.unpack('>d', fp.read(8))[0]",
"def vMantissa(aNumVec, scale, nScaleBits=3, nMantBits=5):\n\n mantissaVec = np.zeros_like(\n aNumVec, dtype=int) # REMOVE THIS LINE WHEN YOUR FUNCTION IS DONE\n\n ### YOUR CODE STARTS HERE ###\n R = 2**nScaleBits - 1 + nMantBits\n s_code = vQuantizeUniform(aNumVec, R)\n s = np.bitwise_and(s_code, (1 << (R - 1)))\n s[s > 0] = 1 << (nMantBits - 1)\n code = np.bitwise_and(s_code, (2**(R - 1) - 1))\n if scale == (2**nScaleBits - 1):\n mantissaVec = s + (code & (2**(nMantBits - 1) - 1))\n else:\n mantissaVec = s + ((code >> (R - scale - nMantBits)) &\n ((2**(nMantBits - 1) - 1)))\n ### YOUR CODE ENDS HERE ###\n\n return mantissaVec",
"def Dequantize(scale, mantissa, nScaleBits=3, nMantBits=5):\n\n aNum = 0.0 # REMOVE THIS LINE WHEN YOUR FUNCTION IS DONE\n\n ### YOUR CODE STARTS HERE ###\n R = 2**nScaleBits - 1 + nMantBits\n aNum = 0\n s = mantissa & (1 << (nMantBits - 1))\n if s > 0:\n s = 1 << (R - 1)\n code = mantissa & (2**(nMantBits - 1) - 1)\n aNum += s\n aNum += code << max(R - scale - nMantBits, 0)\n\n if scale < (2**nScaleBits - 1):\n if code > 0:\n shift = R - scale - nMantBits - 1\n aNum += 1 << shift\n aNum = DequantizeUniform(aNum, R)\n ### YOUR CODE ENDS HERE ###\n\n return aNum",
"def float_to_bin64(value):\n [d] = struct.unpack(\">Q\", struct.pack(\">d\", value))\n return \"{:064b}\".format(d)",
"def float_to_fp(x, precision='single'):\n\n # Zero\n if x == 0:\n return 0\n\n # Inf\n if math.isinf(x):\n s = '0' if x > 0 else '1'\n return int(s + '1' * _Exponent_bits[precision] + '0' * _Fraction_bits[precision], 2)\n\n # NaN\n if math.isnan(x):\n return int('0' + '1' * _Exponent_bits[precision] + '1' * _Fraction_bits[precision], 2)\n\n if not float_in_range(x, precision):\n raise ValueError(\"Value out of range for precision\")\n\n # Get exponent and upper fraction\n l = abs(int(x)) # TODO check abs()\n f_upper = bin(l)[3:] # remove 0b1 (includes leading 1 implied in fp)\n e = bin(len(f_upper) + _Bias[precision])[2:2 + _Exponent_bits[precision]]\n\n # Get lower fraction\n r = abs(x) - l # TODO check abs()\n fraction_bits = len(f_upper)\n f_lower = ''\n while r != 0.0 and fraction_bits <= _Fraction_bits[precision]:\n r *= 2\n fraction_bits += 1\n f_lower = f_lower + str(int(r))\n r -= int(r)\n\n # Get sign and join\n sign = '1' if x < 0 else '0'\n res = zfill_right(sign + e + f_upper + f_lower, _Bitwidth[precision])\n return int(res, 2)",
"def num_to_mant_exp( num ):\n try:\n exponent = math.floor(math.log10(abs(num)))\n except ValueError: # Case of log10(0)\n return (0, 0) # Convention: 0 = 0*10^0\n mantissa = num/10**exponent\n\n return (mantissa, int(exponent))",
"def float_pack(x, size, order=LITTLE_ENDIAN):\n\n if size == 8:\n MIN_EXP = -1021 # = sys.float_info.min_exp\n MAX_EXP = 1024 # = sys.float_info.max_exp\n MANT_DIG = 53 # = sys.float_info.mant_dig\n BITS = 64\n elif size == 4:\n MIN_EXP = -125 # C's FLT_MIN_EXP\n MAX_EXP = 128 # FLT_MAX_EXP\n MANT_DIG = 24 # FLT_MANT_DIG\n BITS = 32\n else:\n raise ValueError(\"invalid size value\")\n\n sign = math.copysign(1.0, x) < 0.0\n if math.isinf(x):\n mant = 0\n exp = MAX_EXP - MIN_EXP + 2\n elif math.isnan(x):\n mant = 1 << (MANT_DIG-2) # other values possible\n exp = MAX_EXP - MIN_EXP + 2\n elif x == 0.0:\n mant = 0\n exp = 0\n else:\n m, e = math.frexp(abs(x)) # abs(x) == m * 2**e\n exp = e - (MIN_EXP - 1)\n if exp > 0:\n # Normal case.\n mant = round_to_nearest(m * (1 << MANT_DIG))\n mant -= 1 << MANT_DIG - 1\n else:\n # Subnormal case.\n if exp + MANT_DIG - 1 >= 0:\n mant = round_to_nearest(m * (1 << exp + MANT_DIG - 1))\n else:\n mant = 0\n exp = 0\n\n # Special case: rounding produced a MANT_DIG-bit mantissa.\n assert 0 <= mant <= 1 << MANT_DIG - 1\n if mant == 1 << MANT_DIG - 1:\n mant = 0\n exp += 1\n\n # Raise on overflow (in some circumstances, may want to return\n # infinity instead).\n if exp >= MAX_EXP - MIN_EXP + 2:\n raise OverflowError(\"float too large to pack in this format\")\n\n # check constraints\n assert 0 <= mant < 1 << MANT_DIG - 1\n assert 0 <= exp <= MAX_EXP - MIN_EXP + 2\n assert 0 <= sign <= 1\n return ((sign << BITS - 1) | (exp << MANT_DIG - 1)) | mant",
"def _decode_float(fp):\n return struct.unpack('>f', fp.read(4))[0]",
"def ts_float32(val):\n return np.float64(val)",
"def float32_to_float8e4m3( # pylint: disable=too-many-statements\n fval: float,\n scale: float = 1.0,\n fn: bool = True,\n uz: bool = False,\n saturate: bool = True,\n) -> int:\n if not fn:\n raise NotImplementedError(\n \"float32_to_float8e4m3 not implemented with fn=False.\"\n )\n x = fval / scale\n b = int.from_bytes(struct.pack(\"<f\", np.float32(x)), \"little\")\n ret = (b & 0x80000000) >> 24 # sign\n if uz:\n if (b & 0x7FC00000) == 0x7FC00000:\n return 0x80\n if np.isinf(x):\n if saturate:\n return ret | 127\n return 0x80\n e = (b & 0x7F800000) >> 23 # exponent\n m = b & 0x007FFFFF # mantissa\n\n if e != 0:\n if e < 116:\n pass\n elif e < 120:\n # denormalized number\n ex = e - 119\n if ex >= -2:\n ret |= 1 << (2 + ex)\n ret |= m >> (21 - ex)\n elif m > 0:\n ret |= 1\n mask = 1 << (20 - ex)\n if m & mask and ( # pylint: disable=too-many-boolean-expressions\n ret & 1\n or m & (mask - 1) > 0\n or (m & mask and m & (mask << 1) and m & (mask - 1) == 0)\n ):\n # rounding\n ret += 1\n elif e < 135:\n # normalized number\n ex = e - 119 # 127 - 8\n if ex == 0:\n ret |= 0x4\n ret |= m >> 21\n else:\n ret |= ex << 3\n ret |= m >> 20\n if m & 0x80000 and ((m & 0x100000) or (m & 0x7FFFF)):\n if (ret & 0x7F) < 0x7F:\n # rounding\n ret += 1\n elif not saturate:\n return 0x80\n elif saturate:\n ret |= 0x7F # 01111110\n else:\n ret = 0x80\n elif m == 0:\n # -0\n ret = 0\n return int(ret)\n else:\n if (b & 0x7FC00000) == 0x7FC00000:\n return 0x7F | ret\n if np.isinf(x):\n if saturate:\n return ret | 126\n return 0x7F | ret\n e = (b & 0x7F800000) >> 23 # exponent\n m = b & 0x007FFFFF # mantissa\n\n if e != 0:\n if e < 117:\n pass\n elif e < 121:\n # denormalized number\n ex = e - 120\n if ex >= -2:\n ret |= 1 << (2 + ex)\n ret |= m >> (21 - ex)\n elif m > 0:\n ret |= 1\n mask = 1 << (20 - ex)\n if m & mask and ( # pylint: disable=too-many-boolean-expressions\n ret & 1\n or m & (mask - 1) > 0\n or (m & mask and m & (mask << 1) and m & (mask - 1) == 0)\n ):\n # rounding\n ret += 1\n elif e < 136:\n # normalized number\n ex = e - 120\n if ex == 0:\n ret |= 0x4\n ret |= m >> 21\n else:\n ret |= ex << 3\n ret |= m >> 20\n if (ret & 0x7F) == 0x7F:\n ret &= 0xFE\n if (m & 0x80000) and ((m & 0x100000) or (m & 0x7FFFF)):\n if (ret & 0x7F) < 0x7E:\n # rounding\n ret += 1\n elif not saturate:\n ret |= 0x7F\n elif saturate:\n ret |= 126 # 01111110\n else:\n ret |= 0x7F\n return int(ret)",
"def fp_to_float(fp, precision='single'):\n\n if precision not in ('half', 'single', 'double', 'quad'):\n raise ValueError(\"Precision must be one of 'half', 'single', 'double', or 'quad\")\n if not isinstance(fp, int):\n raise TypeError(\"fp must be an integer\")\n\n fp = bin(fp)[2:].zfill(_Bitwidth[precision])\n s = fp[0]\n e = fp[1:1 + _Exponent_bits[precision]]\n f = fp[1 + _Exponent_bits[precision]:]\n\n if e == '0' * _Exponent_bits[precision]:\n if f == '0' * _Fraction_bits[precision]:\n return 0.0\n else:\n raise ValueError(\"Subnormal number not supported\")\n elif e == '1' * _Exponent_bits[precision]:\n if f == '0' * _Fraction_bits[precision]:\n return math.inf if s == '0' else -math.inf\n else:\n # Or float('nan') (Using math.nan permits object comparision, i.e. x is math.nan)\n return math.nan\n\n ev = 2 ** (int(e, 2) - _Bias[precision])\n fv = 1 + (int(f, 2) / 2 ** _Fraction_bits[precision])\n v = ev * fv\n return v if s == '0' else -v",
"def test_float_storage():\n values = [2.3434, 124012.2323209999, -12.39212445433389]\n\n for value in values:\n sign, exp, mantissa = to_sign_exponent_mantissa(value)\n restored_value = from_sign_exponent_mantissa(sign, exp, mantissa)\n print(restored_value)\n assert(value == restored_value)",
"def _real_to_int(d):\n\n if d < 0:\n sign = 0x8000000000000000\n else:\n sign = 0\n\n exponent = log(d, 16)\n if (exponent < 0):\n exponent = ceil(exponent)\n else: # exponent > 0\n exponent = floor(exponent) + 1\n d = d / (16 ** exponent)\n\n mantissa = getMantissa(d)\n\n return sign | (int(exponent) + 64) << 56 | mantissa #updated for Python2 compatibility\n #return sign | (exponent + 64) << 56 | mantissa",
"def compress(self,float32):\n\n F16_EXPONENT_BITS = 0x1F\n F16_EXPONENT_SHIFT = 10\n F16_EXPONENT_BIAS = 15\n F16_MANTISSA_BITS = 0x3ff\n F16_MANTISSA_SHIFT = (23 - F16_EXPONENT_SHIFT)\n F16_MAX_EXPONENT = (F16_EXPONENT_BITS << F16_EXPONENT_SHIFT)\n\n if type(float32) == float:\n f32 = self.unpack(float32)\n else:\n f32 = float32\n f16 = 0\n sign = (f32 >> 16) & 0x8000\n exponent = ((f32 >> 23) & 0xff) - 127\n mantissa = f32 & 0x007fffff\n \n if exponent == 128:\n f16 = sign | F16_MAX_EXPONENT\n if mantissa:\n f16 |= (mantissa & F16_MANTISSA_BITS)\n elif exponent > 15:\n f16 = sign | F16_MAX_EXPONENT\n elif exponent > -15:\n exponent += F16_EXPONENT_BIAS\n mantissa >>= F16_MANTISSA_SHIFT\n f16 = sign | exponent << F16_EXPONENT_SHIFT | mantissa\n else:\n f16 = sign\n return f16",
"def _cast_to_float64(matrix):\n return matrix.astype(np.float64) if matrix.dtype != np.float64 else matrix",
"def get_eps_float32():\n\n return np.finfo(np.float32).eps",
"def _eight_byte_real(value):\n if value == 0:\n return b'\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00'\n if value < 0:\n byte1 = 0x80\n value = -value\n else:\n byte1 = 0x00\n fexp = numpy.log2(value) / 4\n exponent = int(numpy.ceil(fexp))\n if fexp == exponent:\n exponent += 1\n mantissa = int(value * 16.**(14 - exponent))\n byte1 += exponent + 64\n byte2 = (mantissa // 281474976710656)\n short3 = (mantissa % 281474976710656) // 4294967296\n long4 = mantissa % 4294967296\n return struct.pack(\">HHL\", byte1 * 256 + byte2, short3, long4)",
"def to_fp16(self, params: Union[Dict, FrozenDict], mask: Any = None):\n return self._cast_floating_to(params, jnp.float16, mask)",
"def u64(d):\n return unpack('<Q', d)[0]",
"def p64(d):\n return pack('<Q', d)",
"def bin_to_float(b):\n bf = int_to_bytes(int(b, 2), 8) # 8 bytes needed for IEEE 754 binary64.\n return struct.unpack('>d', bf)[0]",
"def DecryptFloat(self, ciphertext):\n original_plaintext = self.Decrypt(ciphertext)\n plaintext = original_plaintext\n mantissa_and_exponent = plaintext & _ONES_FLOAT_SIGN_LOW_LSB\n plaintext >>= FLOAT_SIGN_LOW_LSB # >>= 831\n sign_low32 = plaintext & 0xffffffff\n plaintext >>= 32\n sign_high32 = plaintext & 0xffffffff\n plaintext >>= 32\n # carry_over32 = plaintext & 0xffffffff\n plaintext >>= 32\n minus_inf32 = plaintext & 0xffffffff\n plaintext >>= 32\n plus_inf32 = plaintext & 0xffffffff\n plaintext >>= 32\n nan_32 = plaintext & 0xffffffff\n if nan_32 > 0:\n return float('nan')\n # adding a +inf and -inf should return a nan\n if plus_inf32 > 0 and minus_inf32 > 0:\n return float('nan')\n if plus_inf32 > 0:\n return float('inf')\n if minus_inf32 > 0:\n return float('-inf')\n if sign_high32 == 0 and sign_low32 > 0:\n # This indicates that positive overflow has happened, mimic ieee float\n # behaviour and return +inf.\n return float('inf')\n if sign_high32 == 0xffffffff and sign_low32 < 0xffffffff:\n # This indicates that negative overflow has happened, mimic ieee float\n # behaviour and return -inf.\n return float('-inf')\n if sign_high32 == 0 and sign_low32 == 0:\n # positive finite number.\n if mantissa_and_exponent == 0L:\n return float(0)\n size = len(bin(mantissa_and_exponent)) - 2 # -2 to remove prepended 0b\n if size >= MANTISSA_BITS:\n # take the first 53 bits and remove the leading 1 bit i.e 52 bits.\n new_mantissa = ((mantissa_and_exponent >> (size - MANTISSA_BITS))\n & 0xfffffffffffff)\n else:\n # take all the bits and shift left to make it a normal number,\n # the exponent also gets updated appropriately.\n new_mantissa = ((mantissa_and_exponent << (MANTISSA_BITS - size))\n & 0xfffffffffffff)\n new_exponent = ((size - MANTISSA_BITS) - FLOAT_MANTISSA_ZERO +\n EXPONENT_BIAS)\n new_value = (new_exponent << EXPLICIT_MANTISSA_BITS) | new_mantissa\n return struct.unpack('d', struct.pack('Q', new_value))[0]\n if sign_high32 == 0xffffffff and sign_low32 == 0xffffffff:\n # negative finite number.\n # - first find the positive value of the number by taking the 2s\n # complement of the 895 bit integer.\n num = original_plaintext & _ONES_CARRYOVER_LSB\n positive_895bit_value = (num ^ _ONES_CARRYOVER_LSB) + 1L\n # - final value will mostly be a 831 bit number or smaller except if\n # 831 bits are all zero which represents -2^831 and gives a 2's complement\n # positive value of 2^831, we detect this case and return -inf.\n positive_832bit_value = positive_895bit_value & _ONES_832\n if positive_832bit_value >> FLOAT_SIGN_LOW_LSB: # >> 831:\n return float('-inf')\n size = len(bin(positive_832bit_value)) - 2\n if size >= MANTISSA_BITS:\n # take the first 53 bits and remove the leading 1 bit.\n new_mantissa = ((positive_832bit_value >> (size - MANTISSA_BITS))\n & 0xfffffffffffff)\n else:\n # take all the bits and shift left to make it a normal number,\n # the exponent also gets updated appropriately.\n new_mantissa = ((positive_832bit_value << (MANTISSA_BITS - size))\n & 0xfffffffffffff)\n new_exponent = ((size - MANTISSA_BITS) - FLOAT_MANTISSA_ZERO +\n EXPONENT_BIAS)\n new_value = ((new_exponent << EXPLICIT_MANTISSA_BITS) | new_mantissa |\n (1 << (EXPLICIT_MANTISSA_BITS + EXPONENT_BITS)))\n return struct.unpack('d', struct.pack('Q', new_value))[0]\n raise ValueError('Got an unusual decrypted value either nan, inf or sign '\n 'bits aren\\'t set correctly: %s' % hex(original_plaintext))"
]
| [
"0.6564609",
"0.61634827",
"0.592686",
"0.5826476",
"0.57423556",
"0.5709573",
"0.55164057",
"0.54724276",
"0.5424843",
"0.5373116",
"0.5356698",
"0.5350474",
"0.52375185",
"0.52315235",
"0.51248753",
"0.5092813",
"0.5047708",
"0.5032996",
"0.5011347",
"0.49896455",
"0.49791184",
"0.495157",
"0.49406797",
"0.49403185",
"0.4912811",
"0.49024177",
"0.4882707",
"0.4870206",
"0.48622322",
"0.48515242"
]
| 0.644643 | 1 |
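For reference, a minimal self-contained sketch of the decomposition that pairs with the `from_sign_exponent_mantissa` document in the row above, assuming the same biased-exponent convention; the constant names `EXPONENT_BITS`/`MANTISSA_BITS` and the round-trip check are illustrative additions, not part of the dataset row.

```python
import math

# Illustrative field widths for IEEE-754 binary64 (assumption: 11 exponent bits,
# 53 mantissa bits, matching the defaults implied by the row above).
EXPONENT_BITS = 11
MANTISSA_BITS = 53

def to_sign_exponent_mantissa(value, exponent_bits=EXPONENT_BITS, mantissa_bits=MANTISSA_BITS):
    # math.frexp returns (m, e) with value == m * 2**e and 0.5 <= |m| < 1 (or m == 0).
    m, e = math.frexp(value)
    sign = 1 if m < 0 else 0
    exponent = e + 2 ** (exponent_bits - 1)       # bias the exponent into an unsigned range
    mantissa = int(abs(m) * 2 ** mantissa_bits)   # scale the fraction to an unsigned integer
    return sign, exponent, mantissa

def from_sign_exponent_mantissa(sign, exponent, mantissa, exponent_bits=EXPONENT_BITS, mantissa_bits=MANTISSA_BITS):
    signed_mantissa = -mantissa if sign else mantissa
    signed_exponent = exponent - 2 ** (exponent_bits - 1)   # undo the bias
    return math.ldexp(signed_mantissa / float(2 ** mantissa_bits), signed_exponent)

if __name__ == "__main__":
    # Both directions only shift exponents and scale by powers of two, so the
    # round trip is exact for finite binary64 values.
    for x in (2.3434, 124012.2323209999, -12.39212445433389):
        assert from_sign_exponent_mantissa(*to_sign_exponent_mantissa(x)) == x
```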
Check that the storage of a float as sign, exponent and mantissa and back produces exactly the original number. | def test_float_storage():
values = [2.3434, 124012.2323209999, -12.39212445433389]
for value in values:
sign, exp, mantissa = to_sign_exponent_mantissa(value)
restored_value = from_sign_exponent_mantissa(sign, exp, mantissa)
print(restored_value)
assert(value == restored_value) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_for_float(check):",
"def testfloat ( self ):\r\n\t\tr = re.compile ( 'frac' )\r\n\t\tfor fracTup1, expRes in self.knownFloatValues:\r\n\t\t\tfrac1 = eval ( r.sub ( 'frac.frac', fracTup1 ) ) \r\n\t\t\tself.assertAlmostEqual ( float ( frac1 ), expRes )",
"def check_for_float_and_int(check):",
"def test_float():\n floatify = fields.FloatField().adapt\n\n for input, expect in [\n (1.1, 1.1),\n (11, 11.0),\n (int(5.7), 5)\n ]:\n assert floatify(input) == expect",
"def is_float(space, w_obj):\n return space.wrap(w_obj.tp == space.tp_float)",
"def test_float(self):\n self.assertFalse(validate_measure_input('0.0', self.measures))\n self.assertFalse(validate_measure_input('1.0', self.measures))\n self.assertFalse(validate_measure_input('1.1', self.measures))",
"def checkFloat(comment, value, expected, tol=1e-10, update=True):\n if np.isnan(value) and np.isnan(expected):\n res = True\n elif np.isnan(value) or np.isnan(expected):\n res = False\n else:\n res = abs(value - expected) <= tol\n if update:\n if not res:\n print(\"checking float\",comment,'|',value,\"!=\",expected)\n results[\"fail\"] += 1\n else:\n results[\"pass\"] += 1\n return res",
"def fp_eq(x: float, y: float) -> bool:\n return fabs(x-y) < 10**-12",
"def is_float_like(val):\n try:\n return str(float(val)) == str(val)\n except Exception:\n return False",
"def _has_numeric_strict(self) -> bool:\n return bool({'i', 'f'} & self._data.keys())",
"def test_wiki_toc_isfloat_true(self):\n from .wiki_toc import isfloat\n value = isfloat(value='40.22222')\n self.assertTrue(value is True)",
"def test_float_type(self):\n\n input_ = 1.2\n expected = ValueError\n with self.assertRaises(expected):\n math.factorial(input_)",
"def could_be_float(val):\n if val == None:\n return False\n\n if isinstance(val, float):\n return True\n\n # allow coercion from str\n if isinstance(val, (str, unicode)):\n try:\n f = float(val)\n if not isinstance(f, float):\n raise ValueError\n else:\n return True\n except:\n return False\n\n #otherwise\n return False",
"def isFloat(value): \n try:\n float(value)\n return True\n except ValueError:\n return False",
"def test_wiki_toc_isfloat_false(self):\n from .wiki_toc import isfloat\n value = isfloat(value='test_float')\n self.assertTrue(value is False)",
"def isfloat(s):\n try:\n x = float(s)\n return True\n except:\n return False",
"def testtofloatString ( self ):\r\n\t\tr = re.compile ( 'frac' )\r\n\t\tfor fracTup1, expRes in self.knownFloatStringValues:\r\n\t\t\tfrac1 = eval ( r.sub ( 'frac.frac', fracTup1 ) ) \r\n\t\t\tself.assertEqual ( frac1.tofloatString (), expRes )",
"def is_float(val):\n try:\n float(val)\n return True\n except ValueError:\n return False",
"def check_pos_float(v):\n status = True\n try:\n val = float(v)\n if val <= 0:\n status = False\n except ValueError:\n status = False\n return status",
"def is_float(x):\r\n try:\r\n float(x)\r\n except ValueError:\r\n return False\r\n return True",
"def assert_floats_are_equal(a, b, tol=1e-5):\r\n assert floats_are_equal(a, b, tol), (a,b)",
"def testpowValidInput ( self ):\r\n\t\t\"\"\"1/2 1/2 <type 'instance'> <type 'instance'>\r\n\t\tAssertionError: 0.70710678118654746 != 0.70710678118654757 within 16 places\"\"\"\r\n\t\tr = re.compile ( 'frac' )\r\n\t\tfor fracBase, fracPow, expRes in self.knownPowValidInputValues:\r\n\t\t\tfrac1 = eval ( r.sub ( 'frac.frac', fracBase ) )\r\n\t\t\tfrac2 = eval ( r.sub ( 'frac.frac', fracPow ) )\r\n#\t\t\tprint frac1, frac2, type ( frac1), type ( frac2) \t\r\n\t\t\tres = pow ( frac1, frac2)\r\n\t\t\tif isinstance ( res, frac.frac ):\r\n\t\t\t\tself.assertAlmostEqual ( float ( res ), eval ( expRes ) )\t\t\t\r\n\t\t\telse:\r\n\t\t\t\tself.assertAlmostEqual ( res, eval ( expRes ) )",
"def is_float(self, val):\n try:\n float(val)\n return True\n except ValueError:\n return False",
"def is_float(self, input):\n try:\n float(input)\n return True\n except ValueError:\n return False",
"def test_floats(self):\r\n problem_setup = [\r\n # [given_answer, [list of correct responses], [list of incorrect responses]]\r\n [1, [\"1\"], [\"1.1\"]],\r\n [2.0, [\"2.0\"], [\"1.0\"]],\r\n [4, [\"4.0\", \"4.00004\"], [\"4.00005\"]],\r\n [0.00016, [\"1.6*10^-4\"], [\"\"]],\r\n [0.000016, [\"1.6*10^-5\"], [\"0.000165\"]],\r\n [1.9e24, [\"1.9*10^24\"], [\"1.9001*10^24\"]],\r\n [2e-15, [\"2*10^-15\"], [\"\"]],\r\n [3141592653589793238., [\"3141592653589793115.\"], [\"\"]],\r\n [0.1234567, [\"0.123456\", \"0.1234561\"], [\"0.123451\"]],\r\n [1e-5, [\"1e-5\", \"1.0e-5\"], [\"-1e-5\", \"2*1e-5\"]],\r\n ]\r\n for given_answer, correct_responses, incorrect_responses in problem_setup:\r\n problem = self.build_problem(answer=given_answer)\r\n self.assert_multiple_grade(problem, correct_responses, incorrect_responses)",
"def test_float_log(self):\n htype = h5t.py_create('f', logical=True)\n self.assertIsInstance(htype, h5t.TypeFloatID)",
"def DecryptFloat(self, ciphertext):\n original_plaintext = self.Decrypt(ciphertext)\n plaintext = original_plaintext\n mantissa_and_exponent = plaintext & _ONES_FLOAT_SIGN_LOW_LSB\n plaintext >>= FLOAT_SIGN_LOW_LSB # >>= 831\n sign_low32 = plaintext & 0xffffffff\n plaintext >>= 32\n sign_high32 = plaintext & 0xffffffff\n plaintext >>= 32\n # carry_over32 = plaintext & 0xffffffff\n plaintext >>= 32\n minus_inf32 = plaintext & 0xffffffff\n plaintext >>= 32\n plus_inf32 = plaintext & 0xffffffff\n plaintext >>= 32\n nan_32 = plaintext & 0xffffffff\n if nan_32 > 0:\n return float('nan')\n # adding a +inf and -inf should return a nan\n if plus_inf32 > 0 and minus_inf32 > 0:\n return float('nan')\n if plus_inf32 > 0:\n return float('inf')\n if minus_inf32 > 0:\n return float('-inf')\n if sign_high32 == 0 and sign_low32 > 0:\n # This indicates that positive overflow has happened, mimic ieee float\n # behaviour and return +inf.\n return float('inf')\n if sign_high32 == 0xffffffff and sign_low32 < 0xffffffff:\n # This indicates that negative overflow has happened, mimic ieee float\n # behaviour and return -inf.\n return float('-inf')\n if sign_high32 == 0 and sign_low32 == 0:\n # positive finite number.\n if mantissa_and_exponent == 0L:\n return float(0)\n size = len(bin(mantissa_and_exponent)) - 2 # -2 to remove prepended 0b\n if size >= MANTISSA_BITS:\n # take the first 53 bits and remove the leading 1 bit i.e 52 bits.\n new_mantissa = ((mantissa_and_exponent >> (size - MANTISSA_BITS))\n & 0xfffffffffffff)\n else:\n # take all the bits and shift left to make it a normal number,\n # the exponent also gets updated appropriately.\n new_mantissa = ((mantissa_and_exponent << (MANTISSA_BITS - size))\n & 0xfffffffffffff)\n new_exponent = ((size - MANTISSA_BITS) - FLOAT_MANTISSA_ZERO +\n EXPONENT_BIAS)\n new_value = (new_exponent << EXPLICIT_MANTISSA_BITS) | new_mantissa\n return struct.unpack('d', struct.pack('Q', new_value))[0]\n if sign_high32 == 0xffffffff and sign_low32 == 0xffffffff:\n # negative finite number.\n # - first find the positive value of the number by taking the 2s\n # complement of the 895 bit integer.\n num = original_plaintext & _ONES_CARRYOVER_LSB\n positive_895bit_value = (num ^ _ONES_CARRYOVER_LSB) + 1L\n # - final value will mostly be a 831 bit number or smaller except if\n # 831 bits are all zero which represents -2^831 and gives a 2's complement\n # positive value of 2^831, we detect this case and return -inf.\n positive_832bit_value = positive_895bit_value & _ONES_832\n if positive_832bit_value >> FLOAT_SIGN_LOW_LSB: # >> 831:\n return float('-inf')\n size = len(bin(positive_832bit_value)) - 2\n if size >= MANTISSA_BITS:\n # take the first 53 bits and remove the leading 1 bit.\n new_mantissa = ((positive_832bit_value >> (size - MANTISSA_BITS))\n & 0xfffffffffffff)\n else:\n # take all the bits and shift left to make it a normal number,\n # the exponent also gets updated appropriately.\n new_mantissa = ((positive_832bit_value << (MANTISSA_BITS - size))\n & 0xfffffffffffff)\n new_exponent = ((size - MANTISSA_BITS) - FLOAT_MANTISSA_ZERO +\n EXPONENT_BIAS)\n new_value = ((new_exponent << EXPLICIT_MANTISSA_BITS) | new_mantissa |\n (1 << (EXPLICIT_MANTISSA_BITS + EXPONENT_BITS)))\n return struct.unpack('d', struct.pack('Q', new_value))[0]\n raise ValueError('Got an unusual decrypted value either nan, inf or sign '\n 'bits aren\\'t set correctly: %s' % hex(original_plaintext))",
"def is_valid(self):\n return (4 * (self.a ** 3) + 27 * (self.b ** 2)) % self.fp != 0",
"def test_float_single_precision(self):\n data = service_call.encode_call(\"foo\", 1. + 1e-8)\n name, args = service_call.decode_call(data)\n\n # Check that we have the marker for single precision float\n self.assertEqual(1., args)",
"def ok_floats():\n # copy float list so we don't change the numpy global\n floats = np.sctypes['float'][:]\n if best_float() != np.longdouble and np.longdouble in floats:\n floats.remove(np.longdouble)\n return sorted(floats, key=lambda f: type_info(f)['nmant'])"
]
| [
"0.6891519",
"0.66194546",
"0.6434158",
"0.63923424",
"0.62414616",
"0.6237944",
"0.6170298",
"0.6094164",
"0.6040777",
"0.594754",
"0.5923214",
"0.59174234",
"0.5906689",
"0.5906176",
"0.5862436",
"0.58263683",
"0.58231926",
"0.58215433",
"0.58195156",
"0.5811164",
"0.5808681",
"0.57977",
"0.5794806",
"0.57700676",
"0.5761815",
"0.57591856",
"0.5756367",
"0.57447416",
"0.57386845",
"0.5725125"
]
| 0.7909229 | 0 |
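As a cross-check on the round-trip test in the row above, here is an alternative, bit-level sketch that uses `struct` to split a binary64 into its raw 1/11/52-bit fields and reassemble it; the helper names are hypothetical and the test values simply mirror the ones in the row.

```python
import struct

def raw_fields(value):
    # Reinterpret the 64 bits of an IEEE-754 double and split them into the
    # 1-bit sign, 11-bit biased exponent and 52-bit stored fraction.
    (bits,) = struct.unpack(">Q", struct.pack(">d", value))
    sign = bits >> 63
    exponent = (bits >> 52) & 0x7FF
    fraction = bits & ((1 << 52) - 1)
    return sign, exponent, fraction

def from_raw_fields(sign, exponent, fraction):
    # Pack the three fields back into 64 bits and reinterpret them as a double.
    bits = (sign << 63) | (exponent << 52) | fraction
    (value,) = struct.unpack(">d", struct.pack(">Q", bits))
    return value

if __name__ == "__main__":
    for x in (2.3434, 124012.2323209999, -12.39212445433389):
        assert from_raw_fields(*raw_fields(x)) == x
```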
Inverse projection of the projected map to a healpix spherical map. | def inv_projmap(self, img, nside=None):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def inv_projmap(self, img, nside=None):\n pass\n\n ysize, xsize = img.shape\n\n if nside is None:\n lonra = self.arrayinfo['lonra']\n latra = self.arrayinfo['latra']\n npix = np.int((360.0 * xsize / (lonra[1] - lonra[0])) * (180.0 * ysize / (latra[1] - latra[0]))) # the total pixel\n nside = 2**np.int(np.ceil(np.log2(np.sqrt(npix/12.0)) - 1))\n\n npix = 12 * nside**2\n hpmap = np.zeros(npix, dtype=img.dtype)\n theta, phi = pixelfunc.pix2ang(nside, np.arange(npix)) # in radians, theta: [0, pi], phi: [0. 2pi]\n x = np.degrees(phi)\n x = -np.where(x>180.0, x-360.0, x) # [-180.0, 180.0]\n # x = np.degrees(phi) - 180.0 # [-180.0, 180.0]\n y = -np.degrees(theta) + 90.0 # [-90.0, 90.0]\n # y = np.degrees(theta) - 90.0 # [-90.0, 90.0]\n for pix in np.arange(npix):\n i, j = self.xy2ij(x[pix], y[pix])\n if i is not None and j is not None:\n hpmap[pix] = img[i, j]\n\n return hpmap",
"def reproject_map(nside, phi, healpix_array=None):\n\n vec = hp.pix2vec(nside, np.arange(hp.nside2npix(nside)))\n eu_mat = euler(-phi, 0, 0, deg=True)\n rot_map = hp.rotator.rotateVector(eu_mat, vec)\n new_hp_inds = hp.vec2pix(nside, rot_map[0], rot_map[1], rot_map[2])\n\n return healpix_array[new_hp_inds]",
"def Inverse(self, lat1, lon1, lat2, lon2,\n outmask = GeodesicCapability.STANDARD):\n\n a12, s12, salp1,calp1, salp2,calp2, m12, M12, M21, S12 = self._GenInverse(\n lat1, lon1, lat2, lon2, outmask)\n outmask &= Geodesic.OUT_MASK\n if outmask & Geodesic.LONG_UNROLL:\n lon12, e = Math.AngDiff(lon1, lon2)\n lon2 = (lon1 + lon12) + e\n else:\n lon2 = Math.AngNormalize(lon2)\n result = {'lat1': Math.LatFix(lat1),\n 'lon1': lon1 if outmask & Geodesic.LONG_UNROLL else\n Math.AngNormalize(lon1),\n 'lat2': Math.LatFix(lat2),\n 'lon2': lon2}\n result['a12'] = a12\n if outmask & Geodesic.DISTANCE: result['s12'] = s12\n if outmask & Geodesic.AZIMUTH:\n result['azi1'] = Math.atan2d(salp1, calp1)\n result['azi2'] = Math.atan2d(salp2, calp2)\n if outmask & Geodesic.REDUCEDLENGTH: result['m12'] = m12\n if outmask & Geodesic.GEODESICSCALE:\n result['M12'] = M12; result['M21'] = M21\n if outmask & Geodesic.AREA: result['S12'] = S12\n return result",
"def invmap(self, lat, long):\r\n r1 = self._r1\r\n r2 = self._r2\r\n direction = self._direction\r\n\r\n return self.map_aux(lat, long, direction, r2, r1)",
"def invmap(self, x, y):\r\n r = self._r\r\n rxy = np.sqrt(x*x + y*y)\r\n\r\n lat = (rxy/r)*(np.pi/2)\r\n long = np.arctan2(y, x)\r\n\r\n return (lat, long)",
"def inverse_mapping(self):\n return self._inverse_mapping",
"def invgeochart(w):\n # u = torch.asin(w[...,2])\n u = torch.acos(w[...,2])\n # v = torch.acos(w[...,0]/torch.cos(u))\n v = torch.atan(w[...,1]/w[...,0])\n return torch.stack((u,v+np.pi))",
"def inverse_warp(self, x, depth, pose, intrinsics, intrinsics_inv):\n B, C, H, W = x.size()\n cam_coords = self.pixel2cam(depth, intrinsics_inv)\n proj_cam_to_src_pixel = intrinsics.bmm(pose)\n src_pixel_coords = self.cam2pixel(cam_coords, proj_cam_to_src_pixel)\n #grid = F.affine_grid(theta, x.size())\n x = F.grid_sample(x, src_pixel_coords)\n\n return x",
"def get_proj(self):\n relev, razim = np.pi * self.elev/180, np.pi * self.azim/180\n\n xmin, xmax = self.get_xlim3d()/self.pbaspect[0]\n ymin, ymax = self.get_ylim3d()/self.pbaspect[1]\n zmin, zmax = self.get_zlim3d()/self.pbaspect[2]\n\n # transform to uniform world coordinates 0-1.0,0-1.0,0-1.0 \n worldM = proj3d.world_transformation(xmin, xmax,\n ymin, ymax,\n zmin, zmax)\n\n # look into the middle of the new coordinates \n R = np.array([0.5, 0.5, 0.5])\n\n xp = R[0] + np.cos(razim) * np.cos(relev) * self.dist\n yp = R[1] + np.sin(razim) * np.cos(relev) * self.dist\n zp = R[2] + np.sin(relev) * self.dist\n E = np.array((xp, yp, zp))\n\n self.eye = E\n self.vvec = R - E\n self.vvec = self.vvec / proj3d.mod(self.vvec)\n\n if abs(relev) > np.pi/2:\n # upside down \n V = np.array((0, 0, -1))\n else:\n V = np.array((0, 0, 1))\n\n zfront, zback = -self.dist, self.dist\n\n viewM = proj3d.view_transformation(E, R, V)\n perspM = proj3d.persp_transformation(zfront, zback)\n M0 = np.dot(viewM, worldM)\n M = np.dot(perspM, M0)\n return M",
"def apply_inverse_map(self, transport_map, sig0):\n # Check input arrays\n transport_map = check_array(transport_map, ndim=2,\n dtype=[np.float64, np.float32])\n sig0 = check_array(sig0, ndim=2, dtype=[np.float64, np.float32],\n force_strictly_positive=True)\n\n # Initialize Radon transforms\n rad0 = radon(sig0, theta=self.theta, circle=False)\n rad1 = np.zeros_like(rad0)\n\n # Check transport map and Radon transforms are the same size\n assert_equal_shape(transport_map, rad0,\n ['transport_map', 'Radon transform of sig0'])\n\n # Loop over angles\n cdt = CDT()\n for i in range(self.theta.size):\n # Convert projection to PDF\n j0 = signal_to_pdf(rad0[:,i], epsilon=1e-8, total=1.)\n\n # Radon transform of sig1 comprised of inverse CDT of projections\n rad1[:,i] = cdt.apply_inverse_map(transport_map[:,i], j0)\n\n # Inverse Radon transform\n sig1_recon = iradon(rad1, self.theta, circle=False, filter='ramp')\n\n # Crop sig1_recon to match sig0\n sig1_recon = match_shape2d(sig0, sig1_recon)\n\n return sig1_recon",
"def polySphericalProjection(*args, imageCenter: Union[List[float, float], bool]=None,\n imageCenterX: Union[float, bool]=0.5, imageCenterY: Union[float,\n bool]=0.5, imageScale: Union[List[float, float], bool]=None,\n imageScaleU: Union[float, bool]=1.0, imageScaleV: Union[float,\n bool]=1.0, projectionCenter: Union[List[float, float, float],\n bool]=None, projectionCenterX: Union[float, bool]=0.0,\n projectionCenterY: Union[float, bool]=0.0, projectionCenterZ:\n Union[float, bool]=0.0, projectionHorizontalSweep: Union[float,\n bool]=0.0, projectionScale: Union[List[float, float], bool]=None,\n projectionScaleU: Union[float, bool]=180.0, projectionScaleV:\n Union[float, bool]=90.0, radius: Union[float, bool]=0.0, rotate:\n Union[List[float, float, float], bool]=None, rotateX: Union[float,\n bool]=0.0, rotateY: Union[float, bool]=0.0, rotateZ: Union[float,\n bool]=0.0, rotationAngle: Union[float, bool]=10.0, seamCorrect:\n bool=True, caching: bool=True, constructionHistory: bool=True,\n createNewMap: bool=True, insertBeforeDeformers: bool=True,\n keepImageRatio: bool=True, mapDirection: AnyStr=\"\", name: AnyStr=\"\",\n nodeState: Union[int, bool]=0, perInstance: bool=True, smartFit:\n bool=True, worldSpace: bool=True, q=True, query=True, e=True,\n edit=True, **kwargs)->Union[AnyStr, Any]:\n pass",
"def inv(self):\n\n self.x, self.y = self.y, self.x\n self._x_, self._y_ = self._y_, self._x_\n self.xfac, self.yfac = 1 / self.yfac, 1 / self.xfac\n self._xfac_, self._yfac_ = 1 / self._yfac_, 1 / self._xfac_\n self._u = 1 / self._u.conj()",
"def unproject(self, (x, y)):\n lng = x/EARTH_RADIUS * RAD_TO_DEG\n lat = 2 * math.atan(math.exp(y/EARTH_RADIUS)) - math.pi/2 * RAD_TO_DEG\n return (lng, lat)",
"def inverse_warping(img_initial, img_final, pts_initial, pts_final): \n \n # YOU SHOULDN'T NEED TO CHANGE THIS\n pts_final = pts_final.astype(int)\n \n projected_img = img_initial.copy()\n for i in range(3):\n sub_img_i = img_initial[:,:,i][pts_initial[:,1], pts_initial[:,0]]\n sub_img_f = img_final[:,:,i][pts_final[:,1], pts_final[:,0]]\n \n sub_img = sub_img_i*0.5 + sub_img_f*0.5\n projected_img[:,:,i][pts_initial[:,1], pts_initial[:,0]] = sub_img\n \n return projected_img",
"def sphere2img(lat,lon,latc,lonc,xcen,ycen,rSun,peff,hemi_out=False):\n # Correction of finite distance (1AU)\n sin_asd = 0.004660\n cos_asd = 0.99998914\n\n last_latc = 0.0\n cos_latc = 1.0\n sin_latc = 0.0\n\n if latc != last_latc:\n sin_latc = np.sin(latc)\n cos_latc = np.cos(latc)\n last_latc = latc\n\n sin_lat = np.sin(lat)\n cos_lat = np.cos(lat)\n cos_lat_lon = cos_lat*np.cos(lon-lonc)\n\n cos_cang = sin_lat*sin_latc + cos_latc*cos_lat_lon\n if cos_cang < 0.0:\n hemisphere = 1\n else:\n hemisphere = 0\n\n r = rSun*cos_asd/(1.0 - cos_cang*sin_asd)\n xr = r*cos_lat*np.sin(lon - lonc)\n yr = r*(sin_lat*cos_latc - sin_latc*cos_lat_lon)\n\n cospa = np.cos(peff)\n sinpa = np.sin(peff)\n xi = xr*cospa - yr*sinpa\n eta = xr*sinpa + yr*cospa\n\n xi = xi + xcen\n eta = eta + ycen\n\n if hemi_out == True:\n return xi,eta,hemisphere\n else:\n return xi,eta",
"def inverse_warp(img, depth, pose, intrinsics, intrinsics_inv, rotation_mode='euler', padding_mode='zeros', return_coordinates=False):\n check_sizes(img, 'img', 'B3HW')\n\n src_pixel_coords = get_warp_pixel_transformation(img, depth, pose, intrinsics, intrinsics_inv, rotation_mode=rotation_mode, padding_mode=padding_mode)\n projected_img = torch.nn.functional.grid_sample(img, src_pixel_coords, padding_mode=padding_mode, align_corners=False)\n if return_coordinates:\n return projected_img, src_pixel_coords\n else:\n return projected_img",
"def update_hpx_skymap_allsky(map_in, map_out):\n if map_out is None:\n in_hpx = map_in.hpx\n out_hpx = HPX.create_hpx(in_hpx.nside, in_hpx.nest, in_hpx.coordsys,\n None, in_hpx.ebins, None, in_hpx.conv, None)\n data_out = map_in.expanded_counts_map()\n print(data_out.shape, data_out.sum())\n map_out = HpxMap(data_out, out_hpx)\n else:\n map_out.data += map_in.expanded_counts_map()\n return map_out",
"def get_proj(self):\n relev, razim = np.pi * self.elev/180, np.pi * self.azim/180\n\n xmin, xmax = self.get_xlim3d()\n ymin, ymax = self.get_ylim3d()\n zmin, zmax = self.get_zlim3d()\n\n # transform to uniform world coordinates 0-1.0,0-1.0,0-1.0\n worldM = proj3d.world_transformation(xmin, xmax,\n ymin, ymax,\n zmin, zmax)\n\n # look into the middle of the new coordinates\n R = np.array([0.5, 0.5, 0.5])\n\n xp = R[0] + np.cos(razim) * np.cos(relev) * self.dist\n yp = R[1] + np.sin(razim) * np.cos(relev) * self.dist\n zp = R[2] + np.sin(relev) * self.dist\n E = np.array((xp, yp, zp))\n\n self.eye = E\n self.vvec = R - E\n self.vvec = self.vvec / proj3d.mod(self.vvec)\n\n if abs(relev) > np.pi/2:\n # upside down\n V = np.array((0, 0, -1))\n else:\n V = np.array((0, 0, 1))\n zfront, zback = -self.dist, self.dist\n\n viewM = proj3d.view_transformation(E, R, V)\n perspM = proj3d.persp_transformation(zfront, zback)\n M0 = np.dot(viewM, worldM)\n M = np.dot(perspM, M0)\n return M",
"def apply_spherical_map(img_src, mapping, output_res=(360, 640), f_fill=np.mean):\n (y_map, x_map), (fill_y_l, fill_x_l, nonzeros_l) = mapping\n\n transfo_img = np.zeros(output_res) + 128\n transfo_img[y_map, x_map] = img_src.flatten()\n\n for y, x, nonz in zip(fill_y_l, fill_x_l, nonzeros_l):\n transfo_img[y, x] = f_fill(transfo_img[nonz])\n return transfo_img",
"def Reproject(x, y, in_grid = 4326, out_grid = 32737):\n \n inProj = Proj(init='epsg:'+str(in_grid))\n outProj = Proj(init='epsg:'+str(out_grid))\n \n \n x2,y2 = transform(inProj,outProj,x,y)\n \n return x2, y2",
"def inverse(self):\n data = np.linalg.inv(self._data)\n return self.create(self.rows, self.cols, data)",
"def inverse(self):\n def inv(v):\n v[0], v[1] = v[1] , v[0]\n for v in [self.point1 , self.pointN , self.unitv, self.normalv]:\n inv(v)\n\n self.points = numpy.roll(self.points,1,axis=1)\n self.a, self.b = self.b, self.a\n self.angle = numpy.arccos( self.unitv[0] )*numpy.sign(self.unitv[1] )\n return",
"def map_ll_to_seviri(lon, lat):\n # new method\n # project lat/lon input to meteosat view, mask out of bounds data\n geos = pyproj.Proj(proj='geos', h=35785831.0,lon_0=0,lat_0=0,x_0=0,y_0=0,units='m')\n x,y = geos(lon,lat)\n x = ma.masked_equal(x,1e30)\n y = ma.masked_equal(y,1e30)\n # Convert to index. ~3000.5m per pixel, centre pixel index is [1855,1855]\n x = x/-3000.5+1855\n y = y/3000.5+1855\n return x,y\n # old method\n \"\"\"\n # Define Earth radius and geostationary orbit height in km and calucalte max\n # viewer angle\n r_sat = 42164.\n r_earth = 6378.\n zenith_max = np.arcsin(r_earth/r_sat)\n # convert lat/lon to cartesian coordinates\n x = np.cos(np.radians(lat)) * np.sin(np.radians(lon))\n y = np.sin(np.radians(lat))\n z = np.cos(np.radians(lat)) * np.cos(np.radians(lon))\n # x,y vector magnitude\n d = np.sqrt(x**2 + y**2)\n # Calculate footprint SEVIRI effective zenith angle and mask for > pi/2\n # values\n zenith = np.arctan2(d, z) + np.arctan2(r_earth*d, r_sat-r_earth*z)\n zenith_mask = np.abs(zenith) >= (0.5 * np.pi)\n # Calculate x and y viewer angles\n theta_x = np.arctan2(r_earth*x, r_sat-r_earth*z)\n theta_y = np.arctan2(r_earth*y, r_sat-r_earth*z)\n # Define SEVIRI global index range and offset\n # These should be the same on all files, but may need to check\n x_irange = 3623\n x_ioffset = 44\n y_irange = 3611\n y_ioffset = 51\n # Remap viewer angles to indexes using max viewer angle, index range and\n # offset. Note -ve theta_y as SEVIRI indexes the x-axis right to left(E-W)\n x_out = (1 - theta_x / zenith_max) * 0.5 * x_irange + x_ioffset\n y_out = (1 + theta_y / zenith_max) * 0.5 * y_irange + y_ioffset\n # Return masked arrays using the zenith angle mask\n return ma.array(x_out, mask=zenith_mask), ma.array(y_out, mask=zenith_mask)\n \"\"\"",
"def inverse( m, context = FloatContext, copy_m=True ):\n n,n_ = shape_mat(m)\n assert (n==n_) #matris should be square\n\n return solve( m, eye(n), context=context, copy_b=False, copy_a=copy_m )",
"def get_spherical_map(screen_pos, input_res=(281, 500), output_res=(360, 640), k_side=2, filling_pol=\"nonzero\"):\n assert filling_pol in [\"nonzero\", \"closest\"]\n\n screen_interp = interpolate_screen_pos(screen_pos, np.linspace(0,16, input_res[1], endpoint=True),\n np.linspace(0, 9, input_res[0], endpoint=True))\n y_inres, x_inres = input_res\n y_res, x_res = output_res\n xnew = np.linspace(screen_interp[:,:,1].min(), screen_interp[:,:,1].max(), x_res)\n ynew = np.linspace(screen_interp[:,:,0].min(), screen_interp[:,:,0].max(), y_res)\n map_img = np.zeros((y_res, x_res))\n\n y_map, x_map = np.empty(y_inres*x_inres, dtype=int), np.empty(y_inres*x_inres, dtype=int)\n for i, (y, x) in enumerate(zip(screen_interp[:,::-1,0].flatten(), screen_interp[:,::-1,1].flatten())):\n y_map[i] = np.argmin(ynew<y)\n x_map[i] = np.argmin(xnew<x)\n map_img[y_map, x_map] = 1\n\n y_nonzero, x_nonzero = np.nonzero(map_img==0) #Finds where the image is still zero\n fill_x_l, fill_y_l, nonzeros_l = [], [], []\n for y, x in zip(y_nonzero, x_nonzero):\n # Sets the limits to where to look for nonzeros pixels\n ylow, xlow = max(0, y-k_side), max(0, x-k_side)\n yhig, xhig = min(y+k_side+1, y_res), min(x+k_side+1, x_res)\n area = map_img[ylow:yhig, xlow:xhig]\n\n if np.any(area): #If there are pixels around\n fill_x_l.append(x)\n fill_y_l.append(y)\n nonz_y, nonz_x = np.nonzero(area)\n if filling_pol==\"nonzero\":\n nonzeros_l.append((nonz_y+ylow, nonz_x+xlow)) #store the nonzero slicing for later filling\n elif filling_pol==\"closest\":\n xx, yy = np.meshgrid(np.arange(xlow,xhig), np.arange(ylow,yhig))\n distances = np.sqrt((yy-y)**2+(xx-x)**2)\n idx_min = np.argmin(distances[nonz_y, nonz_x])\n nonzeros_l.append(([nonz_y[idx_min]+ylow], [nonz_x[idx_min]+xlow]))\n\n return (y_map, x_map), (fill_y_l, fill_x_l, nonzeros_l)",
"def GetInverse(self, *args):\n return _itkTranslationTransformPython.itkTranslationTransformD2_GetInverse(self, *args)",
"def invmap(self, x, y):\r\n r = self._r\r\n rxy = np.sqrt(x*x + y*y)\r\n\r\n lat = np.arccos(1-(rxy/r)**2)\r\n long = np.arctan2(y, x)\r\n\r\n try:\r\n long[np.isnan(lat)] = np.nan\r\n except TypeError: # Thrown if long is scalar\r\n if np.isnan(lat): long = np.nan\r\n return (lat, long)",
"def pinhole_projection_image_to_world(uv, z, K):\n\n u_v_1 = np.array([uv[0], uv[1], 1])\n pos = z * np.matmul(inv(K),u_v_1)\n return pos",
"def _get_proj_mat(self): \n if self._proj_mat is None:\n if self.symmetric:\n IP_mat = self.vec_space.compute_symmetric_inner_product_mat(\n self.basis_vecs)\n else:\n IP_mat = self.vec_space.compute_inner_product_mat(\n self.adjoint_basis_vecs, self.basis_vecs)\n self._proj_mat = np.linalg.inv(IP_mat)\n return self._proj_mat",
"def inverse(self):\r\n \r\n Mi=mat4()\r\n d=self.determinant()\r\n for i in range(4):\r\n for j in range(4):\r\n sign=1-((i+j)%2)*2\r\n m3=self._submat(i,j)\r\n Mi[j,i]=sign*m3.determinant()/d\r\n return Mi"
]
| [
"0.71200365",
"0.6357577",
"0.5971307",
"0.5946619",
"0.5824953",
"0.57034713",
"0.5698357",
"0.56218755",
"0.56114763",
"0.56053007",
"0.5576599",
"0.55293226",
"0.552154",
"0.5499768",
"0.5479456",
"0.5463952",
"0.5462339",
"0.544783",
"0.5424593",
"0.5414713",
"0.53791064",
"0.53522205",
"0.53378284",
"0.5321894",
"0.5290251",
"0.5288471",
"0.5284264",
"0.52440906",
"0.5220827",
"0.52183497"
]
| 0.6943881 | 1 |
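The `inv_projmap` document in the row above is only a stub, and its first negative sketches a Cartesian-to-HEALPix inverse. Below is a minimal, hedged variant of that idea built on `healpy`'s `pix2ang`; the longitude/latitude ranges, the nearest-neighbour lookup and the sign conventions are assumptions for illustration, not the original class's behaviour.

```python
import numpy as np
import healpy as hp

def inv_projmap_nearest(img, lonra, latra, nside):
    # Fill a HEALPix map of resolution `nside` by nearest-neighbour lookup into
    # a flat image `img` assumed to cover lonra x latra (degrees).
    ysize, xsize = img.shape
    npix = hp.nside2npix(nside)
    theta, phi = hp.pix2ang(nside, np.arange(npix))   # theta in [0, pi], phi in [0, 2*pi)
    lon = np.degrees(phi)
    lon = np.where(lon > 180.0, lon - 360.0, lon)     # wrap to [-180, 180]
    lat = 90.0 - np.degrees(theta)                    # [-90, 90]
    j = np.clip(np.round((lon - lonra[0]) / (lonra[1] - lonra[0]) * (xsize - 1)).astype(int), 0, xsize - 1)
    i = np.clip(np.round((lat - latra[0]) / (latra[1] - latra[0]) * (ysize - 1)).astype(int), 0, ysize - 1)
    return img[i, j]
```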
flipconv is either 'astro' or 'geo'. None will be default. With 'astro', east is toward left and west toward right. It is the opposite for 'geo' | def set_flip(self, flipconv):
if flipconv is None:
flipconv = 'astro' # default
if flipconv == 'astro': self._flip = -1
elif flipconv == 'geo': self._flip = 1
else: raise ValueError("flipconv must be 'astro', 'geo' or None for default.") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convert_flip(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n axis = op.attr(\"axis\")\n\n for i, ax in enumerate(axis):\n if i == 0:\n out = _op.reverse(x, ax)\n else:\n out = _op.reverse(out, ax)\n\n g.add_node(op.output(\"Out\")[0], out)",
"def flip(self, bev_direction: str = 'horizontal') -> None:\n pass",
"def flip_direction(direction):\n if direction==\"NORTH\": return \"SOUTH\"\n if direction==\"SOUTH\": return \"NORTH\"\n if direction==\"WEST\": return \"EAST\"\n if direction==\"EAST\": return \"WEST\"\n elif isinstance(direction, float):\n return (direction + np.pi)%(2*np.pi)",
"def td_flip(self):\n self.cw_rotate()\n self.cw_rotate()\n self.lr_flip()\n self.find_edges()",
"def __flip(img, flip, flip_type=Image.FLIP_LEFT_RIGHT):\n if flip:\n return img.transpose(flip_type)\n return img",
"def flip(self, mode='h'):\n # TODO: Implement the flip function. Remember to record the boolean values is_horizontal_flip and\n # is_vertical_flip.\n if mode == 'h':\n self.is_horizontal_flip = True\n self.x = np.flipud(self.x)\n elif mode == 'v':\n self.is_vertical_flip = True\n self.x = np.fliplr(self.x)\n else:\n self.is_vertical_flip = True\n self.is_horizontal_flip = True\n self.x = np.fliplr(self.x)\n self.x = np.flipud(self.x)\n # raise NotImplementedError\n #######################################################################\n # #\n # #\n # TODO: YOUR CODE HERE #\n # #\n # #\n #######################################################################",
"def flip(self):",
"def polyFlipEdge(*args, q=True, query=True, e=True, edit=True, **kwargs)->Union[bool, Any]:\n pass",
"def flip(self):\n \n if self.faceup:\n self.faceup = False\n else:\n self.faceup = True",
"def __call__(self, results):\n\n if 'flip' not in results:\n if isinstance(self.direction, list):\n # None means non-flip\n direction_list = self.direction + [None]\n else:\n # None means non-flip\n direction_list = [self.direction, None]\n\n if isinstance(self.flip_ratio, list):\n non_flip_ratio = 1 - sum(self.flip_ratio)\n flip_ratio_list = self.flip_ratio + [non_flip_ratio]\n else:\n non_flip_ratio = 1 - self.flip_ratio\n # exclude non-flip\n single_ratio = self.flip_ratio / (len(direction_list) - 1)\n flip_ratio_list = [single_ratio] * (len(direction_list) -\n 1) + [non_flip_ratio]\n\n cur_dir = np.random.choice(direction_list, p=flip_ratio_list)\n\n results['flip'] = cur_dir is not None\n if 'flip_direction' not in results:\n results['flip_direction'] = cur_dir\n if results['flip']:\n # flip image\n for key in results.get('img_fields', ['img']):\n results[key] = general_ocr.imflip(\n results[key], direction=results['flip_direction'])\n # flip bboxes\n for key in results.get('bbox_fields', []):\n results[key] = self.bbox_flip(results[key],\n results['img_shape'],\n results['flip_direction'])\n # flip masks\n for key in results.get('mask_fields', []):\n results[key] = results[key].flip(results['flip_direction'])\n\n # flip segs\n for key in results.get('seg_fields', []):\n results[key] = general_ocr.imflip(\n results[key], direction=results['flip_direction'])\n return results",
"def collate_fn_flip(self, batch):\n FT = torch.FloatTensor\n img, uvd_gt = zip(*batch)\n flip = random.randint(1, 10000)%2\n # Do flipping\n # 0 = left, 1 = right\n hand_side = 1\n if flip:\n hand_side = 0 \n\n new_img = []\n new_uvd = []\n for i, u in batch:\n if flip:\n i = i.transpose(Image.FLIP_LEFT_RIGHT)\n u[:, 0] = 0.999 - u[:, 0]\n i = np.asarray(i)\n i = i/255.0\n i = IMG.imgshape2torch(i)\n new_img.append(i)\n new_uvd.append(u)\n \n new_img = FT(new_img)\n new_uvd = FT(new_uvd)\n return new_img, new_uvd, hand_side",
"def flip_augmentation():\n return lambda image: ImageOps.flip(image)",
"def NFkB_cFlip_interaction():\n Parameter('Flip_degradase_0', 0)\n alias_model_components()\n \n Initial(Flip_degradase(bf=None), Flip_degradase_0)\n \n Rule('NFkB_cFlipL', NFkB() >> NFkB() + flip_L(bDED=None), Parameter('NFkB_FlipL', 1e-2))\n Rule('NFkB_cFlipS', NFkB() >> NFkB() + flip_S(bDED=None), Parameter('NFkB_FlipS', 1e-2))\n \n Rule('NFkB_degradase', NFkB() >> NFkB() + Flip_degradase(bf=None), Parameter('Deg_flip', 1e-6))\n Rule('Deg_cFlipL', Flip_degradase(bf=None) + flip_L(bDED=None) >> Flip_degradase(bf=None), Parameter('deg_FlipL', 5e-6))\n Rule('Deg_cFlipS', Flip_degradase(bf=None) + flip_S(bDED=None) >> Flip_degradase(bf=None), Parameter('deg_FlipS', 5e-6))",
"def flipNormals(self):\n self.flip = not self.flip",
"def fl_flip_yorigin():\n _fl_flip_yorigin = library.cfuncproto(\n library.load_so_libforms(), \"fl_flip_yorigin\",\\\n None, [],\\\n \"\"\"void fl_flip_yorigin()\"\"\")\n _fl_flip_yorigin()",
"def flip(self, xflip=True, yflip=False):\n self.drawer.flush()\n img = self.img\n if xflip: img = img.transpose(PIL.Image.FLIP_LEFT_RIGHT)\n if yflip: img = img.transpose(PIL.Image.FLIP_TOP_BOTTOM)\n self.img = img\n self.update_drawer_img()\n return self",
"def flip(flip_x=False, flip_y=False): \r\n x, y = 1, 1\r\n if flip_x:\r\n x = np.random.choice((-1,1))\r\n if flip_y:\r\n y = np.random.choice((-1,1))\r\n return np.array(((x, 0, 0),\r\n (0, y, 0),\r\n (0, 0, 1)), dtype=np.float)",
"def forward_transform(data, flip, rotate):\n if flip == 'FLIP_LEFT_RIGHT':\n data = np.fliplr(data)\n\n if rotate == 'ROTATE_90':\n data = np.rot90(data, 1)\n\n if rotate == 'ROTATE_180':\n data = np.rot90(data, 2)\n\n if rotate == 'ROTATE_270':\n data = np.rot90(data, 3)\n\n return data",
"def flip(img, boolean=True):\n return pg.transform.flip(img, boolean, False)",
"def test_flip():\n template_r = np.array([\n [0.5, 0],\n [0.7, 0],\n ])\n template_g = np.array([\n [0.9, 0],\n [0.2, 0],\n ])\n template_b = np.array([\n [0.1, 0],\n [0.4, 0],\n ])\n template = np.dstack([template_r, template_g, template_b])\n return template, np.flipud(np.fliplr(template))",
"def inverse_transform(data, flip, rotate):\n if flip == 'FLIP_LEFT_RIGHT':\n data = np.fliplr(data)\n\n if rotate == 'ROTATE_90':\n data = np.rot90(data, 3)\n\n if rotate == 'ROTATE_180':\n data = np.rot90(data, 2)\n\n if rotate == 'ROTATE_270':\n data = np.rot90(data, 1)\n\n return data",
"def flip_image(image, direction):\n prevShape = image.shape\n image, reshaped = reshape_to_cv_format(image, False)\n image = cv.flip(image, direction)\n if reshaped: \n image = image.reshape(prevShape)\n return image",
"def reverse_edge(\n G: DiGraphGPKG,\n edge: EdgeData,\n invert: Optional[Iterable[str]] = None,\n flip: Optional[Iterable[str]] = None,\n) -> None:\n rev_coords = list(\n reversed(edge[G.network.edges.geom_column][\"coordinates\"])\n )\n edge[G.network.edges.geom_column][\"coordinates\"] = rev_coords\n if invert is not None:\n for key in invert:\n if key in edge:\n edge[key] = edge[key] * -1\n if flip is not None:\n for key in flip:\n if key in edge:\n edge[key] = type(edge[key])(not edge[key])",
"def set_flipout(flipout):\n if isinstance(flipout, bool):\n __SETTINGS__._FLIPOUT = flipout\n else:\n raise TypeError('flipout must be True or False')",
"def flip(img, code=0):\n\treturn cv2.flip(img, flipCode=code)",
"def flip(self):\n if self.is_face_up:\n arcade.load_texture(self.back_file)\n self.is_face_up = False\n else:\n arcade.load_texture(self.face_file)\n self.is_face_up = True",
"def solve_perspective(region, flip_code):\n dst = np.array([\n [0, 0],\n [algo_width, 0],\n [algo_width, algo_height],\n [0, algo_height]\n ], dtype=\"float32\")\n\n if flip_code in (0, -1):\n # Flip vertical\n dst[[0, 3]] = dst[[3, 0]]\n dst[[1, 2]] = dst[[2, 1]]\n\n if flip_code in (1, -1):\n # Flip horizontal\n dst[[0, 1]] = dst[[1, 0]]\n dst[[2, 3]] = dst[[3, 2]]\n\n return cv2.getPerspectiveTransform(region, dst)",
"def flip_image(image):\n return cv2.flip(image, flipCode=1)",
"def flip(self, xbool, ybool):\n self._surf = pygame.transform.flip(self._surf, xbool, ybool).convert_alpha()",
"def flip_image(image):\n\n return cv2.flip(image, 1)"
]
| [
"0.5972456",
"0.57888746",
"0.5710304",
"0.5693313",
"0.55282015",
"0.5495678",
"0.5467454",
"0.5432339",
"0.53047246",
"0.5252012",
"0.52447367",
"0.52384484",
"0.52223796",
"0.52158856",
"0.517234",
"0.51295346",
"0.5108745",
"0.507657",
"0.50323236",
"0.5025738",
"0.50121415",
"0.50079477",
"0.5004191",
"0.4990998",
"0.4958576",
"0.49521774",
"0.494758",
"0.49354044",
"0.493069",
"0.49122065"
]
| 0.7790945 | 0 |
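To make the 'astro'/'geo' convention in the row above concrete, here is a tiny sketch of how such a flip factor is typically applied when mapping longitude to a screen x coordinate; the helper name and the plain multiplication are illustrative assumptions, not the projector's actual pipeline.

```python
def lon_to_image_x(lon_deg, flipconv=None):
    # 'astro': east (increasing longitude) is drawn toward the left, so x gets
    # the opposite sign of the longitude; 'geo' keeps the geographic sense.
    if flipconv is None:
        flipconv = 'astro'
    if flipconv == 'astro':
        flip = -1
    elif flipconv == 'geo':
        flip = 1
    else:
        raise ValueError("flipconv must be 'astro', 'geo' or None for default.")
    return flip * lon_deg

assert lon_to_image_x(30.0, 'astro') == -30.0
assert lon_to_image_x(30.0, 'geo') == 30.0
```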
Get the field of view in degrees of the plane of projection | def get_fov(self):
return 2.*pi | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fov(self) -> float:\n return self._fov[self.projection_mode.value]",
"def field ( self , xyz ) :\n return self._ilhcbmagnet.fieldVector ( xyz )",
"def get_projection(self):\n return self.projection",
"def base_projection(self, fiber):\n return self.ambient_space().quotient(fiber.affine_space())",
"def projection_matrix(self):\n scene = self.figure.scene\n scene_size = tuple(scene.get_size())\n aspect_ratio = float(scene_size[0]) / float(scene_size[1])\n p = scene.camera.get_perspective_transform_matrix(\n aspect_ratio, -1, 1).to_array().astype(np.float32)\n return p",
"def cameraToWorld(self, p):\n result = self.camPos\n result += p[2] * self.camZ # result is now in the middle of the view-plane\n result += p[0] * self.camX # result is now in the middle-left of the view-plane\n result += p[1] * self.camY # result is now the world-space equivalent of p\n return result",
"def plane(self):\n return plane(self.N, self.o)",
"def fieldCenter(self):\n if self.ra0 is None:\n self.ra0 = reduce(lambda x, y: x + y, [src.pos.ra for src in self.sources]) / len(\n self.sources) if self.sources else 0\n if self.dec0 is None:\n self.dec0 = reduce(lambda x, y: x + y, [src.pos.dec for src in self.sources]) / len(\n self.sources) if self.sources else 0\n return self.ra0, self.dec0",
"def perspectiveNormalizationXform(self):\n return np.array([[1.0/np.tan(self.view_angle_h), 0, 0, 0],\n [0, 1.0/np.tan(self.view_angle_v), 0, 0],\n [0, 0, (self.far + self.near)/(self.far - self.near),\n 2*self.far*self.near/(self.far - self.near)],\n [0, 0, -1, 0]])",
"def project_onto_plane(self,z):\n U=self.U\n Q=self.Q_p\n #print(((z-Q[-2,:,[2]])/P[-2,:,[2]]).T)\n #print(P[-2])\n return ((z-Q[-2,:,[2]])/U[-2,:,[2]]).T*U[-2]+Q[-2]",
"def getPerspectiveProjectionMatrix(l, r, b, t, n, f):\n e11 = 2 * n / (r - l)\n e13 = (r + l) / (r - l)\n e22 = (2 * n) / (t - b)\n e23 = (t + b) / (t - b)\n e33 = -1 * (f + n) / (f - n)\n e34 = (-2 * f * n) / (f - n)\n\n return MatrixExtended([\n [e11, 0, e13, 0],\n [0, e22, e23, 0],\n [0, 0, e33, e34],\n [0, 0, -1, 0]])",
"def compute_viewpoint(self, box):\n x, y, z = self.compute_ray(box)\n theta = math.degrees(math.atan2(z, x))\n phi = math.degrees(math.atan2(y, math.hypot(x, z)))\n return theta, phi",
"def plane(self):\r\n from lsst.analysis import utils\r\n return utils.fitplane(self.points, self.z)",
"def projection(self):\n return self._map_projection",
"def projection(self):\n return self._map_projection",
"def fieldOfView(self):\n return self._fieldOfView",
"def perspective(self, fovy, aspect, near, far):\r\n\r\n top = near * math.tan(fovy * math.pi / 360.0)\r\n bottom = -top\r\n left = bottom * aspect\r\n right = top * aspect\r\n\r\n return self.frustum(left, right, bottom, top, near, far)",
"def render_4d(polyhedron, **kwds):\n projection_direction = None\n try: \n projection_direction = kwds.pop('projection_directior')\n except KeyError:\n for ineq in polyhedron.inequality_generator():\n center = [v() for v in ineq.incident() if v.is_vertex()]\n center = sum(center) / len(center)\n if not center.is_zero(): \n projection_direction = center\n break\n projection_3d = Projection(polyhedron).schlegel(projection_direction)\n return render_3d(projection_3d, **kwds)",
"def get_projection_point(self, point, plane, test=False):\n return point_on_plane_projection(point, plane, test=test)",
"def parallel_projection(self):\n return self.camera.parallel_projection",
"def build_perspective_camera(field_of_view=60.0,\n aspect_ratio=1.0,\n near_plane=0.01,\n far_plane=1000.0,\n position=(0.0, 0.0, 5.0),\n enable_zoom=False):\n context = build_context()\n camera = context.THREE.PerspectiveCamera.new_object(field_of_view,\n aspect_ratio, near_plane,\n far_plane)\n camera.position.set(*position)\n controls = context.THREE.OrbitControls.new_object(camera)\n controls.enableZoom = enable_zoom\n return camera",
"def worldToCameraCentricXform(self):\n return self.rotateAlignXform().dot(self.translateToOriginXform())",
"def get_proj(self):\n relev, razim = np.pi * self.elev/180, np.pi * self.azim/180\n\n xmin, xmax = self.get_xlim3d()/self.pbaspect[0]\n ymin, ymax = self.get_ylim3d()/self.pbaspect[1]\n zmin, zmax = self.get_zlim3d()/self.pbaspect[2]\n\n # transform to uniform world coordinates 0-1.0,0-1.0,0-1.0 \n worldM = proj3d.world_transformation(xmin, xmax,\n ymin, ymax,\n zmin, zmax)\n\n # look into the middle of the new coordinates \n R = np.array([0.5, 0.5, 0.5])\n\n xp = R[0] + np.cos(razim) * np.cos(relev) * self.dist\n yp = R[1] + np.sin(razim) * np.cos(relev) * self.dist\n zp = R[2] + np.sin(relev) * self.dist\n E = np.array((xp, yp, zp))\n\n self.eye = E\n self.vvec = R - E\n self.vvec = self.vvec / proj3d.mod(self.vvec)\n\n if abs(relev) > np.pi/2:\n # upside down \n V = np.array((0, 0, -1))\n else:\n V = np.array((0, 0, 1))\n\n zfront, zback = -self.dist, self.dist\n\n viewM = proj3d.view_transformation(E, R, V)\n perspM = proj3d.persp_transformation(zfront, zback)\n M0 = np.dot(viewM, worldM)\n M = np.dot(perspM, M0)\n return M",
"def perspective(self):\n return self.container['perspective']",
"def frustum(self, left, right, bottom, top, near, far):\r\n \r\n return mat4( (2.0*near)/(right-left), 0.0, float(right+left)/(right-left), 0.0,\r\n 0.0, (2.0*near)/(top-bottom), float(top+bottom)/(top-bottom), 0.0,\r\n 0.0, 0.0, -float(far+near)/(far-near), -(2.0*far*near)/(far-near),\r\n 0.0, 0.0, -1.0, 0.0)",
"def _get_projection(el):\n result = None\n if hasattr(el, 'crs'):\n result = (int(el._auxiliary_component), el.crs)\n return result",
"def plot_plane(unit_normal, x_array, y_array, fore):\n # print'unit normal = ', unit_normal\n z = (((unit_normal[0] * (fore[0] - x_array)) + (unit_normal[1] * (fore[1] - y_array))) / unit_normal[2]) + fore[2]\n # print 'plane numbers\\n', z\n return z",
"def deg_mat(self):\n return self._deg_mat",
"def get_proj(self):\n relev, razim = np.pi * self.elev/180, np.pi * self.azim/180\n\n xmin, xmax = self.get_xlim3d()\n ymin, ymax = self.get_ylim3d()\n zmin, zmax = self.get_zlim3d()\n\n # transform to uniform world coordinates 0-1.0,0-1.0,0-1.0\n worldM = proj3d.world_transformation(xmin, xmax,\n ymin, ymax,\n zmin, zmax)\n\n # look into the middle of the new coordinates\n R = np.array([0.5, 0.5, 0.5])\n\n xp = R[0] + np.cos(razim) * np.cos(relev) * self.dist\n yp = R[1] + np.sin(razim) * np.cos(relev) * self.dist\n zp = R[2] + np.sin(relev) * self.dist\n E = np.array((xp, yp, zp))\n\n self.eye = E\n self.vvec = R - E\n self.vvec = self.vvec / proj3d.mod(self.vvec)\n\n if abs(relev) > np.pi/2:\n # upside down\n V = np.array((0, 0, -1))\n else:\n V = np.array((0, 0, 1))\n zfront, zback = -self.dist, self.dist\n\n viewM = proj3d.view_transformation(E, R, V)\n perspM = proj3d.persp_transformation(zfront, zback)\n M0 = np.dot(viewM, worldM)\n M = np.dot(perspM, M0)\n return M",
"def plane(self):\n return Plane(Point(0, self.evaluations.exposedWing.edges[2].point1.y, 0), Vector(0, 1, 0),\n hidden=True)"
]
| [
"0.65581155",
"0.6266231",
"0.6220673",
"0.61471313",
"0.59898585",
"0.5958863",
"0.5957005",
"0.59534055",
"0.5926802",
"0.5884517",
"0.5833143",
"0.5828602",
"0.5786574",
"0.5780048",
"0.5780048",
"0.5776455",
"0.57687336",
"0.5767944",
"0.5759518",
"0.574998",
"0.5748476",
"0.57438475",
"0.5720887",
"0.57129776",
"0.5693919",
"0.5682325",
"0.5664163",
"0.5648589",
"0.5638489",
"0.55992746"
]
| 0.6937915 | 0 |
Inverse projection of the projected map to a healpix spherical map. | def inv_projmap(self, img, nside=None):
ysize, xsize = img.shape
if nside is None:
lonra = self.arrayinfo['lonra']
latra = self.arrayinfo['latra']
npix = np.int((360.0 * xsize / (lonra[1] - lonra[0])) * (180.0 * ysize / (latra[1] - latra[0]))) # the total pixel
nside = 2**np.int(np.ceil(np.log2(np.sqrt(npix/12.0)) - 1))
npix = 12 * nside**2
hpmap = np.zeros(npix, dtype=img.dtype)
theta, phi = pixelfunc.pix2ang(nside, np.arange(npix)) # in radians, theta: [0, pi], phi: [0. 2pi]
x = np.degrees(phi)
x = -np.where(x>180.0, x-360.0, x) # [-180.0, 180.0]
# x = np.degrees(phi) - 180.0 # [-180.0, 180.0]
y = -np.degrees(theta) + 90.0 # [-90.0, 90.0]
# y = np.degrees(theta) - 90.0 # [-90.0, 90.0]
for pix in np.arange(npix):
i, j = self.xy2ij(x[pix], y[pix])
if i is not None and j is not None:
hpmap[pix] = img[i, j]
return hpmap | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def inv_projmap(self, img, nside=None):\n pass",
"def reproject_map(nside, phi, healpix_array=None):\n\n vec = hp.pix2vec(nside, np.arange(hp.nside2npix(nside)))\n eu_mat = euler(-phi, 0, 0, deg=True)\n rot_map = hp.rotator.rotateVector(eu_mat, vec)\n new_hp_inds = hp.vec2pix(nside, rot_map[0], rot_map[1], rot_map[2])\n\n return healpix_array[new_hp_inds]",
"def Inverse(self, lat1, lon1, lat2, lon2,\n outmask = GeodesicCapability.STANDARD):\n\n a12, s12, salp1,calp1, salp2,calp2, m12, M12, M21, S12 = self._GenInverse(\n lat1, lon1, lat2, lon2, outmask)\n outmask &= Geodesic.OUT_MASK\n if outmask & Geodesic.LONG_UNROLL:\n lon12, e = Math.AngDiff(lon1, lon2)\n lon2 = (lon1 + lon12) + e\n else:\n lon2 = Math.AngNormalize(lon2)\n result = {'lat1': Math.LatFix(lat1),\n 'lon1': lon1 if outmask & Geodesic.LONG_UNROLL else\n Math.AngNormalize(lon1),\n 'lat2': Math.LatFix(lat2),\n 'lon2': lon2}\n result['a12'] = a12\n if outmask & Geodesic.DISTANCE: result['s12'] = s12\n if outmask & Geodesic.AZIMUTH:\n result['azi1'] = Math.atan2d(salp1, calp1)\n result['azi2'] = Math.atan2d(salp2, calp2)\n if outmask & Geodesic.REDUCEDLENGTH: result['m12'] = m12\n if outmask & Geodesic.GEODESICSCALE:\n result['M12'] = M12; result['M21'] = M21\n if outmask & Geodesic.AREA: result['S12'] = S12\n return result",
"def invmap(self, lat, long):\r\n r1 = self._r1\r\n r2 = self._r2\r\n direction = self._direction\r\n\r\n return self.map_aux(lat, long, direction, r2, r1)",
"def invmap(self, x, y):\r\n r = self._r\r\n rxy = np.sqrt(x*x + y*y)\r\n\r\n lat = (rxy/r)*(np.pi/2)\r\n long = np.arctan2(y, x)\r\n\r\n return (lat, long)",
"def inverse_mapping(self):\n return self._inverse_mapping",
"def invgeochart(w):\n # u = torch.asin(w[...,2])\n u = torch.acos(w[...,2])\n # v = torch.acos(w[...,0]/torch.cos(u))\n v = torch.atan(w[...,1]/w[...,0])\n return torch.stack((u,v+np.pi))",
"def inverse_warp(self, x, depth, pose, intrinsics, intrinsics_inv):\n B, C, H, W = x.size()\n cam_coords = self.pixel2cam(depth, intrinsics_inv)\n proj_cam_to_src_pixel = intrinsics.bmm(pose)\n src_pixel_coords = self.cam2pixel(cam_coords, proj_cam_to_src_pixel)\n #grid = F.affine_grid(theta, x.size())\n x = F.grid_sample(x, src_pixel_coords)\n\n return x",
"def get_proj(self):\n relev, razim = np.pi * self.elev/180, np.pi * self.azim/180\n\n xmin, xmax = self.get_xlim3d()/self.pbaspect[0]\n ymin, ymax = self.get_ylim3d()/self.pbaspect[1]\n zmin, zmax = self.get_zlim3d()/self.pbaspect[2]\n\n # transform to uniform world coordinates 0-1.0,0-1.0,0-1.0 \n worldM = proj3d.world_transformation(xmin, xmax,\n ymin, ymax,\n zmin, zmax)\n\n # look into the middle of the new coordinates \n R = np.array([0.5, 0.5, 0.5])\n\n xp = R[0] + np.cos(razim) * np.cos(relev) * self.dist\n yp = R[1] + np.sin(razim) * np.cos(relev) * self.dist\n zp = R[2] + np.sin(relev) * self.dist\n E = np.array((xp, yp, zp))\n\n self.eye = E\n self.vvec = R - E\n self.vvec = self.vvec / proj3d.mod(self.vvec)\n\n if abs(relev) > np.pi/2:\n # upside down \n V = np.array((0, 0, -1))\n else:\n V = np.array((0, 0, 1))\n\n zfront, zback = -self.dist, self.dist\n\n viewM = proj3d.view_transformation(E, R, V)\n perspM = proj3d.persp_transformation(zfront, zback)\n M0 = np.dot(viewM, worldM)\n M = np.dot(perspM, M0)\n return M",
"def apply_inverse_map(self, transport_map, sig0):\n # Check input arrays\n transport_map = check_array(transport_map, ndim=2,\n dtype=[np.float64, np.float32])\n sig0 = check_array(sig0, ndim=2, dtype=[np.float64, np.float32],\n force_strictly_positive=True)\n\n # Initialize Radon transforms\n rad0 = radon(sig0, theta=self.theta, circle=False)\n rad1 = np.zeros_like(rad0)\n\n # Check transport map and Radon transforms are the same size\n assert_equal_shape(transport_map, rad0,\n ['transport_map', 'Radon transform of sig0'])\n\n # Loop over angles\n cdt = CDT()\n for i in range(self.theta.size):\n # Convert projection to PDF\n j0 = signal_to_pdf(rad0[:,i], epsilon=1e-8, total=1.)\n\n # Radon transform of sig1 comprised of inverse CDT of projections\n rad1[:,i] = cdt.apply_inverse_map(transport_map[:,i], j0)\n\n # Inverse Radon transform\n sig1_recon = iradon(rad1, self.theta, circle=False, filter='ramp')\n\n # Crop sig1_recon to match sig0\n sig1_recon = match_shape2d(sig0, sig1_recon)\n\n return sig1_recon",
"def polySphericalProjection(*args, imageCenter: Union[List[float, float], bool]=None,\n imageCenterX: Union[float, bool]=0.5, imageCenterY: Union[float,\n bool]=0.5, imageScale: Union[List[float, float], bool]=None,\n imageScaleU: Union[float, bool]=1.0, imageScaleV: Union[float,\n bool]=1.0, projectionCenter: Union[List[float, float, float],\n bool]=None, projectionCenterX: Union[float, bool]=0.0,\n projectionCenterY: Union[float, bool]=0.0, projectionCenterZ:\n Union[float, bool]=0.0, projectionHorizontalSweep: Union[float,\n bool]=0.0, projectionScale: Union[List[float, float], bool]=None,\n projectionScaleU: Union[float, bool]=180.0, projectionScaleV:\n Union[float, bool]=90.0, radius: Union[float, bool]=0.0, rotate:\n Union[List[float, float, float], bool]=None, rotateX: Union[float,\n bool]=0.0, rotateY: Union[float, bool]=0.0, rotateZ: Union[float,\n bool]=0.0, rotationAngle: Union[float, bool]=10.0, seamCorrect:\n bool=True, caching: bool=True, constructionHistory: bool=True,\n createNewMap: bool=True, insertBeforeDeformers: bool=True,\n keepImageRatio: bool=True, mapDirection: AnyStr=\"\", name: AnyStr=\"\",\n nodeState: Union[int, bool]=0, perInstance: bool=True, smartFit:\n bool=True, worldSpace: bool=True, q=True, query=True, e=True,\n edit=True, **kwargs)->Union[AnyStr, Any]:\n pass",
"def inv(self):\n\n self.x, self.y = self.y, self.x\n self._x_, self._y_ = self._y_, self._x_\n self.xfac, self.yfac = 1 / self.yfac, 1 / self.xfac\n self._xfac_, self._yfac_ = 1 / self._yfac_, 1 / self._xfac_\n self._u = 1 / self._u.conj()",
"def unproject(self, (x, y)):\n lng = x/EARTH_RADIUS * RAD_TO_DEG\n lat = 2 * math.atan(math.exp(y/EARTH_RADIUS)) - math.pi/2 * RAD_TO_DEG\n return (lng, lat)",
"def inverse_warping(img_initial, img_final, pts_initial, pts_final): \n \n # YOU SHOULDN'T NEED TO CHANGE THIS\n pts_final = pts_final.astype(int)\n \n projected_img = img_initial.copy()\n for i in range(3):\n sub_img_i = img_initial[:,:,i][pts_initial[:,1], pts_initial[:,0]]\n sub_img_f = img_final[:,:,i][pts_final[:,1], pts_final[:,0]]\n \n sub_img = sub_img_i*0.5 + sub_img_f*0.5\n projected_img[:,:,i][pts_initial[:,1], pts_initial[:,0]] = sub_img\n \n return projected_img",
"def sphere2img(lat,lon,latc,lonc,xcen,ycen,rSun,peff,hemi_out=False):\n # Correction of finite distance (1AU)\n sin_asd = 0.004660\n cos_asd = 0.99998914\n\n last_latc = 0.0\n cos_latc = 1.0\n sin_latc = 0.0\n\n if latc != last_latc:\n sin_latc = np.sin(latc)\n cos_latc = np.cos(latc)\n last_latc = latc\n\n sin_lat = np.sin(lat)\n cos_lat = np.cos(lat)\n cos_lat_lon = cos_lat*np.cos(lon-lonc)\n\n cos_cang = sin_lat*sin_latc + cos_latc*cos_lat_lon\n if cos_cang < 0.0:\n hemisphere = 1\n else:\n hemisphere = 0\n\n r = rSun*cos_asd/(1.0 - cos_cang*sin_asd)\n xr = r*cos_lat*np.sin(lon - lonc)\n yr = r*(sin_lat*cos_latc - sin_latc*cos_lat_lon)\n\n cospa = np.cos(peff)\n sinpa = np.sin(peff)\n xi = xr*cospa - yr*sinpa\n eta = xr*sinpa + yr*cospa\n\n xi = xi + xcen\n eta = eta + ycen\n\n if hemi_out == True:\n return xi,eta,hemisphere\n else:\n return xi,eta",
"def inverse_warp(img, depth, pose, intrinsics, intrinsics_inv, rotation_mode='euler', padding_mode='zeros', return_coordinates=False):\n check_sizes(img, 'img', 'B3HW')\n\n src_pixel_coords = get_warp_pixel_transformation(img, depth, pose, intrinsics, intrinsics_inv, rotation_mode=rotation_mode, padding_mode=padding_mode)\n projected_img = torch.nn.functional.grid_sample(img, src_pixel_coords, padding_mode=padding_mode, align_corners=False)\n if return_coordinates:\n return projected_img, src_pixel_coords\n else:\n return projected_img",
"def update_hpx_skymap_allsky(map_in, map_out):\n if map_out is None:\n in_hpx = map_in.hpx\n out_hpx = HPX.create_hpx(in_hpx.nside, in_hpx.nest, in_hpx.coordsys,\n None, in_hpx.ebins, None, in_hpx.conv, None)\n data_out = map_in.expanded_counts_map()\n print(data_out.shape, data_out.sum())\n map_out = HpxMap(data_out, out_hpx)\n else:\n map_out.data += map_in.expanded_counts_map()\n return map_out",
"def get_proj(self):\n relev, razim = np.pi * self.elev/180, np.pi * self.azim/180\n\n xmin, xmax = self.get_xlim3d()\n ymin, ymax = self.get_ylim3d()\n zmin, zmax = self.get_zlim3d()\n\n # transform to uniform world coordinates 0-1.0,0-1.0,0-1.0\n worldM = proj3d.world_transformation(xmin, xmax,\n ymin, ymax,\n zmin, zmax)\n\n # look into the middle of the new coordinates\n R = np.array([0.5, 0.5, 0.5])\n\n xp = R[0] + np.cos(razim) * np.cos(relev) * self.dist\n yp = R[1] + np.sin(razim) * np.cos(relev) * self.dist\n zp = R[2] + np.sin(relev) * self.dist\n E = np.array((xp, yp, zp))\n\n self.eye = E\n self.vvec = R - E\n self.vvec = self.vvec / proj3d.mod(self.vvec)\n\n if abs(relev) > np.pi/2:\n # upside down\n V = np.array((0, 0, -1))\n else:\n V = np.array((0, 0, 1))\n zfront, zback = -self.dist, self.dist\n\n viewM = proj3d.view_transformation(E, R, V)\n perspM = proj3d.persp_transformation(zfront, zback)\n M0 = np.dot(viewM, worldM)\n M = np.dot(perspM, M0)\n return M",
"def apply_spherical_map(img_src, mapping, output_res=(360, 640), f_fill=np.mean):\n (y_map, x_map), (fill_y_l, fill_x_l, nonzeros_l) = mapping\n\n transfo_img = np.zeros(output_res) + 128\n transfo_img[y_map, x_map] = img_src.flatten()\n\n for y, x, nonz in zip(fill_y_l, fill_x_l, nonzeros_l):\n transfo_img[y, x] = f_fill(transfo_img[nonz])\n return transfo_img",
"def Reproject(x, y, in_grid = 4326, out_grid = 32737):\n \n inProj = Proj(init='epsg:'+str(in_grid))\n outProj = Proj(init='epsg:'+str(out_grid))\n \n \n x2,y2 = transform(inProj,outProj,x,y)\n \n return x2, y2",
"def inverse(self):\n data = np.linalg.inv(self._data)\n return self.create(self.rows, self.cols, data)",
"def inverse(self):\n def inv(v):\n v[0], v[1] = v[1] , v[0]\n for v in [self.point1 , self.pointN , self.unitv, self.normalv]:\n inv(v)\n\n self.points = numpy.roll(self.points,1,axis=1)\n self.a, self.b = self.b, self.a\n self.angle = numpy.arccos( self.unitv[0] )*numpy.sign(self.unitv[1] )\n return",
"def map_ll_to_seviri(lon, lat):\n # new method\n # project lat/lon input to meteosat view, mask out of bounds data\n geos = pyproj.Proj(proj='geos', h=35785831.0,lon_0=0,lat_0=0,x_0=0,y_0=0,units='m')\n x,y = geos(lon,lat)\n x = ma.masked_equal(x,1e30)\n y = ma.masked_equal(y,1e30)\n # Convert to index. ~3000.5m per pixel, centre pixel index is [1855,1855]\n x = x/-3000.5+1855\n y = y/3000.5+1855\n return x,y\n # old method\n \"\"\"\n # Define Earth radius and geostationary orbit height in km and calucalte max\n # viewer angle\n r_sat = 42164.\n r_earth = 6378.\n zenith_max = np.arcsin(r_earth/r_sat)\n # convert lat/lon to cartesian coordinates\n x = np.cos(np.radians(lat)) * np.sin(np.radians(lon))\n y = np.sin(np.radians(lat))\n z = np.cos(np.radians(lat)) * np.cos(np.radians(lon))\n # x,y vector magnitude\n d = np.sqrt(x**2 + y**2)\n # Calculate footprint SEVIRI effective zenith angle and mask for > pi/2\n # values\n zenith = np.arctan2(d, z) + np.arctan2(r_earth*d, r_sat-r_earth*z)\n zenith_mask = np.abs(zenith) >= (0.5 * np.pi)\n # Calculate x and y viewer angles\n theta_x = np.arctan2(r_earth*x, r_sat-r_earth*z)\n theta_y = np.arctan2(r_earth*y, r_sat-r_earth*z)\n # Define SEVIRI global index range and offset\n # These should be the same on all files, but may need to check\n x_irange = 3623\n x_ioffset = 44\n y_irange = 3611\n y_ioffset = 51\n # Remap viewer angles to indexes using max viewer angle, index range and\n # offset. Note -ve theta_y as SEVIRI indexes the x-axis right to left(E-W)\n x_out = (1 - theta_x / zenith_max) * 0.5 * x_irange + x_ioffset\n y_out = (1 + theta_y / zenith_max) * 0.5 * y_irange + y_ioffset\n # Return masked arrays using the zenith angle mask\n return ma.array(x_out, mask=zenith_mask), ma.array(y_out, mask=zenith_mask)\n \"\"\"",
"def inverse( m, context = FloatContext, copy_m=True ):\n n,n_ = shape_mat(m)\n assert (n==n_) #matris should be square\n\n return solve( m, eye(n), context=context, copy_b=False, copy_a=copy_m )",
"def get_spherical_map(screen_pos, input_res=(281, 500), output_res=(360, 640), k_side=2, filling_pol=\"nonzero\"):\n assert filling_pol in [\"nonzero\", \"closest\"]\n\n screen_interp = interpolate_screen_pos(screen_pos, np.linspace(0,16, input_res[1], endpoint=True),\n np.linspace(0, 9, input_res[0], endpoint=True))\n y_inres, x_inres = input_res\n y_res, x_res = output_res\n xnew = np.linspace(screen_interp[:,:,1].min(), screen_interp[:,:,1].max(), x_res)\n ynew = np.linspace(screen_interp[:,:,0].min(), screen_interp[:,:,0].max(), y_res)\n map_img = np.zeros((y_res, x_res))\n\n y_map, x_map = np.empty(y_inres*x_inres, dtype=int), np.empty(y_inres*x_inres, dtype=int)\n for i, (y, x) in enumerate(zip(screen_interp[:,::-1,0].flatten(), screen_interp[:,::-1,1].flatten())):\n y_map[i] = np.argmin(ynew<y)\n x_map[i] = np.argmin(xnew<x)\n map_img[y_map, x_map] = 1\n\n y_nonzero, x_nonzero = np.nonzero(map_img==0) #Finds where the image is still zero\n fill_x_l, fill_y_l, nonzeros_l = [], [], []\n for y, x in zip(y_nonzero, x_nonzero):\n # Sets the limits to where to look for nonzeros pixels\n ylow, xlow = max(0, y-k_side), max(0, x-k_side)\n yhig, xhig = min(y+k_side+1, y_res), min(x+k_side+1, x_res)\n area = map_img[ylow:yhig, xlow:xhig]\n\n if np.any(area): #If there are pixels around\n fill_x_l.append(x)\n fill_y_l.append(y)\n nonz_y, nonz_x = np.nonzero(area)\n if filling_pol==\"nonzero\":\n nonzeros_l.append((nonz_y+ylow, nonz_x+xlow)) #store the nonzero slicing for later filling\n elif filling_pol==\"closest\":\n xx, yy = np.meshgrid(np.arange(xlow,xhig), np.arange(ylow,yhig))\n distances = np.sqrt((yy-y)**2+(xx-x)**2)\n idx_min = np.argmin(distances[nonz_y, nonz_x])\n nonzeros_l.append(([nonz_y[idx_min]+ylow], [nonz_x[idx_min]+xlow]))\n\n return (y_map, x_map), (fill_y_l, fill_x_l, nonzeros_l)",
"def GetInverse(self, *args):\n return _itkTranslationTransformPython.itkTranslationTransformD2_GetInverse(self, *args)",
"def invmap(self, x, y):\r\n r = self._r\r\n rxy = np.sqrt(x*x + y*y)\r\n\r\n lat = np.arccos(1-(rxy/r)**2)\r\n long = np.arctan2(y, x)\r\n\r\n try:\r\n long[np.isnan(lat)] = np.nan\r\n except TypeError: # Thrown if long is scalar\r\n if np.isnan(lat): long = np.nan\r\n return (lat, long)",
"def pinhole_projection_image_to_world(uv, z, K):\n\n u_v_1 = np.array([uv[0], uv[1], 1])\n pos = z * np.matmul(inv(K),u_v_1)\n return pos",
"def _get_proj_mat(self): \n if self._proj_mat is None:\n if self.symmetric:\n IP_mat = self.vec_space.compute_symmetric_inner_product_mat(\n self.basis_vecs)\n else:\n IP_mat = self.vec_space.compute_inner_product_mat(\n self.adjoint_basis_vecs, self.basis_vecs)\n self._proj_mat = np.linalg.inv(IP_mat)\n return self._proj_mat",
"def inverse(self):\r\n \r\n Mi=mat4()\r\n d=self.determinant()\r\n for i in range(4):\r\n for j in range(4):\r\n sign=1-((i+j)%2)*2\r\n m3=self._submat(i,j)\r\n Mi[j,i]=sign*m3.determinant()/d\r\n return Mi"
]
| [
"0.6943809",
"0.6358386",
"0.5970374",
"0.5945898",
"0.58239275",
"0.570336",
"0.56980294",
"0.5621514",
"0.5613242",
"0.56047964",
"0.55778897",
"0.55289084",
"0.5519292",
"0.5498979",
"0.5480552",
"0.54634655",
"0.5462376",
"0.54497546",
"0.54262376",
"0.5415537",
"0.53776425",
"0.5350934",
"0.533963",
"0.53216696",
"0.52907115",
"0.52868295",
"0.5283053",
"0.5244825",
"0.5222821",
"0.5218503"
]
| 0.7119491 | 0 |
Computes sag using voltage trace. | def sag(V):
Vmin = np.amin(V)
Vend = V[-1]
return Vmin - Vend | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compute_gae(V, s, ss, r, absorbing, last, gamma, lam):\n v = V(s)\n v_next = V(ss)\n gen_adv = np.empty_like(v)\n for rev_k in range(len(v)):\n k = len(v) - rev_k - 1\n if last[k] or rev_k == 0:\n gen_adv[k] = r[k] - v[k]\n if not absorbing[k]:\n gen_adv[k] += gamma * v_next[k]\n else:\n gen_adv[k] = r[k] + gamma * v_next[k] - v[k] + gamma * lam * gen_adv[k + 1]\n return gen_adv + v, gen_adv",
"def sag_ratio(V):\n\n Vmin = np.amin(V)\n Vend = V[-1]\n sr = (Vmin - Vend) / Vmin\n if sr < 0:\n print(\"Warning: sag ratio being negative indicates there is no sag\")\n return sr",
"def g_sebal_func(ts, albedo_sur, ndvi):\n g = np.copy(ndvi).astype(np.float64)\n np.power(g, 4, out=g)\n g *= -0.98\n g += 1\n g *= ts\n g *= (albedo_sur * 0.0074 + 0.0038)\n return g",
"def gVI(g,rBC,lBC,time,npts):\n #Important coeffcients\n global gamma\n gamma = g\n global alpha\n alpha = (gamma+1)/(gamma-1)\n global beta\n beta = (2*gamma)/(gamma-1)\n global epsilon\n epsilon = (2*gamma)/(gamma+1)\n #Boundary conditions\n global lbc\n lbc = lBC\n global rbc\n rbc = rBC\n #Time\n global t\n t = time\n #points\n global numPts\n numPts = npts\n #Speed of sound for states 1 and 5\n global cL\n cL = np.sqrt(gamma*lbc[0]/lbc[1])\n global cR\n cR = np.sqrt(gamma*rbc[0]/rbc[1])",
"def vf(gravedad, tiempo):\r\n #se realiza un multiplicacion y el valor se le asigna a la variable vf\r\n vf=gravedad*tiempo\r\n #se regresa vf\r\n return vf",
"def gate(self):\n self.gatedFrames = IVUS_gating(self.images, self.ivusPullbackRate, self.dicom.CineRate)",
"def get_voltage(self, i_sup, t, *args, **kwargs):\r\n raise NotImplementedError",
"def vega(flag, S, K, t, r, sigma, q):\n\n b = r-q\n\n return numerical_vega(flag, S, K, t, r, sigma, b, f)",
"def bsm_vega(S0, K, T, r, sigma):\n \n from math import log, sqrt\n from scipy import stats\n \n S0 = float(S0)\n d1 = (log(S0 / K) + (r + 0.5 * sigma ** 2) * T / (sigma * sqrt(T))\n vega = S0 * stats.normcdf(d1, 0.0, 1.0) * sqrt(T)\n return vega\n \n# Implied volatility function\n\ndef bsm_call_imp_vol(S0, K, T, r, C0, sigma_est, it = 100):\n \"\"\" \n Implied volatility of European call option in BSM model\n \n Parameters\n ==========\n S0 : Float\n Initial stock/index level\n K : Float\n Strike Price\n T : Float\n Maturity Date (in year fractions)\n r : Float\n Constant risk-free short rate\n sigma_est : Float\n Estimate of impl. volatility\n it : integer\n Number of iterations\n \n Returns\n =======\n sigma_est : Float\n Numerically estimated implied volatility\n \"\"\"\n for i in range(it):\n sigma_est -= ((bsm_call_value(S0, K, T, r, sigma_est) - C0)\n / bsm_vega(S0, K, T, r, sigma_est))\n return sigma_est",
"def calc_output(line, react_cap=None, gen_res_high=225, gen_res_low=50):\n # unpack\n t, v, i = line\n t_diff = t[1] - t[0]\n # assert t_diff == 1e-9 # time scale should be 1ns.\n # values based on current measurment. Assuming voltage waveform is aligned.\n\n # validation on the real maxima/minima of current\n assert i.argmax() < i.argmin(), 'Current valley before peak, signal is inverted!'\n\n v_min = min(v)\n v_max = max(v)\n v_max_time = np.where(v == v_max)[0][0] # first value where voltage has maximum\n # v_min_time = np.where(v == v_min)[0][-1] # last value where voltage has minimum\n # assert v_max_time < v_min_time, 'Voltage valley before peak, signal inverted!'\n c_peak_time = i[0:v_max_time].argmax() # current peak is before voltage maximum\n c_max = i[c_peak_time]\n\n c_valley_time = i.argmin()\n c_min = min(i)\n assert i[c_valley_time] == c_min\n\n # some validation\n assert c_peak_time < c_valley_time, 'Current valley before peak, signal is inverted!'\n assert MAX_VOLTAGE_MIN <= v_max < MAX_VOLTAGE_MAX, 'Max voltage error (%r)' % v_max\n assert MAX_CURRENT_MIN <= c_max < MAX_CURRENT_MAX, 'Max current error (%r)' % c_max\n\n # Find the settling time of the current. Than use the time where the current is stable\n # to calculate the final pulse voltage. This pulse final voltage is then used to calculate\n # the settling time and risetime of the voltage.\n\n # all parts of current inside 10% of maximum, till end of pulse\n i_time_settling_options = [abs(x) < 0.1 * c_max for x in i[0:c_valley_time]]\n ranges = count_ranges(i_time_settling_options)\n range_before, range_pulse = find_longest_ranges(ranges, 2) # [end, length]\n end_pulse = range_pulse[0]\n i_time_settling = range_pulse[0] - range_pulse[1]\n # average of voltage during pulse when current is < 5% of max current\n v_pulse = np.mean(v[i_time_settling:end_pulse])\n # all parts of current inside 10% of maximum, till end of pulse\n v_time_settling_options = [abs(x - v_pulse) < (0.1 * v_pulse) for x in v]\n ranges = count_ranges(v_time_settling_options)\n if ranges == []: # if too much oscillations, a range cannot be found. Increase the bounds:\n # all parts of current inside 10% of maximum, till end of pulse\n v_time_settling_options = [abs(x - v_pulse) < (0.3 * v_pulse) for x in v]\n ranges = count_ranges(v_time_settling_options)\n print('Warning, voltage settling options increased from 10% to 30%!')\n assert ranges != [], \"Error! Line is too unstable.\"\n pulse = find_longest_ranges(ranges, 1) # pulse=[end,length] of voltage pulse stable\n settling_end = pulse[0] - pulse[1] # voltage pulse stable start\n # recalculate pulse voltage\n v_pulse_new = np.mean(v[settling_end:pulse[0]])\n if v_pulse > 13e3: # pulses for highest voltages have to be stable. 
Lower voltages are always less stable.\n assert abs(v_pulse-v_pulse_new)/v_pulse_new < 0.01, 'Pulse voltage unstable.'\n t_settling_end = t[settling_end] # voltage pulse stable start time\n v05 = 0.05 * v_pulse\n settling_start = np.where(v > v05)[0][0]\n t_settling_start = t[settling_start] # when v first rises above 0.05 of final\n t_settling = t_settling_end - t_settling_start\n v10 = 0.1 * v_pulse\n v90 = 0.9 * v_pulse\n t_rise_start = t[np.where(v > v10)[0][0]]\n t_rise_end = t[np.where(v > v90)[0][0]]\n t_rise = t_rise_end - t_rise_start\n rise_rate = (v90 - v10) / (t_rise)\n v_overshoot = v_max / v_pulse\n pulse_stable = int((settling_end + end_pulse) / 2) # point where the pulse is very stable\n # energy\n p = (v * i) # for this to be correct, make sure lines are aligned in b_correct_lines using offset 'v_div'\n e = integrate.cumtrapz(p, t, initial=0)\n p_rise = p[settling_start:pulse_stable]\n e_rise = e[settling_start:pulse_stable][-1]\n p_res = np.append(i[0:pulse_stable] ** 2 * gen_res_high, i[pulse_stable:] ** 2 * gen_res_low)\n # 1/2*C*V^2 is energy stored in capacitor, which is lost after discharging pulse.\n # e_cap = 1 / 2 * react_cap * v_pulse ** 2\n e_res = integrate.cumtrapz(p_res, t, initial=0)\n e_res_total = e_res[-1]\n e_plasma = e[-1] # energy to plasma is energy in positive pulse except charge on capacitor.\n\n # Correct the time axis to have 0 at the start of the pulse\n start = t[settling_start]\n t = t - start\n\n # all these values are added to the pickle and xlsx with 'output_' prepend in calc_run.py\n data = {\n 't': t,\n 'v': v,\n 'c': i,\n 'c_min': c_min,\n 'c_max': c_max,\n 'v_min': v_min,\n 'v_max': v_max,\n 'v_pulse': v_pulse,\n 't_settling': t_settling,\n 't_rise': t_rise,\n 'rise_rate': rise_rate,\n 'v_overshoot': v_overshoot,\n 'p': p,\n 'e': e,\n 'p_rise': p_rise,\n 'e_rise': e_rise,\n\n 'p_res': p_res,\n 'e_res': e_res,\n 'e_res_total': e_res_total,\n # 'e_cap': e_cap,\n 'e_plasma': e_plasma,\n\n 'start': start,\n 'end': t[end_pulse],\n # 'start_index': settling_start,\n # 'end_index': end_pulse,\n # 'test': i_time_settling\n }\n return data",
"def test_diff_analog_in_cal_5v_loop(self):\n for g in self.l.gains:\n for s,c,e in [(5, 11, .1), (2.5, 10, .03)]:\n v = self.l.input(channels=(c,c,c,c), gains=(g,g,g,g))\n r = v[0]\n if s*g > 20:\n if s*g > 25:\n self.assertTrue(v[3],\n \"%s should be overvoltage (%g, %g)\" % (v,s,g))\n continue\n for i in r:\n self.assertTrue(abs(s-i) < e,\n \"%g is not %g, channel %g, gain %g\" % (i,s,c,g))",
"def generate_arrival_ivt():\n r = np.random.rand()\n return -np.log(1-r)/_lambda\n \"\"\"\n note: the code above could be replaced with the built-in process generator:\n \n return np.random.exponential(1/_lambda)\n \"\"\"",
"def sweep():\n \n set_enable_load(True) # turn input ON\n time.sleep(.250)\n \n print('Begin IV curve measurement')\n \n voc = open_circ() # measure open circuit voltage\n iv_curve(voc) # measure iv curve\n short_circ() # measure short circuit current\n \n time.sleep(.250)\n set_enable_load(False) # turn input OFF",
"def init_V(g):\n return -np.log(g)",
"def get_voltage(self):\n print(\"voici le voltage de la batterie\")",
"def compute_advantage(V, s, ss, r, absorbing, gamma):\n v = V(s).squeeze()\n v_next = V(ss).squeeze() * (1 - absorbing)\n\n q = r + gamma * v_next\n adv = q - v\n return q[:, np.newaxis], adv[:, np.newaxis]",
"def swiss_to_gts(v):\n return v - np.array([667400, 158800, 1700])",
"def sag_abf(abf, epoch_ind):\n p0 = abf.sweepEpochs.p1s[epoch_ind]\n p1 = abf.sweepEpochs.p1s[epoch_ind+1]\n V = abf.sweepY[p0:p1]\n return sag(V)",
"def evaluate_vag(\n params: Array,\n preset: Sequence[int],\n g: Graph,\n lbd: float = 0.0,\n overlap_threhold: float = 0.0,\n) -> Tuple[Tensor, Tensor, Tensor, Tensor]:\n params = array_to_tensor(params) # complexify params\n with tf.GradientTape() as t:\n t.watch(params)\n expe, ene, probasum = exp_forward(\n params, preset, g, lbd=lbd, overlap_threhold=overlap_threhold\n )\n if lbd == 0:\n gr = t.gradient(ene, params)\n else:\n gr = t.gradient(expe, params)\n # return forward(beta)\n return expe, ene, cons.backend.real(gr), probasum",
"def bench():\n \n clock = Signal(bool(False))\n reset = Signal(bool(False))\n strobe = Signal(bool(False))\n serialOut = Signal(bool(False))\n load = Signal(bool(False))\n ldac = Signal(bool(False))\n clkDacOut = Signal(bool(False))\n busy = Signal(bool(False))\n vrefTopA = Signal(intbv(0)[8:])\n vrefTopB = Signal(intbv(0)[8:])\n vrefBotA = Signal(intbv(0)[8:])\n vrefBotB = Signal(intbv(0)[8:])\n \n\n dut_dac_controller = dac_controller(clock, reset, vrefTopA, vrefTopB, vrefBotA, vrefBotB, strobe, \n serialOut, load, ldac, clkDacOut, busy)\n \n @always(delay(PERIOD//2))\n def clkgen():\n clock.next = not clock\n\n @instance\n def stimulus():\n reset.next = True\n strobe.next = False\n vrefTopA.next = 0\n vrefTopB.next = 0\n vrefBotA.next = 0\n vrefBotB.next = 0\n yield delay(100)\n reset.next = False\n yield busy.negedge\n vrefTopA.next = 100\n vrefTopB.next = 120\n vrefBotA.next = 50\n vrefBotB.next = 20\n strobe.next = True\n yield busy.posedge\n strobe.next = False\n yield busy.negedge\n vrefTopA.next = 100\n vrefTopB.next = 120\n vrefBotA.next = 130\n vrefBotB.next = 20\n strobe.next = True\n yield busy.posedge\n strobe.next = False\n yield busy.negedge\n vrefTopA.next = 100\n vrefTopB.next = 120\n vrefBotA.next = 50\n vrefBotB.next = 220\n strobe.next = True\n yield busy.posedge\n strobe.next = False\n yield busy.negedge\n vrefTopA.next = 100\n vrefTopB.next = 120\n vrefBotA.next = 150\n vrefBotB.next = 220\n strobe.next = True\n yield busy.posedge\n strobe.next = False\n yield busy.negedge\n vrefTopA.next = 220\n vrefTopB.next = 148\n vrefBotA.next = 60\n vrefBotB.next = 37\n strobe.next = True\n yield busy.posedge\n strobe.next = False\n yield busy.negedge\n yield delay(1000) \n raise StopSimulation\n\n return dut_dac_controller, clkgen, stimulus",
"def res(self, t, y, yd, sw):\n if sw[0]:\n# print('state 1')\n G = scipy.array([\n [0, 1, 0],\n [0, 0, 1]\n ])\n gvec = y[3:5]\n elif sw[1]:\n# print('state 2')\n G = scipy.array([\n [0, self.hS, 0],\n [1, self.rS, 0]\n ])\n gvec = scipy.array([self.rS - self.r0 + self.hS * y[1],\n y[5] + self.rS * y[6]])\n elif sw[2]:\n# print('state 3, phi_bp: ', y[7], 'res: ', self.hB * y[2] - self.lS - self.lG + self.lB + self.r0)\n G = scipy.array([\n [0, - self.hS, 0],\n [1, self.rS, 0]\n ])\n gvec = scipy.array([self.rS - self.r0 - self.hS * y[1],\n y[5] + self.rS * y[6]])\n\n ff = scipy.array([- (self.mS + self.mB) * self.g,\n self.cp * (y[2] - y[1]) - self.mB * self.lS * self.g,\n self.cp * (y[1] - y[2]) - self.mB * self.lG * self.g])\n\n res_1 = yd[0:5] - y[5:10]\n res_2 = scipy.dot(self.M, yd[5:8]) - ff + scipy.dot(G.T, y[3:5])\n res_3 = gvec\n\n return scipy.hstack((res_1, res_2, res_3)).flatten()",
"def get_vcond(lambdam, taum):\n return 2 * lambdam / taum",
"def slow_iv_all(self, bias_groups=None, wait_time=.1, bias=None, \n bias_high=1.5, gcp_mode=True, bias_low=0, bias_step=.005, \n show_plot=False, high_current_wait=1., cool_wait=30,\n make_plot=True, save_plot=True, channels=None, band=None,\n high_current_mode=True, overbias_voltage=8., \n grid_on=True, phase_excursion_min=3.):\n if bias_groups is None:\n bias_groups = self.all_groups\n\n if overbias_voltage != 0.:\n overbias = True\n else:\n overbias = False\n\n if bias is None:\n bias = np.arange(bias_high, bias_low-bias_step, -bias_step)\n\n overbias_wait = 2.\n if overbias:\n self.overbias_tes_all(bias_groups=bias_groups, \n overbias_wait=overbias_wait, tes_bias=np.max(bias), \n cool_wait=cool_wait, high_current_mode=high_current_mode,\n overbias_voltage=overbias_voltage)\n\n self.log('Turning lmsGain to 0.', self.LOG_USER)\n lms_gain2 = self.get_lms_gain(2) # just do this on both bands\n lms_gain3 = self.get_lms_gain(3) # should fix the hardcoding though -CY\n self.set_lms_gain(2, 0)\n self.set_lms_gain(3, 0)\n\n self.log('Starting to take IV.', self.LOG_USER)\n self.log('Starting TES bias ramp.', self.LOG_USER)\n\n\n self.log('Starting to take IV.', self.LOG_USER)\n self.log('Starting TES bias ramp.', self.LOG_USER)\n\n bias_group_bool = np.zeros((8,)) # hard coded to have 8 bias groups\n bias_group_bool[bias_groups] = 1 # only set things on the bias groups that are on\n\n self.set_tes_bias_bipolar_array(bias[0] * bias_group_bool)\n time.sleep(wait_time) # loops are in pyrogue now, which are faster?\n\n datafile = self.stream_data_on(gcp_mode=gcp_mode)\n self.log('writing to {}'.format(datafile))\n\n for b in bias:\n self.log('Bias at {:4.3f}'.format(b))\n self.set_tes_bias_bipolar_array(b * bias_group_bool)\n time.sleep(wait_time) # loops are now in pyrogue, so no division\n\n self.stream_data_off(gcp_mode=gcp_mode)\n self.log('Done with TES bias ramp', self.LOG_USER)\n\n self.log('Returning lmsGain to original values', self.LOG_USER)\n self.set_lms_gain(2, lms_gain2)\n self.set_lms_gain(3, lms_gain3)\n\n basename, _ = os.path.splitext(os.path.basename(datafile))\n np.save(os.path.join(self.output_dir, basename + '_iv_bias_all'), bias)\n\n iv_raw_data = {}\n iv_raw_data['bias'] = bias\n iv_raw_data['high_current_mode'] = high_current_mode\n iv_raw_data['bias group'] = bias_groups\n iv_raw_data['datafile'] = datafile\n iv_raw_data['basename'] = basename\n iv_raw_data['output_dir'] = self.output_dir\n iv_raw_data['plot_dir'] = self.plot_dir\n fn_iv_raw_data = os.path.join(self.output_dir, basename + \n '_iv_raw_data.npy')\n self.log('Writing IV metadata to {}.'.format(fn_iv_raw_data))\n np.save(os.path.join(self.output_dir, fn_iv_raw_data), iv_raw_data)\n\n R_sh=self.R_sh\n self.analyze_slow_iv_from_file(fn_iv_raw_data, make_plot=make_plot,\n show_plot=show_plot, save_plot=save_plot, R_sh=R_sh,\n gcp_mode=gcp_mode, grid_on=grid_on,\n phase_excursion_min=phase_excursion_min,chs=channels,band=band)",
"def setvoltages(self):\n pass",
"def dvdt(self, args: Dict) -> float:\n if self.channel_bool['leak']:\n i_leak: float = self.leak.i(args['v'])\n else:\n i_leak: float = 0.\n \n if self.channel_bool['nav']:\n i_nav: float = self.nav.i(args['v'], h=args['h_nav'])\n else:\n i_nav: float = 0.\n\n if self.channel_bool['kvhh']:\n i_kvhh: float = self.kvhh.i(args['v'], n=args['n_kvhh'])\n else:\n i_kvhh: float = 0.\n\n if self.channel_bool['kva']:\n i_kva: float = self.kva.i(args['v'], h=args['h_kva'])\n else:\n i_kva: float = 0.\n\n if self.channel_bool['kvsi']:\n i_kvsi: float = self.kvsi.i(args['v'], m=args['m_kvsi'])\n else:\n i_kvsi: float = 0.\n\n if self.channel_bool['cav']:\n i_cav: float = self.cav.i(args['v'])\n else:\n i_cav: float = 0.\n\n if self.channel_bool['kca']:\n i_kca: float = self.kca.i(args['v'], ca=args['ca'])\n else:\n i_kca: float = 0.\n \n if self.channel_bool['nap']:\n i_nap: float = self.nap.i(args['v'])\n else:\n i_nap: float = 0.\n\n if self.channel_bool['kir']:\n i_kir: float = self.kir.i(args['v'])\n else:\n i_kir: float = 0.\n\n if self.channel_bool['ampar']:\n i_ampar: float = self.ampar.i(args['v'], s=args['s_ampar'])\n else:\n i_ampar: float = 0.\n\n if self.channel_bool['nmdar']:\n i_nmdar: float = self.nmdar.i(args['v'], s=args['s_nmdar'])\n else:\n i_nmdar: float = 0.\n\n if self.channel_bool['gabar']:\n i_gabar: float = self.gabar.i(args['v'], s=args['s_gabar'])\n else:\n i_gabar: float = 0.\n\n return ((-10.0*self.params.area \n * (i_leak\n + i_nav \n + i_kvhh \n + i_kva \n + i_kvsi \n + i_cav \n + i_kca \n + i_nap \n + i_kir) \n - (i_ampar \n + i_nmdar \n + i_gabar))\n / (10.0*self.params.cm*self.params.area))",
"def plot(self):\n\t\tself.plotOfLoopVoltage()",
"def compute_advantage_montecarlo(V, s, ss, r, absorbing, gamma):\n r = r.squeeze()\n q = np.zeros(len(r))\n v = V(s).squeeze()\n\n q_next = V(ss[-1]).squeeze().item()\n for rev_k in range(len(r)):\n k = len(r) - rev_k - 1\n q_next = r[k] + gamma * q_next * (1. - absorbing[k])\n q[k] = q_next\n\n adv = q - v\n return q[:, np.newaxis], adv[:, np.newaxis]",
"def fgausbg(v,p):\n return np.exp(-0.5 * ((v[0] - p[0]) / p[1])**2) * p[2] + p[3]",
"def g_R_k_i(s, p, k='x', i=1): # (Validated)\n from numpy import log\n if k == 'y': # 'y' = Vapour phase standard\n V = s.c[i]['V_v'] \n else: # Assume all phases other than vapour are liquid, ex. 'x'\n V = s.c[i]['V_l'] \n \n return (s.c[i]['P'] * V / (p.c[i]['R'] * s.c[i]['T']) - 1.0\n - log(s.c[i]['P'] / (p.c[i]['R'] * s.c[i]['T']))\n - log(V - s.c[i]['b']) \n - s.c[i]['a'] / (p.c[i]['R'] * s.c[i]['T'] * V))",
"def sm_measure_voltage(self,num_readings=1):\n self.sm.set_measurement_function(\"VOLTAGE\")\n self.sm.format_readings(\"VOLTAGE\")\n return average(self.sm.take_measurement(num_readings))"
]
| [
"0.5797982",
"0.5753957",
"0.5719163",
"0.5645294",
"0.54906857",
"0.54530114",
"0.5418226",
"0.5358796",
"0.53198016",
"0.5301523",
"0.52992094",
"0.5296971",
"0.5276875",
"0.5262266",
"0.52552825",
"0.51872295",
"0.5164188",
"0.51586914",
"0.51306736",
"0.51049095",
"0.5076097",
"0.50319785",
"0.5020434",
"0.50085723",
"0.5002254",
"0.49969283",
"0.49926582",
"0.49905184",
"0.4985876",
"0.49796852"
]
| 0.6116707 | 0 |
Computes sag (absolute, not sag ratio) using abf object and epoch index. See `sag_ratio` to calculate the sag ratio. | def sag_abf(abf, epoch_ind):
p0 = abf.sweepEpochs.p1s[epoch_ind]
p1 = abf.sweepEpochs.p1s[epoch_ind+1]
V = abf.sweepY[p0:p1]
return sag(V) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sag_ratio_abf(abf, epoch_ind):\n p0 = abf.sweepEpochs.p1s[epoch_ind]\n p1 = abf.sweepEpochs.p1s[epoch_ind+1]\n V = abf.sweepY[p0:p1]\n return sag_ratio(V)",
"def sag_ratio(V):\n\n Vmin = np.amin(V)\n Vend = V[-1]\n sr = (Vmin - Vend) / Vmin\n if sr < 0:\n print(\"Warning: sag ratio being negative indicates there is no sag\")\n return sr",
"def test_avgeraging(self):\n\n num_ensemble = 10\n enn = networks.MLPEnsembleEnn(\n output_sizes=[1],\n num_ensemble=num_ensemble,\n )\n\n dummy_metrics = {'a': 0, 'b': 1}\n # A dummy loss fn that returns the normalized index as loss and two constant\n # metrics. Index is random but normalized such that its mean is 1.\n single_loss_fn = DummySingleIndexLossFn(num_ensemble, dummy_metrics)\n\n num_index_samples = 100\n loss_fn = average_single_index_loss(single_loss_fn, num_index_samples)\n dummy_batch = base.Batch(np.ones([1, 1]), np.ones([1, 1]))\n loss, metrics = loss_fn(\n enn=enn, params=dict(), batch=dummy_batch, key=jax.random.PRNGKey(0))\n\n # Since the single loss has mean 1 the averaged loss also has mean 1 a\n # variance proportional to 1/np.sqrt(num_index_samples).\n self.assertAlmostEqual(\n loss,\n 1.0,\n delta=5 / np.sqrt(num_index_samples),\n msg=f'Expected loss to be ~1.0 but it is {loss}')\n self.assertDictEqual(\n metrics, dummy_metrics,\n f'expected metrics to be {dummy_metrics} but it is {metrics}')",
"def compute_gae(V, s, ss, r, absorbing, last, gamma, lam):\n v = V(s)\n v_next = V(ss)\n gen_adv = np.empty_like(v)\n for rev_k in range(len(v)):\n k = len(v) - rev_k - 1\n if last[k] or rev_k == 0:\n gen_adv[k] = r[k] - v[k]\n if not absorbing[k]:\n gen_adv[k] += gamma * v_next[k]\n else:\n gen_adv[k] = r[k] + gamma * v_next[k] - v[k] + gamma * lam * gen_adv[k + 1]\n return gen_adv + v, gen_adv",
"def avg_spike_frequency_abf(abf, epoch):\n p0 = abf.sweepEpochs.p1s[epoch]\n p1 = abf.sweepEpochs.p1s[epoch+1]\n t = abf.sweepX[p0:p1]\n V = abf.sweepY[p0:p1]\n return avg_spike_frequency(t, V)",
"def test_sag_adaptive():\n np.random.seed(0)\n X = sparse.rand(100, 10, density=.5, random_state=0).tocsr()\n y = np.random.randint(0, high=2, size=100)\n for alpha in np.logspace(-3, 1, 5):\n clf_adaptive = SAGClassifier(\n eta='line-search', random_state=0, alpha=alpha)\n clf_adaptive.fit(X, y)\n clf = SAGClassifier(\n eta='auto', random_state=0, alpha=alpha)\n clf.fit(X, y)\n assert_almost_equal(clf_adaptive.score(X, y), clf.score(X, y), 1)\n\n clf_adaptive = SAGAClassifier(\n eta='line-search', loss='log', random_state=0, alpha=alpha, max_iter=20)\n clf_adaptive.fit(X, y)\n assert np.isnan(clf_adaptive.coef_.sum()) == False\n clf = SAGAClassifier(\n eta='auto', loss='log', random_state=0, alpha=alpha, max_iter=20)\n clf.fit(X, y)\n assert_almost_equal(clf_adaptive.score(X, y), clf.score(X, y), 1)",
"def adjust_for_sag(self, sag: int, orientation: str | Orientation) -> None:\n orient = convert_to_enum(orientation, Orientation)\n direction = \"y\" if orient == Orientation.UP_DOWN else \"x\"\n self.roll(direction, sag)",
"def abv(og, fg):\n return abw(og, fg) * fg / 0.794",
"def createAverageImages(self):\n for grabber in self.grabbers:\n callsign = grabber[\"ID\"]\n callMatch = \"%s/%s*\" % (self.downloadFolder, callsign)\n fnameOut = \"%s/%s.%s.jpg\" % (self.averagesFolder, callsign, self.timeCode())\n cmd = \"convert %s -evaluate-sequence Mean %s\" %(callMatch, fnameOut)\n print(cmd)\n process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)\n process.wait()",
"def schedule_variant(epoch):\n\n def _linear_annealing(epoch):\n t = epoch / args.epochs\n lr_ratio = 0.01\n if t <= 0.5:\n factor = 1.0\n elif t <= 0.9:\n factor = 1.0 - (1.0 - lr_ratio) * (t - 0.5) / 0.4\n else:\n factor = lr_ratio\n return args.lr_init * factor\n\n lr_tmp = _linear_annealing(epoch)\n if args.swag:\n if (epoch > args.swag_start) or lr_tmp <= args.swag_lr:\n return _linear_annealing(args.swag_start)\n return lr_tmp",
"def eval_genome(g, conf, batch):\n\n inputs, outputs = batch\n inputs = preprocessor(inputs)\n net = RecurrentNet.create(g, conf, device=\"cpu\")\n mse = 0\n for single_inputs, output in zip(inputs, outputs):\n net.reset()\n mask, score = gate_activation(net, single_inputs)\n selected_score = score[mask]\n if selected_score.size == 0:\n xo = 0.5\n else:\n xo = np.sum(selected_score) / selected_score.size\n mse += (xo - output.item()) ** 2\n\n return 1 / (1 + mse)",
"def cal_sdri(src_ref, src_est, mix):\r\n src_anchor = np.stack([mix, mix], axis=0)\r\n sdr, sir, sar, popt = bss_eval_sources(src_ref, src_est)\r\n sdr0, sir0, sar0, popt0 = bss_eval_sources(src_ref, src_anchor)\r\n avg_sdri = ((sdr[0]-sdr0[0]) + (sdr[1]-sdr0[1])) / 2\r\n return avg_sdri",
"def assessStrategyGlobal(test_beginning_match,\r\n duration_train_matches,\r\n duration_val_matches,\r\n duration_test_matches,\r\n xgb_params,\r\n nb_players,\r\n nb_tournaments,\r\n features,\r\n data,\r\n model_name=\"0\"):\r\n ########## Training/validation/testing set generation\r\n\r\n # Number of matches in our dataset (ie. nb. of outcomes divided by 2)\r\n nm = int(len(features) / 2)\r\n\r\n # Id of the first and last match of the testing,validation,training set\r\n beg_test = test_beginning_match\r\n end_test = min(test_beginning_match + duration_test_matches - 1, nm - 1)\r\n end_val = min(beg_test - 1, nm - 1)\r\n beg_val = beg_test - duration_val_matches\r\n end_train = beg_val - 1\r\n beg_train = beg_val - duration_train_matches\r\n\r\n train_indices = range(2 * beg_train, 2 * end_train + 2)\r\n val_indices = range(2 * beg_val, 2 * end_val + 2)\r\n test_indices = range(2 * beg_test, 2 * end_test + 2)\r\n\r\n if (len(test_indices) == 0) | (len(train_indices) == 0):\r\n return 0\r\n\r\n # Split in train/validation/test\r\n xval = features.iloc[val_indices, :].reset_index(drop=True)\r\n xtest = features.iloc[test_indices, :].reset_index(drop=True)\r\n xtrain = features.iloc[train_indices, :].reset_index(drop=True)\r\n ytrain = pd.Series([1, 0] * int(len(train_indices) / 2))\r\n yval = pd.Series([1, 0] * int(len(val_indices) / 2))\r\n\r\n # We limit the number of players and tournaments one-hot encoded : we'll keep only the\r\n # players that won the most matches to avoid overfitting and make the process quicker\r\n # Biggest players :\r\n biggest_players = data.iloc[range(beg_train, end_train), :][[\"Winner\", \"Loser\"]]\r\n biggest_players = pd.concat([biggest_players.Winner, biggest_players.Loser], 0)\r\n biggest_players = list(biggest_players.value_counts().index[:nb_players])\r\n player_columns = [el for el in xtrain.columns if el[:6] == \"player\"]\r\n to_drop_players = [el for el in player_columns if el[7:] not in biggest_players]\r\n # Biggest Tournaments\r\n biggest_tournaments = data.iloc[range(beg_train, end_train), :][\"Tournament\"]\r\n biggest_tournaments = list(biggest_tournaments.value_counts().index[:nb_tournaments])\r\n tournament_columns = [el for el in xtrain.columns if el[:10] == \"tournament\"]\r\n to_drop_tournaments = [el for el in tournament_columns if el[11:] not in biggest_tournaments]\r\n # We drop smallest Tournaments and players\r\n xtrain = xtrain.drop(to_drop_players + to_drop_tournaments, 1)\r\n xval = xval.drop(to_drop_players + to_drop_tournaments, 1)\r\n xtest = xtest.drop(to_drop_players + to_drop_tournaments, 1)\r\n\r\n ### ML model training\r\n model = xgbModelBinary(xtrain, ytrain, xval, yval, xgb_params, sample_weights=None)\r\n\r\n # The probability given by the model to each outcome of each match :\r\n pred_test = model.predict(xgb.DMatrix(xtest, label=None))\r\n # For each match, the winning probability the model gave to the players that won (should be high...) :\r\n prediction_test_winner = pred_test[range(0, len(pred_test), 2)]\r\n # For each match, the winning probability the model gave to the players that lost (should be low...) 
:\r\n prediction_test_loser = pred_test[range(1, len(pred_test), 2)]\r\n\r\n ### Odds and predicted probabilities for the testing set (1 row/match)\r\n odds = data[[\"PSW\", \"PSL\"]].iloc[range(beg_test, end_test + 1)]\r\n implied_probabilities = 1 / odds\r\n p = pd.Series(\r\n list(zip(prediction_test_winner, prediction_test_loser, implied_probabilities.PSW, implied_probabilities.PSL)))\r\n\r\n ### For each match in the testing set, if the model predicted the right winner :\r\n right = (prediction_test_winner > prediction_test_loser).astype(int)\r\n\r\n ### For each match in the testing set, the confidence of the model in the outcome it chose\r\n def sel_match_confidence(x):\r\n if x[0] > x[1]:\r\n return x[0] / x[2]\r\n else:\r\n return x[1] / x[3]\r\n\r\n confidence = p.apply(lambda x: sel_match_confidence(x))\r\n\r\n ### The final confidence dataset\r\n confidenceTest = pd.DataFrame({\"match\": range(beg_test, end_test + 1),\r\n \"win\" + model_name: right,\r\n \"confidence\" + model_name: confidence,\r\n \"PSW\": odds.PSW.values})\r\n confidenceTest = confidenceTest.sort_values(\"confidence\" + model_name, ascending=False).reset_index(drop=True)\r\n\r\n return confidenceTest",
"def test_burst_dispersion(self):\n # some reproducible arbitrariness\n np.random.seed(7342642)\n\n n = 25\n t_max = 50\n dt = 0.1\n n_sim = 10\n \n G = RateHVCLayer(n)\n\n burst_starts = []\n for i in xrange(n_sim):\n M = simulation.StateMonitor(G, 'out')\n sim = simulation.Simulation(G, M, dt=dt)\n sim.run(t_max)\n\n burst_starts.append([dt*min((M.out[i] > 0).nonzero()[0])\n for i in xrange(n)])\n\n burst_starts_range = [np.ptp([_[i] for _ in burst_starts])\n for i in xrange(n)]\n \n self.assertLess(np.max(burst_starts_range), G.burst_noise + dt/2)",
"def get_gae(rewards, value_estimates, value_next=0.0, gamma=0.99, lambd=0.95):\n value_estimates = np.asarray(value_estimates.tolist() + [value_next])\n delta_t = rewards + gamma * value_estimates[1:] - value_estimates[:-1]\n advantage = discount_rewards(r=delta_t, gamma=gamma * lambd)\n return advantage",
"def gfa(samples):\n diff = samples - samples.mean(-1)[..., None]\n n = samples.shape[-1]\n numer = n*(diff*diff).sum(-1)\n denom = (n-1)*(samples*samples).sum(-1)\n return sqrt(numer/denom)",
"def shrunken_averages_encoder(training_frame, valid_frame = None,test_frame=None, x='x', y='y', lambda_=0.15, perturb_range=0.05,threshold=150, test=False, frame_type='h2o',test_does_have_y=False,id_col=None,only_return_encoded=False):\n\n encode_name = x + '_Tencode'\n\n if frame_type == 'spark':\n # x_column_type = training_frame.select(x).dtypes.flatMap(list)[1]\n\n #To get the average out of the df have to convert to an rdd and flatMap\n #it. Then take the first and only value from the list returned.\n overall_mean = training_frame.agg({y:'avg'}).rdd.flatMap(list).first()\n overall_mean_train = overall_mean\n #ALTERNATIVE way to do the same thing with sql functions\n # from pyspark.sql.functions import col, avg\n # overall_mean = training_frame.agg(avg(col(y))).rdd.flatMap(list).first()\n\n def find_shrunken_averages(tuple_input):\n \"\"\"\n Reduce function to return the proper average for a given level.\n\n :return: A tuple of (level, ajusted_mean||overall_mean)\n \"\"\"\n #The categorical level.\n level = tuple_input[0]\n # The labels list (y varaibale) from a map function.\n labels = tuple_input[1]\n # The total number of level occurances in the frame (ie count)\n level_n = len(labels)\n level_mean = sum(labels) / level_n\n\n # Determine if there enough occurances of a level. If NOT return overall_mean\n if level_n >= threshold:\n return(level,level_mean)\n else:\n return(level, ((1 - lambda_) * level_mean) +\\\n (lambda_ * overall_mean) )\n #This article shows why one has to use a map-groupByKey-map rather then map-reduce order. To collect all values into one reducer\n #you have to do a groupByKey.\n #https://databricks.gitbooks.io/databricks-spark-knowledge-base/content/best_practices/prefer_reducebykey_over_groupbykey.html\n levels_average_list_train = training_frame.select(x,y).rdd.map(lambda i: (i[0], i[1])).groupByKey().map(find_shrunken_averages).collect()\n # print(levels_average_list_train)\n\n levels_average_list_valid = None\n overall_mean_valid = None\n if valid_frame:\n #update overall_mean to valid frames mean\n overall_mean_valid = valid_frame.agg({y:'avg'}).rdd.flatMap(list).first()\n overall_mean = overall_mean_valid\n levels_average_list_valid = valid_frame.select(x,y).rdd.map(lambda i: (i[0], i[1])).groupByKey().map(find_shrunken_averages).collect()\n\n levels_average_list_test = None\n overall_mean_test = None\n if test_does_have_y:\n #update overall_mean to valid frames mean\n overall_mean_test = test_frame.agg({y:'avg'}).rdd.flatMap(list).first()\n overall_mean = overall_mean_test\n levels_average_list_test = test_frame.select(x,y).rdd.map(lambda i: (i[0], i[1])).groupByKey().map(find_shrunken_averages).collect()\n\n from pyspark.sql.functions import lit #creates a literal value\n # create new frames with a new column\n new_training_frame, new_test_frame, new_valid_frame = None,None,None\n if id_col != None:\n #filter out other columns to save memory if id_col specified\n new_training_frame = training_frame.select(id_col,x).withColumn(encode_name, lit(overall_mean_train))\n if valid_frame:\n new_valid_frame = valid_frame.select(id_col,x).withColumn(encode_name, lit(overall_mean_valid))\n if test_does_have_y:\n new_test_frame = test_frame.select(id_col,x).withColumn(encode_name, lit(overall_mean_test))\n else:\n if valid_frame:\n new_test_frame = test_frame.select(id_col,x).withColumn(encode_name, lit(overall_mean_valid))\n else: #no valid frame so apply train means\n new_test_frame = test_frame.select(id_col,x).withColumn(encode_name, 
lit(overall_mean_train))\n else:\n new_training_frame = training_frame.withColumn(encode_name, lit(overall_mean_train))\n if valid_frame:\n new_valid_frame = valid_frame.withColumn(encode_name, lit(overall_mean_valid))\n if test_does_have_y:\n new_test_frame = test_frame.withColumn(encode_name, lit(overall_mean_test))\n else:\n if valid_frame:\n new_test_frame = test_frame.withColumn(encode_name, lit(overall_mean_valid))\n else: #no valid frame so apply train means\n new_test_frame = test_frame.withColumn(encode_name, lit(overall_mean_train))\n\n #Replace the values in the dataframes with new encoded values\n from pyspark.sql.functions import when\n for k,v in levels_average_list_train:\n new_training_frame = new_training_frame.withColumn(encode_name,\n when(new_training_frame[x] == k, v)\n .otherwise(new_training_frame[encode_name]))\n if not test_does_have_y:\n if not valid_frame:\n new_test_frame= new_test_frame.withColumn(encode_name,\n when(new_test_frame[x] == k, v)\n .otherwise(new_test_frame[encode_name]))\n #if we have a validation frame we want to set the test levels to the original_numerics\n #from the averaged valid frame instead of the test frame\n if valid_frame:\n for k,v in levels_average_list_valid:\n new_valid_frame = new_valid_frame.withColumn(encode_name,\n when(new_valid_frame[x] == k, v)\n .otherwise(new_valid_frame[encode_name]))\n if not test_does_have_y:\n new_test_frame= new_test_frame.withColumn(encode_name,\n when(new_test_frame[x] == k, v)\n .otherwise(new_test_frame[encode_name]))\n #if the test frame has its own levels\n if test_does_have_y:\n for k,v in levels_average_list_test:\n new_test_frame= new_test_frame.withColumn(encode_name,\n when(new_test_frame[x] == k, v)\n .otherwise(new_test_frame[encode_name]))\n\n if perturb_range > 0 or perturb_range < 0:\n #This will perturb everything by the same amount udfs dont work.\n # from pyspark.sql.types import NumericType,FloatType\n # from pyspark.sql.functions import udf\n # def perturb_value(value):\n # import numpy as np\n # perturb_percent = np.random.uniform(low=1-perturb_range, high=1+perturb_range, size=(1))[0]\n # return (value*perturb_percent)\n # perturb_value_udf = udf(perturb_value, FloatType())\n # new_training_frame = new_training_frame.withColumn(encode_name,perturb_value(new_training_frame[encode_name]))\n def perturb_value(tuple_input):\n \"\"\"\n A mapper to inject random noise into each individual value.\n \"\"\"\n id = tuple_input[0]\n value = tuple_input[1]\n from numpy.random import uniform\n perturb_percent = uniform(low=1-perturb_range, high=1+perturb_range, size=(1))[0]\n return (id, float(value*perturb_percent))\n # new_training_frame.select(encode_name).show(10)\n if training_frame:\n #Do the transformations and perturb\n temp_df = new_training_frame.select(id_col,encode_name).rdd.map(lambda i: (i[0], i[1])).map(perturb_value).toDF([id_col,encode_name])\n #Join the perturbed row back onto the main set\n new_training_frame = new_training_frame.drop(encode_name).join(temp_df,id_col,'inner')\n if valid_frame:\n #Do the transformations and perturb\n temp_df = new_valid_frame.select(id_col,encode_name).rdd.map(lambda i: (i[0], i[1])).map(perturb_value).toDF([id_col,encode_name])\n #Join the perturbed row back onto the main set\n new_valid_frame = new_valid_frame.drop(encode_name).join(temp_df,id_col,'inner')\n if test_frame:\n #Do the transformations and perturb\n temp_df = new_test_frame.select(id_col,encode_name).rdd.map(lambda i: (i[0], 
i[1])).map(perturb_value).toDF([id_col,encode_name])\n #Join the perturbed row back onto the main set\n new_test_frame = new_test_frame.drop(encode_name).join(temp_df,id_col,'inner')\n # new_training_frame.select(encode_name).show(10)\n\n if only_return_encoded:\n #remove origional x as its already in the original dfs\n if valid_frame:\n if test_frame:\n return new_training_frame.drop(x), new_valid_frame.drop(x),new_test_frame.drop(x)\n else:\n return new_training_frame.drop(x), new_valid_frame.drop(x)\n else:\n if test_frame:\n return new_training_frame.drop(x), new_test_frame.drop(x)\n else:\n return new_training_frame.drop(x)\n else:\n if valid_frame:\n if test_frame:\n return new_training_frame.drop(x).join(training_frame,id_col,'inner'), new_valid_frame.drop(x).join(valid_frame,id_col,'inner'), new_test_frame.drop(x).join(test_frame,id_col,'inner')\n else:\n return new_training_frame.drop(x).join(training_frame,id_col,'inner'), new_valid_frame.drop(x).join(valid_frame,id_col,'inner')\n else:\n if test_frame:\n return new_training_frame.drop(x).join(training_frame,id_col,'inner'), new_test_frame.drop(x).join(test_frame,id_col,'inner')\n else:\n return new_training_frame.drop(x).join(training_frame,id_col,'inner')\n else:\n import h2o\n import pandas as pd\n import numpy as np\n\n trdf, vdf, tsdf, tss = None, None, None, None\n if frame_type == 'h2o':\n # convert to pandas\n trdf = training_frame.as_data_frame().loc[:, [x,y]] # df\n if valid_frame:\n vdf = valid_frame.as_data_frame().loc[:, [x,y]] # df\n if test_frame:\n if test_does_have_y:\n tsdf = test_frame.as_data_frame().loc[:, [x,y]] # df\n else:\n tss = test_frame.as_data_frame().loc[:, x] # series\n elif frame_type == 'pandas':\n trdf = training_frame.loc[:, [x,y]] # df\n if valid_frame:\n vdf = valid_frame.loc[:, [x,y]] # df\n if test_frame:\n if test_does_have_y:\n tsdf = test_frame.loc[:, [x,y]] # df\n else:\n tss = test_frame.loc[:, x] # series\n\n\n # create dictionary of level:encode val\n\n overall_mean_train = trdf[y].mean()\n overall_mean_valid = None\n if valid_frame:\n overall_mean_valid = vdf[y].mean()\n overall_mean_test = None\n if test_frame:\n if test_does_have_y:\n overall_mean_test = tsdf[y].mean()\n row_val_dict_train = {}\n row_val_dict_valid = {}\n row_val_dict_test = {}\n\n for level in trdf[x].unique():\n level_df = trdf[trdf[x] == level][y]\n level_n = level_df.shape[0]\n level_mean = level_df.mean()\n if level_n >= threshold:\n row_val_dict_train[level] = level_mean\n else:\n row_val_dict_train[level] = ((1 - lambda_) * level_mean) +\\\n (lambda_ * overall_mean_train)\n if valid_frame:\n for level in vdf[x].unique():\n level_df = vdf[trdf[x] == level][y]\n level_n = level_df.shape[0]\n level_mean = level_df.mean()\n if level_n >= threshold:\n row_val_dict_valid[level] = level_mean\n else:\n row_val_dict_valid[level] = ((1 - lambda_) * level_mean) +\\\n (lambda_ * overall_mean_valid)\n if test_frame:\n if test_does_have_y:\n for level in tsdf[x].unique():\n level_df = tsdf[tsdf[x] == level][y]\n level_n = level_df.shape[0]\n level_mean = level_df.mean()\n if level_n >= threshold:\n row_val_dict_test[level] = level_mean\n else:\n row_val_dict_test[level] = ((1 - lambda_) * level_mean) +\\\n (lambda_ * overall_mean_test)\n\n row_val_dict_train[np.nan] = overall_mean_train # handle missing values\n if valid_frame:\n row_val_dict_valid[np.nan] = overall_mean_valid # handle missing values\n if test_frame:\n if test_does_have_y:\n row_val_dict_test[np.nan] = overall_mean_test # handle missing values\n\n if 
test:\n print(row_val_dict_train)\n print(row_val_dict_valid)\n\n from numpy.random import uniform\n\n # apply the transform to training data\n trdf[encode_name] = trdf[x].apply(lambda i: row_val_dict_train[i]*uniform(low=1-perturb_range, high=1+perturb_range))\n if valid_frame:\n vdf[encode_name] = vdf[x].apply(lambda i: row_val_dict_valid[i]*uniform(low=1-perturb_range, high=1+perturb_range))\n if test_frame:\n if test_does_have_y:\n tsdf[encode_name] = tsdf[x].apply(lambda i: row_val_dict_test[i]*uniform(low=1-perturb_range, high=1+perturb_range))\n\n # apply the transform to test data if it doesn't have its own y values\n if test_frame:\n if not test_does_have_y:\n tsdf = pd.DataFrame(columns=[x, encode_name])\n tsdf[x] = tss\n if valid_frame:\n tsdf.loc[:, encode_name] = overall_mean_valid # handle previously unseen values\n else:\n tsdf.loc[:, encode_name] = overall_mean_train # handle previously unseen values\n # handle values that are seen in tsdf but not row_val_dict\n for i, col_i in enumerate(tsdf[x]):\n try:\n row_val_dict_train[col_i]\n except:\n # a value that appeared in tsdf isn't in the row_val_dict so just\n # make it the overall_mean\n row_val_dict_train[col_i] = overall_mean_train\n\n if valid_frame:\n for i, col_i in enumerate(vdf[x]):\n try:\n row_val_dict_valid[col_i]\n except:\n # a value that appeared in tsdf isn't in the row_val_dict so just\n # make it the overall_mean\n row_val_dict_valid[col_i] = overall_mean_valid\n tsdf[encode_name] = tsdf[x].apply(lambda i: row_val_dict_valid[i]*uniform(low=1-perturb_range, high=1+perturb_range))\n else:\n tsdf[encode_name] = tsdf[x].apply(lambda i: row_val_dict_train[i]*uniform(low=1-perturb_range, high=1+perturb_range))\n\n if frame_type == 'h2o':\n # convert back to H2O\n trdf = h2o.H2OFrame(trdf[encode_name].as_matrix())\n trdf.columns = [encode_name]\n if valid_frame:\n vdf = h2o.H2OFrame(vdf[encode_name].as_matrix())\n vdf.columns = [encode_name]\n if test_frame:\n tsdf = h2o.H2OFrame(tsdf[encode_name].as_matrix())\n tsdf.columns = [encode_name]\n if valid_frame:\n if test_frame:\n return (trdf,vdf, tsdf)\n else:\n return (trdf,vdf)\n else:\n if test_frame:\n return (trdf,tsdf)\n else:\n return trdf\n else: #pandas\n #just return pandas\n if valid_frame:\n if test_frame:\n return (trdf,vdf, tsdf)\n else:\n return (trdf,vdf)\n else:\n if test_frame:\n return (trdf,tsdf)\n else:\n return trdf",
"def test_ags_distributions(self):\n\n def create_sample(i):\n \"\"\"Create test sample.\"\"\"\n metadata = {'foo': f'bar{i}'}\n return Sample(name=f'SMPL_{i}',\n metadata=metadata,\n microbe_census=create_microbe_census())\n\n samples = [create_sample(i).fetch_safe() for i in range(15)]\n result = ags_distributions.delay(samples).get()\n self.assertIn('foo', result)\n self.assertIn('bar0', result['foo'])\n self.assertIn('bar1', result['foo'])\n self.assertIn('min_val', result['foo']['bar0'])",
"def __get_img_augm_idx__(self, idx: int):\n\n images_done = idx * self.batch_size\n return divmod(images_done, self.gen_count)",
"def gaModel(NGEN,\n CXPB,\n MUTPB,\n modelOmega,\n year,\n region,\n mean,\n tournsize,\n n_aval\n ):\n start = time.clock()\n # Attribute generator\n toolbox.register(\"attr_float\", random.random)\n toolbox.register(\"mate\", tools.cxOnePoint)\n # operator for selecting individuals for breeding the next\n # generation: each individual of the current generation\n # is replaced by the 'fittest' (best) of three individuals\n # drawn randomly from the current generation.\n toolbox.register(\"select\", tools.selTournament, tournsize=tournsize)\n toolbox.register(\"mutate\", tools.mutPolynomialBounded,\n indpb=0.1, eta=1, low=0, up=1)\n\n stats = tools.Statistics(key=lambda ind: ind.fitness.values)\n stats.register(\"avg\", numpy.mean)\n stats.register(\"std\", numpy.std)\n stats.register(\"min\", numpy.min)\n stats.register(\"max\", numpy.max)\n\n # calculating the number of individuals of the\n # populations based on the number of executions\n y = int(n_aval / NGEN)\n x = n_aval - y * NGEN\n n = x + y\n\n toolbox.register(\"evaluate\", evaluationFunction,\n modelOmega=modelOmega, mean=mean)\n toolbox.register(\"individual\",\n tools.initRepeat,\n creator.Individual,\n toolbox.attr_float,\n len(modelOmega[0].bins)\n )\n toolbox.register(\"population\", tools.initRepeat, list, toolbox.individual)\n\n logbook = tools.Logbook()\n logbook.header = \"gen\", \"min\", \"avg\", \"max\", \"std\"\n\n pop = toolbox.population(n)\n # Evaluate the entire population\n # 2 model.bins: real data, generated model\n fitnesses = list(map(toolbox.evaluate, pop))\n for ind, fit in zip(pop, fitnesses):\n ind.fitness.values = fit\n for g in range(NGEN):\n print(g)\n # Select the next generation individuals\n offspring = toolbox.select(pop, len(pop))\n # Clone the selected individuals\n offspring = list(map(toolbox.clone, offspring))\n # Apply crossover and mutation on the offspring\n for child1, child2 in zip(offspring[::2], offspring[1::2]):\n if random.random() < CXPB:\n toolbox.mate(child1, child2)\n del child1.fitness.values\n del child2.fitness.values\n for mutant in offspring:\n if random.random() < MUTPB:\n toolbox.mutate(mutant)\n del mutant.fitness.values\n # Evaluate the individuals with an invalid fitness\n invalid_ind = [ind for ind in offspring if not ind.fitness.valid]\n fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)\n for ind, fit in zip(invalid_ind, fitnesses):\n ind.fitness.values = fit\n\n # The population is entirely replaced by the offspring,\n # but the last pop best_pop\n # Elitism\n best_pop = tools.selBest(pop, 1)[0]\n offspring = sorted(offspring, key=attrgetter(\"fitness\"), reverse=True)\n offspring[len(offspring) - 1] = best_pop\n random.shuffle(offspring)\n pop[:] = offspring\n # logBook\n record = stats.compile(pop)\n logbook.record(gen=g, **record)\n end = time.clock()\n generatedModel = models.model.newModel(modelOmega[0].definitions)\n # conferir se e bins o best_pop\n generatedModel.prob = best_pop\n generatedModel.bins = calcNumberBins(list(best_pop), mean)\n generatedModel.loglikelihood = best_pop.fitness.values\n generatedModel.definitions = modelOmega[0].definitions\n generatedModel.time = start - end\n generatedModel.logbook = logbook\n # output = generatedModel.loglikelihood\n # return((-1)*output[0])\n return generatedModel",
"def evaluate_vag(\n params: Array,\n preset: Sequence[int],\n g: Graph,\n lbd: float = 0.0,\n overlap_threhold: float = 0.0,\n) -> Tuple[Tensor, Tensor, Tensor, Tensor]:\n params = array_to_tensor(params) # complexify params\n with tf.GradientTape() as t:\n t.watch(params)\n expe, ene, probasum = exp_forward(\n params, preset, g, lbd=lbd, overlap_threhold=overlap_threhold\n )\n if lbd == 0:\n gr = t.gradient(ene, params)\n else:\n gr = t.gradient(expe, params)\n # return forward(beta)\n return expe, ene, cons.backend.real(gr), probasum",
"def abw(og, fg):\n\n oe = sg2plato(og)\n re = real_extract(og, fg)\n return (oe - re) / (2.0665 - 0.010665 * oe)",
"def eval_beat(individual):\n # compile the individual\n routine = gp.compile(individual, pset)\n # generate some test output\n try:\n test_output = gen_beat_output(routine)\n except:\n return 0.0,\n ## do some stats on the beat\n sd = np.std(np.array(test_output))\n bpm, correl = bpm_detector(test_output,24000)\n bpm_score = 1 - abs((bpm/120.0)-1)\n sd_score = sd / 128.0\n del test_output\n # return the score\n return float(bpm_score * sd_score),",
"def sarsa_lambda(env, estimator, num_episodes, num_timesteps, gamma=1.0, epsilon=0.1, epsilon_decay=0.1, lambd=0.1):\n stats = plotting.EpisodeStats(\n episode_lengths=np.zeros(num_episodes),\n episode_rewards=np.zeros(num_episodes))\n\n for i_episode in range(num_episodes):\n # print(\"episode: \", i_episode)\n # initialize\n\n policy = make_epsilon_greedy_policy(\n estimator, epsilon, env.action_space.n)\n\n estimator.initialize_eligibility()\n\n s = env.reset()\n rAll = 0\n\n # pick the first action\n probs = policy(s)\n a = np.random.choice(np.arange(len(probs)), p=probs)\n Q = estimator.predict(s, a)\n\n for t in range(num_timesteps):\n next_s, r, done, _ = env.step(a)\n # print(\"time setp\", t)\n # print(\"took action \", a)\n # print(\"took state \", s)\n\n # update eligibility\n estimator.update_eligibility(s, a, gamma, lambd)\n if done:\n\n td_target = r\n else:\n next_probs = policy(next_s)\n next_a = np.random.choice(np.arange(len(next_probs)), p=next_probs)\n next_Q = estimator.predict(next_s, next_a)\n td_target = r + gamma * next_Q\n\n td_delta = td_target - Q\n\n estimator.update(s, a, td_delta)\n\n rAll += r\n s = next_s\n a = next_a\n Q = next_Q\n epsilon *= epsilon_decay\n\n if done:\n print(\"reached the goal! at episode {}\".format(i_episode))\n break\n\n stats.episode_rewards[i_episode] = rAll\n stats.episode_lengths[i_episode] = t\n return stats",
"def average_value_estimation_scorer(algo, episodes, window_size=1024):\n total_values = []\n for episode in episodes:\n for batch in _make_batches(episode, window_size, algo.n_frames):\n actions = algo.predict(batch.observations)\n values = algo.predict_value(batch.observations, actions)\n total_values += values.tolist()\n # smaller is better, maybe?\n return -np.mean(total_values)",
"def get_gae(rewards, value_estimates, value_next=0.0, gamma=0.99, lambd=0.95):\n value_estimates = np.append(value_estimates, value_next)\n delta_t = rewards + gamma * value_estimates[1:] - value_estimates[:-1]\n advantage = discount_rewards(r=delta_t, gamma=gamma * lambd)\n return advantage",
"def gfa(samples):\n diff = samples - samples.mean(-1)[..., None]\n n = samples.shape[-1]\n numer = n * (diff * diff).sum(-1)\n denom = (n - 1) * (samples * samples).sum(-1)\n return np.sqrt(numer / denom)",
"def test_gan():\n nbr_qubits = 5\n\n # Normal law\n # N = 5*10 ** 3\n #\n # Database = np.random.normal(0, 1, N)\n # test_gan_qiskit(nbr_qubits, Database)\n\n # beta\n arr_beta = beta_proba(nbr_qubits, 2, 5)\n\n general_gantest(arr_beta, nbr_qubits)\n\n # uniform not on [0, 32]\n if nbr_qubits == 5:\n arr_unif = [1 / 24] * 24 + 8 * [0]\n general_gantest(arr_unif, nbr_qubits)",
"def average_away_epoch_occurrences(recording, epoch_regex='^STIM_', use_mask=True):\n if use_mask:\n recording = recording.remove_masked_epochs()\n\n # need to edit the epochs dataframe, so make a working copy\n temp_epochs = recording['resp'].epochs.copy()\n\n # only pull out matching epochs\n regex_mask = temp_epochs['name'].str.contains(pat=epoch_regex, na=False, regex=True)\n epoch_stims = temp_epochs[regex_mask]\n\n # get a list of the unique epoch names\n epoch_names = temp_epochs.loc[regex_mask, 'name'].sort_values().unique()\n\n #import pdb; pdb.set_trace()\n\n # what to round to when checking if epoch timings match\n d = int(np.ceil(np.log10(recording[list(recording.signals.keys())[0]].fs))+1)\n\n # need an end and start to close the bounds for cases where start and end bounds are identical\n s_name_start = pd.Series(epoch_stims['name'].values,\n pd.IntervalIndex.from_arrays(epoch_stims['start'], epoch_stims['end'], closed='left'))\n s_name_end = pd.Series(epoch_stims['name'].values,\n pd.IntervalIndex.from_arrays(epoch_stims['start'], epoch_stims['end'], closed='right'))\n s_cat_start = pd.Series(np.arange(len(epoch_stims['start']), dtype='int'),\n pd.IntervalIndex.from_arrays(epoch_stims['start'], epoch_stims['end'], closed='left'))\n s_cat_end = pd.Series(np.arange(len(epoch_stims['end']), dtype='int'),\n pd.IntervalIndex.from_arrays(epoch_stims['start'], epoch_stims['end'], closed='right'))\n\n # add helper columns using the interval index lookups\n temp_epochs['cat'] = temp_epochs['start'].map(s_cat_start)\n temp_epochs['cat_end'] = temp_epochs['end'].map(s_cat_end)\n temp_epochs['stim'] = temp_epochs['start'].map(s_name_start)\n temp_epochs['stim_end'] = temp_epochs['end'].map(s_name_end)\n\n # only want epochs that fall within a stim epoch, so drop the ones that don't\n drop_mask = temp_epochs['cat'] != temp_epochs['cat_end']\n trial_mask = temp_epochs['name'] == 'TRIAL' # also dorp this\n temp_epochs = temp_epochs.loc[~drop_mask & ~trial_mask, ['name', 'start', 'end', 'cat', 'stim']]\n\n temp_epochs['cat'] = temp_epochs['cat'].astype(int) # cast back to int to make into index\n\n # build another helper series, to map in times to subtract from start/end\n work_mask = temp_epochs['name'].str.contains(pat=epoch_regex, na=False, regex=True)\n s_starts = pd.Series(temp_epochs.loc[work_mask, 'start'].values, temp_epochs.loc[work_mask, 'cat'].values)\n \n temp_epochs['start'] -= temp_epochs['cat'].map(s_starts)\n temp_epochs['end'] -= temp_epochs['cat'].map(s_starts)\n temp_epochs = temp_epochs.round(d)\n \n expected_max = temp_epochs.loc[temp_epochs['name'].str.contains(pat=epoch_regex, na=False, regex=True),'end'].max()\n\n concat = []\n\n offset = 0\n new_epoch_names=[]\n for name, group in temp_epochs.groupby('stim'):\n # build a list of epoch names where all the values are equal\n m_equal =(group.groupby('name').agg({\n 'start': lambda x: len(set(x)) == 1,\n 'end': lambda x: len(set(x)) == 1,\n }).all(axis=1)\n )\n m_equal = list(m_equal.index[m_equal].values)\n m_equal.extend([name,'REFERENCE','PreStimSilence','PostStimSilence'])\n \n # find the epoch names that are common to every group\n s = set()\n for idx, (cat_name, cat_group) in enumerate(group.groupby('cat')):\n if idx == 0:\n s.update(cat_group['name'])\n else:\n s.intersection_update(cat_group['name'])\n\n # drop where values across names aren't equal, or where a group is missing an epoch\n keep_mask = (group['name'].isin(m_equal)) & (group['name'].isin(s))\n\n g = group[keep_mask].drop(['cat', 'stim'], 
axis=1).drop_duplicates()\n max_end = g['end'].max()\n g[['start', 'end']] += offset\n \n #if max_end>=expected_max:\n concat.append(g)\n offset += max_end\n new_epoch_names.append(name)\n #else:\n # log.info(f\"dropping epoch {name} because it's too short\")\n \n if np.isnan(offset):\n log.info('nan offset')\n\n new_epochs = pd.concat(concat).sort_values(['start', 'end', 'name']).reset_index(drop=True)\n epoch_names=new_epoch_names\n \n # make name the temp_epochs index for quick start/end lookup in loop below\n temp_epochs = (temp_epochs[['name', 'start', 'end']]\n .drop_duplicates()\n .set_index('name')\n .assign(dur=lambda x: (x['end'] - x['start']).astype(float))\n .drop(['start', 'end'], axis='columns')\n )\n\n #averaged_recording = recording.copy()\n averaged_signals = {}\n for signal_name, signal in recording.signals.items():\n # TODO: this may be better done as a method in signal subclasses since\n # some subclasses may have more efficient approaches (e.g.,\n # TiledSignal)\n\n # Extract all occurences of each epoch, returning a dict where keys are\n # stimuli and each value in the dictionary is (reps X cell X bins)\n #print(signal_name)\n epoch_data = signal.rasterize().extract_epochs(epoch_names)\n\n fs = signal.fs\n # Average over all occurrences of each epoch\n data = []\n for epoch_name in epoch_names:\n epoch = epoch_data[epoch_name]\n \n # TODO: fix empty matrix error. do epochs align properly?\n if epoch.dtype == bool:\n epoch = epoch[0,...]\n elif np.sum(np.isfinite(epoch)):\n epoch = np.nanmean(epoch, axis=0)\n else:\n epoch = epoch[0,...]\n\n elen = int(round(np.min(temp_epochs.loc[epoch_name, 'dur'] * fs)))\n\n if epoch.shape[-1] > elen:\n #log.info('truncating epoch_data for epoch %s', epoch_name)\n #epoch = epoch[..., :elen]\n log.info('NOT truncating epoch_data for epoch %s', epoch_name)\n log.info(f\"{epoch}\")\n elif epoch.shape[-1]<elen:\n pad = np.zeros((epoch.shape[0], elen-epoch.shape[1])) * np.nan\n epoch = np.concatenate((epoch, pad), axis=1)\n log.info('padding epoch_data for epoch %s with nan', epoch_name)\n\n data.append(epoch)\n \n data = np.concatenate(data, axis=-1)\n if data.shape[-1] != round(signal.fs * offset):\n raise ValueError('Misalignment issue in averaging signal')\n\n averaged_signal = signal._modified_copy(data, epochs=new_epochs)\n averaged_signals[signal_name] = averaged_signal\n\n# # TODO: Eventually need a smarter check for this in case it's named\n# # something else. Basically just want to preserve spike data.\n# if signal.name == 'resp':\n# spikes = signal.copy()\n# spikes.name = signal.name + ' spikes'\n# averaged_recording.add_signal(spikes)\n averaged_recording = Recording(averaged_signals,\n meta=recording.meta,\n name=recording.name)\n return averaged_recording",
"def classify_eeg(eeg,srate):\r\n bin_size_sec = 30\r\n bin_size_samp = bin_size_sec*srate\r\n t = 0\r\n classified = np.zeros(len(eeg)/bin_size_samp)\r\n while t + bin_size_samp < len(eeg):\r\n classified[t/bin_size_samp] = classify_epoch(eeg[range(t,t+bin_size_samp)],srate)\r\n t = t + bin_size_samp\r\n return classified"
]
| [
"0.68790483",
"0.51099616",
"0.4840694",
"0.47976026",
"0.46058744",
"0.45199078",
"0.451957",
"0.45086294",
"0.44889933",
"0.44735873",
"0.44700673",
"0.44060507",
"0.43793166",
"0.4379234",
"0.43768635",
"0.436418",
"0.43534052",
"0.43524563",
"0.43197927",
"0.43142548",
"0.431034",
"0.43077487",
"0.42977226",
"0.42876273",
"0.42861402",
"0.42480645",
"0.42443216",
"0.4237776",
"0.42331475",
"0.42020267"
]
| 0.6636521 | 1 |
Computes sag ratio using voltage trace. Sag ratio is computed as $$ SR = \frac{V_{min} - V_{end}}{V_{min}} $$ | def sag_ratio(V):
Vmin = np.amin(V)
Vend = V[-1]
sr = (Vmin - Vend) / Vmin
if sr < 0:
print("Warning: sag ratio being negative indicates there is no sag")
return sr | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sag(V):\n Vmin = np.amin(V)\n Vend = V[-1]\n return Vmin - Vend",
"def calc_V(A):\n return 1. / calc_rV(A)",
"def vratio(self):\n return self.run_command('vratio')[0]",
"def calc_rV(A):\n return np.sqrt(calc_rVsq(A))",
"def rvs(self):\n return float(self.interp(random.rand()))",
"def sag_ratio_abf(abf, epoch_ind):\n p0 = abf.sweepEpochs.p1s[epoch_ind]\n p1 = abf.sweepEpochs.p1s[epoch_ind+1]\n V = abf.sweepY[p0:p1]\n return sag_ratio(V)",
"def getRatio(probe_num, position_vector, shot_range, dir, day ='050119r'):\n ratio_x = 0\n ratio_y = 0\n ratio_z = 0\n # helm_B = [0,0,0]\n divideby = 0\n for shot in range(shot_range[0], shot_range[1]+1):\n print( 'On shot ', day+str(shot), ' for probe ',probe_num)\n x,y,z, currmax,helmB_new = probe_calib(day+str(shot), probe_num, position_vector,dir)\n ratio_x = ratio_x + x\n ratio_y = ratio_y + y\n ratio_z = ratio_z + z\n # helm_B = [helm_B[i] + helmB_new[i] for i in len(helmB)]\n divideby = divideby + 1 #averaging over the number of shots\n ratio_Bx = ratio_x/divideby\n ratio_By = ratio_y/divideby\n ratio_Bz = ratio_z/divideby\n # helmB = [helm_B]/divideby\n # print ratio_Bx, ratio_By, ratio_Bz, helmB\n # print(\"ratio_Bx %f, ratio_By %f, ratio_Bz %f, helmB%s\"%(ratio_Bx, ratio_By, ratio_Bz, helmB))\n Bx_sqr =ratio_x**2\n By_sqr =ratio_y**2\n Bz_sqr =ratio_z**2\n B = Bx_sqr + By_sqr+ Bz_sqr\n norm_factor = np.sqrt(B)\n ratio_Bx, ratio_By, ratio_Bz = [ratio_Bx, ratio_By, ratio_Bz]/norm_factor\n\n return (ratio_Bx, ratio_By, ratio_Bz, norm_factor)",
"def R_SV(take_off_angle, strike, dip, rake, az):\n inc = np.deg2rad(take_off_angle)\n SR = Fault_geom_SR(dip, rake)\n QR = Fault_geom_QR(strike, dip, rake, az)\n PR = Fault_geom_PR(strike, dip, rake, az)\n\n RSV = (3 / 2) * SR * np.sin(2 * inc) + QR * np.cos(2 * inc) + (1 / 2) * PR * np.sin(2 * inc)\n return RSV",
"def compute_gae(V, s, ss, r, absorbing, last, gamma, lam):\n v = V(s)\n v_next = V(ss)\n gen_adv = np.empty_like(v)\n for rev_k in range(len(v)):\n k = len(v) - rev_k - 1\n if last[k] or rev_k == 0:\n gen_adv[k] = r[k] - v[k]\n if not absorbing[k]:\n gen_adv[k] += gamma * v_next[k]\n else:\n gen_adv[k] = r[k] + gamma * v_next[k] - v[k] + gamma * lam * gen_adv[k + 1]\n return gen_adv + v, gen_adv",
"def analytic_value_VaR(x):\n mu_H = -15 * x + 10 * x ** 2\n # z = 0.67448975 # VaR 0.75\n # z = .7978845608028654 # CVaR 0.5\n z = 1.27111 # CVaR 0.75\n sigma_H = np.sqrt(16 * x ** 2 + 4 * x ** 4)\n return mu_H + z * sigma_H",
"def gasoilratio(pressure2, P_bubble, sg2, api, temp2, Rsb):\n import numpy as np\n Rs_array = []\n\n if pressure2 < P_bubble:\n # Using Vazquez and Beggs\n if api <=30:\n c1 = 0.0362\n c2 = 1.0937\n c3 = 25.7240\n if api > 30:\n c1 = 0.0178\n c2 = 1.187\n c3 = 23.9310\n Rs = (pressure2**c2) * c1 * sg2 * np.exp((c3 * api) / (temp2 + 459.67)) \n \n if pressure2 >= P_bubble:\n # Because Rs will be constant above BB\n Rs = Rsb\n \n return Rs",
"def calculate_gear_ratio(front_gear, back_gear):\n return front_gear/back_gear",
"def PressureVesselStressP(r, ri, ro, sigma):\n return (sigma*(ro**2-ri**2)/(1+ro**2/r**2))/ri**2",
"def g_sebal_func(ts, albedo_sur, ndvi):\n g = np.copy(ndvi).astype(np.float64)\n np.power(g, 4, out=g)\n g *= -0.98\n g += 1\n g *= ts\n g *= (albedo_sur * 0.0074 + 0.0038)\n return g",
"def calcDVavg(supplyvol, demandvol):\n dvavg = (supplyvol - demandvol)/(0.5 * (supplyvol + demandvol))\n return dvavg",
"def scv(SP):\n scv = ((np.std(SP,axis=1)/np.mean(SP,axis=1)))\n return scv",
"def cal_sdri(src_ref, src_est, mix):\r\n src_anchor = np.stack([mix, mix], axis=0)\r\n sdr, sir, sar, popt = bss_eval_sources(src_ref, src_est)\r\n sdr0, sir0, sar0, popt0 = bss_eval_sources(src_ref, src_anchor)\r\n avg_sdri = ((sdr[0]-sdr0[0]) + (sdr[1]-sdr0[1])) / 2\r\n return avg_sdri",
"def sigma0_RG(self):\n sigma = np.sqrt(self.cosmo.gs_spectral_moment(l=0,RG=self.RG))\n return sigma",
"def get_voltage_rating(self):\n summary = self.get_version_summary()\n pattern = '\\$.*? .*? .*? .*? .*? (.*?) .*? .*? .*? \\r\\n' \n rating = int(re.findall(pattern,summary).pop())\n return rating",
"def set_vratio(self, setto):\n command = 'vratio ' + str(setto)\n self.run_command(command)",
"def slant_time(sx, sy, sz, rx, ry, rz, v):\n d = np.sqrt((rx - sx) ** 2 + (ry - sy) ** 2)\n h = rz - sz\n r = np.sqrt(d ** 2 + h ** 2)\n return r / v",
"def rvs(self):\n rvsValue = self.ppf(random())\n return rvsValue",
"def rvs(self):\n rvsValue = self.ppf(random())\n return rvsValue",
"def rvs(self):\n rvsValue = self.ppf(random())\n return rvsValue",
"def SNR(S, S0):\n return np.var(S) / np.var(S - S0)",
"def sos_correction(self, ratio):\n\n # Correct velocities\n self.u_mps = self.u_mps * ratio\n self.v_mps = self.v_mps * ratio",
"def __idiv__(self,value):\n if isinstance(value,LiveStat):\n raise Exception (\"Ratio of Statistics is not supported\")\n else:\n if self.vmin is not None:\n # mu(s x) = 1/N sum s x = s/N sum x\n self.vmean /= value\n if value < 0:\n m = self.vmin\n M = self.vmax\n self.vmin = M/value\n self.vmax = m/value\n else:\n self.vmin /= value\n self.vmax /= value\n self.vsum /= value\n # vm2(s x) = sum (s x - mu(s x))^2 = sum (s x - s mu(x))^2 = sum s^2 (x - mu(x))^2 = s^2 sum (x - mu(x))^2 = s^2 vm^2\n self.vm2 /= value*value\n print (\"div Missing: M3 and M4\")\n self.dirty = True\n return self",
"def V(E, g, gl):\n num = 0\n den = 0\n for i in range(len(E)):\n num += E[i][0]*g[i][0] + E[i][1]*g[i][1]\n den += g[i][0] + g[i][1] + gl\n return num / den",
"def get_vcond(lambdam, taum):\n return 2 * lambdam / taum",
"def get_phi_with_S(big_phi_current, S_current, dv=hyper_paras[\"dv\"]):\n return np.true_divide(np.minimum(np.minimum(big_phi_current, country_info['Vac_rate_max']), S_current), dv)"
]
| [
"0.6470241",
"0.61445576",
"0.5787019",
"0.5713984",
"0.57111585",
"0.57061124",
"0.5691221",
"0.5611252",
"0.5577237",
"0.55457896",
"0.5515481",
"0.54366684",
"0.54354954",
"0.542553",
"0.5407385",
"0.53981924",
"0.5393931",
"0.5376168",
"0.53517497",
"0.5343775",
"0.53381824",
"0.5276844",
"0.5276844",
"0.5276844",
"0.52721846",
"0.5249615",
"0.5222495",
"0.51850444",
"0.5179424",
"0.5176363"
]
| 0.8000134 | 0 |
Computes sag ratio using abf object and epoch index. | def sag_ratio_abf(abf, epoch_ind):
p0 = abf.sweepEpochs.p1s[epoch_ind]
p1 = abf.sweepEpochs.p1s[epoch_ind+1]
V = abf.sweepY[p0:p1]
return sag_ratio(V) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sag_abf(abf, epoch_ind):\n p0 = abf.sweepEpochs.p1s[epoch_ind]\n p1 = abf.sweepEpochs.p1s[epoch_ind+1]\n V = abf.sweepY[p0:p1]\n return sag(V)",
"def __call__(self, epoch):\n exp = np.floor((1 + epoch) / self.dropEvery)\n alpha = initAlpha * (self.factor ** exp)\n \n # return alpha \n return float(alpha)",
"def sag_ratio(V):\n\n Vmin = np.amin(V)\n Vend = V[-1]\n sr = (Vmin - Vend) / Vmin\n if sr < 0:\n print(\"Warning: sag ratio being negative indicates there is no sag\")\n return sr",
"def test_avgeraging(self):\n\n num_ensemble = 10\n enn = networks.MLPEnsembleEnn(\n output_sizes=[1],\n num_ensemble=num_ensemble,\n )\n\n dummy_metrics = {'a': 0, 'b': 1}\n # A dummy loss fn that returns the normalized index as loss and two constant\n # metrics. Index is random but normalized such that its mean is 1.\n single_loss_fn = DummySingleIndexLossFn(num_ensemble, dummy_metrics)\n\n num_index_samples = 100\n loss_fn = average_single_index_loss(single_loss_fn, num_index_samples)\n dummy_batch = base.Batch(np.ones([1, 1]), np.ones([1, 1]))\n loss, metrics = loss_fn(\n enn=enn, params=dict(), batch=dummy_batch, key=jax.random.PRNGKey(0))\n\n # Since the single loss has mean 1 the averaged loss also has mean 1 a\n # variance proportional to 1/np.sqrt(num_index_samples).\n self.assertAlmostEqual(\n loss,\n 1.0,\n delta=5 / np.sqrt(num_index_samples),\n msg=f'Expected loss to be ~1.0 but it is {loss}')\n self.assertDictEqual(\n metrics, dummy_metrics,\n f'expected metrics to be {dummy_metrics} but it is {metrics}')",
"def fib(index):\n return round((GR**index)/R5)",
"def apx_ratio_eval():\n print 'starting random eval'\n\n n_pl = randint(2, MAX_PL)\n n_target = randint(n_pl + 1, MAX_TARGETS)\n\n target_values = []\n for t in range(0,n_target):\n target_values.append(random())\n target_values = np.array(target_values)\n\n attacker_strategy = get_random_attacker_strategy(n_target)\n\n #I = get_random_covering_routes(n_pl, n_target)\n I = get_pathologic_covering_routes(n_pl, n_target, attacker_strategy, target_values)\n\n start_opt = time()\n br_opt = ilp.generate_row(I, attacker_strategy, target_values)\n opt_time = time() - start_opt\n\n start_apx = time()\n br_apx = greedy.generate_row(I, attacker_strategy, target_values)\n apx_time = time() - start_apx\n\n print 'opt time: ', opt_time, 'apx time: ', apx_time\n\n apx_val = get_value(br_apx, I, target_values, attacker_strategy)\n opt_val = get_value(br_opt, I, target_values, attacker_strategy)\n\n logging.info('ratio = %f - target = %d - pl = %f - max_r = %d - max_cov_t = %d',apx_val/opt_val, n_target, n_pl, MAX_ROUTES, MAX_ROUTE_LEN)\n\n return apx_val/opt_val, opt_time, apx_time",
"def rF(count, total):\n\treturn float(count)/float(total)",
"def learning_rate(epoch):\n return alpha / (1 + decay_rate * epoch)",
"def learning_rate(epoch):\n return alpha / (1 + decay_rate * epoch)",
"def index(self):\n return self._epochs_completed * self._size + self._index_in_epoch",
"def scheduler(epoch):\n return alpha / (1 + decay_rate * epoch)",
"def compute_rate(self):\n bg_rate = self.counts.data / self.livetime.data\n\n bg_rate /= self.counts.bin_volume\n\n bg_rate = bg_rate.to('MeV-1 sr-1 s-1')\n\n self.bg_rate.data = bg_rate\n self.bg_rate.data_err = (np.sqrt(self.counts.data) / (self.counts.bin_volume * self.livetime.data)).to(\n 'MeV-1 sr-1 s-1')",
"def epoch_detail(self):\n epoch_ratio = self.batch_sampler.offset / len(self)\n # NOTE: this is not accurate when num_workers > 0\n return epoch_ratio",
"def __get_img_augm_idx__(self, idx: int):\n\n images_done = idx * self.batch_size\n return divmod(images_done, self.gen_count)",
"def avg_spike_frequency_abf(abf, epoch):\n p0 = abf.sweepEpochs.p1s[epoch]\n p1 = abf.sweepEpochs.p1s[epoch+1]\n t = abf.sweepX[p0:p1]\n V = abf.sweepY[p0:p1]\n return avg_spike_frequency(t, V)",
"def brate(self):\n try:\n return self.pos / self.runtime\n except ZeroDivisionError:\n return 0",
"def dishlist_avg_cal(n:list)->float:\r\n all_cal = dishlist_cal(n)\r\n return sum(all_cal)/len(all_cal)",
"def __call__(self, epoch):\n decay = (1 - (epoch / float(self.maxEpochs))) ** self.power\n alpha = self.initAlpha * decay\n \n # return alpha\n return float(alpha)",
"def compute_rate(self):\n bg_rate = self.counts_cube.data / self.livetime_cube.data\n\n bg_rate /= self.counts_cube.bin_volume\n # bg_rate.set_zero_level()\n\n # import IPython; IPython.embed()\n bg_rate = bg_rate.to('1 / (MeV sr s)')\n\n self.background_cube.data = bg_rate",
"def eval_beat(individual):\n # compile the individual\n routine = gp.compile(individual, pset)\n # generate some test output\n try:\n test_output = gen_beat_output(routine)\n except:\n return 0.0,\n ## do some stats on the beat\n sd = np.std(np.array(test_output))\n bpm, correl = bpm_detector(test_output,24000)\n bpm_score = 1 - abs((bpm/120.0)-1)\n sd_score = sd / 128.0\n del test_output\n # return the score\n return float(bpm_score * sd_score),",
"def running_ratio(self) -> np.ndarray:\n result_array = self.result_array\n result = result_array.sum(axis=1) / result_array.sum()\n\n if isinstance(result, np.ndarray):\n result_out = result\n else:\n result_out = np.array(result)\n\n return result_out",
"def golden_ratio(fun, a, b, E):\n K = (1 + sqrt(5)) / 2\n lam = b - (b - a) / K\n mu = a + (b - a) / K\n y1 = fun(lam)\n y2 = fun(mu)\n counter = 2\n n = 0\n while fabs(b - a) > E:\n if y1 > y2:\n a = lam\n lam = mu\n y1 = y2\n mu = a + (b - a) / K\n y2 = fun(mu)\n counter += 1\n else:\n b = mu\n mu = lam\n y2 = y1\n lam = b - (b - a) / K\n y1 = fun(lam)\n counter += 1\n n += 1\n return (b + a) / 2, counter",
"def getRatio(probe_num, position_vector, shot_range, dir, day ='050119r'):\n ratio_x = 0\n ratio_y = 0\n ratio_z = 0\n # helm_B = [0,0,0]\n divideby = 0\n for shot in range(shot_range[0], shot_range[1]+1):\n print( 'On shot ', day+str(shot), ' for probe ',probe_num)\n x,y,z, currmax,helmB_new = probe_calib(day+str(shot), probe_num, position_vector,dir)\n ratio_x = ratio_x + x\n ratio_y = ratio_y + y\n ratio_z = ratio_z + z\n # helm_B = [helm_B[i] + helmB_new[i] for i in len(helmB)]\n divideby = divideby + 1 #averaging over the number of shots\n ratio_Bx = ratio_x/divideby\n ratio_By = ratio_y/divideby\n ratio_Bz = ratio_z/divideby\n # helmB = [helm_B]/divideby\n # print ratio_Bx, ratio_By, ratio_Bz, helmB\n # print(\"ratio_Bx %f, ratio_By %f, ratio_Bz %f, helmB%s\"%(ratio_Bx, ratio_By, ratio_Bz, helmB))\n Bx_sqr =ratio_x**2\n By_sqr =ratio_y**2\n Bz_sqr =ratio_z**2\n B = Bx_sqr + By_sqr+ Bz_sqr\n norm_factor = np.sqrt(B)\n ratio_Bx, ratio_By, ratio_Bz = [ratio_Bx, ratio_By, ratio_Bz]/norm_factor\n\n return (ratio_Bx, ratio_By, ratio_Bz, norm_factor)",
"def schedule(epoch):\n return alpha / (1 + (decay_rate * epoch))",
"def ap(self, result, next_item):\n if next_item in result.index:\n rank = result.index.get_loc(next_item) + 1\n return 1.0 / rank\n else:\n return 0",
"def _calculate_a_value(self, bval, nvalue, nyr, cmag, ref_mag):\n\n denominator = np.sum(nyr * np.exp(-bval * (cmag - ref_mag)))\n return nvalue / denominator",
"def erb2freq(n_erb):\n return 24.7 * 9.265 * (np.exp(n_erb / 9.265) - 1)",
"def prescaler(self) -> int:",
"def learning_rate(epoch):\n self.lr = self.lr / 1.00000001\n return self.lr",
"def compute(self):\n rsa = self._session_graph.get_graph_property(self._FACTOR_KEY)\n rsa = rsa if rsa else 0.\n tr = self._session_graph.graph.num_edges()\n tr = tr if tr > 0 else 1\n rs = self._traffic_record['response_size']\n rsa = ((float(rsa) * (float(tr) - 1.)) + float(rs)) / float(tr)\n self.append_graph_factor('float', rsa)\n\n print \"Response Size Average : \", rsa\n pass"
]
| [
"0.604394",
"0.5422874",
"0.53296494",
"0.5298637",
"0.5189429",
"0.5170441",
"0.51297385",
"0.5077913",
"0.5077913",
"0.50770766",
"0.5069971",
"0.5037457",
"0.5031441",
"0.5018347",
"0.5014721",
"0.50000775",
"0.4999061",
"0.49976856",
"0.49907756",
"0.49856532",
"0.49590257",
"0.4955538",
"0.4942003",
"0.4925769",
"0.49198243",
"0.49099234",
"0.4879803",
"0.48691398",
"0.48541677",
"0.48528755"
]
| 0.74809 | 0 |
Computes minimum of voltage. | def Vmin(V):
return np.min(V) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_min_cell_voltage(self):\n summary = \" \".join(self.get_summary().split())\n pattern = '\\$.... .. .*? .*? .*? (.*?) .*? . .*? .*? . . . .*?'\n minv = float(re.findall(pattern,summary).pop())\n return minv",
"def hemt_gate_min_voltage(self):\n return self._hemt_gate_min_voltage",
"def get_vmin(self, ch_id: int) -> float:\n return float(self.query(':measure:vmin? channel{}'.format(ch_id)))",
"def vmin(self):\n return self._vmin",
"def native_min_value(self) -> float:\n return TEMP_MINIMUM",
"def min_voltage_limit(self):\n return self._read(MX_MIN_VOLTAGE_LIMIT)",
"def get_minimum_air_volume(v_vent: np.ndarray) -> float:\n\n return v_vent.sum()",
"def min(self):\n mins = self.client.map(_call_min, self.vecDask, pure=False)\n min_val = np.inf\n for future, result in daskD.as_completed(mins, with_results=True):\n if result < min_val:\n min_val = result\n return min_val",
"def getMinimum(self):\n v1 = Vector(*self.p1)\n v2 = Vector(*self.p2)\n if v1.angle < v2.angle:\n return self.p1\n else:\n return self.p2",
"def _minimum(self) -> float:\n if self._type == \"power\":\n return 1.0\n elif self._type == \"setpoint\":\n return self._product.get_data_config_json()[\"_value_setpoint_min\"]\n elif self._type == \"fan1\":\n fan = 1\n return self._product.get_data_config_json()[\"_value_fan_limits\"][\n ((fan - 1) * 2)\n ]\n elif self._type == \"fan2\":\n fan = 2\n return self._product.get_data_config_json()[\"_value_fan_limits\"][\n ((fan - 1) * 2)\n ]\n elif self._type == \"fan3\":\n fan = 3\n return self._product.get_data_config_json()[\"_value_fan_limits\"][\n ((fan - 1) * 2)\n ]",
"def min(self) -> float:\n return stats.min(self)",
"def minimum(self):\n \n omega_star = fmin(self.function, 0, disp=False)[0]\n loss = self.function(omega_star)\n return omega_star, loss",
"def min(x):\n pass",
"def min_value(self) -> float:\n return DEFAULT_MIN_VALUE",
"def min(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"min\")",
"def min(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"min\")",
"def u_min(self):\n if self._u_min is None:\n return np.abs(self.uvgrid).min()\n else:\n return self._u_min",
"def min():\n valid=result_alpha.F>0\n src_data.F[valid]=np.minimum( src_data.F[valid],result_data.F[valid] )",
"def state_min(self) -> float:\n raise NotImplementedError",
"def get_minimum_volume(self, pipette_id: str) -> float:\n return self.get_config(pipette_id).min_volume",
"def min_voltage_limit(self, value):\n self._write(MX_MIN_VOLTAGE_LIMIT, value)",
"def minimum_value(self):\n return self._fitness[self._minidx]",
"def min_temp(self):\n return 1",
"def getmin(self):\n\n return self.X",
"def MinimumValue(self):\n return self._fitness[self._minIndex]",
"def min(self):\n return numpy.ma.min(self.data)",
"def min(self):\n if 0 in type(self).flatten_shape(self.shape):\n raise ValueError(\"zero-size array has no minimum\")\n if self.isscalar():\n return self.defval\n # If not all blocks are set, then the tensor has an element of defval\n # somewhere.\n m = np.inf if self.is_full() else self.defval\n for v in self.sects.values():\n try:\n m = min(m, np.min(v))\n except ValueError:\n # This block was zero-size, and has no elements.\n pass\n return m",
"def min(self):\n return self._reduce_for_stat_function(F.min, only_numeric=False)",
"def min(self):\n return self._summarize(DataFrameCpu._cmin)",
"def native_min_value(self) -> float:\n return self._device.min_offset"
]
| [
"0.75651085",
"0.72486717",
"0.7138131",
"0.68351734",
"0.6807372",
"0.6727665",
"0.66549414",
"0.665015",
"0.65709823",
"0.64375454",
"0.64165014",
"0.6410586",
"0.6393397",
"0.63890225",
"0.63823736",
"0.63823736",
"0.6381286",
"0.6368098",
"0.63344324",
"0.63281834",
"0.630848",
"0.6250822",
"0.62347484",
"0.6227573",
"0.6222949",
"0.6197682",
"0.6178148",
"0.6170142",
"0.61666214",
"0.61600447"
]
| 0.7622577 | 0 |
Computes minimum Voltage using abf object and epoch index. | def Vmin_abf(abf, epoch_start):
p0 = abf.sweepEpochs.p1s[epoch_start]
p1 = abf.sweepEpochs.p1s[epoch_start + 1]
V = abf.sweepY[p0:p1]
return Vmin(V) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_min_cell_voltage(self):\n summary = \" \".join(self.get_summary().split())\n pattern = '\\$.... .. .*? .*? .*? (.*?) .*? . .*? .*? . . . .*?'\n minv = float(re.findall(pattern,summary).pop())\n return minv",
"def min():\n valid=result_alpha.F>0\n src_data.F[valid]=np.minimum( src_data.F[valid],result_data.F[valid] )",
"def voltage_drop_abf(abf, epoch_start):\n vmin = Vmin_abf(abf, epoch_start)\n resting = Vrest_abf(abf, epoch_start)\n return vmin - resting",
"def get_vmin(self, ch_id: int) -> float:\n return float(self.query(':measure:vmin? channel{}'.format(ch_id)))",
"def auxminf1(x):\n \n# Sum over data points\n f = 0.0\n for m_ind in range(cfg.ntrain):\n f += auxmin_f1_part_i(x,m_ind) \n \n return f",
"def min_voltage_limit(self):\n return self._read(MX_MIN_VOLTAGE_LIMIT)",
"def min(self):\n mins = self.client.map(_call_min, self.vecDask, pure=False)\n min_val = np.inf\n for future, result in daskD.as_completed(mins, with_results=True):\n if result < min_val:\n min_val = result\n return min_val",
"def minimum_value(self):\n return self._fitness[self._minidx]",
"def _call_min(vecObj):\n res = vecObj.min()\n return res",
"def get_fmin(self):\n return self.model.predict(self.model.X)[0].min()",
"def Vmin(V):\n return np.min(V)",
"def get_minimum_air_volume(v_vent: np.ndarray) -> float:\n\n return v_vent.sum()",
"def set_minimum_meas_cv(self, integrand, value):\n if value is not None:\n fvalue = float(value)\n assert fvalue >= 0.0\n else:\n fvalue = 0\n integrand = IntegrandEnum[integrand]\n dmf = self.dismod_file\n if dmf:\n dmf.integrand.loc[dmf.integrand.integrand_name == integrand.name, \"minimum_meas_cv\"] = fvalue\n else:\n CODELOG.info(f\"minimum_meas_cv not set because dismod_file is None.\")",
"def MinimumValue(self):\n return self._fitness[self._minIndex]",
"def _getBusVoltageLambdaSensor(self):\n muVmin = array([b.mu_vmin for b in self.market.case.connected_buses])\n muVmax = array([b.mu_vmax for b in self.market.case.connected_buses])\n muVmin = -1.0 * muVmin\n diff = muVmin + muVmax\n return diff",
"def __findStartValue(self):\n\t\tself.minEvidence = 1e120*np.ones(self.nStartValues)\n\t\tfor idx1 in range(self.nStartValues):\n\t\t\tself.minEvidence[self.nStartValues-1-idx1] = self.__minBayesianEvidence(self.logLamStart[self.nStartValues-1-idx1])\n\t\t\t# If minEvidence > 1e100 a negative value has been found for wMP which\n\t\t\t# implies that lower values of lambda do not need to be considered.\n\t\t\tif self.minEvidence[self.nStartValues-1-idx1] > 1e100:\n\t\t\t\tbreak\n\t\tself.startIdx = np.argmin(self.minEvidence)\n\t\t\t\t\n\t\treturn self.logLamStart[self.startIdx]",
"def vmin(self):\n return self._vmin",
"def min_voltage_limit(self, value):\n self._write(MX_MIN_VOLTAGE_LIMIT, value)",
"def get_f_minimum(self):\n return np.min(self._Y)",
"def min_flux(self):\n return np.min(self.flux)",
"def min_value(self, state, min_alpha, min_beta, min_depth):\r\n if state.terminal_test():\r\n return state.utility(0)\r\n if min_depth <=0 :\r\n return self.score(state)\r\n\r\n v = float(\"inf\")\r\n for a in state.actions():\r\n v = min(v, self.max_value(state.result(a), min_alpha, min_beta, min_depth - 1))\r\n if v <= min_alpha:\r\n return v\r\n min_beta = min(min_beta, v)\r\n return v",
"def auxmaxf1(x):\n \n# Sum over data points\n f = 0.0\n for m_ind in range(cfg.ntrain):\n f += auxmax_f1_part_i(x,m_ind) \n \n return f",
"def set_Ec_min(self, x):\n x = float(x)\n if self.Ec_min != x:\n self.Ec_min = x\n self.Ec[0] = x",
"def native_min_value(self) -> float:\n return TEMP_MINIMUM",
"def Vrest_abf(abf, epoch_start):\n p0 = abf.sweepEpochs.p1s[epoch_start]\n p1 = abf.sweepEpochs.p1s[epoch_start+1]\n V = abf.sweepY[p0:p1]\n return Vrest(V)",
"def setMinValue(self, min_value):\r\n\t\tself.MinValue = min_value\r\n\t\tself.OutputValueIncrement = (self.MaxValue - self.MinValue)/(self.RampDuration/self.Ts)",
"def setMinValue(self, min_value):\r\n\t\tself.MinValue = min_value\r\n\t\tself.OutputValueIncrement = (self.MaxValue - self.MinValue)/(self.RampDuration/self.Ts)",
"def a_test_bbvi_mini_batch():\n model = ARIMAX(formula=\"y ~ x1\", data=data, ar=1, ma=1, family=Exponential())\n x = model.fit('BBVI',iterations=100, mini_batch=32)\n assert(len(model.latent_variables.z_list) == 4)\n lvs = np.array([i.value for i in model.latent_variables.z_list])\n assert(len(lvs[np.isnan(lvs)]) == 0)",
"def min_value(self) -> float:\n return DEFAULT_MIN_VALUE",
"def input_membrane_resistance_abf(abf, epoch_start):\n p0 = abf.sweepEpochs.p1s[epoch_start]\n p1 = abf.sweepEpochs.p1s[epoch_start + 1]\n\n V = abf.sweepY[p0:p1]\n I = abf.sweepC[p0-1:p1]\n\n return input_membrane_resistance(I, V)"
]
| [
"0.6273566",
"0.6256282",
"0.6005075",
"0.587969",
"0.578635",
"0.5720461",
"0.570958",
"0.566614",
"0.5658306",
"0.5648726",
"0.55853295",
"0.5554305",
"0.55336726",
"0.550037",
"0.5492651",
"0.5490647",
"0.540645",
"0.53950757",
"0.5371377",
"0.53660285",
"0.5365001",
"0.5360241",
"0.5321182",
"0.5300744",
"0.5283064",
"0.528195",
"0.528195",
"0.52788645",
"0.5272261",
"0.52645874"
]
| 0.7292721 | 0 |
Computes voltage drop using voltage trace. | def voltage_drop(V):
vmin = Vmin(V)
resting = Vrest(V)
return vmin - resting | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def voltage_drop_abf(abf, epoch_start):\n vmin = Vmin_abf(abf, epoch_start)\n resting = Vrest_abf(abf, epoch_start)\n return vmin - resting",
"def drop(u, v):\n return u - v * (u.dot(v) / v.dot(v))",
"def dvdt(self, args: List[float]) -> float:\n v, h_nav, n_kvhh, h_kva, m_kvsi, s_ampar, _, s_nmdar, s_gabar, ca = args\n return ((-10.0*self.params.area \n * (self.leak.i(v)\n + self.nav.i(v, h=h_nav) \n + self.kvhh.i(v, n=n_kvhh)\n + self.kva.i(v, h=h_kva)\n + self.kvsi.i(v, m=m_kvsi)\n + self.cav.i(v)\n + self.kca.i(v, ca=ca)\n + self.nap.i(v)\n + self.kir.i(v))\n - (self.ampar.i(v, s=s_ampar)\n + self.nmdar.i(v, s=s_nmdar)\n + self.gabar.i(v, s=s_gabar))) \n / (10.0*self.params.cm*self.params.area))",
"def vd(v2,v1):\n return v2-v1",
"def _getBusVoltageLambdaSensor(self):\n muVmin = array([b.mu_vmin for b in self.market.case.connected_buses])\n muVmax = array([b.mu_vmax for b in self.market.case.connected_buses])\n muVmin = -1.0 * muVmin\n diff = muVmin + muVmax\n return diff",
"def ramp_down(self):\n value = self.current_event[\"ramp_down\"][\"value\"]\n self.current_value.append(self.current_value[-1] - value)",
"def vol(x):\r\n return pi*(topdia(x)/2000.)**2 * length (x)",
"def vstop(price, multiplier, atr, prev):\n max_price = max(price, prev['max_price'])\n min_price = min(price, prev['min_price'])\n\n had_up_trend = (prev['trend'] == 'up')\n\n stop = (max_price - multiplier * atr) if had_up_trend else (min_price + multiplier * atr)\n vstop = max(prev['vstop'], stop) if had_up_trend else min(prev['vstop'], stop)\n \n trend = 'up' if price >= vstop else 'down' \n is_trend_changed = trend != prev['trend']\n \n if is_trend_changed:\n max_price = price\n min_price = price\n vstop = max_price - multiplier * atr if trend == 'up' else (min_price + multiplier * atr)\n \n return {'trend': trend,\n 'vstop': vstop,\n 'max_price': max_price,\n 'min_price': min_price}",
"def sweepUpDown(self):\r\n self._vna.makeSweepUnprocessed()\r\n print('Beginning Data Collection')\r\n\r\n vnaData = []\r\n # Start voltage at zero\r\n self._power.voltsSetpointSet(0)\r\n negStep = (-1)*self._power.voltageStep; time.sleep(1)\r\n\r\n # Step to 30V and record each step\r\n for _ in range(0, 15, self._power.voltageStep):\r\n vnaData.append(self._vna.makeSweepUnprocessed()); time.sleep(10)\r\n self._power.incrementVolt(self._power.voltageStep); time.sleep(1)\r\n # Step back to 0V and record each step\r\n for _ in range(0, 15, self._power.voltageStep):\r\n vnaData.append(self._vna.makeSweepUnprocessed()); time.sleep(10)\r\n self._power.incrementVolt(negStep); time.sleep(1) \r\n # Switch polarity on power supply\r\n assert self._power.voltsMeas() == 0; time.sleep(2)\r\n self._power.changePolarity(); time.sleep(2)\r\n # Step to -30V and record each step\r\n for _ in range(0, 15, self._power.voltageStep):\r\n vnaData.append(self._vna.makeSweepUnprocessed()); time.sleep(10)\r\n self._power.incrementVolt(self._power.voltageStep); time.sleep(1)\r\n # Step back to 0V and record each step\r\n for _ in range(0, 15, self._power.voltageStep):\r\n vnaData.append(self._vna.makeSweepUnprocessed()); time.sleep(10)\r\n self._power.incrementVolt(negStep); time.sleep(1)\r\n self._power.voltsSetpointSet(0)\r\n\r\n # Switch polarity on power supply\r\n assert self._power.voltsMeas() == 0; time.sleep(2)\r\n self._power.changePolarity(); time.sleep(2)\r\n\r\n self._processData(vnaData)",
"def test_vertical_velocity(self):\n L_x = self.x_edge[-1]\n np.testing.assert_array_equal(self.dVbox_dz(self.t, 0), 0)\n np.testing.assert_array_less(self.dVbox_dz(self.t, 0.5 * L_x), 0)\n np.testing.assert_array_equal(self.dVbox_dz(self.t, L_x), 0)",
"def calc_out_voltage(self, input_photocurrent_file):\n pass",
"def dvdt(self, args: Dict) -> float:\n if self.channel_bool['leak']:\n i_leak: float = self.leak.i(args['v'])\n else:\n i_leak: float = 0.\n \n if self.channel_bool['nav']:\n i_nav: float = self.nav.i(args['v'], h=args['h_nav'])\n else:\n i_nav: float = 0.\n\n if self.channel_bool['kvhh']:\n i_kvhh: float = self.kvhh.i(args['v'], n=args['n_kvhh'])\n else:\n i_kvhh: float = 0.\n\n if self.channel_bool['kva']:\n i_kva: float = self.kva.i(args['v'], h=args['h_kva'])\n else:\n i_kva: float = 0.\n\n if self.channel_bool['kvsi']:\n i_kvsi: float = self.kvsi.i(args['v'], m=args['m_kvsi'])\n else:\n i_kvsi: float = 0.\n\n if self.channel_bool['cav']:\n i_cav: float = self.cav.i(args['v'])\n else:\n i_cav: float = 0.\n\n if self.channel_bool['kca']:\n i_kca: float = self.kca.i(args['v'], ca=args['ca'])\n else:\n i_kca: float = 0.\n \n if self.channel_bool['nap']:\n i_nap: float = self.nap.i(args['v'])\n else:\n i_nap: float = 0.\n\n if self.channel_bool['kir']:\n i_kir: float = self.kir.i(args['v'])\n else:\n i_kir: float = 0.\n\n if self.channel_bool['ampar']:\n i_ampar: float = self.ampar.i(args['v'], s=args['s_ampar'])\n else:\n i_ampar: float = 0.\n\n if self.channel_bool['nmdar']:\n i_nmdar: float = self.nmdar.i(args['v'], s=args['s_nmdar'])\n else:\n i_nmdar: float = 0.\n\n if self.channel_bool['gabar']:\n i_gabar: float = self.gabar.i(args['v'], s=args['s_gabar'])\n else:\n i_gabar: float = 0.\n\n return ((-10.0*self.params.area \n * (i_leak\n + i_nav \n + i_kvhh \n + i_kva \n + i_kvsi \n + i_cav \n + i_kca \n + i_nap \n + i_kir) \n - (i_ampar \n + i_nmdar \n + i_gabar))\n / (10.0*self.params.cm*self.params.area))",
"def filter_diff_vel(self, setting, threshold=None):\n\n self.d_filter = setting\n if setting == 'Manual':\n self.d_filter_threshold = threshold\n\n # Set filter characteristics\n multiplier = 5\n minimum_window = 0.01\n\n # Initialize variables\n d_vel = copy.deepcopy(self.d_mps)\n d_vel_max_ref = 0\n d_vel_min_ref = 0\n\n # Apply selected method\n if self.d_filter == 'Manual':\n d_vel_max_ref = np.abs(self.d_filter_threshold)\n d_vel_min_ref = -1 * d_vel_max_ref\n elif self.d_filter == 'Off':\n d_vel_max_ref = np.nanmax(d_vel) + 99\n d_vel_min_ref = np.nanmin(d_vel) - 99\n elif self.d_filter == 'Auto':\n # Initialize variables\n d_vel_filtered = copy.deepcopy(d_vel)\n\n # Initialize variables\n std_diff = np.repeat(1., 1000)\n k = 0\n\n # Loop until no additional data are removed\n while (std_diff[k] != 0) and (k < 1000) and (np.isnan(std_diff[k]) == False):\n k += 1\n\n # Compute inner quartile range\n d_vel_std = iqr(d_vel_filtered)\n threshold_window = multiplier * d_vel_std\n if threshold_window < minimum_window:\n threshold_window = minimum_window\n\n # Compute maximum and minimum thresholds\n d_vel_max_ref = np.nanmedian(d_vel_filtered) + threshold_window\n d_vel_min_ref = np.nanmedian(d_vel_filtered) - threshold_window\n\n # Identify valid and invalid data\n d_vel_less_idx = np.where(d_vel_filtered <= d_vel_max_ref)[0]\n d_vel_greater_idx = np.where(d_vel_filtered >= d_vel_min_ref)[0]\n d_vel_good_idx = list(np.intersect1d(d_vel_less_idx, d_vel_greater_idx))\n\n # Update filtered data array\n d_vel_filtered = copy.deepcopy(d_vel_filtered[d_vel_good_idx])\n\n # Determine differences due to last filter iteration\n if len(d_vel_filtered) > 0:\n d_vel_std2 = iqr(d_vel_filtered)\n std_diff[k] = d_vel_std2 - d_vel_std\n else:\n std_diff[k] = 0\n\n # Set valid data row 3 for difference velocity filter results\n self.valid_data[2, ] = False\n d_vel_less_idx = np.where(d_vel <= d_vel_max_ref)[0]\n d_vel_greater_idx = np.where(d_vel >= d_vel_min_ref)[0]\n d_vel_good_idx = list(np.intersect1d(d_vel_less_idx, d_vel_greater_idx))\n self.valid_data[2, d_vel_good_idx] = True\n self.valid_data[2, self.valid_data[1, :] == False] = True\n self.valid_data[2, np.isnan(self.d_mps)] = True\n if np.ma.is_masked(d_vel_max_ref):\n self.d_filter_threshold = np.nan\n else:\n self.d_filter_threshold = d_vel_max_ref\n\n\n # Combine all filter data to composite filter data\n self.valid_data[0, :] = np.all(self.valid_data[1:, :], 0)\n self.num_invalid = np.sum(self.valid_data[0, :] == False)",
"def delta_vel(p1=database['K+'], p2=database['pi+'], p3=database['p+'], pmin=0, pmax=80):\r\n p_range = np.linspace(pmin, pmax, 1000)\r\n m1 = p1.mass\r\n m2 = p2.mass\r\n m3 = p3.mass\r\n dv2, dv3 = [], []\r\n for p in p_range:\r\n v1 = c*beta(p, m1)\r\n v2 = c*beta(p, m2)\r\n v3 = c*beta(p, m3)\r\n dv2.append(abs(v1-v2))\r\n dv3.append(abs(v1-v3))\r\n fig = plt.figure(figsize=[10, 5])\r\n ax = fig.add_subplot(1, 1, 1)\r\n# p1_name = r'K$^+$'\r\n# p2_name = r'$\\pi^+$'\r\n# p3_name = r'p$^+$'\r\n ax.plot(p_range, dv2, 'r', label=r'$\\left|v_{K^+}-v_{\\pi^+}\\right|$')\r\n ax.plot(p_range, dv3, 'b', label=r'$\\left|v_{K^+}-v_{p^+}\\right|$')\r\n ax.set_xlabel('p / GeV', fontsize=20)\r\n ax.set_ylabel(r'$\\left|\\Delta v\\right|$ / $ms^{-1}$', fontsize=20)\r\n ax.axvline(75, color='k', label='p = 75 GeV')\r\n ax.set_xticks(np.arange(pmin, pmax+1, 1))\r\n ax.set_xticklabels(np.arange(pmin, pmax+1, 1))\r\n ax.grid()\r\n ax.minorticks_on()\r\n ax.set_xlim(pmin, pmax)\r\n# ax.set_ylim(np.min(v1+v2))\r\n ax.legend(fontsize=20, loc=[0.65, 0.2])\r\n plt.show\r\n return",
"def get_voltage(self):\n print(\"voici le voltage de la batterie\")",
"def get_voltage(self):\n summary = \" \".join(self.get_summary().split())\n pattern = '\\$.... .. (.*?) .*? .*? .*? .*? . .*? .*? . . . .*?'\n voltage = float(re.findall(pattern,summary).pop())\n return voltage",
"def droplet(r_drop=0.02): # [dm]\n alpha_pom = float(76.8)\n r_real = r_drop / np.sin(alpha_pom) # [dm]\n height = r_real * (1 - np.cos(alpha_pom)) # [dm]\n s_drop = np.pi * (4 * r_real * height - height ** 2) # [dm2]\n v_drop = np.pi * height ** 2 * (r_real - height / 3) # [dm3]\n s0 = np.pi * r_drop ** 2 # [dm2]\n return s_drop, v_drop, s0 # , h_max, s_max, v_max, s1",
"def dvdt(self, args: List[float]) -> float:\n v, n_kvhh, ca = args\n return ((-10.0*self.params.area \n * (self.kvhh.i(v, n=n_kvhh) \n + self.cav.i(v) \n + self.kca.i(v, ca=ca) \n + self.nap.i(v) \n + self.leak.i(v))) \n / (10.0*self.params.cm*self.params.area))",
"def reduce_velocity(self):\n if self.controls[\"make_velocity_0\"]:\n # print(self.controls[\"bar_move_velocity\"])\n self.controls[\"bar_move_velocity\"] = 0",
"def velocity(df0, df1):\n velocity = df1 - df0\n return velocity",
"def vol_down_and_validate(self):\n self.logger.info('Decreasing volume')\n before_vol = self.dut.volume('Down', 1)\n time.sleep(2)\n after_vol = self.dut.volume('Down', 1)\n if not after_vol or not before_vol or after_vol >= before_vol:\n self.logger.error(\n 'Unable to decrease the volume. Before: %s. After: %s' %\n (before_vol, after_vol))\n raise TestActsError('error decreasing volume')",
"def meas_voltage(instrument, v_range=10, resolution=0.003):\n return float(instrument.query('MEAS:VOLTage:DC? %s,%s' % (v_range, resolution)))",
"def volume_down(self) -> None:\n self.volume = max(self.volume - self.config.volume_step, 0)",
"def del_velocity(self, mask=None, input_rec_track=None):\r\n if input_rec_track is None:\r\n self.nsegments = self.ndetects - 1\r\n rec_track = self.rec_track\r\n else:\r\n rec_track = input_rec_track\r\n if mask is not None:\r\n rec_track = rec_track[mask]\r\n rec_seg = self.make_segments(input_rec_track = rec_track)\r\n nseg = len(rec_seg)\r\n dspeed = np.nan*np.ones(nseg+1, np.float64)\r\n for ns in range(nseg-1):\r\n seg1 = rec_seg[ns]\r\n seg2 = rec_seg[ns+1]\r\n du = seg1.u - seg2.u\r\n dv = seg1.v - seg2.v\r\n nd = ns + 1\r\n dspeed[nd] = np.sqrt(du*du + dv*dv)\r\n\r\n return dspeed",
"def ts_delapsed_func(ts, elevation, datum, lapse_rate=6.0):\n ts_adjust = np.copy(elevation).astype(np.float64)\n ts_adjust -= datum\n ts_adjust *= (lapse_rate * 0.001)\n ts_adjust += ts\n return ts_adjust.astype(np.float32)",
"def detrend_and_decimate_new(trace,f_sample, params):\n\n logging.info(\"detrending\")\n \n f_new = int(params.f_new)\n print(f_sample,f_new)\n f_sample2= (int(f_sample)//1000)*1000\n print(f_sample2,f_new)\n leng =len(trace)\n\n up = int(f_new/np.gcd(f_sample2,f_new))\n down = int(f_sample2*up/f_new)\n print(up,down)\n factor=down/up\n logging.info(f\"up = {up}, down = {down}\")\n\n # up = int(100_000//f_sample)\n # down = int(100_000//f_new)\n\n\n trace_sub = resample_poly(trace,up,down,padtype='edge')\n dt=1/f_new\n times_sub = np.linspace(0.0,leng/f_sample,len(trace_sub))\n\n ord_filt_len = 2*(int(params.ord_len_ms*f_new/1000)//2)+1\n trace_sub2_ord = order_filter(trace_sub, np.ones(ord_filt_len), ord_filt_len//10) # 10 percentile filter\n\n down_temp = int(f_new//params.f_ord_decimate) \n print(f\"down_temp = {down_temp}\")\n trace_sub2_ord = decimate(trace_sub2_ord, down_temp, ftype='fir')\n trace_sub2_ord = medfilt(trace_sub2_ord) #median filter after decimation\n trace_sub2_ord = resample_poly(trace_sub2_ord, down_temp, 1,padtype='edge')\n\n savgol_len1 = 2*(int(25*f_new/1000)//2)+1\n\n # trace_sub2_ord = savgol_filter(trace_sub2_ord, savgol_len1, 3, mode='interp')\n\n #added to fix length errors, URGH\n last_ind=min(len(trace_sub),len(trace_sub2_ord))\n \n trace_zerod = trace_sub[:last_ind]-trace_sub2_ord[:last_ind]\n \n times_sub = times_sub[:last_ind]\n\n\n MAD = stats.median_absolute_deviation(trace_zerod)\n\n\n\n if params.post_savgol: # False\n savgol_len2 = 2*(int(params.savgol_len_ms*f_new/1000)//2)+1\n trace_zerod = savgol_filter(trace_zerod, savgol_len2, 3, mode='interp') # params.savgol_len=7\n \n trace_zerod = trace_zerod - np.quantile(trace_zerod, params.subs_quantile) # params.subs_quantile=0.25\n logging.info(\"finished detrending\")\n \n # times[]\n\n return trace_zerod, times_sub, MAD , factor",
"def step_v(u_old, v_old, dt, dx2):\n\treturn v_old + dt*(e*(u_old + a0) + delta*laplacian(v_old,dx2))",
"def cvar_down_func(self, level: float = 0.95, months_from_last: int = None,\n from_date: dt.date = None, to_date: dt.date = None) -> float:\n earlier, later = self.calc_range(months_from_last, from_date, to_date)\n how_many = self.tsdf.loc[earlier:later, self.tsdf.columns.values[0]].pct_change().count()\n return self.tsdf.loc[earlier:later, self.tsdf.columns.values[0]].pct_change() \\\n .sort_values().iloc[:int(math.ceil((1 - level) * how_many))].mean()",
"def get_voltage(self):\n status = self.get_status_response()\n volts = status[20] + (status[21] * 0x100) + (status[22] * 0x10000) + (status[23] * 0x1000000)\n volts = float(volts)\n volts /= (1000.0 * 1000.0)\n return volts\n #end get_voltage",
"def bias_subtract(side='blue', trace=None):\r\n\r\n # update the headers\r\n iraf.asthedit('%s????.fits' % side, BASE_DIR + '/cal/DBSP.hdr')\r\n if side == 'blue':\r\n iraf.hedit('blue*.fits', 'DISPAXIS', 2, update=\"yes\", \r\n verify=\"no\", add=\"yes\", show=\"no\")\r\n else:\r\n iraf.hedit('red*.fits', 'DISPAXIS', 1, update=\"yes\", \r\n verify=\"no\", add=\"yes\", show=\"no\")\r\n\r\n # Need to define an instrument translation file in iraf 2.16.1\r\n iraf.unlearn('setinst')\r\n iraf.setinst.instrument = 'kpnoheaders'\r\n iraf.setinst.review = 'no'\r\n iraf.setinst.mode = 'h'\r\n iraf.setinst()\r\n\r\n # bias subtraction using the overscan\r\n filenames = glob(\"%s????.fits\" % side)\r\n hdr = pyfits.getheader(filenames[0])\r\n iraf.unlearn('ccdproc')\r\n iraf.ccdproc.zerocor = \"no\"\r\n iraf.ccdproc.flatcor = \"no\"\r\n iraf.ccdproc.fixpix = \"no\"\r\n# iraf.ccdproc.fixfile = \"../bluebpm\"\r\n if side == 'blue':\r\n iraf.ccdproc.biassec = hdr['BSEC1']\r\n iraf.ccdproc.trimsec = \"[%d:%d,*]\" % (trace-100, trace+100)\r\n iraf.ccdproc.function = \"spline3\"\r\n iraf.ccdproc.order = 3\r\n else:\r\n # trim the specified region\r\n iraf.ccdproc.biassec = det_pars['red']['biassec'] # this may not work for the old camera...\r\n tsec_x = hdr['TSEC1'].split(',')[0]\r\n iraf.ccdproc.trimsec = tsec_x + \",%d:%d]\" % (trace-100, trace+100)\r\n iraf.ccdproc.function = \"legendre\"\r\n iraf.ccdproc.order = 1\r\n iraf.ccdproc.darkcor = \"no\"\r\n iraf.ccdproc.ccdtype = \"\"\r\n iraf.ccdproc.niterate = 3\r\n iraf.ccdproc('%s????.fits' % side)"
]
| [
"0.64880204",
"0.5908261",
"0.5774052",
"0.5733856",
"0.56801546",
"0.5569362",
"0.55316657",
"0.55160147",
"0.5500494",
"0.5499955",
"0.54843986",
"0.5478288",
"0.5471517",
"0.5442814",
"0.5422475",
"0.5381904",
"0.5378291",
"0.5374405",
"0.5367712",
"0.5356332",
"0.5355079",
"0.5349323",
"0.5289308",
"0.5286125",
"0.5281639",
"0.5279824",
"0.52724123",
"0.5271698",
"0.5252135",
"0.5241404"
]
| 0.75108176 | 0 |