query (string, 9-9.05k chars) | document (string, 10-222k chars) | metadata (dict) | negatives (sequence of 30) | negative_scores (sequence of 30) | document_score (string, 4-10 chars) | document_rank (string, 2 classes) |
---|---|---|---|---|---|---|
Loads volume mesh using meshio. Not meant for mixed shape meshes. | def load_volume_mesh(fname):
    fname = abs_fname_(fname)

    m = meshio.read(fname)
    mesh = Mesh()
    mesh.vertices = m.points

    for i, c in enumerate(m.cells):
        if i == 0:
            elements = c.data
        else:
            elements = np.vstack((elements, c.data))

    mesh.elements = elements

    return mesh | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read(self, mesh_path: str) -> None:\n\n reader = VtuReader(mesh_path)\n self.set_mesh_data(mesh=reader.mesh, bc=reader.bc, mpc=reader.mpc)",
"def load_volume_mixd(dim, fname=None, mxyz=None, mien=None, hexa=False):\n vertices, elements = mixd_load_(fname, mxyz, mien)\n\n mesh = Mesh()\n mesh.vertices = vertices.reshape(-1, dim)\n\n if hexa:\n mesh.elements = elements.reshape(-1, 8)\n else:\n mesh.elements = elements.reshape(-1, 4)\n\n return mesh",
"def load_mesh(self, name: str = None) -> dolfin.mesh:\n if self.mesh is None:\n self.mesh = df.Mesh()\n if name is None:\n mesh_name = self._casedir / Path(\"mesh.xdmf\")\n else:\n mesh_name = self._casedir / Path(f\"{name}.xdmf\")\n with df.XDMFFile(str(mesh_name)) as infile:\n infile.read(self.mesh)\n return self.mesh",
"def load_mesh(self, script_to_apply=None): \n # convert to an obj file using meshlab\n if script_to_apply is None:\n meshlabserver_cmd = 'meshlabserver -i \\\"%s\\\" -o \\\"%s\\\"' %(self.filename, self.obj_filename)\n else:\n meshlabserver_cmd = 'meshlabserver -i \\\"%s\\\" -o \\\"%s\\\" -s \\\"%s\\\"' %(self.filename, self.obj_filename, script_to_apply) \n os.system(meshlabserver_cmd)\n logging.info('MeshlabServer Command: %s' %(meshlabserver_cmd))\n\n if not os.path.exists(self.obj_filename):\n raise ValueError('Meshlab conversion failed for %s' %(self.obj_filename))\n \n # read mesh from obj file\n of = obj_file.ObjFile(self.obj_filename)\n self.mesh_ = of.read()\n return self.mesh_",
"def load_mesh(name):\n if name[-4:] == \".obj\":\n bpy.ops.import_scene.obj(filepath=name)\n mesh_name = (os.path.basename(name)).replace('.obj','')\n return mesh_name\n else:\n raise ValueError(\"{} not an obj file\".format(name))",
"def import_mesh(self, scenegroup):\n logger.debug((\"mesh\", scenegroup[\"asset\"]))\n if scenegroup[\"asset\"] in self._imported_assets:\n return self._imported_assets[scenegroup[\"asset\"]]\n asset = self.gridinfo.getAsset(scenegroup[\"asset\"])\n if not asset[\"type\"] == str(AssetType.OgreMesh):\n logger.debug(\"(\"+asset[\"type\"]+\")\")\n return\n materials = []\n if \"materials\" in scenegroup:\n materials = scenegroup[\"materials\"]\n mesh = self.create_mesh_frombinary(scenegroup[\"asset\"], asset[\"name\"], asset[\"data\"])\n return self.create_mesh_fromomesh(scenegroup[\"asset\"], asset[\"name\"],\n mesh, materials)",
"def read_mesh(self, src):\n self.logger.debug(\"Reading mesh information from file %s\",\n src.file_name)\n\n self._title = src.title\n self._datetime = src.datetime\n\n self._ndim = src.ndim\n\n # copying mesh quantities\n self._npoin3 = src.npoin3\n self._nelem3 = src.nelem3\n self._ndp3 = src.ndp3\n self._nplan = src.nplan\n self.typ_elem = src.typ_elem\n\n # Copying mesh coordiantes\n self._meshx = src.meshx\n self._meshy = src.meshy\n self._meshz = src.meshz\n\n # Copying connectivity\n self._ikle3 = src.ikle3\n\n # Parallel interface information\n self._nptir = src.nptir\n self._knolg = src.knolg\n\n # Boundary information\n # nptfr and ipob3 are read reagrdless of presence of boundary file\n # As they are need in serafin format\n self._nptfr = src.nptfr\n self._ipob3 = src.ipob3\n if self.boundary_file is not None:\n self.typ_bnd_elem = src.typ_bnd_elem\n self._nelebd = src.nelebd\n self._bnd_info = src.bnd_info\n self._ikle_bnd = src.ikle_bnd\n self._nbor = src.nbor",
"def load_mesh(fname):\n fname = abs_fname_(fname)\n\n m = meshio.read(fname)\n mesh = Mesh()\n mesh.vertices = m.points\n\n for i, c in enumerate(m.cells):\n if i == 0:\n faces = c.data\n else:\n faces = np.vstack((faces, c.data))\n\n mesh.faces = faces\n\n return mesh",
"def _load_serialized_mesh(filename):\n print 'Loading mesh data from NPZ file', filename\n npzfile = np.load(filename)\n\n k = npzfile['k'].item()\n initial_point = npzfile['initial_point']\n initial_face_index = npzfile['initial_face_index'].item()\n\n all_vertices = npzfile['all_vertices']\n triangles = npzfile['triangles']\n face_local_bases = npzfile['face_local_bases']\n neighbor_faces = npzfile['neighbor_faces']\n\n return [k, initial_point, initial_face_index,\n all_vertices, triangles, face_local_bases, neighbor_faces]",
"def __init__(self, file_path):\n\n # Comments\n # mtllib mtl_name\n # o object_name\n # v x y z\n # vt u v\n # vn x y z\n # f v0/t0/n0 v1/t1/n1 v2/t2/n2\n\n print('loading mesh \"%s\"' % file_path)\n mesh_file = open(file_path, 'r')\n\n verts = []\n texs = []\n normals = []\n faces = []\n\n # For each line of the input file\n for line in mesh_file:\n line = line.rstrip(' \\r\\n')\n\n # Skip comments\n if line.startswith('#') or line == '':\n continue\n\n tokens = line.split(' ')\n tokens = map(lambda t: t.strip(' '), tokens)\n tokens = list(filter(lambda t: t != '', tokens))\n\n prefix = tokens[0]\n tokens = tokens[1:]\n\n if prefix == 'v':\n vert = list(map(lambda v: float(v), tokens))\n verts.append(vert)\n\n if prefix == 'vt':\n tc = list(map(lambda v: float(v), tokens))\n texs.append(tc)\n\n if prefix == 'vn':\n normal = list(map(lambda v: float(v), tokens))\n normals.append(normal)\n\n if prefix == 'f':\n assert len(tokens) == 3, \"only triangle faces are supported\"\n\n face = []\n for token in tokens:\n indices = list(map(lambda idx: int(idx), token.split('/')))\n face.append(indices)\n\n faces.append(face)\n\n mesh_file.close()\n\n self.num_faces = len(faces)\n\n print('num verts=%d' % len(verts))\n print('num_faces=%d' % self.num_faces)\n\n # Create numpy arrays to store the vertex data\n list_verts = np.zeros(shape=(3 * self.num_faces, 3), dtype=np.float32)\n list_texcs = np.zeros(shape=3 * 2 * self.num_faces, dtype=np.float32)\n list_norms = np.zeros(shape=3 * 3 * self.num_faces, dtype=np.float32)\n\n cur_vert_idx = 0\n\n # For each triangle\n for face in faces:\n # For each triplet of indices\n for triplet in face:\n v_idx, t_idx, n_idx = triplet\n\n # Note: OBJ uses 1-based indexing\n vert = verts[v_idx-1]\n texc = texs[t_idx-1]\n normal = normals[n_idx-1]\n\n list_verts[cur_vert_idx, :] = vert\n list_texcs[2*cur_vert_idx:2*(cur_vert_idx+1)] = texc\n list_norms[3*cur_vert_idx:3*cur_vert_idx+3] = normal\n\n cur_vert_idx += 1\n\n # Re-center the object so that y=0 is at the base,\n # and the object is centered in x and z\n x_coords = list_verts[:, 0]\n z_coords = list_verts[:, 2]\n min_y = list_verts[:, 1].min()\n mean_x = (x_coords.min() + x_coords.max()) / 2\n mean_z = (z_coords.min() + z_coords.max()) / 2\n list_verts[:, 1] -= min_y\n list_verts[:, 0] -= mean_x\n list_verts[:, 2] -= mean_z\n\n # Compute the object extents after centering\n x_coords = list_verts[:, 0]\n y_coords = list_verts[:, 1]\n z_coords = list_verts[:, 2]\n self.y_max = y_coords.max()\n\n # Create a vertex list to be used for rendering\n self.vlist = pyglet.graphics.vertex_list(\n 3 * self.num_faces,\n ('v3f', list_verts.reshape(-1)),\n ('t2f', list_texcs),\n ('n3f', list_norms)\n )\n\n # Load the texture associated with this mesh\n file_name = os.path.split(file_path)[-1]\n tex_name = file_name.split('.')[0]\n tex_path = get_file_path('textures', tex_name, 'png')\n self.texture = load_texture(tex_path)",
"def load_volume(name, nx, ny, nz):\n\n # load raw volume into memory\n img = np.fromfile(name, dtype=np.float32)\n img = np.reshape(img, (ny, nx, nz))\n\n return img.transpose(0, 2, 1)",
"def create_mesh_from_data(mesh_name, bsp_verts, bsp_faces, materials, scale_factor):\n\n\n def vertex_stream(vertices, stream_id):\n for vertex in vertices:\n yield vertex[stream_id]\n\n # Create mesh and object\n me = bpy.data.meshes.new(mesh_name+'Mesh')\n ob = bpy.data.objects.new(\"LEVEL\" + mesh_name, me)\n ob.show_name = True\n\n # Link object to scene\n bpy.context.scene.objects.link(ob)\n \n # Create the vertex data\n face_list = list(vertex_stream(bsp_faces, 1))\n mesh_verts = list(vertex_stream(bsp_verts, 0))\n\n me.from_pydata(mesh_verts, [], face_list)\n\n # Update mesh with new data\n me.update()\n apply_uvs(me, bsp_verts)\n\n # Add materials to mesh\n for cmaterial in materials:\n me.materials.append(cmaterial)\n\n # Apply material indexes to mesh faces\n face_materials = list(vertex_stream(bsp_faces, 0))\n\n for polygon_idx, current_polygon in enumerate(me.polygons):\n current_polygon.material_index = face_materials[polygon_idx]\n\n # Add additional properties to the new object\n ob['scale_factor'] = scale_factor\n\n return ob",
"def importMesh(self, name, file, mtype, material, **args):\n args = dictToTuple(**args)\n\n if not self.rank:\n logging.info('Importing mesh from {}'.format(file))\n\n self.lmp.command('fix {} all {} file {} type {} '.format(name, mtype, file, material) + ('{} ' * len(args)).format(*args))",
"def from_mesh_data(mesh):\n if len(mesh.normals) > 0 and len(mesh.uvs) > 0:\n vformat = p3d.GeomVertexFormat.get_v3n3t2()\n vertices = np.column_stack((mesh.vertices, mesh.normals, mesh.uvs))\n elif len(mesh.normals) > 0:\n vformat = p3d.GeomVertexFormat.get_v3n3()\n vertices = np.column_stack((mesh.vertices, mesh.normals))\n elif len(mesh.uvs) > 0:\n vformat = p3d.GeomVertexFormat.get_v3t2()\n vertices = np.column_stack((mesh.vertices, mesh.uvs))\n else:\n vformat = p3d.GeomVertexFormat.get_v3()\n vertices = mesh.vertices\n return Mesh._make(vformat, vertices, mesh.faces)",
"def MeshMachine(main):\n\n # oDesign definition\n oDesign = main['ANSYS']['oDesign']\n\n # Data for the rotor mesh\n RotorName = main['ANSYS']['Rotor&Magnets']['Name'][0]\n RotorNumMaxElem = main['ANSYS']['Mesh']['Rotor']['NumMaxElem']\n RotorMaxLength = main['ANSYS']['Mesh']['Rotor']['MaxLength']\n\n # Data for the magnets mesh\n PMNames = main['ANSYS']['Rotor&Magnets']['PMNames']\n PMNumMaxElem = main['ANSYS']['Mesh']['Magnets']['NumMaxElem']\n PMMaxLength = main['ANSYS']['Mesh']['Magnets']['MaxLength']\n\n # Data for the Stator mesh\n StatorName = main['ANSYS']['Stator']['Name']\n StatorNormalDev = main['ANSYS']['Mesh']['Stator']['NormalDev']\n StatorAspectRatio = main['ANSYS']['Mesh']['Stator']['AspectRatio']\n\n # Data for the Stator mesh\n CoilNames = main['ANSYS']['Winding']['CoilNames']\n WindingNumMaxElem = main['ANSYS']['Mesh']['Winding']['NumMaxElem']\n WindingMaxLength = main['ANSYS']['Mesh']['Winding']['MaxLength']\n\n WindingName = []\n for phase in CoilNames:\n for direction in phase:\n WindingName += direction\n\n # Creating meshes\n oModule = oDesign.GetModule(\"MeshSetup\")\n\n # Rotor meshes\n oModule.AssignLengthOp(\n [\n \"NAME:Rotor\",\n \"RefineInside:=\", True,\n \"Enabled:=\", True,\n \"Objects:=\", [RotorName],\n \"RestrictElem:=\", False,\n \"NumMaxElem:=\", str(RotorNumMaxElem),\n \"RestrictLength:=\", True,\n \"MaxLength:=\", str(RotorMaxLength)+\"mm\"\n ]\n )\n # Magnet meshes\n oModule.AssignLengthOp(\n [\n \"NAME:Magnets\",\n \"RefineInside:=\", True,\n \"Enabled:=\", True,\n \"Objects:=\", PMNames,\n \"RestrictElem:=\", False,\n \"NumMaxElem:=\", str(PMNumMaxElem),\n \"RestrictLength:=\", True,\n \"MaxLength:=\", str(PMMaxLength)+\"mm\"\n ]\n )\n # Stator meshes\n oModule.AssignTrueSurfOp(\n [\n \"NAME:Stator\",\n \"Objects:=\", [StatorName],\n \"CurvedSurfaceApproxChoice:=\", \"ManualSettings\",\n \"SurfDevChoice:=\", 0,\n \"NormalDevChoice:=\", 2,\n \"NormalDev:=\", str(StatorNormalDev) + \"deg\",\n \"AspectRatioChoice:=\", 2,\n \"AspectRatio:=\", str(StatorAspectRatio)\n ]\n )\n\n # Coil meshes\n oModule.AssignLengthOp(\n [\n \"NAME:Coils\",\n \"RefineInside:=\"\t, True,\n \"Enabled:=\"\t\t, True,\n \"Objects:=\"\t\t, WindingName,\n \"RestrictElem:=\"\t, False,\n \"NumMaxElem:=\"\t\t, str(WindingNumMaxElem),\n \"RestrictLength:=\"\t, True,\n \"MaxLength:=\"\t\t, str(WindingMaxLength) +\"mm\"\n ]\n )\n\n return main",
"def __init__(self, outprefix: str):\n paths = PhenoXPaths(outprefix)\n mesh_json_path = os.path.join(paths.data_dir, 'mesh.json')\n self.mesh = dict()\n\n if not os.path.exists(mesh_json_path):\n mesh_bin_file = glob.glob(os.path.join(paths.data_dir, '*.bin'))\n if mesh_bin_file:\n self._parse_mesh_bin(mesh_bin_file[0], mesh_json_path)\n\n self.mesh = json.load(open(mesh_json_path, 'r'))",
"def __init__(self, mesh):\n self._mesh = mesh",
"def from_file(filename=None, io='auto', prefix_dir=None,\n omit_facets=False, file_format=None):\n if isinstance(filename, Mesh):\n return filename\n\n if io == 'auto':\n if filename is None:\n output('filename or io must be specified!')\n raise ValueError\n else:\n io = MeshIO.any_from_filename(filename, prefix_dir=prefix_dir,\n file_format=file_format)\n\n output('reading mesh (%s)...' % io.filename)\n timer = Timer(start=True)\n\n trunk = io.get_filename_trunk()\n mesh = Mesh(trunk)\n mesh = io.read(mesh, omit_facets=omit_facets)\n\n output('...done in %.2f s' % timer.stop())\n\n mesh._set_shape_info()\n\n return mesh",
"def parse_mesh(\n file_path: str,\n node: Node,\n verbose=False,\n):\n prefix_file_path, ext = os.path.splitext(file_path)\n if ext.lower() == \".spv\":\n file_path = prefix_file_path + \".SPM\"\n binary_file = open(file_path, \"rb\")\n node.name = os.path.splitext(os.path.basename(file_path))[0]\n g = BinaryReader(binary_file)\n n = 0\n\n current_offset = g.tell()\n node.offset = current_offset\n\n # Handle SPM file\n logger.debug(\"=== DEBUG MESH PARSER ===\")\n g.seek(current_offset)\n B = g.i(4)\n meshes = B[3]\n offset_seek = current_offset + B[2]\n logger.debug({\n \"B\": B,\n \"meshes\": B[3],\n \"offset_seek\": offset_seek,\n })\n g.seek(offset_seek)\n C = g.i(5)\n C1 = []\n logger.debug(\"Current offset: %s\" % g.tell())\n for m in range(meshes):\n a = g.i(8)\n logger.debug({\n \"g.i(8)\": a,\n })\n C1.append(a)\n for m in range(meshes):\n a = g.i(4)\n logger.debug({\n \"g.i(4)\": a,\n })\n node.data[\"mesh_list\"] = []\n\n for _mesh_idx, m in enumerate(range(meshes)):\n logger.debug(\"%s Looping Mesh %s %s>\" % (('=' * 64), (_mesh_idx), ('=' * 64)))\n D = g.i(15)\n logger.debug({\n \"D\": D,\n \"D[13]\": D[13],\n })\n tm = g.tell()\n name_offset = tm - 2 * 4 + D[13]\n g.seek(name_offset)\n name = g.find(b\"\\x00\")\n logger.debug({\n \"name\": name,\n \"name_offset\": name_offset,\n })\n\n offset_1 = tm - 1 * 4 + D[14]\n logger.debug(\"offset_1: %s - 1 * 4 + %s = %s\" % (tm, D[14], offset_1))\n g.seek(offset_1)\n\n mesh_list = []\n node.data[\"mesh_list\"].append(mesh_list)\n\n offset_2 = tm - 9 * 4 + D[6]\n logger.debug(\"offset_2: %s - 9 * 4 + %s = %s\" % (tm, D[6], offset_2))\n g.seek(offset_2)\n\n unknown = g.i(1)\n unkCount = unknown[0]\n logger.debug({\n \"unknown\": unknown,\n \"unkCount\": unkCount,\n })\n logger.debug({\n \"indice_start_offset\": g.tell(),\n \"D[11]\": D[11],\n })\n E = []\n\n if unkCount >= 1:\n # Original approach. Works great for CH mesh.\n logger.debug(\"FOUND %s SUBMESHES - Original Approach\" % unkCount)\n for i in range(unkCount):\n mesh = Mesh()\n mesh.name = name\n mesh.diffuseID = D[4] - 1\n E1 = g.H(2)\n logger.debug({\n \"E1\": E1,\n })\n mesh.vertUVCount = E1[0]\n logger.debug(\"mesh.vertUVCount: %s\" % mesh.vertUVCount)\n mesh_list.append(mesh)\n E.append(E1)\n\n for i in range(unkCount):\n face_idx = E[i][1]\n indiceList = g.H(face_idx)\n logger.debug(\"indiceList size: %s face_idx: %s\" % (len(indiceList), face_idx))\n mesh = mesh_list[i]\n mesh.indiceList = indiceList\n\n logger.debug(\"mesh.indiceList: %s\" % len(mesh.indiceList))\n\n else:\n # Blender combined approach. Faces still incorrectly parsed.\n logger.debug(\"FOUND %s SUBMESHES - Blender Combined Approach\" % unkCount)\n for i in range(unkCount):\n mesh = Mesh()\n mesh.name = name\n mesh.diffuseID = D[4] - 1\n mesh_list.append(mesh)\n E1 = g.H(2)\n logger.debug({\n \"E1\": E1,\n })\n mesh.vertUVCount += E1[0]\n E.append(E1)\n logger.debug(\"mesh.vertUVCount: %s\" % mesh.vertUVCount)\n for i in range(unkCount):\n indiceList = g.H(E[i][1])\n mesh = mesh_list[i]\n mesh.indiceList = indiceList\n\n logger.debug(\"mesh.indiceList size: %s\" % len(mesh.indiceList))\n\n mesh_offset = tm - 8 * 4 + D[7]\n logger.debug(\"mesh_offset: %s - 8 * 4 + %s = %s\" % (tm, D[7], mesh_offset))\n g.seek(mesh_offset)\n logger.debug(\"C1[%s]: %s\" % (m, C1[m]))\n if D[0] in (1792,):\n logger.debug(\"VERDICT: Unskinned mesh? %s\" % name)\n mesh = mesh_list[0]\n for i in range(C1[m][4]):\n mesh.vertPosList.append(g.f(3))\n\n elif D[0] in (1024, 1026, 1027):\n logger.debug(\"VERDICT: BG mesh? 
%s\" % name)\n mesh = mesh_list[0]\n vertices = C1[m][4]\n if vertices == 0:\n # NOTE: Don't bother trying other index values besides D[10]\n logger.debug(\"No vertices found! Probably BG or static mesh. Using D[10]: %s\" % D[10])\n vertices = D[10]\n\n total_v = []\n total_vn = []\n total_indices = mesh.indiceList\n print(\"total_indices:\", len(total_indices))\n\n for i in range(vertices):\n # Vertex Position\n v_offset = g.tell()\n vertex = g.f(3)\n if verbose:\n logger.debug({\n \"v\": vertex,\n \"v_offset\": v_offset,\n })\n total_v.append(vertex)\n mesh.vertPosList.append(vertex)\n\n # Vertex Normal\n vn_offset = v_offset\n if not D[0] in (1024, 1026):\n vn_offset = v_offset + 888\n g.seek(vn_offset)\n vertex_normal = g.f(3)\n if verbose:\n logger.debug({\n \"vn\": vertex_normal,\n \"vn_offset\": vn_offset,\n })\n total_vn.append(vertex_normal)\n mesh.vertNormList.append(vertex_normal)\n g.seek(v_offset + 12)\n\n start_vertUVCount = 0\n end_vertUVCount = 0\n start_indiceList = 0\n end_indiceList = 0\n\n for idx, mesh in enumerate(mesh_list):\n end_vertUVCount += mesh.vertUVCount\n mesh.vertPosList = total_v[start_vertUVCount:end_vertUVCount]\n mesh.vertNormList = total_vn[start_vertUVCount:end_vertUVCount]\n start_vertUVCount += mesh.vertUVCount\n\n logger.debug({\n \"submesh_name\": mesh.name,\n \"v\": len(mesh.vertPosList),\n \"vn\": len(mesh.vertNormList),\n })\n\n elif D[0] in (258, 256):\n logger.debug(\"VERDICT: Skinned mesh? %s\" % name)\n mesh = mesh_list[0]\n\n g.seek(mesh_offset)\n v1 = C1[m][4]\n v2 = C1[m][5]\n v3 = C1[m][6]\n v4 = C1[m][7]\n logger.debug({\n \"v1\": v1,\n \"v2\": v2,\n \"v3\": v3,\n \"v4\": v4,\n })\n get_vertex_data(mesh, g, v1, v2, v3, v4, n, verbose)\n mesh_range = unkCount - 1\n logger.debug(\"mesh_range: %s\" % mesh_range)\n for x in range(mesh_range):\n logger.debug(\"Loop Submesh %s\" % x)\n mesh = mesh_list[1 + x]\n E = g.i(4)\n v1 = E[0]\n v2 = E[1]\n v3 = E[2]\n v4 = E[3]\n logger.debug({\n \"v1\": v1,\n \"v2\": v2,\n \"v3\": v3,\n \"v4\": v4,\n })\n get_vertex_data(mesh, g, v1, v2, v3, v4, n, verbose)\n\n else:\n logger.warning({\n \"msg\": \"Invalid mesh object.\",\n \"D[1]\": D[1],\n \"g.f(12)\": g.f(12),\n })\n break\n\n g.seek(tm)\n\n F = g.i(C[0])\n node.data[\"hash_list\"] = F\n\n # Handle SPV file\n spv_file = os.path.splitext(file_path)[0] + \".SPV\"\n logger.debug({\n \"spv_file\": spv_file,\n })\n parse_uv(spv_file, node, verbose=verbose)\n g.close()",
"def test_convert_MeshVTK():\n mesh = MeshVTK(\n path=join(TEST_DATA_DIR, \"StructElmer\"), name=\"case_t0001\", format=\"vtu\"\n )\n\n meshmat = mesh.convert(meshtype=\"MeshMat\", scale=1)\n\n # meshsol = MeshSolution(mesh=[meshmat])\n # meshsol.plot_mesh(is_show_fig=False)",
"def load_verts(file_data, headers, scale_factor):\n\n\n def vert_from_pack(vert_data):\n return (\n (vert_data[0] * scale_factor, vert_data[1] * scale_factor, vert_data[2] * scale_factor,), #XYZ\n (vert_data[3], vert_data[4],), #UV1\n (vert_data[5], vert_data[6],), #UV2\n (vert_data[7], vert_data[8], vert_data[9],), #Normal\n (vert_data[10], vert_data[11], vert_data[12], vert_data[13],), #RGBA\n )\n\n vert_offset, vert_length = headers[10]\n vert_chunk = Struct(\"3f2f2f3f4B\") \n vert_size = vert_chunk.size\n vert_count = int(vert_length / vert_size)\n\n print (\"Found {} vertices\".format(vert_count))\n\n vertices = []\n\n for current_vert_idx in range(vert_count):\n vert_file_position = vert_offset + current_vert_idx * vert_size\n current_vert = vert_chunk.unpack(file_data[vert_file_position : vert_file_position+vert_size])\n vertices.append(vert_from_pack(current_vert))\n\n return vertices",
"def volume():\n # Get the active object\n obj = bpy.context.active_object\n \n scene = bpy.context.scene\n unit = scene.unit_settings\n \n # Set blender unit in mm\n unit.scale_length = 0.001\n bpy.context.scene.unit_settings.length_unit = 'MILLIMETERS' \n \n # Get the scale\n scale = 1.0 if unit.system == 'NONE' else unit.scale_length\n \n # Switch in object mode \n bpy.ops.object.mode_set(mode='EDIT')\n \n # Load mesh\n me = bpy.context.edit_object.data\n bm_orig = bmesh.from_edit_mesh(me)\n \n # Make a copy of the mesh\n bm = bm_orig.copy()\n\n # Apply modifier to the copy\n bm.transform(obj.matrix_world)\n \n print(scale)\n print(bm.calc_volume())\n \n # Calcul the volume\n bpy.types.Scene.volume = bm.calc_volume() * (scale ** 3.0) / (0.001 ** 3.0)\n print(bpy.types.Scene.volume)\n \n # Delete the copy\n bm.free()\n \n # Switch in object mode \n bpy.ops.object.mode_set(mode='OBJECT')",
"def importMeshes(self, name=None):\n wall = False\n\n if 'mesh' in self.pargs:\n for mesh in self.pargs['mesh'].keys():\n\n if 'file' in self.pargs['mesh'][mesh]:\n if name:\n if mesh == name:\n self.pargs['mesh'][mesh]['import'] = True\n self.importMesh(mesh, self.pargs['mesh'][mesh]['file'], self.pargs['mesh'][mesh]['mtype'], self.pargs['mesh'][mesh]['id'], **self.pargs['mesh'][mesh]['args']) \n wall = True\n\n elif 'import' in self.pargs['mesh'][mesh]:\n if self.pargs['mesh'][mesh]['import']:\n self.importMesh(mesh, self.pargs['mesh'][mesh]['file'], self.pargs['mesh'][mesh]['mtype'], self.pargs['mesh'][mesh]['id'], **self.pargs['mesh'][mesh]['args']) \n wall = True\n \n if wall:\n self.setupWall(wtype='mesh')",
"def LoadSphere():\n return vtkInterface.PolyData(spherefile)",
"def mesh_hook(mesh, mode):\n if mode == 'read':\n mesh = gen_block_mesh(dims, shape, [0, 0], name='user_block',\n verbose=False)\n return mesh\n\n elif mode == 'write':\n pass",
"def get(self, mesh_name):\n\n # Assemble the absolute path to the mesh file\n file_path = get_file_path('meshes', mesh_name, 'obj')\n\n if file_path in self.cache:\n return self.cache[file_path]\n\n mesh = ObjMesh(file_path)\n self.cache[file_path] = mesh\n\n return mesh",
"def load_phong_mesh(file, shader,depth):\n global light_dir\n try:\n pp = assimpcy.aiPostProcessSteps\n flags = pp.aiProcess_Triangulate | pp.aiProcess_GenSmoothNormals\n scene = assimpcy.aiImportFile(file, flags)\n except assimpcy.all.AssimpError as exception:\n print('ERROR loading', file + ': ', exception.args[0].decode())\n return []\n\n # prepare mesh nodes\n meshes = []\n for mesh in scene.mMeshes:\n mat = scene.mMaterials[mesh.mMaterialIndex].properties\n mesh = PhongMesh(shader, [mesh.mVertices, mesh.mNormals], depth, mesh.mFaces,\n k_d=mat.get('COLOR_DIFFUSE', (1, 1, 1)),\n k_s=mat.get('COLOR_SPECULAR', (1, 1, 1)),\n k_a=mat.get('COLOR_AMBIENT', (0, 0, 0)),\n s=mat.get('SHININESS', 16.),\n light_dir=light_dir)\n meshes.append(mesh)\n\n size = sum((mesh.mNumFaces for mesh in scene.mMeshes))\n print('Loaded %s\\t(%d meshes, %d faces)' % (file, len(meshes), size))\n return meshes",
"def import_droplet(ply_path, object_name, dim, scale, material_name):\n\n # Import geometry\n bpy.ops.import_mesh.ply(filepath = ply_path)\n \n # Get name of just-imported object \n name_starts_with = os.path.basename(ply_path)[:-4] # Base name of ply file without \".ply\" extension\n print(object_name)\n print(bpy.data.objects.keys())\n for object_name_infile in bpy.data.objects.keys():\n if object_name_infile.startswith(name_starts_with):\n current_object = object_name_infile\n break\n \n # Select this object\n bpy.context.scene.objects.active = bpy.data.objects[current_object]\n \n # Get this object\n ob = bpy.context.active_object\n\n # Re-name current object\n ob.name = object_name\n\n # Remove doubled vertices\n remove_doubles()\n\n # Move object to center stage and rescale to appropriate size\n center_databox(dim[0], dim[1], dim[2], scale)\n\n # Get interface material\n mat = bpy.data.materials.get(material_name)\n # Assign it to object\n if ob.data.materials:\n # assign to 1st material slot\n ob.data.materials[0] = mat\n else:\n # no slots; create new slot\n ob.data.materials.append(mat)\n\n # Enable smooth shading on current mesh object\n bpy.ops.object.shade_smooth()\n\n return ob",
"def read_from_np(self, filename1, filename2):\n if os.path.exists(filename1) and os.path.exists(filename2):\n logger.info(\"Reading mesh from files {0} and {1}\".format(filename1, filename2))\n\n datamesh = np.loadtxt(filename2)\n self.nnodes = int(datamesh[0])\n self.ninterfaces = int(datamesh[1])\n self.nelements = int(datamesh[2])\n\n # Load mesh nodes\n meshnodes = np.genfromtxt(filename1, skip_footer=self.nelements + self.ninterfaces)\n meshnodes = meshnodes.flatten()\n\n # Load mesh elements\n meshelements = np.genfromtxt(filename1, skip_header=self.nnodes + self.ninterfaces)\n meshelements = np.fromstring(meshelements)\n meshelements = np.int_(meshelements)\n\n # Extract node coordinates\n self.xnode = meshnodes[np.arange(1, self.nnodes * 3, 3)]\n self.ynode = meshnodes[np.arange(2, self.nnodes * 3, 3)]\n\n # Indices of the elements\n self.i1 = meshelements[np.arange(0, self.nelements * 6, 6)] - 1\n self.i2 = meshelements[np.arange(2, self.nelements * 6, 6)] - 1\n self.i3 = meshelements[np.arange(4, self.nelements * 6, 6)] - 1\n\n return self\n\n elif os.path.exists(filename1):\n logger.error(\"Mesh topography file {0} does not exist\".format(filename2))\n raise FileNotFoundError('File does not exist')\n\n elif os.path.exists(filename2):\n logger.error(\"Mesh file {0} does not exist\".format(filename1))\n raise FileNotFoundError('File does not exist')\n\n else:\n logger.error(\"Mesh files {0} and {1} don't exist\".format(filename1, filename2))\n raise FileNotFoundError('File does not exist')",
"def init_mesh(self):\n inputs = self.inputs\n read_mesh = \"input_db\" in inputs\n if read_mesh:\n _lgr.info(\"NaluTaskRunner: initializing mesh meta data\")\n self.mesh.init_mesh_meta(inputs.input_db)\n\n for task in self.task_list:\n task.init_meta_data()\n\n read_time = 0.0\n if read_mesh:\n _lgr.info(\"NaluTaskRunner: populating bulk data\")\n read_time = self.mesh.init_mesh_bulk(inputs.input_db)\n else:\n self.mesh.meta.commit()\n self.read_time = read_time"
] | [
"0.70592856",
"0.6962647",
"0.69257474",
"0.6415933",
"0.6392293",
"0.6390476",
"0.6343733",
"0.6273749",
"0.6219491",
"0.620209",
"0.609297",
"0.60837203",
"0.6062914",
"0.6002005",
"0.5998866",
"0.5905394",
"0.586846",
"0.585365",
"0.5845951",
"0.580518",
"0.57808256",
"0.57735777",
"0.57655925",
"0.5747426",
"0.57121634",
"0.5708973",
"0.57015926",
"0.56908184",
"0.567997",
"0.5649486"
] | 0.7867217 | 0 |
Loads mixd volume meshes. | def load_volume_mixd(dim, fname=None, mxyz=None, mien=None, hexa=False):
    vertices, elements = mixd_load_(fname, mxyz, mien)

    mesh = Mesh()
    mesh.vertices = vertices.reshape(-1, dim)

    if hexa:
        mesh.elements = elements.reshape(-1, 8)
    else:
        mesh.elements = elements.reshape(-1, 4)

    return mesh | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_volume_mesh(fname):\n fname = abs_fname_(fname)\n\n m = meshio.read(fname)\n mesh = Mesh()\n mesh.vertices = m.points\n\n for i, c in enumerate(m.cells):\n if i == 0:\n elements = c.data\n else:\n elements = np.vstack((elements, c.data))\n\n mesh.elements = elements\n\n return mesh",
"def load_meshes(self):\n for meta_mesh in self.gltf.meshes:\n # Returns a list of meshes\n meshes = meta_mesh.load(self.materials)\n self.meshes.append(meshes)\n\n for mesh in meshes:\n self.scene.meshes.append(mesh)",
"def load_vecs():\n global VECTORIZER\n global CECTORIZER\n\n v_file = os.path.join(TMP_DIR, 'vectorizer.pickle')\n d_file = os.path.join(TMP_DIR, 'dectorizer.pickle')\n f_file = os.path.join(TMP_DIR, 'freq.pickle')\n\n if os.path.isfile(v_file) and os.path.isfile(d_file):\n with open(v_file, 'rb') as f:\n VECTORIZER = pickle.load(f)\n with open(d_file, 'rb') as f:\n CECTORIZER = pickle.load(f)\n return True\n\n return False",
"def _load_msh(self):\n log_gui.debug(\"_load_msh begin\")\n mod = self._mod\n mesh = mod.load_mesh_from_selection()\n if not mesh:\n mod.launch(GC.ERROR, \"The selected entry is not a mesh \"\n \"or the SMESH component must be activated\")\n return \n mdim = mesh.give_dim()\n if (mdim != self._data.get_dim()):\n mess = self.no_dim_mess\n if type(mdim) is int:\n mess = self.dim_mess % (mdim, self._data.get_dim())\n mod.launch(GC.ERROR, mess)\n return\n \n self.setdata(mesh)\n log_gui.debug(\"_load_msh end\")",
"def load_data(self, filenames):\n filenames = filenames[::self.step]\n num = len(filenames)\n first = read_tiff(filenames[0])[::self.step, ::self.step]\n width, height = first.shape\n data = np.empty((width, height, num), dtype=np.float32)\n data[:,:,0] = first\n\n for i, filename in enumerate(filenames[1:]):\n data[:, :, i + 1] = read_tiff(filename)[::self.step, ::self.step]\n\n volume = create_volume(data)\n dx, dy, dz, _ = volume.shape\n\n volume_item = gl.GLVolumeItem(volume, sliceDensity=self.density)\n volume_item.translate(-dx / 2, -dy / 2, -dz / 2)\n volume_item.scale(0.05, 0.05, 0.05, local=False)\n self.volume_view.addItem(volume_item)",
"def load_materials(self):\n # Create material objects\n for meta_mat in self.gltf.materials:\n mat = Material(meta_mat.name)\n mat.color = meta_mat.baseColorFactor or [1.0, 1.0, 1.0, 1.0]\n mat.double_sided = meta_mat.doubleSided\n\n if meta_mat.baseColorTexture is not None:\n mat.mat_texture = self.textures[meta_mat.baseColorTexture[\"index\"]]\n\n self.materials.append(mat)\n self.scene.materials.append(mat)",
"def load_meshes_from(self, med_fname):\n from salome import lcc\n from SMESH import SMESH_Gen\n sstd = self.sstd\n ceng = lcc.FindOrLoadComponent(\"FactoryServer\", \"SMESH\")\n eng = ceng._narrow(SMESH_Gen)\n eng.SetCurrentStudy(sstd)\n cmeshes = eng.CreateMeshesFromMED(med_fname)[0]\n meshes = []\n for cmesh in cmeshes:\n meshes.append(self.attach_mesh_from(cmesh))\n return meshes",
"def mixd_load_(fname=None, mxyz=None, mien=None):\n fname = abs_fname_(fname)\n\n if fname is None and (mxyz is None and mien is None):\n raise ValueError(\n \"Either `fname` or (`mxyz` and `mien`) needs to be defined.\"\n )\n\n if fname is None:\n if (\n (mxyz is None and mien is not None)\n or (mxyz is not None and mien is None)\n ):\n raise ValueError(\n \"Both `mxyz` and `mien` needs to be defined.\"\n )\n\n if fname is not None:\n base, ext = os.path.splitext(fname)\n\n if ext == \".campiga\":\n mxyz = base + \".coords\"\n mien = base + \".connectivity\"\n\n elif ext == \".xns\":\n mxyz = base + \".mxyz\"\n mien = base + \".mien\"\n\n vertices = np.fromfile(mxyz, dtype=\">d\").astype(np.double)\n #> Starts at 1, but need 0. Thus, -1.\n connectivity = (np.fromfile(mien, dtype=\">i\") - int(1)).astype(np.int32)\n\n return vertices, connectivity",
"def loaddata(path):\n if path.endswith(\".tiff\") or path.endswith(\".tif\"):\n try:\n from vigra.impex import readVolume\n except ImportError:\n raise ImportError(\"Vigra is needed to read/write TIFF volumes, but could not be imported.\")\n\n volume = readVolume(path)\n return volume\n\n elif path.endswith(\".h5\"):\n try:\n from Antipasti.netdatautils import fromh5\n except ImportError:\n raise ImportError(\"h5py is needed to read/write HDF5 volumes, but could not be imported.\")\n\n volume = fromh5(path)\n return volume\n\n else:\n raise NotImplementedError(\"Can't load: unsupported format. Supported formats are .tiff and .h5\")",
"def load_volume(name, nx, ny, nz):\n\n # load raw volume into memory\n img = np.fromfile(name, dtype=np.float32)\n img = np.reshape(img, (ny, nx, nz))\n\n return img.transpose(0, 2, 1)",
"def _load_msh(self):\n log_gui.debug(\"_load_msh begin\")\n mod = self._mod\n mesh = mod.load_mesh_from_selection()\n if not mesh:\n mod.launch(GC.ERROR, \"The selected entry is not a mesh \"\n \"or the SMESH component must be activated\")\n return\n mdim = mesh.give_dim()\n if (mdim != self._model.dim):\n mess = self.no_dim_mess\n if type(mdim) is int:\n mess = self.dim_mess % (mdim, self._model.dim)\n mod.launch(GC.ERROR, mess)\n return\n self._lab.setText(mesh.read_name())\n self._exp_sel.notify(mesh)\n log_gui.debug(\"_load_msh update field with mesh = %s\", mesh)\n self._wfield.update(mesh)\n log_gui.debug(\"_load_msh end\")",
"def load_mesh(self, name: str = None) -> dolfin.mesh:\n if self.mesh is None:\n self.mesh = df.Mesh()\n if name is None:\n mesh_name = self._casedir / Path(\"mesh.xdmf\")\n else:\n mesh_name = self._casedir / Path(f\"{name}.xdmf\")\n with df.XDMFFile(str(mesh_name)) as infile:\n infile.read(self.mesh)\n return self.mesh",
"def _load_volume_from_jpg(files: List[str]) -> np.ndarray:\n volume = []\n for file in files:\n img = cv.imread(file, cv.IMREAD_GRAYSCALE)\n volume.append(img)\n # plt.imshow(img, cmap='gray')\n # plt.show()\n volume = np.stack(volume)\n volume = volume / volume.max() * 1024\n return volume",
"def load_verts(file_data, headers, scale_factor):\n\n\n def vert_from_pack(vert_data):\n return (\n (vert_data[0] * scale_factor, vert_data[1] * scale_factor, vert_data[2] * scale_factor,), #XYZ\n (vert_data[3], vert_data[4],), #UV1\n (vert_data[5], vert_data[6],), #UV2\n (vert_data[7], vert_data[8], vert_data[9],), #Normal\n (vert_data[10], vert_data[11], vert_data[12], vert_data[13],), #RGBA\n )\n\n vert_offset, vert_length = headers[10]\n vert_chunk = Struct(\"3f2f2f3f4B\") \n vert_size = vert_chunk.size\n vert_count = int(vert_length / vert_size)\n\n print (\"Found {} vertices\".format(vert_count))\n\n vertices = []\n\n for current_vert_idx in range(vert_count):\n vert_file_position = vert_offset + current_vert_idx * vert_size\n current_vert = vert_chunk.unpack(file_data[vert_file_position : vert_file_position+vert_size])\n vertices.append(vert_from_pack(current_vert))\n\n return vertices",
"def load(self):\n metalist = []\n metalist_files = glob.glob(os.path.join(self.__pickle_path, '*.pickle'))\n for metalist_dump in metalist_files:\n with open(metalist_dump, 'rb') as file_desc:\n metalist += pickle.load(file_desc)\n return metalist",
"def LoadFluxData(self, *args):\n return _gmat_py.SolarFluxReader_LoadFluxData(self, *args)",
"def load_dicom_volume(filename):\n # load the supplied file and get the UID of the series\n ds = pydicom.read_file(filename)\n seriesUID = ds.SeriesInstanceUID\n\n # get the position of the image\n position = numpy.array(list(map(float, ds.ImagePositionPatient)))\n\n # get the direction normal to the plane of the image\n row_vector = numpy.array(ds.ImageOrientationPatient[:3])\n col_vector = numpy.array(ds.ImageOrientationPatient[3:])\n normal_vector = numpy.cross(row_vector, col_vector)\n\n # we order slices by their distance along the normal\n def normal_distance(coords):\n return numpy.dot(normal_vector, coords)\n\n # create a dictionary to hold the slices as we load them\n slices = {normal_distance(position): ds.pixel_array}\n\n # extract the path to the folder of the file so we can look for others from the same series\n folder, _ = os.path.split(filename)\n for name in os.listdir(folder):\n if name.lower().endswith(\".ima\") or name.lower().endswith(\".dcm\"):\n new_dicom_name = os.path.join(folder, name)\n new_ds = pydicom.read_file(new_dicom_name)\n\n # check that the series UID matches\n if new_ds.SeriesInstanceUID == seriesUID:\n if new_ds.pixel_array.shape != ds.pixel_array.shape:\n continue\n new_position = list(map(float, new_ds.ImagePositionPatient))\n slices[normal_distance(new_position)] = new_ds.pixel_array\n\n # we set the overall position of the volume with the position\n # of the lowest slice\n if normal_distance(new_position) < normal_distance(position):\n position = new_position\n\n # that is all the slices in the folder, assemble them into a 3d volume\n voxel_array = numpy.zeros((len(slices),\n ds.pixel_array.shape[0],\n ds.pixel_array.shape[1]), dtype=ds.pixel_array.dtype)\n sorted_slice_positions = sorted(slices.keys())\n for i, slice_position in enumerate(sorted_slice_positions):\n voxel_array[i] = slices[slice_position]\n\n # the voxel spacing is a combination of PixelSpacing and slice separation\n voxel_spacing = list(map(float, ds.PixelSpacing))\n voxel_spacing.append(sorted_slice_positions[1] - sorted_slice_positions[0])\n\n # replace the initial slice z position with the lowest slice z position\n # position[2] = sorted_slice_positions[0]\n\n transform = transformation_matrix(row_vector,\n col_vector,\n position,\n voxel_spacing)\n\n return {\n \"voxel_spacing\": voxel_spacing,\n \"position\": position,\n \"volume\": voxel_array,\n \"vectors\": [row_vector, col_vector, normal_vector],\n \"transform\": transform\n }",
"def load_file(path):\n with open(path, \"rb\") as f: # bsps are binary files\n byte_list = f.read() # stores all bytes in bytes1 variable (named like that to not interfere with builtin names\n header = load_header(byte_list)\n skin_names = [byte_list[header.ofs_skins + 64 * x:header.ofs_skins + 64 * x + 64].decode(\"ascii\", \"ignore\") for x in range(header.num_skins)]\n triangles = load_triangles(byte_list[header.ofs_tris:header.ofs_frames], header)\n frames = load_frames(byte_list[header.ofs_frames:header.ofs_glcmds], header)\n texture_coordinates = load_texture_coordinates(byte_list[header.ofs_st:header.ofs_tris], header)\n gl_commands = load_gl_commands(byte_list[header.ofs_glcmds:header.ofs_end])\n # print(header)\n # print(skin_names)\n # print(triangles)\n # print(frames)\n # print(texture_coordinates)\n for i in range(len(texture_coordinates)):\n texture_coordinates[i].s = texture_coordinates[i].s/header.skinwidth\n texture_coordinates[i].t = texture_coordinates[i].t / header.skinheight\n # print(texture_coordinates)\n # print(header.num_xyz)\n for i_frame in range(len(frames)):\n for i_vert in range((header.num_xyz)):\n frames[i_frame].verts[i_vert].v[0] = frames[i_frame].verts[i_vert].v[0]*frames[i_frame].scale.x+frames[i_frame].translate.x\n frames[i_frame].verts[i_vert].v[1] = frames[i_frame].verts[i_vert].v[1] * frames[i_frame].scale.y + frames[i_frame].translate.y\n frames[i_frame].verts[i_vert].v[2] = frames[i_frame].verts[i_vert].v[2] * frames[i_frame].scale.z + frames[i_frame].translate.z\n model = md2_object(header, skin_names, triangles, frames, texture_coordinates, gl_commands)\n return model",
"def importMeshes(self, name=None):\n wall = False\n\n if 'mesh' in self.pargs:\n for mesh in self.pargs['mesh'].keys():\n\n if 'file' in self.pargs['mesh'][mesh]:\n if name:\n if mesh == name:\n self.pargs['mesh'][mesh]['import'] = True\n self.importMesh(mesh, self.pargs['mesh'][mesh]['file'], self.pargs['mesh'][mesh]['mtype'], self.pargs['mesh'][mesh]['id'], **self.pargs['mesh'][mesh]['args']) \n wall = True\n\n elif 'import' in self.pargs['mesh'][mesh]:\n if self.pargs['mesh'][mesh]['import']:\n self.importMesh(mesh, self.pargs['mesh'][mesh]['file'], self.pargs['mesh'][mesh]['mtype'], self.pargs['mesh'][mesh]['id'], **self.pargs['mesh'][mesh]['args']) \n wall = True\n \n if wall:\n self.setupWall(wtype='mesh')",
"def load_materials(file_data, headers, base_path):\n\n\n def load_material_texture(texture_file):\n filename = os.path.join(base_path, texture_file + \".jpg\")\n try:\n img = bpy.data.images.load(str(filename))\n cTex = bpy.data.textures.new('ColorTex', type = 'IMAGE')\n cTex.image = img\n return cTex\n except:\n print (\"Cannot load image {}\".format(filename))\n return None\n\n\n def material_from_pack(material):\n \"\"\" \n Extract just the data we want from the full chunk\n \"\"\"\n texture_file_name = material[0].decode(\"utf-8\").replace('\\x00', '').strip()\n return (\n texture_file_name,\n load_material_texture(texture_file_name)\n )\n texture_offset, texture_length = headers[1]\n texture_chunk = Struct(\"64sii\") \n texture_size = texture_chunk.size\n texture_count = int(texture_length / texture_size)\n\n textures = []\n for current_texture_idx in range(texture_count):\n texture_file_position = texture_offset + current_texture_idx * texture_size\n packed_texture = texture_chunk.unpack(file_data[texture_file_position : texture_file_position+texture_size])\n current_texture = material_from_pack(packed_texture)\n textures.append(current_texture)\n \n return textures",
"def load_data(self):\n @Logger.runtime\n def process_coords():\n \"\"\"\n The placement of locations on our minimap is crucial. Panda3D objects however have a coordinate range from\n -1 to 1 on all axis, meaning that if we read a coordinate of a location from some image processing software\n by hand, we have to transform those coordinates into coordinates Panda would understand. This function does\n just that.\n :return: Normalized coordinates of location coordinates.\n \"\"\"\n split_coords = row[\"map_coord\"].split(',')\n map_x, map_y = [int(i) for i in split_coords]\n map_x_normed = ((map_x*2) / self.MINIMAP_DIM) - 1\n map_y_normed = -(((map_y*2) / self.MINIMAP_DIM) - 1)\n return map_x_normed, map_y_normed\n\n @Logger.runtime\n def process_texture():\n texture_path = Path(\"resource/textures/{}\".format(row[\"texture\"]))\n texture = self.loader.loadTexture(texture_path)\n return texture\n\n # the cylinder is loaded here but it does not yet show up, until it's specifically asked to\n self.scene_3d_model = self.loader.loadModel(self.PATHS[\"3D_SCENE_MODEL\"])\n\n try:\n with open(self.PATHS[\"LOCATIONS_DB\"], \"r\") as l_file:\n data = csv.DictReader(l_file, delimiter=\"|\")\n for row in data:\n id = int(row[\"id\"])\n x, y = process_coords()\n neighbors = [int(neighbor_id) for neighbor_id in row[\"neighbors\"].split(',')]\n texture = process_texture()\n location = Location(id, x, y, neighbors, texture)\n location.reparentTo(self.render2d)\n self.locations.append(location)\n Logger.log_info('The locations_db has been loaded')\n except:\n Logger.error('{} file not found!'.format(self.PATHS[\"LOCATIONS_DB\"]))\n\n self.active_location = self.locations[0]",
"def load(gmshfile, scale, dx, dy, dz):\n\n # noinspection PyPep8Naming,PyShadowingNames\n def getAveNormals(nodes, elems):\n \"\"\"Calcula las normales promedio por cada vertice\"\"\"\n nodetrilist = []\n for nodenum in range(len(nodes)):\n nodetrilist.append([])\n for elemnum in range(len(elems)):\n if nodenum in elems[elemnum]:\n nodetrilist[nodenum].append(elemnum)\n avenorms = []\n for tri in nodetrilist:\n aveNi = 0.0\n aveNj = 0.0\n aveNk = 0.0\n denom = max(float(len(tri)), 1)\n for elem in tri:\n vert1 = [nodes[elems[elem][0]][0], nodes[elems[elem][0]][1],\n nodes[elems[elem][0]][2]]\n vert2 = [nodes[elems[elem][1]][0], nodes[elems[elem][1]][1],\n nodes[elems[elem][1]][2]]\n vert3 = [nodes[elems[elem][2]][0], nodes[elems[elem][2]][1],\n nodes[elems[elem][2]][2]]\n normals = getNormals(vert1, vert2, vert3)\n aveNi += normals[0]\n aveNj += normals[1]\n aveNk += normals[2]\n avenorms.append([aveNi / denom, aveNj / denom, aveNk / denom])\n return avenorms\n\n # noinspection PyPep8Naming\n def getNormals(vertA, vertB, vertC):\n \"\"\"Calcula las normales por cada 3 vertices\"\"\"\n xA = vertA[0]\n xB = vertB[0]\n xC = vertC[0]\n yA = vertA[1]\n yB = vertB[1]\n yC = vertC[1]\n zA = vertA[2]\n zB = vertB[2]\n zC = vertC[2]\n ABx = xB - xA\n ABy = yB - yA\n ABz = zB - zA\n BCx = xC - xB\n BCy = yC - yB\n BCz = zC - zB\n Nx = ABy * BCz - ABz * BCy\n Ny = ABz * BCx - ABx * BCz\n Nz = ABx * BCy - ABy * BCx\n VecMag = math.sqrt(Nx ** 2 + Ny ** 2 + Nz ** 2)\n Ni = Nx / VecMag\n Nj = Ny / VecMag\n Nk = Nz / VecMag\n return [Ni, Nj, Nk]\n\n # Lee el archivo\n try:\n infile = open(gmshfile)\n except:\n raise Exception(\"el archivo del modelo no existe\")\n\n # Crea el modeo\n try:\n gmshlines = infile.readlines()\n readnodes = False\n readelems = False\n skipline = 0\n elems = []\n lnum = 0\n nnodes = 0\n for line in gmshlines:\n if \"$Nodes\" in line:\n readnodes = True\n skipline = 2\n nnodes = int(gmshlines[lnum + 1].strip())\n nodes = []\n for i in range(nnodes):\n nodes.append(99999.9)\n elif \"$EndNodes\" in line:\n readnodes = False\n skipline = 1\n elif \"$Elements\" in line:\n readelems = True\n skipline = 2\n elif \"$EndElements\" in line:\n readelems = False\n skipline = 1\n if skipline < 1:\n if readnodes:\n nXYZ = line.strip().split()\n nodenum = int(nXYZ[0]) - 1\n nX = float(nXYZ[1]) * scale + dx\n nY = float(nXYZ[2]) * scale + dy\n nZ = float(nXYZ[3]) * scale + dz\n if neg_normal:\n nZ *= -1\n nodes[nodenum] = [nX, nY, nZ]\n elif readelems:\n n123 = line.split()\n if n123[1] == \"2\":\n n1 = int(n123[-3]) - 1\n n2 = int(n123[-1]) - 1\n n3 = int(n123[-2]) - 1\n elems.append([n1, n2, n3])\n else:\n skipline -= 1\n lnum += 1\n triarray = []\n normarray = []\n avenorms = []\n nodeavenorms = getAveNormals(nodes, elems)\n for elem in elems:\n vert1 = [nodes[elem[0]][0], nodes[elem[0]][1],\n nodes[elem[0]][2]]\n vert2 = [nodes[elem[1]][0], nodes[elem[1]][1],\n nodes[elem[1]][2]]\n vert3 = [nodes[elem[2]][0], nodes[elem[2]][1],\n nodes[elem[2]][2]]\n avenorm0 = nodeavenorms[elem[0]]\n avenorm1 = nodeavenorms[elem[1]]\n avenorm2 = nodeavenorms[elem[2]]\n normals = getNormals(vert1, vert2, vert3)\n triarray.append(vert1)\n triarray.append(vert2)\n triarray.append(vert3)\n normarray.append(normals)\n normarray.append(normals)\n normarray.append(normals)\n avenorms.append(avenorm0)\n avenorms.append(avenorm1)\n avenorms.append(avenorm2)\n return triarray, normarray, avenorms\n\n except:\n raise Exception(\"error al cargar el modelo\")",
"def processLoading(self, cs, dbLoad: bool = False):\n runLog.header(\n \"=========== Initializing Mesh, Assembly Zones, and Nuclide Categories =========== \"\n )\n\n for b in self.getBlocks():\n if b.p.molesHmBOL > 0.0:\n break\n else:\n # Good easter egg, but sometimes a user will want to use the framework do\n # only decay analyses and heavy metals are not required.\n runLog.warning(\n \"The system has no heavy metal and therefore is not a nuclear reactor.\\n\"\n \"Please make sure that this is intended and not a input error.\"\n )\n\n if dbLoad:\n # reactor.blueprints.assemblies need to be populated\n # this normally happens during armi/reactor/blueprints/__init__.py::constructAssem\n # but for DB load, this is not called so it must be here.\n self.parent.blueprints._prepConstruction(cs)\n else:\n # set reactor level meshing params\n nonUniformAssems = [\n Flags.fromStringIgnoreErrors(t) for t in cs[\"nonUniformAssemFlags\"]\n ]\n # some assemblies, like control assemblies, have a non-conforming mesh\n # and should not be included in self.p.referenceBlockAxialMesh and self.p.axialMesh\n uniformAssems = [\n a\n for a in self.getAssemblies()\n if not any(a.hasFlags(f) for f in nonUniformAssems)\n ]\n self.p.referenceBlockAxialMesh = self.findAllAxialMeshPoints(\n assems=uniformAssems,\n applySubMesh=False,\n )\n self.p.axialMesh = self.findAllAxialMeshPoints(\n assems=uniformAssems,\n applySubMesh=True,\n )\n\n self.numRings = self.getNumRings() # TODO: why needed?\n\n self.getNuclideCategories()\n\n # Generate list of flags that are to be stationary during assembly shuffling\n stationaryBlockFlags = []\n\n for stationaryBlockFlagString in cs[\"stationaryBlockFlags\"]:\n stationaryBlockFlags.append(Flags.fromString(stationaryBlockFlagString))\n\n self.stationaryBlockFlagsList = stationaryBlockFlags\n\n self.setBlockMassParams()\n\n self.p.maxAssemNum = self.getMaxParam(\"assemNum\")\n\n getPluginManagerOrFail().hook.onProcessCoreLoading(\n core=self, cs=cs, dbLoad=dbLoad\n )",
"def load(self):\n #print self.fileInfo.name\n progress = self.progress\n filePath = os.path.join(self.fileInfo.dir,self.fileInfo.name)\n self.fileSize = os.path.getsize(filePath)\n #--Localize\n cells = self.cells\n records = self.records\n canSave = self.canSave\n skipObjRecords = self.skipObjRecords\n contTypes = set(['CREC','CNTC','NPCC'])\n levTypes = set(('LEVC','LEVI'))\n debrisIds = self.debrisIds\n debrisTypes = set(debrisIds.keys())\n #--Header\n inPath = os.path.join(self.fileInfo.dir,self.fileInfo.name)\n ins = Tes3Reader(self.fileInfo.name,file(inPath,'rb'))\n (name,size,delFlag,recFlag) = ins.unpackRecHeader()\n self.tes3 = Tes3(name,size,delFlag,recFlag,ins,True)\n if not canSave: del self.tes3.others[:]\n #--Progress info\n progress = self.progress\n progress(0.0,'Loading '+self.fileInfo.name)\n #--Raw data read\n while not ins.atEnd():\n #--Get record info and handle it\n (name,size,delFlag,recFlag) = ins.unpackRecHeader()\n #print \"%s [%d]\" % (name,size)\n #--CELL?\n if name == 'CELL':\n record = Cell(name,size,delFlag,recFlag,ins,0,skipObjRecords)\n cells.append(record)\n if canSave: records.append(record)\n #--Contents\n elif canSave and name in contTypes:\n if name == 'CREC':\n record = Crec(name,size,delFlag,recFlag,ins,True)\n elif name == 'CNTC':\n record = Cntc(name,size,delFlag,recFlag,ins,True)\n else:\n record = Npcc(name,size,delFlag,recFlag,ins,True)\n self.conts.append(record)\n self.conts_id[record.getId()] = record\n records.append(record)\n #--File Map\n elif name == 'FMAP':\n record = Fmap(name,size,delFlag,recFlag,ins)\n self.fmap = record\n records.append(record)\n #--Landscapes\n elif name == 'LAND':\n record = Land(name,size,delFlag,recFlag,ins)\n self.lands[record.getId()] = record\n records.append(record)\n #--Scripts\n elif canSave and name == 'SCPT':\n record = Scpt(name,size,delFlag,recFlag,ins,True)\n records.append(record)\n if record.getRef():\n self.refs_scpt[record] = record.getRef()\n #--Save debris info?\n elif name in debrisTypes:\n record = Record(name,size,delFlag,recFlag,ins)\n id = record.getId()\n if id:\n debrisIds[name].append(id.lower())\n if canSave:\n records.append(record)\n #--Skip Non-cell?\n elif not canSave:\n ins.seek(size,1,name)\n #--Keep non-cell?\n else:\n records.append(Record(name,size,delFlag,recFlag,ins))\n #--Done Reading\n ins.close()\n #--Analyze Cells\n cntCells = 0\n progress.setMax(len(self.cells))\n for cell in self.cells:\n cell.load(None,1)\n self.cells_id[cell.getId()] = cell\n if not canSave:\n cell.data = None #--Free some memory\n #--Progress\n cntCells += 1\n progress(cntCells)\n #--Scripts\n if self.refs_scpt:\n self.updateScptRefs()",
"def load_spine_meshes(self):\n # Load all the template spines and ignore the verbose messages of loading\n nmv.utilities.disable_std_output()\n self.spine_meshes = nmv.file.load_spines(nmv.consts.Paths.SPINES_MESHES_HQ_DIRECTORY)\n nmv.utilities.enable_std_output()\n\n # Create the material\n material = nmv.shading.create_material(\n name='%spine_material', color=self.options.mesh.spines_color,\n material_type=self.options.mesh.material)\n\n # Apply the shader\n for spine_object in self.spine_meshes:\n\n # Apply the shader to each spine mesh\n nmv.shading.set_material_to_object(spine_object, material)",
"def import_mesh(self, scenegroup):\n logger.debug((\"mesh\", scenegroup[\"asset\"]))\n if scenegroup[\"asset\"] in self._imported_assets:\n return self._imported_assets[scenegroup[\"asset\"]]\n asset = self.gridinfo.getAsset(scenegroup[\"asset\"])\n if not asset[\"type\"] == str(AssetType.OgreMesh):\n logger.debug(\"(\"+asset[\"type\"]+\")\")\n return\n materials = []\n if \"materials\" in scenegroup:\n materials = scenegroup[\"materials\"]\n mesh = self.create_mesh_frombinary(scenegroup[\"asset\"], asset[\"name\"], asset[\"data\"])\n return self.create_mesh_fromomesh(scenegroup[\"asset\"], asset[\"name\"],\n mesh, materials)",
"def _load_msh(self):\n log_gui.debug(\"_load_msh begin\")\n mod = self._mod\n mesh = mod.load_mesh_from_selection()\n if not mesh:\n mod.launch(GC.ERROR, \"The selected entry is not a mesh \"\n \"or the SMESH component must be activated\")\n return \n self._lab.setText(mesh.read_name())\n self._exp_sel.notify(mesh)\n log_gui.debug(\"_load_msh end\")",
"def read_from_ses3d_block(directory):\n\n # Initialise arrays of Cartesian coordinates.\n\n x=[]\n y=[]\n z=[]\n\n # Read yaml file containing information on the ses3d submodel.\n with io.open(os.path.join(directory,'modelinfo.yml'), 'rt') as fh:\n model_info = yaml.load(fh)\n\n rot_vec = np.array([model_info['geometry']['rot_x'], model_info['geometry']['rot_y'], model_info['geometry']['rot_z']])\n rot_angle = model_info['geometry']['rot_angle']\n\n # Read block files.\n\n fid_x = open(os.path.join(directory,'block_x'), 'r')\n fid_y = open(os.path.join(directory,'block_y'), 'r')\n fid_z = open(os.path.join(directory,'block_z'), 'r')\n\n dx = np.array(fid_x.read().strip().split('\\n'), dtype=float)\n dy = np.array(fid_y.read().strip().split('\\n'), dtype=float)\n dz = np.array(fid_z.read().strip().split('\\n'), dtype=float)\n\n fid_x.close()\n fid_y.close()\n fid_z.close()\n\n # Setup of coordinate lines.\n\n nsubvol = int(dx[0])\n\n idx = np.ones(nsubvol, dtype=int)\n idy = np.ones(nsubvol, dtype=int)\n idz = np.ones(nsubvol, dtype=int)\n\n for k in np.arange(1, nsubvol, dtype=int):\n idx[k] = int(dx[idx[k - 1]]) + idx[k - 1] + 1\n idy[k] = int(dy[idy[k - 1]]) + idy[k - 1] + 1\n idz[k] = int(dz[idz[k - 1]]) + idz[k - 1] + 1\n\n for k in np.arange(nsubvol, dtype=int):\n\n # Coordinates of the box corners.\n colat = dx[(idx[k] + 1):(idx[k] + 1 + int(dx[idx[k]]))]\n lon = dy[(idy[k] + 1):(idy[k] + 1 + int(dy[idy[k]]))]\n rad = dz[(idz[k] + 1):(idz[k] + 1 + int(dz[idz[k]]))]\n \n # Coordinates of the box centroids.\n colat_c = (np.array(colat[0:-1])+np.array(colat[1:]))/2.0\n lon_c = (np.array(lon[0:-1]) + np.array(lon[1:]))/2.0\n rad_c = (np.array(rad[0:-1]) + np.array(rad[1:]))/2.0\n \n # Compute Cartesian coordinates for all grid points.\n for c in colat_c:\n for l in lon_c:\n xx=np.cos(l*np.pi/180.0)*np.sin(c*np.pi/180.0)\n yy=np.sin(l*np.pi/180.0)*np.sin(c*np.pi/180.0)\n zz=np.cos(c*np.pi/180.0)\n for r in rad_c:\n x.append(r*xx)\n y.append(r*yy)\n z.append(r*zz)\n \n\n # Rotate, if needed.\n\n if (rot_angle!=0.0):\n rot_mat = get_rot_matrix(rot_angle*np.pi/180.0, *rot_vec)\n x, y, z = rotate(x, y, z, rot_mat)\n\n # Return.\n\n return x, y, z",
"def _load_all_cubes(self, files_to_load):\n if self.process_workers > 1:\n arguments = [[self, load_file] for load_file in files_to_load]\n pool = multiprocessing.Pool(processes=self.process_workers)\n try:\n all_cubelists = pool.map(run_load_file, arguments)\n pool.close()\n pool.join()\n except KeyboardInterrupt:\n pool.terminate()\n else:\n all_cubelists = []\n for load_file in files_to_load:\n cubelist = self._load_file(load_file)\n if cubelist:\n all_cubelists.append(cubelist)\n \n all_cubes = []\n for cubelist in all_cubelists:\n for cube in cubelist:\n all_cubes.append(cube)\n\n if len(all_cubes) == 0:\n raise UserWarning('No data loaded.')\n \n # Gather universal information from the first cube.\n if self.xy_coords is None:\n self.xy_coords = [coord.name() \n for coord in get_xy_coords(\n all_cubes[0])]\n if self._area_inst.bounds_range is None:\n self._area_inst.bounds_range = self._area_inst.\\\n get_cube_area_bounds(all_cubes[0],\n self.xy_coords)\n if self.area_bounds is None:\n self.area_bounds = self._area_inst.get_cube_area_bounds(\n all_cubes[0],\n self.xy_coords)\n self.time_unit = all_cubes[0].coord(self.time_coord).units\n \n return iris.cube.CubeList(all_cubes)",
"def loadDicomsFromDatabase(self, dicomFiles):\n\n #--------------------\n # Create dictionary of downloaded DICOMS\n # for quick retrieval when comparing with files\n # in the slicer.dicomDatabase. Speed preferred over\n # memory consumption here.\n #-------------------- \n dlDicomObj = {}\n for dlFile in dicomFiles:\n dlDicomObj[os.path.basename(dlFile)] = dlFile\n\n\n \n #--------------------\n # Parse through the slicer.dicomDatabase\n # to get all of the files, as determined by series.\n #--------------------\n matchedDatabaseFiles = []\n for patient in slicer.dicomDatabase.patients():\n for study in slicer.dicomDatabase.studiesForPatient(patient):\n for series in slicer.dicomDatabase.seriesForStudy(study):\n seriesFiles = slicer.dicomDatabase.filesForSeries(series)\n #\n # Compare files in series with what was just downloaded.\n # If there's a match, append to 'matchedDatabaseFiles'.\n #\n for sFile in seriesFiles:\n if os.path.basename(sFile) in dlDicomObj: \n matchedDatabaseFiles.append(sFile)\n\n\n \n #--------------------\n # Acquire loadabes as determined by\n # the 'DICOMScalarVolumePlugin' class, by feeding in \n # 'matchedDatabaseFiles' as a nested array.\n #--------------------\n dicomScalarVolumePlugin = \\\n slicer.modules.dicomPlugins['DICOMScalarVolumePlugin']()\n loadables = dicomScalarVolumePlugin.examine([matchedDatabaseFiles])\n\n\n \n #--------------------\n # Determine loadable with the highest file count. \n # This is usually all DICOM files collated as one volume.\n #--------------------\n highestFileCount = 0\n highestFileCountIndex = 0\n for i in range(0, len(loadables)):\n if len(loadables[i].files) > highestFileCount:\n highestFileCount = len(loadables[i].files)\n highestFileCountIndex = i\n\n\n \n #--------------------\n # Load loadable with the highest file count.\n # This is assumed to be the volume file that contains\n # the majority of the downloaded DICOMS.\n #--------------------\n dicomScalarVolumePlugin.load(loadables[highestFileCountIndex])\n \n\n\n \n #--------------------\n # Return true if login successful.\n #-------------------- \n return True"
] | [
"0.6619064",
"0.6575824",
"0.5888349",
"0.5782488",
"0.5687946",
"0.56595165",
"0.55847037",
"0.5549612",
"0.5535285",
"0.54974794",
"0.5464577",
"0.54594773",
"0.54511726",
"0.5433815",
"0.5415109",
"0.53885984",
"0.53758585",
"0.5352629",
"0.5302441",
"0.53019255",
"0.5300438",
"0.529536",
"0.52821213",
"0.52661",
"0.5265331",
"0.5264014",
"0.52488303",
"0.52440494",
"0.52367777",
"0.5186182"
] | 0.7286509 | 0 |
Loads spline files of extension `.iges` `.xml` `.itd` | def load_splines(fname):
    fname = str(fname)
    fname = abs_fname_(fname)
    sr = splinelibpy.Reader()
    ext = os.path.splitext(fname)[1]
    if ext == ".iges":
        loaded_splines = sr.read_iges(fname)
    elif ext == ".xml":
        loaded_splines = sr.read_xml(fname)
    elif ext == ".itd":
        loaded_splines = sr.read_irit(fname)
    else:
        raise ImportError(
            "We can only import < .iges | .xml | .itd > spline files"
        )
    splines = []
    # Format s => [weights, degrees, knot_vectors, control_points]
    for s in loaded_splines:
        if s[0] is None:
            # Bbspline.
            tmp_spline = BSpline()
            tmp_spline.degrees = s[1]
            tmp_spline.knot_vectors = s[2]
            tmp_spline.control_points = s[3]
            splines.append(tmp_spline)
        else:
            # Make nurbs
            tmp_spline = NURBS()
            tmp_spline.weights = s[0]
            tmp_spline.degrees = s[1]
            tmp_spline.knot_vectors = s[2]
            tmp_spline.control_points = s[3]
            splines.append(tmp_spline)
    return splines | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load(self):\n if self.__fname == '':\n print('You must pass in a file name to load!')\n return []\n\n ext = os.path.splitext(self.__fname)[1]\n first_pt = None\n if len(self.__fea.points) > 0:\n first_pt = self.__fea.points[0]\n if ext == '.dxf':\n parts = self.__load_dxf()\n elif ext in ['.brep', '.brp', '.iges', '.igs', '.step', '.stp']:\n self.__make_geo()\n parts = self.__load_geo()\n last_pt = None\n if first_pt != None:\n if len(self.__fea.points) > 2:\n last_pt = self.__fea.points[-1]\n if self.__scale != '':\n # call scale\n pass\n return parts",
"def loadFNIRS(self,filepath):\r\n self.tree = ET.parse(filepath)\r\n self.data = self.tree.getroot().find(\"data\")\r\n self.samplerate = float(self.tree.getroot().find('device').find('samplerate').text)\r\n self.sensors = [i.text for i in self.tree.getroot().find('columns')]\r\n self.sensorMask = [True]*len(self.sensors)\r\n self.measurements = len(self.tree.getroot().find('data'))",
"def load_interpolator(self):\n filename = f'interpolator_{self.source}_V{self.version}'\n filepath = os.path.join(GRIDS_PATH, 'sources', self.source,\n 'interpolator', filename)\n self.printv(f'Loading interpolator: {filepath}')\n self.interpolator = pickle.load(open(filepath, 'rb'))",
"def load_data(self):\n\t\ti = 0\n\n\t\tpaths = glob.glob(self.file_path+'/rollout_*')\n\t\tself.rollouts = []\n\n\n\t\tfor path in paths:\n\t\t\tdata_point = np.load(path,encoding='latin1')\n\t\t\tself.rollouts.append(data_point)\n\n\t\treturn paths",
"def LoadBatch(filename):",
"def load(path):\n pass",
"def load_cityscapes(path, fdr):\n dataset = Dataset(path, split='val', mode=\"fine\", target_type=[\"semantic\", \"instance\"])\n\n from PATH import SCRI_PATH as spath\n\n for image, (sseg, inst), name in dataset:\n image = np.array(image)\n sseg = gt_covert(sseg)\n inst = np.array(inst)\n if os.path.exists(spath + \"/\" + fdr + \"/\" + name + \"_scri.png\"):\n scribbles = np.array(Image.open(spath + \"/\" + fdr + \"/\" + name + \"_scri.png\"))\n else:\n scribbles = None\n # scribbles = scribble_convert(scribbles)\n yield name, image, sseg, inst, scribbles",
"def load_pts_features(path):\n\n #\n # Your code here\n #\n\n pts = [np.empty((123, 2)), np.empty((123, 2))]\n feats = [np.empty((123, 128)), np.empty((123, 128))]\n\n return pts, feats",
"def load_senzory_locations(file_name):\n check_file_existence(file_name)\n _, ext = os.path.splitext(file_name)\n if ext == '.mat':\n return load_senzory_locations_from_matlab(file_name)\n elif ext == '.csv':\n return load_senzory_locations_from_csv(file_name)\n else:\n raise ValueError(\"Unknown file type at {}. Expected .mat or .csv\".format(file_name))",
"def load_many_images(paths):\r\n \r\n lpop = __g.pop\r\n \r\n for k in __g.keys()[1:]:\r\n lpop(k)\r\n \r\n if type(paths) == str or type(paths) == tuple and len(paths) == 2 and type(paths[0]) == int:\r\n __g[1] = Surface(paths)\r\n elif type(paths) == list:\r\n for p in range(1, len(paths) + 1):\r\n __g[p] = Surface(paths[p-1])",
"def load_storedsens(self):\n\n l = glob.glob(self.gireds_dir + '/data/*.fits')\n l.sort()\n idx = np.arange(len(l))\n\n headers = [fits.open(i)[0].header for i in l]\n\n field_names = ['filename', 'observatory', 'instrument', 'detector',\n 'grating', 'filter1', 'maskname']\n types = ['S120'] + ['S60' for i in range(6)]\n hdrkeys = ['observat', 'instrume', 'detector', 'grating', 'filter1',\n 'maskname']\n\n hdrpars_type = [\n (field_names[i], types[i]) for i in range(len(field_names))]\n\n stored_sensfunc = np.array([\n ((l[i],) + tuple([headers[i][j] for j in hdrkeys])) for i in idx],\n dtype=hdrpars_type)\n\n self.stored_sensfunc = stored_sensfunc",
"def load(self, path):\n pass",
"def load(self, path):\n pass",
"def test_load_gtis(self):\n fits_file = os.path.join(self.datadir, 'monol_testA.evt')\n hen.io.load_gtis(fits_file)",
"def read_gp_files_into_models(dataset, path_saved_gp_files):\n print('Restarting from stored Gaussian Processes...')\n time_start_reading = time.time()\n for obj in dataset.object_names:\n obj_saved_gps_file = os.path.join(path_saved_gp_files, 'gp_' + obj)\n try:\n obj_saved_gps = Table.read(obj_saved_gps_file, format='ascii')\n except UnicodeDecodeError:\n obj_saved_gps = Table.read(obj_saved_gps_file, format='fits',\n character_as_bytes=False)\n except FileNotFoundError:\n print('The file {} does not exist.'.format(obj_saved_gps_file))\n dataset.models[obj] = obj_saved_gps\n print('Models fitted with the Gaussian Processes values.')\n print_time_difference(time_start_reading, time.time())",
"def load(self, path: str):\n pass",
"def load_file(path, data_type=None, *args, **kwargs):\n\n path = os.path.normpath(path)\n if os.path.isdir(path) and path[-1] != os.sep:\n path = path + os.sep\n\n if data_type == None:\n data_type = autodetect(path)\n\n if data_type == \"prospa\":\n return dnpIO.prospa.import_prospa(path, *args, **kwargs)\n\n elif data_type == \"topspin\":\n return dnpIO.topspin.import_topspin(path, *args, **kwargs)\n\n elif data_type == \"topspin dir\":\n return dnpIO.topspin.import_topspin_dir(path, *args, **kwargs)\n\n elif data_type == \"delta\":\n return dnpIO.delta.import_delta(path, *args, **kwargs)\n\n elif data_type == \"vnmrj\":\n return dnpIO.vnmrj.import_vnmrj(path, *args, **kwargs)\n\n elif data_type == \"tnmr\":\n return dnpIO.tnmr.import_tnmr(path, *args, **kwargs)\n\n elif data_type == \"specman\":\n return dnpIO.specman.import_specman(path, *args, **kwargs)\n\n elif data_type == \"xepr\" or data_type == \"xenon\":\n return dnpIO.bes3t.import_bes3t(path, *args, **kwargs)\n\n elif data_type == \"winepr\" or data_type == \"esp\":\n return dnpIO.winepr.import_winepr(path, *args, **kwargs)\n\n elif data_type == \"h5\":\n return dnpIO.h5.load_h5(path, *args, **kwargs)\n\n elif data_type == \"power\":\n return dnpIO.power.importPower(path, *args, **kwargs)\n\n elif data_type == \"vna\":\n return dnpIO.vna.import_vna(path, *args, **kwargs)\n\n elif data_type == \"cnsi_powers\":\n return dnpIO.cnsi.get_powers(path, *args, **kwargs)\n\n else:\n raise ValueError(\"Invalid data type: %s\" % data_type)",
"def loadFile(filterExt):\n basicFilter = \"*.\" + filterExt\n filePath = fileDialog2(fileFilter=basicFilter, dialogStyle=2, fm=1)\n if(filePath != None):\n #openfile = open('/Users/camtton/Desktop/drawing.svg', 'r')\n tokens = getSVGpath(filePath[0])\n return tokens\n else:\n print 'Please select a %s file'%(filterExt)",
"def load_features(feature_path):\n if not os.path.exists(os.path.join(feature_path, f\"0_features.npy\")): \n raise ValueError(f\"The provided location {feature_path} does not contain any representation files\")\n\n ds_list, chunk_id = [], 0\n while os.path.exists(os.path.join(feature_path, f\"{chunk_id}_features.npy\")): \n features = ch.from_numpy(np.load(os.path.join(feature_path, f\"{chunk_id}_features.npy\"))).float()\n labels = ch.from_numpy(np.load(os.path.join(feature_path, f\"{chunk_id}_labels.npy\"))).long()\n ds_list.append(ch.utils.data.TensorDataset(features, labels))\n chunk_id += 1\n\n print(f\"==> loaded {chunk_id} files of representations...\")\n return ch.utils.data.ConcatDataset(ds_list)",
"def load_ZG(iso,s=0.00005):\n Zs= iso.Zs()\n Zs= Zs[Zs < 0.05]\n tage= iso.logages()[62]\n Gs= numpy.array([G(tage,z,iso)[0] for z in Zs])\n sindx= numpy.argsort(Gs)\n Zs= Zs[sindx]\n Gs= Gs[sindx]\n goodIndx= True-numpy.isnan(Gs)\n return interpolate.UnivariateSpline(Gs[goodIndx],Zs[goodIndx],k=3,\n s=s)",
"def _load_sources(self):\n self.point_sources= []\n if os.path.exists(os.path.join(self.folder,'pickle.zip')):\n pzip = zipfile.ZipFile(os.path.join(self.folder,'pickle.zip'))\n files = ['pickle/HP12_%04d.pickle' %i for i in range(1728)]\n assert all(f in pzip.namelist() for f in files), 'Improper model zip file'\n opener = pzip.open\n else:\n files = glob.glob(os.path.join(self.folder, 'pickle', '*.pickle'))\n files.sort()\n opener = open\n self.nside = int(np.sqrt(len(files)/12))\n if len(files) != 12*self.nside**2:\n msg = 'Number of pickled ROI files, %d, found in folder %s, not consistent with HEALpix' \\\n % (len(files),os.path.join(self.folder, 'pickle'))\n raise Exception(msg)\n \n ####self.global_sources = sources.GlobalSourceList() # allocate list to index parameters for global sources\n self.extended_sources=[] # list of unique extended sources\n self.changed=set() # to keep track of extended models that are different from catalog\n moved=0\n nfreed = 0\n self.tagged=set()\n source_names =[]\n for i,file in enumerate(files):\n p = pickle.load(opener(file))\n index = int(os.path.splitext(file)[0][-4:])\n assert i==index, 'logic error: file name %s inconsistent with expected index %d' % (file, i)\n roi_sources = p.get('sources', {}) # don't know why this needed\n extended_names = {} if (self.__dict__.get('extended_catalog') is None) else self.extended_catalog.names\n for key,item in roi_sources.items():\n if key in extended_names: continue\n if key in source_names:\n #if not self.quiet: print ('SkyModel warning: source with name %s in ROI %d duplicates previous entry: ignored'%(key, i))\n continue\n source_names.append(key)\n skydir = item['skydir']\n if self.update_positions is not None:\n ellipse = item.get('ellipse', None)\n ts = item['ts']\n if ellipse is not None and not np.any(np.isnan(ellipse)) :\n fit_ra, fit_dec, a, b, ang, qual, delta_ts = ellipse\n if qual<5 and a < 0.2 and \\\n ts>self.update_positions and delta_ts>0.1:\n skydir = SkyDir(float(fit_ra),float(fit_dec))\n moved +=1\n self.tagged.add(i)\n \n ps = sources.PointSource(name=key,\n skydir=skydir, model= sources.convert_model(item['model']),\n ts=item['ts'],band_ts=item['band_ts'], index=index)\n if sources.validate(ps,self.nside, self.filter):\n self._check_position(ps) # check that it is not coincident with previous source(warning for now?)\n self.point_sources.append( ps)\n # make a list of extended sources used in the model \n names = p.get('diffuse_names')\n for name, oldmodel in zip(names, p['diffuse']):\n model = sources.convert_model(oldmodel) # convert from old Model version if necessary \n key = name.split('_')[0]\n if key in self.diffuse_dict:\n self.diffuse_dict.add_model(index, name, model)\n elif self.extended_catalog_name=='ignore': \n continue\n else:\n try:\n es = self.extended_catalog.lookup(name) if self.extended_catalog is not None else None\n except Exception as msg:\n print ('Skymodel: Failed to create model for %s' %name)\n raise\n if es is None:\n #raise Exception( 'Extended source %s not found in extended catalog' %name)\n print ('SkyModel warning: Extended source %s not found in extended catalog, removing' %name)\n continue\n if self.hpindex(es.skydir)!=index: continue\n \n if es.model.name!=model.name:\n if name not in self.changed:\n if not self.quiet: print ('SkyModel warning: catalog model %s changed from %s for source %s: keeping change'%\\\n (es.model.name, model.name, name))\n self.changed.add(name)\n es.smodel=es.model=model #update with current fit values always\n if 
sources.validate(es,self.nside, self.filter): #lambda x: True): \n self.extended_sources.append(es)\n # check for new extended sources not yet in model\n self._check_for_extended()\n if self.update_positions and moved>0:\n print ('updated positions of %d sources, healpix ids in tagged' % moved)",
"def load_data_from_files(self):\n # separated method to allow mock easier\n logger.info(\"Loading data...\")\n parent = Path(__file__).parent\n path = parent / \"resources\" / \"scores.txt\"\n self.scorer.load_from_file(path)\n path = parent / \"resources\" / \"american-english-large.txt\"\n self.trie.load_from_file(path)\n path = parent / \"resources\" / \"reels.txt\"\n self.reels = Reel.get_from_file(path)\n logger.info(\"Data loaded!\")",
"def load_assets(self, paths):\n try:\n self.background = load(paths['background'])\n self.bullet = load(paths['bullet'])\n self.bullet_red = load(paths['bullet_red'])\n self.icon = load(paths['icon'])\n\n self.Ship = load(paths['ship'])\n self.Ship_CR = load(paths['ship_cr'])\n self.Ship_CL = load(paths['ship_cl'])\n self.Ship_CC = load(paths['ship_cc'])\n\n self.InvaderA1 = load(paths['invadera1'])\n self.InvaderA2 = load(paths['invadera2'])\n self.InvaderB1 = load(paths['invaderb1'])\n self.InvaderB2 = load(paths['invaderb2'])\n self.InvaderC1 = load(paths['invaderc1'])\n self.InvaderC2 = load(paths['invaderc2'])\n\n except Exception as e:\n print(\" \"+str(e))\n return 0\n else:\n return 1",
"def load_pathway(path_idx=1, preprocess=True):\n\n assert path_idx in [1, 2], 'Unavailable index, must be 1 or 2.'\n url = f'https://raw.githubusercontent.com/PengTao-HUST/GDNB/master/data/pathway{path_idx}.txt'\n cache_dir = sys.modules['gdnb'].__path__[0] + '/data/'\n\n if not os.path.exists(cache_dir):\n os.makedirs(cache_dir)\n\n data_file = os.path.basename(url)\n full_path = cache_dir + data_file\n\n if not os.path.exists(full_path):\n urlretrieve(url, cache_dir + data_file)\n\n if preprocess:\n traj = np.loadtxt(full_path)\n traj = np.apply_along_axis(normalize_by_mean, 0, traj[:, 1:])\n disdat = traj.reshape(10, 50, -1).transpose((0, 2, 1))\n return disdat\n else:\n return full_path",
"def load(self, path: Union[str, pathlib.Path]):\n super().load(path)\n path = pathlib.Path(path)\n elite_path = path / self._ELITE_FNAME\n if pathlib.Path.is_file(elite_path):\n warnings.warn(\n \"Future versions of GaussianMLP will load elite models from the same \"\n \"checkpoint file as the model weights.\"\n )\n with open(elite_path, \"rb\") as f:\n self.elite_models = pickle.load(f)\n else:\n warnings.warn(\"No elite model information found in model load directory.\")",
"def load_paths(self, paths):\n paths = list(str(p) for p in paths)\n\n # This is where more cleverness will go if/when needed.\n\n return SimpleFitsCollection(\n paths,\n hdu_index=self.hdu_index,\n blankval=self.blankval,\n )",
"def test_load_gtis(self):\n fname = os.path.join(datadir, 'monol_testA.evt')\n load_gtis(fname, gtistring=\"GTI\")",
"def load_graph(self, path):\n if path.split('.')[-1]=='gexf':\n self.graph = nx.read_gexf(path)\n else:\n self.graph = nx.read_gpickle(path)",
"def load(src_path):\n satdat = rasterio.open(src_path)\n return satdat",
"def load(file_name):\n ferme_fenetre()\n Hitori(file_name)"
] | [
"0.598407",
"0.5837179",
"0.56815714",
"0.5675505",
"0.5656372",
"0.56464905",
"0.56287754",
"0.55418855",
"0.5446652",
"0.54027593",
"0.5383158",
"0.5353577",
"0.5353577",
"0.53077507",
"0.5288568",
"0.5288151",
"0.5274512",
"0.52620083",
"0.5251903",
"0.52356094",
"0.523395",
"0.52265716",
"0.5208644",
"0.5203579",
"0.51992327",
"0.5190051",
"0.51762176",
"0.51659703",
"0.5157134",
"0.5156305"
] | 0.71475726 | 0 |
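A minimal usage sketch for the load_splines record above (the file path is hypothetical; it assumes the same module that provides BSpline, NURBS and the splinelibpy Reader is importable):

    # Read an IGES file; .xml and .itd are dispatched the same way by extension.
    splines = load_splines("~/geometry/wing_section.iges")
    for s in splines:
        print(type(s).__name__, s.degrees)  # BSpline or NURBS, with its degrees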
Checks if fname is absolute. If not, turns it into an abspath. Tilde safe. | def abs_fname_(fname):
    if os.path.isabs(fname):
        pass
    elif '~' in fname:
        fname = os.path.expanduser(fname)
    else:
        fname = os.path.abspath(fname)
    return fname | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _makeAbsolute(fname):\n if fname[0] != '/':\n return os.path.join(os.getcwd(), fname)\n else:\n return fname",
"def getAbsFileName(fname):\n\tfileAbsPath=os.path.abspath(fname)\n\treturn fileAbsPath",
"def abspath(filename, relative_to = None):\n # Create filename relative to the reference, if it exists.\n import os.path\n fname = from_posix(filename)\n if relative_to and not os.path.isabs(fname):\n relative_to = from_posix(relative_to)\n if os.path.isdir(relative_to):\n fname = os.path.join(relative_to, fname)\n else:\n fname = os.path.join(os.path.dirname(relative_to), fname)\n\n # Make the result canonical\n fname = canonical_filename(fname)\n return to_posix(fname)",
"def check_absolute_path(path):\n current_dir = os.getcwd()\n if os.path.isabs(path) is False:\n if str(path).startswith(\"./\"):\n return current_dir + path[1:]\n else:\n return current_dir + \"/\" + path\n else:\n return path",
"def robust_abspath(p):\n try:\n return abspath(p)\n except OSError as exc:\n if not isabs(p):\n try:\n os.getcwd()\n # if no exception raised it was not the reason, raise original\n raise\n except:\n return normpath(join(getpwd(), p))\n raise",
"def get_abs(s):\n return os.path.abspath(s)",
"def normalize_path(working_dir, filename):\n\n if not os.path.isabs(filename):\n filename = os.path.join(working_dir, filename)\n\n return filename",
"def ensure_file_abs_path_valid(file_abs_path: Text) -> Text:\n project_meta = load_project_meta(file_abs_path)\n raw_abs_file_name, file_suffix = os.path.splitext(file_abs_path)\n file_suffix = file_suffix.lower()\n\n raw_file_relative_name = convert_relative_project_root_dir(raw_abs_file_name)\n if raw_file_relative_name == \"\":\n return file_abs_path\n\n path_names = []\n for name in raw_file_relative_name.rstrip(os.sep).split(os.sep):\n\n if name[0] in string.digits:\n # ensure file name not startswith digit\n # 19 => T19, 2C => T2C\n name = f\"T{name}\"\n\n if name.startswith(\".\"):\n # avoid \".csv\" been converted to \"_csv\"\n pass\n else:\n # handle cases when directory name includes dot/hyphen/space\n name = name.replace(\" \", \"_\").replace(\".\", \"_\").replace(\"-\", \"_\")\n\n path_names.append(name)\n\n new_file_path = os.path.join(\n project_meta.RootDir, f\"{os.sep.join(path_names)}{file_suffix}\"\n )\n return new_file_path",
"def test_sanitized_filename(self):\n value = \"/absolute/path/to/the/file.txt\"\n response = clean.filename(value)\n assert response == \"file.txt\"\n\n value = \"../relative/path/to/the/file.txt\"\n response = clean.filename(value)\n assert response == \"file.txt\"",
"def abspath(fpath):\n from os import path, getcwd, chdir\n original = getcwd()\n chdir(reporoot)\n result = path.abspath(path.expanduser(fpath))\n chdir(original)\n return result",
"def normalizeFilename(filename):\n return os.path.abspath(os.path.expanduser(filename))",
"def force_absolute(base, path):\n if os.path.abspath(path) and os.path.exists(path):\n return path\n else:\n return path_format(base + path)",
"def _normalized_path(path):\n return os.path.abspath(os.path.expanduser(path))",
"def _abspath(filename):\r\n if os.path.isabs(filename):\r\n return filename\r\n return os.path.join(settings.MEDIA_ROOT, filename)",
"def absolute_path(path):\n return os.path.abspath(os.path.normpath(path))",
"def _make_abspath(value):\n value = value.strip()\n if not os.path.isabs(value):\n value = os.path.abspath(os.path.join(os.getcwd(), value))\n return value",
"def to_absolute_path(path):\n if not os.path.isabs(path):\n return os.path.join(os.getcwd(), path)\n else:\n return path",
"def is_absolute_path(path: str) -> bool:\n # This is a rather weak test, may be enhanced if desired\n return \"//\" in path \\\n or \":\" in path \\\n or path.startswith(\"/\")",
"def isabs(path):\n # If detected as storage path, it is an absolute path.\n return True",
"def _abs_path(fn):\n return os.path.join(os.path.dirname(__file__), fn)",
"def abspath(path):\n\n return os.path.abspath(path).replace(\"\\\\\", \"/\")",
"def real_absolute_path(path):\n return os.path.realpath(absolute_path(path))",
"def normalize_name(filename):\n filename = os.path.expanduser(filename)\n if not os.path.isabs(filename):\n filename = os.path.abspath(filename)\n return os.path.normpath(filename)",
"def sanitize_fname(directory, fname):\n return opath.join(\n bytes(directory, encoding='ascii'),\n opath.normpath(\n b'/' + fname).lstrip(b'/'))",
"def abspath(path):\n path = os.fspath(path)\n if not os.path.isabs(path):\n path = os.path.join(get_app_root(), path)\n return os.path.normpath(path)",
"def abspath(path: str) -> str:\n pass",
"def _fixpath(p):\n return os.path.abspath(os.path.expanduser(p))",
"def getAbsPath(*p):\n\tfrom os.path import abspath, join\n\tif len(p) >= 1:\n\t\treturn normalizePath(join(abspath(p[0]), *p))\n\treturn \"\"",
"def to_posix(fname):\n import sys\n if sys.platform == 'win32': # pragma: nocover\n import os.path\n if os.path.isabs(fname):\n fname = '/' + fname\n fname = fname.replace('\\\\', '/')\n return fname",
"def _absPath(self, relpath):\n\n # Pass through URIs and absolute paths.\n if self.isUrl(relpath) or relpath[0] == '/':\n return relpath\n\n # This won't deal with ~user/ syntax, but it's much less\n # common anyway.\n if relpath.startswith('~/') and 'HOME' in os.environ:\n return os.path.join(os.environ['HOME'], relpath[2:])\n\n if self._configFileStack:\n relativeTo = os.path.dirname(self._configFileStack[-1])\n else:\n relativeTo = os.getcwd()\n\n if self.isUrl(relativeTo):\n parts = urlparse.urlsplit(relativeTo)\n return urlparse.urlunsplit((parts.scheme, parts.netloc, os.path.normpath(os.path.join(parts.path, relpath)), parts.query, parts.fragment))\n return os.path.normpath(os.path.join(relativeTo, relpath))"
] | [
"0.82105744",
"0.7471553",
"0.69280857",
"0.69023055",
"0.6898607",
"0.6897593",
"0.6879507",
"0.6845178",
"0.68191797",
"0.6804619",
"0.67667115",
"0.67245203",
"0.67103535",
"0.6708995",
"0.66932917",
"0.66385156",
"0.6597944",
"0.6584722",
"0.65428245",
"0.6516904",
"0.65098387",
"0.6491459",
"0.6461347",
"0.6438296",
"0.6418956",
"0.63960767",
"0.6327889",
"0.63150084",
"0.63137794",
"0.6306853"
] | 0.83377725 | 0 |
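Illustrative behaviour of the abs_fname_ record above (the paths are examples only; the first result depends on the current working directory):

    abs_fname_("data/mesh.xyz")    # -> os.path.abspath("data/mesh.xyz")
    abs_fname_("~/data/mesh.xyz")  # -> os.path.expanduser("~/data/mesh.xyz")
    abs_fname_("/tmp/mesh.xyz")    # -> returned unchanged, already absolute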
Checks to see if the user is a librarian for certain routes | def librarian(f):
    @wraps(f)
    def decorated_function(*args, **kwargs):
        if current_user.lflag == 0:
            flash("You are not a librarian! Please sign in to a librarian account")
            return redirect(url_for('main'))
        return f(*args, **kwargs)
    return decorated_function | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def librarian_list(request):\n librarian = User.objects.filter(is_student=False, is_lecturer=False, is_parent=False, is_superuser=False)\n user_type = \"Librarian\"\n context = {\n \"librarian\": librarian,\n \"user_type\": user_type,\n }\n return render(request, 'library/librarians_list.html', context)",
"def user_roles_check(request):\n logger.debug('right_user_check')\n options = {\n 'api_file': {'GET': True, 'POST': False}\n }\n url_name = request.request.resolver_match.url_name\n if not request.request.user.is_authenticated:\n return False\n user_have_right = options[url_name][request.request.method]\n if user_have_right:\n return True\n raise PermissionDenied",
"def is_admin(user):\n return get_organisations_as_admin(user).count() > 0",
"def can_access_location(self, location: Location, routing_path):\n if location.section_id not in self.enabled_section_ids:\n return False\n\n if location.list_item_id and not self.is_list_item_in_list_store(\n location.list_item_id, location.list_name\n ):\n return False\n\n return location.block_id in self._get_allowable_path(routing_path)",
"def has_permission(self, request, view):\n user = request.user\n if isinstance(user, TokenUser) and LTI_ROLES[INSTRUCTOR] & set(\n user.token.payload.get(\"roles\", [])\n ):\n return True\n\n return super().has_permission(request, view)",
"def user_is_admin(user):\n return user in admins",
"def can_view(self, user):\r\n return True",
"def is_user_admin(request):\n return request.user.is_superuser",
"def _check_access(user, course_id):\r\n if not has_access(user, 'staff', course_id):\r\n raise Http404\r\n\r\n return",
"def authorized_to_access_arpa_file(user, morpheme_language_model):\n if (morpheme_language_model.restricted and user.role != u'administrator' and\n user not in h.get_unrestricted_users()):\n return False\n return True",
"def has_object_permission(self, request, view, obj):\n # Users authentified via LTI are identified by a TokenUser with the\n # resource_link_id as user ID.\n user = request.user\n if (\n isinstance(user, TokenUser)\n and LTI_ROLES[INSTRUCTOR] & set(user.token.payload.get(\"roles\", []))\n and str(self.get_video_id(obj)) != user.id\n ):\n raise exceptions.PermissionDenied()\n\n return super().has_object_permission(request, view, obj)",
"def has_super_access():\n current_user = frappe.get_doc('User', frappe.session.user)\n roles = set([role.role for role in current_user.roles])\n return bool(roles & {'Administrator', 'Instructor', 'Education Manager', 'System Manager', 'Academic User'})",
"def is_user_allowed(self, user):\n return user.is_staff",
"def current_venue_allows_role_state_routing() -> bool:\n venue_instance = CommonLocationUtils.get_venue_of_current_lot()\n if venue_instance is None:\n return False\n # noinspection PyUnresolvedReferences\n return venue_instance.allow_rolestate_routing_on_navmesh",
"def has_authority(self, user):\n UserModel = get_user_model()\n ADMINISTRATOR = UserModel.ROLE_MAP[UserModel.ADMINISTRATOR]\n result = True\n\n if not (user.is_superuser or user.role == ADMINISTRATOR):\n try:\n self.memberships.get(user=user)\n except Membership.DoesNotExist:\n result = False\n\n return result",
"def is_admin(self, user):\n return user.name in self.admins",
"def _is_correct_lti_request(self):\r\n lti_endpoint = self.server.config.get('lti_endpoint', self.DEFAULT_LTI_ENDPOINT)\r\n return lti_endpoint in self.path",
"def is_organizer(user):\n if user is None:\n return False\n url = app.config['USERS_ENDPOINT'] + 'authorization'\n response = requests.post(url, data={'user_id': user['user_id']})\n return response.json()['is_organizer'] is True",
"async def is_launcher(ctx):\n member = ctx.message.author\n staff = await is_staff(ctx)\n lhRole = discord.utils.get(member.guild.roles, name=ROLE_LH)\n if staff or lhRole in member.roles: return True",
"def can_view(self, user):\n if self.applicant == user:\n return True\n elif user.has_perm('funding.view_all_applications'):\n # Fundihg commitee\n return True\n elif user.has_perm('funding.make_application_decisions'):\n # Fundihg manager - should have the view permissions, but just in case\n return True\n return False",
"def is_accessible_by(self, user):\n return (self.public or\n (user.is_authenticated and\n (user.is_staff or self.users.filter(pk=user.pk).exists())))",
"def has_permission(self, request):\n\t\treturn request.user.is_active",
"def test_permissions(self):\n taxonomy = self.get_taxonomy()\n return True if self.request.user == taxonomy.author else taxonomy.public",
"def has_organizer(self, user):\n return self.main_organizer == user or self.team.filter(id=user.id).exists()",
"def can_be_viewed_by(self,user):\n return True",
"def _is_restricted(self, real_path):\n if real_path.find(self._root) != 0:\n return True\n\n # get rid of the root part plus the following /\n real_path = real_path[len(self._root)+1:]\n for folder in self._restricted_folders:\n if real_path.find(folder) == 0:\n return True\n\n return False",
"def is_admin(self, user):\n return (acl.action_allowed(self.request, 'OperatorDashboard', '*') or\n acl.action_allowed(self.request, 'Feed', 'Curate'))",
"def __has_access_to(self, user, object_dn, object_type, attr):\n aclresolver = PluginRegistry.getInstance(\"ACLResolver\")\n if user:\n topic = \"%s.objects.%s.attributes.%s\" % (self.env.domain, object_type, attr)\n return aclresolver.check(user, topic, \"r\", base=object_dn)\n else:\n return True",
"def _lti_role_allowed(request, lti_roles_permitted, raise_exception=False):\n user_allowed_roles = is_allowed(request, lti_roles_permitted,\n raise_exception)\n return len(user_allowed_roles) > 0",
"def is_ligand(self):\n if any(LigandComponentAdaptor().fetch_by_residue_id(r.residue_id) for r in self.Residues):\n return True\n else:\n return False"
] | [
"0.62187976",
"0.55780613",
"0.5519553",
"0.54693735",
"0.5451162",
"0.5360155",
"0.53441876",
"0.53240615",
"0.53006685",
"0.5292587",
"0.5280629",
"0.5270738",
"0.5257878",
"0.52497697",
"0.5249029",
"0.52322376",
"0.5221979",
"0.521075",
"0.52094173",
"0.5189399",
"0.51697147",
"0.51562065",
"0.5153858",
"0.51525855",
"0.51516527",
"0.51418793",
"0.51273924",
"0.5120065",
"0.5099593",
"0.50955725"
] | 0.60173243 | 1 |
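A sketch of how the librarian decorator above would typically be applied to a Flask view (the route, view and template names are hypothetical; current_user, flash and redirect come from Flask-Login and Flask as in the snippet):

    @app.route('/books/manage')
    @librarian
    def manage_books():
        # Only reached when current_user.lflag != 0
        return render_template('manage_books.html')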
Instantiate a model from local directory or remote model repo. Note that when loading from remote, the model revision can be specified. | def from_pretrained(cls,
                        model_name_or_path: str,
                        revision: Optional[str] = DEFAULT_MODEL_REVISION,
                        cfg_dict: Config = None,
                        device: str = None,
                        **kwargs):
    prefetched = kwargs.get('model_prefetched')
    if prefetched is not None:
        kwargs.pop('model_prefetched')
    if osp.exists(model_name_or_path):
        local_model_dir = model_name_or_path
    else:
        if prefetched is True:
            raise RuntimeError(
                'Expecting model is pre-fetched locally, but is not found.'
            )
        local_model_dir = snapshot_download(model_name_or_path, revision)
    logger.info(f'initialize model from {local_model_dir}')
    if cfg_dict is not None:
        cfg = cfg_dict
    else:
        cfg = Config.from_file(
            osp.join(local_model_dir, ModelFile.CONFIGURATION))
    task_name = cfg.task
    if 'task' in kwargs:
        task_name = kwargs.pop('task')
    model_cfg = cfg.model
    if hasattr(model_cfg, 'model_type') and not hasattr(model_cfg, 'type'):
        model_cfg.type = model_cfg.model_type
    model_cfg.model_dir = local_model_dir
    for k, v in kwargs.items():
        model_cfg[k] = v
    if device is not None:
        model_cfg.device = device
        model = build_model(
            model_cfg, task_name=task_name, default_args=kwargs)
    else:
        model = build_model(
            model_cfg, task_name=task_name, default_args=kwargs)
    # dynamically add pipeline info to model for pipeline inference
    if hasattr(cfg, 'pipeline'):
        model.pipeline = cfg.pipeline
    if not hasattr(model, 'cfg'):
        model.cfg = cfg
    model.name = model_name_or_path
    return model | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_model(self, model_path: str):",
"def load_model(fname: os.PathLike) -> Model:\n return Model.load(fname)",
"def _load_from(cls, model_state: dict) -> AbstractModel:\n return cls(model=model_state.get('model'), **model_state.get('kwargs'))",
"def load_model_from_file(path, as_builder=False):\n module = load_model_module(path)\n model = module.model\n if not as_builder:\n model = model()\n del sys.path_importer_cache[os.path.dirname(module.__file__)]\n del sys.modules[module.__name__]\n return model",
"def load_model(self, model_name, model_url):\n\n fname = join(self.root, model_name)\n if not isfile(fname):\n if self.verbose:\n print(\"Could not find \" + fname + \".. attempt download\")\n with urllib.request.urlopen(model_url) as res, open(fname, 'wb') as f:\n shutil.copyfileobj(res, f)\n if self.verbose:\n print(\"Download complete.. model: \" + fname)\n elif self.verbose:\n print(\"Found model \" + fname + \"! :)\")\n\n model = load_model(fname)\n self.model = model",
"def load_model(self, filename):\r\n pass",
"def load_model() -> None:\n global model\n\n if app.testing:\n current_dir = os.path.dirname(__file__)\n model_path = os.path.join(current_dir, \"models/model.pkl\")\n else:\n model_path = os.getenv(\"PATH_TO_MODEL\")\n\n if model_path is None:\n err = f\"PATH_TO_MODEL {model_path} is None\"\n raise RuntimeError(err)\n\n with open(model_path, \"rb\") as model_file:\n model = pickle.load(model_file)",
"def import_model(filename, instance_options):\n model = _import_from_file(filename, instance_options)\n model.set_project_lists()\n model.set_lecturer_lists()\n model.set_rank_lists()\n return model",
"def create_model(mode: str, path_to_checkpoint = None) -> LightningModule:\n\n assert mode != None and mode != ''\n\n if mode == 'scratch':\n if path_to_checkpoint != None:\n model = DogsBreedClassifier.load_from_checkpoint(path_to_checkpoint)\n else:\n model = DogsBreedClassifier()\n elif mode == 'densenet':\n if path_to_checkpoint != None:\n model = DogsBreedClassifierDenseNet.load_from_checkpoint(path_to_checkpoint)\n else:\n model = DogsBreedClassifierDenseNet()\n else:\n if path_to_checkpoint != None:\n model = DogsBreedClassifierEfficientNet.load_from_checkpoint(path_to_checkpoint)\n else:\n model = DogsBreedClassifierEfficientNet()\n\n return model",
"def load(path_to_model):\n pass",
"def load_model(uri: str, env: AbstractEnv = compat.env) -> \"Model\":\n from ell.predictions import Model\n\n uri = ensure_uri(uri)\n filesystem = env.get_fs_for_uri(uri)\n\n if uri.endswith(\"/\"):\n # If it's a directory, load the first \"*.pkl\" file in it\n glob_result = filesystem.glob(uri.file(\"*.pkl\"), detail=False)\n if not glob_result:\n raise FileNotFoundError(f\"Couldn't find a pickled model in {uri!r}\")\n uri = uri.file(os.path.basename(glob_result[0]))\n\n LOGGER.info(\"Loading model from %r\", uri)\n with filesystem.open(uri, \"rb\") as f:\n model = joblib.load(f)\n if not isinstance(model, Model):\n raise TypeError(\n f\"Expected loaded object to be of type AbstractClassifier, but got \"\n f\"{model.__class__.__name__}\"\n )\n LOGGER.info(\"Model loaded\")\n return model",
"def make_model():\n m = model_class(*argv[2:-1])\n modelobj[\"model\"] = m",
"def load_model(self, path):\n pass",
"def __init__(self, root='/tmp', url=None, name=None):\n if url is None:\n url = 'http://188.138.127.15:81/models/model_heavy_89acc.h5'\n if name is None:\n name = 'model_heavy_89acc.h5'\n if not isdir(root):\n makedirs(root)\n\n filepath = join(root, name)\n if not isfile(filepath):\n print('could not find model.. downloading it')\n dl.download(url, filepath)\n\n self.model = load_model(filepath)",
"def load_model(model_path: str) -> object:\n model = torch.load(model_path)\n model.eval()\n return model",
"def load_model(model, model_index, device=\"cpu\"):\n with open(\"trained_local_model\"+str(model_index), \"rb\") as f_:\n model.load_state_dict(torch.load(f_))\n model.to(device)\n return model",
"def get_model(model_name: str, *args, **kwargs):\n try:\n if '.' in model_name:\n module_name, class_name = model_name.rsplit('.', 1)\n else:\n module_name = model_name\n class_name = model_name.capitalize().replace(\"_\",\"\")\n\n model_module = import_module('.' + module_name, package='models')\n\n model_class = getattr(model_module, class_name)\n\n instance = model_class(*args, **kwargs)\n\n except (AttributeError, ModuleNotFoundError):\n raise ImportError('{} is not part of our model/architecture collection.'.format(model_name))\n else:\n if not issubclass(model_class, Model):\n raise ImportError(\"{} is not a valid model/architecture.\".format(model_class))\n\n return instance",
"def load_model(self, path_model: Optional[PathLike]) -> None:\n raise NotImplementedError",
"def load(cls, filename, model_format):\n handle = ctypes.c_void_p()\n if not _isascii(model_format):\n raise ValueError('model_format parameter must be an ASCII string')\n model_format = model_format.lower()\n if model_format == 'lightgbm':\n _check_call(_LIB.TreeliteLoadLightGBMModel(c_str(filename),\n ctypes.byref(handle)))\n elif model_format == 'xgboost':\n _check_call(_LIB.TreeliteLoadXGBoostModel(c_str(filename),\n ctypes.byref(handle)))\n elif model_format == 'protobuf':\n _check_call(_LIB.TreeliteLoadProtobufModel(c_str(filename),\n ctypes.byref(handle)))\n else:\n raise ValueError('Unknown model_format: must be one of ' \\\n + '{lightgbm, xgboost, protobuf}')\n return Model(handle)",
"def init_model(model_type):\n if model_type == 'magnitude':\n model = Magnitude('../model/crawl-300d-2M.magnitude')\n elif model_type == 'gensim':\n model = KeyedVectors.load('../model/pre_trained_word2vec_embeddings.bin')\n else:\n print(\"Invalid model type.\")\n sys.exit(1)\n return model, model_type",
"def get_model(existing_model_path=None):\n model = None\n if existing_model_path is not None and path.isfile(existing_model_path):\n model = load_pickle(existing_model_path)\n logging.info('loaded model from ' + existing_model_path)\n if not isinstance(model, Sequential):\n logging.info('model is no valid model object')\n model = Sequential()\n logging.info('created new model')\n return model",
"def initialize_default_model(config: BareConfig, model_class) -> torch.nn.Module:\n model = model_class()\n default_model_path = f\"{config.get_default_model_folder_path()}/{model_class.__name__}.model\"\n model.load_state_dict(torch.load(default_model_path))\n return model",
"def _load_model(self, loc):\n\n # If not a string, return input\n if not (isinstance(loc, str) or isinstance(loc, unicode)):\n return loc\n\n # If location is in S3, copy to local, then unpickle \n to_delete = False\n if \"s3\" in loc:\n tmp_loc = \"{0}/tmp_file_{1}.obj\".format(tmpdir, random.randint(1,1000))\n s3 = boto3.client('s3')\n bucket = loc.split(\"/\")[2]\n key = \"/\".join(loc.split(\"/\")[3:])\n with open(tmp_loc, \"wb\") as data:\n s3.download_fileobj(bucket, key, data)\n loc = tmp_loc\n to_delete = True\n with open(loc, \"rb\") as f:\n model = pickle.load(f)\n if to_delete:\n os.remove(tmp_loc)\n return model",
"def load_model(model_name):\r\n model = joblib.load(model_name)\r\n return model",
"def load_model():\n with open(MODEL_FILENAME, \"rb\") as file:\n model = pickle.load(file)\n return model",
"def fetch_and_load(this_class, model_name, models_directory=None,\n verbose=False, extra_loading_options=None):\n from .ModelFetcher import download_and_install_model\n model_dir = download_and_install_model(model_name,\n models_directory,\n verbose)\n\n kwargs = extra_loading_options or {}\n return this_class.from_unified_model_dir(model_dir, **kwargs)",
"def load_model(model_path):\n nlp = spacy.blank('en') \n if 'ner' not in nlp.pipe_names:\n ner = nlp.create_pipe('ner')\n nlp.add_pipe(ner)\n #load pretrained model from the path\n ner = nlp.from_disk(model_path)\n return ner",
"def load_model():\n with open(MODEL_SAVE_JSON, 'r') as fp:\n json_string = fp.read()\n model = model_from_json(json_string)\n return model",
"def _load_from(cls, model_state: dict) -> 'AbstractModel':\n raise NotImplementedError",
"def from_path(cls, path: str) -> Union[None, Type[AbstractModel]]:\n\n if not (path and isinstance(path, str)):\n msg = f\"Need a valid path to load a text/tagger model in AutoModel. \" \\\n f\"Found path={path} of type({type(path)})\"\n raise ValueError(msg)\n\n if not path.endswith(\".pkl\"):\n msg = \"Model Path must end with .pkl for AutoModel to be able to identify the model\"\n raise ValueError(msg)\n\n try:\n # if loading from path, determine the ABCModel type & return after doing xxxModel.load()\n model_config = AbstractModel.load_model_config(path)\n\n # get model type upon validation\n model_config = cls._resolve_model_config(model_config)\n model_type = cls._get_model_type(model_config)\n\n # load metadata and return\n if model_type == \"text\":\n model_class = AutoTextModel.get_model_class(model_config)\n elif model_type == \"tagger\":\n model_class = AutoTaggerModel.get_model_class(model_config)\n\n return model_class.load(path)\n\n except FileNotFoundError:\n # sometimes a model (and its config file) might not be dumped, eg. in role classifiers\n # or even if dumped, can be of NoneType enclosed in a dictionary\n return None"
] | [
"0.67073345",
"0.6637043",
"0.65116197",
"0.6482459",
"0.64491147",
"0.64228743",
"0.6422444",
"0.64197296",
"0.63858056",
"0.6369337",
"0.6368034",
"0.6346652",
"0.6345023",
"0.62734",
"0.62559134",
"0.62544537",
"0.6245681",
"0.6237426",
"0.6231261",
"0.6220943",
"0.6198383",
"0.6189115",
"0.6187328",
"0.6185334",
"0.6178323",
"0.6176592",
"0.61747044",
"0.61177486",
"0.6114139",
"0.6097771"
] | 0.7069769 | 0 |
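A hedged usage sketch for the from_pretrained classmethod above (the enclosing class name Model, the model id, revision and device values are all assumptions; the hub download via snapshot_download and the configuration read are as in the snippet):

    # Resolves a local path directly, otherwise downloads the given revision first.
    model = Model.from_pretrained('damo/some_example_model',
                                  revision='v1.0.0',
                                  device='cpu')
    print(model.name, model.cfg.task)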
Generates the trading instance objects from their class types. This method attaches all of the trading objects (DataHandler, Strategy, Portfolio, and ExecutionHandler) to various internal members. This ties all of the other classes together within the Backtester object. | def _generate_trading_instances(self):
    print("Creating DataHandler, Strategy, Portfolio, and ExecutionHandler for")
    # Set internal data members equal to the classes we passed in earlier, along with necessary parameters.
    # https://softwareengineering.stackexchange.com/questions/131403/what-is-the-name-of-in-python/131415
    self.data_handler = self.data_handler_class(self.events, self.csv_dir, self.symbol_list)
    self.strategy = self.strategy_class(self.data_handler, self.events)
    self.portfolio = self.portfolio_class(self.data_handler, self.events, self.start_date, self.initial_capital)
    self.execution_handler = self.execution_handler_class(self.events) # The Event Queue sent to ExecutionHandler | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _generate_trading_instances(self, strategy_params_dict):\n print(\"Creating DataHandler, Strategy, Portfolio, and ExecutionHandler for\")\n print(\"strategy parameter list: %s...\" % strategy_params_dict)\n\n # Set internal data members equal to the classes we passed in earlier, along with necessary parameters.\n # https://softwareengineering.stackexchange.com/questions/131403/what-is-the-name-of-in-python/131415\n self.data_handler = self.data_handler_class(self.events, self.csv_dir, self.symbol_list)\n self.strategy = self.strategy_class(self.data_handler, self.events, **strategy_params_dict)\n self.portfolio = self.portfolio_class(self.data_handler, self.events, self.start_date, self.initial_capital)\n self.execution_handler = self.execution_handler_class(self.events) # The Event Queue sent to ExecutionHandler",
"def _generate_trading_instances(self):\n print(\n \"Initizalization...\"\n )\n\n self.data_handler = self.data_handler_cls(self.events, self.csv_dir, self.symbol_list, self.start_date,\n self.end_date)\n self.portfolio = self.portfolio_cls(self.data_handler, self.events, self.initial_capital, self.start_date,\n self.strategy_id, self.spread, self.commission,self.csv_dir)\n self.strategy = self.strategy_cls(self.data_handler, self.events, self.portfolio, self.spread, self.commission)\n self.plot = self.plot_cls(self.csv_dir, self.portfolio, self.strategy_id)",
"def _generate_trading_instances(self, start_date, end_date, instruments, params):\n configuration = self.configuration\n configuration.start_date = start_date\n configuration.end_date = end_date\n configuration.instruments = instruments\n\n logger.info(\"Creating DataHandler, Strategy, Portfolio and ExecutionHandler\")\n logger.info(\"Start date: %s\" % start_date)\n logger.info(\"End date: %s\" % end_date)\n logger.info(\"Instrument(s): %s...\" % instruments)\n logger.info(\"Params: %s...\" % params)\n\n self.data_handler = self.data_handler_cls(self.events, configuration)\n self.strategy = self.strategy_cls(self.data_handler, self.events, configuration, **params)\n self.portfolio = self.portfolio_cls(self.data_handler, self.events, configuration)\n self.execution_handler = self.execution_handler_cls(self.data_handler, self.events, configuration)",
"def _generate_trading_instances(self, sp):\n print(\n \"Initialization...\"\n )\n self.data_handler = self.data_handler_cls(self.events, self.csv_dir, self.symbol_list, self.start_date,\n self.end_date)\n self.portfolio = self.portfolio_cls(self.data_handler, self.events, self.initial_capital, self.start_date,\n self.strategy_id, self.spread, self.commission,self.csv_dir)\n self.strategy = self.strategy_cls(self.data_handler, self.events, self.portfolio, self.spread, self.commission,\n sp)\n self.plot = self.plot_cls(self.csv_dir, self.portfolio, self.strategy_id)",
"def create_all(self, registry):\n for cls in registry.values():\n self.create_class(cls)",
"def _fill_class_dicts():\n global _taxonomy_classes\n global _data_classes\n if not _taxonomy_classes:\n _taxonomy_classes = get_taxonomies()\n if not _data_classes:\n stack = []\n next_module = data\n while next_module is not None:\n stack += _inspect_module(next_module)\n if stack:\n next_module = stack.pop()\n else:\n next_module = None",
"def make_objects(self):\n pass",
"def __init__(self):\n if DynamicImporter._instance is not None:\n raise Exception(\"DynamicImporter instance already exists!\")\n DynamicImporter._instance = self\n\n current_path = Path(__file__).parent\n test_path = current_path / \"testdata\"\n files = test_path.rglob(\"*.py\")\n\n for file in files:\n\n if file.name in [\"__init__.py\", \"test_module.py\", \"test_registry.py\", \"connections.py\"]:\n continue\n\n name = file.stem\n module = import_module(f\"testdata.{name}\")\n class_title = f\"{name.title()}Test\"\n\n try:\n _class = getattr(module, class_title) # get the class\n self.class_list[class_title] = _class # add the class to the class list\n except AttributeError: # don't throw exceptions for files that don't have a test\n continue",
"def __init__(self):\n for base in AutomationSetup.__bases__:\n base.__init__(self)",
"def setUpClass(cls):\n\n Base._Base__nb_objects = 0\n cls.b1 = Base()\n cls.b2 = Base()\n cls.b3 = Base(22)\n cls.b4 = Base(2.2)\n cls.b5 = Base(\"two\")\n cls.r1 = Rectangle(10, 7, 2, 8)\n cls.r2 = Rectangle(2, 4)",
"def setup_class(self):\n\n class SubFLRW(FLRW):\n def w(self, z):\n return super().w(z)\n\n self.cls = SubFLRW\n # H0, Om0, Ode0\n self.cls_args = (70 * u.km / u.s / u.Mpc, 0.27 * u.one, 0.689 * u.one)\n self.cls_kwargs = dict(Tcmb0=3.0 * u.K, name=self.__class__.__name__, meta={\"a\": \"b\"})",
"def init_elect_types(self):\n self.wta = WinnerTakeAll()\n self.proportional = Proportional()\n self.schulze = Schulze()\n\n session.add_all([self.wta, self.proportional, self.schulze])",
"def _create_Work(classname, dataclass):\n globals()[classname] = type(classname, (Work, dataclass), {})",
"def XtremObjFactory(object_type, object_data, parent_connection):\r\n for cls in XtremObject.__subclasses__():\r\n if cls.is_class_for(object_type):\r\n return cls(object_data, parent_connection)",
"def _setup(self):\n\n # Get user data\n self.symbols = self._get_symbols()\n self.data_dict = self._get_data()\n self.portfolio = self.initialize_portfolio()\n\n if 'slippage' in self.portfolio:\n self.slippage = self.portfolio['slippage']\n else:\n self.slippage = None\n\n # Keep track of all trades\n self.trade_manager = TradeManager(\n self.symbols, self.portfolio, self.sql_config\n )\n\n # Initialize state variables that are updated each iteration\n self.date = None\n self.data = None\n self.symbol = None\n self.currency = None\n self.last_buy = None\n self.num_unresolved = 0\n self.unresolved_trade = False",
"def setup_class(klass):",
"def setup_class(klass):",
"def make_class(attributes, base_classes=()):\r\n \"*** YOUR CODE HERE ***\"",
"def _classes_(cls):\n for base_cls in cls.__bases__:\n # Avoid infinite loop\n if base_cls == Sandbox:\n continue\n\n yield base_cls",
"def _prepare_wsdl_objects(self):\r\n\r\n\t# Default behavior is to not request transit information\r\n\tself.ReturnTransitAndCommit = False\r\n\r\n # This is the primary data structure for processShipment requests.\r\n self.RequestedShipment = self.client.factory.create('RequestedShipment')\r\n self.RequestedShipment.ShipTimestamp = datetime.now()\r\n \r\n TotalWeight = self.client.factory.create('Weight')\r\n # Start at nothing.\r\n TotalWeight.Value = 0.0\r\n # Default to pounds.\r\n TotalWeight.Units = 'LB'\r\n # This is the total weight of the entire shipment. Shipments may\r\n # contain more than one package.\r\n self.RequestedShipment.TotalWeight = TotalWeight\r\n \r\n # This is the top level data structure for Shipper information.\r\n ShipperParty = self.client.factory.create('Party')\r\n ShipperParty.Address = self.client.factory.create('Address')\r\n ShipperParty.Contact = self.client.factory.create('Contact')\r\n \r\n # Link the ShipperParty to our master data structure.\r\n self.RequestedShipment.Shipper = ShipperParty\r\n\r\n # This is the top level data structure for Recipient information.\r\n RecipientParty = self.client.factory.create('Party')\r\n RecipientParty.Contact = self.client.factory.create('Contact')\r\n RecipientParty.Address = self.client.factory.create('Address')\r\n \r\n # Link the RecipientParty object to our master data structure.\r\n self.RequestedShipment.Recipient = RecipientParty\r\n \r\n Payor = self.client.factory.create('Payor')\r\n # Grab the account number from the FedexConfig object by default.\r\n Payor.AccountNumber = self._config_obj.account_number\r\n # Assume US.\r\n Payor.CountryCode = 'US'\r\n \r\n ShippingChargesPayment = self.client.factory.create('Payment')\r\n ShippingChargesPayment.Payor = Payor\r\n\r\n self.RequestedShipment.ShippingChargesPayment = ShippingChargesPayment\r\n \r\n # ACCOUNT or LIST\r\n self.RequestedShipment.RateRequestTypes = ['ACCOUNT'] \r\n \r\n # Start with no packages, user must add them.\r\n self.RequestedShipment.PackageCount = 0\r\n self.RequestedShipment.RequestedPackageLineItems = []\r\n \r\n # This is good to review if you'd like to see what the data structure\r\n # looks like.\r\n self.logger.debug(self.RequestedShipment)",
"def setUpClass(cls):\r\n cls.jon = Person.create(name='Jon', age=143)\r\n cls.eric = Person.create(name='Eric', age=25)\r\n cls.blake = Person.create(name='Blake', age=14)\r\n\r\n cls.physics = Course.create(name='Physics 264', credits=1.0)\r\n cls.beekeeping = Course.create(name='Beekeeping', credits=15.0)\r\n cls.theoretics = Course.create(name='Theoretical Theoretics', credits=-3.5)\r\n\r\n cls.eric_in_physics = EnrolledIn.create(cls.eric, cls.physics, date_enrolled=datetime.now(),\r\n enthusiasm=10) # eric loves physics\r\n cls.jon_in_beekeeping = EnrolledIn.create(cls.jon, cls.beekeeping, date_enrolled=datetime.now(),\r\n enthusiasm=1) # jon hates beekeeping\r\n\r\n cls.blake_in_theoretics = EnrolledIn.create(cls.blake, cls.theoretics, date_enrolled=datetime.now(),\r\n enthusiasm=8)\r\n\r\n cls.blake_beekeeping = TaughtBy.create(cls.beekeeping, cls.blake, overall_mood='Pedantic')\r\n cls.jon_physics = TaughtBy.create(cls.physics, cls.jon, overall_mood='Creepy')\r\n cls.eric_theoretics = TaughtBy.create(cls.theoretics, cls.eric, overall_mood='Obtuse')",
"def __init__(self):\n self.classes = {}",
"def setup_class(cls):",
"def setup_class(cls):",
"def _prepare_wsdl_objects(self):\r\n self.DeletionControlType = self.client.factory.create('DeletionControlType')\r\n self.TrackingId = self.client.factory.create('TrackingId')\r\n self.TrackingId.TrackingIdType = self.client.factory.create('TrackingIdType')",
"def setup_class(cls):\n cls._patch_logger()\n cls.multiplexer = Multiplexer(\n [DummyConnection(connection_id=DUMMY_CONNECTION_PUBLIC_ID)]\n )\n cls.outbox = OutBox(cls.multiplexer)\n private_key_pem_path = os.path.join(CUR_PATH, \"data\", \"fet_private_key.txt\")\n eth_private_key_pem_path = os.path.join(CUR_PATH, \"data\", \"fet_private_key.txt\")\n cls.wallet = Wallet(\n {FETCHAI: private_key_pem_path, ETHEREUM: eth_private_key_pem_path}\n )\n cls.ledger_apis = LedgerApis({FETCHAI: DEFAULT_FETCHAI_CONFIG}, FETCHAI)\n cls.agent_name = \"test\"\n cls.ownership_state = OwnershipState()\n cls.preferences = Preferences()\n cls.decision_maker = DecisionMaker(\n agent_name=cls.agent_name,\n max_reactions=MAX_REACTIONS,\n outbox=cls.outbox,\n wallet=cls.wallet,\n ledger_apis=cls.ledger_apis,\n )\n cls.multiplexer.connect()\n\n cls.tx_id = \"transaction0\"\n cls.tx_sender_addr = \"agent_1\"\n cls.tx_counterparty_addr = \"pk\"\n cls.info = {\"some_info_key\": \"some_info_value\"}\n cls.ledger_id = \"fetchai\"\n\n cls.decision_maker.start()",
"def setUpClass(cls):\n super().setUpClass()\n\n cls.accessor = OCPReportDBAccessor(cls.schema)\n cls.report_schema = cls.accessor.report_schema\n cls.all_tables = list(OCP_REPORT_TABLE_MAP.values())\n cls.creator = ReportObjectCreator(cls.schema)\n cls.date_accessor = DateHelper()\n cls.manifest_accessor = ReportManifestDBAccessor()\n cls.dh = DateHelper()",
"def load_asset_classes(self) -> None:\n provider = AlphaVantageProvider()\n trends_provider = GoogleTrendsProvider()\n self.asset_classes.append(AssetClass.CurrencyClass(provider, trends_provider))\n self.asset_classes.append(AssetClass.StockClass(provider, trends_provider))",
"def gen_extractors():\n return [klass() for klass in gen_extractor_classes()]",
"def build_etl_classes(self):\n\n self.clear_etl_classes()\n\n for config in list(self.configs.values()):\n\n etl_class = self.build(config)\n\n self.add_etl_class(etl_class)"
] | [
"0.71847904",
"0.7100767",
"0.6478167",
"0.6361291",
"0.56882477",
"0.56326246",
"0.5572381",
"0.5539028",
"0.55039656",
"0.54438514",
"0.5403827",
"0.5379698",
"0.53721094",
"0.5297526",
"0.5287005",
"0.52309954",
"0.52309954",
"0.51850206",
"0.51665777",
"0.5139257",
"0.5119804",
"0.5095001",
"0.5056946",
"0.5056946",
"0.50322807",
"0.50245565",
"0.50245273",
"0.5017071",
"0.5011593",
"0.5004934"
] | 0.7943233 | 0 |
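For context, a sketch of the constructor call that usually precedes _generate_trading_instances in this event-driven backtesting pattern (every class name and argument below is an assumption that follows the snippet's attribute names, not a quotation of the original project):

    backtest = Backtest(csv_dir='data/', symbol_list=['AAPL'],
                        initial_capital=100000.0, heartbeat=0.0,
                        start_date=datetime.datetime(2020, 1, 1),
                        data_handler_class=HistoricCSVDataHandler,
                        execution_handler_class=SimulatedExecutionHandler,
                        portfolio_class=Portfolio,
                        strategy_class=BuyAndHoldStrategy)
    # __init__ would then call self._generate_trading_instances()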
Executes the backtest. This is where the signal handling of the Backtesting engine is carried out. There are two while loops: the outer loop (heartbeat) and the nested inner loop, which checks if there is an event in the Event Queue object. The inner loop acts on the Event by calling the appropriate method of the appropriate object. | def _run_backtest(self):
    i = 0
    while True:
        i += 1
        print(i)
        # Update the market bars
        if self.data_handler.continue_backtest == True:
            self.data_handler.update_bars()
        else:
            break
        # Handle the Events
        while True:
            try:
                event = self.events.get(False)
            except queue.Empty:
                break
            else:
                # The inner-loop acts on the events by calling the appropriate method of the appropriate object
                if event is not None:
                    if event.type == 'MARKET':
                        self.strategy.calculate_signals(event)
                        self.portfolio.update_timeindex(event)
                    elif event.type == 'SIGNAL':
                        self.signals += 1
                        self.portfolio.update_signal(event)
                    elif event.type == 'ORDER':
                        self.orders += 1
                        self.execution_handler.execute_order(event)
                    elif event.type == 'FILL':
                        self.fills += 1
                        self.portfolio.update_fill(event)
        # Pauses for a duration of self.heartbeat seconds
        time.sleep(self.heartbeat) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _run_backtest(self):\n i = 0\n\n while True:\n i += 1\n print(i)\n\n # Update the market bars\n if self.data_handler.continue_backtest == True:\n self.data_handler.update_bars()\n else:\n break\n\n # Handle the Events\n while True:\n try:\n event = self.events.get(False)\n except queue.Empty:\n break\n else:\n # The inner-loop acts on the events by calling the appropriate method of the appropriate object\n if event is not None:\n if event.type == 'MARKET':\n self.strategy.calculate_signals(event)\n self.portfolio.update_timeindex(event)\n\n elif event.type == 'SIGNAL':\n self.signals += 1\n self.portfolio.update_signal(event)\n\n elif event.type == 'ORDER':\n self.orders += 1\n self.execution_handler.execute_order(event)\n\n elif event.type == 'FILL':\n self.fills += 1\n self.portfolio.update_fill(event)\n\n # Pauses for a duration of self.heartbeat seconds\n time.sleep(self.heartbeat)",
"def _run_backtest(self):\n i = 0\n while True:\n i += 1\n if self.data_handler.continue_backtest == True:\n self.data_handler.update_bars()\n else:\n break\n while True:\n try:\n event = self.events.get(False)\n except Empty:\n break\n else:\n if event is not None:\n if event.type == EventType.MARKET:\n self.strategy.On_Bars(event)\n self.portfolio.update_balance(event)\n self.portfolio.order_check(event)\n elif event.type == EventType.ORDER_SEND:\n self.portfolio.update_order(event)\n elif event.type == EventType.ORDER_CLOSE:\n self.portfolio.update_order(event)\n self.portfolio.update_euity(event)\n elif event.type == EventType.ORDER_MODIFY:\n self.portfolio.update_order(event)\n time.sleep(self.heartbeat)",
"def _run_backtest(self):\n i = 0\n while True:\n i += 1\n if self.data_handler.continue_backtest == True:\n self.data_handler.update_bars()\n #print(self.data_handler.get_latest_bar_datetime(self.symbol_list[0]))\n else:\n break\n while self.backtest:\n try:\n event = self.events.get(False)\n except Empty:\n break\n else:\n if event is not None:\n if event.type == EventType.MARKET:\n try:\n self.strategy.On_Bars(event)\n self.portfolio.update_balance(event)\n self.portfolio.order_check(event)\n except EquityError:\n print('Not Engough Equity,Backtest Will be Stop...')\n self.backtest=False\n break\n elif event.type == EventType.ORDER_SEND:\n self.portfolio.update_order(event)\n elif event.type == EventType.ORDER_CLOSE:\n try:\n self.portfolio.update_order(event)\n self.portfolio.update_euity(event)\n except EquityError:\n print ('Not Engough Equity,Backtest Will be Stop...')\n self.backtest=False\n break\n elif event.type == EventType.ORDER_MODIFY:\n self.portfolio.update_order(event)\n time.sleep(self.heartbeat)",
"def run(self):\n\n self.halt = False\n\n print(\"Starting heartbeat.\")\n while not self.halt:\n event = events.TickEvent()\n AppState.get_state().get_event_manager().post_event(event)\n AppState.get_state().get_clock().tick(settings.MAX_FPS)",
"def run(self) -> None:\n\n while not self.stop_event.is_set():\n if self.my_queue:\n # if heartbeat received at '/heartbeat' route from the monitored peer,\n # sleep until next\n self.my_queue.clear()\n time.sleep(7)\n\n else:\n # else drop peer data from database and inform central server appending '0'\n # to my queue\n self.db_access.drop_peer(self.peer_id)\n self.my_queue.append(0)\n break",
"def event_loop(self):\n while self.ack is False:\n gevent.sleep(self.loop_interval)\n output_service = self.get_directory_service_proxy().get_service(\"mock-output-service\")\n output_service.put(\"test-worker-work-result\")\n self.ack = True",
"def __call__(self, event_queue, in_queue, out_queue):\n\n running = True\n self.in_q = in_queue # command received from the main thread\n self.out_q = out_queue # responses, commands to the main thread\n self.ev_q = event_queue # return pending events to the main thread\n shutdown = False\n self.light_state = False # current state of beat light\n\n # send first beat light message\n if self.btic.BeatLight() is True:\n self.out_q.put(\"beaton\")\n else:\n self.out_q.put(\"beatoff\")\n\n # run thread loop\n while running is True:\n if self.die_pending is False:\n self.sendPendingEvents()\n self.processCommands()\n\n \"\"\"\n if self.btic.BeatLightToggle() == True:\n self.out_q.put(\"beat\")\n wx.WakeUpIdle()\n \"\"\"\n\n # display beat light on UI\n light = self.btic.BeatLight()\n if light != self.light_state:\n self.light_state = light\n if light is True:\n self.out_q.put(\"beatoff\")\n else:\n self.out_q.put(\"beaton\")\n wx.WakeUpIdle() \n \n if self.allClear() is True:\n time.sleep(.01)\n #pass\n else:\n # stop the loop/thread when all is cleaned up\n self.sendPendingEvents()\n if self.allClear() is True:\n self.clearBank() \n self.die_pending = False\n running = False\n else:\n time.sleep(.01)\n # pass",
"def awaitVerification(self):\r\n method = moduleName + '.' + self.className + '.' + 'awaitVerification'\r\n while True:\r\n try:\r\n self._stopevent.wait(self._sleepperiod)\r\n verification = self.localCommQueue.get_nowait()\r\n if verification == terminationVerificationMsg.COMMIT:\r\n #The parent AE agrees that we can shutdown. Terminate\r\n break\r\n elif verification == terminationVerificationMsg.ROLLBACK:\r\n #Roll back the termination\r\n raise Exceptions.WorkerThreadTerminationRollback()\r\n elif verification == terminationVerificationMsg.ERROR:\r\n errorMsg = \"Worker thread for landmark %s is improperly indexed\" %self.queueID\r\n Graph.logQ.put( [logType , logLevel.ERROR , method , errorMsg])\r\n raise Exceptions.WorkerThreadIndexError(errorMsg)\r\n else:\r\n #Should not happen\r\n errorMsg = \"Unexpected shutdown verification response for worker thread on landmark %s\" %self.queueID\r\n Graph.logQ.put( [logType , logLevel.ERROR , method , errorMsg])\r\n raise Exceptions.WorkerThreadIndexError(errorMsg)\r\n break\r\n except queue.Empty:\r\n pass\r\n except Exceptions.WorkerThreadTerminationRollback:\r\n raise Exceptions.WorkerThreadTerminationRollback()\r\n except Exception as e:\r\n errorMsg = \"Unexpected error during shutdown verification process for worker thread on landmark %s. Traceback= %s\" %(self.queueID, e)\r\n Graph.logQ.put( [logType , logLevel.ERROR , method , errorMsg])\r\n raise e",
"def _eventloop(self):\n logging.debug(\"%s - eventloop started\" % self.name)\n while not self.stopped:\n event = self.inqueue.get()\n if not event: break\n self.doevent(event)\n logging.debug(\"%s - eventloop stopped\" % self.name)",
"def _do_test(self):\n\n process_all_events()\n\n if self.list:\n (callback, args, kwargs) = self.list.pop(0)\n callback(*args, **kwargs)\n else:\n safe_exit(force=1)",
"def main_loop(self):\n LOGGER.info('Entering main event loop...')\n try:\n while self._handle_faucet_events():\n while not self._faucet_events.event_socket_connected:\n LOGGER.info('Attempting faucet event sock connection...')\n time.sleep(1)\n try:\n self._faucet_events.connect()\n self._restore_states()\n self._faucet_collector.set_state_restored(True)\n except Exception as e:\n LOGGER.error(\"Cannot restore states or connect to faucet: %s\", e)\n self._faucet_collector.set_state_restored(False, e)\n except KeyboardInterrupt:\n LOGGER.info('Keyboard interrupt. Exiting.')\n self._faucet_events.disconnect()\n except Exception as e:\n LOGGER.error(\"Exception: %s\", e)\n raise",
"def run(self):\n \n # Wrap the outer loop in a try block so we can do an orderly shutdown\n # should an exception occur:\n try:\n # Send out a STARTUP event:\n self.dispatchEvent(weewx.Event(weewx.STARTUP))\n \n syslog.syslog(syslog.LOG_INFO, \"engine: Starting main packet loop.\")\n\n last_gc = int(time.time())\n\n # This is the outer loop. \n while True:\n\n # See if garbage collection is scheduled:\n if int(time.time()) - last_gc > self.gc_interval:\n ngc = gc.collect()\n syslog.syslog(syslog.LOG_INFO, \"engine: garbage collected %d objects\" % ngc)\n last_gc = int(time.time())\n\n # First, let any interested services know the packet LOOP is\n # about to start\n self.dispatchEvent(weewx.Event(weewx.PRE_LOOP))\n \n # Get ready to enter the main packet loop. An exception of type\n # BreakLoop will get thrown when a service wants to break the\n # loop and interact with the console.\n try:\n \n # And this is the main packet LOOP. It will continuously\n # generate LOOP packets until some service breaks it by\n # throwing an exception (usually when an archive period\n # has passed).\n for packet in self.console.genLoopPackets():\n \n # Package the packet as an event, then dispatch it.\n self.dispatchEvent(weewx.Event(weewx.NEW_LOOP_PACKET, packet=packet))\n\n # Allow services to break the loop by throwing\n # an exception:\n self.dispatchEvent(weewx.Event(weewx.CHECK_LOOP, packet=packet))\n\n syslog.syslog(syslog.LOG_CRIT, \"engine: Internal error. Packet loop has exited.\")\n \n except BreakLoop:\n \n # Send out an event saying the packet LOOP is done:\n self.dispatchEvent(weewx.Event(weewx.POST_LOOP))\n\n finally:\n # The main loop has exited. Shut the engine down.\n syslog.syslog(syslog.LOG_DEBUG, \"engine: Main loop exiting. Shutting engine down.\")\n self.shutDown()",
"def heartbeat_event(self, event):\r\n while not self.imm_jobs.empty():\r\n imm_job = self.imm_jobs.get_nowait()\r\n imm_job(self)\r\n \r\n if self.do_reconfigure:\r\n self.selmgr.reconfigure(self.current_consensus())\r\n self.do_reconfigure = False\r\n \r\n if self.run_all_jobs:\r\n while not self.low_prio_jobs.empty() and self.run_all_jobs:\r\n imm_job = self.low_prio_jobs.get_nowait()\r\n imm_job(self)\r\n self.run_all_jobs = False\r\n return\r\n\r\n # If event is stream:NEW*/DETACHED or circ BUILT/FAILED, \r\n # don't run low prio jobs.. No need to delay streams for them.\r\n if PathBuilder.is_urgent_event(event): return\r\n \r\n # Do the low prio jobs one at a time in case a \r\n # higher priority event is queued \r\n if not self.low_prio_jobs.empty():\r\n delay_job = self.low_prio_jobs.get_nowait()\r\n delay_job(self)",
"def run(self):\n self.workhorse_.run()\n try:\n while(True):\n self.workhorse_.heartbeat()\n self.periodic_snapshot()\n except workflow.NoMoreWork:\n print \"Fini.\"\n exit(0)\n exit(-1)",
"def test_heartbeat(self):\n pass",
"def processEvent(self):\n # Note: break out of event dispatch loop when closedown event is received\n # and closing flag is set. This is to prevent DoS attack by faked closedown\n # event type, and to ensure that prior events received are all processed.\n delay_on_error_min = 0.125 # Back off retry interval on error..\n delay_on_error_max = 20.0 # ..\n delay_on_error = delay_on_error_min # ..\n while True:\n if delay_on_error < delay_on_error_max:\n delay_on_error *= 2\n try:\n # PLEASE NOTE: In the event that the HTTPC is run as duplex, not simplex\n # then the post methods will be delayed if nothing is sent down to the client\n # from the server. This timeout is controlled by QUEUE_WAIT_TIMEOUT in EventRouterHTTPS.py\n if self._simplex == True:\n self._queueEvent.wait()\n self._queueEvent.clear()\n \n if not self._queue.empty():\n Trace(\"%s queue.get ...\"%(self.getUri()), \"EventLib.EventRelayHTTPC\")\n ###msgbody = self._queue.get()\n ###Trace(\"%s get msgbody: %s\"%(self.getUri(),msgbody), \"EventLib.EventRelayHTTPC\")\n ###self._event.set()\n msgbody = self.getQueuedItem()\n [typ,env] = msgbody\n if typ == \"closedown\":\n if self._closing: break\n else:\n # process request as an HTTP POST request\n data = makeEnvelopeData(env)\n headers = { \"Content-type\": \"text/plain\",\n \"Accept\": \"text/plain\",\n \"Content-length\": str(len(data)) }\n self._httpcon.request(\"POST\", \"/request_path_ignored\", data, headers)\n response = self._httpcon.getresponse()\n delay_on_error = delay_on_error_min\n elif self._simplex == False:\n # Nothing in queue:\n # issue a GET for incoming events\n _log.info(\"%s HTTP get ...\"%(self.getUri()))\n headers = { \"Accept\": \"text/plain\" }\n self._httpcon.request(\"GET\", \"/request_path_ignored\", None, headers)\n response = self._httpcon.getresponse()\n if response.status == 200:\n delay_on_error = delay_on_error_min\n msgbody = response.read()\n Trace(\"%s get msgbody: %s\"%(self.getUri(),msgbody), \"EventLib.EventRelayHTTPC\")\n # Parse message and act accordingly\n msgdata = parseMessageData(msgbody)\n Trace(\"%s get msgdata: %s\"%(self.getUri(),str(msgdata)), \"EventLib.EventRelayHTTPC\")\n if msgdata == None:\n #TODO: Log \"Request body malformed\"\n pass\n elif msgdata[0] == \"forward\":\n # msgdata = [\"forward\", [['R1', 'R2', 'R3'], 'ev:typ', 'ev:src', 'payload']]\n event = makeEvent(evtype=msgdata[1][1],source=msgdata[1][2],payload=msgdata[1][3])\n env = constructEnvelope(msgdata[1][0], event)\n self.forward(event, env)\n elif msgdata[0] == \"idle\":\n # Idle response gives client a chance to send if anything is queued\n pass\n else:\n #TODO: handle closedown message?\n Warn( \"%s Request body unrecognized option: %s\"%(self.getUri(),msgdata[0]), \"EventRelayHTTPC\")\n pass\n elif response.status == 503:\n Trace( \"%s processEvent error response: %u, %s\"%(self.getUri(),response.status,response.reason), \"EventLib.EventRelayHTTPC\")\n # Remote end closed down\n break\n else:\n # TODO: (log error response)\n Warn( \"%s processEvent error response: %u, %s\"%(self.getUri(),response.status,response.reason), \"EventLib.EventRelayHTTPC\")\n time.sleep(delay_on_error)\n \n except httplib.BadStatusLine, e:\n # This can happen at closedown\n Info( \"%s processEvent bad response: %s\"%(self.getUri(), str(e)), \"EventLib.EventRelayHTTPC\")\n time.sleep(delay_on_error)\n except httplib.CannotSendRequest, e:\n # This can happen at closedown\n Info( \"%s Cannot send request: %s\"%(self.getUri(), str(e)), \"EventLib.EventRelayHTTPC\")\n 
time.sleep(delay_on_error)\n except httplib.ResponseNotReady, e:\n # This can happen at startup and sometimes other times:\n # maybe multiple requests on a single HTTP connection object?\n Info( \"%s Response not ready: (%s)\"%(self.getUri(), str(e)), \"EventLib.EventRelayHTTPC\")\n time.sleep(delay_on_error)\n except socket.error, e:\n Warn( \"%s Socket error: %s\"%(self.getUri(), str(e)), \"EventLib.EventRelayHTTPC\")\n time.sleep(delay_on_error)\n return",
"def simulateCore(self):\n while len(self.event_q) > 0:\n evts = self.nextEvents()\n self.handleEvents(evts)\n self.gatherSystemStatistics(self.scheduler.system)\n self.dumpEventQueue()",
"def _event_loop(self):\n while True:\n self.scheduler.run(blocking=True)\n time.sleep(1)",
"def run():\n\n while True:\n\n # get event, blah\n event_name, event_data = revent.get_event(block=True, timeout=5)\n\n if event_name is not None:\n print 'received: %s' % event_name\n\n if event_name.endswith('_oembed_details'):\n handle_new_oembed_details(event_data)\n\n elif event_name == 'new_tweet':\n handle_new_tweet(event_data)\n\n # and we're done\n assert revent.verify_msg(event_name, event_data), \\\n \"Could not verify %s\" % event_name",
"def run():\r\n event = threading.Event()\r\n while (event.is_set() == False):\r\n # perform database backup\r\n backup()\r\n\r\n # sleep for the predefined amount interval\r\n event.wait(BACKUP_INTERVAL)",
"def _heartbeat_loop(self):\n # set last time so that \"if t_now - t_last >= HEARTBEAT_LOG_INTERVAL\"\n # below evalutes to True on the first run\n t_last = time.time() - HEARTBEAT_LOG_INTERVAL - 1\n while True:\n alive = 0\n # count alive processes \n for p in PROCESSES:\n if p.is_alive():\n alive += 1\n\n # no processes are alive - exit heartbeat loop\n if alive == 0:\n return\n\n t_now = time.time()\n if t_now - t_last >= HEARTBEAT_LOG_INTERVAL:\n # log heartbeat\n obj = { \n 'timestamp': time.time(),\n 'child_procs_total': self._procs_total,\n 'child_procs_alive': alive,\n 'probe_req_queue_len': self._probe_request_queue.qsize(),\n 'probe_resp_queue_len': \\\n self._probe_response_queue.qsize(), \n }\n \n # push to shared mem\n self._sm.set(config.BASE['SHARED_MEM_HEARTBEAT_KEY'],\n json.dumps(obj), HEARTBEAT_TTL)\n LOG.debug('pushed a heartbeat to the shared memory')\n\n t_last = t_now\n\n time.sleep(HEARTBEAT_LOOP_INTERVAL)",
"def test_cbbackupmgr_with_eventing(self):\n if \"5.5\" > self.cb_version[:3]:\n self.fail(\"This eventing test is only for cb version 5.5 and later. \")\n from pytests.eventing.eventing_constants import HANDLER_CODE\n from lib.testconstants import STANDARD_BUCKET_PORT\n\n self.src_bucket_name = self.input.param('src_bucket_name', 'src_bucket')\n self.eventing_log_level = self.input.param('eventing_log_level', 'INFO')\n self.dst_bucket_name = self.input.param('dst_bucket_name', 'dst_bucket')\n self.dst_bucket_name1 = self.input.param('dst_bucket_name1', 'dst_bucket1')\n self.metadata_bucket_name = self.input.param('metadata_bucket_name', 'metadata')\n self.create_functions_buckets = self.input.param('create_functions_buckets', True)\n self.docs_per_day = self.input.param(\"doc-per-day\", 1)\n self.use_memory_manager = self.input.param('use_memory_manager', True)\n self.backup_before_eventing = self.input.param('backup_before_eventing', False)\n bucket_params = self._create_bucket_params(server=self.master, size=256,\n replicas=self.num_replicas)\n self.cluster.create_standard_bucket(name=self.src_bucket_name, port=STANDARD_BUCKET_PORT + 1,\n bucket_params=bucket_params)\n self.buckets = RestConnection(self.master).get_buckets()\n self.src_bucket = RestConnection(self.master).get_buckets()\n self.cluster.create_standard_bucket(name=self.dst_bucket_name, port=STANDARD_BUCKET_PORT + 1,\n bucket_params=bucket_params)\n self.backup_create()\n if (self.backup_before_eventing):\n self.backup_cluster()\n self.cluster.create_standard_bucket(name=self.metadata_bucket_name, port=STANDARD_BUCKET_PORT + 1,\n bucket_params=bucket_params)\n self.buckets = RestConnection(self.master).get_buckets()\n self.gens_load = self.generate_docs(self.docs_per_day)\n self.expiry = 3\n\n self.restServer = self.get_nodes_from_services_map(service_type=\"eventing\")\n self.rest = RestConnection(self.restServer)\n\n\n self.load(self.gens_load, buckets=self.buckets, flag=self.item_flag, verify_data=False,\n batch_size=self.batch_size)\n function_name = \"Function_{0}_{1}\".format(randint(1, 1000000000), self._testMethodName)\n self.function_name = function_name[0:90]\n body = self.create_save_function_body(self.function_name, HANDLER_CODE.BUCKET_OPS_ON_UPDATE, worker_count=3)\n bk_events_created = False\n rs_events_created = False\n try:\n self.deploy_function(body)\n bk_events_created = True\n self.backup_cluster()\n rest_bk = RestConnection(self.backupset.cluster_host)\n bk_fxn = rest_bk.get_all_functions()\n\n backup_index = 0\n\n if self.backup_before_eventing:\n backup_index = 1\n self.backupset.start = 1\n self.backupset.end = 2\n\n if bk_fxn != \"\":\n self._verify_backup_events_definition(json.loads(bk_fxn), body, backup_index = backup_index)\n\n self.backup_restore()\n\n rest_rs = RestConnection(self.backupset.restore_cluster_host)\n\n if self.backup_before_eventing:\n self.assertTrue('metadata' in [bucket.name for bucket in rest_rs.get_buckets()])\n\n self.bkrs_resume_function(body, rest_rs)\n rs_events_created = True\n self._verify_restore_events_definition(bk_fxn)\n except Exception as e:\n self.fail(e)\n finally:\n master_nodes = [self.backupset.cluster_host,\n self.backupset.restore_cluster_host]\n for node in master_nodes:\n rest = RestConnection(node)\n self.bkrs_undeploy_and_delete_function(body, rest, node)\n self.rest = RestConnection(self.master)",
"def stopeventmonitor(self):\n self.doeventloop = False\n if self.service is not None:\n self.service.breakloop()\n # reset the service, otherwise nextEvent won\"t work\n self.initeventservice(shutdown=True)\n if self.eventmonthread is not None:\n if emane.VERSION >= emane.EMANE091:\n self.eventmonthread._Thread__stop()\n self.eventmonthread.join()\n self.eventmonthread = None",
"def run(self):\n\n (robotProc, iRMsg, robotStat, robotInfo, robotCmd, bcMsg, cbpaeRun, wsInfo) = self.prepVars()\n\n broadcasterProc = self.startBroadcaster(cbpaeRun, bcMsg, iRMsg)\n\n# =============================================================================\n# # pass additional queues to the robot processes by overloading this method\n# =============================================================================\n robotProc = self.startRobots(robotProc, iRMsg, bcMsg, robotInfo, robotCmd, robotStat)\n\n guiProc = self.startGui(wsInfo, robotInfo, robotCmd)\n\n# =============================================================================\n# # This is the main loop checking robotProcs\n# =============================================================================\n rJoinable = self.checkRJoinable(robotProc, robotStat)\n\n self.stopBroadcaster(cbpaeRun)\n\n self.clearQueues(iRMsg, robotCmd, robotInfo)\n\n self.joinRobotProc(robotProc)\n\n self.logBasicInfo()\n\n print (\"CBPAE Trial Finished!!!\")",
"def run(self):\n \n # Loop through all checkers to do an initial state check\n for checker in self.checkers:\n checker.update_last_state()\n\n # Send initial heartbeat\n self._send_heartbeat()\n \n # Main loop\n while True: \n html = \"\"\n for checker in self.checkers:\n if checker.just_changed_state():\n log.warn(\"Checker {} has changed state.\"\n .format(checker.name))\n html += \"<li>\" + checker.html() + \"</li>\\n\"\n \n if isinstance(checker, Process) and checker.state() == FAIL:\n log.warn(\"Process {} is not running.\"\n .format(checker.name))\n html += (\"<li>Attempting to restart \" + \n escape(checker.name) + \"...</li>\\n\")\n try:\n checker.restart()\n except MaxRetriesError, e:\n self.shutdown_reason = str(e)\n return\n time.sleep(5)\n html += (\"<li>State after restart: \" + \n checker.html() + \"</li>\\n\")\n\n if html:\n html = \"<h2>STATE CHANGED:</h2>\\n<ul>\\n\" + html + \"</ul>\\n\" \n html += self.html()\n html += run_commands(self.state_change_cmds)\n self.send_email_with_time(html=html,\n subject=\"Babysitter detected\"\n \" state change.\")\n\n if self._need_to_send_heartbeat():\n self._send_heartbeat()\n\n # Check if a new data subdir has been created\n if self.base_data_dir and self.sub_data_dir:\n if self._find_last_numeric_subdir() != self.sub_data_dir:\n self._send_heartbeat(\"<p>New subdir found so about to restart \"\n \"babysitter. Below are the last stats \"\n \"for the old data subdirectory.</p>\\n\")\n raise NewDataDirError()\n \n time.sleep(UPDATE_PERIOD)",
"def loop_run(self):\n super(EventLoop, self).loop_run()\n self.inq = self.cothread.EventQueue()",
"def handle_wake_up(_):\n loop.awaken()",
"async def test_events_handled_on_event_loop(self):\n session = _create_test_session(asyncio.get_running_loop())\n\n handle_event_spy = MagicMock(\n side_effect=session._handle_scriptrunner_event_on_event_loop\n )\n session._handle_scriptrunner_event_on_event_loop = handle_event_spy\n\n # Send a ScriptRunner event from another thread\n thread = threading.Thread(\n target=lambda: session._on_scriptrunner_event(\n sender=MagicMock(), event=ScriptRunnerEvent.SCRIPT_STARTED\n )\n )\n thread.start()\n thread.join()\n\n # _handle_scriptrunner_event_on_event_loop won't have been called\n # yet, because we haven't yielded the eventloop.\n handle_event_spy.assert_not_called()\n\n # Yield to let the AppSession's callbacks run.\n # _handle_scriptrunner_event_on_event_loop will be called here.\n await asyncio.sleep(0)\n\n handle_event_spy.assert_called_once()",
"def test_dispatch_event(self):\n msg_helper = MessageHelper()\n worker_helper = WorkerHelper()\n broker = self.setup_broker(worker_helper)\n self.assertEqual(broker.get_messages('vumi', 'fooconn.event'), [])\n msg = msg_helper.make_ack()\n yield worker_helper.dispatch_event(msg, 'fooconn')\n self.assertEqual(broker.get_messages('vumi', 'fooconn.event'), [msg])",
"def main_loop(self):\n # run for only the allotted time (lifetime)\n for _ in range(self.lifetime * self.ticks_per_second):\n start_time = time()\n new_message, queue_len = self.communicator.get_message()\n if new_message is None: # no incoming messages\n self.do_random_task()\n else:\n # Convert string message back into tuple of ints\n new_message = list(map(int, new_message.split('@@@')))\n self.handle_incoming_message(new_message, queue_len)\n\n # this accounts for the time already taken in test_communication\n # and other activities from the total time allotted for the loop iteration\n already_taken = time() - start_time\n sleep_time = max(1/self.ticks_per_second - already_taken, 0)\n sleep(sleep_time)"
] | [
"0.7295389",
"0.7248762",
"0.6957512",
"0.67003644",
"0.66377956",
"0.6541956",
"0.64540994",
"0.64049834",
"0.62294203",
"0.62108946",
"0.61638576",
"0.61331415",
"0.6129716",
"0.60833454",
"0.60628366",
"0.6045858",
"0.6027141",
"0.6023538",
"0.5971657",
"0.59704083",
"0.593853",
"0.5863316",
"0.58441013",
"0.58319986",
"0.58279014",
"0.5823859",
"0.58149815",
"0.581178",
"0.5810279",
"0.5801412"
] | 0.7251965 | 1 |
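The record above centres on an event-driven dispatch pattern: an outer loop that advances market bars and an inner loop that drains an event queue, routing each event by its type. The following is a minimal, self-contained sketch of that pattern, not the original backtester; every name in it (DummyEvent, DummyDataHandler, run_backtest) is a hypothetical stand-in introduced only for illustration.

```python
# Minimal sketch of the outer-bar / inner-event loop shown in the record above.
# All class names here are hypothetical stand-ins, not the original backtester.
import queue
import time


class DummyEvent:
    def __init__(self, type_):
        self.type = type_


class DummyDataHandler:
    """Pushes a fixed number of MARKET events, then ends the backtest."""

    def __init__(self, events, n_bars=3):
        self.events = events
        self.continue_backtest = True
        self._remaining = n_bars

    def update_bars(self):
        if self._remaining == 0:
            self.continue_backtest = False
            return
        self._remaining -= 1
        self.events.put(DummyEvent('MARKET'))


def run_backtest(heartbeat=0.0):
    events = queue.Queue()
    data_handler = DummyDataHandler(events)
    processed = 0

    while True:
        # Outer loop: advance the market data one bar at a time.
        if data_handler.continue_backtest:
            data_handler.update_bars()
        else:
            break

        # Inner loop: drain every event generated for this bar.
        while True:
            try:
                event = events.get(False)
            except queue.Empty:
                break
            if event.type == 'MARKET':
                processed += 1  # a real engine would call strategy/portfolio here

        time.sleep(heartbeat)

    return processed


if __name__ == '__main__':
    print(run_backtest())  # prints 3
```

Draining the queue with a non-blocking get(False) and breaking on queue.Empty keeps one bar's events from bleeding into the next bar, which is the same design choice visible in the record's code.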
Generates the trading instance objects from their class types. This method attaches all of the trading objects (DataHandler, Strategy, Portfolio, and ExecutionHandler) to various internal members. This ties all of the other classes together within the Backtester object. | def _generate_trading_instances(self, strategy_params_dict):
print("Creating DataHandler, Strategy, Portfolio, and ExecutionHandler for")
print("strategy parameter list: %s..." % strategy_params_dict)
# Set internal data members equal to the classes we passed in earlier, along with necessary parameters.
# https://softwareengineering.stackexchange.com/questions/131403/what-is-the-name-of-in-python/131415
self.data_handler = self.data_handler_class(self.events, self.csv_dir, self.symbol_list)
self.strategy = self.strategy_class(self.data_handler, self.events, **strategy_params_dict)
self.portfolio = self.portfolio_class(self.data_handler, self.events, self.start_date, self.initial_capital)
self.execution_handler = self.execution_handler_class(self.events) # The Event Queue sent to ExecutionHandler | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _generate_trading_instances(self):\n print(\"Creating DataHandler, Strategy, Portfolio, and ExecutionHandler for\")\n\n # Set internal data members equal to the classes we passed in earlier, along with necessary parameters.\n # https://softwareengineering.stackexchange.com/questions/131403/what-is-the-name-of-in-python/131415\n self.data_handler = self.data_handler_class(self.events, self.csv_dir, self.symbol_list)\n self.strategy = self.strategy_class(self.data_handler, self.events)\n self.portfolio = self.portfolio_class(self.data_handler, self.events, self.start_date, self.initial_capital)\n self.execution_handler = self.execution_handler_class(self.events) # The Event Queue sent to ExecutionHandler",
"def _generate_trading_instances(self):\n print(\n \"Initizalization...\"\n )\n\n self.data_handler = self.data_handler_cls(self.events, self.csv_dir, self.symbol_list, self.start_date,\n self.end_date)\n self.portfolio = self.portfolio_cls(self.data_handler, self.events, self.initial_capital, self.start_date,\n self.strategy_id, self.spread, self.commission,self.csv_dir)\n self.strategy = self.strategy_cls(self.data_handler, self.events, self.portfolio, self.spread, self.commission)\n self.plot = self.plot_cls(self.csv_dir, self.portfolio, self.strategy_id)",
"def _generate_trading_instances(self, start_date, end_date, instruments, params):\n configuration = self.configuration\n configuration.start_date = start_date\n configuration.end_date = end_date\n configuration.instruments = instruments\n\n logger.info(\"Creating DataHandler, Strategy, Portfolio and ExecutionHandler\")\n logger.info(\"Start date: %s\" % start_date)\n logger.info(\"End date: %s\" % end_date)\n logger.info(\"Instrument(s): %s...\" % instruments)\n logger.info(\"Params: %s...\" % params)\n\n self.data_handler = self.data_handler_cls(self.events, configuration)\n self.strategy = self.strategy_cls(self.data_handler, self.events, configuration, **params)\n self.portfolio = self.portfolio_cls(self.data_handler, self.events, configuration)\n self.execution_handler = self.execution_handler_cls(self.data_handler, self.events, configuration)",
"def _generate_trading_instances(self, sp):\n print(\n \"Initialization...\"\n )\n self.data_handler = self.data_handler_cls(self.events, self.csv_dir, self.symbol_list, self.start_date,\n self.end_date)\n self.portfolio = self.portfolio_cls(self.data_handler, self.events, self.initial_capital, self.start_date,\n self.strategy_id, self.spread, self.commission,self.csv_dir)\n self.strategy = self.strategy_cls(self.data_handler, self.events, self.portfolio, self.spread, self.commission,\n sp)\n self.plot = self.plot_cls(self.csv_dir, self.portfolio, self.strategy_id)",
"def create_all(self, registry):\n for cls in registry.values():\n self.create_class(cls)",
"def _fill_class_dicts():\n global _taxonomy_classes\n global _data_classes\n if not _taxonomy_classes:\n _taxonomy_classes = get_taxonomies()\n if not _data_classes:\n stack = []\n next_module = data\n while next_module is not None:\n stack += _inspect_module(next_module)\n if stack:\n next_module = stack.pop()\n else:\n next_module = None",
"def make_objects(self):\n pass",
"def __init__(self):\n if DynamicImporter._instance is not None:\n raise Exception(\"DynamicImporter instance already exists!\")\n DynamicImporter._instance = self\n\n current_path = Path(__file__).parent\n test_path = current_path / \"testdata\"\n files = test_path.rglob(\"*.py\")\n\n for file in files:\n\n if file.name in [\"__init__.py\", \"test_module.py\", \"test_registry.py\", \"connections.py\"]:\n continue\n\n name = file.stem\n module = import_module(f\"testdata.{name}\")\n class_title = f\"{name.title()}Test\"\n\n try:\n _class = getattr(module, class_title) # get the class\n self.class_list[class_title] = _class # add the class to the class list\n except AttributeError: # don't throw exceptions for files that don't have a test\n continue",
"def __init__(self):\n for base in AutomationSetup.__bases__:\n base.__init__(self)",
"def setUpClass(cls):\n\n Base._Base__nb_objects = 0\n cls.b1 = Base()\n cls.b2 = Base()\n cls.b3 = Base(22)\n cls.b4 = Base(2.2)\n cls.b5 = Base(\"two\")\n cls.r1 = Rectangle(10, 7, 2, 8)\n cls.r2 = Rectangle(2, 4)",
"def setup_class(self):\n\n class SubFLRW(FLRW):\n def w(self, z):\n return super().w(z)\n\n self.cls = SubFLRW\n # H0, Om0, Ode0\n self.cls_args = (70 * u.km / u.s / u.Mpc, 0.27 * u.one, 0.689 * u.one)\n self.cls_kwargs = dict(Tcmb0=3.0 * u.K, name=self.__class__.__name__, meta={\"a\": \"b\"})",
"def init_elect_types(self):\n self.wta = WinnerTakeAll()\n self.proportional = Proportional()\n self.schulze = Schulze()\n\n session.add_all([self.wta, self.proportional, self.schulze])",
"def _create_Work(classname, dataclass):\n globals()[classname] = type(classname, (Work, dataclass), {})",
"def XtremObjFactory(object_type, object_data, parent_connection):\r\n for cls in XtremObject.__subclasses__():\r\n if cls.is_class_for(object_type):\r\n return cls(object_data, parent_connection)",
"def _setup(self):\n\n # Get user data\n self.symbols = self._get_symbols()\n self.data_dict = self._get_data()\n self.portfolio = self.initialize_portfolio()\n\n if 'slippage' in self.portfolio:\n self.slippage = self.portfolio['slippage']\n else:\n self.slippage = None\n\n # Keep track of all trades\n self.trade_manager = TradeManager(\n self.symbols, self.portfolio, self.sql_config\n )\n\n # Initialize state variables that are updated each iteration\n self.date = None\n self.data = None\n self.symbol = None\n self.currency = None\n self.last_buy = None\n self.num_unresolved = 0\n self.unresolved_trade = False",
"def setup_class(klass):",
"def setup_class(klass):",
"def make_class(attributes, base_classes=()):\r\n \"*** YOUR CODE HERE ***\"",
"def _classes_(cls):\n for base_cls in cls.__bases__:\n # Avoid infinite loop\n if base_cls == Sandbox:\n continue\n\n yield base_cls",
"def _prepare_wsdl_objects(self):\r\n\r\n\t# Default behavior is to not request transit information\r\n\tself.ReturnTransitAndCommit = False\r\n\r\n # This is the primary data structure for processShipment requests.\r\n self.RequestedShipment = self.client.factory.create('RequestedShipment')\r\n self.RequestedShipment.ShipTimestamp = datetime.now()\r\n \r\n TotalWeight = self.client.factory.create('Weight')\r\n # Start at nothing.\r\n TotalWeight.Value = 0.0\r\n # Default to pounds.\r\n TotalWeight.Units = 'LB'\r\n # This is the total weight of the entire shipment. Shipments may\r\n # contain more than one package.\r\n self.RequestedShipment.TotalWeight = TotalWeight\r\n \r\n # This is the top level data structure for Shipper information.\r\n ShipperParty = self.client.factory.create('Party')\r\n ShipperParty.Address = self.client.factory.create('Address')\r\n ShipperParty.Contact = self.client.factory.create('Contact')\r\n \r\n # Link the ShipperParty to our master data structure.\r\n self.RequestedShipment.Shipper = ShipperParty\r\n\r\n # This is the top level data structure for Recipient information.\r\n RecipientParty = self.client.factory.create('Party')\r\n RecipientParty.Contact = self.client.factory.create('Contact')\r\n RecipientParty.Address = self.client.factory.create('Address')\r\n \r\n # Link the RecipientParty object to our master data structure.\r\n self.RequestedShipment.Recipient = RecipientParty\r\n \r\n Payor = self.client.factory.create('Payor')\r\n # Grab the account number from the FedexConfig object by default.\r\n Payor.AccountNumber = self._config_obj.account_number\r\n # Assume US.\r\n Payor.CountryCode = 'US'\r\n \r\n ShippingChargesPayment = self.client.factory.create('Payment')\r\n ShippingChargesPayment.Payor = Payor\r\n\r\n self.RequestedShipment.ShippingChargesPayment = ShippingChargesPayment\r\n \r\n # ACCOUNT or LIST\r\n self.RequestedShipment.RateRequestTypes = ['ACCOUNT'] \r\n \r\n # Start with no packages, user must add them.\r\n self.RequestedShipment.PackageCount = 0\r\n self.RequestedShipment.RequestedPackageLineItems = []\r\n \r\n # This is good to review if you'd like to see what the data structure\r\n # looks like.\r\n self.logger.debug(self.RequestedShipment)",
"def setUpClass(cls):\r\n cls.jon = Person.create(name='Jon', age=143)\r\n cls.eric = Person.create(name='Eric', age=25)\r\n cls.blake = Person.create(name='Blake', age=14)\r\n\r\n cls.physics = Course.create(name='Physics 264', credits=1.0)\r\n cls.beekeeping = Course.create(name='Beekeeping', credits=15.0)\r\n cls.theoretics = Course.create(name='Theoretical Theoretics', credits=-3.5)\r\n\r\n cls.eric_in_physics = EnrolledIn.create(cls.eric, cls.physics, date_enrolled=datetime.now(),\r\n enthusiasm=10) # eric loves physics\r\n cls.jon_in_beekeeping = EnrolledIn.create(cls.jon, cls.beekeeping, date_enrolled=datetime.now(),\r\n enthusiasm=1) # jon hates beekeeping\r\n\r\n cls.blake_in_theoretics = EnrolledIn.create(cls.blake, cls.theoretics, date_enrolled=datetime.now(),\r\n enthusiasm=8)\r\n\r\n cls.blake_beekeeping = TaughtBy.create(cls.beekeeping, cls.blake, overall_mood='Pedantic')\r\n cls.jon_physics = TaughtBy.create(cls.physics, cls.jon, overall_mood='Creepy')\r\n cls.eric_theoretics = TaughtBy.create(cls.theoretics, cls.eric, overall_mood='Obtuse')",
"def __init__(self):\n self.classes = {}",
"def setup_class(cls):",
"def setup_class(cls):",
"def _prepare_wsdl_objects(self):\r\n self.DeletionControlType = self.client.factory.create('DeletionControlType')\r\n self.TrackingId = self.client.factory.create('TrackingId')\r\n self.TrackingId.TrackingIdType = self.client.factory.create('TrackingIdType')",
"def setup_class(cls):\n cls._patch_logger()\n cls.multiplexer = Multiplexer(\n [DummyConnection(connection_id=DUMMY_CONNECTION_PUBLIC_ID)]\n )\n cls.outbox = OutBox(cls.multiplexer)\n private_key_pem_path = os.path.join(CUR_PATH, \"data\", \"fet_private_key.txt\")\n eth_private_key_pem_path = os.path.join(CUR_PATH, \"data\", \"fet_private_key.txt\")\n cls.wallet = Wallet(\n {FETCHAI: private_key_pem_path, ETHEREUM: eth_private_key_pem_path}\n )\n cls.ledger_apis = LedgerApis({FETCHAI: DEFAULT_FETCHAI_CONFIG}, FETCHAI)\n cls.agent_name = \"test\"\n cls.ownership_state = OwnershipState()\n cls.preferences = Preferences()\n cls.decision_maker = DecisionMaker(\n agent_name=cls.agent_name,\n max_reactions=MAX_REACTIONS,\n outbox=cls.outbox,\n wallet=cls.wallet,\n ledger_apis=cls.ledger_apis,\n )\n cls.multiplexer.connect()\n\n cls.tx_id = \"transaction0\"\n cls.tx_sender_addr = \"agent_1\"\n cls.tx_counterparty_addr = \"pk\"\n cls.info = {\"some_info_key\": \"some_info_value\"}\n cls.ledger_id = \"fetchai\"\n\n cls.decision_maker.start()",
"def setUpClass(cls):\n super().setUpClass()\n\n cls.accessor = OCPReportDBAccessor(cls.schema)\n cls.report_schema = cls.accessor.report_schema\n cls.all_tables = list(OCP_REPORT_TABLE_MAP.values())\n cls.creator = ReportObjectCreator(cls.schema)\n cls.date_accessor = DateHelper()\n cls.manifest_accessor = ReportManifestDBAccessor()\n cls.dh = DateHelper()",
"def load_asset_classes(self) -> None:\n provider = AlphaVantageProvider()\n trends_provider = GoogleTrendsProvider()\n self.asset_classes.append(AssetClass.CurrencyClass(provider, trends_provider))\n self.asset_classes.append(AssetClass.StockClass(provider, trends_provider))",
"def gen_extractors():\n return [klass() for klass in gen_extractor_classes()]",
"def build_etl_classes(self):\n\n self.clear_etl_classes()\n\n for config in list(self.configs.values()):\n\n etl_class = self.build(config)\n\n self.add_etl_class(etl_class)"
] | [
"0.7944597",
"0.71021026",
"0.6479502",
"0.6362634",
"0.56881684",
"0.56321007",
"0.5572392",
"0.55376",
"0.5502691",
"0.5442817",
"0.5403161",
"0.5379905",
"0.5372698",
"0.5296769",
"0.52877635",
"0.5230109",
"0.5230109",
"0.5185296",
"0.5165134",
"0.5138819",
"0.5119774",
"0.50943846",
"0.5055732",
"0.5055732",
"0.50314504",
"0.5024732",
"0.5023778",
"0.50164944",
"0.5010884",
"0.500447"
] | 0.71861804 | 1 |
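The record above demonstrates wiring by class reference: the engine stores the DataHandler/Strategy/Portfolio/ExecutionHandler classes themselves and instantiates them later with a shared event queue and per-run parameters, which is what makes parameter sweeps possible. Below is a hedged sketch of that pattern using stubbed components; StubDataHandler, StubStrategy, and Wiring are hypothetical names, not part of the original code.

```python
# Sketch of the "store classes, instantiate later" wiring from the record above.
# The stub classes are hypothetical placeholders for the real components.
import queue


class StubDataHandler:
    def __init__(self, events, csv_dir, symbol_list):
        self.events, self.csv_dir, self.symbol_list = events, csv_dir, symbol_list


class StubStrategy:
    def __init__(self, bars, events, **params):
        self.bars, self.events, self.params = bars, events, params


class Wiring:
    def __init__(self, csv_dir, symbol_list, data_handler_class, strategy_class):
        # Store the *classes*, not instances, so a fresh set of objects can be
        # built for each strategy parameter dictionary.
        self.csv_dir = csv_dir
        self.symbol_list = symbol_list
        self.data_handler_class = data_handler_class
        self.strategy_class = strategy_class
        self.events = queue.Queue()

    def generate_trading_instances(self, strategy_params):
        self.data_handler = self.data_handler_class(
            self.events, self.csv_dir, self.symbol_list)
        self.strategy = self.strategy_class(
            self.data_handler, self.events, **strategy_params)


if __name__ == '__main__':
    w = Wiring('data/', ['AAPL'], StubDataHandler, StubStrategy)
    w.generate_trading_instances({'window': 20})
    print(type(w.strategy).__name__)  # prints StubStrategy
```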
Executes the backtest. This is where the signal handling of the Backtesting engine is carried out. There are two while loops, the outer loop (heartbeat) and the nested inner loop, which checks if there is an event in the Event Queue object. The inner loop acts on the Event by calling the appropriate method | def _run_backtest(self):
i = 0
while True:
i += 1
print(i)
# Update the market bars
if self.data_handler.continue_backtest == True:
self.data_handler.update_bars()
else:
break
# Handle the Events
while True:
try:
event = self.events.get(False)
except queue.Empty:
break
else:
# The inner-loop acts on the events by calling the appropriate method of the appropriate object
if event is not None:
if event.type == 'MARKET':
self.strategy.calculate_signals(event)
self.portfolio.update_timeindex(event)
elif event.type == 'SIGNAL':
self.signals += 1
self.portfolio.update_signal(event)
elif event.type == 'ORDER':
self.orders += 1
self.execution_handler.execute_order(event)
elif event.type == 'FILL':
self.fills += 1
self.portfolio.update_fill(event)
# Pauses for a duration of self.heartbeat seconds
time.sleep(self.heartbeat) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _run_backtest(self):\n i = 0\n \n while True:\n i += 1\n print(i)\n \n # Update the market bars\n if self.data_handler.continue_backtest == True:\n self.data_handler.update_bars()\n else:\n break\n \n # Handle the Events\n while True:\n try:\n event = self.events.get(False)\n except queue.Empty:\n break\n else:\n # The inner-loop acts on the events by calling the appropriate method of the appropriate object\n if event is not None:\n if event.type == 'MARKET':\n self.strategy.calculate_signals(event)\n self.portfolio.update_timeindex(event)\n \n elif event.type == 'SIGNAL':\n self.signals += 1\n self.portfolio.update_signal(event)\n \n elif event.type == 'ORDER':\n self.orders += 1\n self.execution_handler.execute_order(event)\n \n elif event.type == 'FILL':\n self.fills += 1\n self.portfolio.update_fill(event)\n \n # Pauses for a duration of self.heartbeat seconds\n time.sleep(self.heartbeat)",
"def _run_backtest(self):\n i = 0\n while True:\n i += 1\n if self.data_handler.continue_backtest == True:\n self.data_handler.update_bars()\n else:\n break\n while True:\n try:\n event = self.events.get(False)\n except Empty:\n break\n else:\n if event is not None:\n if event.type == EventType.MARKET:\n self.strategy.On_Bars(event)\n self.portfolio.update_balance(event)\n self.portfolio.order_check(event)\n elif event.type == EventType.ORDER_SEND:\n self.portfolio.update_order(event)\n elif event.type == EventType.ORDER_CLOSE:\n self.portfolio.update_order(event)\n self.portfolio.update_euity(event)\n elif event.type == EventType.ORDER_MODIFY:\n self.portfolio.update_order(event)\n time.sleep(self.heartbeat)",
"def _run_backtest(self):\n i = 0\n while True:\n i += 1\n if self.data_handler.continue_backtest == True:\n self.data_handler.update_bars()\n #print(self.data_handler.get_latest_bar_datetime(self.symbol_list[0]))\n else:\n break\n while self.backtest:\n try:\n event = self.events.get(False)\n except Empty:\n break\n else:\n if event is not None:\n if event.type == EventType.MARKET:\n try:\n self.strategy.On_Bars(event)\n self.portfolio.update_balance(event)\n self.portfolio.order_check(event)\n except EquityError:\n print('Not Engough Equity,Backtest Will be Stop...')\n self.backtest=False\n break\n elif event.type == EventType.ORDER_SEND:\n self.portfolio.update_order(event)\n elif event.type == EventType.ORDER_CLOSE:\n try:\n self.portfolio.update_order(event)\n self.portfolio.update_euity(event)\n except EquityError:\n print ('Not Engough Equity,Backtest Will be Stop...')\n self.backtest=False\n break\n elif event.type == EventType.ORDER_MODIFY:\n self.portfolio.update_order(event)\n time.sleep(self.heartbeat)",
"def run(self):\n\n self.halt = False\n\n print(\"Starting heartbeat.\")\n while not self.halt:\n event = events.TickEvent()\n AppState.get_state().get_event_manager().post_event(event)\n AppState.get_state().get_clock().tick(settings.MAX_FPS)",
"def run(self) -> None:\n\n while not self.stop_event.is_set():\n if self.my_queue:\n # if heartbeat received at '/heartbeat' route from the monitored peer,\n # sleep until next\n self.my_queue.clear()\n time.sleep(7)\n\n else:\n # else drop peer data from database and inform central server appending '0'\n # to my queue\n self.db_access.drop_peer(self.peer_id)\n self.my_queue.append(0)\n break",
"def event_loop(self):\n while self.ack is False:\n gevent.sleep(self.loop_interval)\n output_service = self.get_directory_service_proxy().get_service(\"mock-output-service\")\n output_service.put(\"test-worker-work-result\")\n self.ack = True",
"def __call__(self, event_queue, in_queue, out_queue):\n\n running = True\n self.in_q = in_queue # command received from the main thread\n self.out_q = out_queue # responses, commands to the main thread\n self.ev_q = event_queue # return pending events to the main thread\n shutdown = False\n self.light_state = False # current state of beat light\n\n # send first beat light message\n if self.btic.BeatLight() is True:\n self.out_q.put(\"beaton\")\n else:\n self.out_q.put(\"beatoff\")\n\n # run thread loop\n while running is True:\n if self.die_pending is False:\n self.sendPendingEvents()\n self.processCommands()\n\n \"\"\"\n if self.btic.BeatLightToggle() == True:\n self.out_q.put(\"beat\")\n wx.WakeUpIdle()\n \"\"\"\n\n # display beat light on UI\n light = self.btic.BeatLight()\n if light != self.light_state:\n self.light_state = light\n if light is True:\n self.out_q.put(\"beatoff\")\n else:\n self.out_q.put(\"beaton\")\n wx.WakeUpIdle() \n \n if self.allClear() is True:\n time.sleep(.01)\n #pass\n else:\n # stop the loop/thread when all is cleaned up\n self.sendPendingEvents()\n if self.allClear() is True:\n self.clearBank() \n self.die_pending = False\n running = False\n else:\n time.sleep(.01)\n # pass",
"def awaitVerification(self):\r\n method = moduleName + '.' + self.className + '.' + 'awaitVerification'\r\n while True:\r\n try:\r\n self._stopevent.wait(self._sleepperiod)\r\n verification = self.localCommQueue.get_nowait()\r\n if verification == terminationVerificationMsg.COMMIT:\r\n #The parent AE agrees that we can shutdown. Terminate\r\n break\r\n elif verification == terminationVerificationMsg.ROLLBACK:\r\n #Roll back the termination\r\n raise Exceptions.WorkerThreadTerminationRollback()\r\n elif verification == terminationVerificationMsg.ERROR:\r\n errorMsg = \"Worker thread for landmark %s is improperly indexed\" %self.queueID\r\n Graph.logQ.put( [logType , logLevel.ERROR , method , errorMsg])\r\n raise Exceptions.WorkerThreadIndexError(errorMsg)\r\n else:\r\n #Should not happen\r\n errorMsg = \"Unexpected shutdown verification response for worker thread on landmark %s\" %self.queueID\r\n Graph.logQ.put( [logType , logLevel.ERROR , method , errorMsg])\r\n raise Exceptions.WorkerThreadIndexError(errorMsg)\r\n break\r\n except queue.Empty:\r\n pass\r\n except Exceptions.WorkerThreadTerminationRollback:\r\n raise Exceptions.WorkerThreadTerminationRollback()\r\n except Exception as e:\r\n errorMsg = \"Unexpected error during shutdown verification process for worker thread on landmark %s. Traceback= %s\" %(self.queueID, e)\r\n Graph.logQ.put( [logType , logLevel.ERROR , method , errorMsg])\r\n raise e",
"def _eventloop(self):\n logging.debug(\"%s - eventloop started\" % self.name)\n while not self.stopped:\n event = self.inqueue.get()\n if not event: break\n self.doevent(event)\n logging.debug(\"%s - eventloop stopped\" % self.name)",
"def _do_test(self):\n\n process_all_events()\n\n if self.list:\n (callback, args, kwargs) = self.list.pop(0)\n callback(*args, **kwargs)\n else:\n safe_exit(force=1)",
"def main_loop(self):\n LOGGER.info('Entering main event loop...')\n try:\n while self._handle_faucet_events():\n while not self._faucet_events.event_socket_connected:\n LOGGER.info('Attempting faucet event sock connection...')\n time.sleep(1)\n try:\n self._faucet_events.connect()\n self._restore_states()\n self._faucet_collector.set_state_restored(True)\n except Exception as e:\n LOGGER.error(\"Cannot restore states or connect to faucet: %s\", e)\n self._faucet_collector.set_state_restored(False, e)\n except KeyboardInterrupt:\n LOGGER.info('Keyboard interrupt. Exiting.')\n self._faucet_events.disconnect()\n except Exception as e:\n LOGGER.error(\"Exception: %s\", e)\n raise",
"def run(self):\n \n # Wrap the outer loop in a try block so we can do an orderly shutdown\n # should an exception occur:\n try:\n # Send out a STARTUP event:\n self.dispatchEvent(weewx.Event(weewx.STARTUP))\n \n syslog.syslog(syslog.LOG_INFO, \"engine: Starting main packet loop.\")\n\n last_gc = int(time.time())\n\n # This is the outer loop. \n while True:\n\n # See if garbage collection is scheduled:\n if int(time.time()) - last_gc > self.gc_interval:\n ngc = gc.collect()\n syslog.syslog(syslog.LOG_INFO, \"engine: garbage collected %d objects\" % ngc)\n last_gc = int(time.time())\n\n # First, let any interested services know the packet LOOP is\n # about to start\n self.dispatchEvent(weewx.Event(weewx.PRE_LOOP))\n \n # Get ready to enter the main packet loop. An exception of type\n # BreakLoop will get thrown when a service wants to break the\n # loop and interact with the console.\n try:\n \n # And this is the main packet LOOP. It will continuously\n # generate LOOP packets until some service breaks it by\n # throwing an exception (usually when an archive period\n # has passed).\n for packet in self.console.genLoopPackets():\n \n # Package the packet as an event, then dispatch it.\n self.dispatchEvent(weewx.Event(weewx.NEW_LOOP_PACKET, packet=packet))\n\n # Allow services to break the loop by throwing\n # an exception:\n self.dispatchEvent(weewx.Event(weewx.CHECK_LOOP, packet=packet))\n\n syslog.syslog(syslog.LOG_CRIT, \"engine: Internal error. Packet loop has exited.\")\n \n except BreakLoop:\n \n # Send out an event saying the packet LOOP is done:\n self.dispatchEvent(weewx.Event(weewx.POST_LOOP))\n\n finally:\n # The main loop has exited. Shut the engine down.\n syslog.syslog(syslog.LOG_DEBUG, \"engine: Main loop exiting. Shutting engine down.\")\n self.shutDown()",
"def heartbeat_event(self, event):\r\n while not self.imm_jobs.empty():\r\n imm_job = self.imm_jobs.get_nowait()\r\n imm_job(self)\r\n \r\n if self.do_reconfigure:\r\n self.selmgr.reconfigure(self.current_consensus())\r\n self.do_reconfigure = False\r\n \r\n if self.run_all_jobs:\r\n while not self.low_prio_jobs.empty() and self.run_all_jobs:\r\n imm_job = self.low_prio_jobs.get_nowait()\r\n imm_job(self)\r\n self.run_all_jobs = False\r\n return\r\n\r\n # If event is stream:NEW*/DETACHED or circ BUILT/FAILED, \r\n # don't run low prio jobs.. No need to delay streams for them.\r\n if PathBuilder.is_urgent_event(event): return\r\n \r\n # Do the low prio jobs one at a time in case a \r\n # higher priority event is queued \r\n if not self.low_prio_jobs.empty():\r\n delay_job = self.low_prio_jobs.get_nowait()\r\n delay_job(self)",
"def run(self):\n self.workhorse_.run()\n try:\n while(True):\n self.workhorse_.heartbeat()\n self.periodic_snapshot()\n except workflow.NoMoreWork:\n print \"Fini.\"\n exit(0)\n exit(-1)",
"def test_heartbeat(self):\n pass",
"def processEvent(self):\n # Note: break out of event dispatch loop when closedown event is received\n # and closing flag is set. This is to prevent DoS attack by faked closedown\n # event type, and to ensure that prior events received are all processed.\n delay_on_error_min = 0.125 # Back off retry interval on error..\n delay_on_error_max = 20.0 # ..\n delay_on_error = delay_on_error_min # ..\n while True:\n if delay_on_error < delay_on_error_max:\n delay_on_error *= 2\n try:\n # PLEASE NOTE: In the event that the HTTPC is run as duplex, not simplex\n # then the post methods will be delayed if nothing is sent down to the client\n # from the server. This timeout is controlled by QUEUE_WAIT_TIMEOUT in EventRouterHTTPS.py\n if self._simplex == True:\n self._queueEvent.wait()\n self._queueEvent.clear()\n \n if not self._queue.empty():\n Trace(\"%s queue.get ...\"%(self.getUri()), \"EventLib.EventRelayHTTPC\")\n ###msgbody = self._queue.get()\n ###Trace(\"%s get msgbody: %s\"%(self.getUri(),msgbody), \"EventLib.EventRelayHTTPC\")\n ###self._event.set()\n msgbody = self.getQueuedItem()\n [typ,env] = msgbody\n if typ == \"closedown\":\n if self._closing: break\n else:\n # process request as an HTTP POST request\n data = makeEnvelopeData(env)\n headers = { \"Content-type\": \"text/plain\",\n \"Accept\": \"text/plain\",\n \"Content-length\": str(len(data)) }\n self._httpcon.request(\"POST\", \"/request_path_ignored\", data, headers)\n response = self._httpcon.getresponse()\n delay_on_error = delay_on_error_min\n elif self._simplex == False:\n # Nothing in queue:\n # issue a GET for incoming events\n _log.info(\"%s HTTP get ...\"%(self.getUri()))\n headers = { \"Accept\": \"text/plain\" }\n self._httpcon.request(\"GET\", \"/request_path_ignored\", None, headers)\n response = self._httpcon.getresponse()\n if response.status == 200:\n delay_on_error = delay_on_error_min\n msgbody = response.read()\n Trace(\"%s get msgbody: %s\"%(self.getUri(),msgbody), \"EventLib.EventRelayHTTPC\")\n # Parse message and act accordingly\n msgdata = parseMessageData(msgbody)\n Trace(\"%s get msgdata: %s\"%(self.getUri(),str(msgdata)), \"EventLib.EventRelayHTTPC\")\n if msgdata == None:\n #TODO: Log \"Request body malformed\"\n pass\n elif msgdata[0] == \"forward\":\n # msgdata = [\"forward\", [['R1', 'R2', 'R3'], 'ev:typ', 'ev:src', 'payload']]\n event = makeEvent(evtype=msgdata[1][1],source=msgdata[1][2],payload=msgdata[1][3])\n env = constructEnvelope(msgdata[1][0], event)\n self.forward(event, env)\n elif msgdata[0] == \"idle\":\n # Idle response gives client a chance to send if anything is queued\n pass\n else:\n #TODO: handle closedown message?\n Warn( \"%s Request body unrecognized option: %s\"%(self.getUri(),msgdata[0]), \"EventRelayHTTPC\")\n pass\n elif response.status == 503:\n Trace( \"%s processEvent error response: %u, %s\"%(self.getUri(),response.status,response.reason), \"EventLib.EventRelayHTTPC\")\n # Remote end closed down\n break\n else:\n # TODO: (log error response)\n Warn( \"%s processEvent error response: %u, %s\"%(self.getUri(),response.status,response.reason), \"EventLib.EventRelayHTTPC\")\n time.sleep(delay_on_error)\n \n except httplib.BadStatusLine, e:\n # This can happen at closedown\n Info( \"%s processEvent bad response: %s\"%(self.getUri(), str(e)), \"EventLib.EventRelayHTTPC\")\n time.sleep(delay_on_error)\n except httplib.CannotSendRequest, e:\n # This can happen at closedown\n Info( \"%s Cannot send request: %s\"%(self.getUri(), str(e)), \"EventLib.EventRelayHTTPC\")\n 
time.sleep(delay_on_error)\n except httplib.ResponseNotReady, e:\n # This can happen at startup and sometimes other times:\n # maybe multiple requests on a single HTTP connection object?\n Info( \"%s Response not ready: (%s)\"%(self.getUri(), str(e)), \"EventLib.EventRelayHTTPC\")\n time.sleep(delay_on_error)\n except socket.error, e:\n Warn( \"%s Socket error: %s\"%(self.getUri(), str(e)), \"EventLib.EventRelayHTTPC\")\n time.sleep(delay_on_error)\n return",
"def simulateCore(self):\n while len(self.event_q) > 0:\n evts = self.nextEvents()\n self.handleEvents(evts)\n self.gatherSystemStatistics(self.scheduler.system)\n self.dumpEventQueue()",
"def _event_loop(self):\n while True:\n self.scheduler.run(blocking=True)\n time.sleep(1)",
"def run():\n\n while True:\n\n # get event, blah\n event_name, event_data = revent.get_event(block=True, timeout=5)\n\n if event_name is not None:\n print 'received: %s' % event_name\n\n if event_name.endswith('_oembed_details'):\n handle_new_oembed_details(event_data)\n\n elif event_name == 'new_tweet':\n handle_new_tweet(event_data)\n\n # and we're done\n assert revent.verify_msg(event_name, event_data), \\\n \"Could not verify %s\" % event_name",
"def run():\r\n event = threading.Event()\r\n while (event.is_set() == False):\r\n # perform database backup\r\n backup()\r\n\r\n # sleep for the predefined amount interval\r\n event.wait(BACKUP_INTERVAL)",
"def _heartbeat_loop(self):\n # set last time so that \"if t_now - t_last >= HEARTBEAT_LOG_INTERVAL\"\n # below evalutes to True on the first run\n t_last = time.time() - HEARTBEAT_LOG_INTERVAL - 1\n while True:\n alive = 0\n # count alive processes \n for p in PROCESSES:\n if p.is_alive():\n alive += 1\n\n # no processes are alive - exit heartbeat loop\n if alive == 0:\n return\n\n t_now = time.time()\n if t_now - t_last >= HEARTBEAT_LOG_INTERVAL:\n # log heartbeat\n obj = { \n 'timestamp': time.time(),\n 'child_procs_total': self._procs_total,\n 'child_procs_alive': alive,\n 'probe_req_queue_len': self._probe_request_queue.qsize(),\n 'probe_resp_queue_len': \\\n self._probe_response_queue.qsize(), \n }\n \n # push to shared mem\n self._sm.set(config.BASE['SHARED_MEM_HEARTBEAT_KEY'],\n json.dumps(obj), HEARTBEAT_TTL)\n LOG.debug('pushed a heartbeat to the shared memory')\n\n t_last = t_now\n\n time.sleep(HEARTBEAT_LOOP_INTERVAL)",
"def test_cbbackupmgr_with_eventing(self):\n if \"5.5\" > self.cb_version[:3]:\n self.fail(\"This eventing test is only for cb version 5.5 and later. \")\n from pytests.eventing.eventing_constants import HANDLER_CODE\n from lib.testconstants import STANDARD_BUCKET_PORT\n\n self.src_bucket_name = self.input.param('src_bucket_name', 'src_bucket')\n self.eventing_log_level = self.input.param('eventing_log_level', 'INFO')\n self.dst_bucket_name = self.input.param('dst_bucket_name', 'dst_bucket')\n self.dst_bucket_name1 = self.input.param('dst_bucket_name1', 'dst_bucket1')\n self.metadata_bucket_name = self.input.param('metadata_bucket_name', 'metadata')\n self.create_functions_buckets = self.input.param('create_functions_buckets', True)\n self.docs_per_day = self.input.param(\"doc-per-day\", 1)\n self.use_memory_manager = self.input.param('use_memory_manager', True)\n self.backup_before_eventing = self.input.param('backup_before_eventing', False)\n bucket_params = self._create_bucket_params(server=self.master, size=256,\n replicas=self.num_replicas)\n self.cluster.create_standard_bucket(name=self.src_bucket_name, port=STANDARD_BUCKET_PORT + 1,\n bucket_params=bucket_params)\n self.buckets = RestConnection(self.master).get_buckets()\n self.src_bucket = RestConnection(self.master).get_buckets()\n self.cluster.create_standard_bucket(name=self.dst_bucket_name, port=STANDARD_BUCKET_PORT + 1,\n bucket_params=bucket_params)\n self.backup_create()\n if (self.backup_before_eventing):\n self.backup_cluster()\n self.cluster.create_standard_bucket(name=self.metadata_bucket_name, port=STANDARD_BUCKET_PORT + 1,\n bucket_params=bucket_params)\n self.buckets = RestConnection(self.master).get_buckets()\n self.gens_load = self.generate_docs(self.docs_per_day)\n self.expiry = 3\n\n self.restServer = self.get_nodes_from_services_map(service_type=\"eventing\")\n self.rest = RestConnection(self.restServer)\n\n\n self.load(self.gens_load, buckets=self.buckets, flag=self.item_flag, verify_data=False,\n batch_size=self.batch_size)\n function_name = \"Function_{0}_{1}\".format(randint(1, 1000000000), self._testMethodName)\n self.function_name = function_name[0:90]\n body = self.create_save_function_body(self.function_name, HANDLER_CODE.BUCKET_OPS_ON_UPDATE, worker_count=3)\n bk_events_created = False\n rs_events_created = False\n try:\n self.deploy_function(body)\n bk_events_created = True\n self.backup_cluster()\n rest_bk = RestConnection(self.backupset.cluster_host)\n bk_fxn = rest_bk.get_all_functions()\n\n backup_index = 0\n\n if self.backup_before_eventing:\n backup_index = 1\n self.backupset.start = 1\n self.backupset.end = 2\n\n if bk_fxn != \"\":\n self._verify_backup_events_definition(json.loads(bk_fxn), body, backup_index = backup_index)\n\n self.backup_restore()\n\n rest_rs = RestConnection(self.backupset.restore_cluster_host)\n\n if self.backup_before_eventing:\n self.assertTrue('metadata' in [bucket.name for bucket in rest_rs.get_buckets()])\n\n self.bkrs_resume_function(body, rest_rs)\n rs_events_created = True\n self._verify_restore_events_definition(bk_fxn)\n except Exception as e:\n self.fail(e)\n finally:\n master_nodes = [self.backupset.cluster_host,\n self.backupset.restore_cluster_host]\n for node in master_nodes:\n rest = RestConnection(node)\n self.bkrs_undeploy_and_delete_function(body, rest, node)\n self.rest = RestConnection(self.master)",
"def stopeventmonitor(self):\n self.doeventloop = False\n if self.service is not None:\n self.service.breakloop()\n # reset the service, otherwise nextEvent won\"t work\n self.initeventservice(shutdown=True)\n if self.eventmonthread is not None:\n if emane.VERSION >= emane.EMANE091:\n self.eventmonthread._Thread__stop()\n self.eventmonthread.join()\n self.eventmonthread = None",
"def run(self):\n\n (robotProc, iRMsg, robotStat, robotInfo, robotCmd, bcMsg, cbpaeRun, wsInfo) = self.prepVars()\n\n broadcasterProc = self.startBroadcaster(cbpaeRun, bcMsg, iRMsg)\n\n# =============================================================================\n# # pass additional queues to the robot processes by overloading this method\n# =============================================================================\n robotProc = self.startRobots(robotProc, iRMsg, bcMsg, robotInfo, robotCmd, robotStat)\n\n guiProc = self.startGui(wsInfo, robotInfo, robotCmd)\n\n# =============================================================================\n# # This is the main loop checking robotProcs\n# =============================================================================\n rJoinable = self.checkRJoinable(robotProc, robotStat)\n\n self.stopBroadcaster(cbpaeRun)\n\n self.clearQueues(iRMsg, robotCmd, robotInfo)\n\n self.joinRobotProc(robotProc)\n\n self.logBasicInfo()\n\n print (\"CBPAE Trial Finished!!!\")",
"def run(self):\n \n # Loop through all checkers to do an initial state check\n for checker in self.checkers:\n checker.update_last_state()\n\n # Send initial heartbeat\n self._send_heartbeat()\n \n # Main loop\n while True: \n html = \"\"\n for checker in self.checkers:\n if checker.just_changed_state():\n log.warn(\"Checker {} has changed state.\"\n .format(checker.name))\n html += \"<li>\" + checker.html() + \"</li>\\n\"\n \n if isinstance(checker, Process) and checker.state() == FAIL:\n log.warn(\"Process {} is not running.\"\n .format(checker.name))\n html += (\"<li>Attempting to restart \" + \n escape(checker.name) + \"...</li>\\n\")\n try:\n checker.restart()\n except MaxRetriesError, e:\n self.shutdown_reason = str(e)\n return\n time.sleep(5)\n html += (\"<li>State after restart: \" + \n checker.html() + \"</li>\\n\")\n\n if html:\n html = \"<h2>STATE CHANGED:</h2>\\n<ul>\\n\" + html + \"</ul>\\n\" \n html += self.html()\n html += run_commands(self.state_change_cmds)\n self.send_email_with_time(html=html,\n subject=\"Babysitter detected\"\n \" state change.\")\n\n if self._need_to_send_heartbeat():\n self._send_heartbeat()\n\n # Check if a new data subdir has been created\n if self.base_data_dir and self.sub_data_dir:\n if self._find_last_numeric_subdir() != self.sub_data_dir:\n self._send_heartbeat(\"<p>New subdir found so about to restart \"\n \"babysitter. Below are the last stats \"\n \"for the old data subdirectory.</p>\\n\")\n raise NewDataDirError()\n \n time.sleep(UPDATE_PERIOD)",
"def loop_run(self):\n super(EventLoop, self).loop_run()\n self.inq = self.cothread.EventQueue()",
"def handle_wake_up(_):\n loop.awaken()",
"async def test_events_handled_on_event_loop(self):\n session = _create_test_session(asyncio.get_running_loop())\n\n handle_event_spy = MagicMock(\n side_effect=session._handle_scriptrunner_event_on_event_loop\n )\n session._handle_scriptrunner_event_on_event_loop = handle_event_spy\n\n # Send a ScriptRunner event from another thread\n thread = threading.Thread(\n target=lambda: session._on_scriptrunner_event(\n sender=MagicMock(), event=ScriptRunnerEvent.SCRIPT_STARTED\n )\n )\n thread.start()\n thread.join()\n\n # _handle_scriptrunner_event_on_event_loop won't have been called\n # yet, because we haven't yielded the eventloop.\n handle_event_spy.assert_not_called()\n\n # Yield to let the AppSession's callbacks run.\n # _handle_scriptrunner_event_on_event_loop will be called here.\n await asyncio.sleep(0)\n\n handle_event_spy.assert_called_once()",
"def test_dispatch_event(self):\n msg_helper = MessageHelper()\n worker_helper = WorkerHelper()\n broker = self.setup_broker(worker_helper)\n self.assertEqual(broker.get_messages('vumi', 'fooconn.event'), [])\n msg = msg_helper.make_ack()\n yield worker_helper.dispatch_event(msg, 'fooconn')\n self.assertEqual(broker.get_messages('vumi', 'fooconn.event'), [msg])",
"def test_sighup(self):\n self.render_config_template(\n )\n\n proc = self.start_beat()\n self.wait_until(lambda: self.log_contains(\"mockbeat start running.\"))\n proc.proc.send_signal(signal.SIGHUP)\n proc.check_wait()\n assert self.log_contains(\"mockbeat stopped.\")"
] | [
"0.7250805",
"0.7247759",
"0.6956706",
"0.6701028",
"0.6637288",
"0.6541509",
"0.64544195",
"0.64051026",
"0.62292856",
"0.621116",
"0.6163216",
"0.6132048",
"0.61309826",
"0.60833305",
"0.60645205",
"0.60457534",
"0.6026709",
"0.6022797",
"0.5971004",
"0.59705234",
"0.59382254",
"0.58634293",
"0.58442754",
"0.58312064",
"0.5826717",
"0.5822629",
"0.581594",
"0.58116394",
"0.5810759",
"0.58028376"
] | 0.72942674 | 0 |
Outputs the strategy performance and other metrics from the backtest. | def _output_performance(self):
self.portfolio.create_equity_curve_dataframe()
print("Creating summary statistics...")
stats = self.portfolio.output_summary_stats()
print("Creating equity curve...")
print(self.portfolio.equity_curve.tail(10))
pprint.pprint(stats)
print("Signals: %s" % self.signals)
print("Orders: %s" % self.orders)
print("Fills: %s" % self.fills) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def report_performance(self):\n performance = self.amygdala.visualize(self.timestep, \n self.name, \n self.log_dir)\n print('Final performance is {0:.3}'.format(performance))\n self.backup()\n return performance",
"def _output_performance(self):\n self.portfolio.create_equity_curve_dataframe()\n \n print(\"Creating summary statistics...\")\n stats = self.portfolio.output_summary_stats()\n \n print(\"Creating equity curve...\")\n print(self.portfolio.equity_curve.tail(10))\n pprint.pprint(stats)\n \n print(\"Signals: %s\" % self.signals)\n print(\"Orders: %s\" % self.orders)\n print(\"Fills: %s\" % self.fills)",
"def print_performance_info(self):\n pass",
"def test(self):\n statistics = self.__progress(self.testing, self.__val_fn)\n print('Loss: {}'.format(statistics[0]))\n print('Precision: {:.3%}'.format(statistics[1]))\n print('Recall: {:.3%}'.format(statistics[2]))\n print('Accuracy: {:.3%}'.format(statistics[3]))\n self.report['test_loss'] = statistics[0]\n self.report['test_precision'] = statistics[1]\n self.report['test_recall'] = statistics[2]\n self.report['test_accuracy'] = statistics[3]",
"def print_statistics(self) -> None:\n e = self.current_epoch\n if len(self.loss_history[\"test_loss\"]) > 0:\n template = 'Epoch: {} Training loss: {:.4f}, Test loss: {:.4f}'\n print(template.format(e, self.loss_history[\"training_loss\"][-1],\n self.loss_history[\"test_loss\"][-1]))\n else:\n template = 'Epoch: {} Training loss: {:.4f}'\n print(template.format(e, self.loss_history[\"training_loss\"][-1]))",
"def simulate_trading(self):\n # Create the file output stream\n posix_now = datetime.datetime.timestamp(datetime.datetime.now())\n out_path = os.getcwd() + \"/OutputResults/backtest_{}\".format(posix_now)[:-7:] + \".csv\"\n\n out = open(out_path, \"w+\")\n\n spl = len(self.strat_params_list)\n for i, sp in enumerate(self.strat_params_list): # http://book.pythontips.com/en/latest/enumerate.html\n print(\"Strategy %s out of %s...\" % (i + 1, spl))\n self._generate_trading_instances(sp)\n self._run_backtest()\n stats = self._output_performance()\n pprint.pprint(stats)\n\n tot_ret = float(stats[0][1].replace(\"%\", \"\"))\n cagr = float(stats[1][1].replace(\"%\", \"\"))\n sharpe = float(stats[2][1])\n max_dd = float(stats[3][1].replace(\"%\", \"\"))\n dd_dur = int(stats[4][1])\n\n # This should be more general in future implementations...\n out.write(\n \"%s,%s,%s,%s,%s,%s,%s,%s\\n\" % (sp[\"ols_window\"], sp[\"zscore_high\"], sp[\"zscore_low\"],\n tot_ret, cagr, sharpe, max_dd, dd_dur)\n )\n\n out.close()",
"def create_test_report(test_generator, test_history):\n \n df_res_labels = create_res_labels_df(test_generator, test_history)\n \n print_metric_to_console = False\n lvls=['']\n \n metrics_dict = {}\n \n n_samples = df_res_labels.shape[0]\n print('.'*50)\n print('showing test metrics for {} samples'.format(n_samples))\n print('`'*50)\n \n lvl_metrics_dict = {}\n for lvl in lvls:\n y_tr = df_res_labels['y_true' + lvl]\n y_pre = df_res_labels['y_pred' + lvl] \n \n lvl_metrics_dict = {}\n \n # Macro / Micro Driven Metrics\n for avg in ['macro', 'micro']:\n \n met_name = 'precision' + ('_'+ avg) \n res = metrics.precision_score(y_tr, y_pre, average=avg)\n lvl_metrics_dict[met_name] = res\n \n met_name = 'f1' + ('_'+ avg) \n res = metrics.f1_score(y_tr, y_pre, average=avg)\n lvl_metrics_dict[met_name] = res\n \n met_name = 'recall' + ('_'+ avg) \n res = metrics.recall_score(y_tr, y_pre, average=avg)\n lvl_metrics_dict[met_name] = res\n \n met_name = 'accuracy' \n res = metrics.accuracy_score(y_tr, y_pre)\n lvl_metrics_dict[met_name] = res\n \n metrics_dict[lvl] = lvl_metrics_dict\n \n df_test_results = pd.DataFrame(metrics_dict).sort_values(by=lvls, ascending=False)\n df_test_results=df_test_results.reindex(columns=lvls)\n \n print(df_test_results)\n print('- '*70)\n \n plot_confusion_matrix(df_res_labels)\n \n return df_res_labels",
"def print_stats(self):\n if self.df_avg is None:\n self.collect_stats()\n\n print(\"Simulation Results\")\n print(tabulate(self.df_avg, headers=\"keys\", showindex=False, tablefmt=\"fancy_grid\"))\n print(\"FleetManager stats\")\n print(tabulate(self.manager_df, headers=\"keys\", showindex=False, tablefmt=\"fancy_grid\"))\n print(\"Customer stats\")\n print(tabulate(self.customer_df, headers=\"keys\", showindex=False, tablefmt=\"fancy_grid\"))\n print(\"Transport stats\")\n print(tabulate(self.transport_df, headers=\"keys\", showindex=False, tablefmt=\"fancy_grid\"))\n print(\"Station stats\")\n print(tabulate(self.station_df, headers=\"keys\", showindex=False, tablefmt=\"fancy_grid\"))",
"def print_results(self):\n self.accuracy = round(accuracy_score(self.y_val, self.y_pred, 'weighted'), 4)\n self.f1 = round(f1_score(self.y_val, self.y_pred, average='weighted'), 4)\n self.precision = round(precision_score(self.y_val, self.y_pred, average='weighted'), 4)\n\n print(f'Results for {self.title}:')\n print(f'{self.title} accuracy: {self.accuracy}')\n print(f'{self.title} f-score: {self.f1}')\n print(f'{self.title} precision: {self.precision}')",
"def showWorstStats(self) :\n Scenario.messageWorstStats()\n self.showWorstGainWon()\n self.showWorstBetUse()\n self.showNbLevelLose()",
"def log_results(best_model, model_name, max_features, train_score, test_score,\n score_fp):\n\n # ensure the directorys where metrics are stored are created\n if not os.path.exists(os.path.dirname(score_fp)):\n os.makedirs(os.path.dirname(score_fp), exist_ok=True)\n\n st = datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')\n with open(score_fp, 'a+') as f:\n f.write(st + '\\n')\n f.write('-' * 100 + '\\n')\n f.write('Model Run: {}\\n\\n'.format(model_name))\n f.write('Params: {}\\n\\n'.format(best_model.get_params())) \n f.write('Max features: {}\\n\\n'.format(max_features))\n f.write('Train Score: {}\\n\\n'.format(train_score))\n f.write('Test Score: {}\\n\\n'.format(test_score))",
"def final_strategy_test():\r\n print('-- Testing final_strategy --')\r\n print('Win rate:', compare_strategies(final_strategy))",
"def run_test(self):\n self.output_analytics = self.run_inference()\n self.output_df = pd.DataFrame(self.output_analytics)",
"def output_report(x, y, clf, test_flag=False):\n\n # Calculate recall at k time series\n y, y_prob, y_pred, recall_at_k, avg_recall = calculate_recall_at_k_time_series(x, y, clf)\n # Output confusion_matrix\n print(\"Confusion matrix: \\n\", confusion_matrix(y, y_pred))\n # Output recall_at_k\n print(\"Recall at {}, {}, {}, {}, {}: \".format(5, 10, 20, 50, 100), end=' ')\n for top in [5, 10, 20, 50, 100]:\n if top == 100:\n print(str(round(recall_at_k[top - 1], 2)) + \" accordingly\")\n else:\n print(\"{}, \".format(round(recall_at_k[top - 1], 2)), end='')\n print(\"Average recalls over 100: \", round(avg_recall, 2))\n if test_flag:\n _, recall_at_k, _, _, _ = calculate_recall_at_k(y_prob[:, 1], y, k_max=y.shape[0])\n print(\n \"Positions of escalation flags: \", ([1] if recall_at_k[0] != 0 else []) +\n [i + 1 for i in range(1, len(recall_at_k)) if\n recall_at_k[i] != recall_at_k[i - 1]])",
"def print_score(classifier,X_test,y_test):\n print(\"Test results:\\n\")\n print('Accuracy Score: {0:.4f}\\n'.format(accuracy_score(y_test,classifier.predict(X_test))))\n print('Classification Report:\\n{}\\n'.format(classification_report(y_test,classifier.predict(X_test))))\n print('Confusion Matrix:\\n{}\\n'.format(confusion_matrix(y_test,classifier.predict(X_test))))",
"def summary(self):\n print '%s Portfolio\\'s %s Strategy' % (self.portfolio.name, self.name)\n print '-' * COL_DASH_WIDTH\n\n self.display_trades()\n\n for symbol in self.portfolio.assets.keys():\n perf = self.performance[symbol]\n\n print '\\nSummary for %s from %s (first trade) to %s (last trade)' % (symbol, perf['start'], perf['end'])\n print '.' * COL_DASH_WIDTH\n print 'Summary:'\n data = [[fmtn(perf['trades']), fmtn(perf['wins']), fmtn(perf['losses']), fmtn(perf['washes'])]]\n print tabulate.tabulate(data, headers=['Total Trades', '# Wins', '# Losses', '# Washes'])\n\n print '\\nPerformance:'\n data = [[\n fmtn(perf['profit']), fmtn(perf['loss']), fmtn(perf['net_profit']),\n fmtp(perf['profit_factor']), fmtp(perf['percent_profitable']), fmtn(perf['average_trade_net_profit'])\n ]]\n print tabulate.tabulate(data, headers=['Profit', 'Loss', 'Net Profit', 'Profit Factor', 'Percent Profitable', 'Average Net Profit per Trade'])\n\n print '\\nDrawdown:'\n data = [[fmtn(perf['max_drawdown']), fmtn(perf['average_drawdown']), fmtn(perf['max_drawdown_days']), fmtn(perf['average_drawdown_days'])]]\n print tabulate.tabulate(data, headers=['Max', 'Average', 'Max Days', 'Average Days'])\n\n print '\\nRisk:'\n data = [[fmtn(perf['volatility_risk']), fmtn(perf['beta']), fmtn(perf['lower_partial_moment_risk']), fmtn(perf['t_r']), fmtn(perf['s_r'])]]\n print tabulate.tabulate(data, headers=['Volatility', 'Beta', 'Lower Partial Moment', 'Treynor Ratio', 'Sharpe Ratio'])",
"def print_scores(result_collector):\n # print(\"\\n# Metric: Cohen's kappa\")\n # result_collector.set_metric(['k_cohen', 'k'])\n # result_collector.print_all_results()\n print(\"\\n# Metric: Macro avg. F1\")\n result_collector.set_metric([\"macro_avg\", \"fscore\"])\n # result_collector.print_all_results()\n result_collector.print_result_for_level(\"cc\")\n result_collector.print_result_for_level(\"ro\", print_header=False)\n result_collector.print_result_for_level(\"fu\", print_header=False)\n result_collector.print_result_for_level(\"at\", print_header=False)\n\n # print(\"\\nMetric: Positive attachment F1\")\n # result_collector.set_metric(['classwise', '1', 'fscore'])\n # result_collector.print_result_for_level('at')\n print(\"\\n# Metric: Labelled attachment score\")\n result_collector.set_metric([\"accuracy\"])\n result_collector.print_result_for_level(\"lat\")",
"def print_stats(self):\n if self.n_iter % 5 != 0:\n return\n\n s_iter = \"%7i - \" % self.n_iter\n s_stat = ' || '.join([\n '{}: {:7.4f}'.format(k, np.mean(v)) for k, v in self.stats.items()\n if type(v) is list and len(v) > 0\n ])\n for k in self.stats.keys():\n if type(self.stats[k]) is list:\n del self.stats[k][:]\n\n # transformer learning rate\n # learning rates\n s_lr = \" - \"\n for k, v in self.optimizers.items():\n s_lr = s_lr + (\" - %s LR: \" % k) + \" / \".join(\n \"{:.4e}\".format(group['lr']) for group in v.param_groups)\n\n # processing speed\n new_time = time.time()\n diff = new_time - self.last_time\n s_speed = \"{:7.2f} sent/s - {:8.2f} words/s - \".format(\n self.stats['processed_s'] * 1.0 / diff,\n self.stats['processed_w'] * 1.0 / diff\n )\n self.stats['processed_s'] = 0\n self.stats['processed_w'] = 0\n self.last_time = new_time\n\n # log speed + stats + learning rate\n logger.info(s_iter + s_speed + s_stat + s_lr)",
"def test_print_results(self):\n calculated = super().predict_and_print()\n self.assertEqual(calculated, EXP_PRINT_OUTPUT_BASE.format(.18, .1, 0.186, self.test_model.model.train_time) +\n \"Max tree max_depth: 1\\n\"\n \"Number of n_estimators: 1\\n\"\n \"Impurity method: entropy\\n\")",
"def sprint_statistics(\n self,\n dataset_name: str,\n scoring_functions: List[autoPyTorchMetric],\n metric: autoPyTorchMetric\n ) -> str:\n search_results = self.get_search_results(scoring_functions, metric)\n success_status = (StatusType.SUCCESS, StatusType.DONOTADVANCE)\n sio = io.StringIO()\n sio.write(\"autoPyTorch results:\\n\")\n sio.write(f\"\\tDataset name: {dataset_name}\\n\")\n sio.write(f\"\\tOptimisation Metric: {metric}\\n\")\n\n num_runs = len(search_results.status_types)\n num_success = sum([s in success_status for s in search_results.status_types])\n num_crash = sum([s == StatusType.CRASHED for s in search_results.status_types])\n num_timeout = sum([s == StatusType.TIMEOUT for s in search_results.status_types])\n num_memout = sum([s == StatusType.MEMOUT for s in search_results.status_types])\n\n if num_success > 0:\n best_score = metric._sign * np.max(metric._sign * search_results.opt_scores)\n sio.write(f\"\\tBest validation score: {best_score}\\n\")\n\n sio.write(f\"\\tNumber of target algorithm runs: {num_runs}\\n\")\n sio.write(f\"\\tNumber of successful target algorithm runs: {num_success}\\n\")\n sio.write(f\"\\tNumber of crashed target algorithm runs: {num_crash}\\n\")\n sio.write(f\"\\tNumber of target algorithms that exceeded the time \"\n f\"limit: {num_timeout}\\n\")\n sio.write(f\"\\tNumber of target algorithms that exceeded the memory \"\n f\"limit: {num_memout}\\n\")\n\n return sio.getvalue()",
"def output_test():\n\toutput_comparison_page(TEST_EVENT_LIST, TEST_COMPARISON_PAGE_FILEPATH)",
"def print_outcomes(sim_output, strategy_name):\n\n # mean and confidence interval text of game reward\n reward_mean_CI_text = Format.format_estimate_interval(\n estimate=sim_output.get_ave_reward(),\n interval=sim_output.get_CI_reward(alpha=P.alpha),\n deci=1)\n\n # print game reward statistics\n print(strategy_name)\n print(\" Estimate of the mean game reward and {:.{prec}%} confidence interval:\".format(1 - P.alpha, prec=0),\n reward_mean_CI_text)",
"def showBestStats(self) :\n Scenario.messageBestStats()\n self.showBestStatLevelReached()\n self.showNbCoupFindFirstAttempt()\n self.showBestGainWon()\n self.showBestBetUse()\n self.showNbLevelWon()",
"def show_stats(self, output_type='count'):\n if not self._stats:\n raise TypeError(\"self._stats is not defined. Try running run_parser first!\")\n self._stats.print_spec(output_type)",
"def _t_test_results(self):\n t, df, p = self.api.m.math_utils.welchs_t_test(\n self.lkgr.values, self.fkbr.values)\n lines = [\n 'LKGR values: %r' % self.lkgr.values,\n 'FKBR values: %r' % self.fkbr.values,\n 't-statistic: %r' % t,\n 'deg. of freedom: %r' % df,\n 'p-value: %r' % p,\n 'Confidence score: %r' % (100 * (1 - p))\n ]\n return '\\n'.join(lines)",
"def statistics_on_test(self, predicted_results, result):\n # Print confusion matrix and mean average precision score\n predicted_results_binary = self.predicted_results_to_binary(predicted_results)\n print(\"\\nConfusion matrix : \")\n print(confusion_matrix(result, predicted_results_binary))\n print(\"\\nAverage precision score : \", average_precision_score(result, predicted_results_binary))",
"def dump(self):\n if self.logger is None:\n return\n Ys = self.get_ys()\n vals = []\n for i in range(min(20, len(Ys))):\n vals.append(round(Ys[i],2)) \n dt = dtime(self.t0) \n \n message = '{0} {1} {2} {3} {4:.6f} {5:.2f} {6:.2f} {7!s} {8!s}'.format(\n dt, int(self.count_evals.value / dt), self.count_runs.value, self.count_evals.value, \\\n self.best_y.value, self.get_y_mean(), self.get_y_standard_dev(), vals, self.best_x[:])\n self.logger.info(message)",
"def main(output_file):\n with open(output_file, 'w+') as fl:\n poor_perf_stats = pstats.Stats('poor_perf.log', stream=fl)\n good_perf_stats = pstats.Stats('good_perf.log', stream=fl)\n\n poor_perf_stats.sort_stats('cumtime')\n\n fl.write('--------------------------------------------\\n')\n fl.write('POOR PERFORMANCE STATS\\n')\n fl.write(f\"Time: {poor_perf_stats.total_tt}\\n\")\n fl.write(f\"Function Calls: {poor_perf_stats.total_calls}\\n\")\n fl.write(f\"Top cumulative times\\n\")\n poor_perf_stats.print_stats(20)\n\n fl.write('--------------------------------------------\\n')\n fl.write('GOOD PERFORMANCE STATS\\n')\n fl.write(f\"Time: {good_perf_stats.total_tt}\\n\")\n fl.write(f\"Function Calls: {good_perf_stats.total_calls}\\n\")\n fl.write(f\"Top 20 cumulative times\\n\")\n good_perf_stats.print_stats(20)",
"def printReport(self): \n \n print('Distribution: ', self._distribution_type)\n print('Distribution Type: ', str(self._measure_type).replace('MeasureType.','')) \n print('Type Detection Match: ', str(self._measure_type_match))\n print('MLE: ', str(self._mle))\n print('Goodness of Fit: ', str(self._gof)) \n print('Goodness of Fit Pass: ', str(self._pass)) \n print('Overall Score: ', str(self._score)) \n print('-------------')",
"def print_evaluations(ytrue, ypred, model):\n\n print(f'How does model {model} score:')\n print(f'The accuracy of the model is: {round(accuracy_score(ytrue, ypred), 3)}')\n print(f'The precision of the model is: {round(precision_score(ytrue, ypred, pos_label=\"bastille_\" ), 3)}')\n print(f'The recall of the model is: {round(recall_score(ytrue, ypred, pos_label=\"bastille_\"), 3)}')\n print(f'The f1-score of the model is: {round(f1_score(ytrue, ypred, pos_label=\"bastille_\"), 3)}')"
] | [
"0.6337549",
"0.6247714",
"0.62319505",
"0.62179327",
"0.6136802",
"0.61355233",
"0.5989832",
"0.5975646",
"0.59036446",
"0.5874888",
"0.5862709",
"0.584473",
"0.58438706",
"0.5822467",
"0.57994264",
"0.5798621",
"0.5797651",
"0.5784963",
"0.5765707",
"0.576413",
"0.5757759",
"0.575347",
"0.5734205",
"0.5733494",
"0.5715078",
"0.5704774",
"0.5703272",
"0.5691885",
"0.5684237",
"0.56605446"
] | 0.6336608 | 1 |
Simulates the backtest and outputs portfolio performance. Loops over every combination of strategy parameters in the space generated by the Cartesian product of hyperparameter values. Generates new instances of all the data handlers, event queues, and portfolio objects on each iteration, to ensure a "clean slate" for each trading instance in every simulation. The parameter combinations and their performance metrics are stored in an output CSV file, which is subsequently used to plot performance characteristics. | def simulate_trading(self):
        # Create the output CSV file, named with the current POSIX timestamp
        # truncated to whole seconds for a stable, readable file name
        posix_now = int(datetime.datetime.timestamp(datetime.datetime.now()))
        out_path = os.getcwd() + "/OutputResults/backtest_{}.csv".format(posix_now)
out = open(out_path, "w+")
spl = len(self.strat_params_list)
for i, sp in enumerate(self.strat_params_list): # http://book.pythontips.com/en/latest/enumerate.html
print("Strategy %s out of %s..." % (i + 1, spl))
self._generate_trading_instances(sp)
self._run_backtest()
stats = self._output_performance()
pprint.pprint(stats)
tot_ret = float(stats[0][1].replace("%", ""))
cagr = float(stats[1][1].replace("%", ""))
sharpe = float(stats[2][1])
max_dd = float(stats[3][1].replace("%", ""))
dd_dur = int(stats[4][1])
            # Write one CSV row per parameter set: ols_window, zscore_high, zscore_low,
            # total return, CAGR, Sharpe ratio, max drawdown and drawdown duration.
            # This should be made more general in future implementations...
out.write(
"%s,%s,%s,%s,%s,%s,%s,%s\n" % (sp["ols_window"], sp["zscore_high"], sp["zscore_low"],
tot_ret, cagr, sharpe, max_dd, dd_dur)
)
out.close() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parameter_optimization(self):\n out = open(self.csv_dir + self.strategy_id + '_gridsearch.csv', \"w\")\n spl = len(self.para_list)\n for i, sp in enumerate(self.para_list):\n print(\"Strategy %s out of %s...\" % (i + 1, spl))\n self._generate_trading_instances(sp)\n self._run_backtest()\n stats = self.portfolio.get_statistics()\n tot_profit = float(stats[0][1])\n sharpe = float(stats[1][1])\n max_dd = float(stats[2][1])\n win_rate = float(stats[7][1].replace(\"%\", \"\"))\n profit_factor = float(stats[8][1])\n\n out.write(\n \"%s,%s,%s,%s,%s,%s,%s\\n\" %\n (sp[\"takeprofit\"], sp[\"period\"], tot_profit, sharpe, max_dd, win_rate, profit_factor)\n )\n out.close()",
"def run():\n\n for simulation in range(0, N_SIMULATIONS):\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=True) # specify agent to track\n # NOTE: You can set enforce_deadline=False while debugging to allow longer trials\n # TODO: Change later enforce_deadline=True\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.001, display=False) # create simulator (uses pygame when display=True, if available)\n # NOTE: To speed up simulation, reduce update_delay and/or set display=False\n\n sim.run(n_trials=N_TRIALS) # run for a specified number of trials\n # NOTE: To quit midway, press Esc or close pygame window, or hit Ctrl+C on the command-line\n\n if simulation == N_SIMULATIONS - 1:\n\n with open('results.csv', 'a') as csvfile:\n fieldnames = ['alpha', 'gamma', 'epsilon', 'success_rate', 'last_failure']\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n\n for index in range(0,len(simulation_rates)):\n writer.writerow({\n 'alpha': get_simulation_params(0)[0],\n 'gamma': get_simulation_params(0)[1],\n 'epsilon': get_simulation_params(0)[2],\n 'success_rate': simulation_rates[index],\n 'last_failure': last_errors[index]})\n\n\n if N_SIMULATIONS > 1: #multiple simulation AND last simulation\n\n plt.figure(1)\n\n plt.subplot(211)\n plt.plot(simulation_rates)\n plt.title('Success Rate/Simulation')\n plt.xlabel('# Simulation')\n plt.ylabel('Success Rate')\n\n plt.subplot(212)\n plt.plot(last_errors)\n plt.title('Last failed trial per simulation')\n plt.xlabel('# Simulation')\n plt.ylabel('Last failed trial')\n\n plt.show()",
"def target_portfolio_simulation(num_of_years=30, trials=100, method='normal'):\n print(\"Running method target_portfolio_simulation()\")\n\n sim_fia_cv = pd.DataFrame(index=range(num_of_years + 1))\n\n sim_base_total = pd.DataFrame(index=range(num_of_years + 1))\n sim_base_income = pd.DataFrame(index=range(num_of_years + 1))\n\n sim_port_total = pd.DataFrame(index=range(num_of_years + 1))\n sim_port_income = pd.DataFrame(index=range(num_of_years + 1))\n\n read_income_inputs = pd.read_excel(src + \"portfolio_information.xlsx\", sheet_name='income_model_inputs',\n index_col=[0])\n\n read_returns_est = pd.read_excel(src + \"portfolio_information.xlsx\", sheet_name='income_assets_returns_estimates',\n index_col=[0])\n\n read_asset_weights = pd.read_excel(src + \"portfolio_information.xlsx\", sheet_name='asset_weights',\n index_col=[0])\n\n # read_asset_weights.drop(read_asset_weights.index[-1], axis=0, inplace=True)\n\n # read random returns for simulation\n read_normal = pd.read_csv(src + 'median_returns_unsorted.csv', index_col=[0], parse_dates=True)\n cols = [read_normal.columns[c].split('_')[1] for c in np.arange(len(read_normal.columns))]\n read_normal.rename(columns=dict(zip(list(read_normal.columns), cols)), inplace=True)\n\n read_small = pd.read_csv(src + 'median_returns_smallest.csv', index_col=[0], parse_dates=True)\n read_small.rename(columns=dict(zip(list(read_small.columns), cols)), inplace=True)\n\n read_large = pd.read_csv(src + 'median_returns_largest.csv', index_col=[0], parse_dates=True)\n read_large.rename(columns=dict(zip(list(read_large.columns), cols)), inplace=True)\n\n assets_col_names = list(read_normal.columns)\n\n tickers = list(read_asset_weights.index)\n wts = np.array(read_asset_weights.loc[:, 'base'])\n\n def asset_median_returns(data, ticker):\n return data.filter(regex=ticker).median(axis=1)\n\n # dataframe for unsorted returns (normal)\n median_returns_normal = read_normal.copy()\n median_returns_normal.loc[:, 'portfolio_return'] = median_returns_normal.dot(wts)\n median_normal_fia = pd.DataFrame({'FIA': asset_median_returns(read_normal, 'FIA')})\n\n # dataframe for smallest to largest returns\n median_returns_smallest = read_small.copy()\n median_returns_smallest.loc[:, 'portfolio_return'] = median_returns_smallest.dot(wts)\n median_smallest_fia = pd.DataFrame({'FIA': asset_median_returns(read_small, 'FIA')})\n\n # dataframe for largest to smallest returns\n median_returns_largest = read_large.copy()\n median_returns_largest.loc[:, 'portfolio_return'] = median_returns_largest.dot(wts)\n median_largest_fia = pd.DataFrame({'FIA': asset_median_returns(read_large, 'FIA')})\n\n years = list(range(0, num_of_years + 1))\n income_cols = ['year', 'strategy_term', 'index_returns', 'term_ret', 'term_ret_with_par', 'term_annualize',\n 'ann_net_spread', 'term_ret_netspr', 'high_inc_benefit_base', 'rider_fee', 'eoy_income',\n 'contract_value']\n\n term = int(read_income_inputs.loc['term', 'inputs'])\n fia_ret = read_returns_est.loc[read_returns_est.index[-1], 'Annualized Returns']\n fia_risk = read_returns_est.loc[read_returns_est.index[-1], 'Annualized Risk']\n par_rate = float(read_income_inputs.loc['par_rate', 'inputs'])\n spread = float(read_income_inputs.loc['spread', 'inputs'])\n bonus_term = int(read_income_inputs.loc['bonus_term', 'inputs'])\n premium = float(read_income_inputs.loc['premium', 'inputs'])\n income_bonus = float(read_income_inputs.loc['income_bonus', 'inputs'])\n\n income_starts = int(read_income_inputs.loc['start_income_years', 'inputs'])\n 
income_growth = float(read_income_inputs.loc['income_growth', 'inputs'])\n rider_fee = float(read_income_inputs.loc['rider_fee', 'inputs'])\n inc_payout_factor = float(read_income_inputs.loc['income_payout_factor', 'inputs'])\n contract_bonus = float(read_income_inputs.loc['contract_bonus', 'inputs'])\n social = float(read_income_inputs.loc['social', 'inputs'])\n inflation = float(read_income_inputs.loc['inflation', 'inputs'])\n wtd_cpn_yield = float(read_income_inputs.loc['wtd_coupon_yld', 'inputs'])\n life_expectancy = int(read_income_inputs.loc['life_expectancy_age', 'inputs'])\n clients_age = int(read_income_inputs.loc['clients_age', 'inputs'])\n\n # ---------------INCOME MODEL--------------------------------------------\n runs = 0\n returns_dict = {}\n asset_dict = {}\n fia_dict = {}\n\n income_df = pd.DataFrame(index=years, columns=income_cols)\n income_df.loc[:, 'year'] = years\n income_df.loc[:, 'strategy_term'] = income_df.loc[:, 'year'] % term\n income_df.loc[:, 'strategy_term'] = income_df['strategy_term'].apply(lambda x: 1 if x == 0 else 0)\n\n if method == 'normal':\n income_df.loc[:, 'index_returns'] = read_normal.loc[:, 'FIA']\n\n elif method == 'smallest':\n income_df.loc[:, 'index_returns'] = read_small.loc[:, 'FIA']\n\n else:\n income_df.loc[:, 'index_returns'] = read_large.loc[:, 'FIA']\n\n # income_df.loc[:, 'index_returns'] = np.random.normal(fia_ret, fia_risk, size=(len(years), 1))\n\n cumprod = (1. + income_df['index_returns']).rolling(window=term).agg(lambda x: x.prod()) - 1\n income_df.loc[:, 'term_ret'] = np.where(income_df.loc[:, 'strategy_term'] == 1, cumprod, 0)\n income_df.loc[:, 'term_ret_with_par'] = income_df.loc[:, 'term_ret'] * par_rate\n income_df.loc[:, 'term_annualize'] = income_df.loc[:, 'term_ret_with_par'].apply(\n lambda x: (1 + x) ** (1 / term) - 1)\n income_df.loc[:, 'ann_net_spread'] = income_df.loc[:, 'term_annualize'] - spread\n income_df.loc[:, 'ann_net_spread'] = np.where(income_df.loc[:, 'strategy_term'] == 1,\n income_df.loc[:, 'ann_net_spread'], 0)\n income_df.loc[:, 'term_ret_netspr'] = income_df.loc[:, 'ann_net_spread'].apply(lambda x: (1 + x) ** term - 1)\n\n for counter in years:\n if counter == 0:\n income_df.loc[counter, 'high_inc_benefit_base'] = premium * (1 + income_bonus)\n\n elif counter <= min(bonus_term, income_starts):\n income_df.loc[counter, 'high_inc_benefit_base'] = income_df.loc[counter - 1, 'high_inc_benefit_base'] * \\\n (1 + income_growth)\n else:\n income_df.loc[counter, 'high_inc_benefit_base'] = income_df.loc[counter - 1, 'high_inc_benefit_base']\n\n income_df.loc[:, 'rider_fee'] = income_df.loc[:, 'high_inc_benefit_base'] * rider_fee\n income_df.loc[:, 'eoy_income'] = np.where(income_df.loc[:, 'year'] > income_starts,\n income_df.loc[:, 'high_inc_benefit_base'] * inc_payout_factor, 0)\n\n for counter in years:\n if counter == 0:\n income_df.loc[counter, 'contract_value'] = premium * (1 + contract_bonus)\n\n elif income_df.loc[counter, 'strategy_term'] == 1:\n x1 = income_df.loc[counter - 1, 'contract_value'] - income_df.loc[counter, 'rider_fee']\n x2 = (x1 * (1 + income_df.loc[counter, 'term_ret_netspr'])) - income_df.loc[counter, 'eoy_income']\n income_df.loc[counter, 'contract_value'] = x2\n\n else:\n x1 = income_df.loc[counter - 1, 'contract_value'] - income_df.loc[counter, 'rider_fee'] - \\\n income_df.loc[counter, 'eoy_income']\n\n income_df.loc[counter, 'contract_value'] = x1\n\n # variable stores the income number that is used in the base and fia portfolio calcs.\n\n income_from_fia = 
income_df.loc[income_df.index[-1], 'eoy_income']\n\n income_df.loc[:, 'contract_value'] = income_df.loc[:, 'contract_value'].apply(lambda x: 0 if x <= 0 else x)\n\n sim_fia_cv.loc[:, str(runs)] = income_df.loc[:, 'contract_value']\n\n # --------------------BASE MODEL---------------------------------------------\n base_wts = read_asset_weights.loc[:, 'base']\n base_assets = list(base_wts.index)\n base_weights = list(base_wts.values)\n base_returns = list(read_returns_est.loc[:, 'Annualized Returns'].values)\n base_std = list(read_returns_est.loc[:, 'Annualized Risk'].values)\n\n base_investment = float(read_income_inputs.loc['risky_assets', 'Base'])\n adv_fees = float(read_income_inputs.loc['advisor_fees', 'Base'])\n\n # -------------------required income----------------------------------\n req_annual_income = float(read_income_inputs.loc['annual_income', 'inputs'])\n income_needed = req_annual_income - social\n income_net_fia_income = max(0, income_needed - income_from_fia)\n cpn_income_base = base_investment * wtd_cpn_yield\n\n # ----------------------RANDOM RETURNS--------------------------\n r_cols = base_assets\n boy_value = ['bv_{}'.format(name) for name in base_assets]\n eoy_value = ['ev_{}'.format(name) for name in base_assets]\n\n random_returns = pd.DataFrame(index=income_df.index, columns=r_cols)\n\n for c in range(len(r_cols)):\n ret = np.random.normal(base_returns[c], base_std[c], size=(len(random_returns.index), 1))\n\n if method == 'smallest':\n random_returns = read_small.copy()\n\n elif method == 'largest':\n random_returns = read_large.copy()\n\n else:\n random_returns = read_normal.copy()\n\n base_df = random_returns.copy()\n fia_portfolio_df = random_returns.copy()\n port_investment = float(read_income_inputs.loc['risky_assets', 'FIA'])\n cpn_income_port = port_investment * wtd_cpn_yield\n\n # -------------BASE PORTFOLIO----------------------------\n for name in boy_value:\n base_df.loc[:, name] = 0.0\n\n for counter in years:\n period_returns = list(random_returns.loc[counter, :])\n if counter == 0:\n\n base_df.loc[counter, boy_value] = [base_weights[c] * base_investment for c in range(len(boy_value))]\n\n base_df.loc[counter, 'total'] = base_df.loc[counter, boy_value].sum()\n base_df.loc[counter, 'total_net_fees'] = 0.0\n base_df.loc[counter, 'income'] = 0.0\n base_investment = base_df.loc[counter, boy_value].sum()\n\n elif (counter > 0) and (counter < income_starts):\n\n base_df.loc[counter, boy_value] = [base_weights[c] * base_investment * (1 + period_returns[c])\n for c in range(len(boy_value))]\n base_df.loc[counter, 'total'] = base_df.loc[counter, boy_value].sum()\n base_df.loc[counter, 'adv_fees'] = base_df.loc[counter, 'total'] * adv_fees\n base_df.loc[counter, 'total_net_fees'] = base_df.loc[counter, 'total'] - base_df.loc[\n counter, 'adv_fees']\n\n # --coupon payment is invested back into the risky portfolio until the income is withdrawn----\n base_investment = base_df.loc[counter, 'total_net_fees'] + cpn_income_base\n\n else:\n\n base_df.loc[counter, boy_value] = [base_weights[c] * base_investment * (1 + period_returns[c])\n for c in range(len(boy_value))]\n base_df.loc[counter, 'total'] = base_df.loc[counter, boy_value].sum()\n base_df.loc[counter, 'adv_fees'] = base_df.loc[counter, 'total'] * adv_fees\n\n # ---req. income is adjusted for inflation from the second year of withdrawal. Reinvestment of coupon\n # stops from the year income starts. Req. 
income is reduced by the coupon payments\n\n if counter == income_starts:\n\n income_needed = req_annual_income - social\n base_df.loc[counter, 'income'] = income_needed - cpn_income_base\n income_needed = req_annual_income\n\n else:\n income_needed = income_needed * (1 + inflation) - social\n base_df.loc[counter, 'income'] = income_needed - cpn_income_base\n income_needed = income_needed + social\n\n base_df.loc[counter, 'total_net_fees'] = base_df.loc[counter, 'total'] - \\\n base_df.loc[counter, 'adv_fees'] - \\\n base_df.loc[counter, 'income']\n\n base_investment = base_df.loc[counter, 'total_net_fees']\n\n base_df.loc[:, 'adj_total'] = base_df.loc[:, 'total_net_fees'].apply(lambda x: x if x > 0 else 0)\n sim_base_total.loc[:, 's_{}'.format(str(runs))] = base_df.loc[:, 'total_net_fees']\n sim_base_income.loc[:, 's_{}'.format(str(runs))] = base_df.loc[:, 'income']\n\n # ----------------------------FIA PORTFOLIO----------------------------------------------\n for name in boy_value:\n fia_portfolio_df.loc[:, name] = 0.0\n\n for counter in years:\n period_returns = list(random_returns.loc[counter, :])\n if counter == 0:\n\n fia_portfolio_df.loc[counter, boy_value] = [base_weights[c] * port_investment\n for c in range(len(boy_value))]\n fia_portfolio_df.loc[counter, 'total'] = fia_portfolio_df.loc[counter, boy_value].sum()\n fia_portfolio_df.loc[counter, 'total_net_fees'] = 0.0\n fia_portfolio_df.loc[counter, 'income'] = 0.0\n port_investment = fia_portfolio_df.loc[counter, boy_value].sum()\n\n elif (counter > 0) and (counter < income_starts):\n\n fia_portfolio_df.loc[counter, boy_value] = [base_weights[c] * port_investment * (1 + period_returns[c])\n for c in range(len(boy_value))]\n fia_portfolio_df.loc[counter, 'total'] = fia_portfolio_df.loc[counter, boy_value].sum()\n fia_portfolio_df.loc[counter, 'adv_fees'] = fia_portfolio_df.loc[counter, 'total'] * adv_fees\n fia_portfolio_df.loc[counter, 'total_net_fees'] = fia_portfolio_df.loc[counter, 'total'] - \\\n fia_portfolio_df.loc[counter, 'adv_fees']\n\n port_investment = fia_portfolio_df.loc[counter, 'total_net_fees'] + cpn_income_port\n\n else:\n fia_portfolio_df.loc[counter, boy_value] = [base_weights[c] * port_investment * (1 + period_returns[c])\n for c in range(len(boy_value))]\n fia_portfolio_df.loc[counter, 'total'] = fia_portfolio_df.loc[counter, boy_value].sum()\n fia_portfolio_df.loc[counter, 'adv_fees'] = fia_portfolio_df.loc[counter, 'total'] * adv_fees\n\n # ---req. income is adjusted for inflation from the second year of withdrawal. Reinvestment of coupon\n # stops from the year income starts. Req. 
income is reduced by the coupon payments\n\n if counter == income_starts:\n\n income_needed = req_annual_income - social\n income_net_fia_income = max(0, income_needed - income_from_fia)\n fia_portfolio_df.loc[counter, 'income'] = max(0, income_net_fia_income - cpn_income_port)\n income_needed = req_annual_income\n\n else:\n income_needed = income_needed * (1 + inflation) - social\n income_net_fia_income = max(0, income_needed - income_from_fia)\n fia_portfolio_df.loc[counter, 'income'] = max(0, income_net_fia_income - cpn_income_port)\n income_needed = income_needed + social\n\n fia_portfolio_df.loc[counter, 'total_net_fees'] = fia_portfolio_df.loc[counter, 'total'] - \\\n fia_portfolio_df.loc[counter, 'adv_fees'] - \\\n fia_portfolio_df.loc[counter, 'income']\n\n port_investment = fia_portfolio_df.loc[counter, 'total_net_fees']\n\n sim_port_total.loc[:, 's_{}'.format(str(runs))] = fia_portfolio_df.loc[:, 'total_net_fees'] + \\\n income_df.loc[:, 'contract_value']\n\n sim_port_income.loc[:, 's_{}'.format(str(runs))] = fia_portfolio_df.loc[:, 'income']\n\n fia_portfolio_df.loc[:, 'adj_total'] = fia_portfolio_df.loc[:, 'total_net_fees'].apply(\n lambda x: x if x > 0 else 0)\n\n # ---------income breakdown for Base portfolio----------------------------------\n base_df.to_csv(src + 'base_port_detail.csv')\n sim_base_total.to_csv(src + 'base_ending_values.csv')\n income_breakdown_base = pd.DataFrame(sim_base_total.quantile(0.5, axis=1))\n income_breakdown_base.loc[:, 'income_from_portfolio'] = sim_base_income.quantile(0.5, axis=1)\n income_breakdown_base.loc[:, 'fia_income'] = 0.0\n income_breakdown_base.loc[:, 'social_security_income'] = social\n income_breakdown_base.loc[:, 'coupon_income'] = cpn_income_base\n\n income_breakdown_base.rename(columns={income_breakdown_base.columns[0]: 'portfolio_ending_value'}, inplace=True)\n income_breakdown_base.loc[:, 'income_from_portfolio'][\n income_breakdown_base.loc[:, 'portfolio_ending_value'] <= 0] = 0\n income_breakdown_base.loc[:, 'total_income'] = income_breakdown_base.loc[:, income_breakdown_base.columns[1:]].sum(\n axis=1)\n\n # ------------Block Ends-------------------------------------------------------------\n\n # ---------income breakdown for FIA portfolio----------------------------------\n fia_portfolio_df.to_csv(src + 'fia_port_detail.csv')\n sim_port_total.to_csv(src + 'fiaport_ending_values.csv')\n\n income_breakdown_port = pd.DataFrame(sim_port_total.quantile(0.5, axis=1))\n income_breakdown_port.loc[:, 'income_from_portfolio'] = sim_port_income.quantile(0.5, axis=1)\n income_breakdown_port.loc[:, 'fia_income'] = income_from_fia\n income_breakdown_port.loc[:, 'social_security_income'] = social\n income_breakdown_port.loc[:, 'coupon_income'] = cpn_income_port\n\n income_breakdown_port.rename(columns={income_breakdown_port.columns[0]: 'portfolio_ending_value'}, inplace=True)\n income_breakdown_port.loc[:, 'income_from_portfolio'][\n income_breakdown_port.loc[:, 'portfolio_ending_value'] <= 0] = 0\n income_breakdown_port.loc[:, 'total_income'] = income_breakdown_port.loc[:, income_breakdown_port.columns[1:]].sum(\n axis=1)\n\n # ------------Block Ends-------------------------------------------------------------\n q_cut = [0.0, 0.05, 0.25, 0.5, 0.75, 0.95, 1.0]\n sim_base_income[sim_base_total < income_needed] = 0.0\n\n sim_port_income[sim_port_total < income_net_fia_income] = 0\n\n sim_port_income = sim_port_income + income_from_fia\n\n # base_quantile = sim_base_total.loc[sim_base_total.index[-1]].quantile([0.05, 0.25, 0.50, 0.75, 
0.90])\n\n # port_quantile = sim_port_total.loc[sim_port_total.index[-1]].quantile([0.05, 0.25, 0.50, 0.75, 0.90])\n\n base_quantile = sim_base_total.loc[sim_base_total.index[-1]].quantile(q_cut)\n\n port_quantile = sim_port_total.loc[sim_port_total.index[-1]].quantile(q_cut)\n\n # q_cut = [0.0, .05, 0.25, 0.5, 0.75, 0.95, 1.0]\n cols = ['Min', '5th', '25th', '50th', '75th', '90th', 'Max']\n\n # ----drop year 0--------\n sim_base_total = sim_base_total[1:]\n sim_port_total = sim_port_total[1:]\n\n # ----------------quantile analysis for base terminal value--------------------------\n base_qcut = pd.DataFrame(index=sim_base_total.index, columns=cols)\n for c in range(len(cols)):\n base_qcut.loc[:, cols[c]] = sim_base_total.quantile(q_cut[c], axis=1)\n\n base_qcut.clip(lower=0, inplace=True)\n\n # ----------------------quantile analysis for base income----------------------------\n base_income_qcut = pd.DataFrame(index=sim_base_income.index, columns=cols)\n for c in range(len(cols)):\n base_income_qcut.loc[:, cols[c]] = sim_base_income.quantile(q_cut[c], axis=1)\n\n # ----Remove NaN's prior to the income start years------------\n # base_income_qcut = base_income_qcut.loc[income_starts:]\n\n # -------------quantile analysis for portfolio terminal value ----------------\n port_qcut = pd.DataFrame(index=sim_port_total.index, columns=cols)\n for c in range(len(cols)):\n port_qcut.loc[:, cols[c]] = sim_port_total.quantile(q_cut[c], axis=1)\n\n port_qcut.clip(lower=0, inplace=True)\n\n # ---------------quantile analysis for portfolio income----------------------------\n port_income_qcut = pd.DataFrame(index=sim_port_income.index, columns=cols)\n for c in range(len(cols)):\n port_income_qcut.loc[:, cols[c]] = sim_port_income.quantile(q_cut[c], axis=1)\n\n # ----Remove NaN's prior to the income start years------------\n # port_income_qcut = port_income_qcut.loc[income_starts:]\n\n # ----------probability ending value will be less than 0 at the end of the horizon -----------------------\n base_legacy_risk = (sim_base_total.loc[sim_base_total.index[life_expectancy - clients_age]] < 0).sum() / (\n trials + 1)\n port_legacy_risk = (sim_port_total.loc[sim_port_total.index[life_expectancy - clients_age]] < 0).sum() / (\n trials + 1)\n\n legacy_risk = pd.DataFrame([base_legacy_risk, port_legacy_risk,\n 'Prob. 
of portfolio value less than 0 at the end of the expected life'],\n index=['base', 'fia_portfolio', 'Notes'],\n columns=['Ruin Probability'])\n\n # -----------Year-wise probability of ending value greater than 0 -----------------\n base_psuccess = sim_base_total.apply(lambda x: x > 0).sum(axis=1) / (trials + 1)\n port_psuccess = sim_port_total.apply(lambda x: x > 0).sum(axis=1) / (trials + 1)\n\n # -----------------------WRITING FILES TO EXCEL ---------------------------\n col_names = ['50th', 'age', 'comment']\n writer = pd.ExcelWriter(src + method + '_simulated_income_summary.xlsx', engine='xlsxwriter')\n read_income_inputs.to_excel(writer, sheet_name='inputs_for_income')\n\n read_returns_est.to_excel(writer, sheet_name='asset_returns_estimates')\n\n age_index = list(range(clients_age + 1, clients_age + len(base_qcut) + 1))\n base_qcut.loc[:, 'age'] = age_index\n base_qcut.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n base_qcut.loc[income_starts:, col_names].to_excel(writer, sheet_name='base_ending_value_quantiles')\n\n base_income_qcut = base_income_qcut.loc[1:, :]\n base_income_qcut.loc[:, 'age'] = age_index\n base_income_qcut.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n base_income_qcut.loc[income_starts:, col_names].to_excel(writer, sheet_name='base_income_quantiles')\n\n port_qcut.loc[:, 'age'] = age_index\n port_qcut.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n port_qcut.loc[income_starts:, col_names].to_excel(writer, sheet_name='fia_port_ending_value_quantiles')\n\n port_income_qcut = port_income_qcut.loc[1:, :]\n port_income_qcut.loc[:, 'age'] = age_index\n port_income_qcut.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n # port_income_qcut.loc[:, 'ending_contract_value'] = sim_fia_cv\n port_income_qcut.loc[income_starts:, col_names].to_excel(writer, sheet_name='fia_port_income_quantiles')\n\n # prob_success_df = pd.concat([base_psuccess, port_psuccess], axis=1)\n # prob_success_df.rename(columns={prob_success_df.columns[0]: 'prob(ending_value>0)_base',\n # prob_success_df.columns[1]: 'prob(ending_value>0)_port'}, inplace=True)\n\n # prob_success_df.loc[:, 'age'] = age_index\n # prob_success_df.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n # prob_success_df.to_excel(writer, sheet_name='success_probability')\n\n income_breakdown_base = income_breakdown_base.loc[1:, :]\n income_breakdown_base.loc[:, 'age'] = age_index\n income_breakdown_base.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n income_breakdown_base.loc[income_starts:, :].to_excel(writer, sheet_name='base_income_breakdown_median')\n\n income_breakdown_port = income_breakdown_port.loc[1:, :]\n income_breakdown_port.loc[:, 'age'] = age_index\n income_breakdown_port.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n income_breakdown_port.loc[:, 'ending_contract_value'] = income_df.loc[:, 'contract_value']\n income_breakdown_port.loc[income_starts:, :].to_excel(writer, sheet_name='fia_income_breakdown_median')\n\n if method == 'normal':\n # median_returns_normal.loc[:, 'fia_median_returns'] = median_normal_fia\n median_returns_normal.to_excel(writer, sheet_name='gr_port_median_normal')\n\n elif method == 'smallest':\n # median_returns_smallest.loc[:, 'fia_median_returns'] = median_smallest_fia\n median_returns_smallest.to_excel(writer, sheet_name='gr_port_median_asc')\n\n else:\n # median_returns_largest.loc[:, 'fia_median_returns'] = median_largest_fia\n median_returns_largest.to_excel(writer, 
sheet_name='gr_port_median_desc')\n\n terminal_val = pd.read_csv(src + 'terminal_values.csv', index_col=[0])\n ending_val = pd.read_csv(src + 'ending_values.csv', index_col=[0])\n ending_val_ror = pd.read_csv(src + 'ending_values_ror.csv', index_col=[0])\n\n terminal_val.to_excel(writer, sheet_name='terminal_values')\n ending_val.to_excel(writer, sheet_name='port_ending_values')\n ending_val_ror.to_excel(writer, sheet_name='port_annual_growth')\n\n writer.save()\n\n # -----------------Plotting charts--------------------------------------------\n base_qcut.loc[income_starts:].plot(grid=True, title='Quantile Terminal Value - Base Portfolio')\n plt.savefig(src + \"quantile_terminal_base.png\")\n plt.close('all')\n\n base_income_qcut.plot(grid=True, title='Quantile Income - Base Portfolio')\n plt.savefig(src + \"quantile_income_base.png\")\n plt.close('all')\n\n base_psuccess.plot(grid=True, title='Probability of Success (Portfolio Ending Value > 0) - Base Portfolio')\n plt.savefig(src + \"success_probabilty_base.png\")\n plt.close('all')\n\n (1 - base_psuccess).plot(grid=True, title='Probability of Ruin (Portfolio Ending Value < 0) - Base Portfolio')\n plt.savefig(src + \"ruin_probability_base.png\")\n plt.close('all')\n\n port_qcut.loc[income_starts:].plot(grid=True, title='Quantile Terminal Value - FIA Portfolio')\n plt.savefig(src + \"quantile_terminal_fia.png\")\n plt.close('all')\n\n port_income_qcut.plot(grid=True, title='Quantile Income - FIA Portfolio')\n plt.savefig(src + \"quantile_income_fia.png\")\n plt.close('all')\n\n port_psuccess.plot(grid=True, title='Probability of Success (Portfolio Ending Value > 0) - FIA Portfolio')\n plt.savefig(src + \"success_probabilty_fia.png\")\n plt.close('all')\n\n (1 - port_psuccess).plot(grid=True, title='Probability of Ruin (Portfolio Ending Value < 0) - FIA Portfolio')\n plt.savefig(src + \"ruin_probability_fia.png\")\n plt.close('all')\n\n print(\"simulation completed for {}\".format(method))",
"def simulate_trading(self):\n self._generate_trading_instances()\n self._run_backtest()\n self.portfolio.output_equity()\n res=self.portfolio.get_statistics()\n self.plot.plot_equity()\n return res",
"def run_simulator(simulation_parameters, spectrum_portfolio,\n ant_heights, site_radii, modulation_and_coding_lut, costs):\n\n unprojected_point = {\n 'type': 'Feature',\n 'geometry': {\n 'type': 'Point',\n 'coordinates': (-0.07496, 51.42411),\n },\n 'properties': {\n 'site_id': 'Crystal Palace Radio Tower'\n }\n }\n\n unprojected_crs = 'epsg:4326'\n projected_crs = 'epsg:3857'\n\n environments =[\n 'urban',\n 'suburban',\n 'rural'\n ]\n\n for environment in environments:\n for site_radius in site_radii[environment]:\n\n print('--working on {}: {}'.format(environment, site_radius))\n\n transmitter, interfering_transmitters, site_area, interfering_site_areas = \\\n produce_sites_and_site_areas(\n unprojected_point['geometry']['coordinates'],\n site_radius,\n unprojected_crs,\n projected_crs\n )\n\n receivers = generate_receivers(site_area, SIMULATION_PARAMETERS, 1)\n\n for frequency, bandwidth, generation in spectrum_portfolio:\n for ant_height in ant_heights:\n\n MANAGER = SimulationManager(\n transmitter, interfering_transmitters, receivers,\n site_area, SIMULATION_PARAMETERS\n )\n\n results = MANAGER.estimate_link_budget(\n frequency, bandwidth, generation, ant_height,\n environment,\n MODULATION_AND_CODING_LUT,\n SIMULATION_PARAMETERS\n )\n\n folder = os.path.join(BASE_PATH, '..', 'results', 'full_tables')\n filename = 'full_capacity_lut_{}_{}_{}_{}.csv'.format(\n environment, site_radius, frequency, ant_height)\n\n write_full_results(results, environment, site_radius,\n frequency, bandwidth, generation, ant_height,\n folder, filename, simulation_parameters)\n\n average_site_results = obtain_average_values(\n results, simulation_parameters\n )\n\n results_directory = os.path.join(BASE_PATH, '..', 'results')\n\n write_frequency_lookup_table(average_site_results, environment,\n site_radius, frequency, bandwidth, generation,\n ant_height, results_directory,\n 'average_capacity_lut.csv',\n simulation_parameters\n )\n\n if frequency == spectrum_portfolio[0][0]:\n\n percentile_site_results = obtain_percentile_values(\n results, simulation_parameters\n )\n\n percentile_site_results = calculate_costs(\n percentile_site_results, costs, simulation_parameters,\n site_radius, environment\n )\n\n write_cost_lookup_table(percentile_site_results, environment,\n site_radius, frequency, bandwidth, generation,\n ant_height, results_directory,\n 'percentile_{}_capacity_lut.csv'.format(\n simulation_parameters['percentile']),\n simulation_parameters\n )\n\n ## write out as shapes, if desired, for debugging purposes\n geojson_receivers = convert_results_geojson(results)\n\n write_shapefile(\n geojson_receivers, os.path.join(results_directory, 'shapes'),\n 'receivers_{}.shp'.format(site_radius),\n projected_crs\n )\n\n write_shapefile(\n transmitter, os.path.join(results_directory, 'shapes'),\n 'transmitter_{}.shp'.format(site_radius),\n projected_crs\n )\n\n write_shapefile(\n site_area, os.path.join(results_directory, 'shapes'),\n 'site_area_{}.shp'.format(site_radius),\n projected_crs\n )\n\n write_shapefile(\n interfering_transmitters, os.path.join(results_directory, 'shapes'),\n 'interfering_transmitters_{}.shp'.format(site_radius),\n projected_crs\n )\n\n write_shapefile(\n interfering_site_areas, os.path.join(results_directory, 'shapes'),\n 'interfering_site_areas_{}.shp'.format(site_radius),\n projected_crs\n )",
"def simulation_using_historical_returns(num_of_years=30, trials=100, method='normal'):\n\n print(\"Running method simulation_using_historical_returns()\")\n\n sim_fia_cv = pd.DataFrame(index=range(num_of_years + 1))\n\n sim_base_total = pd.DataFrame(index=range(num_of_years + 1))\n sim_base_income = pd.DataFrame(index=range(num_of_years + 1))\n\n sim_port_total = pd.DataFrame(index=range(num_of_years + 1))\n sim_port_income = pd.DataFrame(index=range(num_of_years + 1))\n\n read_income_inputs = pd.read_excel(src + \"portfolio_information.xlsx\", sheet_name='income_model_inputs',\n index_col=[0])\n\n read_returns_est = pd.read_excel(src + \"portfolio_information.xlsx\", sheet_name='income_assets_returns_estimates',\n index_col=[0])\n\n read_asset_weights = pd.read_excel(src + \"portfolio_information.xlsx\", sheet_name='asset_weights',\n index_col=[0])\n\n # read the base portfolio from accumulation model\n fia_name = read_income_inputs.loc['fia_name', 'inputs']\n path = src + fia_name + '/'\n file_name = fia_name + '_portfolio.csv'\n\n # base_port = pd.read_csv(path + 'base_portfolio.csv', index_col=[0], parse_dates=True)\n\n base_port = pd.read_csv(path + file_name, index_col=[0], parse_dates=True)\n\n ls1 = list(read_asset_weights.index[:-2])\n ls2 = ['Cash', fia_name, 'Total']\n combined_ls = ls1 + ls2\n base_port = base_port.loc[:, combined_ls]\n base_port.loc[:, 'port_return'] = base_port['Total'].pct_change().fillna(0)\n base_port.drop('Total', axis=1, inplace=True)\n yearly_base_port = base_port.groupby(by=base_port.index.year).apply(sum)\n\n # --Simulate only for first 20 years of returns--------\n first_n_years = int(read_income_inputs.loc['simulate_for_first_n_years', 'inputs'])\n yearly_base_port = yearly_base_port.loc[yearly_base_port.index[-(first_n_years + 1):], :]\n\n # --------Unsorted returns ---------------------\n read_normal = yearly_base_port.copy()\n read_normal.iloc[0] = 0.0\n read_normal.to_csv(src + 'base_port_ann_ret_unsorted.csv')\n read_normal.reset_index(drop=True, inplace=True)\n\n # ---make a copy to add to the consolidated summary file----\n save_normal = read_normal.copy()\n read_normal.drop('port_return', axis=1, inplace=True)\n read_normal.rename(columns={fia_name: 'FIA'}, inplace=True)\n\n # - Shift returns by one year and assign 0 returns to all assets in teh year 0, the year of initial investment.\n # read_normal = read_normal.shift().fillna(0)\n read_normal.to_csv(src + 'historical_port_return_unsorted.csv')\n\n # ----Best to worst returns----\n read_large = yearly_base_port.copy()\n\n # --Logic to sort the last 20 years of returns and assign 0 to first year, the year of investment\n read_large.iloc[0] = 1000.0\n read_large = read_large.sort_values(by=['port_return'], ascending=False)\n # --assign 0 to first year, the year of investment\n read_large.iloc[0] = 0.0\n read_large.to_csv(src + 'base_port_ann_ret_desc.csv')\n read_large.reset_index(drop=True, inplace=True)\n # read_large = read_large.shift().fillna(0)\n\n # ---make a copy to add to the consolidated summary file---\n save_desc = read_large.copy()\n read_large.drop('port_return', axis=1, inplace=True)\n read_large.rename(columns={fia_name: 'FIA'}, inplace=True)\n read_large.to_csv(src + 'historical_port_return_desc.csv')\n\n # ------Worst to Best Returns------------\n read_small = yearly_base_port.copy()\n read_small.iloc[0] = -1000.0\n read_small = read_small.sort_values(by=['port_return'], ascending=True)\n read_small.iloc[0] = 0.0\n read_small.to_csv(src + 'base_port_ann_ret_asc.csv')\n 
read_small.reset_index(drop=True, inplace=True)\n # read_small = read_small.shift().fillna(0)\n\n # ---make a copy to add to the consolidated summary file---\n save_asc = read_small.copy()\n read_small.drop('port_return', axis=1, inplace=True)\n read_small.rename(columns={fia_name: 'FIA'}, inplace=True)\n read_small.to_csv(src + 'historical_port_return_asc.csv')\n\n # read_asset_weights.drop(read_asset_weights.index[-1], axis=0, inplace=True)\n\n # read random returns for simulation\n # read_normal = pd.read_csv(src + 'median_returns_unsorted.csv', index_col=[0], parse_dates=True)\n # cols = [read_normal.columns[c].split('_')[1] for c in np.arange(len(read_normal.columns))]\n # read_normal.rename(columns=dict(zip(list(read_normal.columns), cols)), inplace=True)\n #\n # read_small = pd.read_csv(src + 'median_returns_smallest.csv', index_col=[0], parse_dates=True)\n # read_small.rename(columns=dict(zip(list(read_small.columns), cols)), inplace=True)\n #\n # read_large = pd.read_csv(src + 'median_returns_largest.csv', index_col=[0], parse_dates=True)\n # read_large.rename(columns=dict(zip(list(read_large.columns), cols)), inplace=True)\n\n assets_col_names = list(read_normal.columns)\n\n tickers = list(read_asset_weights.index)\n wts = np.array(read_asset_weights.loc[:, 'base'])\n\n def asset_median_returns(data, ticker):\n return data.filter(regex=ticker).median(axis=1)\n\n # dataframe for unsorted returns (normal)\n median_returns_normal = read_normal.copy()\n median_returns_normal.loc[:, 'portfolio_return'] = median_returns_normal.dot(wts)\n # median_returns_normal.rename(columns={fia_name: 'FIA'}, inplace=True)\n # median_normal_fia = pd.DataFrame({'FIA': asset_median_returns(read_normal, 'FIA')})\n\n # dataframe for smallest to largest returns\n median_returns_smallest = read_small.copy()\n median_returns_smallest.loc[:, 'portfolio_return'] = median_returns_smallest.dot(wts)\n # median_returns_smallest.rename(columns={fia_name: 'FIA'}, inplace=True)\n # median_smallest_fia = pd.DataFrame({'FIA': asset_median_returns(read_small, 'FIA')})\n\n # dataframe for largest to smallest returns\n median_returns_largest = read_large.copy()\n median_returns_largest.loc[:, 'portfolio_return'] = median_returns_largest.dot(wts)\n # median_returns_largest.rename(columns={fia_name: 'FIA'}, inplace=True)\n # median_largest_fia = pd.DataFrame({'FIA': asset_median_returns(read_large, 'FIA')})\n\n # years = list(range(0, num_of_years + 1))\n years = np.arange(len(read_normal))\n income_cols = ['year', 'strategy_term', 'index_returns', 'term_ret', 'term_ret_with_par', 'term_annualize',\n 'ann_net_spread', 'term_ret_netspr', 'high_inc_benefit_base', 'rider_fee', 'eoy_income',\n 'contract_value']\n\n term = int(read_income_inputs.loc['term', 'inputs'])\n fia_ret = read_returns_est.loc[read_returns_est.index[-1], 'Annualized Returns']\n fia_risk = read_returns_est.loc[read_returns_est.index[-1], 'Annualized Risk']\n par_rate = float(read_income_inputs.loc['par_rate', 'inputs'])\n spread = float(read_income_inputs.loc['spread', 'inputs'])\n bonus_term = int(read_income_inputs.loc['bonus_term', 'inputs'])\n premium = float(read_income_inputs.loc['premium', 'inputs'])\n income_bonus = float(read_income_inputs.loc['income_bonus', 'inputs'])\n\n income_starts = int(read_income_inputs.loc['start_income_years', 'inputs'])\n # --Income start right away, not deferred ----\n income_starts = 0\n income_growth = float(read_income_inputs.loc['income_growth', 'inputs'])\n rider_fee = float(read_income_inputs.loc['rider_fee', 
'inputs'])\n inc_payout_factor = float(read_income_inputs.loc['income_payout_factor', 'inputs'])\n contract_bonus = float(read_income_inputs.loc['contract_bonus', 'inputs'])\n social = float(read_income_inputs.loc['social', 'inputs'])\n inflation = float(read_income_inputs.loc['inflation', 'inputs'])\n wtd_cpn_yield = float(read_income_inputs.loc['wtd_coupon_yld', 'inputs'])\n life_expectancy = int(read_income_inputs.loc['life_expectancy_age', 'inputs'])\n clients_age = int(read_income_inputs.loc['clients_age', 'inputs'])\n\n # --------------------------------------INCOME MODEL--------------------------------------------\n runs = 0\n returns_dict = {}\n asset_dict = {}\n fia_dict = {}\n\n income_df = pd.DataFrame(index=years, columns=income_cols)\n income_df.loc[:, 'year'] = years\n income_df.loc[:, 'strategy_term'] = income_df.loc[:, 'year'] % term\n income_df.loc[:, 'strategy_term'] = income_df['strategy_term'].apply(lambda x: 1 if x == 0 else 0)\n\n if method == 'normal':\n income_df.loc[:, 'index_returns'] = read_normal.loc[:, 'FIA']\n\n elif method == 'smallest':\n income_df.loc[:, 'index_returns'] = read_small.loc[:, 'FIA']\n\n else:\n income_df.loc[:, 'index_returns'] = read_large.loc[:, 'FIA']\n\n # income_df.loc[:, 'index_returns'] = np.random.normal(fia_ret, fia_risk, size=(len(years), 1))\n\n cumprod = (1. + income_df['index_returns']).rolling(window=term).agg(lambda x: x.prod()) - 1\n income_df.loc[:, 'term_ret'] = np.where(income_df.loc[:, 'strategy_term'] == 1, cumprod, 0)\n income_df.loc[:, 'term_ret_with_par'] = income_df.loc[:, 'term_ret'] * par_rate\n income_df.loc[:, 'term_annualize'] = income_df.loc[:, 'term_ret_with_par'].apply(\n lambda x: (1 + x) ** (1 / term) - 1)\n income_df.loc[:, 'ann_net_spread'] = income_df.loc[:, 'term_annualize'] - spread\n income_df.loc[:, 'ann_net_spread'] = np.where(income_df.loc[:, 'strategy_term'] == 1,\n income_df.loc[:, 'ann_net_spread'], 0)\n income_df.loc[:, 'term_ret_netspr'] = income_df.loc[:, 'ann_net_spread'].apply(lambda x: (1 + x) ** term - 1)\n\n for counter in years:\n if counter == 0:\n income_df.loc[counter, 'high_inc_benefit_base'] = premium * (1 + income_bonus)\n\n elif counter <= min(bonus_term, income_starts):\n income_df.loc[counter, 'high_inc_benefit_base'] = income_df.loc[counter - 1, 'high_inc_benefit_base'] * \\\n (1 + income_growth)\n else:\n income_df.loc[counter, 'high_inc_benefit_base'] = income_df.loc[counter - 1, 'high_inc_benefit_base']\n\n income_df.loc[:, 'rider_fee'] = income_df.loc[:, 'high_inc_benefit_base'] * rider_fee\n income_df.loc[:, 'eoy_income'] = np.where(income_df.loc[:, 'year'] > income_starts,\n income_df.loc[:, 'high_inc_benefit_base'] * inc_payout_factor, 0)\n\n for counter in years:\n if counter == 0:\n income_df.loc[counter, 'contract_value'] = premium * (1 + contract_bonus)\n\n elif income_df.loc[counter, 'strategy_term'] == 1:\n x1 = income_df.loc[counter - 1, 'contract_value'] - income_df.loc[counter, 'rider_fee']\n x2 = (x1 * (1 + income_df.loc[counter, 'term_ret_netspr'])) - income_df.loc[counter, 'eoy_income']\n income_df.loc[counter, 'contract_value'] = x2\n\n else:\n x1 = income_df.loc[counter - 1, 'contract_value'] - income_df.loc[counter, 'rider_fee'] - \\\n income_df.loc[counter, 'eoy_income']\n\n income_df.loc[counter, 'contract_value'] = x1\n\n # variable stores the income number that is used in the base and fia portfolio calcs.\n\n income_from_fia = income_df.loc[income_df.index[-1], 'eoy_income']\n\n income_df.loc[:, 'contract_value'] = income_df.loc[:, 
'contract_value'].apply(lambda x: 0 if x <= 0 else x)\n\n sim_fia_cv.loc[:, str(runs)] = income_df.loc[:, 'contract_value']\n\n # ---------------------------------BASE MODEL---------------------------------------------\n\n base_wts = read_asset_weights.loc[:, 'base']\n base_assets = list(base_wts.index)\n base_weights = list(base_wts.values)\n base_returns = list(read_returns_est.loc[:, 'Annualized Returns'].values)\n base_std = list(read_returns_est.loc[:, 'Annualized Risk'].values)\n\n base_investment = float(read_income_inputs.loc['risky_assets', 'Base'])\n adv_fees = float(read_income_inputs.loc['advisor_fees', 'Base'])\n\n # -------------------------------------required income--------------------------------\n req_annual_income = float(read_income_inputs.loc['annual_income', 'inputs'])\n income_needed = req_annual_income - social\n income_net_fia_income = max(0, income_needed - income_from_fia)\n cpn_income_base = base_investment * wtd_cpn_yield\n\n # --------------------------------------------RANDOM RETURNS-----------------------------------\n r_cols = base_assets\n boy_value = ['bv_{}'.format(name) for name in base_assets]\n eoy_value = ['ev_{}'.format(name) for name in base_assets]\n\n random_returns = pd.DataFrame(index=income_df.index, columns=r_cols)\n\n for c in range(len(r_cols)):\n ret = np.random.normal(base_returns[c], base_std[c], size=(len(random_returns.index), 1))\n\n if method == 'smallest':\n random_returns = read_small.copy()\n\n elif method == 'largest':\n random_returns = read_large.copy()\n\n else:\n random_returns = read_normal.copy()\n\n base_df = random_returns.copy()\n fia_portfolio_df = random_returns.copy()\n port_investment = float(read_income_inputs.loc['risky_assets', 'FIA'])\n cpn_income_port = port_investment * wtd_cpn_yield\n\n # -------------------------------------BASE PORTFOLIO----------------------------\n for name in boy_value:\n base_df.loc[:, name] = 0.0\n\n for counter in years:\n period_returns = list(random_returns.loc[counter, :])\n if counter == 0:\n\n base_df.loc[counter, boy_value] = [base_weights[c] * base_investment for c in range(len(boy_value))]\n base_df.loc[counter, 'total'] = base_df.loc[counter, boy_value].sum()\n base_df.loc[counter, 'total_net_fees'] = 0.0\n base_df.loc[counter, 'income'] = 0.0\n base_investment = base_df.loc[counter, boy_value].sum()\n\n elif (counter > 0) and (counter < income_starts):\n\n base_df.loc[counter, boy_value] = [base_weights[c] * base_investment * (1 + period_returns[c])\n for c in range(len(boy_value))]\n base_df.loc[counter, 'total'] = base_df.loc[counter, boy_value].sum()\n base_df.loc[counter, 'adv_fees'] = base_df.loc[counter, 'total'] * adv_fees\n base_df.loc[counter, 'total_net_fees'] = base_df.loc[counter, 'total'] - base_df.loc[\n counter, 'adv_fees']\n\n # --coupon payment is invested back into the risky portfolio until the income is withdrawn----\n base_investment = base_df.loc[counter, 'total_net_fees'] + cpn_income_base\n\n else:\n\n base_df.loc[counter, boy_value] = [base_weights[c] * base_investment * (1 + period_returns[c])\n for c in range(len(boy_value))]\n base_df.loc[counter, 'total'] = base_df.loc[counter, boy_value].sum()\n base_df.loc[counter, 'adv_fees'] = base_df.loc[counter, 'total'] * adv_fees\n\n # ---req. income is adjusted for inflation from the second year of withdrawal. Reinvestment of coupon\n # stops from the year income starts. Req. 
income is reduced by the coupon payments\n\n if counter == income_starts:\n\n income_needed = req_annual_income - social\n base_df.loc[counter, 'income'] = income_needed - cpn_income_base\n income_needed = req_annual_income\n\n else:\n income_needed = income_needed * (1 + inflation) - social\n base_df.loc[counter, 'income'] = income_needed - cpn_income_base\n income_needed = income_needed + social\n\n base_df.loc[counter, 'total_net_fees'] = base_df.loc[counter, 'total'] - \\\n base_df.loc[counter, 'adv_fees'] - \\\n base_df.loc[counter, 'income']\n\n base_investment = base_df.loc[counter, 'total_net_fees']\n\n base_df.loc[:, 'adj_total'] = base_df.loc[:, 'total_net_fees'].apply(lambda x: x if x > 0 else 0)\n sim_base_total.loc[:, 's_{}'.format(str(runs))] = base_df.loc[:, 'total_net_fees']\n sim_base_income.loc[:, 's_{}'.format(str(runs))] = base_df.loc[:, 'income']\n\n # -------------------required income----------------------------------\n req_annual_income = float(read_income_inputs.loc['annual_income', 'inputs'])\n income_needed = req_annual_income - social\n # income_net_fia_income = max(0, income_needed - income_from_fia)\n cpn_income_base = base_investment * wtd_cpn_yield\n\n # ----------------------------FIA PORTFOLIO----------------------------------------------\n for name in boy_value:\n fia_portfolio_df.loc[:, name] = 0.0\n\n for counter in years:\n period_returns = list(random_returns.loc[counter, :])\n if counter == 0:\n\n fia_portfolio_df.loc[counter, boy_value] = [base_weights[c] * port_investment\n for c in range(len(boy_value))]\n fia_portfolio_df.loc[counter, 'total'] = fia_portfolio_df.loc[counter, boy_value].sum()\n fia_portfolio_df.loc[counter, 'total_net_fees'] = 0.0\n fia_portfolio_df.loc[counter, 'income'] = 0.0\n port_investment = fia_portfolio_df.loc[counter, boy_value].sum()\n\n elif (counter > 0) and (counter < income_starts):\n\n fia_portfolio_df.loc[counter, boy_value] = [base_weights[c] * port_investment * (1 + period_returns[c])\n for c in range(len(boy_value))]\n fia_portfolio_df.loc[counter, 'total'] = fia_portfolio_df.loc[counter, boy_value].sum()\n fia_portfolio_df.loc[counter, 'adv_fees'] = fia_portfolio_df.loc[counter, 'total'] * adv_fees\n fia_portfolio_df.loc[counter, 'total_net_fees'] = fia_portfolio_df.loc[counter, 'total'] - \\\n fia_portfolio_df.loc[counter, 'adv_fees']\n\n port_investment = fia_portfolio_df.loc[counter, 'total_net_fees'] + cpn_income_port\n\n else:\n fia_portfolio_df.loc[counter, boy_value] = [base_weights[c] * port_investment * (1 + period_returns[c])\n for c in range(len(boy_value))]\n fia_portfolio_df.loc[counter, 'total'] = fia_portfolio_df.loc[counter, boy_value].sum()\n fia_portfolio_df.loc[counter, 'adv_fees'] = fia_portfolio_df.loc[counter, 'total'] * adv_fees\n\n # ---req. income is adjusted for inflation from the second year of withdrawal. Reinvestment of coupon\n # stops from the year income starts. Req. 
income is reduced by the coupon payments\n\n if counter == income_starts:\n\n income_needed = req_annual_income - social\n income_net_fia_income = max(0, income_needed - income_from_fia)\n fia_portfolio_df.loc[counter, 'income'] = max(0, income_net_fia_income - cpn_income_port)\n income_needed = req_annual_income\n\n else:\n income_needed = income_needed * (1 + inflation) - social\n income_net_fia_income = max(0, income_needed - income_from_fia)\n fia_portfolio_df.loc[counter, 'income'] = max(0, income_net_fia_income - cpn_income_port)\n income_needed = income_needed + social\n\n fia_portfolio_df.loc[counter, 'total_net_fees'] = fia_portfolio_df.loc[counter, 'total'] - \\\n fia_portfolio_df.loc[counter, 'adv_fees'] - \\\n fia_portfolio_df.loc[counter, 'income']\n\n port_investment = fia_portfolio_df.loc[counter, 'total_net_fees']\n\n sim_port_total.loc[:, 's_{}'.format(str(runs))] = fia_portfolio_df.loc[:, 'total_net_fees'] + \\\n income_df.loc[:, 'contract_value']\n\n sim_port_income.loc[:, 's_{}'.format(str(runs))] = fia_portfolio_df.loc[:, 'income']\n\n fia_portfolio_df.loc[:, 'adj_total'] = fia_portfolio_df.loc[:, 'total_net_fees'].apply(\n lambda x: x if x > 0 else 0)\n\n # -------------------------------------income breakdown for Base portfolio----------------------------------\n base_df.to_csv(src + method + '_base_port_detail.csv')\n sim_base_total.to_csv(src + 'base_ending_values.csv')\n income_breakdown_base = pd.DataFrame(sim_base_total.quantile(0.5, axis=1))\n income_breakdown_base.loc[:, 'income_from_portfolio'] = sim_base_income.quantile(0.5, axis=1)\n income_breakdown_base.loc[:, 'fia_income'] = 0.0\n income_breakdown_base.loc[:, 'social_security_income'] = social\n income_breakdown_base.loc[:, 'coupon_income'] = cpn_income_base\n\n income_breakdown_base.rename(columns={income_breakdown_base.columns[0]: 'portfolio_ending_value'}, inplace=True)\n income_breakdown_base.loc[:, 'income_from_portfolio'][\n income_breakdown_base.loc[:, 'portfolio_ending_value'] <= 0] = 0\n income_breakdown_base.loc[:, 'total_income'] = income_breakdown_base.loc[:, income_breakdown_base.columns[1:]].sum(\n axis=1)\n\n # ------------------------------------Block Ends-------------------------------------------------------------\n\n # ------------------------------income breakdown for FIA portfolio----------------------------------\n fia_portfolio_df.loc[:, 'contract_value'] = income_df.loc[:, 'contract_value']\n fia_portfolio_df.to_csv(src + method + '_fia_port_detail.csv')\n sim_port_total.to_csv(src + 'fiaport_ending_values.csv')\n\n income_breakdown_port = pd.DataFrame(sim_port_total.quantile(0.5, axis=1))\n income_breakdown_port.loc[:, 'income_from_portfolio'] = sim_port_income.quantile(0.5, axis=1)\n income_breakdown_port.loc[:, 'fia_income'] = income_from_fia\n income_breakdown_port.loc[:, 'social_security_income'] = social\n income_breakdown_port.loc[:, 'coupon_income'] = cpn_income_port\n\n income_breakdown_port.rename(columns={income_breakdown_port.columns[0]: 'portfolio_ending_value'}, inplace=True)\n income_breakdown_port.loc[:, 'income_from_portfolio'][\n income_breakdown_port.loc[:, 'portfolio_ending_value'] <= 0] = 0\n income_breakdown_port.loc[:, 'total_income'] = income_breakdown_port.loc[:, income_breakdown_port.columns[1:]].sum(\n axis=1)\n\n # ------------------------------------Block Ends-------------------------------------------------------------\n q_cut = [0.0, .05, 0.25, 0.5, 0.75, 0.95, 1.0]\n sim_base_income[sim_base_total < income_needed] = 0.0\n\n 
sim_port_income[sim_port_total < income_net_fia_income] = 0\n\n sim_port_income = sim_port_income + income_from_fia\n\n base_quantile = sim_base_total.loc[sim_base_total.index[-1]].quantile(q_cut)\n\n port_quantile = sim_port_total.loc[sim_port_total.index[-1]].quantile(q_cut)\n\n cols = ['Min', '5th', '25th', '50th', '75th', '90th', 'Max']\n\n # ----------------------------------drop year 0---------------------------------\n sim_base_total = sim_base_total[1:]\n sim_port_total = sim_port_total[1:]\n\n # --------------------------------quantile analysis for base terminal value-----------------------------\n base_qcut = pd.DataFrame(index=sim_base_total.index, columns=cols)\n for c in range(len(cols)):\n base_qcut.loc[:, cols[c]] = sim_base_total.quantile(q_cut[c], axis=1)\n\n base_qcut.clip(lower=0, inplace=True)\n\n # ----------------------------------quantile analysis for base income----------------------------\n base_income_qcut = pd.DataFrame(index=sim_base_income.index, columns=cols)\n for c in range(len(cols)):\n base_income_qcut.loc[:, cols[c]] = sim_base_income.quantile(q_cut[c], axis=1)\n\n # ----Remove NaN's prior to the income start years------------\n # base_income_qcut = base_income_qcut.loc[income_starts:]\n\n # -------------quantile analysis for portfolio terminal value ---------------\n\n port_qcut = pd.DataFrame(index=sim_port_total.index, columns=cols)\n for c in range(len(cols)):\n port_qcut.loc[:, cols[c]] = sim_port_total.quantile(q_cut[c], axis=1)\n\n port_qcut.clip(lower=0, inplace=True)\n\n # ---------------quantile analysis for portfolio income----------------------------\n port_income_qcut = pd.DataFrame(index=sim_port_income.index, columns=cols)\n for c in range(len(cols)):\n port_income_qcut.loc[:, cols[c]] = sim_port_income.quantile(q_cut[c], axis=1)\n\n # ----Remove NaN's prior to the income start years------------\n # port_income_qcut = port_income_qcut.loc[income_starts:]\n\n # ----------probability ending value will be less than 0 at the end of the horizon -------------------------\n\n base_legacy_risk = (sim_base_total.loc[sim_base_total.index[life_expectancy - clients_age]] < 0).sum() / (\n trials + 1)\n port_legacy_risk = (sim_port_total.loc[sim_port_total.index[life_expectancy - clients_age]] < 0).sum() / (\n trials + 1)\n\n legacy_risk = pd.DataFrame([base_legacy_risk, port_legacy_risk,\n 'Prob. 
of portfolio value less than 0 at the end of the expected life'],\n index=['base', 'fia_portfolio', 'Notes'],\n columns=['Ruin Probability'])\n\n # -----------Year-wise probability of ending value greater than 0 -----------------\n base_psuccess = sim_base_total.apply(lambda x: x > 0).sum(axis=1) / (trials + 1)\n port_psuccess = sim_port_total.apply(lambda x: x > 0).sum(axis=1) / (trials + 1)\n\n # -----------------------WRITING FILES TO EXCEL ---------------------------\n col_names = ['50th', 'age', 'comment']\n writer = pd.ExcelWriter(src + method + '_historical_simulated_income_summary.xlsx', engine='xlsxwriter')\n read_income_inputs.to_excel(writer, sheet_name='inputs_for_income')\n\n read_returns_est.to_excel(writer, sheet_name='asset_returns_estimates')\n\n age_index = list(range(clients_age + 1, clients_age + len(base_qcut) + 1))\n base_qcut.loc[:, 'age'] = age_index\n base_qcut.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n base_qcut.loc[income_starts:, col_names].to_excel(writer, sheet_name='base_ending_value_quantiles')\n\n base_income_qcut = base_income_qcut.loc[1:, :]\n base_income_qcut.loc[:, 'age'] = age_index\n base_income_qcut.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n base_income_qcut.loc[income_starts:, col_names].to_excel(writer, sheet_name='base_income_quantiles')\n\n port_qcut.loc[:, 'age'] = age_index\n port_qcut.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n port_qcut.loc[income_starts:, col_names].to_excel(writer, sheet_name='fia_port_ending_value_quantiles')\n\n port_income_qcut = port_income_qcut.loc[1:, :]\n port_income_qcut.loc[:, 'age'] = age_index\n port_income_qcut.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n port_income_qcut.loc[income_starts:, col_names].to_excel(writer, sheet_name='fia_port_income_quantiles')\n\n # prob_success_df = pd.concat([base_psuccess, port_psuccess], axis=1)\n # prob_success_df.rename(columns={prob_success_df.columns[0]: 'prob(ending_value>0)_base',\n # prob_success_df.columns[1]: 'prob(ending_value>0)_port'}, inplace=True)\n\n # prob_success_df.loc[:, 'age'] = age_index\n # prob_success_df.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n # prob_success_df.to_excel(writer, sheet_name='success_probability')\n\n income_breakdown_base = income_breakdown_base.loc[1:, :]\n income_breakdown_base.loc[:, 'age'] = age_index\n income_breakdown_base.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n income_breakdown_base.loc[income_starts:, :].to_excel(writer, sheet_name='base_income_breakdown_median')\n\n income_breakdown_port = income_breakdown_port.loc[1:, :]\n income_breakdown_port.loc[:, 'age'] = age_index\n income_breakdown_port.loc[:, 'ending contract value'] = income_df.loc[:, 'contract_value'].fillna(0)\n income_breakdown_port.loc[life_expectancy - clients_age, 'comment'] = 'expected_life'\n\n income_breakdown_port.loc[income_starts:, :].to_excel(writer, sheet_name='fia_income_breakdown_median')\n\n if method == 'normal':\n # median_returns_normal.loc[:, 'fia_median_returns'] = median_normal_fia\n # median_returns_normal.to_excel(writer, sheet_name='unsorted_port_return')\n save_normal.to_excel(writer, sheet_name='port_return_unsorted')\n\n elif method == 'smallest':\n # median_returns_smallest.loc[:, 'fia_median_returns'] = median_smallest_fia\n # median_returns_smallest.to_excel(writer, sheet_name='port_return_asc')\n save_asc.to_excel(writer, sheet_name='port_return_asc')\n\n else:\n # median_returns_largest.loc[:, 
'fia_median_returns'] = median_largest_fia\n # median_returns_largest.to_excel(writer, sheet_name='port_return_desc')\n save_desc.to_excel(writer, sheet_name='port_return_desc')\n\n # terminal_val = pd.read_csv(src + 'terminal_values.csv', index_col=[0])\n # ending_val = pd.read_csv(src + 'ending_values.csv', index_col=[0])\n # ending_val_ror = pd.read_csv(src + 'ending_values_ror.csv', index_col=[0])\n\n # terminal_val.to_excel(writer, sheet_name='terminal_values')\n # ending_val.to_excel(writer, sheet_name='port_ending_values')\n # ending_val_ror.to_excel(writer, sheet_name='port_annual_growth')\n\n writer.save()\n\n # -----------------Plotting charts--------------------------------------------\n # base_qcut.loc[income_starts:].plot(grid=True, title='Quantile Terminal Value - Base Portfolio')\n # plt.savefig(src + \"quantile_terminal_base.png\")\n # plt.close('all')\n #\n # base_income_qcut.plot(grid=True, title='Quantile Income - Base Portfolio')\n # plt.savefig(src + \"quantile_income_base.png\")\n # plt.close('all')\n #\n # base_psuccess.plot(grid=True, title='Probability of Success (Portfolio Ending Value > 0) - Base Portfolio')\n # plt.savefig(src + \"success_probabilty_base.png\")\n # plt.close('all')\n #\n # (1 - base_psuccess).plot(grid=True, title='Probability of Ruin (Portfolio Ending Value < 0) - Base Portfolio')\n # plt.savefig(src + \"ruin_probability_base.png\")\n # plt.close('all')\n #\n # port_qcut.loc[income_starts:].plot(grid=True, title='Quantile Terminal Value - FIA Portfolio')\n # plt.savefig(src + \"quantile_terminal_fia.png\")\n # plt.close('all')\n #\n # port_income_qcut.plot(grid=True, title='Quantile Income - FIA Portfolio')\n # plt.savefig(src + \"quantile_income_fia.png\")\n # plt.close('all')\n #\n # port_psuccess.plot(grid=True, title='Probability of Success (Portfolio Ending Value > 0) - FIA Portfolio')\n # plt.savefig(src + \"success_probabilty_fia.png\")\n # plt.close('all')\n #\n # (1 - port_psuccess).plot(grid=True, title='Probability of Ruin (Portfolio Ending Value < 0) - FIA Portfolio')\n # plt.savefig(src + \"ruin_probability_fia.png\")\n # plt.close('all')\n\n print(\"simulation completed for {}\".format(method))",
"def _output_performance(self):\n self.portfolio.create_equity_curve_dataframe()\n \n print(\"Creating summary statistics...\")\n stats = self.portfolio.output_summary_stats()\n \n print(\"Creating equity curve...\")\n print(self.portfolio.equity_curve.tail(10))\n pprint.pprint(stats)\n \n print(\"Signals: %s\" % self.signals)\n print(\"Orders: %s\" % self.orders)\n print(\"Fills: %s\" % self.fills)",
"def _output_performance(self):\n self.portfolio.create_equity_curve_dataframe()\n\n print(\"Creating summary statistics...\")\n stats = self.portfolio.output_summary_stats()\n\n print(\"Creating equity curve...\")\n print(self.portfolio.equity_curve.tail(10))\n pprint.pprint(stats)\n\n print(\"Signals: %s\" % self.signals)\n print(\"Orders: %s\" % self.orders)\n print(\"Fills: %s\" % self.fills)",
"def _generate_trading_instances(self, strategy_params_dict):\n print(\"Creating DataHandler, Strategy, Portfolio, and ExecutionHandler for\")\n print(\"strategy parameter list: %s...\" % strategy_params_dict)\n\n # Set internal data members equal to the classes we passed in earlier, along with necessary parameters.\n # https://softwareengineering.stackexchange.com/questions/131403/what-is-the-name-of-in-python/131415\n self.data_handler = self.data_handler_class(self.events, self.csv_dir, self.symbol_list)\n self.strategy = self.strategy_class(self.data_handler, self.events, **strategy_params_dict)\n self.portfolio = self.portfolio_class(self.data_handler, self.events, self.start_date, self.initial_capital)\n self.execution_handler = self.execution_handler_class(self.events) # The Event Queue sent to ExecutionHandler",
"def generate_portfolio(S_0, params):\n\tpublic_client = gdax.PublicClient()\n\tallvar = []\n\tsumvar = 0\n\tfor coin in params:\n\t\ttheta = coin[0]\n\t\tv = theta[0]\n\t\tT = coin[1]\n\t\tprod_id = coin[2]\n\t\t# Get the current value of the coin, i.e. how much you bought\n\t\tname = prod_id + '-USD'\n\t\tstats = public_client.get_product_24hr_stats(name)\n\t\tvalue = (float(stats['high']) + float(stats['low']))/2\n\t\tallvar.append([prod_id, value, v])\n\t\tsumvar += v\n\tpriority = sorted(allvar, key=lambda i: i[2])\n\tportfolio = []\n\tfor i in priority:\n\t\tinvestment = S_0*i[2]/sumvar\n\t\tcurrency = investment/i[1]\n\t\tportfolio.append((i[0], currency, investment)) # id, investment, currency\n\tprint(\"\\nYour suggested investments are: \\n\")\n\tfor coin in portfolio:\n\t\tprint(\"%s: %s for %s USD\" % (coin[0], coin[1], coin[2]))\n\t# Prompt to save the portfolio\n\tdone = False\n\twhile done != True:\n\t\tinp = input(\"\\nWould you like to save this portfolio? (y/n)\t\")\n\t\ttry:\n\t\t\tif inp.lower() == 'y':\n\t\t\t\tpublic_client = gdax.PublicClient()\n\t\t\t\tcurrent_date = np.datetime64(public_client.get_time().get(\"iso\").split('T')[0])\n\t\t\t\t# Save the file\n\t\t\t\twith open(\"portfolios/%s.txt\" % (current_date), \"w\") as f:\n\t\t\t\t\tfor coin in portfolio:\n\t\t\t\t\t\tf.write(str(coin[0]) + ', ' + str(coin[1]) + ', ' + str(coin[2]) + '\\n')\n\t\t\t\tprint(\"Portfolio saved. Exiting.\\n\")\n\t\t\t\tdone = True\n\t\t\tif inp.lower() == 'n':\n\t\t\t\tprint(\"Program complete. Exiting.\\n\")\n\t\t\t\tdone = True\n\t\texcept ValueError:\n\t\t\tprint(\"Your input could not be interpreted.\")",
"def simulate_trading(self):\n self._run_backtest()\n self._output_performance()",
"def generate_portfolio_data(self):\n self.__load_portfolio_historical_prices()\n self.__populate_historical_trade_data()\n self.__calculate_portfolio_returns()\n self.__calculate_portfolio_performance()",
"def _generate_trading_instances(self):\n print(\n \"Initizalization...\"\n )\n\n self.data_handler = self.data_handler_cls(self.events, self.csv_dir, self.symbol_list, self.start_date,\n self.end_date)\n self.portfolio = self.portfolio_cls(self.data_handler, self.events, self.initial_capital, self.start_date,\n self.strategy_id, self.spread, self.commission,self.csv_dir)\n self.strategy = self.strategy_cls(self.data_handler, self.events, self.portfolio, self.spread, self.commission)\n self.plot = self.plot_cls(self.csv_dir, self.portfolio, self.strategy_id)",
"def main(ft_setups, ft_strategies):\n\n num_procs = 16\n\n # initialize level parameters\n level_params = dict()\n level_params['restol'] = 1e-09\n\n # initialize step parameters\n step_params = dict()\n step_params['maxiter'] = 50\n\n # initialize space transfer parameters\n space_transfer_params = dict()\n space_transfer_params['finter'] = True\n space_transfer_params['rorder'] = 2\n space_transfer_params['iorder'] = 6\n\n # initialize sweeper parameters\n sweeper_params = dict()\n sweeper_params['quad_type'] = 'RADAU-RIGHT'\n sweeper_params['num_nodes'] = [3]\n\n # initialize controller parameters\n controller_params = dict()\n controller_params['logger_level'] = 30\n\n for setup in ft_setups:\n if setup == 'HEAT':\n # initialize problem parameters\n problem_params = dict()\n problem_params['nu'] = 0.5\n problem_params['freq'] = 1\n problem_params['nvars'] = [255, 127]\n problem_params['bc'] = 'dirichlet-zero'\n\n level_params['dt'] = 0.5\n\n space_transfer_params['periodic'] = False\n\n # fill description dictionary for easy step instantiation\n description = dict()\n description['problem_class'] = heatNd_forced # pass problem class\n description['problem_params'] = problem_params # pass problem parameters\n description['sweeper_class'] = imex_1st_order # pass sweeper (see part B)\n description['sweeper_params'] = sweeper_params # pass sweeper parameters\n description['level_params'] = level_params # pass level parameters\n description['step_params'] = step_params # pass step parameters\n description['space_transfer_class'] = mesh_to_mesh # pass spatial transfer class\n description['space_transfer_params'] = space_transfer_params # pass paramters for spatial transfer\n\n # setup parameters \"in time\"\n t0 = 0.0\n Tend = 8.0\n\n elif setup == 'ADVECTION':\n # initialize problem parameters\n problem_params = dict()\n problem_params['c'] = 1.0\n problem_params['nvars'] = [256, 128]\n problem_params['freq'] = 2\n problem_params['order'] = 2\n problem_params['bc'] = 'periodic' # boundary conditions\n\n level_params['dt'] = 0.125\n\n space_transfer_params['periodic'] = True\n\n # fill description dictionary for easy step instantiation\n description = dict()\n description['problem_class'] = advectionNd # pass problem class\n description['problem_params'] = problem_params # pass problem parameters\n description['sweeper_class'] = generic_implicit # pass sweeper (see part B)\n description['sweeper_params'] = sweeper_params # pass sweeper parameters\n description['level_params'] = level_params # pass level parameters\n description['step_params'] = step_params # pass step parameters\n description['space_transfer_class'] = mesh_to_mesh # pass spatial transfer class\n description['space_transfer_params'] = space_transfer_params # pass paramters for spatial transfer\n\n # setup parameters \"in time\"\n t0 = 0.0\n Tend = 2.0\n\n else:\n raise NotImplementedError('setup not implemented')\n\n # do a reference run without any faults to see how things would look like (and to get maxiter/ref_niter)\n ft.strategy = 'NOFAULT'\n\n controller = controller_nonMPI_hard_faults(\n num_procs=num_procs, controller_params=controller_params, description=description\n )\n\n # get initial values on finest level\n P = controller.MS[0].levels[0].prob\n uinit = P.u_exact(t0)\n\n # call main function to get things done...\n uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)\n\n # stats magic: get iteration counts to find maxiter/niter\n sortedlist_stats = get_sorted(stats, level=-1, type='niter', 
sortby='process')\n ref_niter = max([item[1] for item in sortedlist_stats])\n\n print('Will sweep over %i steps and %i iterations now...' % (num_procs, ref_niter))\n\n # loop over all strategies\n for strategy in ft_strategies:\n ft_iter = range(1, ref_niter + 1)\n ft_step = range(0, num_procs)\n\n print('------------------------------------------ working on strategy ', strategy)\n\n iter_count = np.zeros((len(ft_step), len(ft_iter)))\n\n # loop over all steps\n xcnt = -1\n for step in ft_step:\n xcnt += 1\n\n # loop over all iterations\n ycnt = -1\n for iter in ft_iter:\n ycnt += 1\n\n ft.hard_step = step\n ft.hard_iter = iter\n ft.strategy = strategy\n\n # call main function to get things done...\n uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)\n\n # stats magic: get iteration counts to find maxiter/niter\n sortedlist_stats = get_sorted(stats, level=-1, type='niter', sortby='process')\n niter = max([item[1] for item in sortedlist_stats])\n iter_count[xcnt, ycnt] = niter\n\n print(iter_count)\n\n np.savez(\n 'data/' + setup + '_results_hf_' + strategy,\n iter_count=iter_count,\n description=description,\n ft_step=ft_step,\n ft_iter=ft_iter,\n )",
"def run(sim_attr_generator):\n#TODO: clean\n#TODO: integrate analyses\n def analyze_and_save(simulation,simulation_attributes):\n#? Ugly conf file analyses integration.\n if simulation_attributes.analyses and Args.output_file != None:\n verbose_print(\"Saving analyses for {0}.\".format(simulation_attributes.id_name),2)\n results = analyze_datas(\n simulation.result,\n simulation_attributes.analyses\n )\n plotables = ana_results_to_plotables(\n results,\n simulation_attributes.analyses\n )\n#TODO error handling for save\n analysis_save_dm(\n results,\n plotables,\n simulation_attributes.analyses,\n simulation_attributes.id_name\n )\n\n def save_simulation(simulation,simulation_attributes):\n if not simulation_attributes.analyses and Args.output_file != None:\n verbose_print(\"Saving simulation datas of {0}.\".format(\n simulation_attributes.id_name\n ),2) \n try:\n np.save(\n simulation_attributes.id_name,\n simulation.result\n )\n except:\n raise EnvironmentError(\"Can't save data to {}.\".format(\n simulation_attributes.id_name\n ))\n\n verbose_print(\"Starting simulation run.\",1)\n for i,simulation_attributes in enumerate(sim_attr_generator):\n verbose_print(\"Starting simulation number {0}: {1}\".format(\n i,\n simulation_attributes.id_name\n ),2)\n simulation = Simulation(\n SimulationVariables(simulation_attributes)\n )\n simulation.start()\n save_simulation(simulation,simulation_attributes)\n analyze_and_save(simulation,simulation_attributes)",
"def evolve(self, max_iter=40, warm_start=False, sigma_bound=None, info=True, cross_method=1, par_count=3,\n max_distance=2.0, archive_iter=30, max_archive_size=200):\n if sigma_bound is None:\n sigma_bound = [0.01, 0.2]\n\n if not warm_start:\n self.archive = []\n else:\n self.archive = self.archive.tolist()\n\n # outer loop\n for q in range(0, archive_iter):\n # create initial generation with strategy parameters\n init_gen = np.empty(shape=(self.gen_size, self.num_variables))\n init_sigma = np.empty(shape=(self.gen_size, self.num_variables))\n for i in range(0, self.num_variables):\n init_gen[:, i] = np.random.uniform(self.lower_bound[i], self.upper_bound[i], self.gen_size)\n init_sigma[:, i] = np.random.uniform(sigma_bound[0] * self.total_bound[i],\n sigma_bound[1] * self.total_bound[i], self.gen_size)\n\n self.gen = np.copy(init_gen)\n self.sigma = np.copy(init_sigma)\n\n if info:\n msg = \" Archive Iteration: {}/{}\".format(q+1, archive_iter)\n print(msg)\n self.gen = np.copy(init_gen)\n self.sigma = np.copy(init_sigma)\n\n # inner loop\n for k in range(0, max_iter):\n\n if info:\n if k == max_iter - 1:\n msg = \"Generation {}/{}\\n\".format(k+1, max_iter)\n else:\n msg = \"Generation {}/{}\".format(k+1, max_iter)\n sys.stdout.write(\"\\r\"+msg)\n\n # random selection based off number of parents\n selected = []\n for i in range(0, par_count):\n selected.append(np.random.choice(range(0, self.gen_size), self.gen_size, replace=False))\n selected = np.asarray(selected).T\n ch_val = []\n ch_sigma = []\n # reproduce from parents\n for i in range(0, self.gen_size):\n c_v, c_s = self.__reproduction(self.gen[selected[i]], self.sigma[selected[i]], cross_method)\n ch_val.append(c_v)\n ch_sigma.append(c_s)\n\n ch_val = np.asarray(ch_val)\n ch_sigma = np.asarray(ch_sigma)\n # check bounds of offspring\n for i in range(0, self.gen_size):\n for j in range(0, self.num_variables):\n if ch_val[i][j] > self.domain[0][j]:\n ch_val[i][j] = self.domain[0][j]\n ch_sigma[i][j] *= .90\n elif ch_val[i][j] < self.domain[1][j]:\n ch_val[i][j] = self.domain[1][j]\n ch_sigma[i][j] *= .90\n\n # combine offspring with parents\n parents_offspring_val = np.vstack((self.gen, ch_val))\n parents_offspring_sigma = np.vstack((self.sigma, ch_sigma))\n ind = list(range(0, 2 * self.gen_size))\n rel_fit = []\n # compute relative fitness\n for i in range(0, 2 * self.gen_size):\n tourn = np.random.choice(ind[0:i] + ind[(i + 1):], self.tourn_size)\n rel_fit.append(self.__pareto_dominance(parents_offspring_val[i], parents_offspring_val[tourn]))\n\n if len(self.archive) != 0: # if archive is not emtpy\n # loop over each member in archive\n for arch in self.archive:\n for i in range(0, 2 * self.gen_size):\n # calculate euclidean distance\n dist = np.linalg.norm(arch - parents_offspring_val[i])\n # if their distance is small -> they're too close,\n # larger the max distance the more spread out solutions become\n if dist <= max_distance:\n # penalize fitness down to 25%\n rel_fit[i] *= 0.25\n\n # sort individuals based on relative fitness\n rel_fit = np.asarray(rel_fit)\n sorted_ind = np.asarray(ind)[np.argsort(-rel_fit)]\n\n # use elitism to take the best half from pooled\n # parents and offspring\n self.gen = parents_offspring_val[sorted_ind[0:self.gen_size]]\n self.sigma = parents_offspring_sigma[sorted_ind[0:self.gen_size]]\n\n fits = self.fitness_functions(self.gen)\n # update archive\n if len(self.archive) !=0:\n # loop over every individual in surviving generation\n for j in range(0, self.gen_size):\n # indices of members in 
archive that are dominated by\n # the current individual\n ind_del = []\n f_arch = self.fitness_functions(np.asarray(self.archive))\n dominated = False\n for i in range(0, len(self.archive)):\n # individual strongly dominates member of archive\n if np.sum(f_arch[i] > fits[j]) == len(fits[j]):\n ind_del.append(i)\n # individual is strongly dominated by member\n elif np.sum(f_arch[i] < fits[j]) == len(fits[j]):\n dominated = True\n if len(ind_del) != 0:\n # delete members that are strongly dominated\n for index in sorted(ind_del, reverse=True):\n del self.archive[index]\n if not dominated: # weakly dominates members of archive\n if len(self.archive) <= max_archive_size:\n self.archive.append(self.gen[j])\n else: # archive is full\n temp = np.vstack((self.gen[j], self.archive))\n dist = self.__distance(temp)\n index = np.argmin(dist)\n if index != 0:\n del self.archive[index-1]\n self.archive.append(self.gen[j])\n\n dist = pdist(np.asarray(self.archive))\n #dist = self.__distance(np.asarray(self.archive))\n dist_min = np.min(dist)\n dist_max = np.max(dist)\n dist_median = np.median(dist)\n dist_std = np.std(dist)\n if info:\n msg = \" Archive Information:\\n\" \\\n \" Distance - Min: {}, Max: {}, Median: {}, Std: {}\\n\" \\\n \" Archive Size: {}\".format(dist_min, dist_max, dist_median,dist_std, len(self.archive))\n print(msg)\n\n else: # if first iteration of outer loop, just add best solution from gen\n self.archive.append(self.gen[0])\n self.archive = np.asarray(self.archive)",
"def run(self, num_iterations = 50, **kwargs):\n \n #setup system\n self.cost_calculator = t.CostCalculator(self.suppliers_allcards, self.all_ensembles_dict)\n bounds = np.array(self.cost_calculator.ensemble_sizes) - 1\n #define cost functions\n cost_func = lambda p: sum(self.cost_calculator.get_cost(p))\n #create model\n self.model = ga(cost_func, bounds, **kwargs)\n \n fitness_list = [];\n \n for i in range(num_iterations):\n #Update\n f = next(self.model)\n #get fitness values\n fitness_list.append(f[0])\n #Output\n print('\\r(%d/%d) '%(i+1,num_iterations), end = '')\n print('top ensemble fitness: %1.1f '%f[0], end = '')\n \n print('\\nDone')\n self.solution = self.cost_calculator.decode_arrangement(self.model.get_solution())",
"def portfolio_simulations_using_target_returns(num_of_years=30, trials=100):\n print(\"Running portfolio_simulations_using_target_returns() method\")\n sim_fia_cv = pd.DataFrame(index=range(num_of_years + 1))\n\n sim_base_total = pd.DataFrame(index=range(num_of_years + 1))\n sim_base_income = pd.DataFrame(index=range(num_of_years + 1))\n\n sim_port_total = pd.DataFrame(index=range(num_of_years + 1))\n sim_port_income = pd.DataFrame(index=range(num_of_years + 1))\n\n read_income_inputs = pd.read_excel(src + \"portfolio_information.xlsx\", sheet_name='income_model_inputs',\n index_col=[0])\n\n read_returns_est = pd.read_excel(src + \"portfolio_information.xlsx\", sheet_name='income_assets_returns_estimates',\n index_col=[0])\n\n read_asset_weights = pd.read_excel(src + \"portfolio_information.xlsx\", sheet_name='asset_weights',\n index_col=[0])\n\n # read random returns for simulation\n read_normal = pd.read_csv(src + 'median_returns_unsorted.csv', index_col=[0], parse_dates=True)\n cols = [read_normal.columns[c].split('_')[1] for c in np.arange(len(read_normal.columns))]\n read_normal.rename(columns=dict(zip(list(read_normal.columns), cols)), inplace=True)\n idx = list(read_normal.index)\n\n runs = 0\n while runs <= trials:\n # --------Shuffling the path of random returns from the median portfolio\n np.random.shuffle(idx)\n read_normal = read_normal.set_index([idx])\n read_normal = read_normal.sort_index()\n\n # assets_col_names = list(read_normal.columns)\n # tickers = list(read_asset_weights.index)\n wts = np.array(read_asset_weights.loc[:, 'base'])\n\n def asset_median_returns(data, ticker):\n return data.filter(regex=ticker).median(axis=1)\n\n # dataframe for unsorted returns (normal)\n median_returns_normal = read_normal.copy()\n median_returns_normal.loc[:, 'portfolio_return'] = median_returns_normal.dot(wts)\n median_normal_fia = pd.DataFrame({'FIA': asset_median_returns(read_normal, 'FIA')})\n\n years = list(range(0, num_of_years + 1))\n income_cols = ['year', 'strategy_term', 'index_returns', 'term_ret', 'term_ret_with_par', 'term_annualize',\n 'ann_net_spread', 'term_ret_netspr', 'high_inc_benefit_base', 'rider_fee', 'eoy_income',\n 'contract_value']\n\n term = int(read_income_inputs.loc['term', 'inputs'])\n fia_ret = read_returns_est.loc[read_returns_est.index[-1], 'Annualized Returns']\n fia_risk = read_returns_est.loc[read_returns_est.index[-1], 'Annualized Risk']\n par_rate = float(read_income_inputs.loc['par_rate', 'inputs'])\n spread = float(read_income_inputs.loc['spread', 'inputs'])\n bonus_term = int(read_income_inputs.loc['bonus_term', 'inputs'])\n premium = float(read_income_inputs.loc['premium', 'inputs'])\n income_bonus = float(read_income_inputs.loc['income_bonus', 'inputs'])\n\n income_starts = int(read_income_inputs.loc['start_income_years', 'inputs'])\n income_growth = float(read_income_inputs.loc['income_growth', 'inputs'])\n rider_fee = float(read_income_inputs.loc['rider_fee', 'inputs'])\n inc_payout_factor = float(read_income_inputs.loc['income_payout_factor', 'inputs'])\n contract_bonus = float(read_income_inputs.loc['contract_bonus', 'inputs'])\n social = float(read_income_inputs.loc['social', 'inputs'])\n inflation = float(read_income_inputs.loc['inflation', 'inputs'])\n wtd_cpn_yield = float(read_income_inputs.loc['wtd_coupon_yld', 'inputs'])\n life_expectancy = int(read_income_inputs.loc['life_expectancy_age', 'inputs'])\n clients_age = int(read_income_inputs.loc['clients_age', 'inputs'])\n\n # ---------------INCOME 
MODEL--------------------------------------------\n # runs = 0\n returns_dict = {}\n asset_dict = {}\n fia_dict = {}\n\n income_df = pd.DataFrame(index=years, columns=income_cols)\n income_df.loc[:, 'year'] = years\n income_df.loc[:, 'strategy_term'] = income_df.loc[:, 'year'] % term\n income_df.loc[:, 'strategy_term'] = income_df['strategy_term'].apply(lambda x: 1 if x == 0 else 0)\n\n income_df.loc[:, 'index_returns'] = read_normal.loc[:, 'FIA']\n\n cumprod = (1. + income_df['index_returns']).rolling(window=term).agg(lambda x: x.prod()) - 1\n income_df.loc[:, 'term_ret'] = np.where(income_df.loc[:, 'strategy_term'] == 1, cumprod, 0)\n income_df.loc[:, 'term_ret_with_par'] = income_df.loc[:, 'term_ret'] * par_rate\n income_df.loc[:, 'term_annualize'] = income_df.loc[:, 'term_ret_with_par'].apply(\n lambda x: (1 + x) ** (1 / term) - 1)\n income_df.loc[:, 'ann_net_spread'] = income_df.loc[:, 'term_annualize'] - spread\n income_df.loc[:, 'ann_net_spread'] = np.where(income_df.loc[:, 'strategy_term'] == 1,\n income_df.loc[:, 'ann_net_spread'], 0)\n income_df.loc[:, 'term_ret_netspr'] = income_df.loc[:, 'ann_net_spread'].apply(lambda x: (1 + x) ** term - 1)\n\n for counter in years:\n if counter == 0:\n income_df.loc[counter, 'high_inc_benefit_base'] = premium * (1 + income_bonus)\n\n elif counter <= min(bonus_term, income_starts):\n income_df.loc[counter, 'high_inc_benefit_base'] = income_df.loc[counter - 1, 'high_inc_benefit_base'] * \\\n (1 + income_growth)\n else:\n income_df.loc[counter, 'high_inc_benefit_base'] = income_df.loc[counter - 1, 'high_inc_benefit_base']\n\n income_df.loc[:, 'rider_fee'] = income_df.loc[:, 'high_inc_benefit_base'] * rider_fee\n income_df.loc[:, 'eoy_income'] = np.where(income_df.loc[:, 'year'] > income_starts,\n income_df.loc[:, 'high_inc_benefit_base'] * inc_payout_factor, 0)\n\n for counter in years:\n if counter == 0:\n income_df.loc[counter, 'contract_value'] = premium * (1 + contract_bonus)\n\n elif income_df.loc[counter, 'strategy_term'] == 1:\n x1 = income_df.loc[counter - 1, 'contract_value'] - income_df.loc[counter, 'rider_fee']\n x2 = (x1 * (1 + income_df.loc[counter, 'term_ret_netspr'])) - income_df.loc[counter, 'eoy_income']\n income_df.loc[counter, 'contract_value'] = x2\n\n else:\n x1 = income_df.loc[counter - 1, 'contract_value'] - income_df.loc[counter, 'rider_fee'] - \\\n income_df.loc[counter, 'eoy_income']\n\n income_df.loc[counter, 'contract_value'] = x1\n\n # variable stores the income number that is used in the base and fia portfolio calcs.\n\n income_from_fia = income_df.loc[income_df.index[-1], 'eoy_income']\n\n income_df.loc[:, 'contract_value'] = income_df.loc[:, 'contract_value'].apply(lambda x: 0 if x <= 0 else x)\n\n sim_fia_cv.loc[:, str(runs)] = income_df.loc[:, 'contract_value']\n\n # --------------------BASE MODEL---------------------------------------------\n\n base_wts = read_asset_weights.loc[:, 'base']\n base_assets = list(base_wts.index)\n base_weights = list(base_wts.values)\n base_returns = list(read_returns_est.loc[:, 'Annualized Returns'].values)\n base_std = list(read_returns_est.loc[:, 'Annualized Risk'].values)\n\n base_investment = float(read_income_inputs.loc['risky_assets', 'Base'])\n adv_fees = float(read_income_inputs.loc['advisor_fees', 'Base'])\n\n # -------------------required income----------------------------------\n req_annual_income = float(read_income_inputs.loc['annual_income', 'inputs'])\n income_needed = req_annual_income - social\n income_net_fia_income = max(0, income_needed - income_from_fia)\n 
cpn_income_base = base_investment * wtd_cpn_yield\n\n # ----------------------RANDOM RETURNS--------------------------\n r_cols = base_assets\n boy_value = ['bv_{}'.format(name) for name in base_assets]\n eoy_value = ['ev_{}'.format(name) for name in base_assets]\n\n random_returns = pd.DataFrame(index=income_df.index, columns=r_cols)\n\n random_returns = read_normal.copy()\n\n base_df = random_returns.copy()\n fia_portfolio_df = random_returns.copy()\n port_investment = float(read_income_inputs.loc['risky_assets', 'FIA'])\n cpn_income_port = port_investment * wtd_cpn_yield\n\n # -------------BASE PORTFOLIO----------------------------\n for name in boy_value:\n base_df.loc[:, name] = 0.0\n\n for counter in years:\n period_returns = list(random_returns.loc[counter, :])\n if counter == 0:\n\n base_df.loc[counter, boy_value] = [base_weights[c] * base_investment for c in range(len(boy_value))]\n\n base_df.loc[counter, 'total'] = base_df.loc[counter, boy_value].sum()\n base_df.loc[counter, 'total_net_fees'] = 0.0\n base_df.loc[counter, 'income'] = 0.0\n base_investment = base_df.loc[counter, boy_value].sum()\n\n elif (counter > 0) and (counter < income_starts):\n\n base_df.loc[counter, boy_value] = [base_weights[c] * base_investment * (1 + period_returns[c])\n for c in range(len(boy_value))]\n base_df.loc[counter, 'total'] = base_df.loc[counter, boy_value].sum()\n base_df.loc[counter, 'adv_fees'] = base_df.loc[counter, 'total'] * adv_fees\n base_df.loc[counter, 'total_net_fees'] = base_df.loc[counter, 'total'] - base_df.loc[\n counter, 'adv_fees']\n\n # --coupon payment is invested back into the risky portfolio until the income is withdrawn----\n base_investment = base_df.loc[counter, 'total_net_fees'] + cpn_income_base\n\n else:\n\n base_df.loc[counter, boy_value] = [base_weights[c] * base_investment * (1 + period_returns[c])\n for c in range(len(boy_value))]\n base_df.loc[counter, 'total'] = base_df.loc[counter, boy_value].sum()\n base_df.loc[counter, 'adv_fees'] = base_df.loc[counter, 'total'] * adv_fees\n\n # ---req. income is adjusted for inflation from the second year of withdrawal. Reinvestment of coupon\n # stops from the year income starts. Req. 
income is reduced by the coupon payments\n\n if counter == income_starts:\n\n income_needed = req_annual_income - social\n base_df.loc[counter, 'income'] = income_needed - cpn_income_base\n income_needed = req_annual_income\n\n else:\n income_needed = income_needed * (1 + inflation) - social\n base_df.loc[counter, 'income'] = income_needed - cpn_income_base\n income_needed = income_needed + social\n\n base_df.loc[counter, 'total_net_fees'] = base_df.loc[counter, 'total'] - \\\n base_df.loc[counter, 'adv_fees'] - \\\n base_df.loc[counter, 'income']\n\n base_investment = base_df.loc[counter, 'total_net_fees']\n\n base_df.loc[:, 'adj_total'] = base_df.loc[:, 'total_net_fees'].apply(lambda x: x if x > 0 else 0)\n sim_base_total.loc[:, 's_{}'.format(str(runs))] = base_df.loc[:, 'total_net_fees']\n sim_base_income.loc[:, 's_{}'.format(str(runs))] = base_df.loc[:, 'income']\n\n # ----------------------------FIA PORTFOLIO----------------------------------------------\n for name in boy_value:\n fia_portfolio_df.loc[:, name] = 0.0\n\n for counter in years:\n period_returns = list(random_returns.loc[counter, :])\n if counter == 0:\n\n fia_portfolio_df.loc[counter, boy_value] = [base_weights[c] * port_investment\n for c in range(len(boy_value))]\n fia_portfolio_df.loc[counter, 'total'] = fia_portfolio_df.loc[counter, boy_value].sum()\n fia_portfolio_df.loc[counter, 'total_net_fees'] = 0.0\n fia_portfolio_df.loc[counter, 'income'] = 0.0\n port_investment = fia_portfolio_df.loc[counter, boy_value].sum()\n\n elif (counter > 0) and (counter < income_starts):\n\n fia_portfolio_df.loc[counter, boy_value] = [base_weights[c] * port_investment * (1 + period_returns[c])\n for c in range(len(boy_value))]\n fia_portfolio_df.loc[counter, 'total'] = fia_portfolio_df.loc[counter, boy_value].sum()\n fia_portfolio_df.loc[counter, 'adv_fees'] = fia_portfolio_df.loc[counter, 'total'] * adv_fees\n fia_portfolio_df.loc[counter, 'total_net_fees'] = fia_portfolio_df.loc[counter, 'total'] - \\\n fia_portfolio_df.loc[counter, 'adv_fees']\n\n port_investment = fia_portfolio_df.loc[counter, 'total_net_fees'] + cpn_income_port\n\n else:\n fia_portfolio_df.loc[counter, boy_value] = [base_weights[c] * port_investment * (1 + period_returns[c])\n for c in range(len(boy_value))]\n fia_portfolio_df.loc[counter, 'total'] = fia_portfolio_df.loc[counter, boy_value].sum()\n fia_portfolio_df.loc[counter, 'adv_fees'] = fia_portfolio_df.loc[counter, 'total'] * adv_fees\n\n # ---req. income is adjusted for inflation from the second year of withdrawal. Reinvestment of coupon\n # stops from the year income starts. Req. 
income is reduced by the coupon payments\n\n if counter == income_starts:\n\n income_needed = req_annual_income - social\n income_net_fia_income = max(0, income_needed - income_from_fia)\n fia_portfolio_df.loc[counter, 'income'] = max(0, income_net_fia_income - cpn_income_port)\n income_needed = req_annual_income\n\n else:\n income_needed = income_needed * (1 + inflation) - social\n income_net_fia_income = max(0, income_needed - income_from_fia)\n fia_portfolio_df.loc[counter, 'income'] = max(0, income_net_fia_income - cpn_income_port)\n income_needed = income_needed + social\n\n fia_portfolio_df.loc[counter, 'total_net_fees'] = fia_portfolio_df.loc[counter, 'total'] - \\\n fia_portfolio_df.loc[counter, 'adv_fees'] - \\\n fia_portfolio_df.loc[counter, 'income']\n\n port_investment = fia_portfolio_df.loc[counter, 'total_net_fees']\n\n sim_port_total.loc[:, 's_{}'.format(str(runs))] = fia_portfolio_df.loc[:, 'total_net_fees'] + \\\n income_df.loc[:, 'contract_value']\n\n sim_port_income.loc[:, 's_{}'.format(str(runs))] = fia_portfolio_df.loc[:, 'income']\n\n fia_portfolio_df.loc[:, 'adj_total'] = fia_portfolio_df.loc[:, 'total_net_fees'].apply(\n lambda x: x if x > 0 else 0)\n\n runs = runs + 1\n\n # ----% of trials ending value at expected life is less than 0\n inflation_factor = (1 + annual_inflation) ** (life_expectancy - clients_age - income_starts)\n required_income_horizon = income_needed * inflation_factor\n required_income_horizon_net_fia = required_income_horizon - income_from_fia\n\n # prob_success_base = (sim_base_total.iloc[life_expectancy - clients_age] < 0).sum() / trials\n # prob_success_fia_port = (sim_port_total.iloc[life_expectancy - clients_age] < 0).sum() / trials\n\n prob_failure_base = (sim_base_total.iloc[life_expectancy - clients_age] < required_income_horizon).sum() / trials\n prob_failure_fia_port = (sim_port_total.iloc[life_expectancy - clients_age] < required_income_horizon_net_fia) \\\n .sum() / trials\n\n # ----Calculate at total average lifetime income for base portfolio----\n base_inc = sim_base_income.copy()\n base_inc = base_inc.cumsum()\n avg_income_base = base_inc.iloc[life_expectancy - clients_age].mean()\n\n # ----Calculate at total average lifetime income for FIA portfolio----\n port_inc = sim_port_income.copy()\n port_inc = port_inc.cumsum()\n avg_income_port = port_inc.iloc[life_expectancy - clients_age].mean()\n avg_income_port = avg_income_port + (income_from_fia * (life_expectancy - clients_age))\n\n # ---------income breakdown for Base portfolio----------------------------------\n base_df.to_csv(src + 'base_port_detail.csv')\n sim_base_total.to_csv(src + 'base_ending_values.csv')\n income_breakdown_base = pd.DataFrame(sim_base_total.quantile(0.5, axis=1))\n income_breakdown_base.loc[:, 'income_from_portfolio'] = sim_base_income.quantile(0.5, axis=1)\n income_breakdown_base.loc[:, 'fia_income'] = 0.0\n income_breakdown_base.loc[:, 'social_security_income'] = social\n income_breakdown_base.loc[:, 'coupon_income'] = cpn_income_base\n\n income_breakdown_base.rename(columns={income_breakdown_base.columns[0]: 'portfolio_ending_value'}, inplace=True)\n income_breakdown_base.loc[:, 'income_from_portfolio'][\n income_breakdown_base.loc[:, 'portfolio_ending_value'] <= 0] = 0\n income_breakdown_base.loc[:, 'total_income'] = income_breakdown_base.loc[:, income_breakdown_base.columns[1:]].sum(\n axis=1)\n\n # ------------Block Ends-------------------------------------------------------------\n\n # ---------income breakdown for FIA 
portfolio----------------------------------\n fia_portfolio_df.to_csv(src + 'fia_port_detail.csv')\n sim_port_total.to_csv(src + 'fiaport_ending_values.csv')\n\n income_breakdown_port = pd.DataFrame(sim_port_total.quantile(0.5, axis=1))\n income_breakdown_port.loc[:, 'income_from_portfolio'] = sim_port_income.quantile(0.5, axis=1)\n income_breakdown_port.loc[:, 'fia_income'] = income_from_fia\n income_breakdown_port.loc[:, 'social_security_income'] = social\n income_breakdown_port.loc[:, 'coupon_income'] = cpn_income_port\n\n income_breakdown_port.rename(columns={income_breakdown_port.columns[0]: 'portfolio_ending_value'}, inplace=True)\n income_breakdown_port.loc[:, 'income_from_portfolio'][\n income_breakdown_port.loc[:, 'portfolio_ending_value'] <= 0] = 0\n income_breakdown_port.loc[:, 'total_income'] = income_breakdown_port.loc[:, income_breakdown_port.columns[1:]].sum(\n axis=1)\n\n # ------------Block Ends-------------------------------------------------------------\n q_cut = [0.0, 0.05, 0.25, 0.5, 0.75, 0.95, 1.0]\n sim_base_income[sim_base_total < income_needed] = 0.0\n\n sim_port_income[sim_port_total < income_net_fia_income] = 0\n\n sim_port_income = sim_port_income + income_from_fia\n\n # base_quantile = sim_base_total.loc[sim_base_total.index[-1]].quantile([0.05, 0.25, 0.50, 0.75, 0.90])\n #\n # port_quantile = sim_port_total.loc[sim_port_total.index[-1]].quantile([0.05, 0.25, 0.50, 0.75, 0.90])\n\n base_quantile = sim_base_total.loc[sim_base_total.index[-1]].quantile(q_cut)\n\n port_quantile = sim_port_total.loc[sim_port_total.index[-1]].quantile(q_cut)\n\n # q_cut = [0.0, .05, 0.25, 0.5, 0.75, 0.95, 1.0]\n cols = ['Min', '5th', '25th', '50th', '75th', '90th', 'Max']\n\n # ----------------------------drop year 0--------------------------------\n sim_base_total = sim_base_total[1:]\n sim_port_total = sim_port_total[1:]\n\n # ---------quantile analysis for base terminal value-----------------------------\n base_qcut = pd.DataFrame(index=sim_base_total.index, columns=cols)\n for c in range(len(cols)):\n base_qcut.loc[:, cols[c]] = sim_base_total.quantile(q_cut[c], axis=1)\n\n base_qcut.clip(lower=0, inplace=True)\n\n # ---------quantile analysis for base income----------------------------\n base_income_qcut = pd.DataFrame(index=sim_base_income.index, columns=cols)\n for c in range(len(cols)):\n base_income_qcut.loc[:, cols[c]] = sim_base_income.quantile(q_cut[c], axis=1)\n\n # ----Remove NaN's prior to the income start years------------\n # base_income_qcut = base_income_qcut.loc[income_starts:]\n\n # -------------quantile analysis for portfolio terminal value ---------------\n\n port_qcut = pd.DataFrame(index=sim_port_total.index, columns=cols)\n for c in range(len(cols)):\n port_qcut.loc[:, cols[c]] = sim_port_total.quantile(q_cut[c], axis=1)\n\n port_qcut.clip(lower=0, inplace=True)\n\n # ---------------quantile analysis for portfolio income----------------------------\n port_income_qcut = pd.DataFrame(index=sim_port_income.index, columns=cols)\n for c in range(len(cols)):\n port_income_qcut.loc[:, cols[c]] = sim_port_income.quantile(q_cut[c], axis=1)\n\n # ----Remove NaN's prior to the income start years------------\n # port_income_qcut = port_income_qcut.loc[income_starts:]\n\n # ----------probability ending value will be less than 0 at the end of the horizon -------------------------\n\n base_legacy_risk = (sim_base_total.loc[sim_base_total.index[life_expectancy - clients_age]] < 0).sum() / (\n trials + 1)\n port_legacy_risk = 
(sim_port_total.loc[sim_port_total.index[life_expectancy - clients_age]] < 0).sum() / (\n trials + 1)\n\n legacy_risk = pd.DataFrame([base_legacy_risk, port_legacy_risk,\n 'Prob. of portfolio value less than 0 at the end of the expected life'],\n index=['base', 'fia_portfolio', 'Notes'],\n columns=['Ruin Probability'])\n\n # -----------Year-wise probability of ending value greater than 0 -----------------\n base_psuccess = sim_base_total.apply(lambda x: x > 0).sum(axis=1) / (trials + 1)\n port_psuccess = sim_port_total.apply(lambda x: x > 0).sum(axis=1) / (trials + 1)\n\n # -----------------------WRITING FILES TO EXCEL ---------------------------\n writer = pd.ExcelWriter(src + 'simulated_portfolios_summary.xlsx', engine='xlsxwriter')\n sim_base_total.to_excel(writer, sheet_name='base_ending_value')\n sim_port_total.to_excel(writer, sheet_name='fiaport_ending_value')\n base_qcut.to_excel(writer, sheet_name='base_quantile_ending')\n base_income_qcut.to_excel(writer, sheet_name='base_quantile_income')\n port_qcut.to_excel(writer, sheet_name='port_quantile_ending')\n port_income_qcut.to_excel(writer, sheet_name='port_quantile_income')\n\n sucess_df = pd.concat([base_psuccess, port_psuccess], axis=1)\n sucess_df.rename(columns={sucess_df.columns[0]: 'Base', sucess_df.columns[1]: 'Fia_Port'}, inplace=True)\n\n base_mean = sim_base_total[sim_base_total <= 0].isnull().sum().mean()\n port_mean = sim_port_total[sim_port_total <= 0].isnull().sum().mean()\n\n base_median = sim_base_total[sim_base_total <= 0].isnull().sum().median()\n port_median = sim_port_total[sim_port_total <= 0].isnull().sum().median()\n\n stats_df = pd.DataFrame([[base_mean, port_mean, 'Average years portfolio ending value > 0, out of N trials'],\n [base_median, port_median, 'Ending Value >0, 50% of the time']],\n index=['Mean years', 'Median years'], columns=['Base', 'fia_port', 'Comment'])\n\n # ---Average of terminal values at the end of horizon from N Trials\n stats_df.loc['Average Portfolio', 'Base'] = sim_base_total.iloc[-1].mean() + clients_age\n stats_df.loc['Average Portfolio', 'fia_port'] = sim_port_total.iloc[-1].mean() + clients_age\n stats_df.loc['Average Portfolio', 'Comment'] = \"Average of terminal values at the end of analysis period\" \\\n \" from N Trials\"\n\n # ----Median of terminal values at the end of horizon from N Trials\n stats_df.loc['Median Portfolio', 'Base'] = sim_base_total.iloc[-1].median() + clients_age\n stats_df.loc['Median Portfolio', 'fia_port'] = sim_port_total.iloc[-1].median() + clients_age\n stats_df.loc['Median Portfolio', 'Comment'] = \"Median of terminal values at the end of analysis period \" \\\n \"from N Trials\"\n\n # ---Average of terminal values at the end of Actuarial life from N Trials Base Portfolio\n stats_df.loc['Average Portfolio (end of expected_life)', 'Base'] = sim_base_total.iloc[\n life_expectancy - clients_age].mean()\n\n # ----Median of terminal values at the end of horizon from N Trials Base Portfolio\n stats_df.loc['Median Portfolio (end of expected_life)', 'Base'] = sim_base_total.iloc[\n life_expectancy - clients_age].median()\n\n # ---Average of terminal values at the end of Actuarial life from N Trials - FIA portfolio\n stats_df.loc['Average Portfolio (end of expected_life)', 'fia_port'] = sim_port_total.iloc[\n life_expectancy - clients_age].mean()\n stats_df.loc['Average Portfolio (end of expected_life)', 'Comment'] = \"Average of terminal values at the end of \" \\\n \"Actuarial life from N Trials\"\n\n # ----Median of terminal values at the end of horizon 
from N Trials - FIA Portfolio\n stats_df.loc['Median Portfolio (end of expected_life)', 'fia_port'] = sim_port_total.iloc[\n life_expectancy - clients_age].median()\n stats_df.loc['Median Portfolio (end of expected_life)', 'Comment'] = \"Median of terminal values at the end of \" \\\n \"horizon from N Trials\"\n\n stats_df.loc['% Success(value>0 at the end of expected_life)', 'Base'] = 1 - prob_failure_base\n stats_df.loc['% Success(value>0 at the end of expected_life)', 'fia_port'] = 1 - prob_failure_fia_port\n stats_df.loc['% Success(value>0 at the end of expected_life)', 'Comment'] = \"% of N trials yearly ending value \" \\\n \"greater than 0\"\n\n # -----Mininum of N portfolios terminal value at the end of Actuarial Age\n stats_df.loc['Minimum Value', 'Base'] = sim_base_total.iloc[life_expectancy - clients_age].min()\n stats_df.loc['Minimum Value', 'fia_port'] = sim_port_total.iloc[life_expectancy - clients_age].min()\n stats_df.loc['Minimum Value', 'Comment'] = \"Mininum of N portfolios terminal value at the end of Actuarial Age\"\n\n # -----Maxinum of N portfolios terminal value at the end of Actuarial Age\n stats_df.loc['Maximum Value', 'Base'] = sim_base_total.iloc[life_expectancy - clients_age].max()\n stats_df.loc['Maximum Value', 'fia_port'] = sim_port_total.iloc[life_expectancy - clients_age].max()\n stats_df.loc['Maximum Value', 'Comment'] = \"Maxinum of N portfolios terminal value at the end of Actuarial Age\"\n\n # -----Average income from N portfolios at the ned of Actuarial Age\n stats_df.loc['Avg Income (lifetime)', 'Base'] = avg_income_base\n stats_df.loc['Avg Income (lifetime)', 'fia_port'] = avg_income_port\n stats_df.loc['Avg Income (lifetime)', 'Comment'] = \"Average income from N portfolios at the end of Actuarial Age\"\n\n sucess_df.to_excel(writer, sheet_name='yearly_success_rates')\n stats_df.to_excel(writer, sheet_name='Stats')\n\n writer.save()\n\n print(\"simulation completed.\")",
"def simulation(cython: bool = False, parallel: bool = False):\n # Create the population\n cfg = Config()\n cfg.game.duration = 200\n cfg.update()\n pop = Population(\n name=\"delete_me\",\n config=cfg,\n folder_name=\"test_performance\",\n use_backup=True,\n overwrite=True, # Every iteration, create a new population from scratch\n )\n \n # Perform the simulations of experiment3\n train_games, _ = get_game_ids(experiment_id=3)\n if cython:\n multi_env = MultiEnvironmentCy(game_config=cfg, pop_config=pop.config)\n else:\n multi_env = MultiEnvironment(game_config=cfg, pop_config=pop.config)\n multi_env.set_games(train_games, noise=True)\n \n # Prepare the generation's reporters for the generation\n pop.reporters.start_generation(gen=pop.generation, logger=pop.log)\n \n # Fetch the dictionary of genomes\n genomes = list(iteritems(pop.population))\n \n if parallel:\n # Initialize the evaluation-pool\n pool = mp.Pool(mp.cpu_count())\n manager = mp.Manager()\n return_dict = manager.dict()\n \n for genome in genomes:\n pool.apply_async(func=multi_env.eval_genome, args=(genome, return_dict))\n pool.close() # Close the pool\n pool.join() # Postpone continuation until everything is finished\n else:\n return_dict = dict()\n for genome in genomes:\n multi_env.eval_genome(genome, return_dict)\n \n path = f\"population_backup/storage/test_performance/\"\n if os.path.exists(path):\n shutil.rmtree(path)",
"def after_run(self):\n # Calculate the performance of the strategy and portfolio\n self.portfolio.calc_stats()\n self.calc_performance()\n\n return self",
"def simulate_trajectories(kav):\n print \"Simulating \"+str(kav)\n wt_trajectories = []\n avp_trajectories = []\n vip_trajectories = []\n for tn in range(100):\n # get random initial condition\n # initial phases\n init_conditions_AV = [single_osc.lc(wt_T*np.random.rand()) \n for i in range(AVPcells+VIPcells)]\n init_conditions_NAV = [single_osc.lc(wt_T*np.random.rand())[:-1]\n for i in range(NAVcells)]\n y0_random = np.hstack(init_conditions_AV+init_conditions_NAV)\n\n # do the simulation\n model = GonzeModelManyCells(param, kav=kav, \n initial_values=y0_random)\n wt_trajectories.append(model.run(show_labels=False, seed=0))\n\n # avp bmalko\n avp_model = GonzeModelManyCells(param, bmalko='AVP', kav=kav, \n initial_values=y0_random)\n avp_trajectories.append(avp_model.run(show_labels=False, seed=0))\n\n # vip bmalko\n vip_model = GonzeModelManyCells(param, bmalko='VIP', kav=kav, \n initial_values=y0_random)\n vip_trajectories.append(vip_model.run(show_labels=False, seed=0))\n\n # save results\n with open(\"Data/params/wt_\"+str(kav)+\".pickle\", \"wb\") as output_file:\n pickle.dump(wt_trajectories, output_file)\n with open(\"Data/params/avp_\"+str(kav)+\".pickle\", \"wb\") as output_file:\n pickle.dump(avp_trajectories, output_file)\n with open(\"Data/params/vip_\"+str(kav)+\".pickle\", \"wb\") as output_file:\n pickle.dump(vip_trajectories, output_file)\n\n return {'wt': wt_trajectories,\n 'avp': avp_trajectories,\n 'vip': vip_trajectories}",
"def _generate_trading_instances(self, sp):\n print(\n \"Initialization...\"\n )\n self.data_handler = self.data_handler_cls(self.events, self.csv_dir, self.symbol_list, self.start_date,\n self.end_date)\n self.portfolio = self.portfolio_cls(self.data_handler, self.events, self.initial_capital, self.start_date,\n self.strategy_id, self.spread, self.commission,self.csv_dir)\n self.strategy = self.strategy_cls(self.data_handler, self.events, self.portfolio, self.spread, self.commission,\n sp)\n self.plot = self.plot_cls(self.csv_dir, self.portfolio, self.strategy_id)",
"def _main():\n ############### STOCK INPUT DATA #############\n \n StockTicker = [\"TEA\",\"POP\",\"ALE\",\"GIN\",\"JOE\"]\n StockType = [\"Com\",\"Com\",\"Com\",\"Pref\",\"Com\"] #($)\n TickerPrice = [2,1.5,3,1,1.8] #(USD)\n LastDividend = [0,0.08,0.23,0.08,0.13] #(GBP)\n FixedDividend = [0,0,0,0.02,0]\n ParValue = [1,1,0.6,1,2.5] #(GBP)\n\n \n ############# GET STOCK PARAMETERS ###########\n \n Get_StockParameters(len(StockTicker),StockTicker,StockType,TickerPrice,LastDividend,FixedDividend,ParValue)\n \n \n ############# PERFORM A TRADING FOR EACH STOCK ###########\n \n\n pathdir = \"./DataFiles/StockTrde_\"\n \n for i in range(0,len(StockTicker)):\n \n #Check if the data file exists\n datafile = pathdir+\"{}.csv\".format(i+1)\n if os.path.isfile(datafile) == False :\n Get_HistoricalStockData(i,datafile,datetime.datetime(2015, 1, 1),datetime.datetime(2016, 1, 1))\n\n # Get the Strategy\n Get_Trade(StockTicker[i],datafile)\n \n ############# PLOT THE TRADING ###########\n \n Plot_StockTrade(StockTicker[i])",
"def run(outputfile, graph_each, graph_avg, iterations):\n # runs auction the requested number of times\n bids = simulate_auction(iterations)\n \n # export results to csv format\n csvfile = open(outputfile, 'w', newline='')\n writer = csv.writer(csvfile, dialect='excel')\n writer.writerow([\"Bidder\", \"BidValue\", \"BidWeight\", \"CreationTimestep\", \"TimestepPaid\"])\n\n for bid in bids:\n entry = [bid.bidder, bid.value, bid.weight, bid.creation_timestep, bid.payment_timestep]\n writer.writerow(entry)\n\n # display data visualisation if --graphEach flag is used on cli\n if graph_each:\n plt.title('Auction simulation results')\n plt.xlabel('Simulation Timestep')\n plt.ylabel('Price Paid per Transaction by Agent')\n\n # for each unique bidder\n for bidder in list(set([bid.bidder for bid in bids])):\n x = []\n y = []\n\n for bid in bids:\n if bidder == bid.bidder:\n x.append(bid.payment_timestep)\n y.append(bid.value)\n \n # add results for current bidder to graph plot\n plt.scatter(x, y, label=bidder, alpha=0.5, s=2)\n\n plt.legend(loc='best')\n plt.show()\n\n # display data visualisation if --graphAvg flag is used on cli\n if graph_avg:\n plt.title('Auction simulation results')\n plt.xlabel('Simulation Timestep')\n plt.ylabel('Average Price Paid per Transaction by Block')\n\n # unique timesteps in the history\n x = list(set(bid.creation_timestep for bid in bids))\n y = []\n\n import statistics\n for timestep in x:\n bids_accepted = [bid.value for bid in bids if bid.creation_timestep == timestep]\n y.append(statistics.mean(bids_accepted))\n\n plt.plot(x, y, label=\"Avg Gas Price Paid\")\n plt.legend(loc='best')\n plt.show()",
"def run_experiment(x_loops=15, max_steps=0, display_on=True, max_fps=10,\n garden_size=8, tako_number=1, pop_max=30, max_width=1800,\n max_height=900, collect_data=True, export_all=False,\n rand_nets=False, max_gen = 505, genetic_mode=\"Plain\",\n learning_on=False, seeds=None, garden_mode=\"Diverse Static\",\n family_detection=None, family_mod=0, record_inbreeding=True,\n inbreed_lim = 1.1, hla_genes=0, binary_health=0,\n carrier_percentage=40, two_envs=False, diff_envs=False,\n migration_rate=0, phen_pref=False, filename=\"\"): \n #round width/height down to nearest multiple of 50 if need be\n if max_width % 50 != 0:\n max_width = max_width - (max_width % 50)\n if max_height % 50 != 0:\n max_height = max_height - (max_height % 50)\n\n i = 0\n #create csv files if they don't already exist\n if collect_data or export_all:\n if filename == \"\":\n filename = str(int(time.time())) + \".csv\"\n elif len(filename) < 4:\n filename = filename + \".csv\"\n elif filename[-4:] != \".csv\":\n filename = filename + \".csv\"\n\n if not os.path.exists(\"Data\"):\n os.makedirs(\"Data\")\n\n if collect_data:\n if not os.path.exists(os.path.join(\"Data\", filename)):\n with open(os.path.join(\"Data\", filename), 'a', newline='') as\\\n csvfile:\n writ = csv.writer(csvfile)\n writ.writerow(['iteration', 'env #', 'ID', 'parent1',\n 'parent2', 'age', 'generation', '# children',\n 'mating attempts', 'accum pain',\n 'cause of death', 'timestep', 'mutations',\n 'parent_degree', 'parent_genoverlap',\n '# disorders',\n 'health a', 'health b', 'preference'])\n else:\n with open(os.path.join(\"Data\", filename), newline='') as\\\n csvfile:\n reader = csv.DictReader(csvfile)\n row = None\n for row in reader: pass\n if row != None:\n i = int(row[\"iteration\"]) + 1\n\n if export_all:\n h = make_headers()\n f = os.path.join('Data', (filename[:-4] + ' gene data.csv'))\n if not os.path.exists(f):\n with open(f, 'a') as file:\n writ = csv.writer(file)\n writ.writerow(h)\n\n tako.rand_nets = rand_nets\n tako.family_mod = family_mod\n tako.family_detection = family_detection\n gt.family_detection = family_detection\n tako.record_inbreeding = record_inbreeding\n tako.inbreed_lim = inbreed_lim\n tako.hla_genes = hla_genes\n tako.binary_health = binary_health\n tako.carrier_percentage = carrier_percentage\n tako.phen_pref = phen_pref\n gt.phen_pref = phen_pref\n \n loop_limit = x_loops\n if loop_limit < 1:\n loop_limit = 1\n\n if seeds == None:\n seeds = [None for i in range(x_loops)]\n\n while loop_limit > 0:\n #check if seeds is long enough\n if len(seeds) < loop_limit + i:\n for j in range(loop_limit + i - len(seeds)):\n seeds.append(seeds[j])\n if seeds[0] != None:\n tako.set_seed(seeds[i])\n g = garden_game(garden_size, tako_number, pop_max, max_width,\n max_height, display_on, max_fps, learning_on,\n genetic_mode, rand_nets, garden_mode, filename,\n export_all, family_mod, family_detection,\n two_envs, diff_envs, migration_rate,\n seeds[i])\n if display_on:\n main_window = g\n main_window.main_loop(max_steps, max_gen, display_on,\n collect_data, garden_mode, i)\n else:\n g.main_loop(max_steps, max_gen, display_on, collect_data,\n garden_mode, i)\n loop_limit -= 1\n i += 1",
"def run_genetic_algorithm(bayes_params):\n\n print('Running genetic algorithm')\n\n # Unpacks parameters (unfortunately can't feed dataframe (or series or\n # array) data into a function with hyperopt, so am having to pickle the\n # parameters not being optimised with hyperopt\n params_file = '{}/Program_input/Input_params.pkl'.format(\n bayes_params['workingdirectory']\n )\n with open(params_file, 'rb') as f:\n fixed_params = pickle.load(f)\n if not type(fixed_params) in [dict, OrderedDict]:\n raise TypeError('Data in {} is not a pickled dictionary'.format(params_file))\n params = {**bayes_params, **fixed_params}\n\n # Records sequences and their fitnesses after each generation\n with open('{}/Program_output/Sequence_track.txt'.format(\n bayes_params['workingdirectory']), 'w') as f:\n f.write('Tracking GA optimisation progress\\n')\n\n ga_calcs = run_ga_calcs(params)\n\n # Defines whether sequences are compared by their raw or rank propensities.\n # Since BUDE scores and frequency values have to be compared by their rank\n # values, have made the decision to also compare propensity values by their\n # rankings.\n \"\"\"\n if params['matingpopmethod'] in ['fittest', 'roulettewheel']:\n raw_or_rank = 'raw'\n elif params['matingpopmethod'] in ['rankroulettewheel']:\n raw_or_rank = 'rank'\n \"\"\"\n raw_or_rank = 'rank'\n\n # Calculates propensity and/or BUDE energy of input structure\n with open('{}/Program_output/Sequence_track.txt'.format(\n bayes_params['workingdirectory']), 'a') as f:\n f.write('Input structure\\n')\n\n if params['fitnessscoremethod'] == 'alternate':\n (network_propensity_scores, network_frequency_scores\n ) = ga_calcs.measure_fitness_propensity(params['initialnetwork'])\n\n with open('{}/Program_output/Sequence_track.txt'.format(\n bayes_params['workingdirectory']), 'a') as f:\n f.write('network_id, sequence, propensity_score, frequency_score,'\n ' BUDE energy, clashscore\\n')\n for network, G in params['initialnetwork'].items():\n sequence = ''.join([G.nodes()[node]['aa_id'] for node in G.nodes()])\n propensity = network_propensity_scores[network]\n frequency = network_frequency_scores[network]\n f.write('{}, {}, {}, {}, {}, {}\\n'.format(\n network, sequence, propensity, frequency,\n params['inputpdbenergy'], params['inputpdbclash']\n ))\n f.write('\\n')\n\n if params['fitnessscoremethod'] == 'propensity':\n (network_propensity_scores, network_frequency_scores\n ) = ga_calcs.measure_fitness_propensity(params['initialnetwork'])\n\n with open('{}/Program_output/Sequence_track.txt'.format(\n bayes_params['workingdirectory']), 'a') as f:\n f.write('network_id, sequence, propensity_score, frequency_score\\n')\n for network, G in params['initialnetwork'].items():\n sequence = ''.join([G.nodes()[node]['aa_id'] for node in G.nodes()])\n propensity = network_propensity_scores[network]\n frequency = network_frequency_scores[network]\n f.write('{}, {}, {}, {}\\n'.format(\n network, sequence, propensity, frequency\n ))\n f.write('\\n')\n\n elif params['fitnessscoremethod'] == 'allatom':\n network_energies = ga_calcs.measure_fitness_allatom(params['initialnetwork'])\n\n with open('{}/Program_output/Sequence_track.txt'.format(\n bayes_params['workingdirectory']), 'a') as f:\n f.write('network_id, sequence, BUDE energy\\n')\n for network, G in params['initialnetwork'].items():\n sequence = ''.join([G.nodes()[node]['aa_id'] for node in G.nodes()])\n energy = network_energies[network]\n f.write('{}, {}, {}\\n'.format(network, sequence, energy))\n f.write('\\n')\n\n elif 
params['fitnessscoremethod'] == 'molprobity':\n network_clashes = ga_calcs.measure_fitness_clashscore(params['initialnetwork'])\n\n with open('{}/Program_output/Sequence_track.txt'.format(\n bayes_params['workingdirectory']), 'a') as f:\n f.write('network_id, sequence, clashscore\\n')\n for network, G in params['initialnetwork'].items():\n sequence = ''.join([G.nodes()[node]['aa_id'] for node in G.nodes()])\n clashscore = network_clashes[network]\n f.write('{}, {}, {}\\n'.format(network, sequence, clashscore))\n f.write('\\n')\n\n # Runs GA cycles\n gen = params['startgen']\n while gen < params['stopgen']:\n gen += 1\n print('Generation {}'.format(gen))\n with open('{}/Program_output/Sequence_track.txt'.format(\n bayes_params['workingdirectory']), 'a') as f:\n f.write('\\n\\n\\n\\n\\nGeneration {}\\n'.format(gen))\n\n\n all_networks_list = [params['sequencesdict']]\n pop_sizes = [params['populationsize']]\n\n for index, networks_dict in enumerate(all_networks_list):\n # Measures fitness of sequences in starting population.\n if (\n (params['fitnessscoremethod'] == 'propensity')\n or\n (params['fitnessscoremethod'] == 'alternate' and gen % 2 == 1)\n ):\n (network_propensity_scores, network_frequency_scores\n ) = ga_calcs.measure_fitness_propensity(networks_dict)\n network_fitness_scores = ga_calcs.combine_prop_and_freq_scores(\n network_propensity_scores, network_frequency_scores, raw_or_rank\n )\n\n # Records sequences output from this generation and their\n # associated fitnesses\n with open('{}/Program_output/Sequence_track.txt'.format(\n bayes_params['workingdirectory']), 'a') as f:\n f.write('network, sequence, propensity, frequency, probability\\n')\n for network, G in networks_dict.items():\n sequence = ''.join([G.nodes()[node]['aa_id'] for node in G.nodes()])\n propensity = network_propensity_scores[network]\n frequency = network_frequency_scores[network]\n probability = network_fitness_scores[network]\n f.write('{}, {}, {}, {}, {}\\n'.format(\n network, sequence, propensity, frequency, probability\n ))\n f.write('Total: {}, {}, {}'.format(\n sum(network_propensity_scores.values()),\n sum(network_frequency_scores.values()),\n sum(network_fitness_scores.values())\n ))\n f.write('\\n')\n elif (\n (params['fitnessscoremethod'] == 'allatom')\n or\n (params['fitnessscoremethod'] == 'alternate' and gen % 4 == 2)\n ):\n # Runs BUDE energy scoring on parallel processors\n network_energies = ga_calcs.measure_fitness_allatom(networks_dict)\n (network_fitness_scores\n ) = ga_calcs.convert_energies_to_probabilities(network_energies)\n\n # Records sequences output from this generation and their\n # associated fitnesses\n with open('{}/Program_output/Sequence_track.txt'.format(\n bayes_params['workingdirectory']), 'a') as f:\n f.write('network, sequence, BUDE score, probability\\n')\n for network, G in networks_dict.items():\n sequence = ''.join([G.nodes()[node]['aa_id'] for node in G.nodes()])\n energy = network_energies[network]\n probability = network_fitness_scores[network]\n f.write('{}, {}, {}, {}\\n'.format(\n network, sequence, energy, probability\n ))\n f.write('Total: {}, {}'.format(\n sum(network_energies.values()),\n sum(network_fitness_scores.values())\n ))\n f.write('\\n')\n\n elif (\n (params['fitnessscoremethod'] == 'molprobity')\n or\n (params['fitnessscoremethod'] == 'alternate' and gen % 4 == 0)\n ):\n # Runs MolProbity scoring on parallel processors\n network_clashes = ga_calcs.measure_fitness_clashscore(networks_dict)\n (network_fitness_scores\n ) = 
ga_calcs.convert_clashscores_to_probabilities(network_clashes)\n\n # Records sequences output from this generation and their\n # associated fitnesses\n with open('{}/Program_output/Sequence_track.txt'.format(\n bayes_params['workingdirectory']), 'a') as f:\n f.write('network, sequence, clashscore, probability\\n')\n for network, G in networks_dict.items():\n sequence = ''.join([G.nodes()[node]['aa_id'] for node in G.nodes()])\n clash = network_clashes[network]\n probability = network_fitness_scores[network]\n f.write('{}, {}, {}, {}\\n'.format(\n network, sequence, clash, probability\n ))\n f.write('Total: {}, {}'.format(\n sum(network_clashes.values()),\n sum(network_fitness_scores.values())\n ))\n f.write('\\n')\n\n # Selects subpopulation for mating\n if params['matingpopmethod'] == 'fittest':\n mating_pop_dict = ga_calcs.create_mat_pop_fittest(\n networks_dict, network_fitness_scores, pop_sizes[index],\n params['unfitfraction']\n )\n elif params['matingpopmethod'] in ['roulettewheel', 'rankroulettewheel']:\n mating_pop_dict = ga_calcs.create_mat_pop_roulette_wheel(\n networks_dict, network_fitness_scores, pop_sizes[index], params['']\n )\n\n # Performs crossover of parent sequences to generate child sequences\n if params['crossovermethod'] == 'uniform':\n crossover_pop_dict = ga_calcs.uniform_crossover(mating_pop_dict)\n elif params['crossovermethod'] == 'segmented':\n crossover_pop_dict = ga_calcs.segmented_crossover(mating_pop_dict)\n\n # Mutates child sequences\n if params['mutationmethod'] == 'swap':\n mutated_pop_dict = ga_calcs.swap_mutate(crossover_pop_dict)\n elif params['mutationmethod'] == 'scramble':\n mutated_pop_dict = ga_calcs.scramble_mutate(crossover_pop_dict)\n\n # Combines parent and child sequences into single generation\n merged_networks_dict = ga_calcs.add_children_to_parents(\n mutated_pop_dict, mating_pop_dict\n )\n\n random_order = [n for n in range(len(merged_networks_dict))]\n random.shuffle(random_order)\n shuffled_merged_networks_dict = OrderedDict(\n {list(merged_networks_dict.keys())[n]:\n list(merged_networks_dict.values())[n] for n in random_order}\n )\n params['sequencesdict'] = shuffled_merged_networks_dict\n\n # Calculates fitness of output sequences and filters population to maintain\n # the fittest 50%, plus sums the probabilities of the retained sequences and\n # returns this value (to be minimised with hyperopt)\n summed_fitness = 0\n\n with open('{}/Program_output/Sequence_track.txt'.format(\n bayes_params['workingdirectory']), 'a') as f:\n f.write('\\n\\n\\n\\n\\nOutput generation\\n')\n\n if params['fitnessscoremethod'] != 'allatom':\n (network_propensity_scores, network_frequency_scores\n ) = ga_calcs.measure_fitness_propensity(params['sequencesdict'])\n network_fitness_scores = ga_calcs.combine_prop_and_freq_scores(\n network_propensity_scores, network_frequency_scores, raw_or_rank\n )\n elif params['fitnessscoremethod'] == 'allatom':\n network_energies = ga_calcs.measure_fitness_allatom(params['sequencesdict'])\n (network_fitness_scores\n ) = ga_calcs.convert_energies_to_probabilities(network_energies)\n\n # Records sequences output from this generation and their associated\n # fitnesses\n with open('{}/Program_output/Sequence_track.txt'.format(\n bayes_params['workingdirectory']), 'a') as f:\n if params['fitnessscoremethod'] != 'allatom':\n f.write('network, sequence, propensity, frequency\\n')\n elif params['fitnessscoremethod'] == 'allatom':\n f.write('network, sequence, BUDE score\\n')\n for network, G in 
params['sequencesdict'].items():\n sequence = ''.join([G.nodes()[node]['aa_id'] for node in G.nodes()])\n if params['fitnessscoremethod'] != 'allatom':\n propensity = network_propensity_scores[network]\n frequency = network_frequency_scores[network]\n f.write('{}, {}, {}, {}\\n'.format(\n network, sequence, propensity, frequency\n ))\n elif params['fitnessscoremethod'] == 'allatom':\n energy = network_energies[network]\n f.write('{}, {}, {}\\n'.format(network, sequence, energy))\n if params['fitnessscoremethod'] != 'allatom':\n f.write('Total: {}, {}'.format(\n sum(network_propensity_scores.values()),\n sum(network_frequency_scores.values())\n ))\n elif params['fitnessscoremethod'] == 'allatom':\n f.write('Total: {}'.format(sum(network_energies.values())))\n f.write('\\n')\n\n params['sequencesdict'] = ga_calcs.create_mat_pop_fittest(\n params['sequencesdict'], network_fitness_scores,\n params['populationsize'], unfit_fraction=0\n )\n\n for network in params['sequencesdict'].keys():\n # Higher propensity is more likely, so add because output from\n # measure_fitness_propensity is sum of -log(propensity) values, and\n # hyperopt minimises output score\n # Can't combine propensity and frequency scores without first converting\n # to a probability, so for calculating output combined fitness can only\n # use combined propensity scores to rank the structures\n if params['fitnessscoremethod'] != 'allatom':\n summed_fitness += network_propensity_scores[network]\n # Lower score is more likely, so add because hyperopt minimises output\n # score\n elif params['fitnessscoremethod'] == 'allatom':\n summed_fitness += network_energies[network]\n\n with open('{}/Program_output/GA_output_sequences_dict.pkl'.format(\n bayes_params['workingdirectory']), 'wb') as f:\n pickle.dump(params['sequencesdict'], f)\n\n print(summed_fitness)\n\n return summed_fitness",
"def run():\n if am_i_root():\n\n print(\"*** initializing...\")\n\n # Print parameters\n print(\"N_DIMS = \" + str(N_DIMS))\n print(\"LAMBDA_OVER_DX = \" + str(LAMBDA_OVER_DX))\n print(\"R_DT = \" + str(R_DT))\n print(\"MU0_POISSON = \" + str(MU0_POISSON))\n print(\"NORM_POISSON = \" + NORM_POISSON)\n print(\"N_GRID = \" + str(N_GRID))\n print(\"N_HITS = \" + str(N_HITS))\n print(\"POLICY = \" + str(POLICY))\n if POLICY == -1:\n print(\"MODEL_PATH = \" + str(MODEL_PATH))\n else:\n print(\"STEPS_AHEAD = \" + str(STEPS_AHEAD))\n print(\"EPSILON = \" + str(EPSILON))\n print(\"STOP_t = \" + str(STOP_t))\n print(\"STOP_p = \" + str(STOP_p))\n print(\"N_PARALLEL = \" + str(N_PARALLEL))\n print(\"WITH_MPI = \" + str(WITH_MPI))\n print(\"ADAPTIVE_N_RUNS = \" + str(ADAPTIVE_N_RUNS))\n print(\"REL_TOL = \" + str(REL_TOL))\n print(\"MAX_N_RUNS = \" + str(MAX_N_RUNS))\n print(\"N_RUNS(input) = \" + str(N_RUNS))\n sys.stdout.flush()\n\n # Perform runs\n if am_i_root():\n print(\"*** generating episodes...\")\n\n N_runs = N_RUNS\n if ADAPTIVE_N_RUNS or WITH_MPI:\n N_runs = int(N_PARALLEL * (np.ceil(N_runs / N_PARALLEL))) # make it multiple of N_PARALLEL\n if am_i_root():\n print(\"N_RUNS(current) = \" + str(N_runs))\n sys.stdout.flush()\n\n N_runso = 0\n\n if WITH_MPI:\n cdf_t_tot_loc = np.zeros(LEN_CDF_T, dtype=float)\n cdf_h_tot_loc = np.zeros(LEN_CDF_H, dtype=float)\n mean_t_loc = np.nan * np.ones(MAX_N_RUNS // N_PARALLEL, dtype=float)\n failed_loc = - np.ones(MAX_N_RUNS // N_PARALLEL, dtype=float)\n else:\n cdf_t_tot = np.zeros(LEN_CDF_T, dtype=float)\n cdf_h_tot = np.zeros(LEN_CDF_H, dtype=float)\n mean_t_episodes = np.nan * np.ones(MAX_N_RUNS, dtype=float)\n failed_episodes = - np.ones(MAX_N_RUNS, dtype=float)\n\n while True:\n if WITH_MPI: # MPI\n if N_runs % N_PARALLEL != 0:\n raise Exception(\"N_runs must be multiple of N_PARALLEL with MPI\")\n COMM.Barrier()\n # Decomposition\n Nepisodes = N_runs // N_PARALLEL\n episode_list = range(N_runso + ME, N_runs, N_PARALLEL)\n # Run episodes and reduce locally\n ind = N_runso // N_PARALLEL\n for episode in episode_list:\n cdf_t, cdf_h, mean_t_loc[ind], failed_loc[ind] = Worker(episode)\n cdf_t_tot_loc += cdf_t\n cdf_h_tot_loc += cdf_h\n ind += 1\n\n # Reduce globally the mean_t and failed\n mean_t_episodes = np.empty([N_runs], dtype=float)\n failed_episodes = np.empty([N_runs], dtype=float)\n COMM.Barrier()\n COMM.Allgather([mean_t_loc[:ind], Nepisodes, MPI.DOUBLE], [mean_t_episodes, Nepisodes, MPI.DOUBLE])\n COMM.Allgather([failed_loc[:ind], Nepisodes, MPI.DOUBLE], [failed_episodes, Nepisodes, MPI.DOUBLE])\n COMM.Barrier()\n elif N_PARALLEL > 1: # multiprocessing\n # Run episodes in parallel\n pool = multiprocessing.Pool(N_PARALLEL)\n result = pool.map(Worker, range(N_runso, N_runs))\n pool.close()\n pool.join()\n # Reduce\n ind = N_runso\n for cdf_t, cdf_h, mean_t, failed in result:\n cdf_t_tot += cdf_t\n cdf_h_tot += cdf_h\n mean_t_episodes[ind] = mean_t\n failed_episodes[ind] = failed\n ind += 1\n elif N_PARALLEL == 1: # sequential\n ind = N_runso\n for episode in range(N_runso, N_runs):\n cdf_t, cdf_h, mean_t, failed = Worker(episode)\n cdf_t_tot += cdf_t\n cdf_h_tot += cdf_h\n mean_t_episodes[ind] = mean_t\n failed_episodes[ind] = failed\n ind += 1\n else:\n raise Exception(\"Problem with N_PARALLEL: must be an int >= 1\")\n\n # estimate of the error\n mean_ep = np.mean(mean_t_episodes[:N_runs])\n sigma_ep = np.std(mean_t_episodes[:N_runs])\n std_error_mean = sigma_ep / np.sqrt(N_runs)\n rel_std_error_mean = std_error_mean / mean_ep\n\n # break 
clause\n if not ADAPTIVE_N_RUNS:\n break\n else:\n if rel_std_error_mean < REL_TOL:\n break\n elif N_runs >= MAX_N_RUNS:\n break\n else:\n N_runso = N_runs\n N_runs = int(np.ceil(1.05 * (sigma_ep / mean_ep / REL_TOL) ** 2))\n N_runs = min(N_runs, MAX_N_RUNS)\n N_runs = int(N_PARALLEL * (np.ceil(N_runs / N_PARALLEL))) # make it multiple of N_PARALLEL\n if am_i_root():\n print(\"N_RUNS(current) = \" + str(N_runs))\n sys.stdout.flush()\n\n if am_i_root():\n print(\"N_RUNS(performed) = \" + str(N_runs))\n sys.stdout.flush()\n\n # Reduce\n if am_i_root():\n print(\"*** post-processing...\")\n if WITH_MPI:\n # locally\n cdf_t_tot_loc /= N_runs\n cdf_h_tot_loc /= N_runs\n # Reduce globally\n cdf_t_tot = np.empty([LEN_CDF_T], dtype=float)\n cdf_h_tot = np.empty([LEN_CDF_H], dtype=float)\n COMM.Barrier()\n COMM.Allreduce(cdf_t_tot_loc, cdf_t_tot, op=MPI.SUM)\n COMM.Allreduce(cdf_h_tot_loc, cdf_h_tot, op=MPI.SUM)\n COMM.Barrier()\n else:\n cdf_t_tot /= N_runs\n cdf_h_tot /= N_runs\n mean_t_episodes = mean_t_episodes[:N_runs]\n failed_episodes = failed_episodes[:N_runs]\n\n # Further post-processing, save and plot\n if am_i_root():\n\n # from cdf to pdf\n pdf_t_tot = cdf_to_pdf(cdf_t_tot)\n pdf_h_tot = cdf_to_pdf(cdf_h_tot)\n\n # compute stats of number of steps and number of hits\n t_bins = np.arange(BIN_START_T, BIN_END_T, BIN_SIZE_T) + 0.5 * BIN_SIZE_T\n mean_t, sigma_t, skew_t, kurt_t, p_found = stats_from_pdf(t_bins, pdf_t_tot)\n p25_t, p50_t, p75_t, p90_t, p95_t, p99_t, _ = stats_from_cdf(t_bins, cdf_t_tot)\n\n h_bins = np.arange(BIN_START_H, BIN_END_H, BIN_SIZE_H) + 0.5 * BIN_SIZE_H\n mean_h, sigma_h, skew_h, kurt_h, _ = stats_from_pdf(h_bins, pdf_h_tot)\n p25_h, p50_h, p75_h, p90_h, p95_h, p99_h, _ = stats_from_cdf(h_bins, cdf_h_tot)\n\n print(\"probability that the source is never found : %.10f\" % (1.0 - p_found, ))\n print(\"mean number of steps to find the source : %.3f +/- %.3f\" % (mean_t, 1.96 * std_error_mean))\n print(\"number of steps to find the source with 50 %% probability: %.3f\" % p50_t)\n print(\"number of steps to find the source with 99 %% probability: %.3f\" % p99_t)\n nb_failed = np.sum(failed_episodes)\n if np.any(failed_episodes < 0):\n nb_failed = -1\n print(\"problem while recording failures\")\n else:\n print(\"number of failed episodes : %d / %d (%f %%)\"\n % (nb_failed, N_runs, nb_failed / N_runs * 100))\n sys.stdout.flush()\n\n # save all parameters to txt file\n inputs = {\n \"N_DIMS\": N_DIMS,\n \"LAMBDA_OVER_DX\": LAMBDA_OVER_DX,\n \"R_DT\": R_DT,\n \"MU0_POISSON\": MU0_POISSON,\n \"NORM_POISSON\": NORM_POISSON,\n \"N_GRID\": N_GRID,\n \"N_HITS\": N_HITS,\n \"POLICY\": POLICY,\n \"STEPS_AHEAD\": STEPS_AHEAD,\n \"MODEL_PATH\": MODEL_PATH,\n \"STOP_t\": STOP_t,\n \"STOP_p\": STOP_p,\n \"ADAPTIVE_N_RUNS\": ADAPTIVE_N_RUNS,\n \"REL_TOL\": REL_TOL,\n \"MAX_N_RUNS\": MAX_N_RUNS,\n \"N_RUNS_PERFORMED\": N_runs,\n \"BIN_START_T\": BIN_START_T,\n \"BIN_END_T\": BIN_END_T,\n \"BIN_SIZE_T\": BIN_SIZE_T,\n \"BIN_START_H\": BIN_START_H,\n \"BIN_END_H\": BIN_END_H,\n \"BIN_SIZE_H\": BIN_SIZE_H,\n \"EPSILON\": EPSILON,\n }\n param_txt_file = os.path.join(DIR_OUTPUTS, str(RUN_NAME + \"_parameters\" + \".txt\"))\n with open(param_txt_file, 'w') as out:\n for key, val in inputs.items():\n print(key + \" = \" + str(val), file=out)\n\n # save stats\n stats_file = os.path.join(DIR_OUTPUTS, str(RUN_NAME + \"_statistics\" + \".txt\"))\n with open(stats_file, \"w\") as sfile:\n sfile.write(\"p_not_found\\t%+.4e\\n\" % (1 - p_found,))\n for varname in \\\n ('mean_t', 'sigma_t', 
'skew_t', 'kurt_t', 'p25_t', 'p50_t', 'p75_t', 'p90_t', 'p95_t', 'p99_t'):\n sfile.write(\"%s\\t\\t%+.4e\\n\" % (varname, locals()[varname]))\n for varname in \\\n ('mean_h', 'sigma_h', 'skew_h', 'kurt_h', 'p25_h', 'p50_h', 'p75_h', 'p90_h', 'p95_h', 'p99_h'):\n sfile.write(\"%s\\t\\t%+.4e\\n\" % (varname, locals()[varname]))\n\n # save CDF of number of steps\n table_file = os.path.join(DIR_OUTPUTS, str(RUN_NAME + \"_table_CDF_nsteps\" + \".npy\"))\n np.save(table_file, np.vstack((t_bins, cdf_t_tot)))\n\n # save CDF of number of hits\n table_file = os.path.join(DIR_OUTPUTS, str(RUN_NAME + \"_table_CDF_nhits\" + \".npy\"))\n np.save(table_file, np.vstack((h_bins, cdf_h_tot)))\n\n # create and save figures\n if POLICY == -1:\n specifics = \"MODEL = \" + os.path.basename(MODEL_PATH)\n else:\n specifics = \"STEPS_AHEAD = \" + str(STEPS_AHEAD)\n subtitle = (\n \"N_DIMS = \"\n + str(N_DIMS)\n + \", \"\n + \"LAMBDA_OVER_DX = \"\n + str(LAMBDA_OVER_DX)\n + \", \"\n + \"R_DT = \"\n + str(R_DT)\n + \", \"\n + \"POLICY = \"\n + str(POLICY)\n + \", \"\n + specifics\n + \", \"\n + \"N_GRID = \"\n + str(N_GRID)\n + \", \"\n + \"N_HITS = \"\n + str(N_HITS)\n + \", \"\n + \"N_RUNS = \"\n + str(N_runs)\n + \"\\n\"\n )\n\n # plot PDF(nsteps), CDF(nsteps), PDF(nhits), CDF(nhits)\n fig, ax = plt.subplots(2, 2, figsize=(12, 10))\n plt.subplots_adjust(left=0.08, bottom=0.06, right=0.96, top=0.92, hspace=0.35, wspace=0.30)\n kwargs = {'xycoords': 'axes fraction', 'fontsize': 8, 'ha': \"right\"}\n for row, varname in enumerate([\"number of steps\", \"number of hits\"]):\n if varname == \"number of steps\":\n bins = t_bins\n cdf_tot = cdf_t_tot\n pdf_tot = pdf_t_tot\n mean = mean_t\n sigma = sigma_t\n skew = skew_t\n kurt = kurt_t\n p50 = p50_t\n p75 = p75_t\n p90 = p90_t\n p99 = p99_t\n filesuffix = 'nsteps'\n color = \"tab:blue\"\n else:\n bins = h_bins\n cdf_tot = cdf_h_tot\n pdf_tot = pdf_h_tot\n mean = mean_h\n sigma = sigma_h\n skew = skew_h\n kurt = kurt_h\n p50 = p50_h\n p75 = p75_h\n p90 = p90_h\n p99 = p99_h\n filesuffix = 'nhits'\n color = \"tab:orange\"\n max_x = bins[np.nonzero(pdf_tot)[0][-1]]\n for col, fct in enumerate([\"PDF\", \"CDF\"]):\n if fct == \"PDF\":\n ydata = pdf_tot\n ylim = (0.0, 1.02 * np.max(pdf_tot))\n elif fct == \"CDF\":\n ydata = cdf_tot\n ylim = (0.0, 1.0)\n\n ax[row, col].plot(bins, ydata, \"-o\", color=color, markersize=2, linewidth=1)\n ax[row, col].set_title(fct + \" of \" + varname)\n ax[row, col].set_xlabel(varname + \" to find the source\")\n ax[row, col].set_xlim((0, max_x))\n ax[row, col].set_ylim(ylim)\n\n if fct == \"PDF\":\n ax[row, col].annotate(\"p_not_found = \" + \"{:.3e}\".format(1.0 - p_found), xy=(0.98, 0.60), **kwargs)\n ax[row, col].annotate(\"mean = \" + \"{:.3e}\".format(mean), xy=(0.98, 0.56), **kwargs)\n ax[row, col].annotate(\"std = \" + \"{:.3e}\".format(sigma), xy=(0.98, 0.52), **kwargs)\n ax[row, col].annotate(\"skew = \" + \"{:.3e}\".format(skew), xy=(0.98, 0.48), **kwargs)\n ax[row, col].annotate(\"ex. 
kurt = \" + \"{:.3e}\".format(kurt), xy=(0.98, 0.44), **kwargs)\n elif fct == \"CDF\":\n ax[row, col].annotate(\"p_not_found = \" + \"{:.3e}\".format(1.0 - p_found), xy=(0.98, 0.60), **kwargs)\n ax[row, col].annotate(\"P50 = \" + \"{:.3e}\".format(p50), xy=(0.98, 0.56), **kwargs)\n ax[row, col].annotate(\"P75 = \" + \"{:.3e}\".format(p75), xy=(0.98, 0.52), **kwargs)\n ax[row, col].annotate(\"P90 = \" + \"{:.3e}\".format(p90), xy=(0.98, 0.48), **kwargs)\n ax[row, col].annotate(\"P99 = \" + \"{:.3e}\".format(p99), xy=(0.98, 0.44), **kwargs)\n plt.grid(False)\n plt.figtext(0.5, 0.985, subtitle, fontsize=7, ha=\"center\", va=\"top\")\n plt.draw()\n figure_file = os.path.join(DIR_OUTPUTS, str(RUN_NAME + \"_figure_distributions.pdf\"))\n fig.savefig(figure_file)\n plt.close(fig)\n\n # plot mean nb steps vs number of episodes\n number_episodes = range(1, N_runs + 1)\n cum_mean_t_episodes = np.cumsum(mean_t_episodes) / number_episodes\n if N_runs >= 100:\n number_episodes = number_episodes[20:]\n cum_mean_t_episodes = cum_mean_t_episodes[20:]\n fig, ax = plt.subplots()\n ax.plot(number_episodes, cum_mean_t_episodes, color=\"r\")\n ax.set_title(\"Convergence of the mean number of steps\")\n ax.set_xlabel(\"number of episodes\")\n ax.set_ylabel(\"mean number of steps\")\n plt.figtext(0.5, 0.985, subtitle, fontsize=5, ha=\"center\", va=\"top\")\n plt.grid(False)\n plt.draw()\n figure_file = os.path.join(DIR_OUTPUTS, str(RUN_NAME + \"_figure_convergence.pdf\"))\n fig.savefig(figure_file)\n plt.close(fig)\n\n # save monitoring information (concatenate episodes files)\n monitoring_episodes_file = os.path.join(DIR_OUTPUTS, str(RUN_NAME + \"_monitoring_episodes.txt\"))\n filenames = [os.path.join(DIR_TMP, str(\"monitoring_episode_\" + str(episode) + \".txt\")) for episode in range(N_runs)]\n with open(monitoring_episodes_file, \"w\") as mfile:\n mfile.write(\"# episode\\thit_init\\tstop_flag\\tboundary_flag\\t\"\n \"p_not_found\\t\\tmean_nsteps\\t\\ttime_elapsed(sec)\\n\")\n for fname in filenames:\n if os.path.isfile(fname):\n with open(fname) as infile:\n mfile.write(infile.read())\n os.remove(fname)\n else:\n print(\"Unexpected: Missing episode file: \" + str(fname))\n\n # clean up tmp dirs\n if len(os.listdir(DIR_TMP)) != 0:\n print(\"Unexpected: The directory '\" + DIR_TMP\n + \"' is not removed, because it should be empty but is not.\")\n else:\n os.rmdir(DIR_TMP)\n if len(os.listdir(PARENT_DIR_TMP)) == 0:\n os.rmdir(PARENT_DIR_TMP)\n\n # summary\n monitoring_file = os.path.join(DIR_OUTPUTS, str(RUN_NAME + \"_monitoring_summary\" + \".txt\"))\n with open(monitoring_file, \"w\") as mfile:\n mfile.write(\"*** initial hit ***\\n\")\n first_hit = np.loadtxt(monitoring_episodes_file, usecols=1, dtype='int')\n hit_max = np.max(first_hit)\n hit_hist, _ = np.histogram(first_hit, bins=np.arange(0.5, hit_max + 1.5), density=True)\n for h in range(1, hit_max + 1):\n mfile.write(\"hit=%1d: %6.2f %% \\n\" % (h, hit_hist[h - 1] * 100))\n\n mfile.write(\"\\n*** stats convergence ***\\n\")\n mfile.write(\"number of episodes simulated : %d\\n\" % N_runs)\n mfile.write(\"standard error of the mean (estimate): %.4e = %5.2f %%\\n\"\n % (std_error_mean, rel_std_error_mean * 100))\n\n stopping_reason = np.loadtxt(monitoring_episodes_file, usecols=2, dtype='int')\n stop_max = np.max(stopping_reason)\n stopping_hist, _ = np.histogram(stopping_reason, bins=np.arange(0.5, stop_max + 1.5), density=True)\n mfile.write(\"\\n*** reason for stopping (1 is success, anything else is failure) ***\\n\")\n for stop in range(1, 
stop_max + 1):\n mfile.write(\"stop=%1d: %6.2f %% \\n\" % (stop, stopping_hist[stop - 1] * 100))\n\n mfile.write(\"\\n*** probability that the source is not found at the end of the episodes ***\\n\")\n p_not_found = np.loadtxt(monitoring_episodes_file, usecols=4)\n p_gtr_stop = p_not_found[p_not_found > STOP_p]\n p_not_found_max = np.max(p_not_found)\n mfile.write(\"criteria (STOP_p): %.5e\\n\" % STOP_p)\n mfile.write(\"max(p) : %.5e\\n\" % p_not_found_max)\n mfile.write(\"number of episodes where p > STOP_p: %7d (%8.4f %%)\\n\"\n % (len(p_gtr_stop), len(p_gtr_stop) / N_runs * 100))\n\n near_boundaries = np.loadtxt(monitoring_episodes_file, usecols=3, dtype='int')\n near_boundaries = np.count_nonzero(near_boundaries)\n mfile.write(\"\\n*** agent near boundaries ***\\n\")\n mfile.write(\"number of episodes where it happened: %7d (%8.4f %%)\\n\"\n % (near_boundaries, near_boundaries / N_runs * 100))\n\n episode_elapsed = np.loadtxt(monitoring_episodes_file, usecols=5)\n mfile.write(\"\\n*** computational cost per episode ***\\n\")\n mfile.write(\"avg elapsed seconds per episode: %.5e\\n\" % (np.mean(episode_elapsed)))\n mfile.write(\"max elapsed seconds per episode: %.5e\\n\" % (np.max(episode_elapsed)))\n\n elapsed_time_0 = (time.monotonic() - start_time_0) / 3600.0\n mfile.write(\"\\n*** computational cost ***\\n\")\n mfile.write(\"N_PARALLEL = %d\\n\" % N_PARALLEL)\n mfile.write(\"total elapsed hours : %.5e\\n\" % elapsed_time_0)\n mfile.write(\"cost in hours = total elapsed time * N_PARALLEL: %.5e\\n\" % (elapsed_time_0 * N_PARALLEL))\n\n print(\">>> Results saved in the directory: \" + DIR_OUTPUTS)\n\n sys.stdout.flush()",
"def _generate_trading_instances(self):\n print(\"Creating DataHandler, Strategy, Portfolio, and ExecutionHandler for\")\n\n # Set internal data members equal to the classes we passed in earlier, along with necessary parameters.\n # https://softwareengineering.stackexchange.com/questions/131403/what-is-the-name-of-in-python/131415\n self.data_handler = self.data_handler_class(self.events, self.csv_dir, self.symbol_list)\n self.strategy = self.strategy_class(self.data_handler, self.events)\n self.portfolio = self.portfolio_class(self.data_handler, self.events, self.start_date, self.initial_capital)\n self.execution_handler = self.execution_handler_class(self.events) # The Event Queue sent to ExecutionHandler",
"def run_and_evaluate():\n tsp_problems = read_all_problems()\n # Empty list of metrics\n results = []\n for problem in tqdm.tqdm(tsp_problems):\n # As random factors are involved repeat experiments a couple of times\n best_routes_base = []\n best_routes_af = []\n best_routes_ms = []\n base_times = []\n af_times = []\n ms_times = []\n for i in range(10):\n # Base solution\n start_time = timeit.default_timer()\n best_route_base = solve_tsp_basic(problem)\n base_time = timeit.default_timer() - start_time\n best_routes_base.append(Fitness(route=best_route_base).route_distance())\n base_times.append(base_time)\n\n # AF clustering solution\n start_time = timeit.default_timer()\n best_route_af = solve_tsp_affinity_propagation(problem)\n af_time = timeit.default_timer() - start_time\n best_routes_af.append(Fitness(route=best_route_af).route_distance())\n af_times.append(af_time)\n\n # MS solution\n start_time = timeit.default_timer()\n best_route_ms = solve_mean_shift(problem)\n ms_time = timeit.default_timer() - start_time\n best_routes_ms.append(Fitness(route=best_route_ms).route_distance())\n ms_times.append(ms_time)\n\n results.append(\n {\n \"problem name\": problem.name,\n \"optimal solution\": find_route_optimal_route_length(problem),\n \"baseline tour length\": mean(best_routes_base),\n \"af clustering tour length\": mean(best_routes_af),\n \"ms clustering tour length\": mean(best_routes_ms),\n \"baseline algorithm time\": mean(base_times),\n \"af clustering algorithm time\": mean(af_times),\n \"ms clustering algorithm time\": mean(ms_times),\n }\n )\n # Create dataframe and safe results\n df = pd.DataFrame(results)\n df.to_csv(\"results.csv\", index=False)\n return df",
"def main():\n run_simulation(spectral=False, ml=False, num_procs=1)\n run_simulation(spectral=True, ml=False, num_procs=1)\n run_simulation(spectral=False, ml=True, num_procs=1)\n run_simulation(spectral=True, ml=True, num_procs=1)\n run_simulation(spectral=False, ml=True, num_procs=10)\n run_simulation(spectral=True, ml=True, num_procs=10)"
] | [
"0.6824583",
"0.65352273",
"0.6505157",
"0.63680285",
"0.6147844",
"0.6038669",
"0.59978664",
"0.59736603",
"0.58891684",
"0.58687943",
"0.58315384",
"0.58308274",
"0.5772297",
"0.5758246",
"0.57433945",
"0.57423234",
"0.5734684",
"0.5721028",
"0.5683199",
"0.5653934",
"0.56521845",
"0.5636789",
"0.5634142",
"0.56034064",
"0.5599968",
"0.55728346",
"0.5571222",
"0.55682874",
"0.55668837",
"0.5553678"
] | 0.6787927 | 1 |
Anonymous users can make `whoami` requests. They receive a 401 response confirming they are not logged in. | def test_whoami_by_anonymous_user(self):
response = self.client.get("/api/users/whoami/")
self.assertEqual(response.status_code, 401) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def whoami():\n g.data['authenticated_user'] = g.current_user",
"def whoami():\n try:\n\n token = request.headers['token']\n username, uid, wid = read_auth_token(token)\n return dict(username=username, uid=uid, wid=wid)\n\n except SignatureExpired as e:\n return dict(error=str(e)), 401\n except BadSignature as e:\n return dict(error=str(e)), 401\n except Exception as e:\n return dict(error=str(e)), 500",
"def whoami(self):\n if self.auth:\n return self.auth[0]\n else:\n return \"Anonymous\"",
"def whoami(ctx):\n ctx.setup_logger(format='')\n AuthCmd(ctx).whoami()",
"def test_home_as_anonymous(self):\n response = self.client.get(\"/images/contents/\")\n self.assertEqual(response.status_code, 401)",
"def test_home_as_anonymous(self):\n response = self.client.get(\"/images/contents/\")\n self.assertEqual(response.status_code, 401)",
"def get_anonymous_user():\n return User.objects.get(username = \"AnonymousUser\")",
"def test_anonymous(self):\n self.logout_user()\n\n response = self.client.get(self.api_link)\n self.assertContains(response, \"sign in to use private threads\", status_code=403)",
"def login_anonymously(self):\n username = b\"anonymous\"\n password = b\"anonymous\"\n self.send_cmd(b\"USER \" + username + B_CRLF)\n self.send_cmd(b\"PASS \" + password + B_CRLF)",
"def is_anonymous():\n return False",
"def test_status_code_for_anonymous_user(self):\n response = self.client.get(self.get_url())\n self.assertEqual(response.status_code, self.status_anonymous)",
"def before_request() -> None:\n if current_user.is_anonymous() or not current_user.is_allowed():\n abort(401)",
"def whoami(self):",
"def login(self):\n self._client.clear_credentials()\n self._client.get('/v1/whoami')",
"def test_get_all_tokens_anonymous_user(self):\r\n\r\n # Anonymoues users should be unauthorized, no matter which kind of token are requesting\r\n res = self.app.get('/api/token')\r\n err = json.loads(res.data)\r\n\r\n assert res.status_code == 401, err\r\n assert err['status'] == 'failed', err\r\n assert err['status_code'] == 401, err\r\n assert err['exception_cls'] == 'Unauthorized', err\r\n assert err['target'] == 'token', err",
"def whoami():\n return current_user._get_current_object()",
"def _is_anonymous_user(auth_info):\n auth_info = auth_info or _get_auth_info_for_id_or_from_request()\n return str(auth_info.get(\"user_id\")) == ANONYMOUS_USER_ID",
"def test_anonymous_user_read(self):\r\n with self.flask_app.test_request_context('/'):\r\n for token in self.auth_providers:\r\n assert_raises(Unauthorized,\r\n getattr(require, 'token').read,\r\n token)",
"def test_get_specific_token_anonymous_user(self):\r\n\r\n res = self.app.get('/api/token/twitter')\r\n err = json.loads(res.data)\r\n\r\n assert res.status_code == 401, err\r\n assert err['status'] == 'failed', err\r\n assert err['status_code'] == 401, err\r\n assert err['exception_cls'] == 'Unauthorized', err\r\n assert err['target'] == 'token', err",
"def get_authenticated_denied(self):",
"def whoami(self):\n response = requests.get(self.ENDPOINT + '/user-resource/user', headers={'apikey':self.apikey})\n\n return response.json()",
"def user_logged_in():\n if not session.get('user_id'):\n return \"nope\", 401\n else:\n return \"yep\", 200",
"def test_not_authenticated(self):\n pass # lint-amnesty, pylint: disable=unnecessary-pass",
"def test_not_authenticated(self):\n pass # lint-amnesty, pylint: disable=unnecessary-pass",
"def hidden_basic_auth(user=\"user\", passwd=\"passwd\"):\n\n if not check_basic_auth(user, passwd):\n return status_code(404)\n return jsonify(authenticated=True, user=user)",
"def test_authenticated_inherits_anonymous_permission(self):\n resource = Resource('milestone', 'milestone1')\n self.assertTrue(self.check_permission('MILESTONE_VIEW',\n 'anonymous', resource))\n self.assertTrue(self.check_permission('MILESTONE_VIEW',\n 'authenticated', resource))\n self.assertIn('MILESTONE_VIEW', self.get_perm('anonymous',\n resource))\n self.assertIn('MILESTONE_VIEW', self.get_perm('authenticated',\n resource))",
"def run_whoami(self, expanded, unexpanded) :\n\t\tif expanded :\n\t\t\treturn self.errormessage(\"Doesn't need any argument\")\n\t\t(username, roles) = self.WhoAmI()\n\t\tself.htmlmessage('Username: %s Roles: %s' % (username, string.join(roles, ', ')), safe=1)\n\t\tself.printf(\"%s\\n\" % username)",
"def is_anonymous(self):\n return False",
"def whoami(self):\n print(\"-----\",self._whoami)",
"def is_anonymous(self):\n return False"
] | [
"0.71340424",
"0.7014765",
"0.6927591",
"0.6695747",
"0.6655952",
"0.6655952",
"0.6635073",
"0.65991694",
"0.65672773",
"0.6493297",
"0.64832705",
"0.64729846",
"0.6437622",
"0.6405885",
"0.64006865",
"0.6381664",
"0.6346517",
"0.6344521",
"0.6308586",
"0.6299756",
"0.62978876",
"0.62631",
"0.62279135",
"0.62279135",
"0.6215758",
"0.6158293",
"0.6147617",
"0.61027783",
"0.60672724",
"0.60541975"
] | 0.8336867 | 0 |
Fetch node data using k8s API | def get_node_data(cluster_id):
try:
        # fetch the bearer token from the service-account secret in the 'dashboard' namespace
_TOKEN = [base64.b64decode(secret_item.data['token']).decode('UTF-8') for secret_item in client.CoreV1Api(
).list_namespaced_secret('dashboard').items if base64.b64decode(secret_item.data['namespace']).decode(
'UTF-8') == 'dashboard'][0]
        # build a kubectl-style User-Agent string from the cluster's version info
version_detail = client.VersionApi().get_code()
_USER_AGENT = 'kubectl/' + version_detail.git_version + ' (' + version_detail.platform + ') ' + 'kubernetes/' + \
version_detail.git_commit[0:7]
_HEADERS = {'User-Agent': _USER_AGENT, 'Accept': 'application/json',
'Authorization': f"Bearer {_TOKEN}"}
        # build the URL for the node list endpoint
_URL = client.Configuration().host + '/api/v1/nodes'
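        # TLS verification is disabled below, so suppress urllib3's InsecureRequestWarning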
requests.packages.urllib3.disable_warnings(category=InsecureRequestWarning)
return requests.get(url=_URL, headers=_HEADERS, verify=False).json()['items']
except Exception as e:
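        # log and swallow the error; the caller receives None on failure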
logger.error(e) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get(node_instance_id, logger, client, tenant_name):\n if tenant_name:\n logger.info('Explicitly using tenant `{0}`'.format(tenant_name))\n logger.info('Retrieving node instance {0}'.format(node_instance_id))\n try:\n node_instance = client.node_instances.get(node_instance_id)\n except CloudifyClientError as e:\n if e.status_code != 404:\n raise\n raise CloudifyCliError('Node instance {0} not found'.format(\n node_instance_id))\n\n print_data(NODE_INSTANCE_COLUMNS, node_instance, 'Node-instance:', 50)\n\n # print node instance runtime properties\n logger.info('Instance runtime properties:')\n for prop_name, prop_value in utils.decode_dict(\n node_instance.runtime_properties).iteritems():\n logger.info('\\t{0}: {1}'.format(prop_name, prop_value))\n logger.info('')",
"def cli():\n while True:\n try:\n # Get the whole information on each edge.\n l_edge = list()\n s_rsc = '{}/edge'.format(etcdc.prefix)\n \n try:\n r = etcdc.read(s_rsc, recursive=True)\n except etcd.EtcdKeyNotFound as e:\n log.error(e)\n else:\n for child in r.children:\n l_app = list()\n d = ast.literal_eval(child.value)\n # get hosts\n print(PROJECT_ROOT + '/' + d['endpoint'])\n l_hosts = kube_list_node(PROJECT_ROOT + '/' + d['endpoint'])\n d['hosts'] = len(l_hosts)\n d_nodes = dict() # {'name': 'ip', ...}\n for item in l_hosts:\n d_nodes[item.metadata.name] = item.status.addresses[0].address\n # log.debug(d_nodes)\n # get # of tenants and apps\n l_tenants = get_tenant(d['name'])\n d['tenants'] = len(l_tenants)\n d['apps'] = 0\n for e in l_tenants:\n if 'app' in e:\n d['apps'] += len(e['app'])\n \n d['cpu'] = 0\n d['memory'] = 0\n i_total_cores = 0\n i_total_memory = 0\n i_total_storage = 0\n for h in l_hosts:\n i_total_cores += int(h.status.capacity['cpu'])\n i_total_memory += int(h.status.capacity['memory'].\n replace('Ki', ''))\n d['tot_cpu'] = i_total_cores\n d['tot_mem'] = int(i_total_memory / (1024*1024))\n \n # Get loadavg and free mem\n if d['name'] == 'edge1':\n ssh_server = 'harden.iorchard.co.kr'\n elif d['name'] == 'edge2':\n ssh_server = 'durant.iorchard.co.kr'\n RSC = 'ssh -p42544 {} get_rsc.sh'.format(ssh_server)\n (b_res, s_out) = cmd(RSC, 3, False)\n l = s_out.split(\"\\n\")\n d['used_cpu'] = (float(l[0]) + float(l[1]) + float(l[2]))\n avail_mem = (int(l[3]) + int(l[4]) + int(l[5])) / (1024*1024)\n d['used_mem'] = d['tot_mem'] - avail_mem\n d['cpu'] = int(d['used_cpu'] / d['tot_cpu'] * 100)\n d['memory'] = int(d['used_mem'] / d['tot_mem'] * 100)\n # ceph storage\n CEPH = \"kubectl --kubeconfig \" + PROJECT_ROOT + '/' \\\n + d['endpoint'] + \" -n rook-ceph exec -it \" \\\n + \"$(kubectl --kubeconfig \" + PROJECT_ROOT + '/' \\\n + d['endpoint'] + \" -n rook-ceph get po \" \\\n + \"-l app=rook-ceph-tools \" \\\n + \"-o jsonpath='{.items[0].metadata.name}') -- \" \\\n + \"ceph df --format json\"\n (b_res, s_out) = cmd(CEPH, 3, False)\n print(s_out)\n d['status'] = 'Healthy' if b_res else 'Unhealthy'\n d_stor = ast.literal_eval(s_out)\n d['tot_stor'] = int(d_stor['stats']['total_bytes'] / pow(1024, 3))\n d['used_stor'] = int(d_stor['stats']['total_used_bytes'] / pow(1024, 3))\n d['storage'] = int(d['used_stor'] / d['tot_stor'] * 100)\n # Update etcd status\n try:\n s = '{}/edge/{}'.format(etcdc.prefix,\n d['name'])\n # log.debug(d)\n etcdc.write(s, d, prevExist=True)\n except etcd.EtcdKeyNotFound as e:\n log.error(e)\n \n # Update app status\n s_app = '{}/app'.format(etcdc.prefix)\n try:\n r_app = etcdc.read(s_app, recursive=True)\n except etcd.EtcdKeyNotFound as e:\n log.error(e)\n else:\n for app_child in r_app.children:\n if app_child.value is not None:\n d_app = dict()\n app = ast.literal_eval(app_child.value)\n if app['edge'] == d['name']:\n d_app['name'] = app['name']\n d_app['username'] = GUAC_USER\n d_app['password'] = GUAC_PASS\n # Get catalog info.\n s_cat = '{}/catalog/{}'.format(etcdc.prefix,\n app['catalog'])\n try:\n r_cat = etcdc.read(s_cat)\n except etcd.EtcdKeyNotFound as e:\n log.error(e)\n else:\n cat = ast.literal_eval(r_cat.value)\n app['cat_type'] = cat['type']\n app['cat_name'] = cat['name']\n app['cat_logo'] = cat['logo']\n # Get app status\n if app['cat_type'] == 'vm':\n # first, look at DataVolume status of app.\n CMD = \"kubectl --kubeconfig \" + PROJECT_ROOT + '/' \\\n + d['endpoint'] + ' get dv ' \\\n + app['name'] \\\n + \" -o jsonpath='{range 
.status}{.phase},{.progress}{end}'\"\n (b_res, s_out) = cmd(CMD, 5, False)\n l_out = s_out.split(',')\n if l_out[0] == 'Succeeded':\n # Get vm status of app\n CMD = \"kubectl --kubeconfig \" + PROJECT_ROOT \\\n + '/' \\\n + d['endpoint'] + ' get vm ' \\\n + app['name'] \\\n + \" -o jsonpath='{.status.ready}'\"\n (b_res, s_out) = cmd(CMD, 5, False)\n if b_res and s_out == 'true':\n # update app status 'running'.\n app.update({'status': 'running'})\n \n if app['edge'] == d['name']:\n # Get where app is running.\n CMD = \"kubectl --kubeconfig \" \\\n + PROJECT_ROOT + '/' \\\n + d['endpoint'] + ' get vmi ' \\\n + app['name'] \\\n + \" -o jsonpath='{.status.nodeName}'\"\n (b_res, s_out) = cmd(CMD, 5, False)\n if b_res:\n d_app['hostname'] = d_nodes[s_out]\n # Get nodeport for app.\n CMD = \"kubectl --kubeconfig \" \\\n + PROJECT_ROOT + '/' \\\n + d['endpoint'] + ' get svc ' \\\n + app['name'] \\\n + \" -o jsonpath='{.spec.ports[0].nodePort}'\"\n (b_res, s_out) = cmd(CMD, 5, False)\n if b_res:\n d_app['port'] = s_out\n else:\n # update app status 'stopped'\n app.update({'status': 'stopped'})\n elif l_out[0] == 'ImportInProgress':\n # update app status 'building' and \n app.update({'status': 'building ({})'.format(l_out[1])})\n elif app['cat_type'] == 'container':\n app.update({'status': 'running'})\n \n try:\n s = '{}/app/{}'.format(etcdc.prefix,\n app['name'])\n # log.debug(app)\n etcdc.write(s, app, prevExist=True)\n except etcd.EtcdKeyNotFound as e:\n log.error(e)\n \n if 'port' in d_app:\n l_app.append(d_app)\n # render guac-config.j2 and copy it to guac broker server\n log.debug(l_app)\n template = env.get_template('broker.j2')\n s_out = template.render(l_app=l_app)\n s_tmp = '/tmp/{}.broker'.format(d['name'])\n try:\n with open(s_tmp, 'w') as f:\n f.write(s_out)\n except Exception as e:\n log.error(e)\n else:\n CMD = \"scp \" \\\n + \"-P42544 {} {}\".format(s_tmp, d['broker_ip']) \\\n + \":/etc/guacamole/noauth-config.xml\"\n log.debug(CMD)\n (b_res, s_out) = cmd(CMD, 5, False)\n if b_res:\n d_app['port'] = s_out\n \n l_edge.append(d)\n \n # log.debug(l_edge)\n log.debug(l_app)\n \n time.sleep(1)\n except:\n log.error('unknown error')",
"async def GET(node_id, path):\n r = session().get(f\"{HOST}:{BASE_PORT + node_id}{path}\")\n if r.status_code != 200:\n raise ValueError(f\"Got bad response {r.status_code} from \" +\n f\"node {node_id} on {path}.\")\n return {\"status_code\": r.status_code, \"data\": r.json()}",
"def get_nodes(self):\n return requests.get(self.__url + 'nodes').json()",
"async def retrieve_node(request: web.Request) -> web.Response:\n path_params = parse_request_path_parameters_as(_NodePathParams, request)\n\n try:\n data = await request.json()\n port_keys = data.get(\"port_keys\", [])\n except json.JSONDecodeError as exc:\n raise web.HTTPBadRequest(reason=f\"Invalid request body: {exc}\") from exc\n\n return web.json_response(\n await director_v2_api.retrieve(\n request.app, f\"{path_params.node_id}\", port_keys\n ),\n dumps=json_dumps,\n )",
"def get(self, key):\n \n print(\"Getting from node {}\".format(self.url))\n\n (headers, content) = self.http_client.request(self.url + \"/\" + key + \"?r=1\", method=\"GET\", redirections=0)\n return (headers, content)",
"def _get_nodes(prefix, cloud, configs):\n # Get nodes with prefix\n nodes = Node.nodes(prefix, cloud, **configs)\n\n # Check for nodes available with prefix\n if not nodes:\n log.info(f\"No resources available with prefix '{prefix}'. Exiting ....\")\n sys.exit(1)\n\n log.info(f\"Nodes with prefix '{prefix}' are {', '.join([n for n, _ in nodes])}\")\n return nodes",
"def get_nodes(self):\n self.get_status()\n old_api = self.version[0] <= '3'\n if old_api:\n certs_path = \"%s/certificate_statuses/*\" % (self.environment)\n nodeinfo_path_tpl = \"{env}/node/{node}\"\n else:\n certs_path = \"puppet-ca/v1/certificate_statuses/no_key?environment=%s\" % (self.environment)\n nodeinfo_path_tpl = \"puppet/v3/node/{node}?environment={env}\"\n\n csts = self._send('GET', certs_path)\n nodes_names = []\n for cst in csts:\n nodes_names.append(cst['name'])\n\n all_nodes = []\n for nname in nodes_names:\n path = nodeinfo_path_tpl.format(node=nname, env=self.environment)\n nodeinfo = self._send('GET', path)\n if old_api:\n nodeinfo = self._from_pson(nodeinfo['data'])\n else:\n nodeinfo = self._from_pson(nodeinfo)\n if 'parameters' in nodeinfo:\n node = nodeinfo['parameters']\n if self.onlynodes:\n if not (node.get('hostname') in self.onlynodes or\n node.get('ipaddress') in self.onlynodes or\n node.get('fqdn') in self.onlynodes or\n node.get('uuid') in self.onlynodes):\n continue\n all_nodes.append(node)\n\n return all_nodes",
"def get_nodes():\n\n host = str(request.args['host'])\n days = float(request.args['days'])\n\n to_time = int(time.time())\n to_day = int(time.strftime('%Y%m%d', time.gmtime(float(to_time))))\n from_time = to_time-int(days*24*60*60)\n from_day = int(time.strftime('%Y%m%d', time.gmtime(float(from_time))))\n day_in=''\n for x in range(from_day, to_day+1):\n day_in = day_in + ',' + str(x)\n day_in=re.sub(r\"^,\", \"\", day_in)\n day_in=re.sub(r\",$\", \"\", day_in)\n query = \"SELECT * FROM metrics WHERE host='\" + str(host) + \"' and date IN (\"\n query = query + str(day_in) + \") and time>=\" + str(int(int(from_time)*1000)) + \" and time<=\"\n query = query + str(int(int(to_time)*1000)) + \" ALLOW FILTERING\"\n rows = session.execute(query);\n reply={}\n last_value={}\n for r in rows:\n if str(r.host) not in reply:\n reply[r.host]={}\n last_value[r.host]={}\n if str(r.metric) not in reply[r.host]:\n reply[r.host][r.metric]=[]\n last_value[r.host][r.metric]=int(r.value)\n continue\n real_value = (r.value-last_value[r.host][r.metric])/60\n\tlast_value[r.host][r.metric]=int(r.value)\n reply[str(r.host)][r.metric].append({ 'value': int(real_value),\n 'time': str(r.time) })\n return json.dumps(reply)",
"def get_kong_node_usage_metrics(opts):\n\n url = \"{0}/status\".format(opts['base_url'])\n\n r = requests.get(url)\n try:\n r.raise_for_status()\n except requests.exceptions.RequestException as e:\n logging.debug(\"http response body - %s\", r.text)\n logging.error(\"An exception occurred: (%s)\", e)\n sys.exit(2)\n\n print r.text\n\n return True",
"def get_host_info(self):\n\n if len(self.index) == 0:\n # Need to load index from cache\n self.load_index_from_cache()\n\n if not self.args.host in self.index:\n # try updating the cache\n self.do_api_calls_update_cache()\n if not self.args.host in self.index:\n # host might not exist anymore\n return self.json_format_dict({}, True)\n\n node_id = self.index[self.args.host]\n print \"NODE ID %s\" % node_id\n print \"INDEX: %s\" % self.index\n\n node = self.get_node(node_id)\n node_vars = {}\n for direct_attr in [\n \"api_id\",\n \"datacenter_id\",\n \"label\",\n \"display_group\",\n \"create_dt\",\n \"total_hd\",\n \"total_xfer\",\n \"total_ram\",\n \"status\",\n \"alert_cpu_enabled\",\n \"alert_cpu_threshold\",\n \"alert_diskio_enabled\",\n \"alert_diskio_threshold\",\n \"alert_bwin_enabled\",\n \"alert_bwin_threshold\",\n \"alert_bwout_enabled\",\n \"alert_bwout_threshold\",\n \"alert_bwquota_enabled\",\n \"alert_bwquota_threshold\",\n \"backup_weekly_daily\",\n \"backup_window\",\n \"watchdog\"\n ]:\n node_vars[direct_attr] = getattr(node, direct_attr)\n\n node_vars[\"datacenter_city\"] = self.get_datacenter_city(node)\n node_vars[\"public_ip\"] = [addr.address for addr in node.ipaddresses if addr.is_public][0]\n\n return self.json_format_dict(node_vars, True)",
"def cluster_znode_data(cluster_name, znode, headers=None):\n\n zdata_resp = cluster_znode(cluster_name,\n znode,\n headers=headers)\n zdata = json.loads(zdata_resp.get_data())\n resp = Response(zdata[\"data\"],\n status=200,\n mimetype=\"text/plain\")\n return resp",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None,\n auto_renew: Optional[pulumi.Input[bool]] = None,\n auto_renew_period: Optional[pulumi.Input[int]] = None,\n cluster_name: Optional[pulumi.Input[str]] = None,\n data_center_name: Optional[pulumi.Input[str]] = None,\n disk_size: Optional[pulumi.Input[int]] = None,\n disk_type: Optional[pulumi.Input[str]] = None,\n enable_public: Optional[pulumi.Input[bool]] = None,\n instance_type: Optional[pulumi.Input[str]] = None,\n ip_white: Optional[pulumi.Input[str]] = None,\n maintain_end_time: Optional[pulumi.Input[str]] = None,\n maintain_start_time: Optional[pulumi.Input[str]] = None,\n major_version: Optional[pulumi.Input[str]] = None,\n node_count: Optional[pulumi.Input[int]] = None,\n password: Optional[pulumi.Input[str]] = None,\n pay_type: Optional[pulumi.Input[str]] = None,\n period: Optional[pulumi.Input[int]] = None,\n period_unit: Optional[pulumi.Input[str]] = None,\n public_points: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n security_groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n status: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,\n vswitch_id: Optional[pulumi.Input[str]] = None,\n zone_id: Optional[pulumi.Input[str]] = None) -> 'Cluster':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = _ClusterState.__new__(_ClusterState)\n\n __props__.__dict__[\"auto_renew\"] = auto_renew\n __props__.__dict__[\"auto_renew_period\"] = auto_renew_period\n __props__.__dict__[\"cluster_name\"] = cluster_name\n __props__.__dict__[\"data_center_name\"] = data_center_name\n __props__.__dict__[\"disk_size\"] = disk_size\n __props__.__dict__[\"disk_type\"] = disk_type\n __props__.__dict__[\"enable_public\"] = enable_public\n __props__.__dict__[\"instance_type\"] = instance_type\n __props__.__dict__[\"ip_white\"] = ip_white\n __props__.__dict__[\"maintain_end_time\"] = maintain_end_time\n __props__.__dict__[\"maintain_start_time\"] = maintain_start_time\n __props__.__dict__[\"major_version\"] = major_version\n __props__.__dict__[\"node_count\"] = node_count\n __props__.__dict__[\"password\"] = password\n __props__.__dict__[\"pay_type\"] = pay_type\n __props__.__dict__[\"period\"] = period\n __props__.__dict__[\"period_unit\"] = period_unit\n __props__.__dict__[\"public_points\"] = public_points\n __props__.__dict__[\"security_groups\"] = security_groups\n __props__.__dict__[\"status\"] = status\n __props__.__dict__[\"tags\"] = tags\n __props__.__dict__[\"vswitch_id\"] = vswitch_id\n __props__.__dict__[\"zone_id\"] = zone_id\n return Cluster(resource_name, opts=opts, __props__=__props__)",
"def get_node(conn, name):\n datacenter_id = get_datacenter_id()\n\n for item in conn.list_servers(datacenter_id)[\"items\"]:\n if item[\"properties\"][\"name\"] == name:\n node = {\"id\": item[\"id\"]}\n node.update(item[\"properties\"])\n return node",
"def cluster_list():\n request_debug(r, logger)\n json_body = r.get_json(force=True, silent=True) or {}\n result = cluster_handler.list(filter_data=json_body)\n response_ok[\"data\"] = result\n return jsonify(response_ok), CODE_OK",
"def send_simple_get_request(hostname, node, key, causal_payload=''):\n get_str = \"http://\" + hostname + \":\" + node.access_port + \"/kvs/\" + key\n data = {'causal_payload':causal_payload}\n if PRINT_HTTP_REQUESTS:\n print \"Get request: \" + get_str + ' data field:' + str(data)\n r = req.get(get_str, data=data)\n if PRINT_HTTP_RESPONSES:\n print r.text, r.status_code\n return r",
"def get(self, controller, data, *args, **kwargs): \n task_manager = controller.get_task_manager()\n res = task_manager.get_all_tasks(details=True)\n resp = {\n u'task-instances':res,\n u'count':len(res)\n } \n return resp",
"def describe_cluster_response():\n return {\n \"cluster\": {\n \"status\": \"ACTIVE\",\n \"endpoint\": \"https://endpoint.amazonaws.com\",\n \"name\": EXAMPLE_NAME,\n \"certificateAuthority\": {\n \"data\": \"LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tDQpWR1Z6ZEdsdVp5QkVZWFJoRFFwVVpYTjBhVzVuSUVSaGRHRU5DbFJsYzNScGJtY2dSR0YwWVEwS2EzVmlaWEp1WlhSbGN6QWVGdzBLVkdWemRHbHVaeUJFWVhSaERRcFVaWE4wYVc1bklFUmhkR0ZWQkFNVERRcHJkV0psY201bGRHVnpNQUVpTUEwS1ZHVnpkR2x1WnlCRVlYUmhEUXBVWlhOMGFXNW5JRVJoZEdFTkNsUmxjM1JwYm1jZ1JHRjBZY3UvR1FnbmFTcDNZaHBDTWhGVVpYTjBhVzVuSUVSaGRHRXl3clZqeEpWNjNwNFVHRmpZdHdGR1drUldJVkV1VkdWemRHbHVaeUJFWVhSaGJzT0MxSVJiTDhPd0lpMVhiWGg2VkdWemRHbHVaeUJFWVhSaFpXVndTTk9VVUZKNmN5QWJaaFpnWVNkTUV3MEtGMVJsYzNScGJtY2dSR0YwWVFZRFZSMFBBUUVFQkFNQ0FsUmxjM1JwYm1jZ1JHRjBZUUV3RFFvR0NTcElEUXBVWlhOMGFXNW5JRVJoZEdGcEgxc1pPRTNMa3lrMU9DWUNHUloyTEZjM3paOCtHell3WEZSbGMzUnBibWNnUkdGMFlYMUR5NjFNMVlGV1AxWVRIMVJsYzNScGJtY2dSR0YwWVd0aE5oMVphM2dWUDBGaGNSWjdKaW9oZVc4N1JsUmxjM1JwYm1jZ1JHRjBZUVpIVHd4NE9IdzZmZz09DQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t\"\n },\n \"roleArn\": \"arn:aws:iam::111222333444/eksRole\",\n \"resourcesVpcConfig\": {\n \"subnetIds\": [\n \"subnet-00000000000000000\",\n \"subnet-00000000000000001\",\n \"subnet-00000000000000002\"\n ],\n \"vpcId\": \"vpc-00000000000000000\",\n \"securityGroupIds\": [\n \"sg-00000000000000000\"\n ]\n },\n \"version\": \"1.10\",\n \"arn\": \"arn:aws:eks:region:111222333444:cluster/\" + EXAMPLE_NAME,\n \"createdAt\": 1500000000.000\n }\n }",
"def list_nodes(hosts):\n if hosts=='hosts':\n query = \"SELECT DISTINCT host FROM metrics_index\";\n reply=[]\n elif hosts=='all':\n query = \"SELECT * FROM metrics_index\";\n reply={}\n\n rows = session.execute(query);\n for r in rows:\n if hosts=='hosts':\n if str(r.host) not in reply:\n reply.append(str(r.host))\n elif hosts=='all':\n if str(r.host) not in reply:\n reply[str(r.host)]=[]\n if str(r.metric) not in reply[str(r.host)]:\n reply[str(r.host)].append(str(r.metric))\n return json.dumps(reply)",
"def memcache_nodes(self) -> pulumi.Output[Sequence['outputs.NodeResponse']]:\n return pulumi.get(self, \"memcache_nodes\")",
"def getContainers(self,node):\n data = self.connect('get','nodes/%s/lxc' % node,None)\n return data",
"def cluster_node_get(self, node_name, desired_attributes=None):\n return self.request( \"cluster-node-get\", {\n 'node_name': [ node_name, 'node-name', [ basestring, 'node-name' ], False ],\n 'desired_attributes': [ desired_attributes, 'desired-attributes', [ ClusterNodeInfo, 'None' ], False ],\n }, {\n 'attributes': [ ClusterNodeInfo, False ],\n } )",
"def test_get_entity(self):\n\n storage = StringIO.StringIO()\n c = pycurl.Curl()\n c.setopt(c.URL,\"http://127.0.0.1:8090/compute/9930\")\n c.setopt(c.HTTPHEADER, ['Accept:application/occi+json'])\n c.setopt(c.VERBOSE, True)\n c.setopt(c.CUSTOMREQUEST, 'GET')\n c.setopt(c.WRITEFUNCTION, storage.write)\n c.perform()\n content = storage.getvalue()\n print \" ===== Body content =====\\n \" + content + \" ==========\\n\"",
"def get(self):\n\n self.response.headers['Access-Control-Allow-Origin'] = '*' # Required until JSONP is supported\n self.response.headers['Content-Type'] = 'application/json' # All responses are json, so JSONP to come\n\n if(self.request.get(\"cid\")):\n cid = self.request.get(\"cid\")\n node = memcache.get(cid)\n\n if node is not None:\n # if it is a valid cid then check to see if there are any commands\n # in the nodes command queue\n # and update some fields on the node\n\n command = node.getNextCommand()\n if command is not None:\n self.write(JSONResponse.execCommand(command))\n if command.autoComplete is True:\n logging.info(\"auto completing\")\n node.completeCommand(command.id)\n else:\n self.write(json.dumps({\"type\":\"noCommand\"}))\n else:\n # if it turned out to be a non valid cid, just make a new node\n node = createNode(self.request)\n self.write(JSONResponse.assignment(node.cid))\n \n\n else: # if the request had no cid attached\n node = createNode(self.request)\n self.write(JSONResponse.assignment(node.cid))",
"def get(self, request, nnid, wfver, desc):\n try:\n return_data = NNCommonManager().get_nn_node_info(nnid, wfver, desc)\n return Response(json.dumps(return_data))\n except Exception as e:\n return_data = {\"status\": \"404\", \"result\": str(e)}\n return Response(json.dumps(return_data))",
"def handle_status(self, request):\n \"\"\"\n @api {get} /status Get node status\n @apiName GetNodeStatus\n @apiGroup Node\n @apiVersion 1.1.0\n\n @apiSuccess {Boolean} execution_enabled Task execution is enabled on the node.\n @apiSuccess {Boolean} leader Node is the leader.\n @apiSuccess {String} name Node name.\n @apiSuccess {Boolean} scheduler_running The scheduler is running on the node.\n @apiSuccess {String} address Node IP address.\n @apiSuccess {String[]} pools Pools in which the node is registered.\n @apiSuccess {Object} running_processes Processes running on the host.\n @apiSuccess {Object} running_processes.process Process.\n @apiSuccess {String} running_processes.process.start_time Time the process started, ISO 8601 formatted.\n @apiSuccess {String} running_processes.process.task ID of the task.\n @apiSuccess {Boolean} cluster_joined Node has joined the cluster.\n @apiSuccess {Boolean} contending_for_lead Node is contending for lead.\n @apiSuccess {Boolean} pools_joined Node has joined its pools.\n\n @apiSuccessExample {json} Example response:\n {\n \"execution_enabled\": true,\n \"leader\": false,\n \"name\": \"node2\",\n \"scheduler_running\": false,\n \"address\": \"127.0.0.1:32002\",\n \"pools\": [\"pool1\", \"pool2\"],\n \"running_processes\": {\n \"b26e5cc2ef3f11e4817b0026b951c045\": {\n \"start_time\": \"2015-04-30T13:49:18.351494+00:00\",\n \"task\": \"508b4b72e44611e49e76c81f66cd0cca\"\n }\n },\n \"cluster_joined\": true,\n \"contending_for_lead\": true,\n \"pools_joined\": true\n }\n \"\"\"\n\n headers = {\n 'Content-Type': 'application/javascript',\n 'Access-Control-Allow-Origin': '*'\n }\n\n status = {\n 'name': self.cluster.nodename,\n 'address': self.cluster.addr,\n 'pools': self.cluster.mypools,\n 'leader': self.cluster.is_leader,\n 'cluster_joined': self.cluster.cluster_joined,\n 'pools_joined': self.cluster.pools_joined,\n 'contending_for_lead': self.cluster.contending_for_lead,\n\n 'execution_enabled': self.manager.enabled,\n 'running_processes': dict([ (execid, { 'task': details['task'], 'start_time': details['start_time'].isoformat() }) for (execid, details) in self.manager.running_processes.items() ]),\n\n 'scheduler_running': self.cluster.scheduler.running\n }\n\n return HTTPReply(body = json.dumps(status), headers = headers)",
"def getNodes(self):\n data = self.connect('get','nodes',None)\n return data",
"def compute_node_get_by_host(context, host):\n session = get_session()\n with session.begin():\n service = session.query(models.Service).\\\n filter_by(host=host, binary=\"monitor-bmc\").first()\n node = session.query(models.ComputeNode).\\\n options(joinedload('service')).\\\n filter_by(deleted=False,service_id=service.id)\n return node.first()",
"def _load_cluster(self):",
"def node(*,bucket, endpoint, key, crn, auth=IBM_CLOUD_OAUTH_URL):\n # Get bearer token to access COS S3 API\n # payload to generate auth token\n token_req_data = {\n 'grant_type' :'urn:ibm:params:oauth:grant-type:apikey',\n 'response_type':'cloud_iam',\n 'apikey' :key\n }\n response = requests.post(auth, data=token_req_data, headers={'Content-type': 'application/x-www-form-urlencoded'})\n if response.status_code != 200:\n raise Exception(\"error\")\n bearer_token_info = response.json()\n logging.debug(bearer_token_info)\n \n # Get Aspera connection information for the bucket\n header_auth = {\n 'ibm-service-instance-id':crn,\n 'Authorization':bearer_token_info['token_type'] + \" \" + bearer_token_info['access_token'],\n 'Accept':'application/xml'\n }\n response = requests.get(endpoint + \"/\" + bucket, headers=header_auth, params={'faspConnectionInfo':True})\n if response.status_code != 200:\n raise Exception(\"error accessing endpoint\")\n logging.debug(response.content)\n ats_info_root = xml.dom.minidom.parseString(response.content.decode('utf-8'));\n ats_ak = ats_info_root.getElementsByTagName('AccessKey')[0]\n ats_url = ats_info_root.getElementsByTagName('ATSEndpoint')[0].firstChild.nodeValue\n ats_ak_id = ats_ak.getElementsByTagName('Id')[0].firstChild.nodeValue\n ats_ak_secret = ats_ak.getElementsByTagName('Secret')[0].firstChild.nodeValue\n \n # Get delegated token to access the node api\n token_req_data['response_type'] = 'delegated_refresh_token'\n token_req_data['receiver_client_ids'] = 'aspera_ats'\n response = requests.post(auth, data=token_req_data, headers={'Content-type': 'application/x-www-form-urlencoded'})\n if response.status_code != 200:\n raise Exception(\"error when generating token\")\n delegated_token_info = response.json()\n aspera_storage_credentials = {\n 'type': 'token',\n 'token': delegated_token_info\n }\n logging.debug(aspera_storage_credentials)\n return {\n 'url': ats_url,\n 'auth': requests.auth.HTTPBasicAuth(ats_ak_id, ats_ak_secret),\n 'headers': {\n 'X-Aspera-Storage-Credentials':json.dumps(aspera_storage_credentials),\n },\n 'tspec': {'tags':{'aspera':{'node':{'storage_credentials':aspera_storage_credentials}}}}\n }"
] | [
"0.60346806",
"0.59235954",
"0.578973",
"0.57561266",
"0.5736296",
"0.5608303",
"0.5593132",
"0.5583009",
"0.556707",
"0.5539366",
"0.5538481",
"0.55283237",
"0.5513567",
"0.54998785",
"0.5490597",
"0.5488156",
"0.5446512",
"0.5438874",
"0.5410196",
"0.53977907",
"0.5386005",
"0.5373807",
"0.53584456",
"0.53544533",
"0.53529906",
"0.53329325",
"0.5312624",
"0.5312026",
"0.5297073",
"0.5284067"
] | 0.6105052 | 0 |
Fetch compute cell data | def get_compute_cell_data(cluster_id=None, namespace_id=None):
cells_info = client.CustomObjectsApi().list_cluster_custom_object('kiyot.elotl.co', 'v1beta1', 'cells')
return cells_info | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _fetch_data(self):\n pass",
"def getdata(\n self,\n col: str,\n exp: str,\n chan: str,\n res: int,\n xs: Tuple[int, int],\n ys: Tuple[int, int],\n zs: Tuple[int, int],\n ):\n if self.hasdata(col, exp, chan, res, xs, ys, zs):\n return self.fs.retrieve(col, exp, chan, res, xs, ys, zs)\n if self.is_terminal:\n raise IndexError(\"Cannot find data at: \", (col, exp, chan, res, xs, ys, zs))\n data = self._next.getdata(col, exp, chan, res, xs, ys, zs)\n if self._cache:\n self.setdata(data, col, exp, chan, res, xs, ys, zs)\n return data",
"def fetch_data():\n data.fetch_data()\n data.start_updating()",
"def fetch_data(self):",
"def get_nypd_complaint_results():\n data = None\n blob = BUCKET.blob(NYPD_COMPLAINT_FNAME)\n if blob.exists():\n blob.reload(client=STORAGE_CLIENT)\n if blob.time_created.strftime(\"%Y-%m-%d\") == datetime.now().strftime(\"%Y-%m-%d\"):\n print(\"Getting cached file \" + NYPD_COMPLAINT_FNAME)\n return {\"data\": __retrieve_from_bucket(NYPD_COMPLAINT_FNAME)}\n \n df = update_nypd_complaint_results()\n data = df.to_dict(orient=\"records\")\n # print(df)\n return {\"data\": data}",
"def get(self, **cell):\n cellobj = Cell(**cell)\n if not cellobj.is_terrestial:\n raise Exception(\"Priogrid only has data for terrestial cells, you are trying to get data for a non-terrestial cell\")\n\n if not self.name in CACHED_VARS:\n CACHED_VARS[self.name] = dict()\n CACHED_VARS[self.name][\"data\"] = get_unknown_data(self.name, startyr=self.startyr, endyr=self.endyr, interpolated=self.interpolated)\n\n # TODO: Test that doesnt slow down...\n## if not all((yr in CACHED_VARS[self.name][\"data\"][cellobj.gid][\"data\"] for yr in range(self.startyr, self.endyr+1))):\n## newdata = get_unknown_data(self.name, startyr=self.startyr, endyr=self.endyr, interpolated=self.interpolated)\n## yrdict = CACHED_VARS[self.name][\"data\"][cellobj.gid][\"data\"]\n## yrdict.update( newdata[cellobj.gid][\"data\"] )\n \n value = CACHED_VARS[self.name][\"data\"][cellobj.gid][\"data\"]\n return value",
"def _read_cell_direct(cls):\n\n cell_data = {}\n cell_columns = cls._get_columns(CELL_MANIFEST)\n cell_psvs = cls._get_component_psvs(CELL_MANIFEST)\n\n for cell_psv in cell_psvs:\n for row in gzip.GzipFile(fileobj=io.BytesIO(cls._read_s3_url(cell_psv))):\n row_dict = dict(zip(cell_columns, row.strip().split(b'|')))\n cell_data[row_dict[\"cellkey\"].decode()] = {k: v.decode() for\n k, v in row_dict.items()}\n total_umis = cell_data[row_dict[\"cellkey\"].decode()][\"total_umis\"]\n cell_data[row_dict[\"cellkey\"].decode()][\"total_umis\"] = (total_umis if total_umis == \"nan\"\n else str(float(total_umis)))\n\n return cell_data",
"def do_fetch(self):\n pass",
"def get_cells(self):\n raise NotImplementedError",
"async def _fetch_data(self) -> T:\n raise NotImplementedError",
"def get_data():\n pass",
"async def _fetch_data(self) -> JobInfo:\n return await self.api.get_job()",
"async def fetch_data(self) -> T:",
"def compute(cls, dataset):\n return dataset",
"def get_data():\n return",
"def run(self):\n results = self.fetch()\n return results",
"def _get_data(self):\n raise NotImplementedError()",
"def getData(self, local_cache):",
"def fetch_data(self):\n\n data_dict = {\n 'price': self.get_current_price(),\n }\n\n return self.save_data(data_dict)",
"def run_calculation():\n\n print(\"Creating %d-process pool\" % mp.cpu_count())\n\n pool = mp.Pool(mp.cpu_count())\n\n f = h5py.File('/testdata/mandelbrot.hdf5', 'w')\n\n print(\"Creating output dataset with shape %s x %s\" % (NX, NY))\n\n dset = f.create_dataset('mandelbrot', (NX, NY), 'i')\n dset.attrs['XSTART'] = XSTART\n dset.attrs['YSTART'] = YSTART\n dset.attrs['XEXTENT'] = XEXTENT\n dset.attrs['YEXTENT'] = YEXTENT\n\n result = pool.imap(compute_row, (x * xincr for x in range(NX)))\n\n for idx, arr in enumerate(result):\n if idx % 25 == 0: print(\"Recording row %s\" % idx)\n dset[idx] = arr\n\n print(\"Closing HDF5 file\")\n\n f.close()\n\n print(\"Shutting down process pool\")\n\n pool.close()\n pool.join()",
"def get_building_complaint_results():\n data = None\n blob = BUCKET.blob(BUILDING_COMPLAINT_FNAME)\n if blob.exists():\n blob.reload(client=STORAGE_CLIENT)\n if blob.time_created.strftime(\"%Y-%m-%d\") == datetime.now().strftime(\"%Y-%m-%d\"):\n print(\"Getting cached file \" + BUILDING_COMPLAINT_FNAME)\n return {\"data\": __retrieve_from_bucket(BUILDING_COMPLAINT_FNAME)}\n \n df = update_building_complaint_results()\n data = df.to_dict(orient=\"records\")\n return {\"data\": data}",
"def _data(self):\n cell = self._get_cell()\n return deepcopy(cell.data)",
"def data_fetch(self, curs, splat_table, mcl_table, crs_no=0, output_fname=None):\n\t\tgene_no2gene_id = get_gene_no2gene_id(curs)\t#08-31-05\n\t\toutf = open(output_fname, 'w')\t#08-31-05\n\t\toutf.write(\"r:=[\")\t#08-31-05\n\t\t\n\t\tmcl_id2cluster_dstructure = {}\n\t\tno_of_total_genes = get_no_of_total_genes(curs)\n\t\tsys.stderr.write(\"Getting the basic information for all clusters...\\n\")\n\t\tcurs.execute(\"DECLARE crs%s CURSOR FOR select m.mcl_id, m.vertex_set, m.connectivity, 0,\\\n\t\t\tm.recurrence_array, s.edge_set, s.connectivity, m.cooccurrent_cluster_id from %s m, %s s where \\\n\t\t\tm.splat_id=s.splat_id\"\\\n\t\t\t%(crs_no, mcl_table, splat_table))\t#06-20-05\tconnectivity_original faked to be 0\n\t\tcurs.execute(\"fetch 5000 from crs%s\"%crs_no)\n\t\trows = curs.fetchall()\n\t\twhile rows:\n\t\t\tfor row in rows:\n\t\t\t\tunit = cluster_dstructure()\n\t\t\t\tunit.cluster_id = row[0]\n\t\t\t\tvertex_set = row[1][1:-1].split(',')\n\t\t\t\tunit.vertex_set = map(int, vertex_set)\n\t\t\t\tunit.connectivity = row[2]\n\t\t\t\tunit.connectivity_original = row[3]\n\t\t\t\trecurrence_array = row[4][1:-1].split(',')\n\t\t\t\tunit.recurrence_array = map(float, recurrence_array)\n\t\t\t\tunit.edge_set = parse_splat_table_edge_set(row[5])\n\t\t\t\tunit.splat_connectivity = row[6]\n\t\t\t\tunit.cooccurrent_cluster_id = row[7]\n\t\t\t\tunit.go_no2association_genes = self.get_go_functions_of_this_gene_set(curs, unit.vertex_set)\n\t\t\t\tunit.go_no2information = self.get_information_of_go_functions(curs, \\\n\t\t\t\t\tunit.go_no2association_genes, len(unit.vertex_set), no_of_total_genes, p_value_cut_off=0.05)\t#jasmine wants to cut some go-nos.\n\t\t\t\tunit.edge_cor_2d_list, unit.edge_sig_2d_list = self.get_cor_sig_2d_list(curs, unit.edge_set)\n\t\t\t\t\n\t\t\t\tstr_tmp = self.return_string_form_of_cluster_dstructure(unit, gene_no2gene_id)\t#08-31-05\n\t\t\t\toutf.write(\"%s,\"%str_tmp)\n\t\t\t\t#mcl_id2cluster_dstructure[unit.cluster_id] = unit\n\t\t\t\t\"\"\"\n\t\t\t\torder_1st_id, order_2nd_id = map(int, unit.cooccurrent_cluster_id.split('.'))\n\t\t\t\tif order_1st_id not in self.order_1st_id2all_clusters:\n\t\t\t\t\tself.order_1st_id2all_clusters[order_1st_id] = {}\n\t\t\t\tif order_2nd_id not in self.order_1st_id2all_clusters[order_1st_id]:\n\t\t\t\t\tself.order_1st_id2all_clusters[order_1st_id][order_2nd_id] = []\n\t\t\t\tself.order_1st_id2all_clusters[order_1st_id][order_2nd_id].append(unit)\n\t\t\t\t\"\"\"\n\t\t\tcurs.execute(\"fetch 5000 from crs%s\"%crs_no)\n\t\t\trows = curs.fetchall()\n\t\toutf.write(\"[]]:\")\t#08-31-05, 09-01-05 add the last blank []\n\t\tdel outf\n\t\tsys.stderr.write(\"Done.\\n\")\n\t\treturn mcl_id2cluster_dstructure",
"def read(self):\n self._load_metadata()\n return self._df.compute()",
"def _retrieveCachedData(self):",
"def get_data(self):",
"def cell(self):\n return self._cell",
"def get_data():\n pass",
"def get_data():\n pass",
"def get_data():\n pass"
] | [
"0.6227691",
"0.6165168",
"0.61336154",
"0.6130058",
"0.6112285",
"0.60044104",
"0.5895254",
"0.57669675",
"0.57077354",
"0.5702612",
"0.56423634",
"0.5611894",
"0.56060374",
"0.559",
"0.5576882",
"0.5543976",
"0.552921",
"0.5527102",
"0.5463397",
"0.54594564",
"0.5458928",
"0.54355335",
"0.5432847",
"0.5414548",
"0.540815",
"0.5400971",
"0.5390855",
"0.5388424",
"0.5388424",
"0.5388424"
] | 0.6701156 | 0 |
Get count of resources for requested cluster and namespace | def get_resource_count(cluster_id, namespace_id=None):
# fetching namespaced resource count
if namespace_id:
# Deployment count
deployment_count = len(client.AppsV1beta2Api().list_namespaced_deployment(namespace_id).items)
# Pod count
pod_items = client.CoreV1Api().list_namespaced_pod(namespace_id).items
pod_count = len([pod_item for pod_item in pod_items if pod_item.status.phase == 'Running'])
# Cell count
cell_pod_map = get_cell_pod_map(cluster_id)
pods_list = [pod_item.metadata.name for pod_item in pod_items]
cell_count = len([cell_pod_map[pods] for pods in pods_list if pods in cell_pod_map])
# fetching resource count for entire cluster
else:
# Deployment count
deployment_count = len(client.AppsV1beta2Api().list_deployment_for_all_namespaces().items)
# Pod count
pod_count = len(client.CoreV1Api().list_pod_for_all_namespaces().items)
# Cell count
cell_count = len(get_compute_cell_data()['items'])
# Removing resources related to the excluded namespace
if namespace_is_hidden == 'true':
resources_to_hide = get_hidden_namespace_resources(cluster_id, 'dashboard')
deployment_count = 0 if deployment_count == 0 else deployment_count - resources_to_hide['deployments']
pod_count = 0 if pod_count == 0 else pod_count - resources_to_hide['pods']
cell_count = 0 if cell_count == 0 else cell_count - resources_to_hide['cells']
return {"deployments": deployment_count,
"pods": pod_count,
'cells': cell_count} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_cluster_count(self) -> int:\n return len(self.get_all_cluster_ids())",
"def count(self, resource):\n return len(self.all(resource))",
"def get_count_all(cls, context, cluster_id):\n return cls.dbapi.get_cluster_nodegroup_count(context, cluster_id)",
"def test_get_resource_license_resource_count_list(self):\n pass",
"def count(cls, client) :\n try :\n obj = nshttpprofile()\n option_ = options()\n option_.count = True\n response = obj.get_resources(client, option_)\n if response :\n return response[0].__dict__['___count']\n return 0\n except Exception as e :\n raise e",
"def count(self):\n\n if self.cluster:\n return self.cluster.count()\n\n return super().count()",
"def getNumResources(self, *args):\n return _libsbml.CVTerm_getNumResources(self, *args)",
"def test_list_applied_cluster_resource_quota_for_all_namespaces(self):\n pass",
"def test_list_namespaced_applied_cluster_resource_quota(self):\n pass",
"def GetConceptCounts(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def count(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'count')\r\n\r\n return http.Request('GET', url), parsers.parse_json",
"def get_hidden_namespace_resources(cluster_id, namespace_id):\n # Deployment count\n deployment_count = len(client.AppsV1beta2Api().list_namespaced_deployment(namespace_id).items)\n # Pod count\n pod_items = client.CoreV1Api().list_namespaced_pod(namespace_id).items\n pod_count = len(pod_items)\n # Cell count\n cell_pod_map = get_cell_pod_map(cluster_id)\n pods_list = [pod_item.metadata.name for pod_item in pod_items]\n cell_count = len([cell_pod_map[pods] for pods in pods_list])\n return {'deployments': deployment_count,\n 'pods': pod_count,\n 'cells': cell_count\n }",
"def count(cls, client) :\n\t\ttry :\n\t\t\tobj = bfdsession()\n\t\t\toption_ = options()\n\t\t\toption_.count = True\n\t\t\tresponse = obj.get_resources(client, option_)\n\t\t\tif response :\n\t\t\t\treturn response[0].__dict__['___count']\n\t\t\treturn 0\n\t\texcept Exception as e :\n\t\t\traise e",
"def test_read_namespaced_applied_cluster_resource_quota(self):\n pass",
"def test_count_resources(ops_and_shots, expected_resources):\n ops, shots = ops_and_shots\n computed_resources = _count_resources(QuantumScript(ops=ops, shots=shots))\n assert computed_resources == expected_resources",
"def count(cls, client) :\n\t\ttry :\n\t\t\tobj = rewriteaction()\n\t\t\toption_ = options()\n\t\t\toption_.count = True\n\t\t\tresponse = obj.get_resources(client, option_)\n\t\t\tif response :\n\t\t\t\treturn response[0].__dict__['___count']\n\t\t\treturn 0\n\t\texcept Exception as e :\n\t\t\traise e",
"def get_resource_count(har_json):\n entries = har_json['log']['entries']\n\n resource_type_counts = Counter()\n\n for entry in entries:\n resource = entry['request']['url']\n dirty_resource_type = resource.split('.')[-1]\n resource_type = dirty_resource_type.split('?')[0] # Remove url params\n if len(resource_type) > 4:\n resource_type_counts['other'] += 1\n # print 'Found other resource type: {0}'.format(resource_type)\n else:\n resource_type_counts[resource_type] += 1\n\n return resource_type_counts",
"def count(cls, client) :\n\t\ttry :\n\t\t\tobj = lbprofile()\n\t\t\toption_ = options()\n\t\t\toption_.count = True\n\t\t\tresponse = obj.get_resources(client, option_)\n\t\t\tif response :\n\t\t\t\treturn response[0].__dict__['___count']\n\t\t\treturn 0\n\t\texcept Exception as e :\n\t\t\traise e",
"def count(cls, client) :\n\t\ttry :\n\t\t\tobj = lsntransportprofile()\n\t\t\toption_ = options()\n\t\t\toption_.count = True\n\t\t\tresponse = obj.get_resources(client, option_)\n\t\t\tif response :\n\t\t\t\treturn response[0].__dict__['___count']\n\t\t\treturn 0\n\t\texcept Exception as e :\n\t\t\traise e",
"def resource_discovery_association_count(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"resource_discovery_association_count\")",
"def fetch_counts(swarming, start, end, state, tags, parallel):\n\n def process(data):\n return int(data['count'])\n delta = datetime.timedelta(days=1)\n return _fetch_daily_internal(delta, swarming, process, 'tasks/count', start,\n end, state, tags, parallel)",
"def getHostCount(self, **kwargs):\n\n allParams = ['scope']\n\n params = locals()\n for (key, val) in list(params['kwargs'].items()):\n if key not in allParams:\n raise TypeError(\"Got an unexpected keyword argument '%s' to method getHostCount\" % key)\n params[key] = val\n del params['kwargs']\n\n resourcePath = '/host/count'\n resourcePath = resourcePath.replace('{format}', 'json')\n method = 'GET'\n\n queryParams = {}\n headerParams = {}\n formParams = {}\n files = {}\n bodyParam = None\n\n headerParams['Accept'] = 'application/json'\n headerParams['Content-Type'] = 'application/json'\n\n\n\n\n if ('scope' in params):\n headerParams['scope'] = params['scope']\n\n\n\n\n\n\n\n\n postData = (formParams if formParams else bodyParam)\n\n response = self.apiClient.callAPI(resourcePath, method, queryParams,\n postData, headerParams, files=files)\n\n\n if not response:\n return None\n\n responseObject = self.apiClient.deserialize(response, 'CountResult')\n return responseObject",
"def get_public_narrative_count():\n client_workspace = MongoClient(mongoDB_metrics_connection+to_workspace)\n db_workspace = client_workspace.workspace\n public_narrative_count = db_workspace.workspaceACLs.find({\"user\" : \"*\"}).count()\n return public_narrative_count;",
"def aquire_cnts(self,n):\n return self.cnts.get_n(n)",
"def query_counts(\n self,\n nodes,\n project_id=None,\n chunk_size=2500,\n format=\"json\",\n args=None,\n ):\n\n counts = {}\n\n if isinstance(nodes,str):\n nodes = [nodes]\n\n for node in nodes:\n if project_id != None:\n program, project = project_id.split(\"-\", 1)\n if args == None:\n query_txt = \"\"\"{_%s_count (project_id:\"%s\")}\"\"\" % (node, project_id)\n else:\n query_txt = \"\"\"{_%s_count (project_id:\"%s\", %s)}\"\"\" % (node, project_id, args)\n else:\n if args == None:\n query_txt = \"\"\"{_%s_count}\"\"\" % (node)\n else:\n query_txt = \"\"\"{_%s_count (%s)}\"\"\" % (node, args)\n\n # First query the node count to get the expected number of results for the requested query:\n\n try:\n res = self.sub.query(query_txt)\n count_name = \"_\".join(map(str, [\"\", node, \"count\"]))\n qsize = res[\"data\"][count_name]\n counts[node] = qsize\n except:\n print(\"\\n\\tQuery to get _{}_count failed! {}\".format(node, query_txt))\n\n\n return counts",
"def _count(self):\n if self._count_valid:\n return self._total_results\n\n result = self._cb.get_object(self._doc_class.urlobject.format(self._cb.credentials.org_key))\n results = result.get(\"results\", [])\n\n self._total_results = len(results)\n self._count_valid = True\n\n return self._total_results",
"def resource_record_set_count(self) -> int:\n return pulumi.get(self, \"resource_record_set_count\")",
"def cluster_obs_count(self):\n return(self.merged_data.groupby(\n 'labels').count().transpose().iloc[0, :])",
"def count(cls, client) :\n\t\ttry :\n\t\t\tobj = appfwlearningsettings()\n\t\t\toption_ = options()\n\t\t\toption_.count = True\n\t\t\tresponse = obj.get_resources(client, option_)\n\t\t\tif response :\n\t\t\t\treturn response[0].__dict__['___count']\n\t\t\treturn 0\n\t\texcept Exception as e :\n\t\t\traise e",
"def get_desired_count(cluster_name, service_name):\n\n response = ecs_client.describe_services(\n cluster=cluster_name, services=[service_name],\n )\n\n for service in response[\"services\"]:\n return service[\"desiredCount\"]\n\n raise Exception(\n f\"desiredCount not found for cluster: {cluster_name} service: {service_name}\"\n )"
] | [
"0.66817355",
"0.65607",
"0.6554999",
"0.6523906",
"0.65174896",
"0.64166987",
"0.6349572",
"0.6297506",
"0.6249817",
"0.6204213",
"0.6179697",
"0.6140397",
"0.6123831",
"0.6111716",
"0.61096334",
"0.6105647",
"0.6091557",
"0.6071596",
"0.6053722",
"0.6047418",
"0.6045339",
"0.60324806",
"0.6028506",
"0.60127497",
"0.60122764",
"0.6005237",
"0.596058",
"0.5957017",
"0.5947993",
"0.59468925"
] | 0.7759584 | 0 |
Get cluster capacity from node detail | def get_cluster_capacity_info(cluster_id):
cpu_capacity_info = get_node_data(cluster_id)
cpu_capacity_in_cores = round(unit_conversion(sum([int(''.join(filter(
str.isdigit, str(item['status']['allocatable']['cpu'])))) for item in cpu_capacity_info]), 'm'), 2)
memory_capacity_in_gib = round(sum(
[unit_conversion(int(''.join(filter(str.isdigit, str(item['status']['allocatable']['memory'])))),
''.join(filter(str.isalpha, str(item['status']['allocatable']['memory']))))
for item in cpu_capacity_info]), 2)
return {'cpu': cpu_capacity_in_cores, 'memory': memory_capacity_in_gib} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_capacity():\n fs.get_capacity()",
"def capacity(self):\n capacity = {}\n resources = self.nodes[0].capacity.keys()\n for r in resources:\n values = [n.capacity[r] for n in self.nodes]\n capacity[r] = mean(values) if len(values) > 0 else 0.0\n return capacity",
"def get_capacity_param(self):\n intr = self.get_interaction()\n return intr.get_capacity(None, None, None, None, raw=True)",
"def get_capacity_var(self):\n return self._capacity_var",
"def get_capacity_var(self):\n return self.get_interaction().get_capacity_var()",
"def Capacity(self) -> int:",
"def capacity(self) -> Capacity:\n raw = self._call('GET', 'capacity')\n return Capacity.parse_raw(raw)",
"def get_num_slots(self):\n # Your code here\n return self.capacity",
"def test_rest_v20_dd_systems_systemid_stats_capacity_get(self):\n pass",
"def get_capacity(self, meta, raven_vars, dispatch, t, raw=False):\n return self.get_interaction().get_capacity(meta, raven_vars, dispatch, t, raw=raw)",
"def test_get_cluster_capacity_monthly_resolution(self):\n url = \"?filter[time_scope_units]=month&filter[time_scope_value]=-1&filter[resolution]=monthly\"\n query_params = self.mocked_query_params(url, OCPCpuView)\n handler = OCPReportQueryHandler(query_params)\n query_data = [{\"row\": 1}]\n query_data, total_capacity = handler.get_cluster_capacity(query_data)\n self.assertTrue(\"capacity\" in total_capacity)\n self.assertTrue(isinstance(total_capacity[\"capacity\"], Decimal))\n self.assertTrue(\"capacity\" in query_data[0])\n self.assertIsNotNone(query_data[0].get(\"capacity\"))\n self.assertIsNotNone(total_capacity.get(\"capacity\"))\n self.assertEqual(query_data[0].get(\"capacity\"), total_capacity.get(\"capacity\"))",
"def get_used_capacity(self,tot=\"50\"):\n data=self.at_cmd(\"CPMS?\")\n index=data[1].find(tot)-1\n if data[1][index-1]==',':\n return data[index]\n else:\n return data[1][index-1:index]",
"def node_memory_allocatable(self) -> units.Quantity:\n stdout, _, _ = RunKubectlCommand(\n # TODO(pclay): Take a minimum of all nodes?\n [\n 'get', 'nodes', '-o',\n 'jsonpath={.items[0].status.allocatable.memory}'\n ])\n return units.ParseExpression(stdout)",
"def test_read_cluster_resource_quota(self):\n pass",
"def capacity(self):\n return self._cap",
"def capacity(self):\n return self._capacity",
"def capacity(self):\n return str(int(self._properties.get('capacity')) * 1073741824)",
"def capacity(self):\n raise NotImplementedError()",
"def capacity(self):\n return self._capacity",
"def capacity(self) -> Optional[int]:\n return pulumi.get(self, \"capacity\")",
"def capacity(self) -> Optional[int]:\n return pulumi.get(self, \"capacity\")",
"def capacity(self) -> Optional[int]:\n return pulumi.get(self, \"capacity\")",
"def capacity(self) -> Optional[int]:\n return pulumi.get(self, \"capacity\")",
"def capacity_used(self):\n raise NotImplementedError()",
"def capacity_gb(self) -> str:\n return pulumi.get(self, \"capacity_gb\")",
"def get_heap_cap(self):\r\n return self.capacity",
"def get_cluster_capacity(self, query_data): # noqa: C901\n annotations = self._mapper.report_type_map.get(\"capacity_aggregate\")\n if not annotations:\n return query_data, {}\n\n cap_key = list(annotations.keys())[0]\n total_capacity = Decimal(0)\n daily_total_capacity = defaultdict(Decimal)\n capacity_by_cluster = defaultdict(Decimal)\n daily_capacity_by_cluster = defaultdict(lambda: defaultdict(Decimal))\n\n q_table = self._mapper.query_table\n query = q_table.objects.filter(self.query_filter)\n query_group_by = [\"usage_start\", \"cluster_id\"]\n\n with tenant_context(self.tenant):\n cap_data = query.values(*query_group_by).annotate(**annotations)\n for entry in cap_data:\n cluster_id = entry.get(\"cluster_id\", \"\")\n usage_start = entry.get(\"usage_start\", \"\")\n if isinstance(usage_start, datetime.date):\n usage_start = usage_start.isoformat()\n cap_value = entry.get(cap_key, 0)\n if cap_value is None:\n cap_value = 0\n capacity_by_cluster[cluster_id] += cap_value\n daily_capacity_by_cluster[usage_start][cluster_id] = cap_value\n daily_total_capacity[usage_start] += cap_value\n total_capacity += cap_value\n\n if self.resolution == \"daily\":\n for row in query_data:\n cluster_id = row.get(\"cluster\")\n date = row.get(\"date\")\n if cluster_id:\n row[cap_key] = daily_capacity_by_cluster.get(date, {}).get(cluster_id, Decimal(0))\n else:\n row[cap_key] = daily_total_capacity.get(date, Decimal(0))\n elif self.resolution == \"monthly\":\n for row in query_data:\n cluster_id = row.get(\"cluster\")\n if cluster_id:\n row[cap_key] = capacity_by_cluster.get(cluster_id, Decimal(0))\n else:\n row[cap_key] = total_capacity\n\n return query_data, {cap_key: total_capacity}",
"def current_capacity(self, capacity=None):\n if capacity:\n if self._request('SC', str(capacity))[0]:\n return capacity\n else:\n done, data = self._request('GE')\n if done:\n return int(data[0])\n\n raise EvseError",
"def test_list_cluster_resource_quota(self):\n pass",
"def current_capacity_range(self):\n done, data = self._request('GC')\n if done:\n return int(data[0]), int(data[1])\n\n raise EvseError"
] | [
"0.7165189",
"0.67714345",
"0.67460614",
"0.66856956",
"0.6549141",
"0.6534968",
"0.65045047",
"0.64937866",
"0.648811",
"0.64549166",
"0.64503264",
"0.63680506",
"0.6324158",
"0.6307951",
"0.6224518",
"0.6187836",
"0.61583894",
"0.6155459",
"0.6146542",
"0.6124433",
"0.6124433",
"0.6124433",
"0.6124433",
"0.6105483",
"0.60947037",
"0.60880065",
"0.60655963",
"0.60592383",
"0.6057764",
"0.60521275"
] | 0.7078413 | 1 |
get resource usage information from pods usage | def get_cluster_usage_info(cluster_id, kind, namespace_id=None, pods_list=None):
if pods_list is None:
pods_list = []
else:
logger.info('pod list not none')
if pods_list == 'no_pod_resource':
return {'cpu': 0,
'memory': 0}
else:
logger.info('resources no 0')
# node usage stats if needed
if kind == 'nodes':
cpu_usage_info = client.CustomObjectsApi().list_cluster_custom_object('metrics.k8s.io', 'v1beta1', kind)
cpu_usage_in_cores = sum([int(''.join(filter(
str.isdigit, str(cpu_usage_item['usage']['cpu'].encode(
'utf-8'))))) for cpu_usage_item in cpu_usage_info['items']])
cpu_usage_in_percentage = round(cpu_usage_in_cores / 10000000, 0)
memory_usage = sum([unit_conversion(int(''.join(filter(
str.isdigit, str(memory_usage_item['usage']['memory'].encode(
'utf-8'))))), ''.join(filter(str.isalpha, str(memory_usage_item['usage']['memory'].encode('utf-8')))))
for memory_usage_item in cpu_usage_info['items']])
# pods usage stats
elif kind == 'pods':
if namespace_id:
cpu_usage_info = client.CustomObjectsApi().list_namespaced_custom_object('metrics.k8s.io', 'v1beta1',
namespace_id, kind)
else:
cpu_usage_info = client.CustomObjectsApi().list_cluster_custom_object('metrics.k8s.io', 'v1beta1', kind)
if len(pods_list) != 0:
cpu_usage_in_cores = round(unit_conversion(sum([int(''.join(filter(
str.isdigit, str(cpu_usage_item['containers'][0]['usage']['cpu'].encode(
'utf-8'))))) for cpu_usage_item in cpu_usage_info['items'] if cpu_usage_item['metadata']['name']
in pods_list]), 'n'), 2)
memory_usage = round(sum([unit_conversion(int(''.join(filter(
str.isdigit, str(memory_usage_item['containers'][0]['usage']['memory'].encode(
'utf-8'))))),
''.join(
filter(str.isalpha, str(memory_usage_item['containers'][0]['usage']['memory'].encode('utf-8')))))
for memory_usage_item in cpu_usage_info['items'] if memory_usage_item['metadata']['name']
in pods_list]), 2)
else:
cpu_usage_in_cores = round(unit_conversion(sum([int(''.join(filter(
str.isdigit, str(cpu_usage_item['containers'][0]['usage']['cpu'].encode(
'utf-8'))))) for cpu_usage_item in cpu_usage_info['items']]), 'n'), 2)
memory_usage = round(sum([unit_conversion(int(''.join(filter(
str.isdigit, str(memory_usage_item['containers'][0]['usage']['memory'].encode(
'utf-8'))))),
''.join(filter(str.isalpha, str(memory_usage_item['containers'][0]['usage']['memory'].encode('utf-8')))))
for memory_usage_item in cpu_usage_info['items']]), 2)
return {'cpu': cpu_usage_in_cores,
'memory': memory_usage} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_resource_info(cluster_id, kind, namespace_id=None, pods_list=None):\n if pods_list is None:\n pods_list = []\n capacity = get_cluster_capacity_info(cluster_id),\n usage = get_cluster_usage_info(cluster_id, kind, namespace_id, pods_list)\n if capacity[0]['cpu'] != 0 and capacity[0]['memory'] != 0:\n resource_info = {\n \"capacity\": capacity[0],\n \"usage\": {\n \"cpu\": usage['cpu'],\n \"cpu_percentage\": float(round(decimal.Decimal(usage['cpu'] / capacity[0]['cpu'])*100, 2)),\n \"memory\": usage['memory'],\n \"memory_percentage\": float(round(decimal.Decimal(usage['memory'] / capacity[0]['memory'])*100, 2))\n }\n }\n else:\n resource_info = {\n \"capacity\": capacity[0],\n \"usage\": {\n \"cpu\": usage['cpu'],\n \"cpu_percentage\": 0.0,\n \"memory\": usage['memory'],\n \"memory_percentage\": 0.0\n }\n }\n\n return resource_info",
"def get_resource_usages(self, project_id):\n try:\n # The API call does not give usage for keypair, fixed ips &\n # metadata items. Have raised a bug for that.\n limits = self.nova_client.limits.get(\n tenant_id=project_id).to_dict()\n resource_usage = collections.defaultdict(dict)\n resource_usage['ram'] = limits['absolute']['totalRAMUsed']\n resource_usage['cores'] = limits['absolute']['totalCoresUsed']\n resource_usage['instances'] = \\\n limits['absolute']['totalInstancesUsed']\n # If neutron is not enabled, calculate below resources from nova\n if self.no_neutron:\n resource_usage['security_groups'] = \\\n limits['absolute']['totalSecurityGroupsUsed']\n resource_usage['floating_ips'] = \\\n limits['absolute']['totalFloatingIpsUsed']\n # For time being, keypair is calculated in below manner.\n resource_usage['key_pairs'] = \\\n len(self.nova_client.keypairs.list())\n return resource_usage\n except exceptions.InternalError:\n raise",
"def getUsageInfo(self):\n return self.jsonRequest(\"/api/v1/usage\", { \"apiKey\": self._apiKey })",
"def get_kong_node_usage_metrics(opts):\n\n url = \"{0}/status\".format(opts['base_url'])\n\n r = requests.get(url)\n try:\n r.raise_for_status()\n except requests.exceptions.RequestException as e:\n logging.debug(\"http response body - %s\", r.text)\n logging.error(\"An exception occurred: (%s)\", e)\n sys.exit(2)\n\n print r.text\n\n return True",
"def get_usage_info(self):\n\n usage_info = resource.getrusage(resource.RUSAGE_SELF)\n user_cpu = usage_info[0]\n system_cpu = usage_info[1]\n rss_size = usage_info[2]\n\n return user_cpu, system_cpu, rss_size",
"def get_usages(self):\n return self.client._perform_json(\"GET\", \"/projects/%s/managedfolders/%s/usages\" % (self.project_key, self.odb_id))",
"def usage_metrics(self) -> Sequence['outputs.GetServiceQuotaUsageMetricResult']:\n return pulumi.get(self, \"usage_metrics\")",
"def get_available_resource(self, nodename):\n curent_time = time.time()\n if curent_time - self.cleanup_time > CONF.azure.cleanup_span:\n self.cleanup_time = curent_time\n self._cleanup_deleted_os_disks()\n self._cleanup_deleted_nics()\n usage_family = 'basicAFamily'\n try:\n page = self.compute.usage.list(CONF.azure.location)\n except Exception as e:\n msg = six.text_type(e)\n LOG.exception(msg)\n ex = exception.ComputeUsageListFailure(reason=six.text_type(e))\n raise ex\n usages = [i for i in page]\n cores = 0\n cores_used = 0\n for i in usages:\n if hasattr(i, 'name') and hasattr(i.name, 'value'):\n if usage_family == i.name.value:\n cores = i.limit if hasattr(i, 'limit') else 0\n cores_used = i.current_value \\\n if hasattr(i, 'current_value') else 0\n break\n return {'vcpus': cores,\n 'memory_mb': 100000000,\n 'local_gb': 100000000,\n 'vcpus_used': cores_used,\n 'memory_mb_used': 0,\n 'local_gb_used': 0,\n 'hypervisor_type': hv_type.HYPERV,\n 'hypervisor_version': 300,\n 'hypervisor_hostname': nodename,\n 'cpu_info': '{\"model\": [\"Intel(R) Xeon(R) CPU E5-2670 0 @ '\n '2.60GHz\"], \"topology\": {\"cores\": 16, \"threads\": '\n '32}}',\n 'supported_instances': [(arch.I686, hv_type.HYPERV,\n vm_mode.HVM),\n (arch.X86_64, hv_type.HYPERV,\n vm_mode.HVM)],\n 'numa_topology': None\n }",
"def memory_get_usage():\n raise NotImplementedError()",
"def resource_usage_export_config(self) -> 'outputs.ResourceUsageExportConfigResponse':\n return pulumi.get(self, \"resource_usage_export_config\")",
"def get_usage(self, start=None, end=None):\n return self.manager.get_usage(self, start=start, end=end)",
"def get_resources():\n # Acquire the lock...\n get_resources_lock.acquire()\n\n # ...but always release it\n try:\n # Construct the dictionaries as copies from nanny\n (limits,usage) = nanny.get_resource_information()\n\n\n # Calculate all the usage's\n pid = os.getpid()\n\n # Get CPU and memory, this is thread specific\n if ostype in [\"Linux\", \"Darwin\"]:\n \n # Get CPU first, then memory\n usage[\"cpu\"] = os_api.get_process_cpu_time(pid)\n\n # This uses the cached PID data from the CPU check\n usage[\"memory\"] = os_api.get_process_rss()\n\n # Get the thread specific CPU usage\n usage[\"threadcpu\"] = os_api.get_current_thread_cpu_time() \n\n\n # Windows Specific versions\n elif ostype in [\"Windows\"]:\n \n # Get the CPU time\n usage[\"cpu\"] = windows_api.get_process_cpu_time(pid)\n\n # Get the memory, use the resident set size\n usage[\"memory\"] = windows_api.process_memory_info(pid)['WorkingSetSize'] \n\n # Get thread-level CPU \n usage[\"threadcpu\"] = windows_api.get_current_thread_cpu_time()\n\n # Unknown OS\n else:\n raise EnvironmentError(\"Unsupported Platform!\")\n\n # Use the cached disk used amount\n usage[\"diskused\"] = cached_disk_used\n\n finally:\n # Release the lock\n get_resources_lock.release()\n\n # Copy the stop times\n stoptimes = process_stopped_timeline[:]\n\n # Return the dictionaries and the stoptimes\n return (limits,usage,stoptimes)",
"def get_resource_information():\n\n\n # the resources we are allowed to use is easy. We just copy this...\n resource_limit_dict = _resources_allowed_dict.copy()\n\n \n # from the other dict, we only take the resource information. (this omits\n # locks and timing information that isn't needed)\n\n # first, let's do the easy thing, the quantity resources. These are just \n # floats\n resource_use_dict = {}\n for resourcename in resource_constants.quantity_resources:\n resource_use_dict[resourcename] = _resources_consumed_dict[resourcename]\n\n # for the fungible resources (files opened, etc,), we only need a count...\n for resourcename in resource_constants.fungible_item_resources:\n resource_use_dict[resourcename] = len(_resources_consumed_dict[resourcename])\n\n # for the individual item resources (ports, etc,), we copy the set...\n for resourcename in resource_constants.individual_item_resources:\n resource_use_dict[resourcename] = _resources_consumed_dict[resourcename].copy()\n\n # and that's it!\n return (resource_limit_dict, resource_use_dict)",
"def get_cpu_usage():\n process_details = RU_OBJ.get_curr_processes()\n return json.dumps(sorted(process_details, key=lambda k: k['name']))",
"def ok(self, results):\n return \"{:5.2f}% capacity used\".format(\n results[\"usage\"].resource.usage_ratio * 100.0\n )",
"def get_used_resource_overview(self, resource_name):\n page_overview = self._page_overview()\n return page_overview.row_avaible_resource.value(resource_name)",
"def get_cpu_usage(conn):\n prev_idle = 0\n prev_total = 0\n cpu = conn.getCPUStats(-1, 0)\n if type(cpu) == dict:\n for num in range(2):\n idle = list(conn.getCPUStats(-1, 0).values())[1]\n total = sum(list(conn.getCPUStats(-1, 0).values()))\n diff_idle = idle - prev_idle\n diff_total = total - prev_total\n diff_usage = (1000 * (diff_total - diff_idle) / diff_total + 5) / 10\n prev_total = total\n prev_idle = idle\n if num == 0:\n time.sleep(1)\n else:\n if diff_usage < 0:\n diff_usage = 0\n else:\n return {'usage': None}\n return {'usage': diff_usage}",
"def usage_information(self):\n return self._usage_information",
"def get_cpu_usage(self):\n\t\treturn call_sdk_function('PrlStatCpu_GetCpuUsage', self.handle)",
"def resource_collect(pid=None):\n try:\n import psutil\n except ImportError:\n return {}\n\n p = psutil.Process(pid or os.getpid())\n return {'cpu_percent': psutil.cpu_percent(),\n 'status': p.status(),\n 'memory_percent': p.memory_percent(),\n 'memory_info_ex': p.memory_info_ex(),\n 'disk_io_counters': metrics.disk_io_counters(),\n 'net_io_counters': metrics.net_io_counters()}",
"def get_usage(self, loadbalancer=None, start=None, end=None):\n return self._manager.get_usage(loadbalancer=loadbalancer, start=start,\n end=end)",
"def ListPodMetrics(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def get_pods(self, **kw):\n resource = self.pods\n\n try:\n pod_data = resource.get(**kw)\n log.info(pod_data)\n except exceptions.NotFoundError:\n log.error(\"Failed to get pods: resource not found.\")\n raise\n except Exception:\n log.error(\"Unexpected error.\")\n raise\n\n return [item.metadata.name for item in pod_data.items]",
"def get(cls, service, name=\"\", option_=\"\") :\n\t\ttry :\n\t\t\tobj = qos_stats()\n\t\t\tif not name :\n\t\t\t\tresponse = obj.stat_resources(service, option_)\n\t\t\treturn response\n\t\texcept Exception as e:\n\t\t\traise e",
"def get_usage(self):\r\n return self.box_usage",
"def get_usage_stats(self) -> UsageStats:\n return self._usage",
"def get_usage(self):\n return self.box_usage",
"def test_get_deployment_metric(self):\n pass",
"def _calc_resource_stats(self, interval):\n result = {}\n\n if 'mem' in self.metrics:\n result['mem'] = self._get_mem_info()\n\n if 'disk-space' in self.metrics:\n result['disk-space'] = self.__get_disk_usage(self.engine.artifacts_dir).percent\n\n if 'engine-loop' in self.metrics:\n result['engine-loop'] = self.engine.engine_loop_utilization\n\n if 'conn-all' in self.metrics:\n try:\n # take all connections without address resolution\n output = subprocess.check_output(['netstat', '-an'])\n output_lines = stream_decode(output).split('\\n') # in py3 stream has 'bytes' type\n est_lines = [line for line in output_lines if line.find('EST') != -1]\n result['conn-all'] = len(est_lines)\n except BaseException as exc:\n self.log.debug(\"Failed to get connections info: %s\", exc)\n result['conn-all'] = 0\n\n if 'cpu' in self.metrics:\n result['cpu'] = self._get_cpu_percent()\n\n if 'bytes-recv' in self.metrics or 'bytes-sent' in self.metrics:\n net = self.__get_net_counters()\n if net is not None:\n tx_bytes = int((net.bytes_sent - self._net_counters.bytes_sent) / float(interval))\n rx_bytes = int((net.bytes_recv - self._net_counters.bytes_recv) / float(interval))\n self._net_counters = net\n else:\n rx_bytes = 0.0\n tx_bytes = 0.0\n\n if 'bytes-recv' in self.metrics:\n result['bytes-recv'] = rx_bytes\n if 'bytes-sent' in self.metrics:\n result['bytes-sent'] = tx_bytes\n\n if 'disk-read' in self.metrics or 'disk-write' in self.metrics:\n disk = self.__get_disk_counters()\n if disk is not None:\n dru = int((disk.read_bytes - self._disk_counters.read_bytes) / float(interval))\n dwu = int((disk.write_bytes - self._disk_counters.write_bytes) / float(interval))\n self._disk_counters = disk\n else:\n dru = 0.0\n dwu = 0.0\n\n if 'disk-read' in self.metrics:\n result['disk-read'] = dru\n if 'disk-write' in self.metrics:\n result['disk-write'] = dwu\n\n return result",
"def get_pool_stats(self, pool):\n svc = self.pool_path % pool\n ret = self.rclient.get(svc)\n if ret.status != restclient.Status.OK:\n exception_msg = (_('Error getting pool stats: '\n 'pool: %(pool)s '\n 'return code: %(ret.status)d '\n 'message: %(ret.data)s.')\n % {'pool': pool,\n 'ret.status': ret.status,\n 'ret.data': ret.data})\n raise exception.InvalidInput(reason=exception_msg)\n val = jsonutils.loads(ret.data)\n if not self._is_pool_owned(val):\n exception_msg = (_('Error pool ownership: '\n 'pool %(pool)s is not owned '\n 'by %(host)s.')\n % {'pool': pool,\n 'host': self.host})\n raise exception.InvalidInput(reason=pool)\n avail = val['pool']['usage']['available']\n used = val['pool']['usage']['used']\n return avail, used"
] | [
"0.684437",
"0.6337999",
"0.6151427",
"0.6085964",
"0.6063859",
"0.6056686",
"0.60112286",
"0.5989251",
"0.5980302",
"0.5904598",
"0.5870229",
"0.58482635",
"0.58432084",
"0.5836446",
"0.5833565",
"0.582723",
"0.57628006",
"0.57566756",
"0.5720972",
"0.5699034",
"0.5695292",
"0.56908196",
"0.5646332",
"0.56459475",
"0.56391704",
"0.56358427",
"0.5618438",
"0.56183475",
"0.5572603",
"0.55669135"
] | 0.730197 | 0 |
Providing random mock values for resource capacity and usage. | def randomise(mock_info):
mock_info["resource_info"]["usage"]["cpu"] = round(random.uniform(0, 1), 2)
mock_info["resource_info"]["usage"]["cpu_percentage"] = round(random.uniform(0, 1), 2)
mock_info["resource_info"]["usage"]["memory"] = round(random.uniform(0, 1), 2)
mock_info["resource_info"]["usage"]["memory_percentage"] = round(random.uniform(0, 1), 2)
return mock_info | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_glass_capacity__has_expected_default_value():\n glass = moet.create_glass(\"A\")\n assert glass.capacity == 250",
"def _get_random_returns(self): \n return self.asset_process.distrib.random()",
"def test_sdram(self):\n sdram = SDRAMResource(128 * (2**20))\n self.assertEqual(sdram.get_value(), 128 * (2**20))\n sdram = SDRAMResource(128 * (2**19))\n self.assertEqual(sdram.get_value(), 128 * (2**19))\n sdram = SDRAMResource(128 * (2**21))\n self.assertEqual(sdram.get_value(), 128 * (2**21))",
"def __init__(self, capacity, units=0):\n self.capacity = capacity\n self.units = units",
"def _get_random_value(self):\r\n return random.randint(1, 10)",
"def random(vms_cpu):\n return choice(vms_cpu.keys())",
"def randomize_value(self) -> None:",
"def test_rest_v20_dd_systems_systemid_stats_capacity_get(self):\n pass",
"def __init__(self):\n self.set_health(100)\n self.set_recharge(random.randint(100, 2000) / 10000)",
"def generate_products(self = random.sample, name = random.choice(result), price = random.randint(5, 100), weight = random.randint(5, 100), \nflammability= random.uniform(0, 2.5)):\n return sample",
"def test_set_glass_capacity__with_valid_numbers__returns_expected():\n glass = moet.create_glass(\"A\")\n numbers = [0, 1, 250, 0.0, 100.5]\n for number in numbers:\n glass.capacity = number\n assert glass.capacity == number",
"def __init__(self, capacity, initial):\n\t\tself.capacity = capacity\n\t\tself.amount = initial",
"def test_loc_techs_resource_capacity_constraint(self, override):\n\n if override is None:\n m = build_model(\n {}, \"simple_supply_and_supply_plus,two_hours,investment_costs\"\n )\n m.run(build_only=True)\n expr = m._backend_model.resource_cap[(\"b\", \"test_supply_plus\")]\n assert expr.lb == 0\n assert expr.ub == np.inf\n\n else:\n m = build_model(\n {\n \"techs.test_supply_plus.constraints.resource_cap_{}\".format(\n override\n ): 10\n },\n \"simple_supply_and_supply_plus,two_hours,investment_costs\",\n )\n m.run(build_only=True)\n expr = m._backend_model.resource_cap[(\"b\", \"test_supply_plus\")]\n if override == \"max\":\n assert expr.ub == 10\n assert expr.lb == 0\n elif override == \"equals\":\n assert expr.ub == 10\n assert expr.lb == 10\n if override == \"min\":\n assert expr.lb == 10\n assert expr.ub == np.inf",
"def randomize(self):\n self.size = randint(1,5)\n self.resource = randint(1,3)\n self.temperature = randint(20, 1000)\n self.gravity = randint(0, 10)\n for key in self.get_atmosphere().keys():\n setattr(self, key, randint(0, 5))\n for attribute_count in range(randint(0, 3)):\n pa = PlanetaryAttribute.objects.order_by('?')[0]\n self.attributes.add(pa)",
"def init_physical_resources():\n test_physical_resources = []\n\n # add info to list in memory, one by one, following signature values\n phys_resrc_ID = 1\n phys_resrc_name = \"small-cavium-1\"\n phys_resrc_info = \"Jump server in Arm pod, 48 cores, 64G RAM, 447G SSD, aarch64 Cavium ThunderX, Ubuntu OS\"\n phys_resrc_IPAddress = \"10.10.50.12\"\n phys_resrc_MACAddress = \"00-14-22-01-23-45\"\n\n test_physical_resources.append(PhysicalResource(phys_resrc_ID, phys_resrc_name,\n phys_resrc_info,\n phys_resrc_IPAddress,\n phys_resrc_MACAddress))\n\n phys_resrc_ID = 2\n phys_resrc_name = \"medium-cavium-1\"\n phys_resrc_info = \"Jump server in New York pod, 96 cores, 64G RAM, 447G SSD, aarch64 Cavium ThunderX, Ubuntu OS\"\n phys_resrc_IPAddress = \"30.31.32.33\"\n phys_resrc_MACAddress = \"0xb3:22:05:c1:aa:82\"\n\n test_physical_resources.append(PhysicalResource(phys_resrc_ID, phys_resrc_name,\n phys_resrc_info,\n phys_resrc_IPAddress,\n phys_resrc_MACAddress))\n\n phys_resrc_ID = 3\n phys_resrc_name = \"mega-cavium-666\"\n phys_resrc_info = \"Jump server in Las Vegas, 1024 cores, 1024G RAM, 6666G SSD, aarch64 Cavium ThunderX, Ubuntu OS\"\n phys_resrc_IPAddress = \"54.53.52.51\"\n phys_resrc_MACAddress = \"01-23-45-67-89-ab\"\n\n test_physical_resources.append(PhysicalResource(phys_resrc_ID, phys_resrc_name,\n phys_resrc_info,\n phys_resrc_IPAddress,\n phys_resrc_MACAddress))\n\n\n # write list to binary file\n write_list_bin(test_physical_resources, FILE_PHYSICAL_RESOURCES)\n\n return test_physical_resources",
"def test_cpu(self):\n cpu = CPUCyclesResource(128 * (2**20))\n self.assertEqual(cpu.get_value(), 128 * (2**20))\n cpu = CPUCyclesResource(128 * (2**19))\n self.assertEqual(cpu.get_value(), 128 * (2**19))\n cpu = CPUCyclesResource(128 * (2**21))\n self.assertEqual(cpu.get_value(), 128 * (2**21))",
"def test_set_glass_capacity__with_invalid_numbers__returns_expected():\n glass = moet.create_glass(\"A\")\n with pytest.raises(ValueError):\n glass.capacity = -100",
"def get_random(self):\n base_genom = \"1\" * sum(self._size_var)\n return utils.randomise_a_string(base_genom)",
"def Capacity(self) -> int:",
"def capacity_used(self):\n raise NotImplementedError()",
"def test_resource_container(self):\n sdram = SDRAMResource(128 * (2**20))\n dtcm = DTCMResource(128 * (2**20) + 1)\n cpu = CPUCyclesResource(128 * (2**20) + 2)\n\n container = ResourceContainer(dtcm, sdram, cpu)\n self.assertEqual(container.sdram.get_value(), 128 * (2**20))\n self.assertEqual(container.dtcm.get_value(), 128 * (2**20) + 1)\n self.assertEqual(container.cpu.get_value(), 128 * (2**20) + 2)\n\n sdram = SDRAMResource(128 * (2**19))\n dtcm = DTCMResource(128 * (2**19) + 1)\n cpu = CPUCyclesResource(128 * (2**19) + 2)\n\n container = ResourceContainer(dtcm, sdram, cpu)\n self.assertEqual(container.sdram.get_value(), 128 * (2**19))\n self.assertEqual(container.dtcm.get_value(), 128 * (2**19) + 1)\n self.assertEqual(container.cpu.get_value(), 128 * (2**19) + 2)\n\n sdram = SDRAMResource(128 * (2**21))\n dtcm = DTCMResource(128 * (2**21) + 1)\n cpu = CPUCyclesResource(128 * (2**21) + 2)\n\n container = ResourceContainer(dtcm, sdram, cpu)\n self.assertEqual(container.sdram.get_value(), 128 * (2**21))\n self.assertEqual(container.dtcm.get_value(), 128 * (2**21) + 1)\n self.assertEqual(container.cpu.get_value(), 128 * (2**21) + 2)",
"def get_capacity():\n fs.get_capacity()",
"def test_read_cluster_resource_quota(self):\n pass",
"def get_capacity_var(self):\n return self._capacity_var",
"def test_capacity_factor(pudl_out_eia):\n print(\"\\nCalculating generator capacity factors...\")\n cf = pudl_out_eia.capacity_factor()\n print(f\" capacity_factor: {len(cf)} records\")",
"def test_limit_cpu(self):\n url = '/api/apps'\n body = {'cluster': 'autotest'}\n response = self.client.post(url, json.dumps(body), content_type='application/json')\n self.assertEqual(response.status_code, 201)\n app_id = response.data['id']\n url = '/api/apps/{app_id}/limits'.format(**locals())\n # check default limit\n response = self.client.get(url, content_type='application/json')\n self.assertEqual(response.status_code, 200)\n self.assertIn('cpu', response.data)\n self.assertEqual(json.loads(response.data['cpu']), {})\n # regression test for https://github.com/deis/deis/issues/1563\n self.assertNotIn('\"', response.data['cpu'])\n # set an initial limit\n body = {'cpu': json.dumps({'web': '1024'})}\n response = self.client.post(url, json.dumps(body), content_type='application/json')\n self.assertEqual(response.status_code, 201)\n self.assertIn('x-deis-release', response._headers)\n limit1 = response.data\n # check memory limits\n response = self.client.get(url, content_type='application/json')\n self.assertEqual(response.status_code, 200)\n self.assertIn('cpu', response.data)\n cpu = json.loads(response.data['cpu'])\n self.assertIn('web', cpu)\n self.assertEqual(cpu['web'], '1024')\n # set an additional value\n body = {'cpu': json.dumps({'worker': '512'})}\n response = self.client.post(url, json.dumps(body), content_type='application/json')\n self.assertEqual(response.status_code, 201)\n limit2 = response.data\n self.assertNotEqual(limit1['uuid'], limit2['uuid'])\n cpu = json.loads(response.data['cpu'])\n self.assertIn('worker', cpu)\n self.assertEqual(cpu['worker'], '512')\n self.assertIn('web', cpu)\n self.assertEqual(cpu['web'], '1024')\n # read the limit again\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n limit3 = response.data\n self.assertEqual(limit2, limit3)\n cpu = json.loads(response.data['cpu'])\n self.assertIn('worker', cpu)\n self.assertEqual(cpu['worker'], '512')\n self.assertIn('web', cpu)\n self.assertEqual(cpu['web'], '1024')\n # unset a value\n body = {'memory': json.dumps({'worker': None})}\n response = self.client.post(url, json.dumps(body), content_type='application/json')\n self.assertEqual(response.status_code, 201)\n limit4 = response.data\n self.assertNotEqual(limit3['uuid'], limit4['uuid'])\n self.assertNotIn('worker', json.dumps(response.data['memory']))\n # disallow put/patch/delete\n self.assertEqual(self.client.put(url).status_code, 405)\n self.assertEqual(self.client.patch(url).status_code, 405)\n self.assertEqual(self.client.delete(url).status_code, 405)\n return limit4",
"def test_create_cluster_resource_quota(self):\n pass",
"def test_sample(system_generator):\n\n name, test = system_generator()\n print(name)\n\n w_F, w_R, N_k = test.sample([10, 8], mode=\"wFwR\")\n w_F, w_R, N_k = test.sample([1, 1], mode=\"wFwR\")\n w_F, w_R, N_k = test.sample([10, 0], mode=\"wFwR\")\n w_F, w_R, N_k = test.sample([0, 5], mode=\"wFwR\")",
"def get_capacity_param(self):\n intr = self.get_interaction()\n return intr.get_capacity(None, None, None, None, raw=True)",
"def test_default_num(self):\n products = generate_products()\n self.assertEqual(len(products), 30)"
] | [
"0.631372",
"0.61010695",
"0.60488284",
"0.6046735",
"0.5982311",
"0.5956989",
"0.59451956",
"0.5942304",
"0.59133613",
"0.589804",
"0.58927816",
"0.5846016",
"0.57881594",
"0.5747037",
"0.57254654",
"0.57179344",
"0.57009256",
"0.5674274",
"0.5642999",
"0.56372076",
"0.56222486",
"0.560608",
"0.55932564",
"0.55919045",
"0.55906326",
"0.55837125",
"0.55759865",
"0.5560307",
"0.55548155",
"0.55473036"
] | 0.7475767 | 0 |
Returns N samples from the prior. | def sample_from_prior(self, n_samples):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sample_from_prior(self, n_samples):\n\n p0 = self.min + self.rng.rand(n_samples) * (self.max - self.min)\n return p0[:, np.newaxis]",
"def sample_from_prior(self, n_samples):\n\n p0 = self.rng.normal(loc=self.mean, scale=self.sigma, size=n_samples)\n return p0[:, np.newaxis]",
"def sample_from_prior(self, n_samples):\n\n p0 = self.rng.lognormal(mean=self.mean, sigma=self.sigma, size=n_samples)\n return p0[:, np.newaxis]",
"def sample_from_prior(self, n_samples):\n\n lamda = np.abs(self.rng.standard_cauchy(size=n_samples))\n\n p0 = np.log(np.abs(self.rng.randn() * lamda * self.scale))\n return p0[:, np.newaxis]",
"def sample_prior(size):\n return torch.randn(size)",
"def sample_from_prior(self):\n raise NotImplementedError",
"def sample_parameters_prior(self, n_samples, random_seed=None):\n\n if random_seed is not None:\n np.random.seed(random_seed)\n samples = []\n samples.append(self.var_noise.sample_from_prior(n_samples))\n samples.append(self.mean.sample_from_prior(n_samples))\n samples.append(self.kernel.sample_parameters(n_samples))\n\n return np.concatenate(samples, 1)",
"def sample(self, n_samples=1):\n\n\t\tif isinstance(n_samples, int):\n\t\t\tn_samples = (n_samples,)\n\t\tbeta_sample = self.beta_prior.sample(n_samples)\n\t\tgamma_sample = self.gamma_prior.sample(n_samples)\n\t\tp = torch.stack((beta_sample, gamma_sample)).T\n\t\tif n_samples == (1,):\n\t\t\tp = p[0]\n\t\tp = p.numpy()\n\t\treturn p",
"def sample_from_prior(self, *args, **kwargs):\n pass",
"def forward_sample(self, n):\n return self.flow.sample(n)",
"def prior_sample(self):\n pass",
"def generate_samples(self, n_samples):",
"def generate_samples(self, n_samples):",
"def sample(self, n):\n # Est probs from priority weights\n summed = sum(self.weight) + self.eps\n self.probs = [w / summed for w in self.priority]\n\n # Wieghted sample\n return np.random.choice(self.memory, size=n, p=self.probs).tolist()",
"def prior_samples(self, nsamples, coords=None):\n S = self.eval_S(self.kappa, self.sigma_f)\n if coords is None:\n coords = slice(self.mesh.num_vertices()) # take all coords\n weights = np.random.normal(scale=np.sqrt(S),\n size=(nsamples,) + S.shape) # shape (s, l)\n prior = np.einsum('sl,nl->sn', weights, self.eigenfunctions[coords])\n\n return prior",
"def samples(self):\n return self._values[:self.nsamples]",
"def generate_samples(self, no=10):\n observations = []\n state_sequence = []\n initial_state = np.random.choice(\n self.latent_variable_markov_chain.states,\n p=self.prior_probabilities)\n state_sequence.append(initial_state)\n observations.append(self.observation_from_state(initial_state))\n current_state = initial_state\n for i in range(2, no):\n next_state = self.latent_variable_markov_chain.next_state(current_state)\n state_sequence.append(next_state)\n observations.append(self.observation_from_state(next_state))\n current_state = next_state\n return observations, state_sequence",
"def sample(self, n, include=True):\n return [self(t / n) for t in range(n + int(include))]",
"def prior(n=10):\r\n p = []\r\n trials = 0\r\n acc = 0\r\n while acc < n:\r\n trials += 1\r\n r = np.random.rand(2) * np.array([4, 2]) + np.array([-2, -1])\r\n # print(\"r: \", r)\r\n if r[1] + r[0] >= -1 and r[1] - r[0] >= -1:\r\n p.append(r)\r\n acc += 1\r\n # print(\"trials: \", trials, \", acc: \", acc)\r\n return p",
"def pull_reads(self, n, prng): \n if not self.sampling:\n self.convert_to_array()\n index = prng.randint(0, self.total, size=n)\n index = np.sort(index)\n return self.reads[index,:]",
"def sample(self, n=1):\n raise NotImplementedError",
"def get_training_n(self, n: Optional[int] = None, random: bool = True) -> Tuple[torch.Tensor, torch.Tensor]:\n return self.__sample(self.train_data, self.train_los, n, random)",
"def sample(self, n):\n raise NotImplementedError",
"def _sample(self, n=1):\n return [self[i] for i in np.random.choice(self.length, n, replace=False)]",
"def get_n_samples(self):\n return {op.rx.get_n_samples() for op in self.ops}",
"def sample(self, n_samples):\n\n z = sample_prior((n_samples,) + self.flow.z_shape)\n ldj = torch.zeros(z.size(0))\n\n z, ldj = self.flow (z, ldj, reverse=True)\n z, ldj = self.logit_normalize(z, ldj, reverse=True)\n\n return z",
"def Sample(n=6):\n t = [random.normalvariate(0.0, 1.0) for i in range(n)]\n t.sort()\n return t",
"def posterior_sampler(self, nsamples, seed=0, verbose=True):\n\n import random\n\n random.seed(seed)\n sample = self.get_chain()[-self.get_tune:]\n sample = sample.reshape(-1, sample.shape[-1])\n sample = random.choices(sample, k=nsamples)\n\n return sample",
"def generate_samples(self, n_samples=100):\n \t\t\n\t\t#how many times should ancestral sampling be run\n\t\t#n_samples\n prior_samples=[]\n for i in range(0,n_samples):\n prior_sample = self.prior.get_samples(\n n_latent_nodes=self.n_latent_nodes,\n n_gibbs_sampling_steps=100, \n sampling_mode=\"gibbs_ancestral\")\n prior_sample = torch.cat(prior_sample)\n prior_samples.append(prior_sample)\n prior_samples=torch.stack(prior_samples)\n # prior_samples = tf.slice(prior_samples, [0, 0], [num_samples, -1])\n output_activations = self.decoder.decode(prior_samples)\n output_activations = output_activations+self._train_bias\n output_distribution = Bernoulli(logit=output_activations)\n output=torch.sigmoid(output_distribution.logits)\n # output_activations[0] = output_activations[0] + self.train_bias\n # output_dist = FactorialBernoulliUtil(output_activations)\n # output_samples = tf.nn.sigmoid(output_dist.logit_mu)\n # print(\"--- \",\"end VAE::generate_samples()\")\n return output",
"def get_random_samplers(self, n):\n if not self.has_samplers():\n self.draw_samplers()\n \n def get_shuffledcopy(x):\n x_ = x.copy()\n np.random.shuffle(x_)\n return x_\n \n return get_shuffledcopy(self.samplers)[:n]"
] | [
"0.7308122",
"0.7254096",
"0.71898454",
"0.7128596",
"0.6979248",
"0.6961805",
"0.67606914",
"0.6745526",
"0.6690407",
"0.6622515",
"0.6562736",
"0.65446556",
"0.65446556",
"0.6425273",
"0.6419413",
"0.6395372",
"0.63613814",
"0.6323354",
"0.63091654",
"0.6305061",
"0.6287302",
"0.6239015",
"0.6221694",
"0.6207399",
"0.6197996",
"0.6184932",
"0.6165855",
"0.6153619",
"0.6153358",
"0.6140597"
] | 0.7954478 | 0 |
Computes the gradient of the prior with respect to theta. | def gradient(self, theta):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def gradient(self, theta):\n return (1 / (self.sigma * np.sqrt(2 * np.pi))) * (\n -theta / (self.sigma ** 2) * np.exp(-(theta ** 2) / (2 * self.sigma ** 2))\n )",
"def gradient(self, theta):\n a = -(6 * self.scale ** 2)\n b = 3 * self.scale ** 2 + np.exp(2 * theta)\n b *= np.log(3 * self.scale ** 2 * np.exp(-2 * theta) + 1)\n return a / b",
"def gradient(theta, X, y, learning_rate):\n m = len(y)\n\n theta = theta.reshape((-1,1))\n grad = np.zeros(theta.shape)\n h = sigmoid(np.dot(X, theta)) \n \n grad = np.dot((h-y).T, X)/m\n grad = grad.T\n grad[1:] += (learning_rate/m)*theta[1:]\n return grad",
"def gradient_function(theta, X, y):\n\n grad = None\n #######################################################################\n # TODO: #\n # Compute the gradient for a particular choice of theta. #\n # Compute the partial derivatives and set grad to the partial #\n # derivatives of the cost w.r.t. each parameter in theta #\n # #\n #######################################################################\n \n theta = theta[:, np.newaxis]\n \n thetatrans = theta.T\n Xtrans = X.T\n \n MulThetaX = np.dot(thetatrans, Xtrans)\n \n h = sigmoid(MulThetaX)\n \n grad = (y - h) * Xtrans\n\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n return grad",
"def compute_gradient(theta, X, y):\n m = X.shape[0]\n grad_theta = np.dot(X.transpose(), (np.dot(X, theta) - y)) / m\n #print theta, grad_theta, objective_function(theta, X, y)\n return grad_theta",
"def gradient(self, theta):\n return np.zeros([theta.shape[0]])",
"def gradientFunction(theta, X, y):\n y = y[:, 0]\n m = y.shape # number of training samples\n grad = X.T.dot(sigmoid(theta.dot(X.T))-1*y)\n grad /= m\n return grad",
"def gradient(theta, x, y):\n m = len(y)\n n = len(theta)\n z = theta.dot(x.T)\n grad = np.zeros(n)\n for i in xrange(m):\n grad += (g(z[i]) - y[i]) * x[i]\n return 1. / m * grad",
"def gradient(theta, X, y, Lambda=0.0):\n m = X.shape[0] # number of samples\n\n h = hypothesis(theta, X)\n\n if Lambda:\n g_0 = (1/m)*(X.T@(h - y))[0]\n g_1 = (1/m)*(X.T@(h - y))[1:] + (Lambda/m)*theta[1:] # skip theta-0\n \n return np.append(g_0, g_1)\n else:\n return (1/m)*(X.T@(h - y))",
"def gradient(x, y, theta):\n if x.ndim == 1:\n x = x[:, np.newaxis]\n if y.ndim == 2 and y.shape[1] == 1:\n y = y.flatten()\n if theta.ndim == 2 and theta.shape[1] == 1:\n theta = theta.flatten()\n\n if (x.size == 0 or y.size == 0 or theta.size == 0\n or x.ndim != 2 or y.ndim != 1 or theta.ndim != 1\n or x.shape[0] != y.shape[0] or x.shape[1] + 1 != theta.shape[0]):\n return None\n\n x_padded = np.c_[np.ones(x.shape[0]), x]\n\n return x_padded.T.dot(x_padded.dot(theta) - y) / y.shape[0]",
"def calc_grad(X, Y, theta):\n m, n = X.shape\n\n margins = Y * X.dot(theta)\n probs = 1. / (1 + np.exp(margins))\n grad = -(1./m) * (X.T.dot(probs * Y))\n\n return grad",
"def gradient_descent(X, y, theta, alpha, total_iterations, hypothesis):\n len_theta = len(theta)\n m = len(y)\n one_over_m = (1.0 / float(m))\n\n for _ in range(0, total_iterations):\n temp_theta = numpy.zeros(len_theta)\n\n X_by_theta_minus_y = numpy.subtract(hypothesis(numpy.matrix(theta), X), y)\n\n for j in range(0, len_theta):\n jth_column_of_X = X[:,j]\n derivative_j = one_over_m * numpy.multiply(X_by_theta_minus_y, jth_column_of_X).sum()\n temp_theta[j] = theta[j] - alpha*derivative_j\n\n theta = temp_theta\n\n return numpy.matrix(theta)",
"def gradient_descent(self, X, theta, Y, m):\n\n Z = X.dot(theta)\n H = Predict.g(Z)\n gradient = np.dot(X.T, (H - Y)) / m\n return self.alpha * gradient",
"def gradientFunctionReg(theta, X, y, Lambda): \n y = np.squeeze(y)\n m = y.shape # number of training samples\n grad = X.T.dot(sigmoid(theta.dot(X.T))-1*y)\n grad[1:] = grad[1:] + Lambda*theta[1:]\n grad /= m\n\n return grad",
"def gradientFunctionReg(theta, X, y, Lambda):\n m = len(y) # number of training examples\n grad = np.zeros(theta.shape[0])\n theta = np.transpose(theta)\n sum_1 = 0\n X = X.values\n y = y.values\n #calcuate the theta_0 \n# ====================== YOUR CODE HERE ======================\n# Instructions: Compute the gradient of a particular choice of theta.\n# Compute the partial derivatives and set grad to the partial\n# derivatives of the cost w.r.t. each parameter in theta\n for i in range(theta.shape[0]):\n if i == 0:\n for j in range(m):\n sum_1 += (sigmoid(np.dot(X[j],theta)) - y[j]) * X[j,i]\n else:\n for j in range(m):\n sum_1 += (sigmoid(np.dot(X[j],theta)) - y[j]) * X[j,i] + Lambda*theta[i]\n grad[i] = sum_1/m\n sum_1 = 0\n\n# =============================================================\n\n return grad",
"def logit_cost_grad(self, theta, X, y):\n\n grad = np.zeros(len(theta))\n\n ### YOUR CODE HERE\n sig = utils.sigmoid(theta)\n # sig = np.subtract(sig, y)\n sig = sig - y\n grad = np.dot(X.T, sig) + 2 * self.params['lamb'] * self.regularizer[1](self.weights)\n ### END YOUR CODE\n\n return grad",
"def compute_loss_gradient(theta_vector, *args):\n\n psi = args[0] # feed psi as a parameter\n circ_depth = args[1]\n num_qbits = args[2]\n theta = np.reshape(theta_vector, (circ_depth, num_qbits)) # reshapes the flat theta vector\n fidelity = get_fidelity(theta, psi)\n\n # the derivative of the loss wrt fidelity\n dl_df = -0.5 * fidelity ** (-0.5)\n\n df_dtheta = [] # a list of partial derivatives of the fidelity wrt the theta parameters\n\n for index in range(len(theta_vector)):\n layer_index = index // num_qbits\n qbit_index = index % num_qbits\n\n theta_plus = np.copy(theta)\n theta_plus[layer_index][qbit_index] += np.pi / 2 # added pi/2 to the ith theta parameter\n\n theta_minus = np.copy(theta)\n theta_minus[layer_index][qbit_index] -= np.pi / 2 # subtracted pi/2 to the ith theta parameter\n\n df_dtheta_i = 0.5 * (get_fidelity(theta_plus, psi) - get_fidelity(theta_minus, psi)) # ith derivative\n df_dtheta.append(df_dtheta_i)\n\n df_dtheta = np.array(df_dtheta)\n dl_dtheta = dl_df * df_dtheta # chain rule to get partial derivative of loss wrt theta parameters\n\n return dl_dtheta",
"def run_gradient_descent_iteration(X, Y, theta, alpha, lambda_factor, temp_parameter):\n delta = sparse.coo_matrix(theta.shape).toarray()\n\n h = compute_probabilities(X, theta, temp_parameter)\n\n for j in range(delta.shape[0]):\n y = Y\n y = np.where(y != j, 0, 1)\n p = y - h[j]\n\n x = X.T * p\n x = x.T\n x = x.sum(axis=0)\n\n grad = -x / (temp_parameter * X.shape[0]) + lambda_factor * theta[j]\n\n delta[j] += grad\n\n theta -= alpha * delta\n\n return theta",
"def gradient_descent(features, values, theta, alpha, num_iterations):\n \n # number of points\n npoints = len(values)\n \n # intialize cost history\n cost_history = []\n \n # num_interations iterations\n for iiter in range(num_iterations):\n \n # compute and store cost\n cost = compute_cost(features, values, theta)\n cost_history.append(cost)\n \n # update values of theta\n values_predicted = np.dot(features, theta)\n theta = theta + (alpha/npoints)*(np.dot(values - values_predicted,features))\n \n return theta, pandas.Series(cost_history)",
"def gradient_descent(x, y, theta=[[0], [0]]):\n m = y.size\n j_history = []\n for i in range(ITERATIONS):\n h = x.dot(theta)\n theta = theta - (ALPHA / m) * (x.T.dot(h - y))\n j_history.append(compute_cost(x, y, theta))\n return theta, j_history",
"def approx_grad(theta, X, y):\n grad_a = np.array([(cost(theta + e, X, y) - cost(theta - e, X, y)) / (2 * 1e-5)\n for e in np.identity(len(theta)) * 1e-5])\n return grad_a",
"def gradient_descent(features, values, theta, alpha, num_iterations):\r\n\r\n m = len(values)\r\n cost_history = []\r\n\r\n for i in range (num_iterations):\r\n \r\n h = numpy.dot(features, theta)\r\n \r\n theta = theta - alpha / m * numpy.dot((h-values),features)\r\n \r\n cost = compute_cost(features, values, theta)\r\n \r\n cost_history.append(cost)\r\n\r\n return theta, pandas.Series(cost_history) # leave this line for the grader\r",
"def calculate_gradient(y, tx, w): \n return tx.T@(sigmoid(tx@w)-y)",
"def gradient_descent(features, values, theta, alpha, num_iterations):\n m = len(values)\n cost_history = []\n\n for i in range(num_iterations):\n predicted_values = np.dot(features, theta)\n delta = alpha / m * np.dot((predicted_values - values), features)\n theta = theta - delta\n cost = compute_cost(features, values, theta)\n cost_history.append(cost)\n return theta, pandas.Series(cost_history)",
"def lr_cost_function_grad(theta: np.ndarray, X: np.ndarray, y: np.ndarray, l: float) -> np.ndarray:\n # Initialize some useful values\n m = len(y) # number of training examples\n\n # You need to return the following variable correctly.\n grad = np.zeros(theta.shape)\n\n # ====================== YOUR CODE HERE ======================\n # Instructions: Compute the partial derivatives and set grad to the partial\n # derivatives of the cost w.r.t. each parameter in theta.\n\n # =============================================================\n return grad",
"def gradient_ascent(self, w, X, y, lr):\r\n # INSERT YOUR CODE HERE\r\n #raise Exception('Function not yet implemented!')\r\n # gradient = x_j*(y-σ(wTX))\r\n return np.dot(X.T, y-self.sigmoid(np.dot(X, w)))",
"def computeNumericalGradient(J, theta):\n numgrad = np.zeros(theta.size)\n perturb = np.zeros(theta.size)\n e = 1e-4\n for p in range(theta.size):\n # Set perturbation vector\n perturb[p] = e\n loss1 = J(theta - perturb)[0]\n loss2 = J(theta + perturb)[0]\n # Compute Numerical Gradient\n numgrad[p] = (loss2 - loss1) / (2*e)\n perturb[p] = 0\n return numgrad",
"def computeNumericalGradient(J, theta):\n numgrad = np.zeros_like(theta).reshape(-1)\n perturb = np.zeros_like(theta).reshape(-1)\n e = 1e-4\n for p in range(theta.size):\n # Set perturbation vector\n perturb[p] = e\n loss1, _ = J(theta - perturb.reshape(theta.shape))\n loss2, _ = J(theta + perturb.reshape(theta.shape))\n # Compute Numerical Gradient\n numgrad[p] = (loss2 - loss1) / (2 * e)\n perturb[p] = 0\n\n return numgrad.reshape(theta.shape)",
"def gradientdescent(cost_func, theta, args=(), delta_func = 0):\n step = 1\n old_cost = 0\n while True:\n theta_old = theta.copy()\n cost = cost_func(theta, *args)\n delta = delta_func(theta, *args)\n theta = theta - step * delta\n if cost > old_cost and old_cost != 0:\n step = step*0.7\n if np.allclose(theta_old, theta):\n break\n old_cost = cost\n return theta",
"def gradient(cls, x):\n y = Sigmoid.apply(x)\n return np.multiply(y, 1 - y)"
] | [
"0.8256445",
"0.8055304",
"0.7947039",
"0.7867961",
"0.78287417",
"0.78280646",
"0.7741965",
"0.77182055",
"0.77098596",
"0.76828635",
"0.76208895",
"0.75614756",
"0.72576725",
"0.72168595",
"0.7216574",
"0.7211889",
"0.70136374",
"0.70051277",
"0.6994997",
"0.69862175",
"0.69123644",
"0.6910823",
"0.68812525",
"0.6881161",
"0.6863728",
"0.6838628",
"0.6829457",
"0.68013173",
"0.67898685",
"0.67635065"
] | 0.83195007 | 0 |
Returns N samples from the prior. | def sample_from_prior(self, n_samples):
p0 = self.min + self.rng.rand(n_samples) * (self.max - self.min)
return p0[:, np.newaxis] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sample_from_prior(self, n_samples):\n pass",
"def sample_from_prior(self, n_samples):\n\n p0 = self.rng.normal(loc=self.mean, scale=self.sigma, size=n_samples)\n return p0[:, np.newaxis]",
"def sample_from_prior(self, n_samples):\n\n p0 = self.rng.lognormal(mean=self.mean, sigma=self.sigma, size=n_samples)\n return p0[:, np.newaxis]",
"def sample_from_prior(self, n_samples):\n\n lamda = np.abs(self.rng.standard_cauchy(size=n_samples))\n\n p0 = np.log(np.abs(self.rng.randn() * lamda * self.scale))\n return p0[:, np.newaxis]",
"def sample_prior(size):\n return torch.randn(size)",
"def sample_from_prior(self):\n raise NotImplementedError",
"def sample_parameters_prior(self, n_samples, random_seed=None):\n\n if random_seed is not None:\n np.random.seed(random_seed)\n samples = []\n samples.append(self.var_noise.sample_from_prior(n_samples))\n samples.append(self.mean.sample_from_prior(n_samples))\n samples.append(self.kernel.sample_parameters(n_samples))\n\n return np.concatenate(samples, 1)",
"def sample(self, n_samples=1):\n\n\t\tif isinstance(n_samples, int):\n\t\t\tn_samples = (n_samples,)\n\t\tbeta_sample = self.beta_prior.sample(n_samples)\n\t\tgamma_sample = self.gamma_prior.sample(n_samples)\n\t\tp = torch.stack((beta_sample, gamma_sample)).T\n\t\tif n_samples == (1,):\n\t\t\tp = p[0]\n\t\tp = p.numpy()\n\t\treturn p",
"def sample_from_prior(self, *args, **kwargs):\n pass",
"def forward_sample(self, n):\n return self.flow.sample(n)",
"def prior_sample(self):\n pass",
"def generate_samples(self, n_samples):",
"def generate_samples(self, n_samples):",
"def sample(self, n):\n # Est probs from priority weights\n summed = sum(self.weight) + self.eps\n self.probs = [w / summed for w in self.priority]\n\n # Wieghted sample\n return np.random.choice(self.memory, size=n, p=self.probs).tolist()",
"def prior_samples(self, nsamples, coords=None):\n S = self.eval_S(self.kappa, self.sigma_f)\n if coords is None:\n coords = slice(self.mesh.num_vertices()) # take all coords\n weights = np.random.normal(scale=np.sqrt(S),\n size=(nsamples,) + S.shape) # shape (s, l)\n prior = np.einsum('sl,nl->sn', weights, self.eigenfunctions[coords])\n\n return prior",
"def samples(self):\n return self._values[:self.nsamples]",
"def generate_samples(self, no=10):\n observations = []\n state_sequence = []\n initial_state = np.random.choice(\n self.latent_variable_markov_chain.states,\n p=self.prior_probabilities)\n state_sequence.append(initial_state)\n observations.append(self.observation_from_state(initial_state))\n current_state = initial_state\n for i in range(2, no):\n next_state = self.latent_variable_markov_chain.next_state(current_state)\n state_sequence.append(next_state)\n observations.append(self.observation_from_state(next_state))\n current_state = next_state\n return observations, state_sequence",
"def sample(self, n, include=True):\n return [self(t / n) for t in range(n + int(include))]",
"def prior(n=10):\r\n p = []\r\n trials = 0\r\n acc = 0\r\n while acc < n:\r\n trials += 1\r\n r = np.random.rand(2) * np.array([4, 2]) + np.array([-2, -1])\r\n # print(\"r: \", r)\r\n if r[1] + r[0] >= -1 and r[1] - r[0] >= -1:\r\n p.append(r)\r\n acc += 1\r\n # print(\"trials: \", trials, \", acc: \", acc)\r\n return p",
"def pull_reads(self, n, prng): \n if not self.sampling:\n self.convert_to_array()\n index = prng.randint(0, self.total, size=n)\n index = np.sort(index)\n return self.reads[index,:]",
"def sample(self, n=1):\n raise NotImplementedError",
"def get_training_n(self, n: Optional[int] = None, random: bool = True) -> Tuple[torch.Tensor, torch.Tensor]:\n return self.__sample(self.train_data, self.train_los, n, random)",
"def sample(self, n):\n raise NotImplementedError",
"def _sample(self, n=1):\n return [self[i] for i in np.random.choice(self.length, n, replace=False)]",
"def get_n_samples(self):\n return {op.rx.get_n_samples() for op in self.ops}",
"def sample(self, n_samples):\n\n z = sample_prior((n_samples,) + self.flow.z_shape)\n ldj = torch.zeros(z.size(0))\n\n z, ldj = self.flow (z, ldj, reverse=True)\n z, ldj = self.logit_normalize(z, ldj, reverse=True)\n\n return z",
"def Sample(n=6):\n t = [random.normalvariate(0.0, 1.0) for i in range(n)]\n t.sort()\n return t",
"def generate_samples(self, n_samples=100):\n \t\t\n\t\t#how many times should ancestral sampling be run\n\t\t#n_samples\n prior_samples=[]\n for i in range(0,n_samples):\n prior_sample = self.prior.get_samples(\n n_latent_nodes=self.n_latent_nodes,\n n_gibbs_sampling_steps=100, \n sampling_mode=\"gibbs_ancestral\")\n prior_sample = torch.cat(prior_sample)\n prior_samples.append(prior_sample)\n prior_samples=torch.stack(prior_samples)\n # prior_samples = tf.slice(prior_samples, [0, 0], [num_samples, -1])\n output_activations = self.decoder.decode(prior_samples)\n output_activations = output_activations+self._train_bias\n output_distribution = Bernoulli(logit=output_activations)\n output=torch.sigmoid(output_distribution.logits)\n # output_activations[0] = output_activations[0] + self.train_bias\n # output_dist = FactorialBernoulliUtil(output_activations)\n # output_samples = tf.nn.sigmoid(output_dist.logit_mu)\n # print(\"--- \",\"end VAE::generate_samples()\")\n return output",
"def posterior_sampler(self, nsamples, seed=0, verbose=True):\n\n import random\n\n random.seed(seed)\n sample = self.get_chain()[-self.get_tune:]\n sample = sample.reshape(-1, sample.shape[-1])\n sample = random.choices(sample, k=nsamples)\n\n return sample",
"def get_random_samplers(self, n):\n if not self.has_samplers():\n self.draw_samplers()\n \n def get_shuffledcopy(x):\n x_ = x.copy()\n np.random.shuffle(x_)\n return x_\n \n return get_shuffledcopy(self.samplers)[:n]"
] | [
"0.7953035",
"0.72522676",
"0.718797",
"0.71269745",
"0.6977341",
"0.6960459",
"0.6759681",
"0.6744323",
"0.66894495",
"0.6622551",
"0.6560197",
"0.65461904",
"0.65461904",
"0.64246196",
"0.641773",
"0.63970256",
"0.6361261",
"0.6323558",
"0.6306985",
"0.63066787",
"0.62868726",
"0.6237001",
"0.6221359",
"0.62077487",
"0.620033",
"0.61837375",
"0.61646336",
"0.6154083",
"0.615204",
"0.6140827"
] | 0.73065966 | 1 |
Computes the gradient of the prior with respect to theta. | def gradient(self, theta):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def gradient(self, theta):\n return (1 / (self.sigma * np.sqrt(2 * np.pi))) * (\n -theta / (self.sigma ** 2) * np.exp(-(theta ** 2) / (2 * self.sigma ** 2))\n )",
"def gradient(self, theta):\n a = -(6 * self.scale ** 2)\n b = 3 * self.scale ** 2 + np.exp(2 * theta)\n b *= np.log(3 * self.scale ** 2 * np.exp(-2 * theta) + 1)\n return a / b",
"def gradient(theta, X, y, learning_rate):\n m = len(y)\n\n theta = theta.reshape((-1,1))\n grad = np.zeros(theta.shape)\n h = sigmoid(np.dot(X, theta)) \n \n grad = np.dot((h-y).T, X)/m\n grad = grad.T\n grad[1:] += (learning_rate/m)*theta[1:]\n return grad",
"def gradient_function(theta, X, y):\n\n grad = None\n #######################################################################\n # TODO: #\n # Compute the gradient for a particular choice of theta. #\n # Compute the partial derivatives and set grad to the partial #\n # derivatives of the cost w.r.t. each parameter in theta #\n # #\n #######################################################################\n \n theta = theta[:, np.newaxis]\n \n thetatrans = theta.T\n Xtrans = X.T\n \n MulThetaX = np.dot(thetatrans, Xtrans)\n \n h = sigmoid(MulThetaX)\n \n grad = (y - h) * Xtrans\n\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n return grad",
"def compute_gradient(theta, X, y):\n m = X.shape[0]\n grad_theta = np.dot(X.transpose(), (np.dot(X, theta) - y)) / m\n #print theta, grad_theta, objective_function(theta, X, y)\n return grad_theta",
"def gradient(self, theta):\n return np.zeros([theta.shape[0]])",
"def gradientFunction(theta, X, y):\n y = y[:, 0]\n m = y.shape # number of training samples\n grad = X.T.dot(sigmoid(theta.dot(X.T))-1*y)\n grad /= m\n return grad",
"def gradient(theta, x, y):\n m = len(y)\n n = len(theta)\n z = theta.dot(x.T)\n grad = np.zeros(n)\n for i in xrange(m):\n grad += (g(z[i]) - y[i]) * x[i]\n return 1. / m * grad",
"def gradient(theta, X, y, Lambda=0.0):\n m = X.shape[0] # number of samples\n\n h = hypothesis(theta, X)\n\n if Lambda:\n g_0 = (1/m)*(X.T@(h - y))[0]\n g_1 = (1/m)*(X.T@(h - y))[1:] + (Lambda/m)*theta[1:] # skip theta-0\n \n return np.append(g_0, g_1)\n else:\n return (1/m)*(X.T@(h - y))",
"def gradient(x, y, theta):\n if x.ndim == 1:\n x = x[:, np.newaxis]\n if y.ndim == 2 and y.shape[1] == 1:\n y = y.flatten()\n if theta.ndim == 2 and theta.shape[1] == 1:\n theta = theta.flatten()\n\n if (x.size == 0 or y.size == 0 or theta.size == 0\n or x.ndim != 2 or y.ndim != 1 or theta.ndim != 1\n or x.shape[0] != y.shape[0] or x.shape[1] + 1 != theta.shape[0]):\n return None\n\n x_padded = np.c_[np.ones(x.shape[0]), x]\n\n return x_padded.T.dot(x_padded.dot(theta) - y) / y.shape[0]",
"def calc_grad(X, Y, theta):\n m, n = X.shape\n\n margins = Y * X.dot(theta)\n probs = 1. / (1 + np.exp(margins))\n grad = -(1./m) * (X.T.dot(probs * Y))\n\n return grad",
"def gradient_descent(X, y, theta, alpha, total_iterations, hypothesis):\n len_theta = len(theta)\n m = len(y)\n one_over_m = (1.0 / float(m))\n\n for _ in range(0, total_iterations):\n temp_theta = numpy.zeros(len_theta)\n\n X_by_theta_minus_y = numpy.subtract(hypothesis(numpy.matrix(theta), X), y)\n\n for j in range(0, len_theta):\n jth_column_of_X = X[:,j]\n derivative_j = one_over_m * numpy.multiply(X_by_theta_minus_y, jth_column_of_X).sum()\n temp_theta[j] = theta[j] - alpha*derivative_j\n\n theta = temp_theta\n\n return numpy.matrix(theta)",
"def gradient_descent(self, X, theta, Y, m):\n\n Z = X.dot(theta)\n H = Predict.g(Z)\n gradient = np.dot(X.T, (H - Y)) / m\n return self.alpha * gradient",
"def gradientFunctionReg(theta, X, y, Lambda): \n y = np.squeeze(y)\n m = y.shape # number of training samples\n grad = X.T.dot(sigmoid(theta.dot(X.T))-1*y)\n grad[1:] = grad[1:] + Lambda*theta[1:]\n grad /= m\n\n return grad",
"def gradientFunctionReg(theta, X, y, Lambda):\n m = len(y) # number of training examples\n grad = np.zeros(theta.shape[0])\n theta = np.transpose(theta)\n sum_1 = 0\n X = X.values\n y = y.values\n #calcuate the theta_0 \n# ====================== YOUR CODE HERE ======================\n# Instructions: Compute the gradient of a particular choice of theta.\n# Compute the partial derivatives and set grad to the partial\n# derivatives of the cost w.r.t. each parameter in theta\n for i in range(theta.shape[0]):\n if i == 0:\n for j in range(m):\n sum_1 += (sigmoid(np.dot(X[j],theta)) - y[j]) * X[j,i]\n else:\n for j in range(m):\n sum_1 += (sigmoid(np.dot(X[j],theta)) - y[j]) * X[j,i] + Lambda*theta[i]\n grad[i] = sum_1/m\n sum_1 = 0\n\n# =============================================================\n\n return grad",
"def logit_cost_grad(self, theta, X, y):\n\n grad = np.zeros(len(theta))\n\n ### YOUR CODE HERE\n sig = utils.sigmoid(theta)\n # sig = np.subtract(sig, y)\n sig = sig - y\n grad = np.dot(X.T, sig) + 2 * self.params['lamb'] * self.regularizer[1](self.weights)\n ### END YOUR CODE\n\n return grad",
"def compute_loss_gradient(theta_vector, *args):\n\n psi = args[0] # feed psi as a parameter\n circ_depth = args[1]\n num_qbits = args[2]\n theta = np.reshape(theta_vector, (circ_depth, num_qbits)) # reshapes the flat theta vector\n fidelity = get_fidelity(theta, psi)\n\n # the derivative of the loss wrt fidelity\n dl_df = -0.5 * fidelity ** (-0.5)\n\n df_dtheta = [] # a list of partial derivatives of the fidelity wrt the theta parameters\n\n for index in range(len(theta_vector)):\n layer_index = index // num_qbits\n qbit_index = index % num_qbits\n\n theta_plus = np.copy(theta)\n theta_plus[layer_index][qbit_index] += np.pi / 2 # added pi/2 to the ith theta parameter\n\n theta_minus = np.copy(theta)\n theta_minus[layer_index][qbit_index] -= np.pi / 2 # subtracted pi/2 to the ith theta parameter\n\n df_dtheta_i = 0.5 * (get_fidelity(theta_plus, psi) - get_fidelity(theta_minus, psi)) # ith derivative\n df_dtheta.append(df_dtheta_i)\n\n df_dtheta = np.array(df_dtheta)\n dl_dtheta = dl_df * df_dtheta # chain rule to get partial derivative of loss wrt theta parameters\n\n return dl_dtheta",
"def run_gradient_descent_iteration(X, Y, theta, alpha, lambda_factor, temp_parameter):\n delta = sparse.coo_matrix(theta.shape).toarray()\n\n h = compute_probabilities(X, theta, temp_parameter)\n\n for j in range(delta.shape[0]):\n y = Y\n y = np.where(y != j, 0, 1)\n p = y - h[j]\n\n x = X.T * p\n x = x.T\n x = x.sum(axis=0)\n\n grad = -x / (temp_parameter * X.shape[0]) + lambda_factor * theta[j]\n\n delta[j] += grad\n\n theta -= alpha * delta\n\n return theta",
"def gradient_descent(features, values, theta, alpha, num_iterations):\n \n # number of points\n npoints = len(values)\n \n # intialize cost history\n cost_history = []\n \n # num_interations iterations\n for iiter in range(num_iterations):\n \n # compute and store cost\n cost = compute_cost(features, values, theta)\n cost_history.append(cost)\n \n # update values of theta\n values_predicted = np.dot(features, theta)\n theta = theta + (alpha/npoints)*(np.dot(values - values_predicted,features))\n \n return theta, pandas.Series(cost_history)",
"def gradient_descent(x, y, theta=[[0], [0]]):\n m = y.size\n j_history = []\n for i in range(ITERATIONS):\n h = x.dot(theta)\n theta = theta - (ALPHA / m) * (x.T.dot(h - y))\n j_history.append(compute_cost(x, y, theta))\n return theta, j_history",
"def approx_grad(theta, X, y):\n grad_a = np.array([(cost(theta + e, X, y) - cost(theta - e, X, y)) / (2 * 1e-5)\n for e in np.identity(len(theta)) * 1e-5])\n return grad_a",
"def gradient_descent(features, values, theta, alpha, num_iterations):\r\n\r\n m = len(values)\r\n cost_history = []\r\n\r\n for i in range (num_iterations):\r\n \r\n h = numpy.dot(features, theta)\r\n \r\n theta = theta - alpha / m * numpy.dot((h-values),features)\r\n \r\n cost = compute_cost(features, values, theta)\r\n \r\n cost_history.append(cost)\r\n\r\n return theta, pandas.Series(cost_history) # leave this line for the grader\r",
"def calculate_gradient(y, tx, w): \n return tx.T@(sigmoid(tx@w)-y)",
"def gradient_descent(features, values, theta, alpha, num_iterations):\n m = len(values)\n cost_history = []\n\n for i in range(num_iterations):\n predicted_values = np.dot(features, theta)\n delta = alpha / m * np.dot((predicted_values - values), features)\n theta = theta - delta\n cost = compute_cost(features, values, theta)\n cost_history.append(cost)\n return theta, pandas.Series(cost_history)",
"def lr_cost_function_grad(theta: np.ndarray, X: np.ndarray, y: np.ndarray, l: float) -> np.ndarray:\n # Initialize some useful values\n m = len(y) # number of training examples\n\n # You need to return the following variable correctly.\n grad = np.zeros(theta.shape)\n\n # ====================== YOUR CODE HERE ======================\n # Instructions: Compute the partial derivatives and set grad to the partial\n # derivatives of the cost w.r.t. each parameter in theta.\n\n # =============================================================\n return grad",
"def gradient_ascent(self, w, X, y, lr):\r\n # INSERT YOUR CODE HERE\r\n #raise Exception('Function not yet implemented!')\r\n # gradient = x_j*(y-σ(wTX))\r\n return np.dot(X.T, y-self.sigmoid(np.dot(X, w)))",
"def computeNumericalGradient(J, theta):\n numgrad = np.zeros(theta.size)\n perturb = np.zeros(theta.size)\n e = 1e-4\n for p in range(theta.size):\n # Set perturbation vector\n perturb[p] = e\n loss1 = J(theta - perturb)[0]\n loss2 = J(theta + perturb)[0]\n # Compute Numerical Gradient\n numgrad[p] = (loss2 - loss1) / (2*e)\n perturb[p] = 0\n return numgrad",
"def computeNumericalGradient(J, theta):\n numgrad = np.zeros_like(theta).reshape(-1)\n perturb = np.zeros_like(theta).reshape(-1)\n e = 1e-4\n for p in range(theta.size):\n # Set perturbation vector\n perturb[p] = e\n loss1, _ = J(theta - perturb.reshape(theta.shape))\n loss2, _ = J(theta + perturb.reshape(theta.shape))\n # Compute Numerical Gradient\n numgrad[p] = (loss2 - loss1) / (2 * e)\n perturb[p] = 0\n\n return numgrad.reshape(theta.shape)",
"def gradientdescent(cost_func, theta, args=(), delta_func = 0):\n step = 1\n old_cost = 0\n while True:\n theta_old = theta.copy()\n cost = cost_func(theta, *args)\n delta = delta_func(theta, *args)\n theta = theta - step * delta\n if cost > old_cost and old_cost != 0:\n step = step*0.7\n if np.allclose(theta_old, theta):\n break\n old_cost = cost\n return theta",
"def gradient(cls, x):\n y = Sigmoid.apply(x)\n return np.multiply(y, 1 - y)"
] | [
"0.8256445",
"0.8055304",
"0.7947039",
"0.7867961",
"0.78287417",
"0.78280646",
"0.7741965",
"0.77182055",
"0.77098596",
"0.76828635",
"0.76208895",
"0.75614756",
"0.72576725",
"0.72168595",
"0.7216574",
"0.7211889",
"0.70136374",
"0.70051277",
"0.6994997",
"0.69862175",
"0.69123644",
"0.6910823",
"0.68812525",
"0.6881161",
"0.6863728",
"0.6838628",
"0.6829457",
"0.68013173",
"0.67898685",
"0.67635065"
] | 0.83195007 | 1 |
Find a square that forms a bracket with `square` for `player` in the given `direction`. Returns None if no such square exists. Returns the index of the bracketing square if found | def find_bracket(self, square, player, board, direction):
curr = square+ direction
opp = self.opponent(player)
if(board[curr]!=opp):
return None
while(self.is_valid(curr) and board[curr]==opp):
curr+=direction
if(self.is_valid(curr) and board[curr] == player):
return curr
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_bracket(square, player, board, direction):\n bracket = square + direction\n if board[bracket] == player:\n return None\n opp = Othello.opponent(player)\n while board[bracket] == opp:\n bracket += direction\n return None if board[bracket] in (OUTER, EMPTY) else bracket",
"def get_square_index(self, square):\r\n # find the player by going through the list\r\n for i in range(len(self.squares)):\r\n if square == self.squares[i]:\r\n return i\r\n raise SquareNotFound",
"def search_next_win(self, player):\n for i, j, k in self.winning_cases:\n if self.game_board[i] == player and \\\n self.game_board[j] == player and \\\n self.game_board[k] == ' ':\n return k\n elif self.game_board[j] == player and \\\n self.game_board[k] == player and \\\n self.game_board[i] == ' ':\n return i\n elif self.game_board[i] == player and \\\n self.game_board[k] == player and \\\n self.game_board[j] == ' ':\n return j\n return None",
"def get_square_index(self, cell):\n return next(s for s, square in enumerate(self.squares) if cell in square)",
"def getMove(player):\n\n\tsquares = { \"1\":1, \"2\":2, \"3\":3, \"4\":4, \"5\":5, \"6\":6, \"7\":7, \"8\":8, \"9\":9 }\n\tchoice = input(\"Player \" + str(player + 1) + \", pick a square (1-9): \")\n\ttry:\n\t\treturn squares[choice]\n\texcept KeyError:\n\t\tpass",
"def find_square(self, target_name: str) -> int:\n found_square_num = None\n for i in range(len(self.squares)):\n if target_name == self.squares[i].name:\n found_square_num = i\n break\n return found_square_num",
"def get_square_index_by_name(self, square_name, from_square=None):\r\n if from_square is not None:\r\n # don't start at the begining\r\n raise Exception\r\n\r\n for i in range(len(self.squares)):\r\n print(self.squares[i].name, square_name)\r\n if self.squares[i].name == square_name:\r\n return i\r\n\r\n raise SquareNotFound",
"def get_move(board, player):\n #optimization to always pick the top-left corner on an empty board\n if set(board) == set([config.NO_PLAYER]):\n return 0\n result = minimax(board, player, 2, config.NEG_INF, config.INF)\n return result.index",
"def index_in_direction(self, index, grid_size, direction):\n # convert index to row, col coordinate\n col = index % grid_size\n row = index // grid_size\n if RIGHT in direction:\n col += 1\n elif LEFT in direction:\n col -= 1\n # Notice the use of if, not elif here\n if UP in direction:\n row -= 1\n elif DOWN in direction:\n row += 1\n if not (0 <= col < grid_size and 0 <= row < grid_size):\n return None\n return row * grid_size + col",
"def find_piece(self, piece_to_find):\n for row in range(BOARD_SIZE):\n for col in range(BOARD_SIZE):\n if self.board[row][col] is piece_to_find:\n return Square.at(row, col)\n raise Exception('The supplied piece is not on the board')",
"def find_piece(self, piece_to_find):\n for row in range(BOARD_SIZE):\n for col in range(BOARD_SIZE):\n if self.board[row][col] is piece_to_find:\n return Square.at(row, col)\n raise Exception('The supplied piece is not on the board')",
"def search(self, depth, board, current_player, opposite_player):\n legal_boards = []\n for column in range(7):\n if board.get_hole(column, 0) is 0:\n temp = deepcopy(board)\n temp.set_column(column, current_player)\n legal_boards.append(temp)\n \n if depth == 0 or len(legal_boards) == 0 or self.game_is_over(board, current_player, opposite_player):\n return self.value(board, current_player, opposite_player)\n\n alpha = -99999999\n\n for legal_board in legal_boards:\n alpha = max(alpha, -self.search(depth-1, legal_board, opposite_player, current_player))\n return alpha",
"def getWinner(board):\n players = [X, O]\n num_symbols_in_line = 3\n for player in players:\n # check rows\n for row in board:\n line_count = row.count(player)\n if line_count == num_symbols_in_line:\n return player\n \n # check columns\n for col_i in range(len(board[0])):\n line_count = 0\n for row_i in range(len(board)):\n if board[row_i][col_i] == player:\n line_count += 1\n if line_count == num_symbols_in_line:\n return player\n \n # check vertical from top left to bottom right\n line_count = 0\n for vert_cell in range(len(board)):\n if board[vert_cell][vert_cell] == player:\n line_count += 1\n if line_count == num_symbols_in_line:\n return player\n \n # check vertical from top right to bottom left\n line_count = 0\n col_i = len(board) - 1\n for row_i in range(len(board)):\n if board[row_i][col_i] == player:\n line_count += 1\n col_i -= 1\n if line_count == num_symbols_in_line:\n return player\n\n return None",
"def get_square_at_relative_position(\n cls, start_square, orientation, stepped_squares):\n if orientation == cls.VERTICAL:\n star_square_row = cls._get_row_fow_letter(start_square[0])\n end_square_row = star_square_row + stepped_squares\n end_square_row = chr(64 + end_square_row)\n else:\n end_square_row = start_square[0]\n\n start_square_column = int(start_square[1:])\n if orientation == cls.HORIZONTAL:\n end_square_column = start_square_column + stepped_squares\n else:\n end_square_column = start_square_column\n\n return '%s%d' % (end_square_row, end_square_column)",
"def find_player(self):\n for y, line in enumerate(self.maze):\n for x, character in enumerate(line):\n if character == \"m\":\n return y, x\n return None",
"def look_for_win(self, board, player=None):\n\n win_spot = None\n if player is None:\n player = self\n\n for group in WINS:\n # creates a list of just the elements of the board which are\n # part of a specific win group and and not already owned by the player\n # and creates a list of tuples of the element and its value.\n not_mine = [(i, val) for i, val in enumerate(board.tttboard)\n if i in group\n and val != player.board_value]\n\n # If there's only one not owned by the ai player and not owned by\n # the other player then select it and we've won\n if len(not_mine) == 1 and not_mine[0][1] is None:\n # Maybe this should return the selection rather than\n # modifying the board in here. Decide later.\n win_spot = not_mine[0][0]\n break\n\n return win_spot",
"def get_checker(self, square):\n\n logger.debug(u'get_checker({})'.format(square))\n\n row, column = square\n if row < 0 or row > 7 or column < 0 or column > 7:\n return None\n else:\n return self.squares[row][column]",
"def find_player_position(labyrinth: Labyrinth) -> Tuple[int, int]:\n for row in range(0, len(labyrinth)):\n for col in range(0, len(labyrinth[0])):\n if labyrinth[row][col] == Labyrinth.START:\n return row, col\n\n # todo: handle exception, if there is no field holding 'S' then something is wrong\n return -1, -1",
"def match_parentheses(dot, position):\n stack = 0\n for i in range(position + 1, len(dot)):\n if dot[i] == '(':\n stack += 1\n elif dot[i] == ')':\n if stack == 0:\n return i\n else:\n stack -= 1\n return -1",
"def isSolved(board):\n for player in [1, 2]:\n if [player]*3 in chain(\n board, # Rows\n zip(board), # Columns\n [ # Diagonals\n [board[i][i] for i in range(len(board))],\n [board[len(board) - i - 1][i] for i in range(len(board))]\n ]\n ):\n return player\n return -1 if 0 in chain(*board) else 0",
"def get_map_square(x, y):\n result = MAP_SQUARE_ROCK\n if ((x >=0 and x< width) and (y>= 0 and y< height)): # LT i think done TODO: Replace False with a condition that checks if the values x and y are valid. Valid index values start at 0. x must be less than width and y must be less than height. Negative numbers are not valid.\n row= dungeon_map[y]\n result= row[x] # LT... done? see bitmap hw in comments below TODO: Replace None with an expression that uses x and y to get the right value from dungeon_map. \n return result",
"def play_game_turn(player, symbol):\n\n row = ask_input(player, \"row\")\n column = ask_input(player, \"column\")\n\n if board.is_empty(row, column):\n board.put_symbol(symbol, row, column)\n board.print_board()\n else:\n print \"That spot has been taken. Please try again.\"\n play_game_turn(player, symbol)",
"def winner(board):\n \n for m in [\"XXX\", \"OOO\"]:\n # horizontal\n for row in range(3):\n if board[row][0] == board[row][1] == board[row][2]:\n return board[row][0]\n # vertical\n for col in range(3):\n if board[0][col] == board[1][col] == board[2][col]:\n return board[0][col]\n # diagonal\n if board[0][0] == board[1][1] == board[2][2]:\n return board[1][1]\n if board[0][2] == board[1][1] == board[2][0]:\n return board[1][1]\n return None",
"def index_in_direction(index, grid_size, direction):\n pos_to_return = 0\n #position to return\n if direction == 'up':\n pos_to_return = index-grid_size\n if direction == 'down':\n pos_to_return = index+grid_size\n if direction == 'left':\n pos_to_return = index-1\n if (pos_to_return+1)%grid_size == 0:\n return None\n if direction == 'right':\n pos_to_return = index+1\n if (pos_to_return)%grid_size == 0:\n return None\n if direction == 'up-right':\n pos_to_return = index-grid_size+1\n if (pos_to_return)%grid_size == 0:\n return None\n if direction == 'up-left':\n pos_to_return = index-grid_size-1\n if (pos_to_return+1)%grid_size == 0:\n return None\n if direction == 'down-right':\n pos_to_return = index+grid_size+1\n if (pos_to_return)%grid_size == 0:\n return None\n if direction == 'down-left':\n pos_to_return = index+grid_size-1\n if (pos_to_return+1)%grid_size == 0:\n return None\n if 0 <= pos_to_return < grid_size*grid_size:\n return pos_to_return\n return None",
"def player(board):\n\n # Game is over\n if terminal(board):\n return None\n\n # Count number of occurences of X and O\n x_count = 0\n o_count = 0\n for row in board:\n for box in row:\n if box == X:\n x_count = x_count + 1\n elif box == O:\n o_count = o_count + 1\n # When move count is tied, X is next\n if x_count <= o_count:\n return X\n # When X has moved once more than O, next move is O\n else:\n return O",
"def getWinner(game, situation, player):\r\n spec = Player.get_spec(player)\r\n for i in range(len(situation)):\r\n if situation[i][0] == situation[i][1] == situation[i][2] == spec :\r\n return player\r\n if i == 0:\r\n for j in range(3):\r\n if situation[i][j] == situation[i+1][j] == situation[i+2][j] == spec :\r\n return player\r\n if j == 0 or j == 2 :\r\n if situation[i][j] == situation[i+1][abs(j-1)] == situation[i+2][abs(j-2)] == spec :\r\n return player\r\n return None",
"def find_index(self):\n current = self.from_grid\n #find index of \"*\"\n for x in range(len(current)):\n for y in range(len(current[x])):\n if current[x][y] == \"*\":\n index = (x,y)\n return index",
"def get_move(board, player):\n row, col = 0, 0\n return row, col",
"def GetPlayerXY(level):\n for row, line in enumerate(level):\n for column, square in enumerate(line):\n if square in \"SQ\":\n return (column, row, square)",
"def get_piece(self, square):\n return self.board[square.row][square.col]"
] | [
"0.80075675",
"0.657441",
"0.63864964",
"0.6318657",
"0.6174479",
"0.5725372",
"0.57224786",
"0.5603353",
"0.5454616",
"0.54519516",
"0.54519516",
"0.543676",
"0.5399402",
"0.5393349",
"0.5366055",
"0.52998435",
"0.52970845",
"0.5295573",
"0.5294598",
"0.5291805",
"0.5225825",
"0.5218502",
"0.5210849",
"0.52027273",
"0.5199162",
"0.5196862",
"0.516445",
"0.51618403",
"0.5158051",
"0.51332766"
] | 0.80948424 | 0 |
Flip pieces in the given direction as a result of the move by player. | def make_flips(self, move, player, board, direction):
curr = move + direction
opp = self.opponent(player)
while(board[curr]==opp):
board[curr] = player
curr += direction
#return board
| {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def flip(self, x, y):\n self.pieces[x + (y * self.width)].flip()",
"def flip(self, bev_direction: str = 'horizontal') -> None:\n pass",
"def make_flips(move, player, board, direction):\n bracket = Othello.find_bracket(move, player, board, direction)\n if not bracket:\n return\n square = move + direction\n while square != bracket:\n board[square] = player\n square += direction",
"def flip_player(cls):\n cls.current_player = 'X' if cls.current_player == 'O' else 'O'\n\n cls.display_board()\n cls.prompt_player()",
"def set_flipped(self, x, y):\n self.pieces[x + (y * self.width)].set_flipped()",
"def _flip(self, i_row, i_col, flip_lst):\n self._board[i_row][i_col] = self._turn\n for cell in flip_lst:\n self._board[cell[0]][cell[1]] = self._turn",
"def flip_sides(self) -> 'Piece':\n return Piece(\n str(self.rank),\n self.color.other_color,\n promoted=self.is_promoted\n )",
"def _flip(self,update_stack,index):\n cell=game.get_cell(index)\n if cell.ifFlipped()==False:\n cell.flip()\n game.decrease_remain()\n if cell.isMine()==False and cell.get_neighbor()>0:\n update_stack[str(index)]=cell.get_neighbor()\n return\n elif cell.isMine()==False and cell.get_neighbor()==0:\n update_stack[str(index)]=cell.get_neighbor()\n temp_r=index/self._col_num\n temp_c=index%self._col_num\n shift=[[temp_r+dr,temp_c+dc] for dr in self.shifts for dc in self.shifts\n if [temp_r+dr,temp_c+dc]!=[temp_r,temp_c]\n and temp_r+dr in range(0,self._row_num)\n and temp_c+dc in range(0,self._col_num)]\n for s in shift:\n self._flip(update_stack,s[0]*self._col_num+s[1])",
"def move_piece(self, direction=None):\n if self.will_collide(direction=direction):\n return\n self.active_piece.move(direction=direction)\n self.display_piece()",
"def flip_cards(self):\n for card_ in self.cards:\n card_.flip()",
"def flip_direction(direction):\n if direction==\"NORTH\": return \"SOUTH\"\n if direction==\"SOUTH\": return \"NORTH\"\n if direction==\"WEST\": return \"EAST\"\n if direction==\"EAST\": return \"WEST\"\n elif isinstance(direction, float):\n return (direction + np.pi)%(2*np.pi)",
"def _flip_turn(self):\n self._turn = self._next_turn\n return self._turn",
"def swap(self, direction):\n directions = {'up': (-1, 0), 'down': (1, 0), 'left': (0, -1), 'right': (0, 1),}\n new_row = self.__blank_box[0] + directions[direction][0]\n new_col = self.__blank_box[1] + directions[direction][1]\n new_position = self.__get_box((new_row*self.__length)+new_col)\n self.__board[self.__blank_box[0]][self.__blank_box[1]] \\\n = new_position\n self.__board[new_row][new_col] = None\n self.__blank_box = (new_row, new_col)\n self.__set_possibilities()\n self.__previous_move = direction",
"def flip_player():\n global current_player\n # If current player is 'X', then set current player to 'O'.\n if current_player == 'X':\n current_player = 'O'\n # If current player is 'O', then set current player to 'X'.\n elif current_player == 'O':\n current_player = 'X'",
"def flip(self):",
"def flip(self, mode='h'):\n # TODO: Implement the flip function. Remember to record the boolean values is_horizontal_flip and\n # is_vertical_flip.\n if mode == 'h':\n self.is_horizontal_flip = True\n self.x = np.flipud(self.x)\n elif mode == 'v':\n self.is_vertical_flip = True\n self.x = np.fliplr(self.x)\n else:\n self.is_vertical_flip = True\n self.is_horizontal_flip = True\n self.x = np.fliplr(self.x)\n self.x = np.flipud(self.x)\n # raise NotImplementedError\n #######################################################################\n # #\n # #\n # TODO: YOUR CODE HERE #\n # #\n # #\n #######################################################################",
"def test_flip_piece():\n board = Board(640, 640, 8)\n board.start_game()\n board.gm.flip_pieces = [(3, 3)]\n current_color = board.game_pieces[3][3].color\n board.flip_pieces()\n assert board.game_pieces[3][3].color != current_color\n \n board.gm.flip_pieces = [(3, 4)]\n current_color = board.game_pieces[3][4].color\n board.flip_pieces()\n assert board.game_pieces[3][4].color != current_color",
"def _label_flips(self, i_row, i_col, direction):\n vert_move, hori_move = i_row, i_col #Initially start at the opposing cell\n candidates = []\n\n #Perhaps could have done if 0 > vert_move > num_rows and 0 > hori_move > num_cols instead!\n while ((self._board[vert_move][hori_move] != self._turn) and not #This can be True in one of two ways! \n self._is_dead_end(vert_move, hori_move, direction) and # think: \"W\" or \" \"\n self._board[vert_move][hori_move] != \" \"):\n candidates.append((vert_move, hori_move))\n if direction == \"n\":\n vert_move += 1\n elif direction == \"ne\":\n hori_move -= 1\n vert_move += 1\n elif direction == \"e\":\n hori_move -= 1\n elif direction == \"se\":\n hori_move -= 1\n vert_move -= 1\n elif direction == \"s\":\n vert_move -= 1\n elif direction == \"sw\":\n hori_move += 1\n vert_move -= 1\n elif direction == \"w\":\n hori_move += 1\n elif direction == \"nw\":\n hori_move += 1\n vert_move += 1\n #Watch out, index can go out of range after several iterations\n #of the loop body, not just once you enter the loop!!!\n\n ending_cell = self._board[vert_move][hori_move] \n if ending_cell == self._turn: #If the ending cell is same color, then flip can be done.\n return (True, candidates)\n else:\n return (False, [])",
"def update_player_direction(self,direction):\n pass",
"def switch_playing_direction(position_coords):\n # just mirrors the x-coordinate in place\n position_coords[:,0::2] *= -1",
"def flip(self):\n if self.color != \"empty\":\n self.color = self.oppositeColor()\n else:\n raise pieceError(self.col, self.row)",
"def flip(self):\n \n if self.faceup:\n self.faceup = False\n else:\n self.faceup = True",
"def flip(self, p):\n return -p",
"def turn_around():\n for i in range(2):\n turn_left()",
"def flip(self, bev_direction='horizontal', points=None):\n assert bev_direction in ('horizontal', 'vertical')\n if bev_direction == 'horizontal':\n self.tensor[:, 0::7] = -self.tensor[:, 0::7]\n if self.with_yaw:\n self.tensor[:, 6] = -self.tensor[:, 6] + np.pi\n elif bev_direction == 'vertical':\n self.tensor[:, 2::7] = -self.tensor[:, 2::7]\n if self.with_yaw:\n self.tensor[:, 6] = -self.tensor[:, 6]\n\n if points is not None:\n assert isinstance(points, (torch.Tensor, np.ndarray, BasePoints))\n if isinstance(points, (torch.Tensor, np.ndarray)):\n if bev_direction == 'horizontal':\n points[:, 0] = -points[:, 0]\n elif bev_direction == 'vertical':\n points[:, 2] = -points[:, 2]\n elif isinstance(points, BasePoints):\n points.flip(bev_direction)\n return points",
"def move_the_pieces(self, p_place, direction, color):\r\n if direction == \"down\":\r\n return self.move_vertically(p_place, \"down\", color)\r\n elif direction == \"up\":\r\n return self.move_vertically(p_place, \"up\", color)\r\n elif direction == \"right\":\r\n return self.move_horizontally(p_place, \"right\", color)\r\n elif direction == \"left\":\r\n return self.move_horizontally(p_place, \"left\", color)\r\n elif direction == \"diagonal down right\":\r\n return self.move_diagonally(p_place, \"diagonal down right\", color)\r\n elif direction == \"diagonal down left\":\r\n return self.move_diagonally(p_place, \"diagonal down left\", color)\r\n elif direction == \"diagonal up right\":\r\n return self.move_diagonally(p_place, \"diagonal up right\", color)\r\n else:\r\n return self.move_diagonally(p_place, \"diagonal up left\", color)",
"def __call__(self, results):\n\n if 'flip' not in results:\n if isinstance(self.direction, list):\n # None means non-flip\n direction_list = self.direction + [None]\n else:\n # None means non-flip\n direction_list = [self.direction, None]\n\n if isinstance(self.flip_ratio, list):\n non_flip_ratio = 1 - sum(self.flip_ratio)\n flip_ratio_list = self.flip_ratio + [non_flip_ratio]\n else:\n non_flip_ratio = 1 - self.flip_ratio\n # exclude non-flip\n single_ratio = self.flip_ratio / (len(direction_list) - 1)\n flip_ratio_list = [single_ratio] * (len(direction_list) -\n 1) + [non_flip_ratio]\n\n cur_dir = np.random.choice(direction_list, p=flip_ratio_list)\n\n results['flip'] = cur_dir is not None\n if 'flip_direction' not in results:\n results['flip_direction'] = cur_dir\n if results['flip']:\n self._flip_img(results)\n self._flip_bboxes(results)\n self._flip_cbboxes(results)\n self._flip_masks(results)\n self._flip_seg(results)\n\n return results",
"def make_move(self, direction):\r\n\t\tif direction == 0:\r\n\t\t\tself.board[self.x][self.y], self.board[self.x-1][self.y] = self.board[self.x-1][self.y], self.board[self.x][self.y]\r\n\t\t\tself.x -= 1\r\n\r\n\t\telif direction == 1:\r\n\t\t\tself.board[self.x][self.y], self.board[self.x][self.y+1] = self.board[self.x][self.y+1], self.board[self.x][self.y]\r\n\t\t\tself.y += 1\r\n\r\n\t\telif direction == 2:\r\n\t\t\tself.board[self.x][self.y], self.board[self.x+1][self.y] = self.board[self.x+1][self.y], self.board[self.x][self.y]\r\n\t\t\tself.x += 1\r\n\r\n\t\telif direction == 3:\r\n\t\t\tself.board[self.x][self.y], self.board[self.x][self.y-1] = self.board[self.x][self.y-1], self.board[self.x][self.y]\r\n\t\t\tself.y -= 1",
"def __call__(self, results):\n\n if 'flip' not in results:\n if isinstance(self.direction, list):\n # None means non-flip\n direction_list = self.direction + [None]\n else:\n # None means non-flip\n direction_list = [self.direction, None]\n\n if isinstance(self.flip_ratio, list):\n non_flip_ratio = 1 - sum(self.flip_ratio)\n flip_ratio_list = self.flip_ratio + [non_flip_ratio]\n else:\n non_flip_ratio = 1 - self.flip_ratio\n # exclude non-flip\n single_ratio = self.flip_ratio / (len(direction_list) - 1)\n flip_ratio_list = [single_ratio] * (len(direction_list) -\n 1) + [non_flip_ratio]\n\n cur_dir = np.random.choice(direction_list, p=flip_ratio_list)\n\n results['flip'] = cur_dir is not None\n if 'flip_direction' not in results:\n results['flip_direction'] = cur_dir\n if results['flip']:\n # flip image\n for key in results.get('img_fields', ['img']):\n results[key] = general_ocr.imflip(\n results[key], direction=results['flip_direction'])\n # flip bboxes\n for key in results.get('bbox_fields', []):\n results[key] = self.bbox_flip(results[key],\n results['img_shape'],\n results['flip_direction'])\n # flip masks\n for key in results.get('mask_fields', []):\n results[key] = results[key].flip(results['flip_direction'])\n\n # flip segs\n for key in results.get('seg_fields', []):\n results[key] = general_ocr.imflip(\n results[key], direction=results['flip_direction'])\n return results",
"def flip(self, horizontal):\n try:\n self._is_transformable()\n horizontal = get_int(horizontal)\n except NotTransformable as e:\n self._app[\"statusbar\"].message(str(e) + \" flip\", \"error\")\n return\n except StringConversionError as e:\n self._app[\"statusbar\"].message(str(e), \"error\")\n return\n images = self.get_images(\"Flipped\")\n # Apply changes\n for fil in images:\n if fil not in self._changes:\n self._changes[fil] = [0, 0, 0]\n if horizontal:\n self._changes[fil][1] = \\\n (self._changes[fil][1] + 1) % 2\n else:\n self._changes[fil][2] = \\\n (self._changes[fil][2] + 1) % 2\n # Flip the image shown\n if self._app.get_path() in images:\n self.emit(\"changed\", \"flip\", horizontal)\n # Reload thumbnails of flipped images immediately\n if self._app[\"thumbnail\"].toggled:\n self.apply()"
] | [
"0.68362707",
"0.6592844",
"0.65014696",
"0.6500981",
"0.6391202",
"0.62869",
"0.61508685",
"0.61346436",
"0.6131365",
"0.6130957",
"0.6108974",
"0.6072877",
"0.6030984",
"0.59973514",
"0.5908052",
"0.5880105",
"0.57856685",
"0.5767312",
"0.5751639",
"0.5737677",
"0.57143956",
"0.57065123",
"0.5672485",
"0.56431377",
"0.5641535",
"0.5639652",
"0.5628632",
"0.5608284",
"0.56008613",
"0.55778646"
] | 0.69535416 | 0 |
Can player make any moves? Returns a boolean | def any_legal_move(self, player, board):
moves = self.legal_moves(player, board)
        # The player can move as long as at least one legal move exists.
        return len(moves) != 0
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def move_check(self):\r\n \r\n if not self.run:\r\n return False\r\n \r\n if self.get_num_legal_moves() == 0:\r\n SlTrace.lg(\"NO more legal moves!\", \"nolegalmoves\")\r\n ###return False \r\n \r\n if self.new_move:\r\n self.announce_player(\"start_move\")\r\n if SlTrace.trace(\"selected\"):\r\n self.list_selected(\"After start_move\")\r\n self.new_move = False\r\n player = self.get_player()\r\n if player is None:\r\n return False\r\n \r\n return True",
"def can_move(self, side, number_of_turns):\n return True",
"def valid_move(self, player, move):\n return (True)",
"def player(self):\n legal = self.board.legal_move(self.black)\n if(len(legal) == 0):\n self.p_no_move = 1\n print(\"No legal move for player!\")\n self.computer_turn = True\n self.player_turn = False",
"def is_valid_move(self, somerow, somecol):\n bool_1 = self.board[somerow][somecol] != 1\n bool_2 = self.num_queens_placed < self.size \n bool_3 = self.attack(somerow, somecol)\n return bool_1 and bool_2 and bool_3",
"def move_valid(move):\n return True",
"def can_move(self):\r\n for wall in self.app.walls:\r\n if vec(self.grid_pos+self.direction) == wall:\r\n return False\r\n return True",
"def is_legal_move(self, current_player, move):\n\t\tstarting_pos = move[0]\n\t\tending_pos = move[1]\n\t\tif ending_pos[0] not in range(self.board_size) or ending_pos[1] not in range(self.board_size):\t# Discard any generated moves that fall off of the board\n\t\t\treturn False \n\t\tif self.board.repr[starting_pos[0]][starting_pos[1]]!=self.player_symbol[current_player]:\n\t\t\tprint \"this should never trigger and is redundant\"\n\t\t\treturn False\n\t\tif self.board.repr[ending_pos[0]][ending_pos[1]]!= '.':\t# Check that landing spot is empty\n\t\t\treturn False\n\t\tmiddle_pos = (starting_pos[0]-(starting_pos[0]-ending_pos[0])/2,starting_pos[1]-(starting_pos[1]-ending_pos[1])/2)\t# Check the middle spot is the other piece - this should in theory not matter because the pieces alternate\n\t\tother_player = 1 - current_player \n\t\tif self.board.repr[middle_pos[0]][middle_pos[1]] != self.player_symbol[other_player]:\n\t\t\treturn False \n\t\treturn True",
"def valid_move(self, player, move):\n if self.rounds < len(self.players):\n if ((False in [(self.board).in_bounds(pt) for pt in move])\n or (self.board).overlap(move)\n or not (True in [(pt in player.corners) for pt in move])):\n return (False)\n else:\n return (True)\n\n elif ((False in [(self.board).in_bounds(pt) for pt in move])\n or (self.board).overlap(move)\n or (self.board).adj(player, move)\n or not (self.board).corner(player, move)):\n return (False)\n\n else:\n return (True)",
"def is_legal(self, move, player, board):\r\n if(self.is_valid(move)==False):\r\n return False\r\n if(board[move]!=core.EMPTY):\r\n return False\r\n return True",
"def can_play_on_all_active(self):\n if self.last_move is None:\n return True\n x, y = self.last_move[-2:]\n if self.boards[x][y].state != State.IN_PROGRESS:\n return True\n return False",
"def available_moves(self) -> bool:\n has_move = False\n for i in range(self.col):\n if self.valid_column(i):\n has_move = True\n return has_move",
"def check_move(self, move):\n\n if str(move) in self.moves_made:\n return False\n return True",
"def is_valid_move(self, move: Any) -> bool:\n return move in self.get_possible_moves()",
"def can_move(self,direction):\r\n if direction in self.current_room.return_directions():\r\n print('move into the next room')\r\n # makes next room \r\n self.next_room(direction)\r\n return True\r\n else:\r\n print(\"Can't move that way\")\r\n return False",
"def can_turnover(self):\n return True if len(self._turn_chars) != 0 else False",
"def check_illegal_move(self, player, action):\n available_actions = self.filter_actions(player)\n if action not in available_actions:\n print('Illegal move! Please choose another move!')\n return False\n return True",
"def can_turn_without_moving(self):\n return self.turn",
"def valid_move(x, y):\r\n if [x, y] in empty_cells(board):\r\n return True\r\n else:\r\n return False",
"def is_move_valid(move: Move, board: Board, whites_turn: bool) -> bool:\n if out_of_bounds(move[0]) == True or out_of_bounds(move[1]) == True:\n return False\n \n if move[0] == move[1]:\n return False\n\n if is_current_players_piece(piece_at_position(move[0], board), False) and whites_turn == True:\n return False\n elif is_current_players_piece(piece_at_position(move[0], board), True) and whites_turn == False:\n return False\n\n\n if piece_at_position(move[1], board) in WHITE_PIECES and whites_turn == True:\n return False\n elif piece_at_position(move[1], board) in BLACK_PIECES and whites_turn == False:\n return False\n\n\n if move[1] not in get_possible_moves(move[0], board):\n return False\n\n\n test_board = board\n test_board = update_board(test_board, move)\n if is_in_check(test_board, True) and whites_turn == True:\n return False\n elif is_in_check(test_board, False) and whites_turn == False:\n return False\n\n return True",
"def any_legal_move(player, board):\n return any(Othello.is_legal(sq, player, board) for sq in Othello.squares())",
"def is_win(state: StonehengeState) -> bool:\n moves = []\n for move in state.get_possible_moves():\n new_state = deepcopy(state.make_move(move))\n moves.append(new_state.finished())\n return any(moves)",
"def valid_move(self, row, col):\n if not self._game_over:\n i_row, i_col = row-1, col-1\n #i_row and i_col wil be used to index the board (hence the i)\n (valid, flip_lst) = self._valid_placement(i_row, i_col)\n #print(\"FOR TESTING. Tiles Flipped: \", flip_lst)\n \n if valid:\n #Big Change: You decided to make determining validity\n # and flipping separate operations\n self._flip(i_row, i_col, flip_lst)\n else:\n print(\"\\nPlease enter a valid move!\")\n return False\n\n if self._board_is_full():\n self._game_over = True\n self._set_winner() \n \n self._switch_turn(self._turn)\n if not self._valid_move_exists(): #Check if the other player has any valid moves\n print(\"\\nNo valid moves exist for {0}. {0}'s turn has been skipped\".format(self._turn))\n self._switch_turn(self._turn) #Switch turn back to player before skip was determined\n if not self._valid_move_exists(): #Check if the other player has any valid moves\n print(\"No valid moves exist for {0}. {0}'s turn has been skipped\".format(self._turn))\n print(\"No moves exist for either player. GAME OVER\")\n self._game_over = True\n self._set_winner()\n return False\n\n return True\n elif self._game_over:\n print(\"The game is over. No more moves can be made!\")\n #TODO: Replace this^ with an exception later?\n return False",
"def is_valid(self, move):\r\n return move > 10 and move < 89",
"def is_allowed(self, cpos):\n if self.step is None:\n return True\n \n # has the player clicked on one of the allowed cells?\n if (cpos in self.step.toclick):\n # mark step as finished\n self.step.finished = True\n return True\n return False",
"def test_valid_move(self, move):\n if self.game_state[move[0]][move[1]] is not None:\n return False\n return True",
"def validBoard():\r\n\r\n\tglobal move1, move2\r\n\r\n\tif move1==move2 or move1-move2==1:\r\n\t\treturn True\r\n\telse:\r\n\t\treturn False",
"def is_legal_move(state, action, player, rewarding_move=False): # TODO: Update this function to an more\n # optimized one.\n action = action.get_action_as_dict()\n if rewarding_move:\n if player == state.get_next_player() == state.get_latest_player():\n if action['action_type'] == YoteActionType.STEAL_FROM_HAND and state.in_hand[player * -1] > 0:\n return True\n elif action['action_type'] == YoteActionType.STEAL_FROM_BOARD:\n opponent_piece = state.get_board().get_player_pieces_on_board(Color(player * -1))\n if opponent_piece and action['action']['at'] in opponent_piece:\n return True\n return False\n else:\n if state.get_next_player() == player:\n if action['action_type'] == YoteActionType.ADD and state.in_hand[player] > 0:\n empty_cells = state.get_board().get_all_empty_cells()\n if empty_cells and action['action']['to'] in empty_cells:\n return True\n elif action['action_type'] == YoteActionType.MOVE:\n if state.get_board().get_cell_color(action['action']['at']) == Color(player):\n effective_moves = YoteRules.get_effective_cell_moves(state, action['action']['at'], player)\n if effective_moves and action['action']['to'] in effective_moves:\n return True\n return False\n return False",
"def has_won(board, player):\r\n return False",
"def legalMove(self,p,intMove):\n mPos = self.movePos(p,intMove)#board position of move\n if(self.inBounds(mPos)!=True):#Can't make move out of board bounds\n return False\n #if(p.color != self.whoseTurn):#Can't make move if it's not players pawn\n # return False\n if(intMove==0):#to move forward the node must be empty\n return (self.gameState[mPos.get()] == EMPTY)\n else:#to attack the node must have an enemy\n return (self.gameState[mPos.get()] == self.togglePlayer(p.color))"
] | [
"0.80388695",
"0.7600481",
"0.76002926",
"0.75457877",
"0.73324925",
"0.7276734",
"0.72636664",
"0.71992004",
"0.71682996",
"0.7102028",
"0.7098147",
"0.7088383",
"0.70783705",
"0.70189273",
"0.6986816",
"0.69830495",
"0.69700056",
"0.69603264",
"0.6956357",
"0.6952261",
"0.6908935",
"0.6897999",
"0.68950266",
"0.68380505",
"0.6813737",
"0.6810564",
"0.6810527",
"0.6808044",
"0.68060833",
"0.6793837"
] | 0.77968675 | 1 |
Which player should move next? Returns None if no legal moves exist. | def next_player(self,board, prev_player):
        opp = self.opponent(prev_player)
        opp_can_move = self.any_legal_move(opp, board)
        prev_can_move = self.any_legal_move(prev_player, board)
        if not opp_can_move and not prev_can_move:
            # Neither side has a legal move: the game is over.
            return None
        elif not opp_can_move:
            # The opponent must pass, so the previous player moves again.
            return prev_player
        else:
            return opp
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def next_player(board, prev_player):\n opp = Othello.opponent(prev_player)\n if Othello.any_legal_move(opp, board):\n return opp\n elif Othello.any_legal_move(prev_player, board):\n return prev_player\n return None",
"def player_move():\n\tmove = None\n\twhile move not in moves:\n\t\tmove = raw_input(\"What is your move %s? --> \" % name)\n\treturn move",
"def game_move(self):\n\t\t# make a note of the player who isn't playing\n\t\tfor x in self.players.keys():\n\t\t\tif x != self.nextPlayer:\n\t\t\t\totherPlayer = x\n\t\t\t\tbreak\n\t\t\n\t\t\n\t\t# If there are no remaining moves for this player, either the other\n\t\t# player has won or it's a draw\n\t\t# self.expansions = 1\n\t\tself.expansionCounter.count = 1\n\t\tif len(self.state.successors()) == 0:\n\t\t\tif self.state.is_win(otherPlayer):\n\t\t\t\treturn (None, otherPlayer)\n\t\t\telse:\n\t\t\t\t# None, None for a draw\n\t\t\t\treturn (None, None)\n\t\t\t\n\t\t# allow the player max_expansions for this turn\n\t\t# self.expansions = self.max_expansions\n\t\tself.expansionCounter.count = self.max_expansions\n\t\t\n\t\tnextPlayer = self.players[self.nextPlayer]\n\t\tlastPlayer = None\n\t\t\n\t\t# player may throw an exception\n\t\ttry:\n\t\t\t# get player's move, make sure we don't modify the current state\n\t\t\tmove = nextPlayer.move(self.state.get_player_state(self.nextPlayer), \n\t\t\t\t\t self.visitedStates)\n\t\t\t# player may give up\n\t\t\tif move.is_forfeit():\n\t\t\t\tprint \"Player\", self.nextPlayer, \"forfeits.\"\n\t\t\t\treturn (None, otherPlayer)\n\t\t\t# player may return illegal move\n\t\t\tif not self.state.is_valid_move(move):\n\t\t\t\tprint \"Illegal move returned by player\", self.nextPlayer, \\\n\t\t\t\t\t\t\"(\", self.players[self.nextPlayer].get_name(), \")\"\n\t\t\t\treturn (move, otherPlayer)\n\t\t\t# this player is now last player\n\t\t\tlastPlayer = self.nextPlayer\n\t\t\t# get the new next player and make the indicated move\n\t\t\tself.nextPlayer, clear = self.state.move(move, True)\n\t\t\tif clear:\n\t\t\t\tself.clear_repeat()\n\t\texcept:\n\t\t\tprint \"Exception thrown by player\", self.nextPlayer, \\\n\t\t\t\t\t\t\"(\", self.players[self.nextPlayer].get_name(), \")\"\n\t\t\tprint\n\t\t\ttraceback.print_exc()\n\t\t\tprint\n\t\t\treturn (None, otherPlayer)\n\t\t\n\t\tos.chdir(self.wd)\n\t\t\n\t\t# may be a repeated state IF the game cycles\n\t\tif self.is_repeat(self.state):\n\t\t\tself.state.handle_cycle()\n\t\t# otherwise, if the game cycles, note that we've been here\n\t\telif self.state.repeats():\n\t\t\tself.visitedStates.add(self.state.repeated_rep())\n\t\t\t\n\t\t# player may have sacrificed the game\n\t\tif self.state.is_win(otherPlayer):\n\t\t\treturn (move, otherPlayer)\n\t\t\n\t\t# player may have won\n\t\tif self.state.is_win(lastPlayer):\n\t\t\treturn (move, lastPlayer)\n\t\t\n\t\t# nobody's won or lost yet\n\t\treturn (move, None)",
"def move(self):\r\n their_move = self.last_moves[\"their_move\"]\r\n return (their_move == \"\" and random.choice(moves) or their_move)",
"def get_move(self, board, possible_moves, player_1_or_2):\n\n # Given a Tic-Tac-Toe 3x3 board position where 1 => current player's square,\n # -1 => opponent's square, 0 => blank square,\n # this will return the current player's best move [as the x and y indexes into \n # the board array.]\n # The second input parameter, player_1_or_2, is 1 or -1 to indicate which player's\n # move it is. \n \n print('RL ~ Current player 1 or 2 (= -1):', player_1_or_2)\n \n print('RL ~ Current board: ')\n print(board)\n \n print('RL ~ possible_moves:', possible_moves)\n\n next_move = () \n\n # This will be the best move i.e. the move with the current\n # value of highest winning probability except when it is making exploratory\n # (as opposed to greedy) moves.\n\n next_move = self.board_position_states.get_next_move(board, possible_moves, self.current_player)\n\n next_move_location_tuple = possible_moves[next_move]\n board[next_move_location_tuple] = self.current_player\n\n self.list_board_positions_moved_to.append(board.copy()) # This board that we are\n # appending here could be changed by the next line of code, for example.\n # Hence we need to make a copy\n\n board[next_move_location_tuple] = 0 # undo the move in case it affects the calling method.\n\n return next_move",
"def _get_player_move(self) -> Tile:\n if not self.game_state:\n raise RuntimeError(\"Cannot call get_player_move when the game has not started!\")\n current_player = next(player for player in self.player_list if player.name == self.current_turn.name)\n if current_player is None:\n raise RuntimeError(\"Attempted to get player move from a player who does not exist!\")\n return current_player.move()",
"def player(board):\n #X ALWAYS gets first move, alternates with each additional move\n curr_moves = actions(board)\n if (board == initial_state()):\n return X\n if(len(curr_moves) % 2 == 0):\n return O\n else:\n return X",
"def get_best_move(self):\n moves1 = self.get_next_moves1() # moves1 represents all legal moves.\n moves2 = self.get_next_moves2() # moves2 represents the moves that allow the AI to score a box.\n moves3 = self.get_next_moves3() # moves3 represents the moves that will allow the player to score a box.\n\n\n if len(moves1) == 0: # the siuation that there is no legal move\n return self\n if len(moves2) != 0:\n return moves2[len(moves2) // 2] # the siuation that there is(are) move(s) to score\n\n elif len(moves3) != 0:\n return moves3[len(moves3) // 2] # the siuation that there is(are) moves(s) to allow the player to score\n\n else:\n return moves1[len(moves1) // 2] # if there is no better moves, the AI will play sequentially, starting from the top left.",
"def decide_next_move(self):\n pass",
"def get_move(self, board):\n # First, check if we can win in the next move\n winning_move = self.get_winning_move(board, self.letter)\n if winning_move is not None:\n return winning_move\n # Check if the player could win on their next move, and block them.\n blocking_move = self.get_winning_move(board, self.opponent_letter)\n if blocking_move is not None:\n return blocking_move\n # Try to take one of the corners, if they are free.\n corner_move = self.move_in_a_corner(board)\n if corner_move is not None:\n return corner_move\n # Try to take the center, if it is free.\n if board.size % 2 == 1:\n if board.is_position_availible(board.letters[board.size // 2]\n + board.numbers[board.size // 2]):\n return board.letters[board.size // 2] + board.numbers[board.size // 2]\n # Move on one of the sides.\n return self.choose_random_move_from_list(board, list(board.positions.keys()))",
"def get_next_player(current_player: Optional[str]) -> str:\n if current_player == c.X:\n return c.O\n else:\n return c.X",
"def next_move(self, board):\n \n return self.best_move(self.score_columns(board))",
"def choose_move(self):\r\n \r\n return None",
"def next_player(self):\n return next(self.next_tour)",
"def next_move(board, player):\n \n move_row = \"move\"\n move_column = \"move\"\n\n while not move_row.isnumeric():\n move_row = input(\"{}, pick row to place your {}. > \".format(player.name, player.char))\n while not move_column.isnumeric(): \n move_column = input(\"Pick column in row {} to place your {}. > \".format(move_row, player.char))\n\n move_row = int(move_row)\n move_column = int(move_column)\n\n move = Move(player, (move_row, move_column))\n \n # Check if move is out of bounds\n if (move_row >= len(board.current_board) or\n move_column >= len(board.current_board)):\n print(\"Move out of bounds. Choose a valid move.\")\n return board\n\n # Check if space is already used\n if board.current_board[move_row][move_column] != \"-\":\n print(\"Spot already played. Pick an unused space.\")\n return board\n\n board.last_move = player.name\n board.add_move(move)\n\n return board",
"def best_move(self):\n if self._move is not None:\n return self._move\n else:\n return self.pass_move",
"def takeStrategicMove():\r\n\tglobal move1, move2\r\n\r\n\tif move1==0 or move2==0:\r\n\t\tif validMove(1):\r\n\t\t\treturn 1\r\n\t\telif validMove(5):\r\n\t\t\treturn 5\r\n\telif winningMove():\r\n\t\treturn winningMove()\t\t\r\n\telif blockingMove():\r\n\t\treturn blockingMove()\r\n\telse:\r\n\t\treturn takeNaiveMove()",
"def choose_move(self, board):\n if self.opp == Player.HUMAN:\n time.sleep(4)\n if self.type == Player.HUMAN:\n move = input(\"Please enter your move:\")\n while not board.legalMove(self, move):\n print(move, \"is not valid\")\n move = input(\"Please enter your move\")\n return move\n elif self.type == Player.RANDOM:\n move = choice(board.legalMoves(self))\n return move\n elif self.type == Player.MINIMAX:\n val, move = self.minimax_move(board, self.depth * 2,\n Player.MAX_PLAYER)\n board.last_move = move\n return move\n elif self.type == Player.ABPRUNE:\n val, move = self.alpha_beta_move(board, self.depth * 2,\n float('-inf'), float('inf'),\n Player.MAX_PLAYER)\n return move\n elif self.type == Player.CUSTOM:\n move = self.agent.getAction(board)\n self.agent.update_current_state(board, move)\n return move\n elif self.type == Player.MIX:\n return self.mixed_move(board)\n\n else:\n print(\"Unknown player type\")\n return -1",
"def player(self):\n legal = self.board.legal_move(self.black)\n if(len(legal) == 0):\n self.p_no_move = 1\n print(\"No legal move for player!\")\n self.computer_turn = True\n self.player_turn = False",
"def move(self):\r\n my_move = self.last_moves[\"my_move\"]\r\n return (my_move != \"\" and moves[(moves.index(my_move)+1) % 3] or\r\n random.choice(moves))",
"def determineNextMove(playerLocation, opponentLocation, coins):\n global packages, route_table, best_path, best_weight, route\n if len(best_path) == 0:\n current_package = packages.pop(0)\n exhaustive(current_package, playerLocation, [], 0, (route_table,dists))\n api.debug(best_path)\n return u.direction(playerLocation, best_path.pop(0))",
"def choose_absolute_move(self):\n move = self.choose_move()\n if self.player_name == 'A':\n return move\n # Player B, revert the IDs\n return (move + 6) % 12",
"def next_move(self):\n\n # Calculate all paths to destination from current location and time.\n solution = self.calculate_best_solution((None, None), self.currentTurn, [self.character.path[-1]],\n self.character.spent)\n\n # Add travel weight to spent.\n if solution[1] is not None and solution[1][0] != solution[1][1]:\n self.character.spent += self.pekingMap.get_vertex(solution[1][0]).weight(solution[1][1])\n\n # Return next point in shortest path to location.\n if solution[1] is not None:\n return solution[1][1]\n\n return None",
"def next_player(self):\n # Counter is a useful class that counts objects.\n count = Counter(self.board)\n if count.get('X', 0) > count.get('O', 0):\n return 'O'\n return 'X'",
"def mm_move(board, player):\n result = board.check_win() # get result of the current board\n if result == None:\n move_list = board.get_empty_squares() # get the tree branches and possible next moves\n best = (None, (-1, -1))\n for step in move_list:\n bd_clone = board.clone()\n bd_clone.move(step[0], step[1], player) #make a move on a cloned board\n next_player = provided.switch_player(player)\n next_score = mm_move(bd_clone, next_player) #make a recursive call to mm_move() pasing the cloned board and the 'other' player\n if player == 3: #if it is oppo O--min\n if best[0] == None or (next_score[0] < best[0]):\n best = (next_score[0], step)\n #print best\n elif player ==2: #if it is X--max\n if best[0] == None or (next_score[0] > best[0]):\n best = (next_score[0], step)\n return best\n else:\n return SCORES[result], (-1, -1)",
"def get_next_move(self):\n return int(input('Enter your move: '))",
"def choose_next_player(self):\n player_index = self.players.index(self.current_player)\n if self.direction_clock_wise:\n if player_index >= len(self.players) - 1:\n self.current_player = self.players[0]\n else:\n self.current_player = self.players[player_index + 1]\n else:\n if player_index <= 0:\n self.current_player = self.players[len(self.players) - 1]\n else:\n self.current_player = self.players[player_index - 1]",
"def find_best_move(state: GameState) -> None:",
"def determine_best_move(self, board):\r\n\r\n # Check if the AI is smart, run a smart version of the AI\r\n if self.is_smart:\r\n\r\n # Iterate through every column\r\n # keep track of any restricted moves (moves that may cause the challenger to win)\r\n # and keep track of the final result of where the AI should move\r\n restricted = []\r\n ai_move = -1\r\n for column in range(board.width):\r\n\r\n # Create two copies of the board to emulate AI moves and player moves\r\n ai_copy = board.copy()\r\n player_copy = board.copy()\r\n\r\n # Check if the current column is full, move onto the next column\r\n if board.is_column_full(column):\r\n continue\r\n\r\n # Column is not full; Emulate AI and player moves at this column\r\n ai_copy.add_piece(column, is_challenger = False) # AI move\r\n player_copy.add_piece(column, is_challenger = True) # Player move\r\n\r\n # Check if either the ai_copy or player_copy has a win in it\r\n ai_win_check = ai_copy.check_for_winner()\r\n player_win_check = player_copy.check_for_winner()\r\n\r\n # If either board has a win in it, make that the AI move\r\n # if the player would go to this current column in their next move\r\n # they would win, the AI should try to stop it\r\n # if the ai would go to this current column in its next move\r\n # they would win, the AI should immediately go here\r\n if ai_win_check == False or player_win_check == True:\r\n ai_move = column\r\n break\r\n\r\n # Neither of the moves would win in either board,\r\n # emulate the next moves on the same column\r\n else:\r\n\r\n # Check if the column is full, move onto the next column\r\n if ai_copy.is_column_full(column):\r\n continue\r\n\r\n # Column is not full, emulate the player move on the AI copy\r\n ai_copy.add_piece(column, is_challenger = True)\r\n\r\n # Check if the player would win; If so, do not let the AI go to this column\r\n player_win_check = ai_copy.check_for_winner()\r\n if player_win_check == True:\r\n restricted.append(column)\r\n\r\n # There has been no ai_move generated yet\r\n # generate a random column\r\n if ai_move == -1:\r\n\r\n # Check if the board is full, there must be a draw\r\n if board.is_board_full():\r\n return False\r\n\r\n # The board is not full, generate a random column that is not full\r\n while True:\r\n ai_move = randint(0, board.width - 1)\r\n\r\n # Check if the column is full, continue generating a random column\r\n if board.is_column_full(ai_move):\r\n continue\r\n\r\n # Check to see if this is the only available column to go to\r\n # or check to see if this column is a restricted move\r\n elif board.board[0].count(None) == 1 or len(restricted) == board.board[0].count(None):\r\n break\r\n\r\n # Check to see if the move is not a restricted move\r\n elif ai_move not in restricted:\r\n break\r\n\r\n # The AI is not smart, choose a random place\r\n else:\r\n ai_move = randint(0, board.width - 1)\r\n while board.is_column_full(ai_move):\r\n ai_move = randint(0, board.width - 1)\r\n\r\n # Make the AI go to its chosen move\r\n board.add_piece(ai_move, is_challenger = False)\r\n return True",
"def get_next_player(self, player):\r\n return player * -1"
] | [
"0.7872411",
"0.74398774",
"0.71945137",
"0.71246403",
"0.70864546",
"0.70783263",
"0.705533",
"0.7050984",
"0.70068854",
"0.6994962",
"0.69577587",
"0.6937091",
"0.68736523",
"0.68567985",
"0.6854401",
"0.6811088",
"0.6777367",
"0.6775967",
"0.67653716",
"0.67629874",
"0.6720475",
"0.66823167",
"0.6679926",
"0.6675034",
"0.6658754",
"0.6657128",
"0.6638265",
"0.66152275",
"0.66124874",
"0.6603546"
] | 0.74983096 | 1 |
Compute player's score (number of player's pieces minus opponent's). | def score(self,player, board):
        # Positional evaluation: each square contributes its SQUARE_WEIGHTS value.
        numPlayer = 0
        numOpp = 0
        opp = self.opponent(player)
        for i in self.squares():
            if board[i] == player:
                numPlayer += SQUARE_WEIGHTS[i]
            elif board[i] == opp:
                # Only opponent pieces count against the player; empty squares are ignored.
                numOpp += SQUARE_WEIGHTS[i]
        return numPlayer - numOpp
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def score2(self,player, board):\r\n numPlayer = 0\r\n numOpp = 0\r\n for i in self.squares():\r\n if board[i] == player:\r\n numPlayer+= 1\r\n else:\r\n numOpp+=1\r\n return numPlayer-numOpp",
"def score(player, board):\n mine, theirs = 0, 0\n opp = Othello.opponent(player)\n for sq in Othello.squares():\n piece = board[sq]\n if piece == player: mine += 1\n elif piece == opp: theirs += 1\n return mine - theirs",
"def calculate_score(player_cards):\n score = sum(player_cards)\n return score",
"def custom_score(game, player):\n if game.is_loser(player):\n return -math.inf\n\n if game.is_winner(player):\n return math.inf\n\n opp_moves = game.get_legal_moves(game.get_opponent(player))\n own_moves = game.get_legal_moves(player)\n\n return len(own_moves) / max(len(opp_moves), 1e-6)",
"def compute_score(window, computer_piece):\n score = 0\n if window.count(computer_piece) == 4:\n score += 100\n elif window.count(computer_piece) == 3 and window.count(0) == 1:\n score += 5\n elif window.count(computer_piece) == 2 and window.count(0) == 2:\n score += 2\n if window.count(PLAYER_PIECE) == 2 and window.count(0) == 2:\n score -= 1\n if window.count(PLAYER_PIECE) == 3 and window.count(0) == 1:\n score -= 100\n return score",
"def custom_score(game, player):\n \n # get avaliable moves for each player\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n \n # return different between # of my agent's move and oppenent's\n return float(own_moves - opp_moves)",
"def get_score(self, player: int) -> int:\n score = 0\n i = 0\n while i < len(self.leylines):\n score += 1 if self.leylines[i].player == player else 0\n score += 1 if self.rights[i].player == player else 0\n score += 1 if self.lefts[i].player == player else 0\n i += 1\n return score",
"def improved_score(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n return float(own_moves - opp_moves)",
"def custom_score_3(game, player):\n if game.is_loser(player):\n return -math.inf\n\n if game.is_winner(player):\n return math.inf\n\n opponent = game.get_opponent(player)\n\n opp_moves = game.get_legal_moves(opponent)\n own_moves = game.get_legal_moves(player)\n\n # Calculate the normalized distance if both players are on the board.\n player_loc = game.get_player_location(player)\n opp_loc = game.get_player_location(opponent)\n norm_dis = 0\n if opp_loc and player_loc:\n norm_dis = distance(player_loc, opp_loc) / 8.46 # 8.46 is distance((0, 0), (6, 6))\n\n return len(own_moves) / max(len(opp_moves), 1e-6) - norm_dis",
"def _evaluate_num_pieces(self, player):\n evaluation = 0\n if player is Player.black:\n evaluation += self.num_black_pieces * 10\n evaluation -= self.num_white_pieces * 10\n evaluation += self.num_black_kings * 10\n evaluation -= self.num_white_kings * 10\n elif player is Player.white:\n evaluation -= self.num_black_pieces * 10\n evaluation += self.num_white_pieces * 10\n evaluation -= self.num_black_kings * 10\n evaluation += self.num_white_kings * 10\n\n return evaluation",
"def custom_score(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n own_moves = number_moves(game, player) / 8\n if own_moves == 0:\n return float(\"-inf\")\n\n opp_moves = number_moves(game, game.get_opponent(player)) / 8\n if opp_moves == 0:\n return float(\"inf\")\n\n move_ratio = (own_moves * 8) / (opp_moves * 8) / 8\n\n # Calculate centerness_score\n completeness = completeness_of_game(game)\n centerness_score = 0\n if completeness < 0.5:\n centerness_max = (game.width / 2.)**2 + (game.height / 2.)**2\n\n own_centerness = centerness(game, player) / centerness_max\n opp_centerness = centerness(game, game.get_opponent(player)) / centerness_max\n centerness_ratio = (own_centerness * centerness_max) / (centerness_max * opp_centerness + 0.1) / centerness_max\n\n centerness_score = -1 * own_centerness + opp_centerness - centerness_ratio\n\n return 2 * own_moves - 2 * opp_moves + 2 * move_ratio + centerness_score",
"def custom_score(game, player):\n \n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n if game.move_count < 15:\n return center_modified_score(game, player)\n\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n return float(own_moves - opp_moves)",
"def calculate_scores(self):\n words = self.walk_board()\n player_scores = {}\n for word in words:\n player = word.get_owning_player()\n if player not in player_scores:\n player_scores[player] = 0\n player_scores[player] += word.get_score()\n return player_scores",
"def custom_score_2(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n own_moves = number_moves(game, player)\n if own_moves == 0:\n return float(\"-inf\")\n\n opp_moves = number_moves(game, game.get_opponent(player))\n if opp_moves == 0:\n return float(\"inf\")\n\n move_ratio = own_moves / opp_moves\n\n completeness = completeness_of_game(game)\n centerness_score = 0\n\n if completeness < 0.5:\n own_centerness = centerness(game, player)\n opp_centerness = centerness(game, game.get_opponent(player))\n centerness_ratio = own_centerness / opp_centerness + 0.1\n\n center_score = -1 * own_centerness + opp_centerness - centerness_ratio\n\n return 2 * own_moves - 2 * opp_moves + 2 * move_ratio + centerness_score",
"def weighted_score(player, board):\n opp = Othello.opponent(player)\n total = 0\n for sq in Othello.squares():\n if board[sq] == player:\n total += SQUARE_WEIGHTS[sq]\n elif board[sq] == opp:\n total -= SQUARE_WEIGHTS[sq]\n return total",
"def compute_utility(board, color):\n player1_score = 0\n player2_score = 0\n\n score = get_score(board)\n if color == 1:\n return score[0] - score[1]\n else:\n return score[1] - score[0]",
"def custom_score_2(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n if game.is_winner(player):\n return float(\"inf\")\n\n # Aim to maximise your own available moves vs the opponent (Factor 2)\n\n opponent = game.get_opponent(player)\n return float(len(game.get_legal_moves(player)))-2.0*float(len(game.get_legal_moves(opponent)))",
"def custom_score_3(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n own_moves = number_moves(game, player)\n if own_moves == 0:\n return float(\"-inf\")\n\n opp_moves = number_moves(game, game.get_opponent(player))\n if opp_moves == 0:\n return float(\"inf\")\n\n #Between 1-8\n return own_moves / opp_moves",
"def custom_score_2(game, player):\n \n # get avaliable moves for each player\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n \n # shortcut to definite state:\n # 1. my agent win -> return very high score\n if opp_moves == 0:\n return float(\"inf\")\n # 2. opponenent's agent win -> return very low score\n elif own_moves == 0:\n return float(\"-inf\")\n\n # score: avaliable moves ratio\n return float(own_moves/opp_moves)",
"def compute_player_score():\n\n progress_bar = ProgressBar(label=\"Computing universes\")\n\n survivals_count = 0\n for i in range(PARALLEL_UNIVERSES_COUNT):\n if simulate_universe():\n survivals_count += 1\n progress_bar.set_progression((i + 1) / PARALLEL_UNIVERSES_COUNT)\n\n progress_bar.end(\"\\n\\n\")\n\n return survivals_count / PARALLEL_UNIVERSES_COUNT",
"def basic_evaluate(board):\n if board.is_game_over():\n # If the game has been won, we know that it must have been\n # won or ended by the previous move.\n # The previous move was made by our opponent.\n # Therefore, we can't have won, so return -1000.\n # (note that this causes a tie to be treated like a loss)\n score = -1000\n else:\n score = board.longest_chain(board.get_current_player_id()) * 10\n # Prefer having your pieces in the center of the board.\n for row in range(6):\n for col in range(7):\n if board.get_cell(row, col) == board.get_current_player_id():\n score -= abs(3-col)\n elif board.get_cell(row, col) == board.get_other_player_id():\n score += abs(3-col)\n\n return score",
"def get_game_score(self):\n if self.game_is_tied():\n return 0\n elif self.is_game_won():\n my_available_steps = self.steps_available(self.loc)\n opp_available_steps = self.steps_available(self.opponent_loc)\n my_score = self.my_score - self.penalty_score if my_available_steps == 0 else self.my_score\n opp_score = self.opponent_score - self.penalty_score if opp_available_steps == 0 else self.opponent_score\n return (my_score - opp_score) / (abs(my_score) + abs(opp_score))\n else:\n if abs(self.my_score) + abs(self.opponent_score) == 0:\n return 0\n return (self.my_score - self.opponent_score) / (abs(self.my_score) + abs(self.opponent_score))",
"def get_move_score(board, player_piece):\n # compute score horizontal\n score = 0\n center_elements = [board[i][len(board[0]) // 2] for i in range(len(board))]\n score += center_elements.count(player_piece) * 3\n\n for row in board:\n for column_count in range(len(board[0]) - 3):\n window = row[column_count: column_count + 4]\n score += compute_score(window, player_piece)\n\n # compute score vertical\n for column_index in range(len(board[0])):\n column_elements = [board[i][column_index] for i in range(len(board))]\n for row_count in range(len(board) - 3):\n window = column_elements[row_count: row_count + 4]\n score += compute_score(window, player_piece)\n\n # compute score on diagonals\n for row_index in range(len(board) - 3):\n for column_index in range(len(board[0]) - 3):\n window = [board[row_index + i][column_index + i] for i in range(4)]\n score += compute_score(window, player_piece)\n\n for row_index in range(len(board) - 3):\n for column_index in range(len(board[0]) - 3):\n window = [board[row_index + 3 - i][column_index + i] for i in\n range(4)]\n score += compute_score(window, player_piece)\n\n return score",
"def _calculate_score(self):\n mul = self._check_board()\n if mul > 0:\n inc = 100 * mul + ((mul - 1) * 25)\n self.score += inc",
"def custom_score_3(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n #own_moves = len(game.get_legal_moves(player))\n\n #if game.move_count < 23:\n # opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n # return float(own_moves - opp_moves)\n\n return float(free_area_score(game, player) - free_area_score(game, game.get_opponent(player)))",
"def score_position(board: bytearray, piece: int) -> int:\n \n opp_piece: int = _PLAYER if piece == _AI else _PLAYER\n score: int = 0\n\n # score 3 points for every point in _CENTER\n for i in _CENTER:\n if board[i] == piece:\n score += 3\n\n # Evaluate each 4-piece \"window\" and score with heuristics below\n for w in _WINDOWS:\n count: int = 0\n empties: int = 0\n opp_count: int = 0\n \n # Count what's in this window\n for i in w:\n if board[i] == piece:\n count += 1\n elif board[i] == opp_piece:\n opp_count += 1\n else:\n empties += 1\n\n # Our heuristics:\n # - playing in the _CENTER is strong\n # - three out a window of four is strong\n # - two out of a window of four is good\n window_score: int = 0\n \n if count == 4:\n window_score += 10000\n elif count == 3 and empties == 1:\n window_score += 5\n elif count == 2 and empties == 2:\n window_score += 2\n\n # defensive angle: don't let _PLAYER get a window w/three pieces\n if opp_count == 3 and empties == 1:\n window_score -= 4\n\n score += window_score\n \n return score",
"def counter_opponent_win(self):\n\n # get essential values\n board = self.get_game_space()\n affinity = self.get_opponent().get_affinity()\n \n # pick the right check for the game we are playing\n if isinstance(board, Gomoku):\n \n # get the possible ways for the opponent to win\n possible_wins = board.get_wins(affinity)\n winning_blocks = board.get_winning_blocks(affinity)\n best_move = None\n\n # sort the best win to counter \n for win in possible_wins:\n if best_move is None: best_move = win\n elif win[0] <= best_move[0]: \n if win[1] >= best_move[1]:\n best_move = win\n if best_move is not None: possible_wins.remove(best_move)\n return best_move",
"def custom_score(game, player):\n \n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n \n # First obtain the improved score\n player_legal_moves = game.get_legal_moves(player)\n opponent = game.get_opponent(player)\n opponent_legal_moves = game.get_legal_moves(opponent)\n improved = len(player_legal_moves) - len(opponent_legal_moves)\n if improved != 0:\n return float(improved)\n \n # Second get differences from center\n center_coordinates = center_y, center_x = get_center_coordinates(game)\n player_coordinates = game.get_player_location(player)\n opponent_coordinates = game.get_player_location(opponent)\n player_center_dist = get_distances_from_center(center_coordinates, player_coordinates)\n opponent_center_dist = get_distances_from_center(center_coordinates, opponent_coordinates)\n center_dist_diff = player_center_dist - opponent_center_dist\n \n # Third obtain next_moves\n player_next_moves = [get_next_moves(game, move, list(move)) for move in player_legal_moves]\n opponent_next_moves = [get_next_moves(game, move, list(move)) for move in opponent_legal_moves] \n improved_next = len(player_next_moves) - len(opponent_next_moves)\n \n # Put player and opponent feature differences in a tuple/vector surrogoate\n feature_diff_vector = (improved, center_dist_diff, improved_next)\n \n # Provide a weighting vector for the features of each player-participant\n weight_vector = (1.5,0.1,1.0)\n # Calculate the return value = weighted difference of players' features\n weighted_difference_dot_product = sum(p*q for p,q, \\\n in zip(feature_diff_vector, weight_vector))\n \n return float(weighted_difference_dot_product)",
"def custom_score(game, player):\n # TODO: finish this function!\n if game.is_winner(player): # check to see if player is in state winner\n #print(\"You win!\")\n return math.inf # abstraction of score, +inf equates to a win\n elif game.is_loser(player):\n #print(\"You lose!\")\n return -math.inf # abstraction of score, -inf equates to a loss\n\n # Opponent\n opponent = game.get_opponent(player)\n\n # Remaining spaces left on the board\n rem_spaces = len(game.get_blank_spaces())\n\n # number of agent's available moves\n no_moves = len(game.get_legal_moves(player))\n\n # number of opponent's available moves\n opp_moves = len(game.get_legal_moves(opponent))\n\n # evaluation of board \"goodness\"\n # using moves available to both players\n # Idea is player chooses moves with scores that maximise whilst minimizing\n # evaluate board states and positions as scores\n board_score = no_moves - opp_moves\n score = board_score/rem_spaces\n\n return float(score)",
"def open_move_score(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n return float(len(game.get_legal_moves(player)))"
] | [
"0.77100617",
"0.76665014",
"0.7596806",
"0.755954",
"0.74644375",
"0.7270104",
"0.72659296",
"0.7192504",
"0.71761346",
"0.70750165",
"0.70623815",
"0.7020574",
"0.6970311",
"0.69272745",
"0.69137883",
"0.68977886",
"0.6897169",
"0.68841004",
"0.68778896",
"0.68595576",
"0.685108",
"0.68043935",
"0.6797116",
"0.6742683",
"0.67382455",
"0.6693324",
"0.6664049",
"0.6657715",
"0.66431",
"0.6635757"
] | 0.7846785 | 0 |
Compute player's score (number of player's pieces minus opponent's). | def score2(self,player, board):
        # Simple disc-count evaluation: player's pieces minus opponent's pieces.
        numPlayer = 0
        numOpp = 0
        opp = self.opponent(player)
        for i in self.squares():
            if board[i] == player:
                numPlayer += 1
            elif board[i] == opp:
                # Empty squares belong to neither side and are not counted.
                numOpp += 1
        return numPlayer - numOpp
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def score(self,player, board):\r\n numPlayer = 0\r\n numOpp = 0\r\n for i in self.squares():\r\n if board[i] == player:\r\n numPlayer+= SQUARE_WEIGHTS[i]\r\n else:\r\n numOpp+=SQUARE_WEIGHTS[i]\r\n return numPlayer-numOpp",
"def score(player, board):\n mine, theirs = 0, 0\n opp = Othello.opponent(player)\n for sq in Othello.squares():\n piece = board[sq]\n if piece == player: mine += 1\n elif piece == opp: theirs += 1\n return mine - theirs",
"def calculate_score(player_cards):\n score = sum(player_cards)\n return score",
"def custom_score(game, player):\n if game.is_loser(player):\n return -math.inf\n\n if game.is_winner(player):\n return math.inf\n\n opp_moves = game.get_legal_moves(game.get_opponent(player))\n own_moves = game.get_legal_moves(player)\n\n return len(own_moves) / max(len(opp_moves), 1e-6)",
"def compute_score(window, computer_piece):\n score = 0\n if window.count(computer_piece) == 4:\n score += 100\n elif window.count(computer_piece) == 3 and window.count(0) == 1:\n score += 5\n elif window.count(computer_piece) == 2 and window.count(0) == 2:\n score += 2\n if window.count(PLAYER_PIECE) == 2 and window.count(0) == 2:\n score -= 1\n if window.count(PLAYER_PIECE) == 3 and window.count(0) == 1:\n score -= 100\n return score",
"def custom_score(game, player):\n \n # get avaliable moves for each player\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n \n # return different between # of my agent's move and oppenent's\n return float(own_moves - opp_moves)",
"def get_score(self, player: int) -> int:\n score = 0\n i = 0\n while i < len(self.leylines):\n score += 1 if self.leylines[i].player == player else 0\n score += 1 if self.rights[i].player == player else 0\n score += 1 if self.lefts[i].player == player else 0\n i += 1\n return score",
"def improved_score(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n return float(own_moves - opp_moves)",
"def custom_score_3(game, player):\n if game.is_loser(player):\n return -math.inf\n\n if game.is_winner(player):\n return math.inf\n\n opponent = game.get_opponent(player)\n\n opp_moves = game.get_legal_moves(opponent)\n own_moves = game.get_legal_moves(player)\n\n # Calculate the normalized distance if both players are on the board.\n player_loc = game.get_player_location(player)\n opp_loc = game.get_player_location(opponent)\n norm_dis = 0\n if opp_loc and player_loc:\n norm_dis = distance(player_loc, opp_loc) / 8.46 # 8.46 is distance((0, 0), (6, 6))\n\n return len(own_moves) / max(len(opp_moves), 1e-6) - norm_dis",
"def _evaluate_num_pieces(self, player):\n evaluation = 0\n if player is Player.black:\n evaluation += self.num_black_pieces * 10\n evaluation -= self.num_white_pieces * 10\n evaluation += self.num_black_kings * 10\n evaluation -= self.num_white_kings * 10\n elif player is Player.white:\n evaluation -= self.num_black_pieces * 10\n evaluation += self.num_white_pieces * 10\n evaluation -= self.num_black_kings * 10\n evaluation += self.num_white_kings * 10\n\n return evaluation",
"def custom_score(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n own_moves = number_moves(game, player) / 8\n if own_moves == 0:\n return float(\"-inf\")\n\n opp_moves = number_moves(game, game.get_opponent(player)) / 8\n if opp_moves == 0:\n return float(\"inf\")\n\n move_ratio = (own_moves * 8) / (opp_moves * 8) / 8\n\n # Calculate centerness_score\n completeness = completeness_of_game(game)\n centerness_score = 0\n if completeness < 0.5:\n centerness_max = (game.width / 2.)**2 + (game.height / 2.)**2\n\n own_centerness = centerness(game, player) / centerness_max\n opp_centerness = centerness(game, game.get_opponent(player)) / centerness_max\n centerness_ratio = (own_centerness * centerness_max) / (centerness_max * opp_centerness + 0.1) / centerness_max\n\n centerness_score = -1 * own_centerness + opp_centerness - centerness_ratio\n\n return 2 * own_moves - 2 * opp_moves + 2 * move_ratio + centerness_score",
"def custom_score(game, player):\n \n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n if game.move_count < 15:\n return center_modified_score(game, player)\n\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n return float(own_moves - opp_moves)",
"def calculate_scores(self):\n words = self.walk_board()\n player_scores = {}\n for word in words:\n player = word.get_owning_player()\n if player not in player_scores:\n player_scores[player] = 0\n player_scores[player] += word.get_score()\n return player_scores",
"def custom_score_2(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n own_moves = number_moves(game, player)\n if own_moves == 0:\n return float(\"-inf\")\n\n opp_moves = number_moves(game, game.get_opponent(player))\n if opp_moves == 0:\n return float(\"inf\")\n\n move_ratio = own_moves / opp_moves\n\n completeness = completeness_of_game(game)\n centerness_score = 0\n\n if completeness < 0.5:\n own_centerness = centerness(game, player)\n opp_centerness = centerness(game, game.get_opponent(player))\n centerness_ratio = own_centerness / opp_centerness + 0.1\n\n center_score = -1 * own_centerness + opp_centerness - centerness_ratio\n\n return 2 * own_moves - 2 * opp_moves + 2 * move_ratio + centerness_score",
"def weighted_score(player, board):\n opp = Othello.opponent(player)\n total = 0\n for sq in Othello.squares():\n if board[sq] == player:\n total += SQUARE_WEIGHTS[sq]\n elif board[sq] == opp:\n total -= SQUARE_WEIGHTS[sq]\n return total",
"def compute_utility(board, color):\n player1_score = 0\n player2_score = 0\n\n score = get_score(board)\n if color == 1:\n return score[0] - score[1]\n else:\n return score[1] - score[0]",
"def custom_score_2(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n if game.is_winner(player):\n return float(\"inf\")\n\n # Aim to maximise your own available moves vs the opponent (Factor 2)\n\n opponent = game.get_opponent(player)\n return float(len(game.get_legal_moves(player)))-2.0*float(len(game.get_legal_moves(opponent)))",
"def custom_score_3(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n own_moves = number_moves(game, player)\n if own_moves == 0:\n return float(\"-inf\")\n\n opp_moves = number_moves(game, game.get_opponent(player))\n if opp_moves == 0:\n return float(\"inf\")\n\n #Between 1-8\n return own_moves / opp_moves",
"def custom_score_2(game, player):\n \n # get avaliable moves for each player\n own_moves = len(game.get_legal_moves(player))\n opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n \n # shortcut to definite state:\n # 1. my agent win -> return very high score\n if opp_moves == 0:\n return float(\"inf\")\n # 2. opponenent's agent win -> return very low score\n elif own_moves == 0:\n return float(\"-inf\")\n\n # score: avaliable moves ratio\n return float(own_moves/opp_moves)",
"def compute_player_score():\n\n progress_bar = ProgressBar(label=\"Computing universes\")\n\n survivals_count = 0\n for i in range(PARALLEL_UNIVERSES_COUNT):\n if simulate_universe():\n survivals_count += 1\n progress_bar.set_progression((i + 1) / PARALLEL_UNIVERSES_COUNT)\n\n progress_bar.end(\"\\n\\n\")\n\n return survivals_count / PARALLEL_UNIVERSES_COUNT",
"def basic_evaluate(board):\n if board.is_game_over():\n # If the game has been won, we know that it must have been\n # won or ended by the previous move.\n # The previous move was made by our opponent.\n # Therefore, we can't have won, so return -1000.\n # (note that this causes a tie to be treated like a loss)\n score = -1000\n else:\n score = board.longest_chain(board.get_current_player_id()) * 10\n # Prefer having your pieces in the center of the board.\n for row in range(6):\n for col in range(7):\n if board.get_cell(row, col) == board.get_current_player_id():\n score -= abs(3-col)\n elif board.get_cell(row, col) == board.get_other_player_id():\n score += abs(3-col)\n\n return score",
"def get_game_score(self):\n if self.game_is_tied():\n return 0\n elif self.is_game_won():\n my_available_steps = self.steps_available(self.loc)\n opp_available_steps = self.steps_available(self.opponent_loc)\n my_score = self.my_score - self.penalty_score if my_available_steps == 0 else self.my_score\n opp_score = self.opponent_score - self.penalty_score if opp_available_steps == 0 else self.opponent_score\n return (my_score - opp_score) / (abs(my_score) + abs(opp_score))\n else:\n if abs(self.my_score) + abs(self.opponent_score) == 0:\n return 0\n return (self.my_score - self.opponent_score) / (abs(self.my_score) + abs(self.opponent_score))",
"def get_move_score(board, player_piece):\n # compute score horizontal\n score = 0\n center_elements = [board[i][len(board[0]) // 2] for i in range(len(board))]\n score += center_elements.count(player_piece) * 3\n\n for row in board:\n for column_count in range(len(board[0]) - 3):\n window = row[column_count: column_count + 4]\n score += compute_score(window, player_piece)\n\n # compute score vertical\n for column_index in range(len(board[0])):\n column_elements = [board[i][column_index] for i in range(len(board))]\n for row_count in range(len(board) - 3):\n window = column_elements[row_count: row_count + 4]\n score += compute_score(window, player_piece)\n\n # compute score on diagonals\n for row_index in range(len(board) - 3):\n for column_index in range(len(board[0]) - 3):\n window = [board[row_index + i][column_index + i] for i in range(4)]\n score += compute_score(window, player_piece)\n\n for row_index in range(len(board) - 3):\n for column_index in range(len(board[0]) - 3):\n window = [board[row_index + 3 - i][column_index + i] for i in\n range(4)]\n score += compute_score(window, player_piece)\n\n return score",
"def _calculate_score(self):\n mul = self._check_board()\n if mul > 0:\n inc = 100 * mul + ((mul - 1) * 25)\n self.score += inc",
"def custom_score_3(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n #own_moves = len(game.get_legal_moves(player))\n\n #if game.move_count < 23:\n # opp_moves = len(game.get_legal_moves(game.get_opponent(player)))\n # return float(own_moves - opp_moves)\n\n return float(free_area_score(game, player) - free_area_score(game, game.get_opponent(player)))",
"def score_position(board: bytearray, piece: int) -> int:\n \n opp_piece: int = _PLAYER if piece == _AI else _PLAYER\n score: int = 0\n\n # score 3 points for every point in _CENTER\n for i in _CENTER:\n if board[i] == piece:\n score += 3\n\n # Evaluate each 4-piece \"window\" and score with heuristics below\n for w in _WINDOWS:\n count: int = 0\n empties: int = 0\n opp_count: int = 0\n \n # Count what's in this window\n for i in w:\n if board[i] == piece:\n count += 1\n elif board[i] == opp_piece:\n opp_count += 1\n else:\n empties += 1\n\n # Our heuristics:\n # - playing in the _CENTER is strong\n # - three out a window of four is strong\n # - two out of a window of four is good\n window_score: int = 0\n \n if count == 4:\n window_score += 10000\n elif count == 3 and empties == 1:\n window_score += 5\n elif count == 2 and empties == 2:\n window_score += 2\n\n # defensive angle: don't let _PLAYER get a window w/three pieces\n if opp_count == 3 and empties == 1:\n window_score -= 4\n\n score += window_score\n \n return score",
"def counter_opponent_win(self):\n\n # get essential values\n board = self.get_game_space()\n affinity = self.get_opponent().get_affinity()\n \n # pick the right check for the game we are playing\n if isinstance(board, Gomoku):\n \n # get the possible ways for the opponent to win\n possible_wins = board.get_wins(affinity)\n winning_blocks = board.get_winning_blocks(affinity)\n best_move = None\n\n # sort the best win to counter \n for win in possible_wins:\n if best_move is None: best_move = win\n elif win[0] <= best_move[0]: \n if win[1] >= best_move[1]:\n best_move = win\n if best_move is not None: possible_wins.remove(best_move)\n return best_move",
"def custom_score(game, player):\n \n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n \n # First obtain the improved score\n player_legal_moves = game.get_legal_moves(player)\n opponent = game.get_opponent(player)\n opponent_legal_moves = game.get_legal_moves(opponent)\n improved = len(player_legal_moves) - len(opponent_legal_moves)\n if improved != 0:\n return float(improved)\n \n # Second get differences from center\n center_coordinates = center_y, center_x = get_center_coordinates(game)\n player_coordinates = game.get_player_location(player)\n opponent_coordinates = game.get_player_location(opponent)\n player_center_dist = get_distances_from_center(center_coordinates, player_coordinates)\n opponent_center_dist = get_distances_from_center(center_coordinates, opponent_coordinates)\n center_dist_diff = player_center_dist - opponent_center_dist\n \n # Third obtain next_moves\n player_next_moves = [get_next_moves(game, move, list(move)) for move in player_legal_moves]\n opponent_next_moves = [get_next_moves(game, move, list(move)) for move in opponent_legal_moves] \n improved_next = len(player_next_moves) - len(opponent_next_moves)\n \n # Put player and opponent feature differences in a tuple/vector surrogoate\n feature_diff_vector = (improved, center_dist_diff, improved_next)\n \n # Provide a weighting vector for the features of each player-participant\n weight_vector = (1.5,0.1,1.0)\n # Calculate the return value = weighted difference of players' features\n weighted_difference_dot_product = sum(p*q for p,q, \\\n in zip(feature_diff_vector, weight_vector))\n \n return float(weighted_difference_dot_product)",
"def custom_score(game, player):\n # TODO: finish this function!\n if game.is_winner(player): # check to see if player is in state winner\n #print(\"You win!\")\n return math.inf # abstraction of score, +inf equates to a win\n elif game.is_loser(player):\n #print(\"You lose!\")\n return -math.inf # abstraction of score, -inf equates to a loss\n\n # Opponent\n opponent = game.get_opponent(player)\n\n # Remaining spaces left on the board\n rem_spaces = len(game.get_blank_spaces())\n\n # number of agent's available moves\n no_moves = len(game.get_legal_moves(player))\n\n # number of opponent's available moves\n opp_moves = len(game.get_legal_moves(opponent))\n\n # evaluation of board \"goodness\"\n # using moves available to both players\n # Idea is player chooses moves with scores that maximise whilst minimizing\n # evaluate board states and positions as scores\n board_score = no_moves - opp_moves\n score = board_score/rem_spaces\n\n return float(score)",
"def open_move_score(game, player):\n if game.is_loser(player):\n return float(\"-inf\")\n\n if game.is_winner(player):\n return float(\"inf\")\n\n return float(len(game.get_legal_moves(player)))"
] | [
"0.7845445",
"0.7665763",
"0.7595392",
"0.7558798",
"0.74634653",
"0.72690827",
"0.72640866",
"0.7191977",
"0.7175509",
"0.7073882",
"0.70610803",
"0.7019335",
"0.696953",
"0.69266623",
"0.6912282",
"0.6897905",
"0.68971044",
"0.68832207",
"0.68773335",
"0.68591225",
"0.68503684",
"0.6804682",
"0.67956656",
"0.6740754",
"0.6737364",
"0.66916025",
"0.6664858",
"0.6657413",
"0.6641747",
"0.66351634"
] | 0.7709773 | 1 |
Clip the values of x from eps to 1-eps and renormalize them so that they sum to 1. | def clip_and_renorm(x, eps=1e-5):
x = np.clip(x, eps, 1-eps)
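    # Illustrative note: with the default eps, an input of np.array([0., 1.])
    # is clipped to [1e-05, 0.99999] and then rescaled so the entries sum to 1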
return x / x.sum() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def threshold_and_normalize_pixels(x, eps=1e-2):\n x = torch.clamp(x, min=eps)\n x = x / torch.sum(x, dim=1, keepdim=True)\n return x",
"def _normalize(x):\n tol = 1e-10\n dims = x.shape\n\n x = x.flatten()\n inverse = (np.sum(x**2) + tol) ** -.5\n x = x * inverse\n x = np.reshape(x, dims)\n\n return x",
"def unit_scale(x, eps=1e-8):\n\tx = x.copy()\n\tx -= x.min()\n\tx *= 1.0 / (x.max() + eps)\n\treturn x",
"def normalize(x):\n return (x + 1e-10) / (K.sqrt(K.mean(K.square(x))) + 1e-10)",
"def _normalize_(x: np.array) -> np.array:\n if x.max() != 0:\n x = x / x.max()\n return np.clip(x, 0, 1)# ensure that no values are >1\n else:\n raise ZeroDivisionError('Image Normalization')",
"def normalize(x):\n return (x - math_ops.reduce_min(x)) / (math_ops.reduce_max(x) - math_ops.reduce_min(x))",
"def normalise(x):\n return (x - jnp.min(x)) / (jnp.max(x) - jnp.min(x))",
"def normalize(x):\n # TODO: Implement Function\n data_max = np.max(x)\n data_min = np.min(x)\n x = (x - data_min) / (data_max - data_min)\n return x",
"def normalize(x):\n # TODO: Implement Function\n \n return x/255",
"def _normalize(self, x):\n # TODO: imagenet normalization\n\n return x",
"def scale_to_start(x):\n x = (x + eps) / (x[0] + eps)\n return x",
"def normalize(self, x):\n self.max = x.max()\n self.min = x.min()\n return (2 * (x - x.min()) / (x.max() - x.min()) - 1)",
"def normalise(self, x):\n norm = ((x + 1.2) / 2.4) * self.w\n clipped = np.clip(norm, 0, self.w)\n # convert to list to use base numpy type int\n return clipped.astype(int).tolist()",
"def x_nondim(self, x):\n x[0:4] /= self.r_scale\n return x",
"def rescale(x):\n if x.min() != 0:\n raise ValueError('input should have min zero.')\n\n x /= x.max() # max 1\n x *= 2 # max 2\n x -= 1 # range -1, 1\n\n if x.min() != -1 and x.max() != 1:\n raise Exception\n\n return x",
"def normalize(x):\n a = 0\n b = 1\n scale_min = 0\n scale_max = 255\n return a + ( ( (x - scale_min)*(b - a) )/( scale_max - scale_min ) )",
"def normalize(x):\r\n return x/norm(x)",
"def _normalize(self, x, axis, eps=1e-5):\n return x / (\n tf.sqrt(tf.reduce_sum(tf.square(x), axis=axis, keep_dims=True)) + 1e-5)",
"def normalize_to_zero_one(x):\n return x - torch.min(x) / (torch.max(x) - torch.min(x))",
"def l1_normalize(x: np.ndarray) -> np.ndarray: # pylint: disable=invalid-name\n return x / x.sum()",
"def Clip(x):\n return math_ops.maximum(math_ops.minimum(x, 1.), -1.)",
"def normalize(x, x_max, x_min):\n return (x - x_min) / (x_max - x_min)",
"def denorm(self, x):\n out = (x + 1) / 2\n return out.clamp_(0, 1)",
"def denorm(self, x):\n out = (x + 1) / 2\n return out.clamp_(0, 1)",
"def denorm(self, x):\n out = (x + 1) / 2\n return out.clamp_(0, 1)",
"def denorm(self, x):\n out = (x + 1) / 2\n return out.clamp(0, 1)",
"def bin_normalize_moving(self, x):\n return _bin_normalize(x, self.mmin, self.mdelta)",
"def normalize(x, min_x, max_x):\n\treturn (x - min_x) / (max_x - min_x)",
"def denormalize(x):\n x_max = np.percentile(x, 98)\n x_min = np.percentile(x, 2)\n x = (x - x_min) / (x_max - x_min)\n x = x.clip(0, 1)\n return x",
"def denormalize(x):\n x_max = np.percentile(x, 98)\n x_min = np.percentile(x, 2)\n x = (x - x_min) / (x_max - x_min)\n x = x.clip(0, 1)\n return x"
] | [
"0.716385",
"0.71209276",
"0.7078646",
"0.6867248",
"0.68503493",
"0.68422705",
"0.6825308",
"0.6799856",
"0.6776279",
"0.6757192",
"0.66739017",
"0.66679573",
"0.6650903",
"0.650148",
"0.6492298",
"0.64902186",
"0.64388424",
"0.642077",
"0.6390071",
"0.63312405",
"0.6331007",
"0.63309443",
"0.6314628",
"0.6314628",
"0.6314628",
"0.6268047",
"0.6263713",
"0.6245197",
"0.6189526",
"0.6189526"
] | 0.76624894 | 0 |
Run the sum-product belief propagation for a single ray, accumulating the occupancy-to-ray messages in log space and producing the new ray-to-occupancy messages. Arguments | def single_ray_belief_propagation(ray_voxel_indices,
ray_to_occupancy_accumulated_pon,
ray_to_occupancy_pon, s):
# Create an index that when passed to a numpy array will return the voxels
# that this ray passes through
# TODO: Remove this check. This is just to make the code run for the
# 2D tests.
if ray_voxel_indices.shape[-1] == 3:
indices = (
ray_voxel_indices[:, 0],
ray_voxel_indices[:, 1],
ray_voxel_indices[:, 2]
)
else:
indices = (
ray_voxel_indices[:, 0],
ray_voxel_indices[:, 1]
)
    # Compute the occupancy-to-ray message
# NOTE: The ray_to_occupancy_accumulated is in log space
occupancy_to_ray_pon = (
ray_to_occupancy_accumulated_pon[indices] -
ray_to_occupancy_pon
)
    # We assume that incoming messages are normalized to 1, thus we need to
    # normalize the occupancy-to-ray message. For numerical stability we
    # subtract max(0, occupancy_to_ray_pon) from both exponents before
    # exponentiating
max_occupancy_to_ray = np.maximum(0.0, occupancy_to_ray_pon)
t1 = np.exp(0.0 - max_occupancy_to_ray)
t2 = np.exp(occupancy_to_ray_pon - max_occupancy_to_ray)
# Now we normalize the occupancy to ray message for the positive case.
# The occupancy_to_ray holds the positive occupancy-to-ray messages for the
# current ray (not in logspace) from Equation (44) in my report
occupancy_to_ray = np.clip(
t2 / (t2 + t1),
1e-4,
1-1e-4
)
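    # Note: t2 / (t2 + t1) equals the logistic sigmoid of occupancy_to_ray_pon,
    # i.e. the log quotient is mapped back to a probability in a numerically
    # stable way; the clip keeps it strictly inside (0, 1)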
# Compute the cumulative products in linear time (see eq. 13, 14 Ulusoy
# 3DV)
# For the computation of the cumulative product we need
# the occupancy-to-ray messages for the negative case.
# We append 1 at the top because for the o_1 voxel this term is equal to 1
occupancy_to_ray_neg_cumprod = np.hstack([
[1.], (1 - occupancy_to_ray).cumprod()
])
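    # Illustrative note: for occupancy_to_ray = [0.2, 0.5, 0.25] this gives
    # [1.0, 0.8, 0.4, 0.3], i.e. entry i is the probability (under the incoming
    # messages) that all voxels in front of voxel i along the ray are empty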
# Get the number of voxels that intersect with the ray
M = ray_to_occupancy_pon.shape[0]
# Make space to compute the ray to occupancy messages for both the positive
# and the negative case according to eq 44, 48 in my report
ray_to_occupancy_new = np.zeros((2, M), dtype=np.float32)
# Compute the part of the messages that is the same for positive and
# negative messages
ray_to_occupancy_new[:] += np.hstack([
[0.], occupancy_to_ray * occupancy_to_ray_neg_cumprod[:-1] * s
])[:-1].cumsum()
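    # Note: with T_j = occupancy_to_ray[j] * occupancy_to_ray_neg_cumprod[j] * s[j],
    # this is [0, T_0, T_0+T_1, ...], i.e. entry i sums the depth terms strictly
    # in front of voxel i, which appear in both the positive and negative message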
# Finalize the positive messages
ray_to_occupancy_new[1] += occupancy_to_ray_neg_cumprod[:-1] * s
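    # Note: the depth-at-voxel-i term omits the occupancy factor for voxel i
    # itself, since the positive message conditions on o_i = 1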
# Finalize the negative messages (adding 2nd part of eq. 14 Ulusoy 3DV)
# The summations we want to calculate are as follows:
# i=1, \sum_{i=2}^N(\cdot)
# i=2, \sum_{i=3}^N(\cdot)
# ...
# i=N-2, \sum_{i=N-1}^N(\cdot)
    # let's assume that we have [a, b, c, d, e]. We first reverse the array,
    # thus obtaining [e, d, c, b, a], and then compute the cumulative sum of
    # this array. The output is [e, e+d, e+d+c, e+d+c+b, e+d+c+b+a]. However
    # we want them in the reverse order, thus we reverse the output once again
    # and we have [e+d+c+b+a, e+d+c+b, e+d+c, e+d, e]
# Finally we also divide with the incoming message for the negative case
ray_to_occupancy_new[0] += np.hstack([
occupancy_to_ray * occupancy_to_ray_neg_cumprod[:-1] * s,
[0.0]
])[::-1].cumsum()[::-1][1:] / (1 - occupancy_to_ray)
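    # Note: the division by (1 - occupancy_to_ray) removes voxel i's own
    # (1 - occupancy) factor from the behind-voxel-i terms, because the message
    # sent to o_i must not include the incoming message from o_i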
# Normalize the positive ray_to_occupancy message
    ray_to_occupancy_new_pos = (
        ray_to_occupancy_new[1] /
        (ray_to_occupancy_new[1] + ray_to_occupancy_new[0])
    )
# Return the quotient of the positive ray to occupancy message with the
# negative ray to occupancy message in logspace
t = np.log(ray_to_occupancy_new_pos) - np.log(1 - ray_to_occupancy_new_pos)
if np.isnan(t).any() or np.isinf(t).any():
print "ray_to_occupancy_pon contains weird values %r" % (t)
print "ray_to_occupancy_new_pos", ray_to_occupancy_new_pos
return t | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def belief_propagation(\n S,\n ray_voxel_indices,\n ray_voxel_count,\n ray_to_occupancy_messages_pon,\n grid_shape,\n gamma=0.05,\n bp_iterations=3,\n progress_callback=lambda *args: None\n):\n # Extract the number of rays\n N, M = S.shape\n\n # Initialize the ray to occupancy messages to uniform\n ray_to_occupancy_messages_pon.fill(0)\n\n # Initialize the ray-to-occupancy accumulated to $\\phi(o_i)$ The\n # ray_to_occupancy_accumulated_prev_pon and the\n # ray_to_occupancy_accumulated_new_pon holds the accumulation of the\n # quotient of the positive ray to occupancy message with the negative ray\n # to occupancy message in log space for the current and for the next belief\n # propagation iteration.\n # Both messages are initialized to\n # \\log(\\frac{\\phi_(o_i=1)}{\\phi_(o_i=0)}\n ray_to_occupancy_accumulated_prev_pon = np.ones(\n tuple(grid_shape),\n dtype=np.float32\n ) * (np.log(gamma) - np.log(1 - gamma))\n ray_to_occupancy_accumulated_new_pon = np.ones(\n tuple(grid_shape),\n dtype=np.float32\n ) * (np.log(gamma) - np.log(1 - gamma))\n\n # Iterate over the rays multiple times\n for it in xrange(bp_iterations):\n print \"Iteration %d \" % (it,)\n for r in xrange(N):\n # Get the actual number of voxels which this ray passes through\n c = ray_voxel_count[r]\n if c <= 1:\n continue\n ray_to_occupancy_pon = single_ray_belief_propagation(\n ray_voxel_indices[r, :c, :],\n ray_to_occupancy_accumulated_prev_pon,\n ray_to_occupancy_messages_pon[r, :c],\n clip_and_renorm(S[r, :c])\n )\n\n idxs = ray_voxel_indices[r, :c]\n idxs = (idxs[:, 0], idxs[:, 1], idxs[:, 2])\n ray_to_occupancy_accumulated_new_pon[idxs] += ray_to_occupancy_pon\n\n # Update the array of the ray-to-occupancy messages with the\n # current message that will be used for the next iteration\n ray_to_occupancy_messages_pon[r, :c] = ray_to_occupancy_pon\n\n # Swap the accumulators for the next bp iteration\n ray_to_occupancy_accumulated_prev_pon[:] = ray_to_occupancy_accumulated_new_pon\n ray_to_occupancy_accumulated_new_pon.fill(np.log(gamma) - np.log(1 - gamma))\n\n progress_callback(\n S,\n ray_voxel_indices,\n ray_voxel_count,\n ray_to_occupancy_messages_pon,\n ray_to_occupancy_accumulated_prev_pon,\n it\n )\n\n return ray_to_occupancy_accumulated_prev_pon, ray_to_occupancy_messages_pon",
"def single_ray_depth_estimate(\n ray_voxel_indices,\n ray_to_occupancy_accumulated_pon,\n ray_to_occupancy_pon,\n s\n):\n # Create an index that when passed to a numpy array will return the voxels\n # that this ray passes through\n if ray_voxel_indices.shape[-1] == 3:\n indices = (\n ray_voxel_indices[:, 0],\n ray_voxel_indices[:, 1],\n ray_voxel_indices[:, 2]\n )\n else:\n indices = (\n ray_voxel_indices[:, 0],\n ray_voxel_indices[:, 1]\n )\n\n # Compute the log of the occupancy_to_ray message for the positive case\n # NOTE: The ray_to_occupancy_accumulated is in log space\n occupancy_to_ray_pon = (\n ray_to_occupancy_accumulated_pon[indices] -\n ray_to_occupancy_pon\n )\n\n # We assume that incoming messages are normalized to 1, thus we need to\n # normalize the occupancy-to-ray message\n max_occupancy_to_ray = np.maximum(0, occupancy_to_ray_pon)\n t1 = np.exp(0.0 - max_occupancy_to_ray)\n t2 = np.exp(occupancy_to_ray_pon - max_occupancy_to_ray)\n\n # Now we normalize the occupancy to ray message for the positive case.\n # NOTE: We only normalize and store the occupancy-to-ray message for the\n # positive case\n # The occupancy_to_ray holds the positive occupancy-to-ray messages for the\n # current ray (not in logspace) from Equation (44) in my report\n occupancy_to_ray = np.clip(\n t2 / (t2 + t1),\n 1e-4,\n 1-1e-4\n )\n\n # Compute the cumulative products in linear time (see eq. 13, 14 Ulusoy\n # 3DV)\n # For the computation of the cumulative product we need\n # the occupancy-to-ray messages for the negative case.\n # We append 1 at the top because for the o_1 voxel this term is equal to 1\n occupancy_to_ray_neg_cumprod = np.hstack([\n [1.], (1 - occupancy_to_ray).cumprod()\n ])\n\n P = occupancy_to_ray * occupancy_to_ray_neg_cumprod[:-1] * s\n\n return P / P.sum()",
"def backpropagating(self): \n\n ######################### Configure the sensor inputs given the movement of the agent ######################### \n sensors_result_N = self.agent.sensors(self, direction=3) + self.agent.get_energy_coarsed()+\\\n self.agent.rotate_previousAction(3)+[int(self.agent.get_previous_collision())]\n sensors_result_O = self.agent.sensors(self, direction=2) + self.agent.get_energy_coarsed()+\\\n self.agent.rotate_previousAction(2) + [int(self.agent.get_previous_collision())]\n sensors_result_S = self.agent.sensors(self, direction=1) + self.agent.get_energy_coarsed()+\\\n self.agent.rotate_previousAction(1) + [int(self.agent.get_previous_collision())]\n sensors_result_E = self.agent.sensors(self, direction=0) + self.agent.get_energy_coarsed()+\\\n self.agent.rotate_previousAction(0) + [int(self.agent.get_previous_collision())]\n\n input_nn_N = np.asarray(sensors_result_N).astype(int) # input when the Nord action is performed \n input_nn_O = np.asarray(sensors_result_O).astype(int) # input when the West action is performed\n input_nn_S = np.asarray(sensors_result_S).astype(int) # input when the South action is performed\n input_nn_E = np.asarray(sensors_result_E).astype(int) # input when the West action is performed\n\n l_input = [input_nn_E.reshape(1,145),input_nn_S.reshape(1,145),input_nn_O.reshape(1,145),input_nn_N.reshape(1,145)]\n ######################### Configure the sensor inputs given the movement of the agent #########################\n\n print(\"The reward in baskpropagating is %f\" %(self.agent.reward) ) \n parameters = [self.gamma, self.agent.reward]\n Ui = self.U_list[self.agent.get_previousAction().index(1)]\n\n if not self.end:\n U_list_y = [self.nn.predict(input_nn_E.reshape(1,145)),\\\n self.nn.predict(input_nn_S.reshape(1,145)),\\\n self.nn.predict(input_nn_O.reshape(1,145)),\\\n self.nn.predict(input_nn_N.reshape(1,145))] \n #print(U_list_y)\n maxU = np.max(U_list_y)\n #print(np.max(U_list_y))\n index_input_maxU = np.argmax(U_list_y) # the input given for the backprogating is the one with the maximum utility\n input_target = l_input[index_input_maxU] # The input target with the max utility, add to the tuple given during the experience replay\n uprime = self.agent.reward + self.gamma * maxU # input of the utility with the best value\n \n else:\n uprime = self.agent.reward\n input_target = np.array(None)\n \n action = self.agent.get_previousAction().index(1)\n input_nn = self.input_list[action]\n ##### Add to the lesson the action chose in order to go the next state, \n ##### the next state after to have performed the action, and the reward given\n if(self.action_proba[action] > 0.01): # the Pl minimum to choose the action corresponding to the action policy, cf to the paper part experience replay\n #next_states = [copy.deepcopy(input_nn_E).reshape(1,145), copy.deepcopy(input_nn_S).reshape(1,145), copy.deepcopy(input_nn_O).reshape(1,145), copy.deepcopy(input_nn_N).reshape(1,145)]\n self.memory.append((input_nn,action,np.asarray(copy.deepcopy(l_input)),self.agent.reward)) # We add the experiment to the memory of the agent \n \n ############################\n self.nn.train_one_step_other(input_nn,uprime)\n #self.nn.train(input_nn,tf.convert_to_tensor([[uprime]])) # use the method fit to train the neural network",
"def calculate_marginal(self):\n self.marginal_ray=beam_field()\n m=self.marginal_ray\n m.U=np.array([[[0,0,1]]])\n m.Q_p=np.array([[[0,self.entrance_pupil,0]]])\n m.propagate(self.surfaces)",
"def step(self, observation, last_state):\n # We are omitting the details of network inference here.\n # ...\n feature_screen = observation[3]['feature_screen']\n feature_minimap = observation[3]['feature_minimap']\n feature_units = observation[3]['feature_units']\n feature_player = observation[3]['player']\n available_actions = observation[3]['available_actions']\n score_by_category = observation[3]['score_by_category']\n game_loop = observation[3]['game_loop']\n\n unit_type = feature_screen.unit_type\n empty_space = np.where(unit_type == 0)\n empty_space = np.vstack((empty_space[0], empty_space[1])).T\n random_point = random.choice(empty_space)\n #target = [random_point[0], random_point[1]]\n #action = [actions.FunctionCall(_BUILD_SUPPLY_DEPOT, [_NOT_QUEUED, target])]\n policy_logits = None\n new_state = None\n\n spatial_encoder_output = self.spatial_encoder(np.reshape(feature_screen, [1,128,128,27]))\n\n agent_statistics = get_agent_statistics(score_by_category)\n\n home_race = 'Terran'\n away_race = 'Terran'\n race = get_race_onehot(home_race, away_race)\n\n time = get_gameloop_obs(game_loop)\n\n upgrade_value = get_upgrade_obs(feature_units)\n if upgrade_value != -1:\n self.home_upgrade_array[np.where(upgrade_value[0] == 1)] = 1\n self.away_upgrade_array[np.where(upgrade_value[1] == 1)] = 1\n\n embedded_scalar = np.concatenate((agent_statistics, race, time, self.home_upgrade_array, self.away_upgrade_array), axis=0)\n scalar_encoder_output = self.scalar_encoder(np.reshape(embedded_scalar, [1,307]))\n embedded_feature_units = get_entity_obs(feature_units)\n entity_encoder_output = self.entity_encoder(np.reshape(embedded_feature_units, [1,512,464]))\n encoder_input = np.concatenate((spatial_encoder_output, scalar_encoder_output, entity_encoder_output), axis=1)\n\n core_input = np.reshape(encoder_input, [16, 8, 131])\n whole_seq_output, final_memory_state, final_carry_state = self.core(core_input)\n print(whole_seq_output.shape)\n print(final_memory_state.shape)\n print(final_carry_state.shape)\n\n action = [actions.FUNCTIONS.no_op()]\n\n return action, policy_logits, new_state",
"def step(self):\n # A = self.array\n # B = self.array2\n # ra, rb, f, k = self.params\n H = self.array\n L = self.array2\n birth_rate, death_rate, a, c = self.params\n \n # cA = correlate2d(A, self.kernel, **self.options)\n # cB = correlate2d(B, self.kernel, **self.options)\n cH = correlate2d(H, self.kernel,**self.options)\n cL = correlate2d(L, self.kernel, **self.options)\n\n # reaction = A * B**2\n\n # self.array += ra * cA - reaction + f * (1-A) \n # self.array2 += rb * cB + reaction - (f+k) * B\n self.array += birth_rate*cH - a*L*H\n self.array2 += c*cL*cH - death_rate*L",
"def Sarsa_lbda_w_bf_and_lin_FA(env, fvecs, idcs_per_action, weights, alpha,\n epsilon, gamma, num_actions, num_episodes, lbda):\n\n for episode in tqdm(range(num_episodes)):\n done = False\n \n state = env.reset()\n fvec_idx_per_tiling = fvecs.calc_feature_vec(state)\n Q_vals = init_Q_values(weights, fvec_idx_per_tiling, idcs_per_action,\n num_actions)\n action, Q_current = eps_greedy(Q_vals, epsilon, num_actions) \n curr_active_feat_idcs = fvec_idx_per_tiling + action*idcs_per_action \n \n z_trace = init_e_traces(len(weights)) ###\n \n step_count = 0\n while not done:\n step_count += 1\n if episode == (num_episodes -1):\n env.render(state[0])\n\n next_state, reward, done,__ = env.step(state, action)\n\n delta = reward\n for i in curr_active_feat_idcs:\n delta -= weights[i]\n z_trace[i] += 1 ### accumulating traces\n #z_trace[i] = 1 ### replacing traces\n if done:\n weights += alpha*delta*z_trace\n break\n\n fvec_idx_per_tiling = fvecs.calc_feature_vec(next_state)\n Q_vals = init_Q_values(weights, fvec_idx_per_tiling, idcs_per_action,\n num_actions)\n next_action, Q_next = eps_greedy(Q_vals, epsilon, num_actions)\n \n next_active_feat_idcs = fvec_idx_per_tiling + next_action*idcs_per_action### \n\n for i in next_active_feat_idcs:\n delta += gamma*weights[i]\n weights += alpha*delta*z_trace\n \n z_trace = gamma*lbda*z_trace\n state = next_state\n action = next_action \n curr_active_feat_idcs = next_active_feat_idcs\n \n env.plot_step_per_ep(episode, step_count)\n \n return",
"def propagate(self, ray, source=0):\n new_ray = self._propagate(ray)\n if new_ray is not None:\n self.footprint[source].append((new_ray.endpoint.x,\n new_ray.endpoint.y,\n new_ray.wavelength))\n return new_ray",
"def run():\n\n env = JacoEnv(64,\n 64,\n 100,\n 0.1,\n 0.8,\n True)\n nb_actions = env.real_num_actions\n new_floor_color = list((0.55 - 0.45) * np.random.random(3) + 0.45) + [1.]\n new_cube_color = list(np.random.random(3)) + [1.]\n env.change_floor_color(new_floor_color)\n env.change_cube_color(new_cube_color)\n\n global vae\n vae.load_weights(WEIGHTS_FILE)\n print(\"#########################\")\n nb_observation_space = (64, 64, 3)\n original_input = Input(shape=(WINDOW_LENGTH,) + nb_observation_space)\n in_layer = [Lambda(lambda x: x[:, i, :, :])(original_input) for i in range(WINDOW_LENGTH)]\n vae = Model(vae.inputs, [vae.layers[-2].outputs[2]])\n for layer in vae.layers:\n layer.trainable = False\n print(vae.summary())\n vae_output = [vae(x) for x in in_layer]\n\n x = Concatenate()(vae_output)\n x = Dense(512, activation='relu')(x)\n x = Dense(512, activation='relu')(x)\n x = Dense(nb_actions, activation='linear')(x)\n model = Model(original_input, [x])\n print(model.summary())\n if MULTI_GPU:\n model = multi_gpu_model(model, gpus=2)\n print(model.summary())\n\n num_warmup = 50000\n num_simulated_annealing = 500000 + num_warmup\n\n memory = SequentialMemory(limit=1000000, window_length=WINDOW_LENGTH)\n policy = LinearAnnealedPolicy(EpsGreedyQPolicy(), attr='eps', value_max=1., value_min=.1, value_test=.05, nb_steps=num_simulated_annealing)\n\n dqn = DQNAgent(model=model, nb_actions=nb_actions, policy=policy, memory=memory, nb_steps_warmup=num_warmup, gamma=.99, target_model_update=10000, train_interval=4, delta_clip=1.)\n dqn.compile(Adam(lr=.00025), metrics=['mae'])\n\n if False:\n checkpoint_callback = ModelCheckpoint(\"darla_dqn_checkpoint\", monitor='episode_reward', verbose=0, save_best_only=True, save_weights_only=True, mode='max', period = 10)\n history = dqn.fit(env, nb_steps=num_simulated_annealing + 450000,\n visualize=False, verbose=1, callbacks = [checkpoint_callback])\n dqn.save_weights(\"darla_dqn_weights\")\n np.savez_compressed(\"darla_dqn_history\",\n episode_reward=np.asarray(history.history['episode_reward']))\n else:\n dqn.load_weights(\"darla_dqn_weights\")\n\n print(\"original domain\")\n source_test_losses = dqn.test(env, nb_episodes=100, visualize=True)\n np.savez_compressed(\"darla_dqn_source_test\",\n episode_reward=np.asarray(source_test_losses.history['episode_reward']),\n nb_steps=np.asarray(source_test_losses.history['nb_steps']))\n\n print(\"target domain\")\n new_floor_color = [0.4, 0.6, 0.4, 1.]\n new_cube_color = [1.0, 0.0, 0.0, 1.]\n env.change_floor_color(new_floor_color)\n env.change_cube_color(new_cube_color)\n target_test_losses = dqn.test(env, nb_episodes=100, visualize=True)\n np.savez_compressed(\"darla_dqn_target_test\",\n episode_reward=np.asarray(target_test_losses.history['episode_reward']),\n nb_steps=np.asarray(target_test_losses.history['nb_steps']))\n source_array = np.asarray(source_test_losses.history['episode_reward'])\n target_array = np.asarray(target_test_losses.history['episode_reward'])\n print(source_array.min(), source_array.mean(), source_array.max())\n print(target_array.min(), target_array.mean(), target_array.max())",
"def train(args):\n # prepare environment\n brain_name = env.brain_names[0]\n brain = env.brains[brain_name]\n env_info = env.reset(train_mode=True)[brain_name]\n\n num_agents = len(env_info.agents)\n print('Number of agents:', num_agents)\n\n # size of each action\n action_size = brain.vector_action_space_size\n print('Size of each action:', action_size)\n\n # examine the state space\n states = env_info.vector_observations\n state_size = states.shape[1]\n print('There are {} agents. Each observes a state with length: {}'.format(\n states.shape[0], state_size))\n print('The state for the first agent looks like:', states[0])\n\n # Crate instance of MADDPG Class, mainly possible to control the model dimensions, learnrates and batch sizes\n agent = MADDPG(state_size,\n action_size,\n lr_actor=args.lr_actor,\n lr_critic=args.lr_critic,\n lr_decay=args.lr_decay,\n replay_buff_size=args.replay_buff_size,\n gamma=args.gamma,\n batch_size=args.batch_size,\n random_seed=args.random_seed,\n soft_update_tau=args.soft_update_tau,\n actor_layer_dim_1=args.actor_layer_dim_1,\n actor_layer_dim_2=args.actor_layer_dim_2,\n actor_layer_dim_3=args.actor_layer_dim_3,\n critic_layer_dim_1=args.critic_layer_dim_1,\n critic_layer_dim_2=args.critic_layer_dim_2,\n critic_layer_dim_3=args.critic_layer_dim_3\n\n )\n\n total_rewards = []\n avg_scores = []\n max_avg_score = -1\n max_score = -1\n threshold_init = 20\n noise_t = args.epsilon\n noise_decay = args.epsilon_decay\n latest_avg_score = -1\n # for early-stopping training if consistently worsen for # episodes\n worsen_tolerance = threshold_init\n for i_episode in range(1, 1+args.num_episodes):\n\n env_inst = env.reset(train_mode=True)[\n brain_name] # reset the environment\n states = env_inst.vector_observations # get the current state\n # initialize score array\n scores = np.zeros(num_agents)\n dones = [False]*num_agents\n while not np.any(dones):\n # select an action\n actions = agent.act(states, noise_t)\n # send the action to the environment\n env_inst = env.step(actions)[brain_name]\n next_states = env_inst.vector_observations # get the next state\n rewards = env_inst.rewards # get the reward\n dones = env_inst.local_done # see if episode has finished\n agent.update(states, actions, rewards, next_states, dones)\n\n noise_t *= noise_decay\n scores += rewards # update scores\n states = next_states\n\n episode_score = np.max(scores)\n total_rewards.append(episode_score)\n print(\"\\rEpisodic {} Score: {:.4f}\\t Avg Score: {:.4f}\".format(\n i_episode, episode_score, latest_avg_score), end=' ')\n\n if max_score <= episode_score:\n max_score = episode_score\n # save best model so far\n agent.save(\n \"chkpts/{}/{:02d}_best_model.checkpoint\".format(args.model_path, args.loop_counter))\n\n # record avg score for the latest 100 steps\n if len(total_rewards) >= args.test_n_run:\n latest_avg_score = sum(\n total_rewards[(len(total_rewards)-args.test_n_run):]) / args.test_n_run\n avg_scores.append(latest_avg_score)\n\n if max_avg_score <= latest_avg_score: # record better results\n worsen_tolerance = threshold_init # re-count tolerance\n max_avg_score = latest_avg_score\n else:\n if max_avg_score > 0.5:\n worsen_tolerance -= 1 # count worsening counts\n print(\"Loaded from last best model.\")\n # continue from last best-model\n agent.load(\n \"chkpts/{}/{:02d}_best_model.checkpoint\".format(args.model_path, args.loop_counter))\n if worsen_tolerance <= 0: # earliy stop training\n print(\"Early Stop Training.\")\n break\n del agent\n return total_rewards",
"def GGQLambda_MultiProcess_Ave(ave_times=20,\n learning_rate=0.1,\n eligibility_factor=0.9):\n # Learning parameters\n precise = [10, 8, 10, 8]\n\n discount_factor = 0.9\n discount_of_learning_rate = 0.999\n epsilon = 0.1\n\n # Macro\n NUM_EPISODE = 600\n AVE_TIMES = ave_times\n REWARD_THREASHOLD = 40\n # Definition of dependencies\n env = gym.make('CartPole-v0')\n\n observation_space = (\n env.observation_space.low,\n env.observation_space.high\n )\n\n CartPole_universal_action_space = [i for i in xrange(0, env.action_space.n)]\n state_action_space = StateActionSpace_CartPole(\n observation_space,\n precise,\n CartPole_universal_action_space\n )\n\n for ave_times in range(AVE_TIMES):\n learning_agent_GGQLambda = GGQLambda(\n learning_rate,\n discount_factor,\n eligibility_factor,\n discount_of_learning_rate,\n epsilon,\n state_action_space.action_space\n )\n learning_agent = learning_agent_GGQLambda\n\n Qfunc_error_history = []\n total_reward_episode = []\n time_history = []\n max_reward = -float(\"inf\")\n for i_episode in range(NUM_EPISODE):\n time_start = time.clock()\n observation = env.reset()\n\n discret_state = state_action_space._m_observation_to_discrete_state(\n observation\n )\n discret_state_bar = deepcopy(discret_state)\n\n action = learning_agent._m_GreedyPolicy(\n discret_state,\n state_action_space\n )\n\n phi = state_action_space._m_discrete_state_to_feature(\n discret_state,\n action\n )\n\n rho = 1\n\n total_reward = 0\n Qfunc_previous = deepcopy(learning_agent.theta)\n learning_agent.e = np.zeros(learning_agent.num_element_qfunc)\n\n done = False\n step = 0\n while not done:\n step += 1\n while set(discret_state) == set(discret_state_bar):\n observation_bar, step_reward, done, info = env.step(action)\n\n if done:\n break\n\n discret_state_bar = state_action_space._m_observation_to_discrete_state(\n observation_bar\n )\n\n action_bar = learning_agent._m_GreedyPolicy(\n discret_state_bar,\n state_action_space\n )\n phi_bar = state_action_space._m_discrete_state_to_feature(\n discret_state_bar,\n action_bar\n )\n\n learning_agent._m_Learn(phi,\n phi_bar,\n step_reward,\n step_reward,\n rho,\n 1\n )\n\n phi = phi_bar\n action = action_bar\n discret_state = discret_state_bar\n total_reward += step_reward\n if done:\n break\n print \"Episode finished after {} timesteps in GQ(lambda)\".format(step), \"in \", ave_times + 1, \"times\"\n time_end = time.clock()\n time_consumed = time_end - time_start\n time_history.append(time_consumed)\n\n if total_reward > max_reward:\n if total_reward > REWARD_THREASHOLD:\n epsilon *= 0.999\n max_reward = total_reward\n\n total_reward_episode.append(total_reward) # Add total reward to reward history\n\n delta_q_func = Qfunc_previous - learning_agent.theta\n Qfunc_difference_this_episode = np.dot(\n delta_q_func,\n delta_q_func\n )\n Qfunc_error_history.append( # Add error to error history\n Qfunc_difference_this_episode\n )\n\n if i_episode % 10 == 0:\n print i_episode, \"th episode completed\"\n print \"Q update is\", Qfunc_difference_this_episode\n print \"Maximal reward is\", max_reward, \"\\n\"\n\n Qfunc_error_history = np.array(Qfunc_error_history)\n if 'Qfunc_error_history_ave' not in locals():\n Qfunc_error_history_ave = Qfunc_error_history\n else:\n Qfunc_error_history_ave = Qfunc_error_history_ave + (Qfunc_error_history - Qfunc_error_history_ave) / (ave_times * 1.0)\n\n total_reward_episode = np.array(total_reward_episode)\n if 'total_reward_episode_ave' not in locals():\n total_reward_episode_ave = total_reward_episode\n else:\n 
total_reward_episode_ave = total_reward_episode_ave + (total_reward_episode - total_reward_episode_ave) / (ave_times * 1.0)\n\n time_history = np.array(time_history)\n if 'time_history_ave' not in locals():\n time_history_ave = time_history\n else:\n time_history_ave = time_history_ave + (time_history - time_history_ave) / (ave_times * 1.0)\n\n Qfunc_error_history = Qfunc_error_history_ave\n total_reward_episode = total_reward_episode_ave\n time_history = time_history_ave\n with open(\n path + \"total_reward_GGQ-\" + str(learning_rate) + \"-\" + str(eligibility_factor), 'wb') as f:\n pickle.dump(total_reward_episode, f)\n with open(\n path + \"time_history_GGQ-\" + str(learning_rate) + \"-\" + str(eligibility_factor), 'wb') as f:\n pickle.dump(time_history, f)",
"def _exe_(self):\n print(\"\\n Start simulation (using Pharlap) ...\")\n dic = \"data/sim/{dn}/{rad}/\".format(dn=self.event.strftime(\"%Y.%m.%d.%H.%M\"), rad=self.rad)\n self._copy_ne_()\n [self._compute_(case) for case in [\"bgc\", \"flare\"]]\n plotlib.plot_exp_rays(dic, self.event, self.bmnum, \"bgc\")\n plotlib.plot_exp_rays(dic, self.event, self.bmnum, \"flare\")\n self._compute_doppler_()\n rec = self._compute_velocity_()\n return rec",
"def _propagate_step(self):\n\n # optical depth to next interaction\n self.tau = -np.log(self.RNG.rand(self.N_active))\n # optical depth to sphere edge\n self.tau_edge = np.sqrt(self.tau_sphere**2 - self.tau_i**2 *\n (1. - self.mu_i**2)) - self.tau_i * self.mu_i\n\n # identify packets that escape\n self.esc_mask = self.tau_edge < self.tau\n # update number of escaping packets\n self.N_esc += self.esc_mask.sum()\n\n # identify interacting packets\n self.nesc_mask = np.logical_not(self.esc_mask)\n\n # decide which interacting packets scatter and which get absorbed\n self.abs_mask = self.RNG.rand(self.nesc_mask.sum()) >= self.albedo\n self.scat_mask = np.logical_not(self.abs_mask)\n\n # select properties of scattering packets\n self.tau = self.tau[self.nesc_mask][self.scat_mask]\n self.tau_i = self.tau_i[self.nesc_mask][self.scat_mask]\n self.mu_i = self.mu_i[self.nesc_mask][self.scat_mask]\n\n # update number of active packets\n self.N_active = self.scat_mask.sum()\n\n # update properties (position in optical depth space, propagation\n # direction) of scattering packets\n self.tau_i = np.sqrt(self.tau_i**2 + self.tau**2 +\n 2. * self.tau * self.tau_i * self.mu_i)\n self.mu_i = 2 * self.RNG.rand(self.N_active) - 1.",
"def doCalculation(self, E1, E2, muL, muR, T, pot, C, TCalc, Density, E0, L):\n NEcut = len(E1) #we determine the number of single-particle states that we use\n VG=np.diag(pot)\n E= int(0.5*np.size(VG))\n V = VG[0:E] #since the potential of both barriers is symmetric and we only tunnel through one barrier. Therefore we only use one half of the potential.\n dx= L/(np.size(pot))\n\n #Following prints are for debugging purposes:\n #print(\"---------------------------------------------------------------------\")\n #print(\"---------------------------------------------------------------------\")\n #print(\"Hier beginnt die Ausgabe von Rates:\")\n #print(\"---------------------------------------------------------------------\")\n #print(\"V:\", V)\n #print(\"E1:\", E1)\n #print(\"E2:\", E2)\n #print(\"C:\", C)\n\n kB=0.08629 #Boltzmann constant in meV/K\n \n \n def fermi(E,mu,T):\n \"\"\"This fermi-function tells us with which likelyhood a state with an E is occupied on the lead.\n E(float): energy difference between the initial and the final state that the tunneling electron has to carry.\n mu(float): chemical potential of either drain(muR) or source(muL).\n T(float): temperature.\n \"\"\"\n if (E-mu)/T > 600:\n f=0\n\t\t\t\t\n else:\n f=1/(math.exp((E-mu)/(kB*T) )+1)\n return(f)\n \n\n\t#This function is called by the Gamma_ij-equations and includes the transmission-coefficient for each tunnelling-event\n #and the density of state function of the source and drain. \n def Gamma(Ea,Eb,V):\n \"\"\":math:`\\\\Gamma` includes the transmission coefficient and DOS: :math:`\\Gamma = | t |^2 * DOS`\n\n Ea(float): energy of initial state\n Eb(float): energy of final state\n V(np.array): barrier potential\n \"\"\"\n #print(Ea)\n #print(V)\n return (np.absolute(TCalc.calculate_transmission(Ea,V,dx))**2*Density.calculate_DensityofStates(np.absolute(Ea-Eb)))\n \n #These next four functions are used to calculate the transition rates.Each function for a different kind of transition:\n #We distinguish between transitions, in which the number of electrons on the dot changes from one to two(Gamma_12) and reverse(Gamma_21).\n #And between transitions in which the number of electrons on the dot change from zero to one(Gamma_01) and reverse(Gamma_10).\n\n def Gamma_12(Ea,Eb,mu,T):\n \"\"\"Calculates the rate of a transition from a one body state to a two body state.\n\n Ea(float): energy of initial state\n Eb(float): energy of final state\n mu(float): chemical potential of either drain(muR) or source(muL)\n T(float): temperature\n \"\"\"\n summe=0\n j=0\n Cb=C[np.where(E2==Eb)[0][0]]\n while j< NEcut:\n summe=Cb[np.where(E1==Ea)[0][0]][j]+summe\n j=j+1\n return(Gamma(Ea,Eb,V)*(np.absolute(summe))**2*fermi((Eb-Ea),mu,T))\n\n\n def Gamma_01(Eb,mu,T):\n \"\"\"Calculates the transition rate from the vacuum state to a one-body state.\n\n Eb(float): energy of final state\n mu(float): chemical potential of either drain(muR) or source(muL)\n T(float): temperature\n \"\"\"\n return(Gamma(E0,Eb,V)*fermi((Eb-E0),mu,T))\n\n def Gamma_21(Ea,Eb,mu,T):\n \"\"\"Calculates the rate of a transition from a two body state to a one body state.\n\n Ea(float): energy of initial state\n Eb(float): energy of final state\n mu(float): chemical potential of either drain(muR) or source(muL)\n T(float): temperature\n \"\"\"\n summe=0\n nu=0\n Ca=C[np.where(E2==Ea)[0][0]]\n while nu < NEcut:\n summe=summe+Ca[np.where(E1==Eb)[0][0]][nu]\n nu=nu+1\n return(Gamma(Ea,Eb,V)*(np.absolute(summe))**2*(1-fermi((Ea-Eb),mu,T)))\n\n def Gamma_10(Ea,mu,T):\n 
\"\"\"Calculates the rate of a transition from a one body state to the vacuum state.\n\n Ea(float): energy of initial state \n mu(float): chemical potential of either drain(muR) or source(muL)\n T(float): temperature\n \"\"\"\n return(Gamma(Ea,E0,V)*(1-fermi((Ea-E0),mu,T)))\n\n #creating the output matrices that later contain all the transition rates through either\n #the left or the right barrier\n Gamma_R=np.zeros((1+np.size(E1)+np.size(E2),1+np.size(E1)+np.size(E2)))\n Gamma_L=np.zeros((1+np.size(E1)+np.size(E2),1+np.size(E1)+np.size(E2)))\n\n #using a loop to fill the output matrices with transition rates.\n i_=0\n for i in E1:\n j_=0\n for j in E2:\n Gamma_L[i_+1][j_+1+np.size(E1)]=Gamma_12(i,j,muL,T)\n Gamma_L[j_+1+np.size(E1)][i_+1]=Gamma_21(j,i,muL,T)\n Gamma_R[i_+1][j_+1+np.size(E1)]=Gamma_12(i,j,muR,T)\n Gamma_R[j_+1+np.size(E1)][i_+1]=Gamma_21(j,i,muR,T)\n j_=j_+1\n Gamma_L[0][i_+1]=Gamma_10(i,muL,T)\n Gamma_R[0][i_+1]=Gamma_10(i,muR,T)\n Gamma_L[i_+1][0]=Gamma_01(i,muL,T)\n Gamma_R[i_+1][0]=Gamma_01(i,muR,T)\n i_=1+i_\n\n #print(\"Gamma_L und Gamma_R:\")\n #print(Gamma_L,Gamma_R)\n #print(\"-----------------------------------------------------------------------\")\n #print(\"---------------------------------------------------------------------\")\n return(Gamma_L,Gamma_R)",
"def main():\n\n population = 276470345\n\n # These 3 variables are for the known probabilities.\n # Change them to see the effect on P(ill|positive)\n P_ill = 0.0806212326\n P_positive_if_ill = 0.94 # sensitivity\n P_negative_if_healthy = 0.98 # specificity\n\n print()\n\n calculate_with_bayes(P_ill, P_positive_if_ill, P_negative_if_healthy)",
"def usped(self):\n\n # assign variables\n ls_factor = 'ls_factor'\n slope = 'slope'\n aspect = 'aspect'\n flowacc = 'flowacc'\n qsx = 'qsx'\n qsxdx = 'qsxdx'\n qsy = 'qsy'\n qsydy = 'qsydy'\n grow_slope = 'grow_slope'\n grow_aspect = 'grow_aspect'\n grow_qsxdx = 'grow_qsxdx'\n grow_qsydy = 'grow_qsydy'\n erdep = 'erdep' # kg/m^2s\n sedflow = 'sedflow'\n\n # parse, advance, and stamp time\n (evolved_elevation, time, depth, sediment_flux, erosion_deposition,\n difference) = self.parse_time()\n\n # compute event-based erosivity (R) factor (MJ mm ha^-1 hr^-1 yr^-1)\n r_factor = self.event_based_r_factor()\n\n # compute slope and aspect\n gscript.run_command(\n 'r.slope.aspect',\n elevation=self.elevation,\n slope=slope,\n aspect=aspect,\n overwrite=True)\n\n # grow border to fix edge effects of moving window computations\n gscript.run_command(\n 'r.grow.distance',\n input=slope,\n value=grow_slope,\n overwrite=True)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{slope}={grow_slope}\".format(\n slope=slope,\n grow_slope=grow_slope),\n overwrite=True)\n gscript.run_command(\n 'r.grow.distance',\n input=aspect,\n value=grow_aspect,\n overwrite=True)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{aspect}={grow_aspect}\".format(\n aspect=aspect,\n grow_aspect=grow_aspect),\n overwrite=True)\n\n # compute flow accumulation\n gscript.run_command(\n 'r.watershed',\n elevation=self.elevation,\n accumulation=flowacc,\n flags=\"a\",\n overwrite=True)\n region = gscript.parse_command(\n 'g.region', flags='g')\n res = region['nsres']\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{depth}\"\n \"=({flowacc}*{res})\".format(\n depth=depth,\n flowacc=flowacc,\n res=res),\n overwrite=True)\n # add depression parameter to r.watershed\n # derive from landcover class\n\n\n # compute dimensionless topographic factor\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{ls_factor}\"\n \"=({flowacc}^{m})*(sin({slope})^{n})\".format(\n ls_factor=ls_factor,\n m=self.m,\n flowacc=depth,\n slope=slope,\n n=self.n),\n overwrite=True)\n\n # compute sediment flow at sediment transport capacity\n \"\"\"\n T = R * K * C * P * LST\n where\n T is sediment flow at transport capacity\n R is rainfall factor\n K is soil erodibility factor\n C is a dimensionless land cover factor\n P is a dimensionless prevention measures factor\n LST is the topographic component of sediment transport capacity\n of overland flow\n \"\"\"\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{sedflow}\"\n \"={r_factor}\"\n \"*{k_factor}\"\n \"*{c_factor}\"\n \"*{ls_factor}\".format(\n r_factor=r_factor,\n k_factor=self.k_factor,\n c_factor=self.c_factor,\n ls_factor=ls_factor,\n sedflow=sedflow),\n overwrite=True)\n\n # convert sediment flow from tons/ha/yr to kg/m^2s\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{converted_sedflow}\"\n \"={sedflow}\"\n \"*{ton_to_kg}\"\n \"/{ha_to_m2}\"\n \"/{yr_to_s}\".format(\n converted_sedflow=sediment_flux,\n sedflow=sedflow,\n ton_to_kg=1000.,\n ha_to_m2=10000.,\n yr_to_s=31557600.),\n overwrite=True)\n\n # compute sediment flow rate in x direction (m^2/s)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{qsx}={sedflow}*cos({aspect})\".format(\n sedflow=sediment_flux,\n aspect=aspect, qsx=qsx),\n overwrite=True)\n\n # compute sediment flow rate in y direction (m^2/s)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{qsy}={sedflow}*sin({aspect})\".format(\n sedflow=sediment_flux,\n aspect=aspect,\n qsy=qsy),\n overwrite=True)\n\n # compute change in sediment flow in x 
direction\n # as partial derivative of sediment flow field\n gscript.run_command(\n 'r.slope.aspect',\n elevation=qsx,\n dx=qsxdx,\n overwrite=True)\n\n # compute change in sediment flow in y direction\n # as partial derivative of sediment flow field\n gscript.run_command(\n 'r.slope.aspect',\n elevation=qsy,\n dy=qsydy,\n overwrite=True)\n\n # grow border to fix edge effects of moving window computations\n gscript.run_command(\n 'r.grow.distance',\n input=qsxdx,\n value=grow_qsxdx,\n overwrite=True)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{qsxdx}={grow_qsxdx}\".format(\n qsxdx=qsxdx,\n grow_qsxdx=grow_qsxdx),\n overwrite=True)\n gscript.run_command(\n 'r.grow.distance',\n input=qsydy,\n value=grow_qsydy,\n overwrite=True)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{qsydy}={grow_qsydy}\".format(\n qsydy=qsydy,\n grow_qsydy=grow_qsydy),\n overwrite=True)\n\n # compute net erosion-deposition (kg/m^2s)\n # as divergence of sediment flow\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{erdep} = {qsxdx} + {qsydy}\".format(\n erdep=erdep,\n qsxdx=qsxdx,\n qsydy=qsydy),\n overwrite=True)\n\n # filter outliers\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{erosion_deposition}\"\n \"=if({erdep}<{erdepmin},\"\n \"{erdepmin},\"\n \"if({erdep}>{erdepmax},{erdepmax},{erdep}))\".format(\n erosion_deposition=erosion_deposition,\n erdep=erdep,\n erdepmin=self.erdepmin,\n erdepmax=self.erdepmax),\n overwrite=True)\n\n # set color table\n gscript.write_command(\n 'r.colors',\n map=erosion_deposition,\n rules='-',\n stdin=erosion_colors)\n\n # evolve landscape\n \"\"\"\n change in elevation (m)\n = change in time (s)\n * net erosion-deposition (kg/m^2s)\n / sediment mass density (kg/m^3)\n \"\"\"\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{evolved_elevation}\"\n \"={elevation}\"\n \"+({rain_interval}*60\"\n \"*{erosion_deposition}\"\n \"/{density})\".format(\n evolved_elevation=evolved_elevation,\n elevation=self.elevation,\n rain_interval=self.rain_interval,\n erosion_deposition=erosion_deposition,\n density=self.density),\n overwrite=True)\n\n # gravitational diffusion\n evolved_elevation = self.gravitational_diffusion(evolved_elevation)\n\n # compute elevation change\n difference = self.compute_difference(evolved_elevation, difference)\n\n # remove temporary maps\n gscript.run_command(\n 'g.remove',\n type='raster',\n name=['slope',\n 'aspect',\n 'flowacc',\n 'qsx',\n 'qsy',\n 'qsxdx',\n 'qsydy',\n 'grow_slope',\n 'grow_aspect',\n 'grow_qsxdx',\n 'grow_qsydy',\n 'erdep',\n 'sedflow',\n 'r_factor',\n 'ls_factor'],\n flags='f')\n\n return (evolved_elevation, time, depth, erosion_deposition, difference)",
"def RGGQLambda_MultiProcess_Ave(ave_times=20,\n learning_rate=0.1,\n eligibility_factor=0.9,\n regularize_factor=0.0001):\n # Learning parameters\n precise = [10, 8, 10, 8]\n\n discount_factor = 0.9\n discount_of_learning_rate = 0.999\n epsilon = 0.1\n\n # Macro\n NUM_EPISODE = 600\n AVE_TIMES = ave_times\n REWARD_THREASHOLD = 40\n # Definition of dependencies\n env = gym.make('CartPole-v0')\n\n observation_space = (\n env.observation_space.low,\n env.observation_space.high\n )\n\n CartPole_universal_action_space = [i for i in xrange(0, env.action_space.n)]\n state_action_space = StateActionSpace_CartPole(\n observation_space,\n precise,\n CartPole_universal_action_space\n )\n\n # Run algorithm\n for ave_times in range(AVE_TIMES):\n learning_agent_RGGQLambda = RGGQLambda(\n learning_rate,\n discount_of_learning_rate,\n discount_factor,\n eligibility_factor,\n regularize_factor,\n epsilon,\n state_action_space.action_space\n )\n learning_agent = learning_agent_RGGQLambda\n\n sparsity = []\n Qfunc_error_history_2 = []\n total_reward_episode_2 = []\n time_history_2 = []\n max_reward = -float(\"inf\")\n for i_episode in range(NUM_EPISODE):\n time_start = time.clock()\n observation = env.reset()\n\n discret_state = state_action_space._m_observation_to_discrete_state(\n observation\n )\n discret_state_bar = deepcopy(discret_state)\n\n action = learning_agent._m_GreedyPolicy(\n discret_state,\n state_action_space\n )\n\n phi = state_action_space._m_discrete_state_to_feature(\n discret_state,\n action\n )\n\n rho = 1\n\n total_reward = 0\n Qfunc_previous = deepcopy(learning_agent.theta)\n learning_agent.e = np.zeros(learning_agent.num_element_qfunc)\n\n done = False\n step = 0\n while not done:\n step += 1\n while set(discret_state) == set(discret_state_bar):\n observation_bar, step_reward, done, info = env.step(action)\n\n if done:\n break\n\n discret_state_bar = state_action_space._m_observation_to_discrete_state(\n observation_bar\n )\n\n action_bar = learning_agent._m_GreedyPolicy(\n discret_state_bar,\n state_action_space\n )\n phi_bar = state_action_space._m_discrete_state_to_feature(\n discret_state_bar,\n action_bar\n )\n\n learning_agent._m_Learn(phi,\n phi_bar,\n step_reward,\n rho,\n 1\n )\n\n phi = phi_bar\n action = action_bar\n discret_state = discret_state_bar\n total_reward += step_reward\n if done:\n break\n print \"Episode finished after {} timesteps in RGGQ(lambda)\".format(step), \"in \", ave_times + 1, \"times\"\n time_end = time.clock()\n time_consumed = time_end - time_start\n time_history_2.append(time_consumed)\n\n if total_reward > max_reward:\n if total_reward > REWARD_THREASHOLD:\n epsilon *= 0.999\n max_reward = total_reward\n\n total_reward_episode_2.append(total_reward) # Add total reward to reward history\n\n delta_q_func = Qfunc_previous - learning_agent.theta\n Qfunc_difference_this_episode = np.dot(\n delta_q_func,\n delta_q_func\n )\n Qfunc_error_history_2.append( # Add error to error history\n Qfunc_difference_this_episode\n )\n\n sparsity.append(np.sum(learning_agent.theta == 0) / (learning_agent.num_element_qfunc * 1.0))\n\n if i_episode % 10 == 0:\n print i_episode, \"th episode completed\"\n print \"Q update is\", Qfunc_difference_this_episode\n print \"Maximal reward is\", max_reward, \"\\n\"\n\n Qfunc_error_history_2 = np.array(Qfunc_error_history_2)\n if 'Qfunc_error_history_ave_2' not in locals():\n Qfunc_error_history_ave_2 = Qfunc_error_history_2\n else:\n Qfunc_error_history_ave_2 = Qfunc_error_history_ave_2 + (Qfunc_error_history_2 - 
Qfunc_error_history_ave_2) / (ave_times * 1.0)\n\n total_reward_episode_2 = np.array(total_reward_episode_2)\n if 'total_reward_episode_ave_2' not in locals():\n total_reward_episode_ave_2 = total_reward_episode_2\n else:\n total_reward_episode_ave_2 = total_reward_episode_ave_2 + (total_reward_episode_2 - total_reward_episode_ave_2) / (ave_times * 1.0)\n\n time_history_2 = np.array(time_history_2)\n if 'time_history_ave_2' not in locals():\n time_history_ave_2 = time_history_2\n else:\n time_history_ave_2 = time_history_ave_2 + (time_history_2 - time_history_ave_2) / (ave_times * 1.0)\n\n Qfunc_error_history_2 = Qfunc_error_history_ave_2\n total_reward_episode_2 = total_reward_episode_ave_2\n time_history_2 = time_history_ave_2\n with open(\n path + \"total_reward_RGGQ-\" +\n str(learning_rate) + \"-\" + str(eligibility_factor) + \"-\" + str(regularize_factor), 'wb') as f:\n pickle.dump(total_reward_episode_2, f)\n with open(\n path + \"time_history_RGGQ-\" +\n str(learning_rate) + \"-\" + str(eligibility_factor) + \"-\" + str(regularize_factor), 'wb') as f:\n pickle.dump(time_history_2, f)\n with open(\n path + \"sparsity_RGGQ-\" +\n str(learning_rate) + \"-\" + str(eligibility_factor) + \"-\" + str(regularize_factor), 'wb') as f:\n pickle.dump(sparsity, f)",
"def compute_matrices_A_B(self, state, action, env):\n Fe, Fs, psi = action\n theta = state[THETA]\n m = env.lander.mass\n J = env.lander.inertia\n\n sin_psi = math.sin(psi)\n cos_psi = math.cos(psi)\n sin_theta = math.sin(theta)\n cos_theta = math.cos(theta)\n\n cos_t_cos_p = cos_theta * cos_psi\n sin_t_cos_p = sin_theta * cos_psi\n sin_t_sin_p = sin_theta * sin_psi\n sin_t_cos_t = sin_theta * cos_theta\n cos_t_sin_p = cos_theta * sin_psi\n\n a_25 = (Fe * (cos_t_cos_p - sin_psi * sin_theta) - Fs * sin_theta) / m\n a_45 = (Fe * (sin_t_cos_t - cos_t_sin_p) - Fs * cos_theta) / m\n\n A = [[0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, a_25, 0],\n [0, 0, 0, 1, 0, 0],\n [0, 0, 0, 0, a_45, 0],\n [0, 0, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 0]]\n\n b_21 = (sin_t_cos_p + cos_t_cos_p) / m\n b_22 = cos_theta / m\n b_23 = -Fe * sin_t_sin_p / m\n\n b_41 = (cos_t_cos_p - sin_t_sin_p) / m\n b_42 = -sin_theta / m\n b_43 = Fe * (-cos_t_sin_p - sin_t_cos_p) / m\n\n b_61 = -sin_psi * L1 / J\n b_62 = L2 / J\n b_63 = -Fe * cos_psi * L1 / J\n\n B = [[0, 0, 0],\n [b_21, b_22, b_23],\n [0, 0, 0],\n [b_41, b_42, b_43],\n [0, 0, 0],\n [b_61, b_62, b_63]]\n\n return np.array(A), np.array(B)",
"def rusle(self):\n\n # assign variables\n ls_factor = 'ls_factor'\n slope = 'slope'\n grow_slope = 'grow_slope'\n flowacc = 'flowacc'\n sedflow = 'sedflow'\n sedflux = 'flux'\n\n # parse, advance, and stamp time\n (evolved_elevation, time, depth, sediment_flux, erosion_deposition,\n difference) = self.parse_time()\n\n # compute event-based erosivity (R) factor (MJ mm ha^-1 hr^-1 yr^-1)\n r_factor = self.event_based_r_factor()\n\n # compute slope\n gscript.run_command(\n 'r.slope.aspect',\n elevation=self.elevation,\n slope=slope,\n overwrite=True)\n\n # grow border to fix edge effects of moving window computations\n gscript.run_command(\n 'r.grow.distance',\n input=slope,\n value=grow_slope,\n overwrite=True)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{slope}={grow_slope}\".format(\n slope=slope,\n grow_slope=grow_slope),\n overwrite=True)\n\n # compute flow accumulation\n gscript.run_command(\n 'r.watershed',\n elevation=self.elevation,\n accumulation=flowacc,\n flags=\"a\",\n overwrite=True)\n region = gscript.parse_command(\n 'g.region', flags='g')\n res = region['nsres']\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{depth}\"\n \"=({flowacc}*{res})\".format(\n depth=depth,\n flowacc=flowacc,\n res=res),\n overwrite=True)\n\n # compute dimensionless topographic factor\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{ls_factor}\"\n \"=({m}+1.0)\"\n \"*(({flowacc}/22.1)^{m})\"\n \"*((sin({slope})/5.14)^{n})\".format(\n ls_factor=ls_factor,\n m=self.m,\n flowacc=depth,\n slope=slope,\n n=self.n),\n overwrite=True)\n\n # compute sediment flow\n \"\"\"E = R * K * LS * C * P\n where\n E is average annual soil loss\n R is erosivity factor\n K is soil erodibility factor\n LS is a dimensionless topographic (length-slope) factor\n C is a dimensionless land cover factor\n P is a dimensionless prevention measures factor\n \"\"\"\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{sedflow}\"\n \"={r_factor}\"\n \"*{k_factor}\"\n \"*{ls_factor}\"\n \"*{c_factor}\".format(\n sedflow=sedflow,\n r_factor=r_factor,\n k_factor=self.k_factor,\n ls_factor=ls_factor,\n c_factor=self.c_factor),\n overwrite=True)\n\n # convert sediment flow from tons/ha/yr to kg/m^2s\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{converted_sedflow}\"\n \"={sedflow}\"\n \"*{ton_to_kg}\"\n \"/{ha_to_m2}\"\n \"/{yr_to_s}\".format(\n converted_sedflow=sedflux,\n sedflow=sedflow,\n ton_to_kg=1000.,\n ha_to_m2=10000.,\n yr_to_s=31557600.),\n overwrite=True)\n\n # filter outliers\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{sediment_flux}\"\n \"=if({sedflux}>{erdepmax},{erdepmax},{sedflux})\".format(\n sediment_flux=sediment_flux,\n sedflux=sedflux,\n erdepmax=self.erdepmax),\n overwrite=True)\n gscript.run_command(\n 'r.colors',\n map=sediment_flux,\n color='viridis',\n flags='g')\n\n # evolve landscape\n \"\"\"\n change in elevation (m)\n = change in time (s)\n * sediment flux (kg/ms)\n / mass of sediment per unit area (kg/m^2)\n \"\"\"\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{evolved_elevation}\"\n \"={elevation}\"\n \"-({rain_interval}*60\"\n \"*{sediment_flux}\"\n \"/{mass})\".format(\n evolved_elevation=evolved_elevation,\n elevation=self.elevation,\n rain_interval=self.rain_interval,\n sediment_flux=sediment_flux,\n mass=self.mass),\n overwrite=True)\n\n # gravitational diffusion\n evolved_elevation = self.gravitational_diffusion(evolved_elevation)\n\n # compute elevation change\n difference = self.compute_difference(evolved_elevation, difference)\n\n # remove temporary 
maps\n gscript.run_command(\n 'g.remove',\n type='raster',\n name=['slope',\n 'grow_slope',\n 'flowacc',\n 'sedflow',\n 'flux',\n 'settled_elevation',\n 'divergence',\n 'r_factor',\n 'ls_factor'],\n flags='f')\n\n return (evolved_elevation, time, depth, sediment_flux, difference)",
"def test(args):\n brain_name = env.brain_names[0]\n brain = env.brains[brain_name]\n env_info = env.reset(train_mode=True)[brain_name]\n\n num_agents = len(env_info.agents)\n print('Number of agents:', num_agents)\n\n # dim of each action\n action_size = brain.vector_action_space_size\n print('Size of each action:', action_size)\n\n # dim of the state space\n states = env_info.vector_observations\n state_size = states.shape[1]\n\n agent = MADDPG(state_size, action_size, actor_layer_dim_1=args.actor_layer_dim_1,\n actor_layer_dim_2=args.actor_layer_dim_2,\n actor_layer_dim_3=args.actor_layer_dim_3,\n critic_layer_dim_1=args.critic_layer_dim_1,\n critic_layer_dim_2=args.critic_layer_dim_2,\n critic_layer_dim_3=args.critic_layer_dim_3)\n\n agent.load(\n \"chkpts/{}/{:02d}_best_model.checkpoint\".format(args.model_path, args.loop_counter))\n\n test_scores = []\n for i_episode in tqdm(range(1, 1+args.test_n_run)):\n # initialize the scores\n scores = np.zeros(num_agents)\n env_info = env.reset(train_mode=True)[\n brain_name] # reset the environment\n states = env_info.vector_observations # get the current states\n dones = [False]*num_agents\n while not np.any(dones):\n actions = agent.act(states) # select actions\n # send the actions to the environment\n env_info = env.step(actions)[brain_name]\n next_states = env_info.vector_observations # get the next states\n rewards = env_info.rewards # get the rewards\n dones = env_info.local_done # see if episode has finished\n scores += rewards # update the scores\n # roll over the states to next time step\n states = next_states\n\n test_scores.append(np.max(scores))\n\n avg_score = sum(test_scores)/len(test_scores)\n print(\"Test Score: {}\".format(avg_score))\n\n return avg_score",
"def breath_analyze(self, offset=0, th=10):\n # breath part\n breath_gd = np.gradient(gf(self.breath_list, 10))\n breath_gd[breath_gd > 0] = 1\n breath_gd[breath_gd < 0] = 0\n breath_pulse = breath_gd[:-1]-np.roll(breath_gd, -1)[:-1]\n breath_in = argrelextrema(breath_pulse, np.less, order=10)[0]#+offset\n breath_out = argrelextrema(breath_pulse, np.greater, order=10)[0]#+offset\n self.breath = np.sort(np.hstack([breath_in, breath_out, len(self.breath_list)-1]))\n \n if self.breath[0] == breath_in[0]:\n self.btype = 'in'\n else:\n self.btype = 'out' \n\n b_in = []\n b_out = []\n delidx = []\n\n if len(self.breath) != 0: \n for i, j in zip(self.breath[:-1], self.breath[1:]):\n breath_diff = abs(self.breath_list[j]-self.breath_list[i])\n if abs(breath_diff) > 3000: # really breath in/out\n if abs(breath_diff) < 30000: # not deep breath\n if breath_diff > 0: # breath out\n print('breath out from frame '+str(i)+' to frame '+str(j)\n +' <== breath not deep enough')\n b_out.append(j-i)\n self.ngframe.append(i)\n else: # breath in\n print('breath in from frame '+str(i)+' to frame '+str(j)\n +' <== breath not deep enough')\n b_in.append(j-i)\n else: \n if breath_diff > 0: # breath out\n print('breath out from frame '+str(i)+' to frame '+str(j))\n b_out.append(j-i)\n else: # breath in\n print('breath in from frame '+str(i)+' to frame '+str(j))\n b_in.append(j-i)\n else:\n delidx.append(np.argwhere(self.breath==j)[0][0])\n self.breath = np.delete(self.breath, np.array(delidx))\n\n print('\\naverage breath out freq is: '+str(np.round(30./np.mean(b_out), 2))+' Hz')\n print('\\naverage breath in freq is: '+str(np.round(30./np.mean(b_in), 2))+' Hz')\n else:\n raise ImportError('Doing too fast !! please redo again !!')",
"def eval_damping():\n # Environment\n env = WAMBallInCupSim(num_dof=7, max_steps=1500)\n\n # Policy (random init)\n policy_hparam = dict(num_feat_per_dim=12, bounds=(np.array([0.0]), np.array([1.0])))\n policy = DualRBFLinearPolicy(env.spec, policy_hparam, dim_mask=2)\n\n # Do the rolllouts\n t_all = []\n qpos_all = []\n dp_vals = [0.0, 0.01, 0.1, 0.5, 1.0]\n print_cbt(f\"Run policy for damping coefficients: {dp_vals}\")\n for dpv in dp_vals:\n env.reset(\n domain_param=dict(\n joint_1_damping=dpv,\n joint_2_damping=dpv,\n joint_3_damping=dpv,\n joint_4_damping=dpv,\n joint_5_damping=dpv,\n joint_6_damping=dpv,\n joint_7_damping=dpv,\n )\n )\n ro = rollout(env, policy, render_mode=RenderMode(video=False), eval=True)\n t_all.append(ro.time[:-1])\n qpos_all.append(ro.env_infos[\"qpos\"])\n\n # Plot\n fig, ax = plt.subplots(nrows=env.num_dof, sharex=\"all\", figsize=(16, 7))\n for i, idx_joint in enumerate([dof for dof in range(env.num_dof)]):\n ax[i].set_prop_cycle(color=plt.get_cmap(\"cividis\")(np.linspace(0, 1, env.num_dof)))\n ax[i].set_ylabel(f\"joint {idx_joint+1} pos [rad]\")\n for j in range(len(dp_vals)):\n ax[i].plot(t_all[j], qpos_all[j][:, idx_joint], ls=\"--\", label=f\"d = {dp_vals[j]}\")\n if i == 0:\n ax[i].legend(ncol=len(dp_vals))\n ax[-1].set_xlabel(\"time [s]\")\n plt.suptitle(\"Evaluation of joint damping coefficients\")\n plt.show()",
"def main_ededge(dataset):\n Application.set_input_image_folder('TestData/BSR/BSDS500/data/images/' + dataset)\n\n # Application.delete_folder_appl_out()\n # Benchmarking.delete_folder_benchmark_out()\n\n Application.do_get_image_job(port_output_name='RAW')\n Application.do_grayscale_transform_job(port_input_name='RAW', port_output_name='GRAY_RAW')\n blur = Application.do_gaussian_blur_image_job(port_input_name='GRAY_RAW', sigma=0, kernel_size=9)\n\n list_to_eval_edge = []\n\n first_order_edge = [\n CONFIG.FILTERS.PIXEL_DIFF_3x3, CONFIG.FILTERS.PIXEL_DIFF_SEPARATED_3x3\n , CONFIG.FILTERS.PIXEL_DIFF_SEPARATED_5x5, CONFIG.FILTERS.PIXEL_DIFF_SEPARATED_7x7\n , CONFIG.FILTERS.PIXEL_DIFF_5x5, CONFIG.FILTERS.PIXEL_DIFF_7x7\n\n , CONFIG.FILTERS.SOBEL_3x3, CONFIG.FILTERS.SOBEL_5x5, CONFIG.FILTERS.SOBEL_7x7\n , CONFIG.FILTERS.SOBEL_DILATED_5x5, CONFIG.FILTERS.SOBEL_DILATED_7x7\n\n , CONFIG.FILTERS.PREWITT_3x3, CONFIG.FILTERS.PREWITT_5x5, CONFIG.FILTERS.PREWITT_7x7\n , CONFIG.FILTERS.PREWITT_DILATED_5x5, CONFIG.FILTERS.PREWITT_DILATED_7x7\n\n , CONFIG.FILTERS.KIRSCH_3x3, CONFIG.FILTERS.KIRSCH_5x5\n , CONFIG.FILTERS.KIRSCH_DILATED_5x5, CONFIG.FILTERS.KIRSCH_DILATED_7x7\n\n , CONFIG.FILTERS.KITCHEN_MALIN_3x3\n , CONFIG.FILTERS.KITCHEN_MALIN_DILATED_5x5, CONFIG.FILTERS.KITCHEN_MALIN_DILATED_7x7\n\n , CONFIG.FILTERS.KAYYALI_3x3\n , CONFIG.FILTERS.KAYYALI_DILATED_5x5, CONFIG.FILTERS.KAYYALI_DILATED_7x7\n\n , CONFIG.FILTERS.SCHARR_3x3, CONFIG.FILTERS.SCHARR_5x5\n , CONFIG.FILTERS.SCHARR_DILATED_5x5, CONFIG.FILTERS.SCHARR_DILATED_7x7\n\n , CONFIG.FILTERS.KROON_3x3\n , CONFIG.FILTERS.KROON_DILATED_5x5, CONFIG.FILTERS.KROON_DILATED_7x7\n\n , CONFIG.FILTERS.ORHEI_3x3, CONFIG.FILTERS.ORHEI_B_5x5\n , CONFIG.FILTERS.ORHEI_DILATED_5x5, CONFIG.FILTERS.ORHEI_DILATED_7x7\n ]\n\n for edge in first_order_edge:\n for gr_thr in [50]:\n for anc_thr in [10]:\n e1, e2, = Application.do_edge_drawing_mod_job(port_input_name=blur, operator=edge,\n gradient_thr=gr_thr, anchor_thr=anc_thr, scan_interval=1,\n max_edges=100, max_points_edge=100)\n list_to_eval_edge.append(e1 + '_L0')\n\n Application.create_config_file(verbose=False)\n Application.configure_save_pictures(job_name_in_port=False, ports_to_save='ALL')\n # Application.configure_show_pictures(ports_to_show=list_to_save, time_to_show=200)\n\n # Application.run_application()\n\n # Do bsds benchmarking\n # Be ware not to activate job_name_in_port in Application.configure_save_pictures\n # Benchmarking.run_bsds500_boundary_benchmark(input_location='Logs/application_results',\n # gt_location='TestData/BSR/BSDS500/data/groundTruth/' + dataset,\n # raw_image='TestData/BSR/BSDS500/data/images/' + dataset,\n # jobs_set=list_to_eval_edge, do_thinning=False)\n\n Utils.plot_first_cpm_results(prefix='EDGE_DRAWING_MOD_', level='L0', order_by='f1', name='ed_results',\n list_of_data=list_to_eval_edge, number_of_series=50,\n inputs=[''], self_contained_list=True, set_legend_left=False,\n suffix_to_cut_legend='_S_0_GRAY_RAW_L0',\n replace_list=[('EDGE_DRAWING_MOD_THR_50_ANC_THR_10_SCAN_1_', ''),\n ('SEPARATED_PIXEL_DIFFERENCE_', 'Separated Px Dif '),\n ('PIXEL_DIFFERENCE_', 'Pixel Dif '),\n ('PREWITT_', 'Prewitt '), ('KIRSCH_', 'Kirsch '), ('SOBEL_', 'Sobel '),\n ('SCHARR_', 'Scharr '), ('KROON_', 'Kroon '), ('ORHEI_V1_', 'Orhei '),\n ('ORHEI_', 'Orhei '),\n ('KITCHEN_', 'Kitchen '), ('KAYYALI_', 'Kayyali '),\n ('DILATED_', 'dilated '),\n ('_GAUSS_BLUR_K_9', '')],\n save_plot=True, show_plot=False, set_all_to_legend=False)\n\n # Utils.create_latex_cpm_table_list()\n\n 
Utils.close_files()",
"def calculation_of_propagation(self): \n \n prop = PopulationPropagator(world.time, rate_matrix=world.KK)\n \n pop_ini = numpy.array([1.0, 0.0])\n \n pop_t = prop.propagate(pop_ini)\n \n sta = world.subtime\n \n U = prop.get_PropagationMatrix(sta)\n \n pop_sub = numpy.zeros((2,sta.length))\n \n for i in range(sta.length):\n pop_sub[:,i] = numpy.dot(U[:,:,i],pop_ini) \n \n world.pop_t = pop_t\n world.pop_sub = pop_sub",
"def main():\n # Handle CLI.\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--molecule\", type=str, default=\"H2\", help=\"the \"\n \"UCCSD molecule to perform HPO on\")\n parser.add_argument(\"--slice-index\", type=int, default=-1, help=\"the \"\n \"slice to perform HPO on, do not specify to run HPO \"\n \"on the full circuit\")\n parser.add_argument(\"--core-count\", type=int, default=1, help=\"the \"\n \"number of cpu cores this run may use\")\n args = vars(parser.parse_args())\n molecule = args[\"molecule\"]\n slice_index = args[\"slice_index\"]\n core_count = args[\"core_count\"]\n\n # Generate the state object that encapsulates the optimization for the circuit.\n state = ProcessState(molecule, slice_index)\n\n # Redirect everything the central process puts out to a log file.\n # By default, ray redirects the stdout of each worker process\n # to the central process.\n log_file = state.file_name + \".log\"\n log_file_path = os.path.join(state.data_path, log_file)\n with open(log_file_path, \"a+\") as log:\n sys.stdout = sys.stderr = log\n\n # Display run characteristics.\n print(\"PID={}\\nWALL_TIME={}\\nSLICE_INDEX={}\\nPULSE_TIME={}\\n\"\n \"(LR_LB, LR_UB)=({}, {})\\n(DECAY_LB, DECAY_UB)=({}, {})\\n\"\n \"CORE_COUNT={}\\n{}\"\n \"\".format(os.getpid(), time.time(), state.slice_index,\n state.pulse_time, LR_LB, LR_UB, DECAY_LB, DECAY_UB, \n core_count, state.circuit))\n\n # Define the search space on the parameters: learning rate and\n # learning rate decay.\n space = {\n \"lr\": hp.loguniform(\"lr\", np.log(LR_LB), np.log(LR_UB)),\n \"decay\": hp.uniform(\"decay\", DECAY_LB, DECAY_UB),\n }\n \n # We want to minimize QOC error/loss, i.e. we want to maximize\n # negative loss.\n algo = ray.tune.suggest.HyperOptSearch(space, max_concurrent=core_count,\n reward_attr=\"neg_loss\")\n run_config = {\n \"num_samples\": HPO_MAX_ITERATIONS,\n \"name\": state.file_name,\n \"loggers\": [ray.tune.logger.NoopLogger],\n \"search_alg\": algo,\n \"verbose\": 1,\n \"local_dir\": state.data_path,\n \"resume\": True,\n }\n \n # Ray cannot serialize python objects in its object store,\n # so we have to pass the state in a lambda wrapper.\n objective_wrapper = lambda config, reporter: objective(state, config,\n reporter)\n \n # Start ray and run HPO.\n ray.init(num_cpus=core_count, object_store_memory=OBJECT_STORE_MEMORY,\n redis_max_memory=REDIS_MAX_MEMORY)\n ray.tune.register_trainable(\"lambda_id\", objective_wrapper)\n ray.tune.run(\"lambda_id\", **run_config)",
"def LA_contribution(self):\n pr=paraxial(self.entrance_pupil,0)\n #hnu=-u*self.entrance_pupil #n=1\n pr.propagate(self.surfaces)\n #print('hnu',hnu,1/hnu)\n #print('paraxial y ',pr.y[1:])\n #print('paraxial nu',pr.nu[:-1])\n #print('paraxial u ',pr.nu[:-1]/self.get_n()[:-1])\n #print('paraxial u ',pr.nu[:-1]/self.get_n()[:-1]/hnu/5.715023)\n #print('paraxial i ',pr.i[1:])\n ni=self.get_n()[:-1]*pr.i[1:]\n #print('ni',ni)\n marginal=beam_field()\n marginal.single_beam_from_Kingslake_Q(self.entrance_pupil,0) #marginal beam\n marginal.propagate(self.surfaces)\n Q=marginal.Kingslake_Qabs(self.surfaces)[:,0]\n Q_=marginal.Kingslake_Q_abs(self.surfaces)[:,0]\n #print('marginal Q ',marginal.Kingslake_Qabs(ls.surfaces)[:,0])\n #print('marginal Q\\'',marginal.Kingslake_Q_abs(ls.surfaces)[:,0])\n #print(Q-Q_)\n #print('paraxial nu\\'',pr.nu[1:])\n #print('sin Uk\\'',marginal.U)\n target_surface=len(self.surfaces)-1\n #print(marginal.U[3,0,1]*pr.nu[target_surface])\n nusinU=marginal.U[3,0,1]*pr.nu[target_surface] #n'u'sinU'_k all values at end focus\n LA=-(Q-Q_)*ni/nusinU\n #print('spherical LA contribution',LA)\n #print('sum',sum(LA))\n return LA",
"def update(self, state_sequence, reward_sequence):\n\n for i in range(reward_sequence.shape[0]):\n\n trajt_1 = state_sequence[:,i][:,np.newaxis] # No use of V_mu in computing distances!\n trajt = state_sequence[:,i+1][:,np.newaxis]\n # trajt_1 = np.concatenate((trajt_1, self.V_mu(trajt_1)), axis=0) # Use V_mu as well\n # trajt = np.concatenate((trajt, self.V_mu(trajt)), axis=0)\n k_t_1 = self.kernel(self.D, trajt_1)\n k_t = self.kernel(self.D, trajt)\n ktt = self.kernel(trajt, trajt)\n at = np.dot(self.K_inv, k_t)\n delk_t_1 = k_t_1 - self.gamma*k_t\n\n ct = np.dot(self.C_, delk_t_1) - (self.A - self.gamma*at)\n st = self.sigma0**2 - np.dot(ct.T, delk_t_1)\n\n diff_r = np.dot(delk_t_1.T, self.alpha_)[0,0] - reward_sequence[i]\n self.alpha_ = self.alpha_ + ct/st*diff_r\n\n self.C_ = self.C_ + np.dot(ct, ct.T)/st\n\n self.A = at\n\n assert (not np.isnan(self.alpha_).any()), \"Check alpha for NaN values\"\n\n self.diff_alpha_CV_D = self.alpha_ - np.dot(self.C_, self.V_D)",
"def prediction_aggregation(self, xt_s,mu_s,var_s, method='PoE', weighting='uniform', power=26):\n\n nt = xt_s.shape[0]\n mu = np.zeros([nt, self.C],dtype='float64')\n var = np.zeros([nt, self.C],dtype='float64')\n\n prior_var = self.experts[0].kernel(xt_s[0], xt_s[0])\n\n \n #Process each latent gp individually \n for j in range(self.C):\n \n mu_s_c = mu_s[:, :, j]\n var_s_c = var_s[:, :, j]\n \n weight_matrix = compute_weights(mu_s_c, var_s_c, power, weighting, prior_var)\n \n prec_s= 1/var_s_c\n\n if method == 'PoE':\n \n prec = tf.reduce_sum(prec_s, axis=0)\n \n\n if method == 'gPoE':\n \n weight_matrix = normalize_weights(weight_matrix)\n\n prec = tf.reduce_sum(weight_matrix * prec_s , axis=0)\n \n\n if method == 'BCM':\n \n prec = tf.reduce_sum(prec_s, axis=0) + (1 - self.M) / prior_var \n\n if method == 'rBCM':\n \n \n prec = tf.reduce_sum(weight_matrix * prec_s, axis=0) \\\n + (1 - tf.reduce_sum(weight_matrix, axis=0)) / prior_var\n \n \n \n if method != 'bar':\n \n var[:, j] = 1 / prec\n\n mu[:, j] = var[:, j] * tf.reduce_sum(weight_matrix * prec_s * mu_s_c, axis=0)\n \n else:\n \n weight_matrix = normalize_weights(weight_matrix)\n\n mu[:, j] = tf.reduce_sum(weight_matrix * mu_s_c, axis=0)\n var[:, j] = tf.reduce_sum(weight_matrix * var_s_c, axis=0)\n \n \n return self.lik_aggregation(mu, var)",
"def update(self, state_sequence, reward_sequence):\n\n for i in range(reward_sequence.shape[0]):\n\n trajt_1 = state_sequence[:,i][:,np.newaxis]\n Vt_1 = self.get_value_function(trajt_1)[0]\n trajt = state_sequence[:,i+1][:,np.newaxis]\n Vt = self.get_value_function(trajt)[0]\n k_t_1 = self.kernel(self.D, trajt_1)\n k_t = self.kernel(self.D, trajt)\n ktt = self.kernel(trajt, trajt)\n at = np.dot(self.K_inv, k_t)\n et = (ktt - np.dot(k_t.T, at))\n delk_t_1 = k_t_1 - self.gamma*k_t\n\n if ((et - self.nu) > 10**(-4)) and (abs(Vt_1 - self.gamma*Vt - reward_sequence[i]) > 2*abs(reward_sequence[i])):\n self.D = np.concatenate((self.D, trajt), axis=1)\n self.V_D = np.concatenate((self.V_D, self.V_mu(state_sequence[:,i+1][:,np.newaxis])), axis=0)\n\n at_by_et = at/et\n self.K_inv = np.concatenate((self.K_inv + np.dot(at, at.T)/et, -at_by_et), axis=1)\n self.K_inv = np.concatenate((self.K_inv, \\\n np.concatenate((-at_by_et.T, 1/et), axis=1)), axis=0)\n\n c_t = np.dot(self.C_, delk_t_1) - self.A\n\n delktt = np.dot(self.A.T, delk_t_1 - self.gamma*k_t) + (self.gamma**2)*ktt\n s_t = self.sigma0**2 + delktt - np.dot(delk_t_1.T, np.dot(self.C_, delk_t_1))\n\n diff_r = np.dot(delk_t_1.T, self.alpha_)[0,0] - reward_sequence[i]\n self.alpha_ = np.concatenate((self.alpha_ + c_t/s_t*diff_r, self.gamma/s_t*diff_r), axis=0)\n\n gc_t_by_s_t = (self.gamma/s_t)*c_t\n self.C_ = np.concatenate((self.C_ + np.dot(c_t, c_t.T)/s_t, gc_t_by_s_t), axis=1) \n self.C_ = np.concatenate((self.C_, \\\n np.concatenate((gc_t_by_s_t.T, self.gamma**2/s_t), axis=1)), axis=0)\n\n self.A = np.zeros((self.A.shape[0]+1, self.A.shape[1]), dtype=np.float64, order='C')\n self.A[-1, 0] = 1\n\n self.diff_alpha_CV_D = self.alpha_ - np.dot(self.C_, self.V_D)\n\n else:\n\n ct = np.dot(self.C_, delk_t_1) - (self.A - self.gamma*at)\n st = self.sigma0**2 - np.dot(ct.T, delk_t_1)\n\n diff_r = np.dot(delk_t_1.T, self.alpha_)[0,0] - reward_sequence[i]\n self.alpha_ = self.alpha_ + ct/st*diff_r\n\n self.C_ = self.C_ + np.dot(ct, ct.T)/st\n\n self.A = at\n\n self.diff_alpha_CV_D = self.alpha_ - np.dot(self.C_, self.V_D)\n\n assert (not np.isnan(self.alpha_).any()), \"Check alpha for NaN values\"",
"def do_a_series_of_propagations(self):\n index = 0\n header = 'col1'\n while index < self.rounds and self.flag:\n index = index + 1\n print(\"\\nLabel propagation round: \" + str(index)+\".\\n\")\n self.do_a_propagation()\n print(index)\n with open ('./data/lpa.txt','w') as f:\n for i in self.nodes:\n f.write(str(self.labels[i])+'\\t')\n f.write(str(i)+'\\t')\n f.write('\\n')\n \n print(\"\")\n print(\"Modularity is: \" + str(round(modularity( self.labels,self.graph,0.2), 40)) + \".\\n\")\n json_dumper(self.labels, self.args.assignment_output)"
] | [
"0.6919233",
"0.5892002",
"0.5517866",
"0.54245466",
"0.52051145",
"0.51677525",
"0.5152469",
"0.5125298",
"0.5082556",
"0.50799584",
"0.5073586",
"0.505811",
"0.5040545",
"0.5011798",
"0.4996324",
"0.498793",
"0.49791676",
"0.49737632",
"0.4945683",
"0.49369043",
"0.49077606",
"0.48836026",
"0.48754796",
"0.4874645",
"0.4870194",
"0.48537922",
"0.48435807",
"0.48290685",
"0.4826983",
"0.48257628"
] | 0.7314743 | 0 |
Run the belief propagation for a set of rays | def belief_propagation(
S,
ray_voxel_indices,
ray_voxel_count,
ray_to_occupancy_messages_pon,
grid_shape,
gamma=0.05,
bp_iterations=3,
progress_callback=lambda *args: None
):
    # Extract the number of rays (N) and the maximum number of voxels per ray (M)
N, M = S.shape
# Initialize the ray to occupancy messages to uniform
ray_to_occupancy_messages_pon.fill(0)
    # Initialize the accumulated ray-to-occupancy messages to the occupancy
    # prior $\phi(o_i)$. ray_to_occupancy_accumulated_prev_pon and
    # ray_to_occupancy_accumulated_new_pon hold the accumulated quotient of
    # the positive over the negative ray-to-occupancy message in log space,
    # for the current and the next belief propagation iteration respectively.
    # Both accumulators are initialized to
    # $\log\left(\frac{\phi(o_i=1)}{\phi(o_i=0)}\right)$
ray_to_occupancy_accumulated_prev_pon = np.ones(
tuple(grid_shape),
dtype=np.float32
) * (np.log(gamma) - np.log(1 - gamma))
ray_to_occupancy_accumulated_new_pon = np.ones(
tuple(grid_shape),
dtype=np.float32
) * (np.log(gamma) - np.log(1 - gamma))
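    # With the default gamma = 0.05 this prior log-odds,
    # log(0.05) - log(0.95), is roughly -2.94, i.e. every voxel starts out
    # as most likely unoccupied.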
# Iterate over the rays multiple times
    for it in range(bp_iterations):
        print("Iteration %d" % (it,))
        for r in range(N):
# Get the actual number of voxels which this ray passes through
c = ray_voxel_count[r]
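            # Skip degenerate rays that cross at most one voxel; there is no
            # chain of voxels to pass messages along for them.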
if c <= 1:
continue
ray_to_occupancy_pon = single_ray_belief_propagation(
ray_voxel_indices[r, :c, :],
ray_to_occupancy_accumulated_prev_pon,
ray_to_occupancy_messages_pon[r, :c],
clip_and_renorm(S[r, :c])
)
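            # ray_to_occupancy_pon holds, for each voxel this ray crosses, the
            # log-ratio of the positive to the negative ray-to-occupancy
            # message (hence the _pon suffix).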
idxs = ray_voxel_indices[r, :c]
idxs = (idxs[:, 0], idxs[:, 1], idxs[:, 2])
ray_to_occupancy_accumulated_new_pon[idxs] += ray_to_occupancy_pon
# Update the array of the ray-to-occupancy messages with the
# current message that will be used for the next iteration
ray_to_occupancy_messages_pon[r, :c] = ray_to_occupancy_pon
        # Copy the new accumulator into the previous one and reset the new
        # one to the prior log-odds for the next bp iteration
ray_to_occupancy_accumulated_prev_pon[:] = ray_to_occupancy_accumulated_new_pon
ray_to_occupancy_accumulated_new_pon.fill(np.log(gamma) - np.log(1 - gamma))
progress_callback(
S,
ray_voxel_indices,
ray_voxel_count,
ray_to_occupancy_messages_pon,
ray_to_occupancy_accumulated_prev_pon,
it
)
return ray_to_occupancy_accumulated_prev_pon, ray_to_occupancy_messages_pon | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def single_ray_belief_propagation(ray_voxel_indices,\n ray_to_occupancy_accumulated_pon,\n ray_to_occupancy_pon, s):\n # Create an index that when passed to a numpy array will return the voxels\n # that this ray passes through\n # TODO: Remove this check. This is just to make the code run for the\n # 2D tests.\n if ray_voxel_indices.shape[-1] == 3:\n indices = (\n ray_voxel_indices[:, 0],\n ray_voxel_indices[:, 1],\n ray_voxel_indices[:, 2]\n )\n else:\n indices = (\n ray_voxel_indices[:, 0],\n ray_voxel_indices[:, 1]\n )\n\n # Compute the the occupancy_to_ray message\n # NOTE: The ray_to_occupancy_accumulated is in log space\n occupancy_to_ray_pon = (\n ray_to_occupancy_accumulated_pon[indices] -\n ray_to_occupancy_pon\n )\n # We assume that incoming messages are normalized to 1, thus we need to\n # normalize the occupancy-to-ray message\n # Make sure that the occupancy-to-ray message for every voxel is greater or\n # equal to 0\n max_occupancy_to_ray = np.maximum(0.0, occupancy_to_ray_pon)\n t1 = np.exp(0.0 - max_occupancy_to_ray)\n t2 = np.exp(occupancy_to_ray_pon - max_occupancy_to_ray)\n\n # Now we normalize the occupancy to ray message for the positive case.\n # The occupancy_to_ray holds the positive occupancy-to-ray messages for the\n # current ray (not in logspace) from Equation (44) in my report\n occupancy_to_ray = np.clip(\n t2 / (t2 + t1),\n 1e-4,\n 1-1e-4\n )\n\n # Compute the cumulative products in linear time (see eq. 13, 14 Ulusoy\n # 3DV)\n # For the computation of the cumulative product we need\n # the occupancy-to-ray messages for the negative case.\n # We append 1 at the top because for the o_1 voxel this term is equal to 1\n occupancy_to_ray_neg_cumprod = np.hstack([\n [1.], (1 - occupancy_to_ray).cumprod()\n ])\n\n # Get the number of voxels that intersect with the ray\n M = ray_to_occupancy_pon.shape[0]\n # Make space to compute the ray to occupancy messages for both the positive\n # and the negative case according to eq 44, 48 in my report\n ray_to_occupancy_new = np.zeros((2, M), dtype=np.float32)\n\n # Compute the part of the messages that is the same for positive and\n # negative messages\n ray_to_occupancy_new[:] += np.hstack([\n [0.], occupancy_to_ray * occupancy_to_ray_neg_cumprod[:-1] * s\n ])[:-1].cumsum()\n\n # Finalize the positive messages\n ray_to_occupancy_new[1] += occupancy_to_ray_neg_cumprod[:-1] * s\n\n # Finalize the negative messages (adding 2nd part of eq. 14 Ulusoy 3DV)\n # The summations we want to calculate are as follows:\n # i=1, \\sum_{i=2}^N(\\cdot)\n # i=2, \\sum_{i=3}^N(\\cdot)\n # ...\n # i=N-2, \\sum_{i=N-1}^N(\\cdot)\n # lets assume that we have [a, b, c, d, e]. We first inverse the array,\n # thus resulting in [e, d, c, b, a] and then we compute the cumulative sum\n # on this array. The output is [e, e+d, e+d+c, e+d+c+b, e+d+c+b+a]. 
However\n # we want them in the inverse order, thus we inverse the output once again\n # and we have [e+d+c+b+a, e+d+c+b, e+d+c, e+d, e]\n # Finally we also divide with the incoming message for the negative case\n ray_to_occupancy_new[0] += np.hstack([\n occupancy_to_ray * occupancy_to_ray_neg_cumprod[:-1] * s,\n [0.0]\n ])[::-1].cumsum()[::-1][1:] / (1 - occupancy_to_ray)\n\n # Normalize the positive ray_to_occupancy message\n ray_to_occupancy_new_pos =\\\n ray_to_occupancy_new[1] / (ray_to_occupancy_new[1] + ray_to_occupancy_new[0])\n\n # Return the quotient of the positive ray to occupancy message with the\n # negative ray to occupancy message in logspace\n t = np.log(ray_to_occupancy_new_pos) - np.log(1 - ray_to_occupancy_new_pos)\n\n if np.isnan(t).any() or np.isinf(t).any():\n print \"ray_to_occupancy_pon contains weird values %r\" % (t)\n print \"ray_to_occupancy_new_pos\", ray_to_occupancy_new_pos\n\n return t",
"def propagate(self, elements):\n while True:\n try:\n tti, bound, next_element = self._next_hit(elements)\n except TypeError:\n # That's all, folks!\n return\n self._ray = self._ray.propagate(tti)\n self._history.append((self._history[-1][0] + tti, self._ray))\n if not bound.is_reflective() and self._ray.direction.dot(\n bound.normal(self._ray.position)) > 0:\n next_element = Air()\n self._ray = bound.propagate(\n self._ray, self._element.index, next_element.index)\n self._history.append((self._history[-1][0], self._ray))\n self._element = next_element",
"def run(self, nrays=None, seed=None):\n #reset nrays/seed if given\n if (nrays is not None) and (seed is not None): self.src.set_rays(nrays=nrays, seed=seed)\n if (nrays is not None): self.src.set_rays(nrays)\n #generate source\n if self.iwrite: self.src.write(\"start.00\")\n self_repair_src(self.src)\n self.beam.genSource(self.src)\n if self.iwrite:\n self.src.write(\"end.00\")\n self.beam.write(\"begin.dat\")\n #trace oe1\n if self.iwrite: self.oe1.write(\"start.01\")\n self_repair_oe(self.oe1)\n self.beam.traceOE(self.oe1, 1)\n if self.iwrite:\n self.oe1.write(\"end.01\")\n self.beam.write(\"star.01\")\n #trace detector (not required yet)\n # if self.iwrite: self.det.write(\"start.02\")\n # self.beam.traceOE(self.det, 2)\n # if self.iwrite:\n # self.det.write(\"end.02\")\n # self.beam.write(\"star.02\")",
"def _pre_draw_bge(self):\r\n self._pre_draw_common()\r\n # draw rays\r\n self._drawRays()",
"def Sarsa_lbda_w_bf_and_lin_FA(env, fvecs, idcs_per_action, weights, alpha,\n epsilon, gamma, num_actions, num_episodes, lbda):\n\n for episode in tqdm(range(num_episodes)):\n done = False\n \n state = env.reset()\n fvec_idx_per_tiling = fvecs.calc_feature_vec(state)\n Q_vals = init_Q_values(weights, fvec_idx_per_tiling, idcs_per_action,\n num_actions)\n action, Q_current = eps_greedy(Q_vals, epsilon, num_actions) \n curr_active_feat_idcs = fvec_idx_per_tiling + action*idcs_per_action \n \n z_trace = init_e_traces(len(weights)) ###\n \n step_count = 0\n while not done:\n step_count += 1\n if episode == (num_episodes -1):\n env.render(state[0])\n\n next_state, reward, done,__ = env.step(state, action)\n\n delta = reward\n for i in curr_active_feat_idcs:\n delta -= weights[i]\n z_trace[i] += 1 ### accumulating traces\n #z_trace[i] = 1 ### replacing traces\n if done:\n weights += alpha*delta*z_trace\n break\n\n fvec_idx_per_tiling = fvecs.calc_feature_vec(next_state)\n Q_vals = init_Q_values(weights, fvec_idx_per_tiling, idcs_per_action,\n num_actions)\n next_action, Q_next = eps_greedy(Q_vals, epsilon, num_actions)\n \n next_active_feat_idcs = fvec_idx_per_tiling + next_action*idcs_per_action### \n\n for i in next_active_feat_idcs:\n delta += gamma*weights[i]\n weights += alpha*delta*z_trace\n \n z_trace = gamma*lbda*z_trace\n state = next_state\n action = next_action \n curr_active_feat_idcs = next_active_feat_idcs\n \n env.plot_step_per_ep(episode, step_count)\n \n return",
"def moveFunction(target, rays):\r\n for ray in rays:\r\n ray.hitTarget(target)",
"def process_belief(self, args):\n goal, belief = args\n\n if isinstance(belief, Beliefs):\n self.belief_module.process_belief(belief)\n self.initialize_action_queue()\n\n return [{}]",
"def emit_belief(self, args):\n goal, belief = args\n return [{belief: self.belief_module.emit_belief()}]",
"def filter(self):\n new_nodes_to_update = {}\n nodes_to_update = {}\n\n for agent_id in self.cameras.keys():\n nodes_to_update[agent_id] = []\n new_nodes_to_update[agent_id] = []\n if agent_id not in self.beliefs:\n world_name = self.cameras[agent_id].name.replace(\"-\",\"_\")+\"_beliefs\"\n rospy.logdebug(\"[perspective_filter] create new world <%s>\" % str(world_name))\n self.beliefs[agent_id] = self.ctx.worlds[world_name]\n self.node_mapping[agent_id] = {}\n\n dq = deque()\n dq.append(self.source.scene.rootnode)\n\n while not rospy.is_shutdown() and 0 < len(dq):\n node = dq.pop()\n if node.id != self.source.scene.rootnode.id:\n # Process start here\n if node.id in self.cameras.keys(): # if the node is the agent POV\n nodes_to_update[node.id].append(node) # we add it to his belief\n\n if node.parent in self.cameras.keys() and node.type == MESH: # if the node is part of an agent\n nodes_to_update[node.parent].append(node) # we add it to his belief\n\n for agent_id, visible_nodes in self.visible_nodes.items(): # then we add the visible nodes\n if agent_id in self.cameras.keys():\n if node in visible_nodes:\n nodes_to_update[agent_id].append(node)\n\n # And end here\n for child_id in node.children:\n dq.append(self.source.scene.nodes[child_id])\n\n for agent_id, nodes in nodes_to_update.items():\n if nodes:\n for node in nodes:\n new_node = node.copy()\n if node.id in self.node_mapping[agent_id]:\n new_node.id = self.node_mapping[agent_id][node.id]\n if new_node.id in self.nodes_transform:\n if not numpy.allclose(self.nodes_transform[new_node.id], new_node.transformation):\n new_nodes_to_update[agent_id].append(new_node)\n self.nodes_transform[new_node.id] = new_node.transformation\n else:\n self.nodes_transform[new_node.id] = new_node.transformation\n new_nodes_to_update[agent_id].append(new_node)\n else:\n self.node_mapping[agent_id][node.id] = new_node.id\n new_nodes_to_update[agent_id].append(new_node)\n\n # Finally we update the corresponding beliefs worlds\n for agent_id, nodes in new_nodes_to_update.items():\n for node in nodes:\n node.parent = self.node_mapping[agent_id][node.parent] if node.parent in self.node_mapping[agent_id] \\\n else self.beliefs[agent_id].scene.rootnode.id\n if nodes:\n self.beliefs[agent_id].scene.nodes.update(nodes)",
"def ray_trace(self, max_iterations=25):\n if not bool(self.optical_system):\n return\n \n self.clear_ray_history() \n starting_rays = self.optical_system._amalgamated_sources.copy()\n for i in range(max_iterations):\n result = self.single_pass(starting_rays)\n \n if bool(result):\n starting_rays = result\n else:\n break",
"def main():\n print(\"Constructing optical system...\")\n\n \"\"\"Place two lambertian point sources at the edges of an object\"\"\"\n image_distance = 200 # Distance from center of bed to sensor\n\n object_length = 100\n\n beam_width = math.radians(80) # Arbitrary; just limits the number of rays to propagate\n # First source\n obj_x1, obj_y1 = 0, object_length/2\n point_source1 = rt.LambertianPointSource(obj_x1, obj_y1, 0, -beam_width/2, beam_width/2, math.radians(0.1), color='blue')\n # Second source\n obj_x2, obj_y2 = 0, -object_length/2\n point_source2 = rt.LambertianPointSource(obj_x2, obj_y2, 0, -beam_width/2, beam_width/2, math.radians(0.1), color='blue')\n\n # Draw object\n rt.ax_sim.plot([obj_x1, obj_x2], [obj_y1, obj_y2], linewidth=5, color='gray')\n\n # Combine the sources into one element\n object_sources = [point_source1, point_source2]\n\n \"\"\"Model of optics\"\"\"\n # Aperture\n aperture_radius = 3\n aperture_start = image_distance - 30.0\n aperture = rt.Aperture(aperture_start, -aperture_radius, aperture_radius) # Start of sensor column\n\n # Define lens geometry\n lens_thickness = 2.5\n lens_diameter = 12\n lens_curvature = 20\n n_bk7 = 1.51\n lens_start_distance = image_distance - 25.0\n\n lens = rt.SphericalLens(lens_start_distance, lens_diameter,lens_curvature,lens_thickness, 0, n_bk7)\n\n image = rt.Image(image_distance, -10.0, image_distance, 10.0) # Sensor die perpendicular to lens/apertures\n\n \"\"\"Simulate!\"\"\"\n # System elements need to be in order (source --> image)\n system = rt.System(object_sources, [aperture, lens], image)\n system.run()",
"def backpropagating(self): \n\n ######################### Configure the sensor inputs given the movement of the agent ######################### \n sensors_result_N = self.agent.sensors(self, direction=3) + self.agent.get_energy_coarsed()+\\\n self.agent.rotate_previousAction(3)+[int(self.agent.get_previous_collision())]\n sensors_result_O = self.agent.sensors(self, direction=2) + self.agent.get_energy_coarsed()+\\\n self.agent.rotate_previousAction(2) + [int(self.agent.get_previous_collision())]\n sensors_result_S = self.agent.sensors(self, direction=1) + self.agent.get_energy_coarsed()+\\\n self.agent.rotate_previousAction(1) + [int(self.agent.get_previous_collision())]\n sensors_result_E = self.agent.sensors(self, direction=0) + self.agent.get_energy_coarsed()+\\\n self.agent.rotate_previousAction(0) + [int(self.agent.get_previous_collision())]\n\n input_nn_N = np.asarray(sensors_result_N).astype(int) # input when the Nord action is performed \n input_nn_O = np.asarray(sensors_result_O).astype(int) # input when the West action is performed\n input_nn_S = np.asarray(sensors_result_S).astype(int) # input when the South action is performed\n input_nn_E = np.asarray(sensors_result_E).astype(int) # input when the West action is performed\n\n l_input = [input_nn_E.reshape(1,145),input_nn_S.reshape(1,145),input_nn_O.reshape(1,145),input_nn_N.reshape(1,145)]\n ######################### Configure the sensor inputs given the movement of the agent #########################\n\n print(\"The reward in baskpropagating is %f\" %(self.agent.reward) ) \n parameters = [self.gamma, self.agent.reward]\n Ui = self.U_list[self.agent.get_previousAction().index(1)]\n\n if not self.end:\n U_list_y = [self.nn.predict(input_nn_E.reshape(1,145)),\\\n self.nn.predict(input_nn_S.reshape(1,145)),\\\n self.nn.predict(input_nn_O.reshape(1,145)),\\\n self.nn.predict(input_nn_N.reshape(1,145))] \n #print(U_list_y)\n maxU = np.max(U_list_y)\n #print(np.max(U_list_y))\n index_input_maxU = np.argmax(U_list_y) # the input given for the backprogating is the one with the maximum utility\n input_target = l_input[index_input_maxU] # The input target with the max utility, add to the tuple given during the experience replay\n uprime = self.agent.reward + self.gamma * maxU # input of the utility with the best value\n \n else:\n uprime = self.agent.reward\n input_target = np.array(None)\n \n action = self.agent.get_previousAction().index(1)\n input_nn = self.input_list[action]\n ##### Add to the lesson the action chose in order to go the next state, \n ##### the next state after to have performed the action, and the reward given\n if(self.action_proba[action] > 0.01): # the Pl minimum to choose the action corresponding to the action policy, cf to the paper part experience replay\n #next_states = [copy.deepcopy(input_nn_E).reshape(1,145), copy.deepcopy(input_nn_S).reshape(1,145), copy.deepcopy(input_nn_O).reshape(1,145), copy.deepcopy(input_nn_N).reshape(1,145)]\n self.memory.append((input_nn,action,np.asarray(copy.deepcopy(l_input)),self.agent.reward)) # We add the experiment to the memory of the agent \n \n ############################\n self.nn.train_one_step_other(input_nn,uprime)\n #self.nn.train(input_nn,tf.convert_to_tensor([[uprime]])) # use the method fit to train the neural network",
"def test_edge_features(self):\n k = [4, 4, 4, 4, 4]\n mn = self.create_chain_model(k)\n\n d = 3\n\n for i in range(5):\n mn.set_edge_features((i, i+1), np.random.randn(d))\n\n mn.create_matrices()\n mn.set_unary_weight_matrix(np.random.randn(4, 4))\n mn.set_edge_weight_matrix(np.random.randn(d, 16))\n\n bp = MatrixBeliefPropagator(mn)\n\n bp.infer()\n bp.load_beliefs()\n\n unconditional_marginals = bp.var_beliefs[4]\n\n bp.condition(0, 2)\n bp.infer()\n bp.load_beliefs()\n\n conditional_marginals = bp.var_beliefs[4]\n\n assert not np.allclose(unconditional_marginals, conditional_marginals), \\\n \"Conditioning on variable 0 did not change marginal of variable 4\"\n\n mn.set_edge_features((2, 3), np.zeros(d))\n mn.create_matrices()\n mn.set_unary_weight_matrix(np.random.randn(4, 4))\n mn.set_edge_weight_matrix(np.random.randn(d, 16))\n\n bp.infer()\n bp.load_beliefs()\n\n unconditional_marginals = bp.var_beliefs[4]\n\n bp.condition(0, 2)\n bp.infer()\n bp.load_beliefs()\n\n conditional_marginals = bp.var_beliefs[4]\n\n assert np.allclose(unconditional_marginals, conditional_marginals), \\\n \"Conditioning on var 0 changed marginal of var 4, when the features should have made them independent\"",
"def _exe_(self):\n print(\"\\n Start simulation (using Pharlap) ...\")\n dic = \"data/sim/{dn}/{rad}/\".format(dn=self.event.strftime(\"%Y.%m.%d.%H.%M\"), rad=self.rad)\n self._copy_ne_()\n [self._compute_(case) for case in [\"bgc\", \"flare\"]]\n plotlib.plot_exp_rays(dic, self.event, self.bmnum, \"bgc\")\n plotlib.plot_exp_rays(dic, self.event, self.bmnum, \"flare\")\n self._compute_doppler_()\n rec = self._compute_velocity_()\n return rec",
"def _drawRays(self):\r\n for rayID, ray in self.rayDict.items():\r\n ray.drawPath()",
"def updateAndGetBeliefStates(self, evidences):\n\n beliefStates = self.beliefGhostStates\n # XXX: Your code here\n width = self.walls.width\n height = self.walls.height\n w = self.w\n p = self.p\n pastBeliefStates = self.beliefGhostStates\n\n\n beliefStates = list()\n for i in range(len(evidences)):\n prob = np.zeros((width, height))\n pastProb = pastBeliefStates[i]\n evidence = evidences[i]\n for x in range(evidence[0] - w, evidence[0] + w + 1):\n for y in range(evidence[1] - w, evidence[1] + w + 1):\n if x in range(width) and y in range(height):\n prob[x][y] = 1\n\n for x in range(width):\n for y in range(height):\n if prob[x][y] != 0:\n prob[x][y] *= self.forwarding(x, y, p, pastProb)\n\n alpha = 1/np.sum(prob)\n # Normalization of the probability of the evidence\n for x in range(width):\n for y in range(height):\n if prob[x][y] != 0:\n prob[x][y] *= alpha\n beliefStates.append(prob)\n\n # XXX: End of your code\n self.beliefGhostStates = beliefStates\n return beliefStates",
"def run_simulation(self):\n print(\"# Starting propagation simulation using {} propagtion routine\".format(\n self.__class__.__name__))\n self.propagate()\n print(\"# Finished propagation simulation\")",
"def intersect(self, rays):\n raise NotImplementedError",
"def _propagate_step(self):\n\n # optical depth to next interaction\n self.tau = -np.log(self.RNG.rand(self.N_active))\n # optical depth to sphere edge\n self.tau_edge = np.sqrt(self.tau_sphere**2 - self.tau_i**2 *\n (1. - self.mu_i**2)) - self.tau_i * self.mu_i\n\n # identify packets that escape\n self.esc_mask = self.tau_edge < self.tau\n # update number of escaping packets\n self.N_esc += self.esc_mask.sum()\n\n # identify interacting packets\n self.nesc_mask = np.logical_not(self.esc_mask)\n\n # decide which interacting packets scatter and which get absorbed\n self.abs_mask = self.RNG.rand(self.nesc_mask.sum()) >= self.albedo\n self.scat_mask = np.logical_not(self.abs_mask)\n\n # select properties of scattering packets\n self.tau = self.tau[self.nesc_mask][self.scat_mask]\n self.tau_i = self.tau_i[self.nesc_mask][self.scat_mask]\n self.mu_i = self.mu_i[self.nesc_mask][self.scat_mask]\n\n # update number of active packets\n self.N_active = self.scat_mask.sum()\n\n # update properties (position in optical depth space, propagation\n # direction) of scattering packets\n self.tau_i = np.sqrt(self.tau_i**2 + self.tau**2 +\n 2. * self.tau * self.tau_i * self.mu_i)\n self.mu_i = 2 * self.RNG.rand(self.N_active) - 1.",
"def main_ededge(dataset):\n Application.set_input_image_folder('TestData/BSR/BSDS500/data/images/' + dataset)\n\n # Application.delete_folder_appl_out()\n # Benchmarking.delete_folder_benchmark_out()\n\n Application.do_get_image_job(port_output_name='RAW')\n Application.do_grayscale_transform_job(port_input_name='RAW', port_output_name='GRAY_RAW')\n blur = Application.do_gaussian_blur_image_job(port_input_name='GRAY_RAW', sigma=0, kernel_size=9)\n\n list_to_eval_edge = []\n\n first_order_edge = [\n CONFIG.FILTERS.PIXEL_DIFF_3x3, CONFIG.FILTERS.PIXEL_DIFF_SEPARATED_3x3\n , CONFIG.FILTERS.PIXEL_DIFF_SEPARATED_5x5, CONFIG.FILTERS.PIXEL_DIFF_SEPARATED_7x7\n , CONFIG.FILTERS.PIXEL_DIFF_5x5, CONFIG.FILTERS.PIXEL_DIFF_7x7\n\n , CONFIG.FILTERS.SOBEL_3x3, CONFIG.FILTERS.SOBEL_5x5, CONFIG.FILTERS.SOBEL_7x7\n , CONFIG.FILTERS.SOBEL_DILATED_5x5, CONFIG.FILTERS.SOBEL_DILATED_7x7\n\n , CONFIG.FILTERS.PREWITT_3x3, CONFIG.FILTERS.PREWITT_5x5, CONFIG.FILTERS.PREWITT_7x7\n , CONFIG.FILTERS.PREWITT_DILATED_5x5, CONFIG.FILTERS.PREWITT_DILATED_7x7\n\n , CONFIG.FILTERS.KIRSCH_3x3, CONFIG.FILTERS.KIRSCH_5x5\n , CONFIG.FILTERS.KIRSCH_DILATED_5x5, CONFIG.FILTERS.KIRSCH_DILATED_7x7\n\n , CONFIG.FILTERS.KITCHEN_MALIN_3x3\n , CONFIG.FILTERS.KITCHEN_MALIN_DILATED_5x5, CONFIG.FILTERS.KITCHEN_MALIN_DILATED_7x7\n\n , CONFIG.FILTERS.KAYYALI_3x3\n , CONFIG.FILTERS.KAYYALI_DILATED_5x5, CONFIG.FILTERS.KAYYALI_DILATED_7x7\n\n , CONFIG.FILTERS.SCHARR_3x3, CONFIG.FILTERS.SCHARR_5x5\n , CONFIG.FILTERS.SCHARR_DILATED_5x5, CONFIG.FILTERS.SCHARR_DILATED_7x7\n\n , CONFIG.FILTERS.KROON_3x3\n , CONFIG.FILTERS.KROON_DILATED_5x5, CONFIG.FILTERS.KROON_DILATED_7x7\n\n , CONFIG.FILTERS.ORHEI_3x3, CONFIG.FILTERS.ORHEI_B_5x5\n , CONFIG.FILTERS.ORHEI_DILATED_5x5, CONFIG.FILTERS.ORHEI_DILATED_7x7\n ]\n\n for edge in first_order_edge:\n for gr_thr in [50]:\n for anc_thr in [10]:\n e1, e2, = Application.do_edge_drawing_mod_job(port_input_name=blur, operator=edge,\n gradient_thr=gr_thr, anchor_thr=anc_thr, scan_interval=1,\n max_edges=100, max_points_edge=100)\n list_to_eval_edge.append(e1 + '_L0')\n\n Application.create_config_file(verbose=False)\n Application.configure_save_pictures(job_name_in_port=False, ports_to_save='ALL')\n # Application.configure_show_pictures(ports_to_show=list_to_save, time_to_show=200)\n\n # Application.run_application()\n\n # Do bsds benchmarking\n # Be ware not to activate job_name_in_port in Application.configure_save_pictures\n # Benchmarking.run_bsds500_boundary_benchmark(input_location='Logs/application_results',\n # gt_location='TestData/BSR/BSDS500/data/groundTruth/' + dataset,\n # raw_image='TestData/BSR/BSDS500/data/images/' + dataset,\n # jobs_set=list_to_eval_edge, do_thinning=False)\n\n Utils.plot_first_cpm_results(prefix='EDGE_DRAWING_MOD_', level='L0', order_by='f1', name='ed_results',\n list_of_data=list_to_eval_edge, number_of_series=50,\n inputs=[''], self_contained_list=True, set_legend_left=False,\n suffix_to_cut_legend='_S_0_GRAY_RAW_L0',\n replace_list=[('EDGE_DRAWING_MOD_THR_50_ANC_THR_10_SCAN_1_', ''),\n ('SEPARATED_PIXEL_DIFFERENCE_', 'Separated Px Dif '),\n ('PIXEL_DIFFERENCE_', 'Pixel Dif '),\n ('PREWITT_', 'Prewitt '), ('KIRSCH_', 'Kirsch '), ('SOBEL_', 'Sobel '),\n ('SCHARR_', 'Scharr '), ('KROON_', 'Kroon '), ('ORHEI_V1_', 'Orhei '),\n ('ORHEI_', 'Orhei '),\n ('KITCHEN_', 'Kitchen '), ('KAYYALI_', 'Kayyali '),\n ('DILATED_', 'dilated '),\n ('_GAUSS_BLUR_K_9', '')],\n save_plot=True, show_plot=False, set_all_to_legend=False)\n\n # Utils.create_latex_cpm_table_list()\n\n 
Utils.close_files()",
"def run(self, its):\n\n t = trange(its, leave=True)\n\n for i in t:\n self.calc_trace_term()\n KL_grad_R = self.grad_KL_R()\n KL_grad_mu = self.grad_KL_mu()\n\n eps = np.random.normal(size=self.n)\n r = self.q_mu + kron_mvp(self.Rs, eps)\n like_grad_R, like_grad_mu = self.grad_like(r, eps)\n grad_R = [-KL_grad_R[i] + like_grad_R[i]\n for i in range(len(KL_grad_R))]\n grad_mu = -KL_grad_mu + like_grad_mu\n R_and_grads = list(zip(grad_R, self.Rs))\n mu_and_grad = (grad_mu, self.q_mu)\n\n obj, kl, like = self.eval_obj(self.Rs, self.q_mu, r)\n self.elbos.append(-obj)\n\n if self.linesearch:\n ls_res = self.line_search(R_and_grads, mu_and_grad, obj, r, eps)\n step = 0.\n if ls_res is not None:\n step = ls_res[-1]\n t.set_description(\"ELBO: \" + '{0:.2f}'.format(-obj) +\n \" | KL: \" + '{0:.2f}'.format(kl) +\n \" | logL: \" + '{0:.2f}'.format(like) +\n \" | step: \" + str(step))\n if ls_res is not None:\n self.Rs = ls_res[0]\n self.q_mu = ls_res[1]\n else:\n t.set_description(\"ELBO: \" + '{0:.2f}'.format(-obj) +\n \" | KL: \" + '{0:.2f}'.format(kl) +\n \" | logL: \" + '{0:.2f}'.format(like))\n self.q_mu, self.mu_params = \\\n self.optimizer.step(mu_and_grad, self.mu_params)\n for d in range(self.d):\n self.Rs[d], self.R_params[d] = \\\n self.optimizer.step(R_and_grads[d], self.R_params[d])\n self.f_pred = self.predict()\n return",
"def propagation(self,map):\n near_cells = self.get_near(map)\n \n #fire spreading\n burnable = [] #list of burnable cells\n for cell in near_cells:\n if(cell.nat != 0 and cell.state == 0): #conditions to burn a cell\n burnable.append(cell)\n \n if(self.nat == 2): #spread faster if it's a forest\n n = rdm.randint(0,(self.state*2)) #n: number of cells to burn, n < 9\n if n>8: n=8\n else: n = rdm.randint(0,self.state)\n \n if map.wind_active: \n for i in range(n):\n \n #creating the list in which the choice is made (changing probability according to the wind direction)\n indexes=[]\n for ce in burnable:\n \n if map.wind==0:\n if ce.y > self.y:\n indexes.append(near_cells.index(ce)) #*2 probability if the cells in direction of fire\n indexes.append(near_cells.index(ce))\n elif ce.y == self.y:\n indexes.append(near_cells.index(ce)) #0 probability if cell against the fire\n #1 for the rest\n elif map.wind==4:\n if ce.y < self.y:\n indexes.append(near_cells.index(ce)) #*2 probability if the cells in direction of fire\n indexes.append(near_cells.index(ce))\n elif ce.y== self.y: \n indexes.append(near_cells.index(ce)) #0 probability if cell against the fire\n #1 for the rest\n elif map.wind==2:\n if ce.x > self.x:\n indexes.append(near_cells.index(ce)) #*2 probability if the cells in direction of fire\n indexes.append(near_cells.index(ce))\n elif ce.x == self.x:\n indexes.append(near_cells.index(ce)) #0 probability if cell against the fire\n #1 for the rest\n elif map.wind==6:\n if ce.x < self.x:\n indexes.append(near_cells.index(ce)) #*2 probability if the cells in direction of fire\n indexes.append(near_cells.index(ce))\n elif ce.x == self.x:\n indexes.append(near_cells.index(ce)) #0 probability if cell against the fire\n #1 for the rest \n elif map.wind==1:\n if ce.y >= self.y and ce.x >= self.x:\n indexes.append(near_cells.index(ce)) #*2 probability if the cells in direction of fire\n indexes.append(near_cells.index(ce))\n elif (ce.y > self.y and ce.x < self.x) or (ce.y < self.y and ce.x > self.x):\n indexes.append(near_cells.index(ce)) \n\n elif map.wind==3:\n if ce.y <= self.y and ce.x >= self.x:\n indexes.append(near_cells.index(ce)) #*2 probability if the cells in direction of fire\n indexes.append(near_cells.index(ce))\n elif (ce.y > self.y and ce.x > self.x) or (ce.y < self.y and ce.x < self.x):\n indexes.append(near_cells.index(ce)) \n \n elif map.wind==5:\n if ce.y <= self.y and ce.x <= self.x:\n indexes.append(near_cells.index(ce)) #*2 probability if the cells in direction of fire\n indexes.append(near_cells.index(ce))\n elif (ce.y > self.y and ce.x < self.x) or (ce.y < self.y and ce.x > self.x):\n indexes.append(near_cells.index(ce))\n \n elif map.wind==7:\n if ce.y >= self.y and ce.x <= self.x:\n indexes.append(near_cells.index(ce)) #*2 probability if the cells in direction of fire\n indexes.append(near_cells.index(ce))\n elif (ce.y > self.y and ce.x > self.x) or (ce.y < self.y and ce.x < self.x):\n indexes.append(near_cells.index(ce))\n \n \n if len(indexes)>0:\n r = rdm.choice(indexes) #choose randoly the cell, among the availables, with weight\n cell = near_cells[r]\n cell.state = 1 #cell is burned\n map.burn_list.append(cell)\n burnable.remove(cell) #the cell is no more available\n \n\n\n\n\n #without the wind active\n else:\n if n>=len(burnable): #if n is greater than the number of burnable cells, they are all burned\n for cell in burnable:\n cell.state = 1\n map.burn_list.append(cell) #add cell to burn_list\n else: \n for i in range(n):\n r = rdm.randint(0,len(burnable)-1) 
#choose randoly the cell, among the availables\n cell = burnable[r]\n cell.state = 1 #cell is burned\n map.burn_list.append(cell)\n burnable.remove(cell) #the cell is no more available\n \n\n\n \n #fire intensity growing \n if(self.nat == 3): #burn faster if it's a house\n self.state += 2\n else:\n self.state += 1\n \n if(self.state > 5): #if it's burned\n self.charred = True\n self.state = 1\n map.burn_list.remove(self) #burned cells are removed form the burn_list",
"def update_chains(self):\r\n _, black_positions, white_positions = self.get_positions()\r\n\r\n self.bfs(black_positions, 1)\r\n self.bfs(white_positions, 2)",
"def propagate(self, ray, index_0, index_1):\n if self._reflective:\n return self.reflect(ray)\n else:\n return self.refract(ray, index_1/index_0)",
"def build(self):\n # weights to apply to training samples, updated on each\n # iteration of the boosting algo, normalised to 1\n sigWeights = np.ones(self.nSig, dtype=float)\n bkgWeights = np.ones(self.nBkg, dtype=float)\n reweight = 1.0/(np.sum(sigWeights)+np.sum(bkgWeights))\n sigWeights *= reweight\n bkgWeights *= reweight \n\n # Weight of each tree, strong classifers have higher weight\n self.treeWeights = np.zeros(self.ntrees, dtype=float)\n\n for i in xrange(self.ntrees):\n\n # build new tree\n newTree = Tree()\n newTree.load(self.sigData,self.bkgData,weights=(sigWeights,bkgWeights))\n newTree.build()\n self.dTrees.append(newTree) \n\n # evaluate trees\n # keep track of each event\n err = 0.0\n sigWrong = np.zeros(self.nSig)\n bkgWrong = np.zeros(self.nBkg)\n\n for j in range(self.nSig):\n if newTree.classify(np.array((self.sigData[j,])))<0:\n sigWrong[i]=1\n err+=sigWeights[j]\n\n for j in range(self.nBkg):\n if newTree.classify(np.array((self.bkgData[j,])))>0:\n bkgWrong[i]=1\n err+=bkgWeights[j]\n\n alpha = self.beta*math.log((1.0-err)/err)\n print err,alpha\n corFactor = math.exp(-alpha)\n wrongFactor = math.exp(alpha)\n\n if (err<1e-20 or err >= 0.5):\n print \"SOEMTHING WRONG!!\"\n\n self.treeWeights[i] = alpha\n\n # reweight training samples\n for j in range(self.nSig):\n if sigWrong[j]:\n sigWeights[j]*=wrongFactor\n else :\n sigWeights[j]*=corFactor\n\n for j in range(self.nBkg):\n if bkgWrong[j]:\n bkgWeights[j]*=wrongFactor\n else :\n bkgWeights[j]*=corFactor\n\n # normalise weights\n reweight = 1.0/(np.sum(sigWeights)+np.sum(bkgWeights))\n sigWeights *= reweight\n bkgWeights *= reweight",
"def updateAndGetBeliefStates(self, evidences):\n # XXX: Your code here\n\n # if self.iter < 0:\n # np.save('Entropy{}_{}'.format(self.w, self.p), self.entropy)\n # sys.exit()\n #\n # self.iter = self.iter - 1\n\n if (self.m or self.n) is None:\n self.m = self.walls.height\n self.n = self.walls.width\n\n if not self.board:\n for x in np.arange(self.n):\n for y in np.arange(self.m):\n self.board.append((x, y))\n\n if self.transitionMatrix is None:\n self.transitionMatrix = self.createTransitionMatrix()\n\n if self.sensorMatrix is None:\n self.sensorMatrix = self.createSensorModel()\n\n beliefStates = self.beliefGhostStates\n\n # self.entropy.append(self.entropyF(beliefStates))\n\n for i, e in enumerate(evidences):\n \"\"\"\n To manage multiple ghosts.\n \"\"\"\n col_beliefStates = np.reshape(beliefStates[i, :, :], (-1, 1))\n\n index = self.board.index(e)\n O_col = self.sensorMatrix[:, index]\n\n O = np.diag(O_col)\n \"\"\"\n O = Observation matrix.\n \"\"\"\n\n col_bel = np.dot(O, self.transitionMatrix)\n col_beliefStates = np.dot(col_bel, col_beliefStates)\n\n alpha = 1/(np.sum(col_beliefStates))\n col_beliefStates = alpha*col_beliefStates\n\n beliefState = col_beliefStates.reshape((self.n, self.m))\n beliefStates[i, :, :] = beliefState\n\n # XXX: End of your code\n self.beliefGhostStates = beliefStates\n return beliefStates",
"def main():\n \"\"\"\n This is just for testing the functions\n \"\"\"\n\n x1 = np.array([1, 1, 1, 1, -1, -1, 1, 1, 1])\n x2 = np.array([1, -1, 1, 1, 1, 1, 1, -1, 1])\n x3 = np.array([-1, 1, -1, -1, 1, -1, -1, 1, -1])\n train_set = np.vstack((x1, x2))\n train_set = np.vstack((train_set, x3))\n\n\n params = {\n \"epochs\": 100,\n \"neurons\": len(x1),\n \"learn_method\": 'classic'\n }\n\n hop = hop_net.HopfieldNet(train_set, **params)\n hop.batch_train()\n show_trained(train_set)\n\n x4d = [1,1,1,1,1,1,1,1,1]\n x5d = [1,1,1,1,-1,-1,1,-1,-1]\n x45d = np.vstack((x4d, x5d))\n test_set = np.vstack((x45d, train_set))\n recalled_set = hop.recall(test_set)\n for i in range(test_set.shape[0]):\n show_tested(test_set[i], recalled_set[i])",
"def run_all(logdir, verbose=False):\n run_box_to_gaussian(logdir, verbose=verbose)\n run_sobel(logdir, verbose=verbose)",
"def ml_loop(side: str):\n\n # === Here is the execution order of the loop === #\n # 1. Put the initialization code here\n ball_served = False\n blocker_last_x = 0\n\n class Pred:\n pred = 100\n blocker_pred_x = 0\n last_command = 0\n blocker_vx = 0\n\n \n def move_to(player, pred) : #move platform to predicted position to catch ball \n if player == '1P':\n if scene_info[\"platform_1P\"][0]+20 > (pred-10) and scene_info[\"platform_1P\"][0]+20 < (pred+10): return 0 # NONE\n elif scene_info[\"platform_1P\"][0]+20 <= (pred-10) : return 1 # goes right\n else : return 2 # goes left\n else :\n if scene_info[\"platform_2P\"][0]+20 > (pred-10) and scene_info[\"platform_2P\"][0]+20 < (pred+10): return 0 # NONE\n elif scene_info[\"platform_2P\"][0]+20 <= (pred-10) : return 1 # goes right\n else : return 2 # goes left\n\n def ml_loop_for_1P(): \n # ball slicing\n if scene_info[\"ball_speed\"][1] > 0 and (scene_info[\"ball\"][1]+scene_info[\"ball_speed\"][1]) >= 415 and Pred.last_command == 0:\n print(\"------\")\n ball_x = scene_info[\"ball\"][0]\n ball_y = scene_info[\"ball\"][1]\n ball_vx = scene_info[\"ball_speed\"][0]\n ball_slice_vx = scene_info[\"ball_speed\"][0]+np.sign(scene_info[\"ball_speed\"][0])*3\n ball_vy = scene_info[\"ball_speed\"][1] \n blocker_x = scene_info['blocker'][0] + Pred.blocker_vx\n \n y = abs((415 - ball_y) // ball_vy)\n pred_ball_1P = ball_x + ball_vx * y\n\n y = abs((415 - 260) // ball_vy)\n pred_ball_blocker = pred_ball_1P + ball_slice_vx * y\n bound = pred_ball_blocker // 200 # Determine if it is beyond the boundary\n if (bound > 0): # pred > 200 # fix landing position\n if (bound%2 == 0) : \n pred_ball_blocker = pred_ball_blocker - bound*200 \n else :\n pred_ball_blocker = 200 - (pred_ball_blocker - 200*bound)\n elif (bound < 0) : # pred < 0\n if (bound%2 ==1) :\n pred_ball_blocker = abs(pred_ball_blocker - (bound+1) *200)\n else :\n pred_ball_blocker = pred_ball_blocker + (abs(bound)*200)\n \n y = abs((415 - 260) // ball_vy)\n Pred.blocker_pred_x = blocker_x + Pred.blocker_vx * y \n if Pred.blocker_pred_x < 0: Pred.blocker_pred_x = abs(Pred.blocker_pred_x)\n elif Pred.blocker_pred_x > 170: Pred.blocker_pred_x = 170 - (Pred.blocker_pred_x - 170)\n \n if pred_ball_blocker >= Pred.blocker_pred_x-10 and pred_ball_blocker < Pred.blocker_pred_x+40:\n print(\"slice will hit blicker\")\n # don't slice \n # use origin ball vx to predict will hit blocker or not\n # if will hit blicker let ball go reverse direction\n y = abs((415 - 260) // ball_vy)\n pred_ball_blocker = pred_ball_1P + ball_vx * y\n bound = pred_ball_blocker // 200 # Determine if it is beyond the boundary\n if (bound > 0): # pred > 200 # fix landing position\n if (bound%2 == 0) : \n pred_ball_blocker = pred_ball_blocker - bound*200 \n else :\n pred_ball_blocker = 200 - (pred_ball_blocker - 200*bound)\n elif (bound < 0) : # pred < 0\n if (bound%2 ==1) :\n pred_ball_blocker = abs(pred_ball_blocker - (bound+1) *200)\n else :\n pred_ball_blocker = pred_ball_blocker + (abs(bound)*200)\n\n if pred_ball_blocker >= Pred.blocker_pred_x-10 and pred_ball_blocker < Pred.blocker_pred_x+40:\n print(\"will hit blocker, hit reversed direction\")\n if scene_info[\"ball_speed\"][0] > 0: return 2\n else: return 1\n else: \n print(\"will not hit blicker, do nothing\")\n return 0\n else:\n # slice\n print(\"slice will not hit blocker\")\n if scene_info[\"ball_speed\"][0] > 0: return 1\n else: return 2\n\n elif scene_info[\"ball_speed\"][1] > 0 : # 球正在向下 # ball goes down\n x = ( scene_info[\"platform_1P\"][1]-scene_info[\"ball\"][1] 
) // scene_info[\"ball_speed\"][1] # 幾個frame以後會需要接 # x means how many frames before catch the ball\n Pred.pred = scene_info[\"ball\"][0]+(scene_info[\"ball_speed\"][0]*x) # 預測最終位置 # pred means predict ball landing site \n bound = Pred.pred // 200 # Determine if it is beyond the boundary\n if (bound > 0): # pred > 200 # fix landing position\n if (bound%2 == 0) : \n Pred.pred = Pred.pred - bound*200 \n else :\n Pred.pred = 200 - (Pred.pred - 200*bound)\n elif (bound < 0) : # pred < 0\n if (bound%2 ==1) :\n Pred.pred = abs(Pred.pred - (bound+1) *200)\n else :\n Pred.pred = Pred.pred + (abs(bound)*200)\n return move_to(player = '1P',pred = Pred.pred)\n \n else : # 球正在向上 # ball goes up\n return move_to(player = '1P',pred = 100)\n\n\n\n def ml_loop_for_2P(): # as same as 1P\n if scene_info[\"ball_speed\"][1] > 0 : \n return move_to(player = '2P',pred = 100)\n else : \n x = ( scene_info[\"platform_2P\"][1]+30-scene_info[\"ball\"][1] ) // scene_info[\"ball_speed\"][1] \n pred = scene_info[\"ball\"][0]+(scene_info[\"ball_speed\"][0]*x) \n bound = pred // 200 \n if (bound > 0):\n if (bound%2 == 0):\n pred = pred - bound*200 \n else :\n pred = 200 - (pred - 200*bound)\n elif (bound < 0) :\n if bound%2 ==1:\n pred = abs(pred - (bound+1) *200)\n else :\n pred = pred + (abs(bound)*200)\n return move_to(player = '2P',pred = pred)\n\n # 2. Inform the game process that ml process is ready\n comm.ml_ready()\n\n # 3. Start an endless loop\n while True:\n # 3.1. Receive the scene information sent from the game process\n scene_info = comm.recv_from_game()\n\n # 3.2. If either of two sides wins the game, do the updating or\n # resetting stuff and inform the game process when the ml process\n # is ready.\n if scene_info[\"status\"] != \"GAME_ALIVE\":\n # Do some updating or resetting stuff\n ball_served = False\n\n # 3.2.1 Inform the game process that\n # the ml process is ready for the next round\n comm.ml_ready()\n continue\n\n # 3.3 Put the code here to handle the scene information\n\n # 3.4 Send the instruction for this frame to the game process\n if not ball_served:\n comm.send_to_game({\"frame\": scene_info[\"frame\"], \"command\": \"SERVE_TO_LEFT\"})\n blocker_last_x = scene_info[\"blocker\"][0]\n Pred.last_command = 0\n ball_served = True\n else:\n if side == \"1P\":\n Pred.blocker_vx = scene_info[\"blocker\"][0] - blocker_last_x\n if scene_info[\"blocker\"][0] == 0: Pred.blocker_vx = 5\n elif scene_info[\"blocker\"][0] == 170: Pred.blocker_vx = -5\n command = ml_loop_for_1P()\n blocker_last_x = scene_info[\"blocker\"][0]\n Pred.last_command = command\n else:\n command = ml_loop_for_2P()\n\n if command == 0:\n comm.send_to_game({\"frame\": scene_info[\"frame\"], \"command\": \"NONE\"})\n elif command == 1:\n comm.send_to_game({\"frame\": scene_info[\"frame\"], \"command\": \"MOVE_RIGHT\"})\n else :\n comm.send_to_game({\"frame\": scene_info[\"frame\"], \"command\": \"MOVE_LEFT\"})",
"def run(layers):"
] | [
"0.6051191",
"0.57601404",
"0.56488925",
"0.5455181",
"0.5427728",
"0.54135686",
"0.5383709",
"0.53433657",
"0.5283373",
"0.52386534",
"0.5231889",
"0.52266884",
"0.5205245",
"0.5200786",
"0.51926756",
"0.5170246",
"0.51418513",
"0.51362956",
"0.5133803",
"0.51281625",
"0.5126687",
"0.512434",
"0.5122906",
"0.5107245",
"0.5055977",
"0.5054276",
"0.50491804",
"0.50390226",
"0.5037528",
"0.502793"
] | 0.64419127 | 0 |
Plot statistics for an optimization run property specified by opt_run_property. Either a histogram or a line plot can be produced. In the line plot, the x axis shows the multistart indices, ordered with respect to the function value, and the y axis shows the value of the corresponding property for each multistart. | def optimization_run_property_per_multistart(
results: Union[Result, Sequence[Result]],
opt_run_property: str,
axes: Optional[matplotlib.axes.Axes] = None,
size: Tuple[float, float] = (18.5, 10.5),
start_indices: Optional[Union[int, Iterable[int]]] = None,
colors: Optional[Union[List[float], List[List[float]]]] = None,
legends: Optional[Union[str, List[str]]] = None,
plot_type: str = 'line',
) -> matplotlib.axes.Axes:
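# supported optimization run properties and their descriptive labels (used in titles and axis labels)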
supported_properties = {
'time': 'Wall-clock time (seconds)',
'n_fval': 'Number of function evaluations',
'n_grad': 'Number of gradient evaluations',
'n_hess': 'Number of Hessian evaluations',
'n_res': 'Number of residuals evaluations',
'n_sres': 'Number of residual sensitivity evaluations',
}
if opt_run_property not in supported_properties:
raise ValueError(
"Wrong value of opt_run_property. Only the following "
"values are allowed: 'time', 'n_fval', 'n_grad', "
"'n_hess', 'n_res', 'n_sres'"
)
# parse input
(results, colors, legends) = process_result_list(results, colors, legends)
# axes
if axes is None:
ncols = 2 if plot_type == 'both' else 1
fig, axes = plt.subplots(1, ncols)
fig.set_size_inches(*size)
fig.suptitle(
f'{supported_properties[opt_run_property]} per optimizer run'
)
else:
axes.set_title(
f'{supported_properties[opt_run_property]} per optimizer run'
)
# loop over results
for j, result in enumerate(results):
if plot_type == 'both':
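# 'both': draw the line plot into the first axes and the histogram into the second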
axes[0] = stats_lowlevel(
result,
opt_run_property,
supported_properties[opt_run_property],
axes[0],
start_indices,
colors[j],
legends[j],
)
axes[1] = stats_lowlevel(
result,
opt_run_property,
supported_properties[opt_run_property],
axes[1],
start_indices,
colors[j],
legends[j],
plot_type='hist',
)
else:
axes = stats_lowlevel(
result,
opt_run_property,
supported_properties[opt_run_property],
axes,
start_indices,
colors[j],
legends[j],
plot_type,
)
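# show a legend only if at least one legend entry was provided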
if sum((legend is not None for legend in legends)) > 0:
if plot_type == 'both':
for ax in axes:
ax.legend()
else:
axes.legend()
return axes | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def optimization_run_properties_one_plot(\n results: Result,\n properties_to_plot: Optional[List[str]] = None,\n size: Tuple[float, float] = (18.5, 10.5),\n start_indices: Optional[Union[int, Iterable[int]]] = None,\n colors: Optional[Union[List[float], List[List[float]]]] = None,\n legends: Optional[Union[str, List[str]]] = None,\n plot_type: str = 'line',\n) -> matplotlib.axes.Axes:\n if properties_to_plot is None:\n properties_to_plot = [\n 'time',\n 'n_fval',\n 'n_grad',\n 'n_hess',\n 'n_res',\n 'n_sres',\n ]\n\n if colors is None:\n colors = assign_colors_for_list(len(properties_to_plot))\n elif len(colors) == 4 and isinstance(colors[0], Real):\n colors = [colors]\n\n if len(colors) != len(properties_to_plot):\n raise ValueError(\n 'Number of RGBA colors should be the same as number '\n 'of optimization properties to plot'\n )\n\n if legends is None:\n legends = properties_to_plot\n elif not isinstance(legends, list):\n legends = [legends]\n\n if len(legends) != len(properties_to_plot):\n raise ValueError(\n 'Number of legends should be the same as number of '\n 'optimization properties to plot'\n )\n\n ax = plt.subplots()[1]\n fig = plt.gcf()\n fig.set_size_inches(*size)\n\n for idx, prop_name in enumerate(properties_to_plot):\n optimization_run_property_per_multistart(\n results,\n prop_name,\n ax,\n size,\n start_indices,\n colors[idx],\n legends[idx],\n plot_type,\n )\n\n ax.set_ylabel(\"property value\")\n ax.set_title(\"Optimization properties per optimization run\")\n return ax",
"def plot_stats(values, path='', experiment='', run_type='', x_var_name='', plot_agg=True, plot_runs=True, smth_wnd=10,\n\t\t\t show=True, save=True):\n\n\tif experiment is not None or experiment != '':\n\t\texperiment = '_' + experiment\n\n\tif path != '' and path[-1] != '/':\n\t\tpath = path + '/'\n\n\tfig = plt.figure(figsize=(10, 5))\n\n\tx_values = np.arange(1, values.shape[1] + 1)\n\n\tsmoothen = True if 0 < 3 * smth_wnd < values.shape[1] else False\n\n\tif plot_agg:\n\t\tplot_aggregate(values, smth_wnd=smth_wnd, plot_ext=True)\n\n\t# Plot individual runs\n\tif plot_runs:\n\t\tfor i in range(values.shape[0]):\n\n\t\t\tif len(values.shape) == 1:\n\t\t\t\trun_values = values[i]\n\t\t\telse:\n\t\t\t\trun_values = values[i, :]\n\n\t\t\tif smoothen:\n\t\t\t\trun_values = pd.Series(run_values).rolling(smth_wnd, min_periods=smth_wnd).mean()\n\n\t\t\tplt.plot(x_values, run_values, label='Run {}'.format(i + 1), linewidth=0.25)\n\n\t# Plot Information\n\tplt.xlabel(\"Episode\")\n\tplt.ylabel(\"Episode \" + x_var_name)\n\tplt.title(\"{} Episode {} over Time\".format(run_type.title(), x_var_name))\n\tplt.legend()\n\n\t# Save Plot as png\n\tif save:\n\t\tmkdir(path)\n\t\tfig.savefig('{}plot_{}_{}_ep_{}_{}.png'.format(path, experiment, run_type.lower(), x_var_name.lower(), timestamp()))\n\n\tif show:\n\t\tplt.show(fig)\n\telse:\n\t\tplt.close(fig)",
"def plot_a_run(run, ax):\n npz = np.load(run['npz_fname'])\n ckg = npz['nums']\n y_var = run['y_variable']\n full_y = ckg[y_var]\n x_var = run['x_variable']\n full_x = ckg[x_var]\n ### each run is a plot, but it could have multiple lines.\n # this requires some magic, in seperating our data by the second var.\n ## I ASSUME, and this is important, that only two variables change\n x_to_plot = full_x\n x_to_calc = full_x\n y_to_plot = full_y\n ckg_fc = ckg\n if 'second_var' in run: \n ckg_fc = ckg[:,0]\n x_to_calc = full_x[:,0]\n elif ('average_over' in run):#### always do log average\n #y_to_plot = np.average(full_y, axis=1)\n y_to_plot = np.exp(np.average(np.log(full_y), axis=1))\n \n ckg_fc = ckg[:,0]\n x_to_plot = x_to_calc = full_x[:,0]\n #pdb.set_trace()\n ax.plot(x_to_plot, y_to_plot,\".\")\n plot_localization_length(ax, ckg_fc['c'],ckg_fc['k'], ckg_fc['dis_param'], ckg_fc['number_of_points'] , x_to_calc)\n ax.set_xlabel(x_var)\n ax.set_ylabel(y_var)",
"def optimization_run_properties_per_multistart(\n results: Union[Result, Sequence[Result]],\n properties_to_plot: Optional[List[str]] = None,\n size: Tuple[float, float] = (18.5, 10.5),\n start_indices: Optional[Union[int, Iterable[int]]] = None,\n colors: Optional[Union[List[float], List[List[float]]]] = None,\n legends: Optional[Union[str, List[str]]] = None,\n plot_type: str = 'line',\n) -> Dict[str, plt.Subplot]:\n if properties_to_plot is None:\n properties_to_plot = [\n 'time',\n 'n_fval',\n 'n_grad',\n 'n_hess',\n 'n_res',\n 'n_sres',\n ]\n\n num_subplot = len(properties_to_plot)\n # compute, how many rows and columns we need for the subplots\n num_row = int(np.round(np.sqrt(num_subplot)))\n num_col = int(np.ceil(num_subplot / num_row))\n fig, axes = plt.subplots(num_row, num_col, squeeze=False)\n fig.set_size_inches(*size)\n\n for ax in axes.flat[num_subplot:]:\n ax.remove()\n axes = dict(zip(range(num_subplot), axes.flat))\n for idx, prop_name in enumerate(properties_to_plot):\n ax = axes[idx]\n optimization_run_property_per_multistart(\n results,\n prop_name,\n ax,\n size,\n start_indices,\n colors,\n legends,\n plot_type,\n )\n return axes",
"def plot(self, **kwargs):\n\n # get colors\n colors = kwargs.get(\"colors\", GW_OBSERVATORY_COLORS)\n\n # get Result samples\n self._samples = {\n label: value.posterior\n for label, value in self.results.items()\n if isinstance(value, Result)\n }\n\n # get Grid posteriors\n self._grids = {\n label: [value, value.ln_evidence] # store grid and log evidence\n for label, value in self.results.items()\n if isinstance(value, Grid)\n }\n\n # apply offsets for slightly nicer plots axes\n self.parameter_offsets = {parameter: 0.0 for parameter in self.parameters}\n if len(self._grids) == 0 and len(self._samples) == 1:\n for label in self._samples:\n for parameter in self.parameters:\n srange = [\n np.min(self._samples[label][parameter]),\n np.max(self._samples[label][parameter]),\n ]\n label_suffix = \"\"\n\n # offset values\n median = np.median(self._samples[label][parameter])\n relwidth = np.abs((srange[1] - srange[0]) / median)\n\n if relwidth < 1e-4:\n offsetstr = f\"{median:.4e}\"\n a, b = offsetstr.split(\"e\")\n\n if np.abs(int(b)) < 3:\n offsetstr = f\"{median:.4f}\"\n offset = float(offsetstr)\n else:\n offset = float(offsetstr)\n offsetstr = a + rf\"\\!\\times\\!10^{{{int(b)}}}\"\n\n self.parameter_offsets[parameter] = offset\n\n self._samples[label][parameter] -= offset\n label_suffix = rf\" [${{\\scriptstyle {offsetstr}}}$]\"\n\n self.latex_labels[parameter] += label_suffix\n\n colordicts = []\n for j, res in enumerate([self._samples, self._grids]):\n colordicts.append({})\n for i, key in enumerate(res):\n if key in colors:\n colordicts[-1][key] = colors[key]\n elif key.lower() == \"joint\":\n # if using \"Joint\" as the multi-detector analysis key, set the color to black\n colordicts[-1][key] = \"k\"\n else:\n # use PESummary color cycle\n colordicts[-1][key] = list(colorcycle)[\n (j * 2 + i) % len(colorcycle)\n ]\n\n # store original keywords arguments\n origkwargs = kwargs.copy()\n\n # plot samples\n fig = None\n if len(self._samples) > 0:\n kwargs[\"colors\"] = list(colordicts[0].values())\n if self._num_parameters == 1:\n fig = self._1d_plot_samples(**kwargs)\n elif self._num_parameters == 2 and self.plottype != \"corner\":\n fig = self._2d_plot_samples(**kwargs)\n else:\n fig = self._nd_plot_samples(**kwargs)\n\n # restore keywords\n kwargs = origkwargs\n\n if len(self._grids) > 0:\n kwargs[\"colors\"] = list(colordicts[1].values())\n if fig is not None and \"fig\" not in kwargs:\n kwargs[\"fig\"] = fig\n if self._num_parameters == 1:\n fig = self._1d_plot_grid(**kwargs)\n elif self._num_parameters == 2 and self.plottype != \"corner\":\n fig = self._2d_plot_grid(**kwargs)\n else:\n fig = self._nd_plot_grid(**kwargs)\n\n # add further figure information\n if self._num_parameters == 1:\n ax = fig.gca()\n\n # set figure bounds if outside defaults\n if self.parameters[0] in DEFAULT_BOUNDS:\n _set_axes_limits(ax, self.parameters[0], axis=\"x\")\n\n # add injection values\n if self.injection_parameters is not None:\n if self.injection_parameters[self.parameters[0]] is not None:\n ax.axvline(\n (\n self.injection_parameters[self.parameters[0]]\n - self.parameter_offsets[self.parameters[0]]\n ),\n color=kwargs.get(\"injection_color\", \"k\"),\n linewidth=1,\n )\n elif self._num_parameters == 2:\n if \"triangle\" in self.plottype:\n a1, a2, a3 = fig[1:]\n order = [\"x\", \"y\"] if self.plottype == \"triangle\" else [\"y\", \"x\"]\n params = (\n self.parameters[:2]\n if self.plottype == \"triangle\"\n else self.parameters[1::-1]\n )\n\n # set figure bounds if outside defaults\n for param, 
axes, axis in zip(params, [[a1, a2], [a2, a3]], order):\n for ax in axes:\n _set_axes_limits(ax, param, axis=axis)\n\n self.fig = fig\n return self.fig",
"def plot_optima(cfg, results, results_inftau):\n\n def subplot_args(experiment_idx, row):\n return 4, len(cfg['experiments']), \\\n row * len(cfg['experiments']) + experiment_idx + 1\n\n for e, experiment in enumerate(cfg['experiments']):\n def set_rowlabels(rowname):\n if e <= 0:\n plt.ylabel(rowname)\n else:\n plt.yticks([])\n\n plt.subplot(*subplot_args(e, 0))\n\n plt.title(experiment['name'])\n plt.xlabel(\"time / ms\")\n plt.ylim(0, 210)\n if e > 0:\n plt.yticks([])\n plot_stparams(\n [cfg['l1_length'], cfg['l2_lengths'][1], cfg['l3_length'],\n cfg['l4_lengths'][1]], experiment['rates_a'], 'm')\n plot_stparams(\n [cfg['l1_length'], cfg['l2_lengths'][1], cfg['l3_length'],\n cfg['l4_lengths'][1]], experiment['rates_b'], 'k')\n\n plt.subplot(*subplot_args(e, 1))\n set_rowlabels(r\"$\\langle I^* \\rangle$\")\n plt.xticks([])\n plt.xlim(\n 0,\n (len(cfg['l4_lengths']) + 0.5) * (len(cfg['l2_lengths']) + 2))\n plot_optimal_uncertainty_reduction(results[e], results_inftau[e])\n\n plt.subplot(*subplot_args(e, 2))\n plt.ylim([10 * cfg['time_scales'][0], 3 * cfg['time_scales'][-1]])\n plt.semilogy()\n set_rowlabels(r\"$\\langle \\tau^* \\rangle$\")\n plt.xticks([])\n plt.xlim(\n 0,\n (len(cfg['l4_lengths']) + 0.5) * (len(cfg['l2_lengths']) + 2))\n plot_optimal_tau(results[e], results_inftau[e])\n\n plt.subplot(*subplot_args(e, 3))\n plt.ylim([10 * cfg['time_scales'][0], 3 * cfg['time_scales'][-1]])\n plt.semilogy()\n set_rowlabels(r\"$\\tau^*_{\\langle I \\rangle}$\")\n plt.xlim(\n 0,\n (len(cfg['l4_lengths']) + 0.5) * (len(cfg['l2_lengths']) + 2))\n plot_optimal_tau_for_mean_uncertainty_reduction(\n results[e], results_inftau[e])\n\n plt.xticks(\n (sp.arange(len(cfg['l4_lengths'])) + 0.5) *\n (len(cfg['l2_lengths']) + 2),\n ['%s' % l for l in cfg['l4_lengths']])",
"def subplot_run_basic(acc_span, acc_tot, setting_val, str_title='Learning rate', \n str_leg='F', subplotids=[1,1,1]):\n # Get nmber of features to plot\n n_plot = acc_span.shape[0]\n # Create corresponding subplot\n plt.subplot(subplotids[0], subplotids[1], subplotids[2])\n # Iterate over sorted args (smallest to biggest)\n for i in np.argsort(setting_val[:,0]):\n # Change value display depending on its value\n if setting_val[i,0] >= 1:\n # Interger representation if > 1\n str_legend = '(' + str_leg + ',N)' + '=({}, {})'.format(int(setting_val[i,0]),int(setting_val[i,1])) \n else:\n str_legend ='{:.1e}'.format(setting_val[i,0]) # Scientific representation if < 1\n plt.plot(acc_span[i,:], acc_tot[i,:], '-',label=str_legend, linewidth=2) # Plot feature\n # Set legend location and column numbers (max 3 features per column)\n plt.legend(loc = 4, ncol=int(np.ceil(n_plot/3)))\n plt.title('Variation - ' + str_title, fontsize=16)\n # Set plot limit\n plt.grid(); plt.ylim([0, 1]);",
"def show_results(self):\n\n N = split_list(self.N)\n # create subplot\n fig = make_subplots(rows=1,cols=2,\n subplot_titles=('Fish population', 'Harvested fish'),\n specs=[[{'type': 'xy'}, {'type': 'pie'}]])\n #Add population line graph\n fig.add_trace(go.Scatter(y=N['odds'], x=np.linspace(1, 11, 6), name='odd year population',\n hovertemplate =\n 'Year: %{x}'+ '<br>Pop: %{y}'),\n row=1, col=1)\n fig.add_trace(go.Scatter(y=N['evens'], x=np.linspace(2, 12, 6), name='even year population',\n hovertemplate =\n 'Year: %{x}'+ '<br>Pop: %{y}'),\n row=1, col=1)\n fig.update_xaxes(title_text=\"year\", row=1, col=1)\n fig.update_yaxes(title_text=\"population\", row=1, col=1)\n\n # cannot use 'paper' as yref due to bug in sublplot.\n fig.add_shape(type='line',\n xref='x', yref='y',\n x0=2.5, y0=-10, x1=2.5, y1=1000,\n line=dict(color='Black', width=3),\n row=1, col=1)\n\n # create pie chart\n colors = ['#636EFA', '#EF553B'] \n labels = ['total odd year harvest', 'total even year harvest']\n M = split_list(self.harvest_record)\n values = [sum(M['odds']), sum(M['evens'])]\n fig.add_trace(go.Pie(labels=labels, values=values, hoverinfo='label', textinfo='value', marker=dict(colors=colors)), \n row=1, col=2)\n\n # add title\n fig.update_layout(title_text='Results') \n fig.write_html(\"fish_trap_simulation.html\")\n\n \n return fig",
"def get_multiobjective_plot(evaluator_list, stride=500):\n num_objectives = (\n evaluator_list[0].stats['multiobj_stats']['episode_totals'].shape[1])\n values = [collections.defaultdict(list) for _ in range(num_objectives)]\n for e in evaluator_list:\n for i in range(num_objectives):\n values[i][e.task_name].append(\n e.stats['multiobj_stats']['episode_totals'][:, i])\n means = [None] * num_objectives\n stds = [None] * num_objectives\n for i in range(num_objectives):\n values[i] = _map(np.vstack, values[i])\n means[i] = _map(functools.partial(np.mean, axis=0), values[i])\n stds[i] = _map(functools.partial(np.std, axis=0), values[i])\n\n fig, axes = plt.subplots(num_objectives, 1, figsize=(8, 6 * num_objectives))\n for objective_idx in range(num_objectives):\n ax = axes[objective_idx]\n for i, task_name in enumerate(means[objective_idx]):\n m = means[objective_idx][task_name]\n s = stds[objective_idx][task_name]\n idx = i % len(_COLORS)\n x = np.arange(len(m))\n ax.plot(x, m, lw=2, color=_COLORS[idx], alpha=.6, label=None)\n ax.plot(x[::stride], m[::stride], 'o', lw=2, marker=_MARKERS[idx],\n markersize=10, color=_COLORS[idx], label=task_name)\n ax.fill_between(x, m - s, m + s, alpha=.4, lw=2, color=_COLORS[idx])\n ax.legend()\n ax.set_ylabel('Objective {}'.format(objective_idx))\n ax.set_xlabel('Episode')\n return fig",
"def plot_results(param_names, param_options, results, params):\n\n\t# circuit/run_num where run_num is one before the existing one\n\tdirectory = params.circuit\n\tif not os.path.isdir(directory):\n\t\tos.mkdir(directory)\n\truns = immediate_subdir(directory)\n\tlatest_run = 0\n\tif runs:\n\t\tnatural_sort(runs)\n\t\tlatest_run = get_trailing_num(runs[-1])\n\tdirectory = os.path.join(directory, \"run\" + str(latest_run+1))\n\n\tprint(directory)\n\tif not os.path.isdir(directory):\n\t\tos.mkdir(directory)\n\n\twith Chdir(directory):\n\n\t\texport_results_to_csv(param_names, results, params)\n\n\t\tx = results.keys()\n\t\ty = []\n\t\tnext_figure = True\n\n\t\tp = 0\n\t\tplt.figure()\n\t\twhile p < len(param_names):\n\t\t\tprint(param_names[p])\n\n\t\t\tif param_options[p]:\n\t\t\t\tnf = True\n\t\t\t\tfor option in param_options[p].split():\n\t\t\t\t\t# stopping has veto power (must all be True to pass)\n\t\t\t\t\tnf = nf and plot_options(option)\n\t\t\t\tnext_figure = nf\n\n\t\t\tif not next_figure:\n\t\t\t\t# y becomes list of lists (for use with stackable plots)\n\t\t\t\ty.append([result[p] for result in results.values()])\n\t\t\t\tp += 1\n\t\t\t\tcontinue\n\t\t\telif not y:\n\t\t\t\ty = [result[p] for result in results.values()]\n\n\t\t\tlx = x[-1]\n\t\t\tly = y[-1]\n\t\t\tplot_method(x,y)\n\t\t\tplt.xlabel('iteration')\n\t\t\tplt.xlim(xmin=0)\n\t\t\t\n\t\t\tplt.ylabel(param_names[p])\n\n\t\t\t# annotate the last value\n\t\t\tannotate_last(lx,ly)\n\n\t\t\tif next_figure:\n\t\t\t\tplt.savefig(param_names[p])\n\t\t\t\tplt.figure()\n\n\t\t\tp += 1\n\t\t# in case the last figure hasn't been shuffled onto file yet\n\t\tif not next_figure:\n\t\t\tplot_method(x,y)\n\t\t\tplt.savefig(param_names[-1])",
"def plot_pipeline(differences_dir, plots_dir_intonation, num_quantiles = 31):\n perf_list = ['54363310_1939750539', '540791114_1793842568']\n difference_path_list = [os.path.join(differences_dir, perf_list[i] + \".npy\") for i in range(len(perf_list))]\n comparisons_list = [np.load(path) for _, path in enumerate(difference_path_list)]\n num_samples = 10000\n # quantile indices\n q_indices = (np.linspace(0, 1, num_quantiles)*(num_samples-1)).astype(np.int32)\n plt.style.use('ggplot')\n labels = ['perf. A', 'perf. B']\n colors = ['blue', 'red']\n linestyles = ['dotted', 'dashed']\n grid = plt.GridSpec(2, 2)\n ax1 = plt.subplot(grid[1, 0])\n ax2 = plt.subplot(grid[1, 1])\n ax4 = plt.subplot(grid[0, :])\n ax4.plot(comparisons_list[0], color=colors[0], label=labels[0], linestyle=linestyles[0])\n ax4.plot(comparisons_list[1], color=colors[1], label=labels[1], linestyle=linestyles[1])\n ax4.set_title(\"Difference between MIDI and pYIN, two performances\")\n ax4.set_ylabel(\"Cents\")\n ax4.set_xlabel(\"Frames\")\n ax4.axhline(y=200, linestyle=\"solid\", linewidth=0.7, c=\"black\", zorder=2, label=\"thresh.\")\n ax4.axhline(y=-200, linestyle=\"solid\", linewidth=0.7, c=\"black\", zorder=2)\n ax4.legend(loc=\"upper right\")\n ax1.set_title(\"10k random sample of distances\")\n ax1.set_ylabel(r\"$|$Cents$|$\")\n ax1.set_xlabel(\"Frames sorted by distance\")\n ax2.set_title(\"Sample quantiles\")\n ax2.set_xlabel(\"Quantile indices\")\n # run analysis song by song\n for i, arr in enumerate(comparisons_list):\n # random sample so all arrays have the same size\n samples = np.random.choice(arr, num_samples, replace=True)\n # sort\n samples = np.sort(np.abs(samples))\n # discard the high values (might be due to misalignment, etc...)\n samples = samples[samples <= 200]\n samples = np.random.choice(samples, num_samples, replace=True)\n samples = np.sort(np.abs(samples))\n ax1.plot(samples, color=colors[i], linestyle=linestyles[i], label=labels[i])\n # get the quantiles\n samples = samples[q_indices]\n ax2.plot(samples, color=colors[i], linestyle=linestyles[i], label=labels[i])\n ax1.legend()\n ax2.legend()\n plt.tight_layout()\n plt.savefig(os.path.join(plots_dir_intonation, \"data processing pipeline.eps\"), format=\"eps\")\n plt.show()",
"def mult_plot_runtime_performance(out_parser_dict, plot_type='performance', colormap='jet', reverse_legend=False,\n plot_over='population', **kwargs):\n fig, ax = viz_utils.setup_figure_1ax(x_label='Input size [population]',\n y_label=' '.join([LABEL_MAP[plot_type], UNITS_MAP[plot_type]]))\n\n # Check that all runs have same configuration and only one algo / obj func\n first_config = list(out_parser_dict.values())[0].config\n for parser in list(out_parser_dict.values())[1:]:\n if parser.config.keys() != first_config.keys():\n raise AssertionError('For multiple runtime / performance plots, need equal config keys..')\n if parser.config.items() != first_config.items():\n raise AssertionError('For multiple runtime / performance plots, need equal configs.')\n for parser in out_parser_dict.values():\n assert len(parser.config['algorithm']) == 1, 'Only one algorithm over different runs per plot.'\n assert len(parser.config['obj_func']) == 1, 'Only one objective func over different runs per plot.'\n\n if 'vmax' in kwargs:\n vmax = kwargs['vmax']\n else:\n vmax = len(out_parser_dict)\n cmap_norm, cmap = norm_cmap(colormap, vmin=0, vmax=vmax)\n\n idx = 0\n for run_label, out_parser in out_parser_dict.items():\n plot_mean_runtime_vs_input_size(out_parser, plot_type, plot_over=plot_over, ax=ax,\n color=cmap(cmap_norm(idx)), label=run_label,\n reverse_legend=reverse_legend, **kwargs)\n idx += 1\n ax.set_ylim(bottom=0.0)\n # Shrink current axis by 20%\n box = ax.get_position()\n ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])\n\n if reverse_legend:\n handles, labels = ax.get_legend_handles_labels()\n ax.legend(reversed(handles), reversed(labels), frameon=False, loc='center left', bbox_to_anchor=(1, 0.5))\n else:\n ax.legend(frameon=False, loc='center left', bbox_to_anchor=(1, 0.5))",
"def plotDifferentSettings():\n\t#save location of the results\n\tresloc = 'Different_settings_results'\n\n\t#the numrows limits\n\tnrows_lims = [1e4, 1e7]\n\tnbits = 30\n\t\n\t#string for the file names of the to be saved files\n\tsettingsstr = 'nrows={:.0e}--{:.0e}_nbits={}'.format(nrows_lims[0], nrows_lims[1], nbits)\n\t\n\t#the relative approximation error for the different counting algorithms\n\tll_RAE = []\n\tprob_RAE = []\n\tcomb_RAE = []\n\t#the runtime for the different algorithms\n\ttc_runtime = []\n\tll_runtime = []\n\tprob_runtime = []\n\tcomb_runtime = []\n\n\t#the different settings we want to test\n\tnumrows = np.linspace(nrows_lims[0], nrows_lims[1], num = 15, dtype = int)\n\tnumbits = np.array([nbits])\n\t\n\tlooplength = len(numrows)\n\t\n\ttry:\n\t\t(ll_RAE, prob_RAE, comb_RAE, tc_runtime, ll_runtime, prob_runtime, comb_runtime) = np.loadtxt('./{0}/diffset_results_{1}.txt'.format(resloc, settingsstr))\n\texcept:\n\t\tfor i in np.arange(len(numrows)):\n\t\t\toF.progress(i, looplength)\n\t\t\tfor j in np.arange(len(numbits)):\n\t\t\t\tresults = runCounts(numrows[i], numbits[j], doprints = False)\n\t\t\t\n\t\t\t\tll_RAE = np.append(ll_RAE, results[0])\n\t\t\t\tprob_RAE = np.append(prob_RAE, results[1])\n\t\t\t\tcomb_RAE = np.append(comb_RAE, results[2])\n\t\t\t\n\t\t\t\ttc_runtime = np.append(tc_runtime, results[3])\n\t\t\t\tll_runtime = np.append(ll_runtime, results[4])\n\t\t\t\tprob_runtime = np.append(prob_runtime, results[5])\n\t\t\t\tcomb_runtime = np.append(comb_runtime, results[6])\n\t\t\t\n\t\tnp.savetxt('./{0}/diffset_results_{1}.txt'.format(resloc, settingsstr), \n\t\t\tnp.array([ll_RAE, prob_RAE, comb_RAE, tc_runtime, ll_runtime, prob_runtime, comb_runtime]), \n\t\t\theader = '#ll_RAE, prob_RAE, comb_RAE, tc_runtime, ll_runtime, prob_runtime, comb_runtime')\n\t\n\tplotTwoValues(numrows, ll_RAE, ll_runtime, 'Number of rows', 'RAE [\\%]', 'Runtime [s]', 'RAE and runtime of loglog count for different number of rows. \\nNumbits = {}'.format(nbits), 'RAEandRuntime_loglog_{0}.pdf'.format(settingsstr))\n\t\n\tplotTwoValues(numrows, prob_RAE, prob_runtime, 'Number of rows', 'RAE [\\%]', 'Runtime [s]', 'RAE and runtime of probabilisic count for different \\nnumber of rows. Numbits = {}'.format(nbits), 'RAEandRuntime_prob_{0}.pdf'.format(settingsstr))",
"def plot_reion_properties(rank, size, comm, reion_ini_files, gal_ini_files,\n model_tags, reion_plots, output_dir, output_format):\n\n # Check to see if the output directory exists.\n if rank == 0:\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n print(\"Made output directory {0}\".format(output_dir))\n\n MC_dir = \"{0}/MC\".format(output_dir)\n os.makedirs(MC_dir)\n print(\"Made directory {0}\".format(MC_dir))\n\n # First calculate all the properties and statistics we need.\n reion_data = generate_data(rank, size, comm, reion_ini_files,\n gal_ini_files, reion_plots, output_dir,\n model_tags, output_format)\n\n # Gather all the fractions onto the master process.\n # This will be used for many different plots. \n master_mass_frac = collective.collect_hist_across_tasks(rank, comm, \n reion_data[\"mass_frac_allmodels\"]) \n master_mass_frac = comm.bcast(master_mass_frac, root = 0)\n\n # Then find out what we need and plot em!\n if reion_plots[\"history\"] and rank == 0:\n\n \n duration_z, duration_t, reion_completed = \\\n calc_duration(reion_data[\"z_array_reion_allmodels\"],\n reion_data[\"lookback_array_reion_allmodels\"],\n master_mass_frac, reion_plots[\"duration_definition\"])\n\n for model_number in range(len(master_mass_frac)):\n print(\"Model {0}: Start {1:.2f} \\tMid {2:.2f}\\tEnd {3:.2f}\\t\"\n \"dz {4:.2f}\\tdt {5:.1f}Myr\\tReion Completed {6}\" \\\n .format(model_number, duration_z[model_number][0],\n duration_z[model_number][1], duration_z[model_number][-1],\n duration_z[model_number][0]-duration_z[model_number][-1],\n duration_t[model_number][-1]-duration_t[model_number][0],\n reion_completed[model_number]))\n\n print(\"Plotting the reionization history.\")\n reionplot.plot_history(reion_data[\"z_array_reion_allmodels\"],\n reion_data[\"lookback_array_reion_allmodels\"],\n reion_data[\"cosmology_allmodels\"],\n reion_data[\"t_bigbang_allmodels\"],\n master_mass_frac,\n model_tags, output_dir, \"history\",\n output_format)\n\n\n\n if reion_plots[\"nion\"]:\n\n master_nion = collective.collect_hist_across_tasks(rank, comm, \n reion_data[\"nion_allmodels\"])\n\n if rank == 0:\n print(\"Plotting the ionizing emissivity.\")\n reionplot.plot_nion(reion_data[\"z_array_reion_allmodels\"],\n reion_data[\"lookback_array_reion_allmodels\"],\n reion_data[\"cosmology_allmodels\"],\n reion_data[\"t_bigbang_allmodels\"],\n master_nion,\n reion_data[\"nion_factor_allmodels\"], \n model_tags, output_dir, \"nion\", output_format)\n\n if reion_plots[\"ps_fixed_XHI\"]:\n k, P21, PHII = determine_ps_fixed_XHI(rank, size, comm,\n reion_data[\"z_array_reion_allmodels\"],\n reion_data[\"cosmology_allmodels\"],\n master_mass_frac, \n reion_data[\"XHII_fbase_allmodels\"],\n reion_data[\"XHII_precision_allmodels\"],\n reion_data[\"density_fbase_allmodels\"],\n reion_data[\"density_precision_allmodels\"],\n reion_data[\"GridSize_allmodels\"],\n reion_data[\"boxsize_allmodels\"],\n reion_data[\"first_snap_allmodels\"],\n reion_plots[\"fixed_XHI_values\"])\n\n if rank == 0:\n print(\"Plotting PS at fixed neutral fraction.\")\n reionplot.plot_ps_fixed_XHI(k, P21, PHII,\n reion_plots[\"fixed_XHI_values\"],\n model_tags, output_dir, \"ps_fixed_XHI\",\n output_format)\n\n if reion_plots[\"contours\"] and rank == 0:\n # tau is used for multiple plots. 
So check if we need to calculate it.\n try:\n tau_allmodels\n except NameError:\n tau_allmodels = calc_tau(reion_data[\"z_array_reion_allmodels\"],\n reion_data[\"cosmology_allmodels\"],\n reion_data[\"helium_allmodels\"],\n master_mass_frac)\n\n # For the contours, only plot the optical depth at the highest z.\n tau_highz = []\n for model_number in range(len(tau_allmodels)):\n tau_highz.append(tau_allmodels[model_number][0])\n\n duration_z, duration_t, reion_completed = \\\n calc_duration(reion_data[\"z_array_reion_allmodels\"],\n reion_data[\"lookback_array_reion_allmodels\"],\n master_mass_frac, reion_plots[\"duration_definition\"])\n\n print(\"Plotting contours of constant tau.\")\n reionplot.plot_tau_contours(tau_highz, reion_completed,\n reion_plots[\"alpha_beta_limits\"],\n output_dir, \"tau_contours\", output_format)\n\n print(\"Plotting contours of constant reionization duration.\")\n reionplot.plot_duration_contours(duration_t, reion_completed,\n reion_plots[\"alpha_beta_limits\"],\n output_dir, \"duration_contours\",\n output_format)\n\n if reion_plots[\"optical_depth\"] and rank == 0:\n # tau is used for multiple plots. So check if we need to calculate it.\n try:\n tau_allmodels\n except NameError:\n tau_allmodels = calc_tau(reion_data[\"z_array_reion_allmodels\"],\n reion_data[\"cosmology_allmodels\"],\n reion_data[\"helium_allmodels\"],\n master_mass_frac)\n\n print(\"Plotting the optical depth.\")\n reionplot.plot_tau(reion_data[\"z_array_reion_allmodels\"],\n reion_data[\"lookback_array_reion_allmodels\"], \n reion_data[\"cosmology_allmodels\"],\n reion_data[\"t_bigbang_allmodels\"],\n tau_allmodels,\n model_tags, output_dir, \"optical_depth\",\n output_format)\n\n if reion_plots[\"optical_depth\"] and reion_plots[\"history\"] and rank == 0:\n print(\"Plotting the combined optical depth/ionization history.\")\n reionplot.plot_combined_history_tau(reion_data[\"z_array_reion_allmodels\"],\n reion_data[\"lookback_array_reion_allmodels\"], \n reion_data[\"cosmology_allmodels\"],\n reion_data[\"t_bigbang_allmodels\"],\n master_mass_frac, tau_allmodels, \n model_tags, output_dir,\n \"history_tau\", output_format)\n\n if reion_plots[\"optical_depth\"] and reion_plots[\"nion\"] and rank == 0:\n print(\"Plotting the combined optical depth/ionizing emissivity.\")\n reionplot.plot_combined_nion_tau(reion_data[\"z_array_reion_allmodels\"],\n reion_data[\"lookback_array_reion_allmodels\"],\n reion_data[\"cosmology_allmodels\"],\n reion_data[\"t_bigbang_allmodels\"],\n master_nion,\n reion_data[\"nion_factor_allmodels\"],\n tau_allmodels, model_tags, output_dir,\n \"nion_tau\", output_format)\n\n if reion_plots[\"ps_scales\"] or reion_plots[\"ps_scales_beta\"]:\n print(\"Gathering the 21cm Power Spectra across processors\")\n k, P21, PHII = gather_ps(rank, size, comm,\n reion_data[\"k_allmodels\"],\n reion_data[\"P21_allmodels\"],\n reion_data[\"PHII_allmodels\"],\n reion_data[\"first_snap_allmodels\"],\n reion_data[\"last_snap_allmodels\"])\n\n if rank == 0:\n print(\"Plotting the large scale power as a function of small \"\n \"scale.\")\n\n if reion_plots[\"ps_scales_beta\"]:\n calc_beta = True\n else:\n calc_beta = False \n\n # Now that we have all the PS on the master rank, calculate the\n # amplitude at the specified scales.\n scale_power_dict = calc_scale_power(k, P21, PHII,\n reion_data[\"z_array_reion_allmodels\"], \n reion_plots[\"small_scale_def\"],\n reion_plots[\"large_scale_def\"],\n reion_plots[\"small_scale_err\"],\n reion_plots[\"large_scale_err\"],\n 
calc_beta=calc_beta)\n\n k_small_scale = scale_power_dict[\"k_small_scale\"]\n k_large_scale = scale_power_dict[\"k_large_scale\"]\n\n P21_small_scale = scale_power_dict[\"P21_small_scale\"]\n P21_large_scale = scale_power_dict[\"P21_large_scale\"]\n\n PHII_small_scale = scale_power_dict[\"PHII_small_scale\"]\n PHII_large_scale = scale_power_dict[\"PHII_large_scale\"]\n\n if reion_plots[\"ps_scales\"]:\n reionplot.plot_ps_scales(P21_small_scale,\n P21_large_scale, master_mass_frac, \n reion_data[\"z_array_reion_allmodels\"],\n reion_plots[\"fixed_XHI_values\"],\n reion_plots[\"ps_scales_z\"],\n reion_plots[\"small_scale_def\"],\n reion_plots[\"large_scale_def\"],\n reion_plots[\"small_scale_err\"],\n reion_plots[\"large_scale_err\"],\n model_tags, output_dir, \"ps_scales\",\n output_format)\n\n if reion_plots[\"ps_scales_beta\"]:\n\n P21_beta = scale_power_dict[\"P21_beta\"]\n P21_beta_error = scale_power_dict[\"P21_beta_error\"]\n PHII_beta = scale_power_dict[\"PHII_beta\"]\n\n reionplot.plot_ps_beta(P21_beta, P21_beta_error,\n reion_data[\"z_array_reion_allmodels\"],\n reion_data[\"lookback_array_reion_allmodels\"],\n reion_data[\"cosmology_allmodels\"],\n reion_data[\"t_bigbang_allmodels\"],\n reion_plots[\"small_scale_def\"],\n reion_plots[\"large_scale_def\"],\n model_tags, output_dir,\n \"ps_scales_beta\", output_format)\n\n\n\n if reion_plots[\"slices_fixed_XHI\"] and rank == 0:\n print(\"Plotting slices at fixed XHI fractions.\")\n reionplot.plot_slices_XHI(reion_data[\"z_array_reion_allmodels\"],\n reion_data[\"cosmology_allmodels\"],\n master_mass_frac, \n reion_data[\"XHII_fbase_allmodels\"],\n reion_data[\"XHII_precision_allmodels\"],\n reion_data[\"GridSize_allmodels\"],\n reion_data[\"boxsize_allmodels\"],\n reion_data[\"first_snap_allmodels\"],\n reion_plots[\"fixed_XHI_values\"],\n reion_plots[\"cut_slice\"],\n reion_plots[\"cut_thickness\"],\n model_tags, output_dir, \"slices_XHI\",\n output_format)\n\n\n if reion_plots[\"bubble_size\"] and rank == 0:\n print(\"Determining bubble sizes at fixed XHI.\")\n reionplot.determine_bubble_size(reion_data[\"z_array_reion_allmodels\"],\n master_mass_frac,\n reion_data[\"first_snap_allmodels\"],\n reion_data[\"GridSize_allmodels\"],\n reion_data[\"boxsize_allmodels\"],\n reion_plots[\"fixed_XHI_values\"],\n model_tags, output_dir)\n\n if reion_plots[\"zreion_dens_cross\"] and rank == 0:\n print(\"Calculating the zreion-density cross correlation.\")\n k, crosspspec, crosscorr, bias = \\\n zreion_dens_cross(reion_data[\"density_fbase_allmodels\"],\n reion_data[\"density_precision_allmodels\"],\n reion_data[\"zreion_path_allmodels\"],\n reion_data[\"GridSize_allmodels\"],\n reion_data[\"boxsize_allmodels\"],\n reion_data[\"last_snap_allmodels\"])\n\n reionplot.plot_zreion_dens_cross(k, crosscorr, bias, model_tags,\n output_dir, \"zreion_dens_crosscorr\",\n output_format)\n\n if reion_plots[\"dens_ion_contours\"] and rank == 0:\n print(\"Plotting contours of density-ionization.\")\n reionplot.plot_dens_reion_contours(master_mass_frac,\n reion_data[\"XHII_fbase_allmodels\"],\n reion_data[\"XHII_precision_allmodels\"],\n reion_data[\"density_fbase_allmodels\"],\n reion_data[\"density_precision_allmodels\"],\n reion_data[\"GridSize_allmodels\"],\n reion_data[\"first_snap_allmodels\"],\n reion_plots[\"fixed_XHI_values\"],\n model_tags, output_dir,\n \"dens_ion_contours\", output_format)\n\n if reion_plots[\"dens_zreion_contours\"] and rank == 0:\n print(\"Plotting contours of density-zreion.\")\n 
reionplot.plot_dens_zreion_contours(reion_data[\"density_fbase_allmodels\"],\n reion_data[\"density_precision_allmodels\"],\n reion_data[\"zreion_path_allmodels\"],\n reion_data[\"GridSize_allmodels\"],\n reion_data[\"last_snap_allmodels\"],\n model_tags, output_dir,\n \"dens_zreion_contours\", output_format)",
"def plot_analysis(opt):\n LOG.debug(\"Plotting GetLLM analysis.\")\n mdl_analysis = opt.subnode in mdl_subnodes\n\n ps.set_style(\"standard\", MANUAL_STYLE)\n xmin = min(opt.xplot_xmin, opt.yplot_xmin)\n xmax = max(opt.xplot_xmax, opt.yplot_xmax)\n\n gs = gridspec.GridSpec(2, 1, height_ratios=[1, 1])\n ax_x = plt.subplot(gs[0])\n ax_y = None\n ir_pos = None\n\n paths = opt.path.split(',')\n\n if opt.label == 'None':\n if mdl_analysis:\n labels = [\"mo_\" + opt.path.rsplit('/', 1)[-1], \"me_\" + opt.path.rsplit('/', 1)[-1]]\n else:\n labels = paths\n else:\n labels = opt.label.split(',')\n\n for idx, path in enumerate(paths):\n data_x, data_y = get_data(path, opt.mainnode, opt.subnode)\n plot_data(ax_x, data_x, labels, idx, opt.change_marker)\n\n if ir_pos is None:\n ir_pos = get_irpos(data_x, opt.accel)\n\n if data_y is not None:\n if ax_y is None:\n ax_x.axes.get_xaxis().set_visible(False)\n ax_y = plt.subplot(gs[1])\n plot_data(ax_y, data_y, labels, idx, opt.change_marker)\n\n ax_x.set_xlim(xmin, xmax)\n ax_x.set_ylim(opt.xplot_ymin, opt.xplot_ymax)\n set_yaxis_label(ax_x, 'x', opt.subnode)\n\n if ax_y is not None:\n ax_y.set_xlim(xmin, xmax)\n ax_y.set_ylim(opt.yplot_ymin, opt.yplot_ymax)\n set_yaxis_label(ax_y, 'y', opt.subnode)\n ps.set_xaxis_label(ax_y)\n if ir_pos:\n ps.show_ir(ir_pos, ax_y, mode='outside')\n ps.show_ir(ir_pos, ax_x, mode='lines')\n else:\n ax_x.axes.get_xaxis().set_visible(True)\n ps.set_xaxis_label(ax_x)\n if ir_pos:\n ps.show_ir(ir_pos, ax_x, mode='outside')\n\n if int(opt.legendh) > 12:\n show_legend(ax_x, int(opt.legendx), int(opt.legendy))\n return gs",
"def plot_eval_results(eval_results, metric=None, param=None,\n xaxislabel=None, yaxislabel=None,\n title=None,\n title_fontsize='xx-large',\n subfig_fontsize='large',\n axes_title_fontsize='medium',\n show_metric_direction=True,\n metric_direction_font_size='medium',\n subplots_adjust_opts=None,\n figsize='auto',\n fig_opts=None,\n subfig_opts=None,\n subplots_opts=None):\n if type(eval_results) not in (list, tuple) or not eval_results:\n raise ValueError('`eval_results` must be a list or tuple with at least one element')\n\n first_row = next(iter(eval_results))\n\n if type(first_row) not in (list, tuple):\n raise ValueError('`eval_results` must be a list or tuple containing a (param, values) tuple. '\n 'Maybe `eval_results` must be converted with `results_by_parameter`.')\n\n n_params = len(first_row) - 1\n\n if n_params < 1:\n raise ValueError('each entry in `eval_results` must contain at least two values '\n '(n parameter values and evaluation results)')\n\n if isinstance(param, str):\n param = [param]\n\n if param and len(param) != n_params:\n raise ValueError('if `param` is given, its length must equal the number of parameters in the eval. results')\n\n eval_colwise = list(zip(*eval_results))\n n_param_combinations = 1\n for p in range(0, n_params-1): # we don't count the last level as this will go on the x-axis\n n_param_combinations *= len(set(eval_colwise[p]))\n\n if metric is not None and type(metric) not in (list, tuple):\n metric = [metric]\n elif metric is None:\n # remove special evaluation result 'model': the calculated model itself\n metric = sorted(set(first_row[-1].keys()) - {'model'})\n\n metric = sorted(metric)\n\n metric_direction = []\n for m in metric:\n if m == 'perplexity':\n metric_direction.append('minimize')\n else:\n m_fn_name = 'metric_%s' % (m[:16] if m.startswith('coherence_gensim') else m)\n m_fn = getattr(evaluate, m_fn_name, None)\n if m_fn:\n metric_direction.append(getattr(m_fn, 'direction', 'unknown'))\n else:\n metric_direction.append('unknown')\n\n n_metrics = len(metric)\n\n assert n_metrics == len(metric_direction)\n\n metrics_ordered = []\n for m_dir in sorted(set(metric_direction), reverse=True):\n metrics_ordered.extend([(m, d) for m, d in zip(metric, metric_direction) if d == m_dir])\n\n assert n_metrics == len(metrics_ordered)\n\n if n_param_combinations > 3:\n n_fig_rows = math.ceil(math.sqrt(n_param_combinations))\n n_fig_cols = n_fig_rows\n\n n_fig_rows -= (n_fig_rows**2 - n_param_combinations) // n_fig_rows\n else:\n n_fig_rows = 1\n n_fig_cols = n_param_combinations\n\n # get figures and subplots (axes)\n if figsize == 'auto':\n figsize = (6 * n_fig_cols, 2 * n_fig_rows * n_metrics)\n\n fig = plt.figure(layout='constrained', figsize=figsize, **(fig_opts or {}))\n\n subfigs = fig.subfigures(nrows=n_fig_rows, ncols=n_fig_cols, **(subfig_opts or {}))\n if isinstance(subfigs, np.ndarray):\n subfigs = subfigs.flatten()\n else:\n subfigs = [subfigs]\n\n #unique_param_values_param_index = []\n unique_param_values = []\n for col in eval_colwise[:-2]:\n unique_vals = set(col)\n #unique_param_values_param_index.append([i] * len(unique_vals))\n unique_param_values.append(sorted(unique_vals))\n\n param_combinations = list(itertools.product(*unique_param_values))\n assert len(param_combinations) == n_param_combinations\n\n x = np.array(sorted(set(eval_colwise[-2])))\n all_metrics_results = np.array(eval_colwise[-1])\n\n subfigs_axes = []\n\n for i_subfig, subfig in enumerate(subfigs):\n if len(subfigs) > 1:\n if i_subfig >= len(param_combinations):\n 
break\n param_vals = param_combinations[i_subfig]\n if param:\n subfig_titles = [f'{param[i]} = {v}' for i, v in enumerate(param_vals)]\n else:\n subfig_titles = [str(v) for v in param_vals]\n\n subfig.suptitle('\\n'.join(subfig_titles), fontsize=subfig_fontsize)\n which_results = np.repeat(True, len(all_metrics_results))\n for i, v in enumerate(param_vals):\n which_results &= np.isclose(np.array(eval_colwise[i]), v)\n\n metrics_results = all_metrics_results[which_results]\n else:\n metrics_results = all_metrics_results\n\n axes = subfig.subplots(nrows=n_metrics, ncols=1, sharex=True, **(subplots_opts or {}))\n subfigs_axes.append(axes)\n\n # draw subplot for each metric\n axes_pos_per_dir = defaultdict(list)\n axes_sequence = axes.flatten() if n_metrics > 1 else [axes]\n assert len(axes_sequence) == len(metrics_ordered)\n for i, (ax, (m, m_dir)) in enumerate(zip(axes_sequence, metrics_ordered)):\n if show_metric_direction:\n axes_pos_per_dir[m_dir].append(ax.get_position())\n\n y = [mres[m] for mres in metrics_results]\n ax.plot(x, y, label=m)\n\n ax.set_title(m, fontsize=axes_title_fontsize)\n\n # set axis labels\n if (param or xaxislabel) and i == len(metric)-1:\n if xaxislabel:\n ax.set_xlabel(xaxislabel)\n else:\n ax.set_xlabel(param[-1])\n if yaxislabel:\n ax.set_ylabel(yaxislabel)\n\n # show grouped metric direction on the left\n if axes_pos_per_dir: # = if show_metric_direction\n left_xs = []\n ys = []\n for m_dir, bboxes in axes_pos_per_dir.items():\n left_xs.append(min(bb.x0 for bb in bboxes))\n min_y = min(bb.y0 for bb in bboxes)\n max_y = max(bb.y1 for bb in bboxes)\n ys.append((min_y, max_y))\n\n left_x = min(left_xs) / 2.5\n\n for (min_y, max_y), m_dir in zip(ys, axes_pos_per_dir.keys()):\n center_y = min_y + (max_y - min_y) / 2\n\n subfig.text(left_x / 1.5, center_y, m_dir, fontsize=metric_direction_font_size, rotation='vertical',\n horizontalalignment='right', verticalalignment='center')\n\n # set adjustments\n subplots_adjust_kwargs = {}\n\n if show_metric_direction:\n subplots_adjust_kwargs.update({'left': 0.15})\n\n subplots_adjust_kwargs.update(subplots_adjust_opts or {})\n\n if subplots_adjust_kwargs:\n fig.subplots_adjust(**subplots_adjust_kwargs)\n\n if title:\n fig.suptitle(title, fontsize=title_fontsize)\n\n return fig, subfigs, subfigs_axes",
"def plot_evaluation(parameters_dict, log_df, settings, evaluation_set_kde, plotname):\n\n\n plots = []\n\n\n ### setup the colors for each component\n if int(settings['nr_components']) < 3:\n colors = ['rgb(228,26,28)', 'rgb(55,126,184)']\n elif int(settings['nr_components']) < 13:\n colors = np.array(cl.scales[str(settings['nr_components'])]['qual']['Paired'])\n else:\n colors = cl.interp(cl.scales['10']['qual']['Paired'], 20)\n\n\n ### set up ab list\n ab_list = evaluation_set_kde['contact'].keys()\n\n\n\n\n ####################### plotting of settings\n print_to_table = {}\n for key in sorted(settings.keys()):\n if key not in ['fold_id_dir','plot_name', 'fixed_parameters', 'threads_proteins', 'qijab_dir',\n 'debug_mode', 'parameter_file', 'settings_file', 'optimization_log_file', 'braw_dir', 'pdb_dir', 'paramdir',\n 'mask_sse', 'lambda_w_fix', 'lfactor', 'plotdir', 'psicov_dir', 'contact', 'hessian_pseudocount']:\n print_to_table[key] = settings[key]\n\n print(\"Generate settings table...\")\n table_settings_1 = plot_settings_table(print_to_table, 1)\n table_settings_2 = plot_settings_table(print_to_table, 2)\n table_settings_3 = plot_settings_table(print_to_table, 3)\n plots.append(table_settings_1)\n plots.append(table_settings_2)\n plots.append(table_settings_3)\n\n\n ####################### negLL and realted plots\n if 'step' in log_df.columns and 'pass' in log_df.columns:\n\n if 'negLL' in log_df.columns:\n plot_negll = plot_convergence_trace_plotly(log_df,\n name=['negLL', 'negLL_crossval'],\n plot_title='neg LL trace for training and cross-val set')\n plots.append(plot_negll)\n\n plot_expfit_negll = plot_exponentialFit_negLL(log_df, plot_title='exponential Fit neg LL')\n plots.append(plot_expfit_negll)\n\n if 'timestamp' in log_df.columns:\n plot_timestamps = plot_convergence_trace_plotly(log_df,\n name=['timestamp'],\n plot_title='time (s) per iteration')\n plots.append(plot_timestamps)\n\n\n if 'gradient_norm_weights' in log_df.columns:\n plot_grad_norm_weights = plot_convergence_trace_plotly(log_df,\n name=['gradient_norm_weights'],\n plot_title='norm of weight gradients')\n plots.append(plot_grad_norm_weights)\n\n if 'gradient_norm_means' in log_df.columns:\n plot_grad_norm_means = plot_convergence_trace_plotly(log_df,\n name=['gradient_norm_means'],\n plot_title='norm of mean gradients')\n plots.append(plot_grad_norm_means)\n\n if 'gradient_norm_prec' in log_df.columns:\n plot_grad_norm_prec = plot_convergence_trace_plotly(log_df,\n name=['gradient_norm_prec'],\n plot_title='norm of precMat gradients')\n plots.append(plot_grad_norm_prec)\n\n\n ####################### plotting of parameters\n print(\"Generate distribution of parameters...\")\n\n #weights\n weights_dict = {}\n for component in range(settings['nr_components']):\n weights_dict['component ' + str(component)] = {\n 'weights (contact)': parameters_dict[\"weight_contact_\" + str(component)][0],\n 'weights (bg)': parameters_dict[\"weight_bg_\" + str(component)][0]\n }\n plot_weights = plot_barplot(\n weights_dict,\n 'Distribution of weights',\n 'component weights',\n type='group',\n colors=colors\n #,plot_out=\"/home/vorberg/weights.html\"\n )\n\n #mu\n mu_df = pd.DataFrame.from_dict(dict((k, parameters_dict[k]) for k in sorted(parameters_dict.keys()) if 'mu' in k))\n plot_means = plot_boxplot(\n mu_df,\n 'Distribution of Means',\n \"values of mean parameters\",\n colors=colors\n #,plot_out=\"/home/vorberg/mus.html\"\n )\n\n #std deviation\n prec_df = pd.DataFrame.from_dict(dict((k, parameters_dict[k]) for k in 
sorted(parameters_dict.keys()) if 'prec' in k))\n try:\n std_dev = prec_df.apply(lambda p: np.sqrt(1.0/p))\n if settings['prec_wrt_L']:\n std_dev = prec_df.apply(lambda p: np.sqrt(1.0/(p*142))) #in case precision is specified depending on L=142\n except ZeroDivisionError as e:\n print(e)\n std_dev=prec_df\n\n std_dev.columns = [column_name.replace(\"prec\", \"std\") for column_name in std_dev.columns]\n plot_stddev = plot_boxplot(\n std_dev,\n 'Distribution of std deviations',\n \"values of std deviation parameters\",\n colors=colors\n #,plot_out=\"/home/vorberg/std.html\"\n )\n\n\n plots.append(plot_weights)\n plots.append(plot_means)\n plots.append(plot_stddev)\n\n ####################### Scatterplot mu vs std dev\n print(\"Generate scatter plot mu vs std...\")\n scatter_dict = {}\n for component in range(settings['nr_components']):\n scatter_dict['mu_'+str(component)] = [\n mu_df['mu_'+str(component)].tolist(),\n std_dev['std_'+str(component)].tolist(),\n AB.values()\n ]\n plot_mu_vs_stddev = plot_scatter(scatter_dict,\n 'Mean vs std deviation',\n 'mean',\n \"std deviation\",\n False,\n colors\n #,plot_out=\"/home/vorberg/mu_vs_std.html\"\n )\n\n plots.append(plot_mu_vs_stddev)\n\n\n ############################################## plotting of gradient norms\n print(\"Generate gradient norms plot...\")\n\n #gradients for mu\n mu_grad_dict = {}\n annotations_dict = {}\n for component in range(settings['nr_components']):\n key = 'mu_'+str(component)\n mu_grad_dict[key] = log_df[key].tolist()[-1]\n annotations_dict[key] = AB\n\n\n plot_gradient_mu_stats = jitter_plot(mu_grad_dict,\n 'Distribution of gradients for mean in last iteration',\n annotations_dict,\n colors,\n None)\n plots.append(plot_gradient_mu_stats)\n\n\n #gradients for precMat\n precMat_grad_dict = {}\n annotations_dict = {}\n for component in range(settings['nr_components']):\n key = 'prec_'+str(component)\n precMat_grad_dict['diagPrecMat_'+str(component)] = log_df[key].tolist()[-1]\n annotations_dict['diagPrecMat_'+str(component)] = AB\n\n\n plot_gradient_precMat_stats = jitter_plot(\n precMat_grad_dict,\n 'Distribution of gradients for precMat in last iteration',\n annotations_dict,\n colors,\n None\n )\n plots.append(plot_gradient_precMat_stats)\n\n ##################################### plotting of gradient trace of a specific ab pair for all components\n print(\"Generate gradient trace plot...\")\n\n gradient_df = log_df.filter(regex=(\"mu_[0-9]*\"))\n plot_gradient_mu_ab_trace = plot_gradient_ab_trace(gradient_df,\n ab_list,\n colors\n )\n plots.append(plot_gradient_mu_ab_trace)\n\n gradient_df = log_df.filter(regex=(\"prec_[0-9]*\"))\n plot_gradient_prec_ab_trace = plot_gradient_ab_trace(\n gradient_df,\n ab_list,\n colors\n )\n plots.append(plot_gradient_prec_ab_trace)\n\n\n ##################################### plotting of univariate mixtures\n if len(evaluation_set_kde['contact']) == 0 or len(evaluation_set_kde['bg']) == 0:\n print \"Evaluation set is empty. 
Cannot plot Mixture Visualization.\"\n else:\n print(\"Generate parameter visualization 1d plots...\")\n plots.append(plot_parameter_visualisation_1d(parameters_dict, evaluation_set_kde, settings, colors, settings['prec_wrt_L']))\n # plot_parameter_visualisation_1d(parameters_dict, evaluation_set_kde, settings, colors, settings['prec_wrt_L'], plot_out=\"/home/vorberg/1d_vis.html\")\n\n # ------------------------------------------------------------------------------\n ### define merged plot\n # ------------------------------------------------------------------------------\n cols = 3.0\n rows = int(np.ceil((len(plots)-1) / cols)) + 2\n subplot_titles = []\n\n # set up titles\n for plot in range(len(plots)-1):\n subplot_titles.append(plots[plot]['layout']['title'])\n if len(subplot_titles) < (cols * (rows-2)):\n for i in range(int((cols * (rows-2))) - len(subplot_titles) ):\n subplot_titles.append(\" \")\n subplot_titles.append(plots[-1]['layout']['title'])\n\n\n # plot all plots as subplots\n fig = tools.make_subplots(rows=rows,\n cols=3,\n specs = [ [{} for col in range(int(cols))] for row in range(rows-2)] + \\\n [[{'rowspan':2, 'colspan': 3}, None, None], [None, None, None]],\n subplot_titles=tuple(subplot_titles),\n print_grid=False)\n\n\n\n\n for i, plot in enumerate(plots[:-1]):\n col = i % int(cols)\n row = (i - col) / int(cols)\n\n #add traces to subplot\n for trace in plot['data']:\n trace['showlegend']=False\n fig.append_trace(trace, row + 1, col + 1)\n\n # adjust x and y axis for table plotting\n if 'annotations' in plot['layout'].keys():\n for cell in plot['layout']['annotations']:\n cell['yref'] = 'y' + str(i + 1)\n cell['xref'] = 'x' + str(i + 1)\n fig['layout']['annotations'] += plot['layout']['annotations']\n\n # adjust axis for all plots\n fig['layout']['xaxis' + str(i + 1)].update(plot['layout']['xaxis1'])\n fig['layout']['yaxis' + str(i + 1)].update(plot['layout']['yaxis1'])\n\n ## add mixture visualisation plot - spans 3 columns\n for trace in plots[-1]['data']:\n fig.append_trace(trace, int(rows)-1, 1)\n fig['layout']['xaxis' + str(int(cols * (rows-2) + 1))].update(plots[-1]['layout']['xaxis1'])\n fig['layout']['yaxis' + str(int(cols * (rows-2) + 1))].update(plots[-1]['layout']['yaxis1'])\n\n #check which plots are visible/invisible according to menu selection\n trace_visibility_ab = {}\n for ab in range(len(ab_list)):\n trace_visibility_ab[ab] = []\n for i, plot in enumerate(plots):\n if 'updatemenus' not in plot['layout'].keys():\n trace_visibility_ab[ab].extend([True] * len(plot['data']))\n else:\n trace_visibility_ab[ab].extend(plot['layout']['updatemenus'][0]['buttons'][ab]['args'][1])\n\n\n #use menu of last plot (=vis of mixture) as template for multiplot menu\n fig['layout']['updatemenus'] = plots[-1]['layout']['updatemenus']\n for ab in range(len(ab_list)):\n fig['layout']['updatemenus'][0]['buttons'][ab]['args'][1] = trace_visibility_ab[ab]\n\n\n fig['layout']['legend']['yanchor'] = 'bottom'\n fig['layout']['legend']['y'] = 0\n fig['layout']['height'] = rows * 250\n fig['layout']['font'] = {'size': 18} # set global font size\n\n plotly_plot(fig, filename=plotname, auto_open=False)",
"def make_summary_plot(run_lists, file_descriptor, attr='sipm1.threeSampleAmpl'):\n biases = []\n gains = []\n pes = []\n currs = []\n gainerrs = []\n quad_terms = []\n quad_errs = []\n for row in sorted(run_lists):\n biases.append(row[0])\n gain_out = fit_gain(row[1], attr=attr)\n out_tuple = gain_out[0]\n gains.append(out_tuple[0])\n gainerrs.append(out_tuple[3])\n smeans = sorted(gain_out[1])\n currs.append(0.5*(smeans[-1] + smeans[-2]))\n pes.append(currs[-1]/gains[-1])\n quad_terms.append(out_tuple[1])\n quad_errs.append(out_tuple[4])\n\n maxgain = max(gains)\n gains = np.array(gains)/maxgain\n gainerrs = np.array(gainerrs)/maxgain\n # gainerrs = 0.1*gains\n\n currs = np.array(currs)/max(currs)\n pes = np.array(pes)\n pe_errs = gainerrs/gains*pes\n maxpe = max(pes)\n fig, ax1 = plt.subplots()\n\n coeffs, V = np.polyfit(biases, gains, 1, w=1.0/gainerrs, cov=True)\n breakdown = -1*coeffs[1]/coeffs[0]\n\n breakdown_sigma = sigma_from_cov(coeffs, V)\n\n # calculate sigmas throughout range\n vals, vecs = np.linalg.eig(V)\n U = np.transpose(vecs)\n xs_for_error = np.arange(breakdown - 0.1, max(biases) + 0.1, 0.01)\n gain_sigmas = sig_from_diag(xs_for_error, U, vals)\n error_band_ys = np.array([i*coeffs[0] + coeffs[1] for i in xs_for_error])\n ax1.fill_between(xs_for_error, error_band_ys + gain_sigmas,\n error_band_ys - gain_sigmas, facecolor='red', alpha=0.5)\n\n fitline = [i*coeffs[0] + coeffs[1] for i in biases] + [0]\n fitbiases = biases + [breakdown]\n\n ax1.set_title('bias scan %s' % file_descriptor)\n fitplot = ax1.plot(fitbiases, fitline, 'r-')\n gainplot = ax1.errorbar(\n biases, gains, yerr=gainerrs, fmt='ro', markersize=10)\n currplot = ax1.plot(biases, currs, 'g*', markersize=15)\n ax1.set_ylim(0, 1.105)\n ax1.set_xlim([breakdown - 0.1, max(biases) + 0.1])\n ax1.set_xlabel('bias voltage [V]')\n ax1.set_ylabel('relative gain, charge [a.u.]')\n\n ticks = [breakdown]\n ticks.extend([bias for bias in biases[::2]])\n tick_labels = ['%.1f $\\pm$ %.1f' % (breakdown, breakdown_sigma)]\n tick_labels.extend([str(bias) for bias in biases[::2]])\n ax1.set_xticks(ticks)\n ax1.set_xticklabels(tick_labels)\n ax1.grid()\n ax1.get_xticklabels()[0].set_color('r')\n\n ax2 = ax1.twinx()\n peplot = ax2.errorbar(biases, pes, yerr=pe_errs, fmt='b^', markersize=10)\n ax2.set_ylabel('pe', color='b')\n ax2.set_ylim(0, maxpe*1.105)\n ax2.set_xlim([breakdown - 0.1, max(biases) + 0.1])\n for tick in ax2.get_yticklabels():\n tick.set_color('b')\n ax1.legend([gainplot[0]]+currplot+[peplot[0]]+fitplot,\n ['gain', 'charge', 'pes', 'gain fit'],\n loc='best', numpoints=1)\n\n plt.savefig('pdfs/breakdownPlot%s.pdf' % file_descriptor)\n plt.show()\n\n quadploterrs = 0.5/np.sqrt(quad_terms)*quad_errs\n plt.errorbar(biases, np.sqrt(quad_terms)*100, yerr=quadploterrs*100, fmt='ko')\n plt.xlim(min(biases) - 0.1, max(biases) + 0.1)\n plt.xlabel('bias [V]')\n plt.ylabel('sqrt(quadratic term) [%]')\n plt.title('quadratic terms %s' % file_descriptor)\n\n plt.savefig('pdfs/quadraticTerms%s.pdf' % file_descriptor)\n plt.show()",
"def plot_results(epochs: int = 20, segments: int = 5, plot: bool = True):\n \"\"\"\n plt.figure(0)\n plot_approximation(\"product\", modelSetProd, 1, epochs, gpus=0)\n \"\"\"\n\n data = [\n {\n \"title\": \"Piecewise Discontinuous Function Approximation\",\n \"layer\": \"discontinuous\",\n \"model_set\": modelSetD,\n },\n {\n \"title\": \"Piecewise Continuous Function Approximation\",\n \"layer\": \"continuous\",\n \"model_set\": modelSetC,\n },\n {\n \"title\": \"Polynomial function approximation\",\n \"layer\": \"polynomial\",\n \"model_set\": modelSetP,\n },\n {\n \"title\": \"Fourier function approximation\",\n \"layer\": \"fourier\",\n \"model_set\": modelSetF,\n },\n ]\n\n for index, element in enumerate(data):\n if plot is True:\n plt.figure(index)\n plot_approximation(\n element[\"layer\"],\n element[\"model_set\"],\n 5,\n epochs,\n accelerator=\"cpu\",\n periodicity=2,\n )\n\n if plot is True:\n plt.title(\"Piecewise Discontinuous Function Approximation\")\n\n if plot is True:\n plt.show()",
"def plot_observer(population, num_generations, num_evaluations, args):\r\n import pylab\r\n import numpy\r\n \r\n stats = inspyred.ec.analysis.fitness_statistics(population)\r\n best_fitness = stats['best']\r\n worst_fitness = stats['worst']\r\n median_fitness = stats['median']\r\n average_fitness = stats['mean']\r\n colors = ['black', 'blue', 'green', 'red']\r\n labels = ['average', 'median', 'best', 'worst']\r\n data = []\r\n if num_generations == 0:\r\n pylab.ion()\r\n data = [[num_evaluations], [average_fitness], [median_fitness], [best_fitness], [worst_fitness]]\r\n lines = []\r\n for i in range(4):\r\n line, = pylab.plot(data[0], data[i+1], color=colors[i], label=labels[i])\r\n lines.append(line)\r\n # Add the legend when the first data is added.\r\n pylab.legend(loc='lower right')\r\n args['plot_data'] = data\r\n args['plot_lines'] = lines\r\n pylab.xlabel('Evaluations')\r\n pylab.ylabel('Fitness')\r\n else:\r\n data = args['plot_data']\r\n data[0].append(num_evaluations)\r\n data[1].append(average_fitness)\r\n data[2].append(median_fitness)\r\n data[3].append(best_fitness)\r\n data[4].append(worst_fitness)\r\n lines = args['plot_lines']\r\n for i, line in enumerate(lines):\r\n line.set_xdata(numpy.array(data[0]))\r\n line.set_ydata(numpy.array(data[i+1]))\r\n args['plot_data'] = data\r\n args['plot_lines'] = lines\r\n ymin = min([min(d) for d in data[1:]])\r\n ymax = max([max(d) for d in data[1:]])\r\n yrange = ymax - ymin\r\n pylab.xlim((0, num_evaluations))\r\n pylab.ylim((ymin - 0.1*yrange, ymax + 0.1*yrange))\r\n pylab.draw()",
"def plot_run_basic():\n # Define files prefix, model variables to plot and title\n str_learning_rate = ['lr_', 'learning_rate', 'Learning rate', 'LR']\n str_f_number = ['f_', 'F', 'Filter number', 'F']\n str_regular = ['reg_param_', 'reg_par', 'Regularization', 'RP']\n\n fig = plt.figure(figsize=(16,12))\n # Plot learning rate\n acc_span, acc_tot, setting_val = CNN.get_acc_run_basic(str_learning_rate[0], str_learning_rate[1])\n CNN.subplot_run_basic(acc_span, acc_tot, setting_val, str_learning_rate[2], str_learning_rate[3], [2,2,1])\n # Plot F number\n acc_span, acc_tot, setting_val = CNN.get_acc_run_basic(str_f_number[0], str_f_number[1])\n CNN.subplot_run_basic(acc_span, acc_tot, setting_val, str_f_number[2], str_f_number[3], [2,2,2])\n # Plot Regularization\n acc_span, acc_tot, setting_val = CNN.get_acc_run_basic(str_regular[0], str_regular[1])\n CNN.subplot_run_basic(acc_span, acc_tot, setting_val, str_regular[2], str_regular[3], [2,2,3])\n plt.suptitle('Validation accuracy - Sweep parameters', fontsize=20)\n plt.show();\n \n # Save as PDF file if wanted\n if DataLoader.SAVE_FIGURE:\n DataLoader.save_plot(fig, 'sweep_parameters_CNN.pdf')",
"def plot_stop_and_cost_output(opt_res, path, scale='log', nframes=4):\n import matplotlib\n font = {'size' : 15, 'weight': 'normal'}\n matplotlib.rc('font', **font)\n\n nplots=len(opt_res)\n ncols=2\n nrows=int(np.ceil(nframes/ncols))\n _, axes = plt.subplots(nrows, ncols, figsize=(25, 5*nrows))\n for n in range(nframes):\n i = int(np.floor(n / ncols))\n j=n % ncols\n axes[i,j].plot(opt_res[n]['stop'], label='Stopping criterion', color='b')\n axes[i,j].set_title('Outer loop # '+ str(n))\n axes[i,j].set_yscale(scale)\n # axes[i,j].set_xscale('log')\n axes[i,j].grid(True)\n axes[i,j].set_ylabel('Stopping Criterion')\n ax_2=axes[i,j].twinx()\n ax_2.plot(opt_res[n]['obj'], label='Total cost', color='g')\n ax_2.set_ylabel('Total cost')\n ax_2.set_yscale(scale)\n # axes[i,j].set_ylim([10**2,10**8])\n # axes[i,j].set_xlim([0,100])\n axes[i,j].legend(loc=1)\n ax_2.legend(loc=2)\n plt.savefig(path,transparent=True, dpi=400)",
"def _plot_experiment(df, axes, metric_name, isTrain):\n # colors: https://stackoverflow.com/questions/42086276/get-default-line-colour-cycle\n ldf = metric_short_to_long(df)\n plotted = \"Train\" if isTrain else \"Val\"\n m = ldf.query(\"stat == 'mse' and metric == @metric_name\")[[\"trial\",\"state\",\"value\"]].rename({\"value\":\"mse\"},axis=1)\n # aggregated\n ax = sns.barplot(x=\"trial\", y=\"mse\", data=m, palette=[u'#1f77b4'], ci=\"sd\", ax=axes[0])\n ax.set_ylabel(\"MSE (log)\")\n ax.set_yscale(\"log\")\n ax.set_title(f\"Aggregated State Errors ({plotted})\")\n ax.set_xlabel(\"Trial Number\")\n\n # individual state plots\n ax = sns.barplot(x=\"trial\", y=\"mse\", hue=\"state\",data=m, ci=\"sd\", ax=axes[1])\n ax.set_ylabel(\"MSE (log)\")\n ax.set_yscale(\"log\")\n ax.set_title(f\"State Error by Trial ({plotted})\")\n ax.set_xlabel(\"Trial Number\")",
"def plot_distribution(folder: str,\n neat: bool = False,\n neat_gru: bool = False,\n neat_lstm: bool = False,\n neat_sru: bool = False,\n neat_sru_s: bool = False,\n gen: int = 500,\n ):\n # Collect all the populations\n populations = []\n if neat: populations.append(D_NEAT)\n if neat_gru: populations.append(D_NEAT_GRU)\n if neat_lstm: populations.append(D_NEAT_LSTM)\n if neat_sru: populations.append(D_NEAT_SRU)\n if neat_sru_s: populations.append(D_NEAT_SRU_S)\n if len(populations) == 0: return\n \n # Collect all the measure options\n OPTIONS = ['distance', 'finished', 'fitness', 'score', 'time', 'training']\n \n # Go over all possibilities\n print(f\"\\n===> CREATING POPULATION DISTRIBUTIONS <===\")\n path = f\"population_backup/storage/{folder}/\"\n path_images = get_subfolder(path, 'images')\n for option in OPTIONS:\n plt.figure(figsize=(10, 2.5))\n min_val = float(\"inf\")\n max_val = -float(\"inf\")\n for pop in populations:\n d = load_dict(f\"{path}{pop}/evaluation/{option}\")\n dist = d[str(gen)]\n if min(dist) < min_val: min_val = min(dist)\n if max(dist) > max_val: max_val = max(dist)\n \n # Remove outliers first\n dist = sorted(dist)\n q1 = min(dist[int(round(1 / 4 * len(dist)))], dist[int(round(3 / 4 * len(dist)))])\n q3 = max(dist[int(round(1 / 4 * len(dist)))], dist[int(round(3 / 4 * len(dist)))])\n iqr = q3 - q1\n \n for i in range(len(dist) - 1, -1, -1):\n if (dist[i] < (q1 - 1.5 * iqr)) or (dist[i] > (q3 + 1.5 * iqr)): del dist[i]\n sns.distplot(dist,\n hist=False,\n kde=True,\n norm_hist=True,\n bins=100,\n color=COLORS[pop],\n kde_kws={'linewidth': 2},\n label=pop,\n )\n plt.xlim(min_val, max_val)\n # plt.title(f\"Probability density across populations for '{option}' at generation {gen}\")\n plt.xlabel(option)\n # plt.yticks([])\n plt.ylabel('probability density')\n leg = plt.legend(loc='upper center',\n bbox_to_anchor=(0.5, 1.2),\n fancybox=True,\n fontsize=8,\n ncol=len(populations))\n for line in leg.get_lines():\n line.set_linewidth(4.0)\n plt.tight_layout()\n plt.savefig(f\"{path_images}dist_{option}.png\", bbox_inches='tight', pad_inches=0.02)\n plt.savefig(f\"{path_images}dist_{option}.eps\", format='eps', bbox_inches='tight', pad_inches=0.02)\n # plt.show()\n plt.close()",
"def display_results(results, sizes):\r\n plot.xlabel('Array size')\r\n plot.ylabel('Time')\r\n plot.title('Sorting algorithms comparison')\r\n for name, result in results.items():\r\n plot.plot(sizes, result, label=name)\r\n plot.grid(True)\r\n plot.legend()\r\n plot.show()",
"def plot_scalar_reduce(self, quantity, plane, pval, draw=False, fixed=None):\n\n sim = self.sims[0]\n base = os.path.expandvars(sim.conf['General']['results_dir'])\n self.log.info('Plotting scalar reduction of %s for quantity %s' % (base, quantity))\n cm = plt.get_cmap('jet')\n max_depth = sim.conf['Simulation']['max_depth']\n period = sim.conf['Simulation']['params']['array_period']\n x = np.arange(0, period, sim.dx)\n y = np.arange(0, period, sim.dy)\n z = np.arange(0, max_depth + sim.dz, sim.dz)\n ftype = sim.conf['General']['save_as']\n if ftype == 'npz':\n globstr = os.path.join(base, 'scalar_reduce*_%s.npy' % quantity)\n files = glob.glob(globstr)\n elif ftype == 'hdf5':\n self.log.warning('FIX LOAD IN GLOBAL SCALAR REDUCE')\n globstr = os.path.join(base, 'scalar_reduce*_%s.npy' % quantity)\n files = glob.glob(globstr)\n else:\n raise ValueError('Incorrect file type in config')\n title = 'Reduction of %s' % quantity\n for datfile in files:\n p = False\n if ftype == 'npz':\n scalar = np.load(datfile)\n elif ftype == 'hdf5':\n self.log.warning('FIX LOAD IN GLOBAL SCALAR REDUCE')\n scalar = np.load(datfile)\n else:\n raise ValueError('Incorrect file type in config')\n cs = self.get_plane(scalar, plane, pval)\n if plane == 'yz' or plane == 'zy':\n labels = ('y [um]', 'z [um]', quantity, title)\n if sim.conf['General']['save_plots']:\n fname = 'scalar_reduce_%s_plane_2d_yz.png' % quantity\n p = os.path.join(base, fname)\n show = sim.conf['General']['show_plots']\n self.sims[0].heatmap2d(y, z, cs, labels, plane, pval,\n save_path=p, show=show, draw=draw, fixed=fixed)\n elif plane == 'xz' or plane == 'zx':\n labels = ('x [um]', 'z [um]', quantity, title)\n if sim.conf['General']['save_plots']:\n fname = 'scalar_reduce_%s_plane_2d_xz.png' % quantity\n p = os.path.join(base, fname)\n show = sim.conf['General']['show_plots']\n self.sims[0].heatmap2d(sim, x, z, cs, labels, plane, pval,\n save_path=p, show=show, draw=draw, fixed=fixed)\n elif plane == 'xy' or plane == 'yx':\n labels = ('y [um]', 'x [um]', quantity, title)\n if sim.conf['General']['save_plots']:\n fname = 'scalar_reduce_%s_plane_2d_xy.png' % quantity\n p = os.path.join(base, fname)\n self.sims[0].heatmap2d(sim, x, y, cs, labels, plane, pval,\n save_path=p, show=show, draw=draw, fixed=fixed)",
"def plot_lr(self, show_text=True, show_moms=True):\n phase_limits = [0]\n for nb_batch, phase in zip(self.nb_batches, self.phases):\n phase_limits.append(phase_limits[-1] + nb_batch * phase.epochs)\n if not in_ipynb():\n plt.switch_backend('agg')\n np_plts = 2 if show_moms else 1\n fig, axs = plt.subplots(1,np_plts,figsize=(6*np_plts,4))\n if not show_moms: axs = [axs]\n for i in range(np_plts): axs[i].set_xlabel('iterations')\n axs[0].set_ylabel('learning rate')\n axs[0].plot(self.iterations,self.lrs)\n if show_moms:\n axs[1].set_ylabel('momentum')\n axs[1].plot(self.iterations,self.momentums)\n if show_text: \n for i, phase in enumerate(self.phases):\n text = phase.opt_fn.__name__\n if phase.wds is not None: text+='\\nwds='+str(phase.wds)\n if phase.beta is not None: text+='\\nbeta='+str(phase.beta)\n for k in range(np_plts):\n if i < len(self.phases)-1:\n draw_line(axs[k], phase_limits[i+1])\n draw_text(axs[k], (phase_limits[i]+phase_limits[i+1])/2, text) \n if not in_ipynb():\n plt.savefig(os.path.join(self.save_path, 'lr_plot.png'))",
"def plotProp(pdict, title=None, sameax=True, showmean=True, \n bounds=[None,None]):\n try:\n pdict.pop('all stats')\n except:\n pass\n spk, groups = [], list(pdict.keys())\n fig = plt.figure()\n c_colors = {}\n \n if sameax:\n ax = fig.add_subplot(111)\n for g in range(len(groups)):\n sofar = []\n for cell in pdict[groups[g]].keys():\n if cell not in c_colors.keys():\n c_colors[cell] = np.random.random(3)\n this = [u for u in pdict[groups[g]][cell][0]]\n if len(pdict[groups[g]][cell]) > 1:\n for sp in pdict[groups[g]][cell][1]:\n this.append(sp)\n ax.plot([i for i in np.random.normal(loc=g, scale=0.1, size=len(this))], this, 'o',\n color=c_colors[cell], label=groups[g], alpha=0.3,\n markeredgecolor='none', markersize=1)\n for t in this:\n sofar.append(t)\n if showmean:\n ax.plot([g-.5,g+.5], [np.mean(sofar), np.mean(sofar)],\n '--', color='black', lw=2)\n # Cosmetics\n plt.xticks(range(len(groups)), groups, rotation=30)\n plt.ylim([bounds[0], bounds[1]])\n \n else:\n plots = [fig.add_subplot(1, len(groups)+1, p) for p in range(len(groups))]\n for g in range(len(groups)):\n for cell in pdict[groups[g]].keys():\n if cell not in c_colors.keys():\n c_colors[cell] = np.random.random(3)\n this = [u for u in pdict[groups[g]][cell][0]]\n if len(pdict[groups[g]][cell]) > 1:\n for sp in pdict[groups[g]][cell][1]:\n this.append(sp)\n plots[g].plot([i+g for i in np.random.random(len(this))], this, 'o',\n color=c_colors[cell], label=groups[g], alpha=0.3,\n markeredgecolor='none')\n \n if title:\n plt.title(title)\n plt.show()\n return",
"def visualization(obj_value):\n for n in range(3):\n plt.loglog(obj_value[n],\".\");\n\n plt.ylabel('objective values');\n plt.xlabel('iteration counter');\n plt.title('objective values for each pair against iterations');\n plt.legend();\n plt.show();",
"def basic_stats_and_plots():\n \n basename = sys.argv[1]\n ops = (\"two_opt\", \"twoh_opt\", \"three_opt\", \"three_opt_broad\", \"swap\", \"swap_adj\")\n opfs = {\n \"two_opt\": tsp.two_opt,\n \"twoh_opt\": tsp.twoh_opt,\n \"three_opt\": tsp.three_opt,\n \"three_opt_broad\": tsp.three_opt_broad,\n \"swap\": tsp.swap_two,\n \"swap_adj\": tsp.swap_adj\n }\n \n lengths = range(6, 11)\n for length in lengths:\n stddev = []\n gini = []\n nneighbours = []\n prop_unique = []\n for op in ops:\n filename = os.path.join(basename,\n \"tsp_length_%d_%s\" % (length, op),\n \"TP_row0.dat\")\n print op, length\n x = np.genfromtxt(filename)\n # stats to get:\n stddev.append(np.std(x))\n gini.append(random_walks.gini_coeff(x))\n nneighbours.append(np.sum(x > 0))\n mu, sigma = rw_experiment_with_op(length, opfs[op])\n prop_unique.append((mu, sigma))\n\n gini_barchart(length, gini, ops)\n stddev_barchart(length, stddev, ops)\n plot_gini_v_nneighbours(length, gini, nneighbours, ops)\n plot_stddev_v_nneighbours(length, stddev, nneighbours, ops)\n plot_gini_v_prop_unique(length, gini, prop_unique, ops)\n plot_stddev_v_prop_unique(length, stddev, prop_unique, ops)"
] | [
"0.66925603",
"0.63589454",
"0.6322748",
"0.6245997",
"0.602035",
"0.59249425",
"0.59024245",
"0.5690448",
"0.568816",
"0.5664423",
"0.5628853",
"0.56173986",
"0.56059617",
"0.55577713",
"0.55445033",
"0.5530686",
"0.55180305",
"0.55145836",
"0.54932714",
"0.547722",
"0.5460326",
"0.54556453",
"0.54548836",
"0.5453728",
"0.5446839",
"0.54424745",
"0.5435132",
"0.54218954",
"0.541277",
"0.5408202"
] | 0.7293934 | 0 |
Checks whether a (row, peg) combination refers to a valid position within the triangle. | def is_valid(row, peg):
return (
(row < TRI_SIZE) and
(row >= 0) and
(peg < TRI_SIZE) and
(peg >= 0) and
(peg <= row)
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __check_row(self, x: int, y: int) -> bool:\n return not any([self.__maze[x, y + i] for i in (-1, 0, 1)])",
"def _pre_check(self) -> bool:\n if self._fuse_row:\n rows = (\n self._tiling.cells_in_row(self._row_idx),\n self._tiling.cells_in_row(self._row_idx + 1),\n )\n else:\n rows = (\n self._tiling.cells_in_col(self._col_idx),\n self._tiling.cells_in_col(self._col_idx + 1),\n )\n has_a_long_row = any(len(row) > 1 for row in rows)\n if has_a_long_row:\n return False\n first_cell = next(iter(rows[0]))\n second_cell = next(iter(rows[1]))\n cells_are_adjacent = (\n first_cell[0] == second_cell[0] or first_cell[1] == second_cell[1]\n )\n if not cells_are_adjacent:\n return False\n same_basis = (\n self._tiling.cell_basis()[first_cell][0]\n == self._tiling.cell_basis()[second_cell][0]\n )\n if not same_basis:\n return False\n self._first_cell = first_cell\n self._second_cell = second_cell\n return True",
"def check_rows(self):\n\t\tfor i in range(len(self.board)):\n\t\t\tpts = 0\n\t\t\tfor j in range(len(self.board[i])):\n\t\t\t\tif self.board[i][j] == self.marker:\n\t\t\t\t\tpts+=1\n\t\t\tif pts == 3:\n\t\t\t\tprint('YOU WON')\n\t\t\t\treturn True",
"def check(self):\n for row in self.grid:\n for i in range(1, 10):\n if row.count(i) != 1:\n return False\n\n for col in range(9):\n lst = [row[col] for row in self.grid]\n for i in range(1, 10):\n if lst.count(i) != 1:\n return False\n \n for i in range(3):\n for j in range(3):\n lst = [row[j* 3:(j*3) + 3] for row in self.grid[i * 3:(i*3) + 3]] \n flat_list = []\n for k in lst:\n for number in k:\n flat_list.append(number)\n \n for check_number in range(1, 10):\n if flat_list.count(check_number) != 1:\n return False\n return True",
"def check_rows(self):\r\n for i in range(0, len(self.grid),3):\r\n if self.grid[i][-1] != ' ' and self.grid[i][-1] == self.grid[i+1][-1] and self.grid[i+1][-1] == self.grid[i+2][-1]:\r\n return (i, (self.grid[i], self.grid[i+2]))\r\n return (-1, None)",
"def valid_guess(self, row, col):\n # if row nor col is at an edge space, returns False\n if not isinstance(row, int) or not isinstance(col, int):\n return False\n # ensures no corner spaces have been selected\n if row < 1 or row > 8:\n return False\n if col < 1 or col > 8:\n return False\n return True",
"def check(self,a,x,y):\r\n return not self.exitsinrow(self.rows,x,a) and not self.existsincol(self.rows,y,a) and \\\r\n not self.exitsinblock(self.rows, x - x % 3, y - y % 3,a)",
"def checkvalid(self,borad,row,col,n):\n # check the above column has 'Q'\n i=0\n while i!=row:\n if borad[i][col]=='Q':\n return False\n i+=1\n # check the left-top 135 and right-top 45\n i,j=row-1,col-1\n while i>=0 and j>=0:\n if borad[i][j]=='Q':\n return False\n i-=1\n j-=1\n \n i,j=row-1,col+1\n while i>=0 and j<n:\n if borad[i][j]=='Q':\n return False\n i-=1\n j+=1\n \n return True",
"def isSafe(coords, row, col):\n rows = []\n cols = []\n diag_r = []\n diag_l = []\n\n for square in coords:\n rows.append(square[0])\n cols.append(square[1])\n diag_r.append(square[0] + square[1])\n diag_l.append(square[1] - square[0])\n\n if row in rows or col in cols:\n return False\n if row + col in diag_r or col - row in diag_l:\n return False\n\n return True",
"def check_quadline(self, row: int, col: int, drow: int, dcol: int) -> bool:\n count = 1\n token = self.get_token(row, col)\n count_token = 1\n while self.check_bounds(row+drow, col+dcol) and count <= 3:\n if self.grid[row+drow][col+dcol] == token:\n row += drow\n col += dcol\n count_token += 1\n if count_token == 4:\n return True\n count += 1\n return False",
"def is_up_diagonal_win(self, checker):\n for row in range(3, self.height):\n for col in range(self.width-3):\n if self.slots[row][col] == checker and \\\n self.slots[row-1][col+1] == checker and \\\n self.slots[row-2][col+2] == checker and \\\n self.slots[row-3][col+3] == checker:\n return True\n return False",
"def is_lower_triangular(self):\n self.check_square()\n\n for i in range(self.rows):\n for j in range(i+1, self.rows):\n if self[i, j] != 0:\n return False\n return True",
"def test_case_05_not_legal_triangle(self):\n self.__assert_equals_test_case([(4, 6, 11)], 'NotATriangle')",
"def check_pointing_pair(self):\n\n for index in range(self.board_size):\n squ = self.squares[index]\n nos = self.get_numbers([self.possibles[cell[0]][cell[1]] for cell in squ])\n\n for num in nos:\n s_row, s_col, found = self.same_row_col(num, squ)\n if s_row:\n row = found[0][0]\n for c in range(self.board_size):\n if (row, c) not in squ:\n if num in self.possibles[row][c]:\n self.possibles[row][c].remove(num)\n if s_col:\n col = found[0][1]\n for r in range(self.board_size):\n if (r, col) not in squ:\n if num in self.possibles[r][col]:\n self.possibles[r][col].remove(num)",
"def any_possible_moves(grid):\n if get_empty_cells(grid):\n return True\n for row in grid:\n if any(row[i]==row[i+1] for i in range(len(row)-1)):\n return True\n for i,val in enumerate(grid[0]):\n column = get_column(grid, i)\n if any(column[i]==column[i+1] for i in range(len(column)-1)):\n return True\n return False",
"def check_lost (grid):\r\n for row in range(4):\r\n for col in range(4):\r\n if grid[row][col]==0:\r\n return False\r\n if grid[0][0]==grid[0][1] or grid[0][0]==grid[1][0]:\r\n return False \r\n if grid[0][3]==grid[0][2] or grid[0][3]==grid[1][3]:\r\n return False \r\n if grid[3][0]==grid[2][0] or grid[3][0]==grid[3][1]:\r\n return False\r\n if grid[3][3]==grid[2][3] or grid[3][3]==grid[3][2]:\r\n return False \r\n if grid[0][1]==grid[0][2] or grid[0][1]==grid[1][1]:\r\n return False \r\n if grid[0][2]==grid[1][2]:\r\n return False \r\n if grid[1][1]==grid[2][1] or grid[1][1]==grid[1][2] or grid[1][1]==grid[1][0]:\r\n return False\r\n if grid[2][1]==grid[2][0] or grid[2][1]==grid[2][2] or grid[2][1]==grid[3][1]:\r\n return False \r\n if grid[1][0]==grid[2][0]:\r\n return False\r\n if grid[1][2]==grid[1][3] or grid[1][2]==grid[2][2]:\r\n return False\r\n if grid[2][2]==grid[2][3] or grid[2][2]==grid[3][2]:\r\n return False\r\n if grid[3][1]==grid[3][2]:\r\n return False\r\n else:\r\n return True",
"def check_diagonals(self):\n\t\tdiags = [[(0,0), (1,1), (2,2)], [(0,2), (1,1), (2,0)]]\n\n\t\tfor diag in diags:\n\t\t\tpts = 0\n\t\t\tfor loc in diag:\n\t\t\t\tif self.board[loc[0]][loc[1]] == self.marker:\n\t\t\t\t\tpts+=1\n\t\t\tif pts == 3:\n\t\t\t\tprint('WE WON')\n\t\t\t\treturn True",
"def check_row(row, player):\n for marker in row:\n if marker != player:\n return False\n return True",
"def row_win(board):\n\tfor row in range(3):\n\t\tif board[row][0] != EMPTY and board[row][0] == board[row][1] == board[row][2]:\n\t\t\treturn True\n\treturn False",
"def isToeplitz(mat):\n for j in range(row):\n if not checkDiag(mat, 0, j):\n return False\n for i in range(1, col):\n if not checkDiag(mat, i, 0):\n return False\n return True",
"def valid(self):\n # Verify correct vertex values\n self.verify_vertex_values()\n # Check for duplicate values in lines\n for line in range(9):\n seen = []\n for row in range(9):\n if self.grid[line][row] is None:\n pass\n elif self.grid[line][row] in seen:\n return False\n else:\n seen.append(self.grid[line][row])\n # Check for duplicate values in rows\n for row in range(9):\n seen = []\n for line in range(9):\n if self.grid[line][row] is None:\n pass\n elif self.grid[line][row] in seen:\n return False\n else:\n seen.append(self.grid[line][row])\n # Check for duplicate values in subgrids\n for (subgrid_line, subgrid_row) in [(subg_ln, subg_rw) for subg_ln in range(3) for subg_rw in range(3)]:\n seen = []\n for (line, row) in [(ln, rw) for ln in range(3) for rw in range(3)]:\n if self.grid[3*subgrid_line + line][3*subgrid_row + row] is None:\n pass\n elif self.grid[3*subgrid_line + line][3*subgrid_row + row] in seen:\n return False\n else:\n seen.append(self.grid[3*subgrid_line + line][3*subgrid_row + row])\n # No duplicates found\n return True",
"def test_case_04_legal_triangle(self):\n self.__assert_not_equal_test_case([(4, 4, 8), (4, 5, 8)], 'NotATriangle')",
"def checkRow(self, x):\n used = []\n for y in range(len(self.board[0])):\n cur = self.board[x][y]\n if cur not in used:\n if cur !=0:\n used += [cur]\n else:\n return False\n return True",
"def row1_invariant(self, target_col):\r\n # replace with your code\r\n conditions = 0\r\n current = self._grid[1][target_col] == 0\r\n if current:\r\n conditions +=1\r\n else:\r\n # print 'Tile ZERO is not at (0, %s) position' %(target_col)\r\n return False\r\n \r\n below_row = 1 + 1\r\n for _ in range(1, self._height - below_row):\r\n below_row += 1\r\n for ind in range(len(self._grid[below_row])):\r\n if self.current_position(below_row, ind) != (below_row, ind):\r\n # print 'Some tile in the lower row does not in correct place in row1_invariant' \r\n return False\r\n conditions += 1\r\n if conditions == 2:\r\n # print 'All conditions are correct!'\r\n return True",
"def _valid_placement(self, i_row, i_col):\n if not self._empty_cell(i_row, i_col):\n return (False, [])\n adj_opp_cells = []\n\n if (i_row, i_col) == self._tl_cell:\n self._handle_border(i_row, i_col, adj_opp_cells, self._check_ls_corners, \"tl\")\n elif (i_row, i_col) == self._tr_cell:\n self._handle_border(i_row, i_col, adj_opp_cells, self._check_rs_corners, \"tr\")\n elif (i_row, i_col) == self._bl_cell:\n self._handle_border(i_row, i_col, adj_opp_cells, self._check_ls_corners, \"bl\")\n elif (i_row, i_col) == self._br_cell:\n self._handle_border(i_row, i_col, adj_opp_cells, self._check_rs_corners, \"br\")\n elif (i_row, i_col) in self._ls_cells:\n self._handle_border(i_row, i_col, adj_opp_cells, self._check_ls_and_rs, \"ls\")\n elif (i_row, i_col) in self._ts_cells:\n self._handle_border(i_row, i_col, adj_opp_cells, self._check_ts_and_bs, \"ts\")\n elif (i_row, i_col) in self._rs_cells:\n self._handle_border(i_row, i_col, adj_opp_cells, self._check_ls_and_rs, \"rs\")\n elif (i_row, i_col) in self._bs_cells:\n self._handle_border(i_row, i_col, adj_opp_cells, self._check_ts_and_bs, \"bs\")\n else:\n self._check_inner_dirs(i_row, i_col, adj_opp_cells)\n\n #print(\"\\nFOR TESTING. adj_opp_cells: \", adj_opp_cells)\n\n if adj_opp_cells == []:\n return (False, [])\n else:\n can_place, flip_lst = self._flip_dirs(adj_opp_cells)\n return (can_place, flip_lst)",
"def chk_hor_sym(self):\n for row in self.rows:\n rrow = copy(row)\n rrow.reverse()\n for i in xrange(int(round(len(row)/2))):\n if row[i] == rrow[i]:\n continue\n else:\n return False\n return True",
"def _point_in_tris(self, pos, obj):\n these_tris = obj._tris['fill'].reshape(-1, 3)\n for tri in these_tris:\n if self._point_in_tri(pos, obj._points['fill'][tri]):\n return True\n return False",
"def TestRow(SudokuGrid):\r\n for i in range(9):\r\n for j in range(8):\r\n for k in range(j+1,9):\r\n if SudokuGrid[i][j]==SudokuGrid[i][k]:\r\n return False\r\n return True",
"def lower_row_invariant(self, target_row, target_col):\r\n conditions = 0\r\n curent = self._grid[target_row][target_col] == 0\r\n if curent:\r\n conditions +=1\r\n else:\r\n print 'Tile ZERO is not at current position'\r\n return False\r\n\r\n last_row_ind = self._height - 1\r\n if target_row != last_row_ind:\r\n lower_row = target_row + 1\r\n for ind in range(len(self._grid[lower_row])):\r\n if self.current_position(lower_row, ind) != (lower_row, ind):\r\n print 'Some tile in the lower row does not in correct place' \r\n return False\r\n conditions += 1\r\n # print len(self._grid[target_row])\r\n # print self._grid[target_row]\r\n # print self._grid[target_row][target_col+1:]\r\n right_part = self._grid[target_row][target_col+1:]\r\n \r\n for tile in range(1,len(right_part)+1):\r\n # print right_part.index(self._grid[target_col+1])\r\n # print tile\r\n # print self.current_position(target_row, target_col + tile)\r\n # print (target_row, target_col+tile)\r\n if self.current_position(target_row, target_col+tile) != (target_row, target_col+tile):\r\n print 'Right part tile does not in correct place'\r\n return False\r\n conditions +=1\r\n if conditions == 3:\r\n print 'All conditions are correct!'\r\n return True",
"def is_valid_position(self, somerow, somecol):\n valid_row = 0 <= somerow <= (self.size-1)\n valid_col = 0 <= somecol <= (self.size-1)\n #need to use self.size - 1 because while we're thinking of an 8x8 chess board, the computer is thinking of a 0x7 chess board\n return valid_row and valid_col"
] | [
"0.7280164",
"0.68686604",
"0.6789244",
"0.67537653",
"0.6733248",
"0.6684262",
"0.6682173",
"0.65795547",
"0.6564693",
"0.65318656",
"0.6483577",
"0.6470317",
"0.6459898",
"0.6441177",
"0.64353055",
"0.64183944",
"0.63982993",
"0.63813514",
"0.6358549",
"0.6357853",
"0.6351154",
"0.6349884",
"0.6341581",
"0.63376486",
"0.63248044",
"0.6308865",
"0.6308499",
"0.63013476",
"0.6290936",
"0.62839085"
] | 0.79514277 | 0 |
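A minimal usage sketch of the `is_valid` record above, added only to make the snippet easier to follow in this dump. It assumes `TRI_SIZE = 5` (the classic 15-hole triangular peg board); that constant is referenced but never defined in the record itself, so the value here is an assumption.

```python
# Hypothetical usage sketch for the is_valid() record above.
# Assumption: TRI_SIZE = 5, i.e. the classic 15-hole triangular board.
TRI_SIZE = 5

def is_valid(row, peg):
    return (
        (row < TRI_SIZE) and (row >= 0) and
        (peg < TRI_SIZE) and (peg >= 0) and
        (peg <= row)
    )

assert is_valid(0, 0)        # apex of the triangle
assert is_valid(4, 2)        # middle of the bottom row
assert not is_valid(2, 3)    # peg index past the end of row 2
assert not is_valid(5, 0)    # row index off the board
```

Note that `peg <= row` together with `row < TRI_SIZE` already implies `peg < TRI_SIZE`, so that extra bound is redundant but harmless.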
Returns a copy of the triangle (faster than deepcopy). | def copy_triangle(tri):
return [[peg for peg in row] for row in tri] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def triangle(self):\n [r,c] = self.D\n m = min(r,c)\n S = self\n T = zeros(r,c)\n while m > 0:\n NoLigne = 0\n while S[NoLigne, 0] == 0 and (NoLigne < m - 1):\n NoLigne += 1\n S = S.swap(NoLigne,0)\n if S[0, 0] != 0:\n pivot = S[0,0]\n for k in range(1,m):\n if S[k,0] != 0:\n S = S.comb_lignes(pivot, -S[k,0],k,0)\n #print(\"pivot = \"+str(pivot))\n #print(\"S dans for :\")\n #print(S)\n T = T.remplace_ligned(r - m,S.F)\n #print(\"Évolution de T :\")\n #print(T)\n S = S.decoupe()\n m -= 1\n return T",
"def copy(self):\n return vertex(self.x, self.y, self.z)",
"def triangle(self):\n \n R = Householder.triangle_operation(self)[0] \n \n return(R.round(10))",
"def _triangulateSelf(self, pointIndex, _triangleList):\n if pointIndex in (self.pointIndex0, self.pointIndex1, self.pointIndex2):\n return []\n # the new point always takes the original triangle's point1\n pInd2 = self.pointIndex2\n pInd1 = self.pointIndex1\n pInd0 = self.pointIndex0\n\n self.pointIndex1 = pointIndex\n # create the new triangles\n newTriangle1 = ConstrainedDelaunayAdjacencyTriangle(pInd0, pInd1, pointIndex,\n self._primitiveInterface.vdata,\n self._primitiveInterface.primitives,\n self._rewriter)\n newTriangle2 = ConstrainedDelaunayAdjacencyTriangle(pointIndex, pInd1, pInd2,\n self._primitiveInterface.vdata,\n self._primitiveInterface.primitives,\n self._rewriter)\n listToFix = [newTriangle1, newTriangle2]\n listToFix.append(self)\n naybs = list(self.getNeighbors(includeEmpties=False))\n for n in range(0, len(naybs)):\n naybs[n] = _triangleList[naybs[n]]\n listToFix.extend(naybs)\n ConstrainedDelaunayAdjacencyTriangle.setAllNeighbors(listToFix, _triangleList)\n return [newTriangle1, newTriangle2]",
"def create_pascal_triangle(self, n):\r\n if n == 0:\r\n return list()\r\n\r\n # Initialize full cache array\r\n c = [[None] * i\r\n for i\r\n in range(1, n + 1, 1)]\r\n\r\n for i in range(0, n, 1):\r\n for j in range(0, i + 1, 1):\r\n # Iterate over level-depth \"i\" and node position \"j\"\r\n\r\n if (j == 0 or\r\n j == i):\r\n # Evaluate as outer triangle edge\r\n c[i][j] = 1\r\n\r\n else:\r\n # Evaluate as sum of corresponding previous elements\r\n c[i][j] = c[i - 1][j - 1] + c[i - 1][j]\r\n\r\n return c",
"def triangulate(self):\n npts = self._vertices.shape[0]\n if np.any(self._vertices[0] != self._vertices[1]):\n # start != end, so edges must wrap around to beginning.\n edges = np.empty((npts, 2), dtype=np.uint32)\n edges[:, 0] = np.arange(npts)\n edges[:, 1] = edges[:, 0] + 1\n edges[-1, 1] = 0\n else:\n # start == end; no wrapping required.\n edges = np.empty((npts-1, 2), dtype=np.uint32)\n edges[:, 0] = np.arange(npts)\n edges[:, 1] = edges[:, 0] + 1\n\n tri = Triangulation(self._vertices, edges)\n tri.triangulate()\n return tri.pts, tri.tris",
"def simplify(phi):\n\n # 1. only manipulate the copy\n #phic = copy.deepcopy(phi)\n #return phic\n pass",
"def triangle_operation(self):\n \n shape = np.shape(self)\n m,n = shape\n \n R = np.copy(self)\n \n #Dependent on the shape of the matrix you have to do the transformation on a \n #different number r of columns\n if m > n:\n r = n\n elif n > m:\n r = m\n else:\n r = n - 1\n \n # Create identity matrix I of same size as A\n I = np.zeros(m*r).reshape(m,r)\n \n I[:r] = np.identity(r)\n\n # Create list_v \n list_v = []\n\n # write out vectors a and e of decreasing size from the columns of R and I \n \n for j in list(range(r)): \n a = [row[j] for row in R[j:]] # j'th column of A but only the m-i last rows.\n e = [row[j] for row in I[j:]] # same for the identity matrix\n \n a = np.array(a)\n e = np.array(e)\n sigma = np.linalg.norm(a) # this is the norm of the vector/column of A \n v = a.reshape(m-j,1) + (np.dot(sigma, e.reshape(m-j,1))) # v = x + sigma * e\n list_v.append(v)\n\n H = Reflection(list_v[j]) # calculate the Housholder transformation for the vector v\n R = H * R # apply the transformation to the matrix A and obtain R stepwise\n\n \n return(R, list_v)",
"def triangle(halfSideLength = 15, robotHeight = -90):\n# ^ \n# / \\ \n# / \\ \n# / \\ \n# /_______\\\n# \n# | a | \n# a = halfSideLength\n\n hHalf = (halfSideLength * m.sqrt(3)/2)/2\n\n posTriangle = [\n [-hHalf,halfSideLength,robotHeight,0,0,0,'mov'],\n [-hHalf,-halfSideLength,robotHeight,0,0,0,'lin'],\n [hHalf,0,robotHeight,0,0,0,'lin'],\n [-hHalf,halfSideLength,robotHeight,0,0,0,'lin'],\n [0,0,-127,0,0,0,'mov']\n ]\n\n return posTriangle",
"def copy(self):\n newVertices = [v.copy() for v in self.vertices]\n return face(newVertices)",
"def triangleFunction(self):\n \n w = np.zeros((self.N))\n l = self.l\n for i in range(self.r.shape[0]):\n r = np.abs(self.r[i])\n if r <= l:\n tf = lambda r,l : 1 - r/l\n w[i] = tf(r,l)\n else:\n w[i] = 0\n self.w = w",
"def tri(self):\n if self._tri is None:\n self._tri = mtri.Triangulation(self.meshx[:self.npoin2],\n self.meshy[:self.npoin2],\n self.ikle2)\n\n return self._tri",
"def create_intermediate_triangle(source_tri, target_tri, alpha):\n return ((get_point_in_segment(source_tri[0], target_tri[0], alpha)),\n (get_point_in_segment(source_tri[1], target_tri[1], alpha)),\n (get_point_in_segment(source_tri[2], target_tri[2], alpha)))",
"def triangle(length=40.0, r=3.175 / 2):\n\t# equilateral triangle:\n\ta = np.array([0, 0])\n\tb = np.array([length, 0])\n\tc = np.array([length / 2, length * math.sin(math.pi / 3)])\n\ttri_pts = PolyLine([a, b, c, a])\n\toffs_pts = addOffset(tri_pts, r)\n\ttri_pts = centerObjects(offs_pts, tri_pts)\n\treturn tri_pts, offs_pts",
"def split_triangles(mesh):\n triangles = np.asarray(mesh.triangles).copy()\n vertices = np.asarray(mesh.vertices).copy()\n\n triangles_3 = np.zeros_like(triangles)\n vertices_3 = np.zeros((len(triangles) * 3, 3), dtype=vertices.dtype)\n\n for index_triangle, t in enumerate(triangles):\n index_vertex = index_triangle * 3\n vertices_3[index_vertex] = vertices[t[0]]\n vertices_3[index_vertex + 1] = vertices[t[1]]\n vertices_3[index_vertex + 2] = vertices[t[2]]\n\n triangles_3[index_triangle] = np.arange(index_vertex, index_vertex + 3)\n\n mesh_return = deepcopy(mesh)\n mesh_return.triangles = o3d.utility.Vector3iVector(triangles_3)\n mesh_return.vertices = o3d.utility.Vector3dVector(vertices_3)\n mesh_return.triangle_normals = mesh.triangle_normals\n mesh_return.paint_uniform_color([0.5, 0.5, 0.5])\n return mesh_return",
"def pascal_triangle(n):\n triangle = []\n begin = 1\n for y in range(0, n):\n row = []\n for x in range(0, y + 1):\n if y == 0 or x == 0 or (y > 0 and x == y):\n row.append(begin)\n else:\n row.append(triangle[y - 1][x] + triangle[y - 1][x - 1])\n triangle.append(row)\n return triangle",
"def _triangulate(self,x):\n\n t = tr.triangulate({\"vertices\": x},\"-n\")\n tri = t[\"triangles\"]\n neighbours = t[\"neighbors\"]\n\n b_cells = np.zeros(self.n_c)\n b_cells[self.n_C:] = 1\n\n three_b_cell_mask = b_cells[tri].sum(axis=1)==3\n tri = tri[~three_b_cell_mask]\n\n neigh_map = np.cumsum(~three_b_cell_mask)-1\n neigh_map[three_b_cell_mask] = -1\n neigh_map = np.concatenate((neigh_map,[-1]))\n\n neighbours = neighbours[~three_b_cell_mask]\n neighbours = neigh_map[neighbours]\n\n #6. Store outputs\n self.tris = tri\n self.n_v = tri.shape[0]\n self.Cents = x[self.tris]\n self.vs = self.get_vertex()\n\n\n #7. Manually calculate the neighbours. See doc_string for conventions.\n self.v_neighbours = neighbours\n self.neighbours = self.vs[neighbours]\n self.neighbours[neighbours == -1] = np.nan\n\n self.reset_k2s()",
"def triangle(n):\n return n*(n+1)/2",
"def get_triangles( self, N ):\n\n # store N as an instance variable\n self.N = N\n\n # initialize array to store locations of points for all triangles in the\n # tessellation sequence\n self.triangles = np.zeros( ( self.N, 3, 2 ) )\n\n # define points of the first triangle in the tessellation sequence\n point_c = np.array( [ 0, 0 ] )\n point_b = self.a * np.array( [ np.cos( self.C ), np.sin( self.C ) ] )\n point_a = np.array( [ self.b, 0 ] )\n\n # stack the points into a single array of shape (3, 2 )\n triangle = np.vstack( [ point_c, point_b, point_a ] )\n\n # loop over the number of triangles in the sequence\n for i in range( self.N ):\n\n # store the points of the i-th triangle in the array\n self.triangles[ i ] = triangle\n\n # compute the next triangle in the tessellation sequence\n triangle = self.next_triangle( triangle = triangle )\n\n # shift the next triangle in the tessellation sequence such that its\n # point C is in the same location as point B of the previous triangle\n triangle += ( self.triangles[ i - 1, 1 ] - self.triangles[ 0, 0 ] )",
"def triangle(n):\n j = 1\n k = 1\n result = []\n for num in range(1, n + 1):\n result.append(num)\n j = j + 1\n k = k + j\n\n return result",
"def triangle(n):\n return (n * (n + 1)) / 2",
"def triangle(row):\n if len(row) == 0:\n raise ValidationError(\"Row empty\")\n if len(row) == 1:\n return row\n if len(row) < 8:\n return small_triangles(row)\n make_steps()\n while len(row) > 50:\n streams = defaultdict(list)\n j = 0\n streams[j] = list(row)\n for i in range(len(row) - 4):\n for j in range(10):\n step = j * 4\n if i >= step:\n streams[j + 1].append(do_row(streams[j], i - step))\n row = streams[j]\n return small_triangles(row)",
"def triangle(self, freq: int, /) -> None:",
"def pascal_triangle(n):\n if n <= 0:\n return []\n\n a = [[1 for i in range(1, j + 1)] for j in range(1, n + 1)]\n for i in range(2, n):\n for j in range(1, i):\n a[i][j] = a[i - 1][j - 1] + a[i - 1][j]\n\n return a",
"def inverse_copy(self):\n\n return Table(self.right, self.left)",
"def next_triangle( self, triangle ):\n\n # initialize rotation matrix to rotate by angle ``A``\n rot_mat = np.array( [\n [ np.cos( self.A ), np.sin( self.A ) ],\n [ -np.sin( self.A ), np.cos( self.A ) ] ] )\n\n # initialize next triangle and translate it such that point C is at the\n # origin\n _next_triangle = triangle.copy( )\n rot_shift = _next_triangle[ 0 ]\n _next_triangle -= rot_shift\n\n # rotate each point in the next triangle by angle ``A`` using the rotation\n # matrix\n for i in range( 3 ):\n _next_triangle[ i ] = np.dot( rot_mat, _next_triangle[ i ] )\n\n # scale the next triangle by the scaling factor\n _next_triangle *= self.s\n\n # translate the next triangle such that point C is at point B of the\n # original triangle\n _next_triangle += rot_shift\n _next_triangle += ( triangle[ 1 ] - triangle[ 0 ] )\n\n return _next_triangle",
"def copy(self):\n return Vector(self.x, self.y)",
"def copy(self):\n cpy = deepcopy(self)\n # usually we use copy to perform transformations on the board\n # so it's good to reset memoized values\n cpy._memoized_compact = None \n return cpy",
"def triangulate(polyline):\n\timport p2t\n\tfrom ynlib.beziers import Point\n\n\t# Convert into p2t Points\n\tfor p in polyline:\n\t\tp = p2t.Point(p.x, p.y)\n\t\n\tcdt = p2t.CDT(polyline)\n\tp2ttriangles = cdt.triangulate()\n\t\n\ttriangles = []\n\tfor t in p2ttriangles:\n\t\ttriangles.append( (Point(t.a.x, t.a.y), Point(t.b.x, t.b.y), Point(t.c.x, t.c.y)) )\n\n\treturn triangles",
"def copy(self):\n vList = GeneralVertexList(len(self.V))\n vList.setVertices(list(self.V.values()))\n return vList"
] | [
"0.6823598",
"0.642301",
"0.6033022",
"0.6030731",
"0.5999631",
"0.5969139",
"0.59297556",
"0.58959395",
"0.58513814",
"0.58339965",
"0.57237965",
"0.5711828",
"0.56681204",
"0.560612",
"0.55897707",
"0.5587957",
"0.5586594",
"0.55755776",
"0.5572819",
"0.5558009",
"0.55569875",
"0.55534035",
"0.5532185",
"0.55311036",
"0.55038047",
"0.5496597",
"0.54813373",
"0.54678047",
"0.5435692",
"0.54319286"
] | 0.720594 | 0 |
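An illustrative sketch of the `copy_triangle` record above, showing why the nested comprehension can stand in for `copy.deepcopy` here. The rows-of-booleans board layout is an assumption carried over from the neighbouring records, not something this record states explicitly.

```python
# Illustrative sketch for the copy_triangle() record above.
# Assumption: the triangle is a list of boolean rows, one list per board row.
def copy_triangle(tri):
    return [[peg for peg in row] for row in tri]

original = [[True], [True, False], [True, True, True]]
clone = copy_triangle(original)

clone[1][1] = True              # mutate the copy only
assert original[1][1] is False  # the source triangle is untouched
assert clone is not original
assert clone[1] is not original[1]
```

Because every row list is rebuilt element by element, mutating the copy never leaks back into the original, which is all a solver needs on this flat boolean structure.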
Performs a jump from an occupied (row, peg) position A to an unoccupied position C, passing over an occupied B. Returns False without modifying the triangle if the jump is not legal; otherwise performs the jump and returns True. | def jump(tri, A, B, C):
start_row, start_peg = A
mid_row, mid_peg = B
end_row, end_peg = C
    # Check to make sure A is occupied and the destination C is clear
if tri[start_row][start_peg] == False: return False
if tri[end_row][end_peg]: return False
# Make sure we're jumping over an occupied space.
if tri[mid_row][mid_peg] == False: return False
# Clear B, clear A and set C
tri[mid_row][mid_peg] = False
tri[start_row][start_peg] = False
tri[end_row][end_peg] = True
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def jump(self, j_orig, j_over, j_land):\n orig_x, orig_y = j_orig\n over_x, over_y = j_over\n land_x, land_y = j_land\n\n # indexes for each square\n orig_i = orig_y * self.ncols + orig_x\n over_i = over_y * self.ncols + over_x\n land_i = land_y * self.ncols + land_x\n\n # piece for quicker access\n orig_p = self.squares[orig_i]\n over_p = self.squares[over_i]\n land_p = self.squares[land_i]\n\n if orig_p.can_jump(j_orig, [(j_over, over_p)], [(j_land, land_p)]):\n self.squares[land_i] = self.squares[orig_i]\n self.squares[over_i], self.squares[orig_i] = None, None\n return True\n return False",
"def checkPossibleMoves():\n for row in range(9):\n for column in range(7):\n if board[row][column] == board[row][column+1]: #A\n a = board[row][column]\n if column != 6: #column +3 would lead to an error\n if a == board[row+1][column+2] or a == board[row][column+3] or a == board[row-1][column+2] or a == board[row-1][column-1] or a == board[row][column-2] or a ==board[row+1][column-1]:\n return False\n else: \n if a == board[row+1][column+2] or a == board[row-1][column+2] or a == board[row-1][column-1] or a == board[row][column-2] or a ==board[row+1][column-1]:\n return False\n if board[row][column] == board[row][column+2]: # B\n if board[row][column] == board[row+1][column+1] or board[row][column] == board[row-1][column+1]:\n return False\n\n if board[row][column] == board[row+1][column]: #C\n a = board[row][column]\n if row != 8: #row +3 would lead to an error\n if a == board[row-1][column+1] or a == board[row-2][column] or a == board[row-1][column-1] or a == board[row+2][column-1] or a == board[row+3][column] or a == board[row+2][column+1]:\n return False\n else:\n if a == board[row-1][column+1] or a == board[row-2][column] or a == board[row-1][column-1] or a == board[row+2][column-1] or a == board[row+2][column+1]:\n return False\n\n if board[row][column] == board[row+2][column]: #D\n if board[row][column] == board[row+1][column-1] or board[row][column] == board[row+1][column+1]:\n return False\n return True",
"def check(self,a,x,y):\r\n return not self.exitsinrow(self.rows,x,a) and not self.existsincol(self.rows,y,a) and \\\r\n not self.exitsinblock(self.rows, x - x % 3, y - y % 3,a)",
"def op_jump_postconditions(self,oldPieceCoords,newPieceCoords):\n\n # Start of new state constrution\n next_gs_board = Board.from_binary_matrix(self.board)\n next_gs_board.set_element(newPieceCoords[0], newPieceCoords[1], self.curr_player)\n next_gs_board.remove_element(oldPieceCoords[0], oldPieceCoords[1])\n next_gs_next_player = self.curr_player\n next_gs_next_move = self.FREE\n next_gs_next_pieces = set()\n\n\n new_gs = Eximo(next_gs_next_player,next_gs_next_move,next_gs_next_pieces,next_gs_board)\n\n # Check if moved piece has reached opposite side\n if(new_gs.reach_otherside(newPieceCoords)):\n new_gs.board.remove_element(newPieceCoords[0], newPieceCoords[1])\n new_gs.next_move = self.ADDPIECE_2\n new_gs.next_pieces = new_gs.addition_viable_tiles()\n new_gs.perform_checkup()\n\n # Check if the next move must also be a jump by the same player\n elif(new_gs.can_jump(newPieceCoords)):\n new_gs.next_move = self.JUMP\n new_gs.next_pieces = {newPieceCoords}\n\n else:\n new_gs.curr_player = self.get_enemy(self.curr_player)\n\n # Check if the next_piece checkup needs to be made\n if new_gs.curr_player == self.get_enemy(self.curr_player):\n new_gs.perform_checkup()\n\n new_gs.last_piece = newPieceCoords\n\n return new_gs",
"def op_jump_neast(self,piece):\n\n # Check common preconditions\n if(not self.op_jump_preconditions(piece)):\n return False\n\n # Check particular preconditions\n if not self.op_jump_neast_pre(piece):\n return False\n\n # Variable extraction\n piece_x = piece[0]\n piece_y = piece[1]\n\n dir_ = self.get_direction()\n lastPieceCoords = (piece_x - (2 * dir_),piece_y + (2 * dir_))\n\n # End of pre conditions\n\n return self.op_jump_postconditions(piece,lastPieceCoords)",
"def _pre_check(self) -> bool:\n if self._fuse_row:\n rows = (\n self._tiling.cells_in_row(self._row_idx),\n self._tiling.cells_in_row(self._row_idx + 1),\n )\n else:\n rows = (\n self._tiling.cells_in_col(self._col_idx),\n self._tiling.cells_in_col(self._col_idx + 1),\n )\n has_a_long_row = any(len(row) > 1 for row in rows)\n if has_a_long_row:\n return False\n first_cell = next(iter(rows[0]))\n second_cell = next(iter(rows[1]))\n cells_are_adjacent = (\n first_cell[0] == second_cell[0] or first_cell[1] == second_cell[1]\n )\n if not cells_are_adjacent:\n return False\n same_basis = (\n self._tiling.cell_basis()[first_cell][0]\n == self._tiling.cell_basis()[second_cell][0]\n )\n if not same_basis:\n return False\n self._first_cell = first_cell\n self._second_cell = second_cell\n return True",
"def op_jump_neast_pre(self,piece):\n\n # Check particular preconditions\n\n # Check for board-end/right-side colisions\n if self.curr_player == self.PLAYER1:\n if piece[1] <= 2 or piece[0] >= 7:\n return False\n else:\n if piece[1] >= 7 or piece[0] <= 2:\n return False\n\n dir_ = self.get_direction()\n lastPieceCoords = (piece[0] - (2 * dir_),piece[1] + (2 * dir_))\n\n firPieceValue = self.board.get_element(piece[0], piece[1])\n medPieceValue = self.board.get_element(piece[0] - (1 * dir_), piece[1] + (1 * dir_))\n lstPieceValue = self.board.get_element(*lastPieceCoords)\n\n # Check if destination tile is available\n if lstPieceValue != self.EMPTY :\n return False\n\n # Check if the intermediate piece belongs to the jumping player\n if medPieceValue != firPieceValue:\n return False\n\n return True",
"def test(brickheight,bricklength,row,column,walllength,wallheight,occupied,answer):\n if brickheight>wallheight or bricklength>walllength:\n return False\n elif over(brickheight,bricklength,row,column,walllength,wallheight):\n return False\n else:\n for x in range(column,column+bricklength):\n for y in range(row,row+brickheight):\n if (x,y) in occupied:\n return False \n break\n else:\n return True",
"def op_jump_preconditions(self,piece):\n\n # Flag must be FREE or JUMP\n if(self.next_move == self.CAPT or self.next_move == self.ADDPIECE_1 or self.next_move == self.ADDPIECE_2):\n return False\n\n # Check if the piece is in the next pieces (deals with obligatory jumps)\n if(self.next_move == self.JUMP and piece not in self.next_pieces):\n return False\n\n return True",
"def op_jump_nwest(self,piece):\n\n # Check common preconditions\n if(not self.op_jump_preconditions(piece)):\n return False\n\n # Check particular preconditions\n\n if not self.op_jump_nwest_pre(piece):\n return False\n\n # Variable extraction\n piece_x = piece[0]\n piece_y = piece[1]\n\n\n dir_ = self.get_direction()\n lastPieceCoords = ( piece_x + (2 * dir_),piece_y + (2 * dir_),)\n\n # End of pre conditions\n\n return self.op_jump_postconditions(piece,lastPieceCoords)",
"def jump(neighbour, with_piece):\n return Coords(2 * neighbour.q - with_piece.q,\n 2 * neighbour.r - with_piece.r)",
"def __check_and_join_row(self, x: int, y: int, tree: int, increment: int) -> bool:\n for m in [self.__maze[x + (2 * increment), y + i] for i in (-1, 0, 1)]:\n # if any square maps to a different maze connect it and redo the mappings\n if m == 0:\n continue\n main_tree = self.__mappings.get(m, tree)\n if main_tree != tree:\n self.__activate(x + increment, y, tree)\n self.__activate(x + (2 * increment), y, tree)\n self.__remap(tree, main_tree)\n return False\n return True",
"def test_maze_entrance_adjacent_are_blocked(self):\n maze = Maze(100, 100)\n\n row, col = maze.entrance\n if row - 1 >= 0:\n maze.grid[row - 1][col].blocked = True\n if col + 1 < 100:\n maze.grid[row][col + 1].blocked = True\n if row + 1 < 100:\n maze.grid[row + 1][col].blocked = True\n if col - 1 >= 0:\n maze.grid[row][col - 1].blocked = True\n\n self.assertFalse(maze.can_reach_exit([row, col]))",
"def check_path(self, cur_pos, new_pos, board, state):\n\n new_row = self.ind(new_pos)[0]\n new_col = self.ind(new_pos)[1]\n cur_row = self.ind(cur_pos)[0]\n cur_col = self.ind(cur_pos)[1]\n cannon_pieces = [Cannon('BLUE'), Cannon('RED')]\n \n # Ensures the range is always in the right order\n if new_row > cur_row: \n ran_r = range(cur_row + 1, new_row, 1)\n elif cur_row > new_row:\n ran_r = range(cur_row - 1, new_row, -1)\n \n elif new_col > cur_col:\n ran_c = range(cur_col + 1, new_col, 1)\n elif cur_col > new_col:\n ran_c = range(cur_col - 1, new_col, -1)\n else:\n return False\n \n # Checking if the movement is left or right is legal\n if new_row == cur_row:\n print(\"it's in the new_row == cur_row\")\n # Check if there is a legal piece (a non-Cannon) is contained in the path\n counter = 0\n print(counter)\n for col_spot in ran_c:\n if board[cur_row][col_spot] is not None:\n counter += 1\n\n if counter == 0: \n print(\"jump!\")\n return True\n \n # Checking if the movement vertical is legal\n if new_col == cur_col:\n print(\"it's in the new_col == cur_col\")\n # Check if there is a legal piece (a non-Cannon) is contained in the path\n counter = 0\n for row_spot in ran_r:\n if board[row_spot][cur_col] is not None:\n counter += 1\n print(board[row_spot][cur_col])\n print(counter)\n if counter == 0:\n print(\"jump!\")\n return True",
"def _canJump(cls, target, piece, layout):\n # figure out the jump direction. Assume that the target is\n # legal as far as going forard/backward and don't check color\n # or king status\n if target == cls.adjacency_matrix[piece][cls.FORWARD][cls.LEFT]:\n landing_sq = cls.adjacency_matrix[target][cls.FORWARD][cls.LEFT]\n elif target == cls.adjacency_matrix[piece][cls.FORWARD][cls.RIGHT]:\n landing_sq = cls.adjacency_matrix[target][cls.FORWARD][cls.RIGHT]\n elif target == cls.adjacency_matrix[piece][cls.BACK][cls.LEFT]:\n landing_sq = cls.adjacency_matrix[target][cls.BACK][cls.LEFT]\n elif target == cls.adjacency_matrix[piece][cls.BACK][cls.RIGHT]:\n landing_sq = cls.adjacency_matrix[target][cls.BACK][cls.RIGHT]\n\n if landing_sq is None:\n return None\n\n # check if there's an unoccupied square to land in\n if cls._getColorAt(landing_sq, layout) == cls.Pieces.NONE:\n return landing_sq\n\n return None",
"def JMP():\n global pointer, memory\n pointer = memory[pointer + 0x02]",
"def test_cell_existence(board: list, i: int, j: int) -> bool:\n return not (i < 0 or i > len(board)-1 or j < 0 or j > len(board)-1)",
"def op_jump_nwest_pre(self, piece):\n\n # Check particular preconditions\n\n # Check for board-end/left-side colisions\n if self.curr_player == self.PLAYER1:\n if piece[1] <= 2 or piece[0] <= 2:\n return False\n else:\n if piece[1] >= 7 or piece[0] >= 7:\n return False\n\n dir_ = self.get_direction()\n lastPieceCoords = ( piece[0] + (2 * dir_),piece[1] + (2 * dir_))\n\n firPieceValue = self.board.get_element(piece[0], piece[1])\n medPieceValue = self.board.get_element(piece[0] + (1 * dir_),piece[1] + (1 * dir_))\n lstPieceValue = self.board.get_element(*lastPieceCoords)\n\n # Check if destination tile is available\n if lstPieceValue != self.EMPTY :\n return False\n\n # Check if the intermediate piece belongs to the jumping player\n if medPieceValue != firPieceValue:\n return False\n\n return True",
"def _checkCells(self):\r\n if(self.startCell.isEmpty()):\r\n raise IllegalMoveException(\"No pawn in start cell\")\r\n if(self.endCell.isOccupied()):\r\n raise IllegalMoveException(\"Targeted cell is already occupied\")\r\n return True",
"def _is_occupied(\n grid: List[List[str]], row: int, col: int, dx: int, dy: int) -> bool:\n while 0 <= (row + dy) < len(grid) and 0 <= (col + dx) < len(grid[0]):\n row += dy\n col += dx\n if grid[row][col] == 'L':\n return False\n if grid[row][col] == '#':\n return True\n return False",
"def can_jump(self, orig_pos, over_pos, land_pos):\n possibles = []\n # test the possible jumps first\n for i, _ in enumerate(over_pos): # same length as land_pos\n if over_pos[i][1] and self != over_pos[i][1]:\n possibles.append(land_pos[i])\n return self.can_move(orig_pos, possibles)",
"def isJump(self) -> bool:\n ...",
"def enter_loop(self):\n if (self.tape.current_cell()==0):\n # Jump past the end.\n self.instruction_pointer = (self.jump_map[self.instruction_pointer])\n else:\n pass",
"def is_valid(row, peg):\n return (\n (row < TRI_SIZE) and\n (row >= 0) and\n (peg < TRI_SIZE) and\n (peg >= 0) and\n (peg <= row)\n )",
"def is_jumping(self):\n if(self.going_down or self.going_up or self.mid_air):\n return True\n else:\n return False",
"def op_jump_north_pre(self,piece):\n\n # Check particular preconditions\n\n # Check for board-end colisions\n if self.curr_player == self.PLAYER1:\n if piece[1] <= 2:\n return False\n else:\n if piece[1] >= 7:\n return False\n\n dir_ = self.get_direction()\n lastPieceCoords = (piece[0],piece[1] + (2 * dir_))\n firPieceValue = self.board.get_element(piece[0], piece[1])\n medPieceValue = self.board.get_element(piece[0], piece[1] + (1 * dir_))\n lstPieceValue = self.board.get_element(*lastPieceCoords)\n\n # Check if destination tile is available\n if lstPieceValue != self.EMPTY :\n return False\n\n # Check if the intermediate piece belongs to the jumping player\n if medPieceValue != firPieceValue:\n return False\n\n return True",
"def exist_adjacent_cell(board: list, cell_index: tuple) -> bool:\n #Each time the result = [(-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1), (1, 0), (1, 1)]\n possible_cells_direction = list(filter(lambda x: x[0] != 0 or x[1] != 0, list(product(range(-1, 2), range(-1, 2)))))\n\n for coord_couple in possible_cells_direction:\n i = cell_index[0] + coord_couple[0]\n j = cell_index[1] + coord_couple[1]\n\n if not test_cell_existence(board, i, j):\n continue\n\n # If a cell isn't empty\n if board[i][j] != 0:\n return True\n return False",
"def hasJunction(junc, chr, leftEdge, rightEdge, wiggle=0):\n\n for i in range(leftEdge-wiggle, leftEdge+wiggle+1):\n for j in range(rightEdge-wiggle, rightEdge+wiggle+1):\n try:\n if junc[chr].has_key( (i, j) ):\n return True\n except KeyError:\n return False\n\n return False",
"def perform_action(self, cell_location, player):\n cell = self.get_cell(cell_location)\n if cell is not None:\n if cell.get_cell_state() == 0 and player == 1:\n cell.set_cell_state(1)\n elif cell.get_cell_state() == 0 and player == 2:\n cell.set_cell_state(2)\n else:\n raise Exception(\"Move is not available because the cell is occupied\")\n else:\n raise Exception(\"Given cell location is invalid\")",
"def __can_enter(self, position, traversed):\n row, col = position\n # Check index values\n if row < 0 or col < 0:\n return False\n if row >= self.__row_count or col >= self.__col_count:\n return False\n # Check if already traversed\n if traversed[row][col]:\n return False\n # Check if blocked\n if self.__grid[row][col].blocked:\n return False\n return True"
] | [
"0.58794034",
"0.5780335",
"0.56505096",
"0.56226003",
"0.5529821",
"0.548902",
"0.545118",
"0.54377955",
"0.5421571",
"0.54183495",
"0.5403734",
"0.5387588",
"0.53724825",
"0.5344193",
"0.5342577",
"0.5336441",
"0.52679",
"0.52414954",
"0.5229286",
"0.52260715",
"0.52221286",
"0.5197883",
"0.51897085",
"0.5176036",
"0.51679367",
"0.5167046",
"0.51327646",
"0.51310503",
"0.51216596",
"0.512035"
] | 0.7989009 | 0 |
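
The negative snippets in the row above (op_jump_nwest, _canJump, op_jump_nwest_pre and similar) all validate a jump move by checking three things: the move spans exactly two squares, the square being jumped over holds an opposing piece, and the landing square is on the board and empty. As a standalone illustration of that shared pattern, here is a minimal sketch on a plain 2-D grid; the board encoding (None for empty, a player id otherwise) and the helper name can_jump are assumptions made for illustration, not taken from any of the snippets.

def can_jump(board, start, end, player):
    """Return True if `player` may jump from `start` to `end` on `board`.

    Assumed encoding for this sketch: board is a list of lists,
    None marks an empty square, any other value is a player id.
    """
    (sr, sc), (er, ec) = start, end
    dr, dc = er - sr, ec - sc
    # A jump spans exactly two squares, straight or diagonally.
    if (abs(dr), abs(dc)) not in {(2, 0), (0, 2), (2, 2)}:
        return False
    # The landing square must exist and be empty.
    if not (0 <= er < len(board) and 0 <= ec < len(board[er])):
        return False
    if board[er][ec] is not None:
        return False
    # The square being jumped over must hold an opposing piece.
    over = board[sr + dr // 2][sc + dc // 2]
    return over is not None and over != player


if __name__ == "__main__":
    grid = [[None] * 4 for _ in range(4)]
    grid[0][0], grid[1][1] = "A", "B"
    print(can_jump(grid, (0, 0), (2, 2), "A"))   # True: jumps over B onto an empty square
    print(can_jump(grid, (0, 0), (2, 0), "A"))   # False: nothing to jump over

With such a predicate, the per-direction operators in the snippets above reduce to thin wrappers that only supply the two-square offset.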
Returns a (mid_row, mid_peg) tuple between (start_row, start_peg) and (end_row, end_peg). | def mid(start_row, start_peg, end_row, end_peg):
if start_row + 2 == end_row:
mid_row = start_row + 1
elif start_row == end_row + 2:
mid_row = start_row - 1
elif start_row == end_row:
mid_row = start_row
if start_peg + 2 == end_peg:
mid_peg = start_peg + 1
elif start_peg == end_peg + 2:
mid_peg = start_peg - 1
elif start_peg == end_peg:
mid_peg = start_peg
return (mid_row, mid_peg) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def startAndEnd(self):\n upperRow = 0\n upperCol = 0\n lowerRow = 0\n lowerCol = 0\n if self.selectionMode == kSelectionNone:\n upperRow = self.penRow\n upperCol = self.penCol\n lowerRow = self.penRow\n lowerCol = self.penCol\n elif self.selectionMode == kSelectionAll:\n upperRow = 0\n upperCol = 0\n lowerRow = self.parser.rowCount() - 1\n lowerCol = self.parser.rowWidth(-1)\n elif self.selectionMode == kSelectionBlock:\n upperRow = min(self.markerRow, self.penRow)\n upperCol = min(self.markerCol, self.penCol)\n lowerRow = max(self.markerRow, self.penRow)\n lowerCol = max(self.markerCol, self.penCol)\n elif (self.selectionMode == kSelectionCharacter or\n self.selectionMode == kSelectionLine or\n self.selectionMode == kSelectionWord):\n upperRow = self.markerRow\n upperCol = self.markerCol\n lowerRow = self.penRow\n lowerCol = self.penCol\n if upperRow == lowerRow and upperCol > lowerCol:\n upperCol, lowerCol = lowerCol, upperCol\n elif upperRow > lowerRow:\n upperRow, lowerRow = lowerRow, upperRow\n upperCol, lowerCol = lowerCol, upperCol\n #app.log.detail('start and end', upperRow, upperCol, lowerRow, lowerCol)\n return (upperRow, upperCol, lowerRow, lowerCol)",
"def midpoint(self) -> Tuple[int, int]:\n minx, miny, maxx, maxy = self.substrates.bounds\n return ((minx + maxx) // 2, (miny + maxy) // 2)",
"def midpoint(self) -> Tuple[int, int]:\n pass",
"def midleft(self):\n return (self.left, self.centery)",
"def startEndPoints(mazz):\n for i in range (len(mazz)):\n for j in range (len(mazz[i])):\n if mazz[i][j] == 6:\n startx = i\n starty = j\n elif mazz[i][j] == 7:\n endx = i\n endy = j\n return startx, starty, endx, endy",
"def mid(self, line):\n return [(line.x1 + line.x2) // 2, (line.y1 + line.y2) // 2]",
"def get_start_cell(self):\n return (self.st_row, self.st_col)",
"def find_start_end(grid):\n #------------------------------------\n #\n # Fill and submit this code\n #\n start = 0\n end = 0\n for i in range(len(grid)):\n if grid[i] == \"emerald_block\":\n start = i\n elif grid[i] == \"redstone_block\":\n end = i\n return (start, end)\n #-------------------------------------",
"def _middle_point(p1, p2):\n x = int((p1.x + p2.x) / 2)\n y = int((p1.y + p2.y) / 2)\n return (x, y)",
"def mid(p1, p2):\n\treturn [(p1[0]+p2[0])/2., (p1[1]+p2[1])/2.]",
"def midright(self):\n return (self.right, self.centery)",
"def get_feature_start_end(feature_record):\n return (feature_record.location.start.position+1, feature_record.location.end.position)",
"def get_current_edges(self) -> Tuple[int, int, int, int]:\n top = int(self.tile_rows[0], 2)\n bottom = int(self.tile_rows[-1], 2)\n left = int(''.join([r[0] for r in self.tile_rows]), 2)\n right = int(''.join([r[-1] for r in self.tile_rows]), 2)\n\n return (top, bottom, left, right)",
"def _mid(pt1, pt2):\n (x0, y0), (x1, y1) = pt1, pt2\n return 0.5 * (x0 + x1), 0.5 * (y0 + y1)",
"def to_tuple(self):\n return (self.row_start, self.row_end, self.col_start, self.col_end)",
"def get_text_positions(self, node, padded):\n # type: (AstNode, bool) -> Tuple[Tuple[int, int], Tuple[int, int]]\n if not hasattr(node, 'first_token'):\n return (1, 0), (1, 0)\n\n start = node.first_token.start\n end = node.last_token.end\n if padded and any(match_token(t, token.NEWLINE) for t in self.get_tokens(node)):\n # Set col_offset to 0 to include leading indentation for multiline statements.\n start = (start[0], 0)\n\n return start, end",
"def span(self):\r\n return self._start, self._end",
"def midbottom(self):\n return (self.centerx, self.bottom)",
"def getPos(self,len,end,nodes):\n start=end\n if self.count==nodes:\n last=len\n else:\n last=end+(int)(len/(nodes+1))\n self.count+=1\n return (start,last)",
"def find_start_and_end(arr):\n start = None\n end = None\n for i, line in enumerate(arr):\n for j, node in enumerate(line):\n if node.symbol is 'A':\n start = node\n if node.symbol is 'B':\n end = node\n return start, end",
"def match_pair(expr, pair=(r'{', r'}'), start=0):\n\n beg = pair[0]\n fin = pair[1]\n\n # find first opening\n sstart = expr.find(beg, start)\n\n count = 0\n\n if beg == fin:\n eend = expr.find(fin, sstart + 1)\n return sstart, eend\n\n p = re.compile('(' + beg + '|' + fin + ')', re.M)\n ps = re.compile(beg, re.M)\n\n iterator = p.finditer(expr, start)\n\n for match in iterator:\n if ps.match(match.group()):\n count += 1\n else:\n count += -1\n\n if count == 0:\n return sstart, match.end()\n\n return None",
"def find_midpoint(start, end):\n mid = (start + end) / 2\n return int(mid)",
"def raw_span(self, always=False):\n row, col = self.cell.xlrd_pos\n for box in self.cell.sheet.merged_cells:\n rlo, rhi, clo, chi = box\n # note the high indexes are NOT inclusive!\n rhi = rhi - 1\n chi = chi - 1\n if row >= rlo and row <= rhi and col >= clo and col <= chi:\n return rlo, rhi, clo, chi\n if always:\n return (row, row, col, col)\n else:\n return None",
"def _pasrse_data_start_end(self, data):\n first = data['obs_time'].iloc[0]\n last = data['obs_time'].iloc[-1]\n\n return (first, last)",
"def determine_begin_end(molecule, index):\n # Validte the length of the molecule\n validate_molecule_length(molecule)\n # Validate_the index1, index2, index3, \n # Define the constant gap for accessing the 9mers sequence from a certain residue\n gap = 8\n # Get the total numbers of residues\n length = len(molecule)\n # Set the begin point \n begin = index - gap\n # Set the end point\n end = index + gap\n if begin < 0:\n begin = 0 \n if end > length:\n end = length\n \n return begin, end",
"def index_range(page: int, page_size: int) -> Tuple[int, int]:\n if page and page_size:\n start: int = (page - 1) * page_size\n end: int = start + page_size\n return (start, end)",
"def get_start_end_points(path, transect): \n transect_array = np.genfromtxt(path + 'tran_sim_pts.csv', delimiter=\",\")\n start_point = transect_array[2 * transect, :]\n end_point = transect_array[2 * transect + 1, :]\n \n # force start points to be west of end points\n if start_point[0] > end_point[0]:\n previous_start_point = start_point\n start_point = end_point\n end_point = previous_start_point\n return start_point, end_point",
"def midtop(self):\n return (self.centerx, self.top)",
"def mid(p1, p2):\n return (p1[0]+p2[0])/2, (p1[1]+p2[1])/2, (p1[2]+p2[2])/2",
"def get_midpoint_and_extend(bed_file, chrom_sizes_file, extend_len, out_file):\n slop_bed = (\n \"zcat {0} | \"\n \"awk -F '\\t' 'BEGIN{{OFS=\\\"\\t\\\"}} \"\n \"{{ midpoint=$2+int(($3-$2)/2); \"\n \"$2=midpoint; $3=midpoint+1; print }}' | \"\n \"bedtools slop -i stdin -g {1} -b {2} | \"\n \"gzip -c > {3}\").format(\n bed_file,\n chrom_sizes_file,\n extend_len,\n out_file)\n run_shell_cmd(slop_bed)\n\n return None"
] | [
"0.6485405",
"0.59999824",
"0.599718",
"0.57569534",
"0.57080907",
"0.5679897",
"0.5622903",
"0.5562309",
"0.5506314",
"0.5498558",
"0.54762554",
"0.54337853",
"0.53962946",
"0.5344107",
"0.53079104",
"0.52601105",
"0.5217689",
"0.5206089",
"0.51990014",
"0.51918226",
"0.5168565",
"0.51656896",
"0.5164955",
"0.5157929",
"0.51464736",
"0.5126655",
"0.5124141",
"0.5124063",
"0.5113096",
"0.51084256"
] | 0.84714884 | 0 |
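
The mid document above is only ever called for coordinates that are equal or exactly two apart, so the jumped-over peg always sits at the integer average of the two endpoints. The sketch below makes that observation explicit; it is an illustrative equivalent under that same assumption, not a replacement for the dataset entry.

def mid(start_row, start_peg, end_row, end_peg):
    # Each coordinate of a legal jump differs by exactly 2 (or 0), so the
    # jumped-over peg sits at the integer average of the two endpoints.
    return ((start_row + end_row) // 2, (start_peg + end_peg) // 2)


assert mid(0, 0, 2, 2) == (1, 1)   # jump down-right
assert mid(4, 2, 2, 0) == (3, 1)   # jump up-left
assert mid(3, 1, 3, 3) == (3, 2)   # jump along a row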
Searches for a solution using recursive backtracking. | def search(tri, history = []):
count = 0
children = []
for start_row in range(len(tri)):
for start_peg in range(len(tri[start_row])):
if tri[start_row][start_peg] == True:
count += 1
for end_row, end_peg in jump_lookup[(start_row, start_peg)]:
if tri[end_row][end_peg] == False:
mid_row, mid_peg = mid(start_row, start_peg, end_row, end_peg)
if (tri[mid_row][mid_peg] == True):
new_tri = copy_triangle(tri)
jump(new_tri, (start_row, start_peg), (mid_row, mid_peg), (end_row, end_peg))
children.append(new_tri)
if children:
return min([search(new_tri, history=history + [new_tri]) for new_tri in children])
else:
if count <= 1:
raise SolutionException(history)
return count | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def search(self):\r\n #get the initial state\r\n initialState = State()\r\n \r\n #create root node\r\n rootNode = Node(initialState)\r\n \r\n #show the search tree explored so far\r\n treeplot = TreePlot()\r\n treeplot.generateDiagram(rootNode, rootNode)\r\n \r\n #perform search from root node\r\n self.performBacktrackSearch(rootNode, rootNode)\r\n \r\n rootNode.printTree()",
"def search():\n pass",
"def search(board):\n depth = 0\n while True:\n result = depth_first(board, depth)\n if result:\n return result\n else:\n depth += 1",
"def search(values):\n \"Using depth-first search and propagation, try all possible values.\"\n ## Used the provided solutions to be sure that my implementation of diagonals and\n ## Twins is ok\n\n # First, reduce the puzzle using the previous function\n values = reduce_puzzle(values)\n if values is False:\n return False ## Failed earlier\n if all(len(values[s]) == 1 for s in boxes):\n return values ## Solved!\n # Choose one of the unfilled squares with the fewest possibilities\n n,s = min((len(values[s]), s) for s in boxes if len(values[s]) > 1)\n # Now use recurrence to solve each one of the resulting sudokus, and\n for value in values[s]:\n new_sudoku = values.copy()\n new_sudoku[s] = value\n attempt = search(new_sudoku)\n if attempt:\n return attempt",
"def recursive_search(i, F, t, s, explored, leaders, order):\n x = len(explored)\n if x % 10 == 0:\n print(\"Length of explored: {}\".format(x))\n explored.append(i)\n if order == 2:\n leaders[i] = s\n arc_list = db.Database.find_one(collection=\"biggraph\", query={\"key\": i})\n if arc_list:\n for node in arc_list['value']:\n if node not in explored:\n F, t, leaders, explored = recursive_search(node, F, t, s, explored, leaders, order)\n if order == 1:\n t += 1\n F[i] = t\n return F, t, leaders, explored",
"def depth_first_search(problem):\n fringe = util.Stack()\n return general_search(problem, fringe)",
"def search(d,key):\n\treturn dfs(d,key)",
"def _dfsearch_recursive(self, footprint):\n self.visited[footprint] = 1\n self.temp_component.append(footprint)\n for neighbour in self.neighbours[footprint]:\n if self.visited[neighbour] == 0:\n self._dfsearch(neighbour)",
"def search(values):\n\n\tif values is False:\n\t\treturn values\n\n\tvalues = reduce_puzzle(values)\n\n\tunsolved = [box for box in boxes if len(values[box]) > 1]\n\n\tif len(unsolved) == 0:\n\t\treturn values\n\t\n\tstart_box = unsolved[0]\n\n\tfor digit in values[start_box]:\n\t\tnew_values = values.copy()\n\t\tnew_values[start_box] = digit\n\t\tattempt = search(new_values)\n\t\t\n\t\tif attempt:\n\t\t\treturn attempt",
"def search(word, current_directory, search_result_list=search_list):\n if search_result_list:\n for counter in range(len(search_result_list)):\n search_result_list.pop()\n if current_directory:\n searcher_object = CompleteSearch(current_directory, word)\n searcher_object.start()\n searcher_object.join()\n return remove_equals(search_result_list)\n\n else:\n for cleaner in range(len(search_result_list)):\n search_result_list.pop()\n for driver in drivers():\n searcher_object = CompleteSearch(driver, word)\n searcher_object.start()\n return remove_equals(search_result_list)",
"def recursive_search(needle, haystack, idx=0):\n\t# def _recursive_haystack(needle, haystack, idx=0):\n\t# \t# if the index is equal to len of haystack, then we've gone thru haystack & needle not there\n\t# \tif idx == len(haystack):\n\t# \t\treturn None\n\t# \t# if the haystack item at index is the needle. we found it, return that index\n\t# \tif haystack[idx] == needle:\n\t# \t\treturn idx\n\t# \t# if we haven't found it, go to next index\n\t# \treturn _recursive_haystack(needle, haystack, idx+1)\n\t# # call the recursive function with a new variable, indx starting at zero\n\t# return _recursive_haystack(needle, haystack, 0)\n\tif idx == len(haystack):\n\t\treturn None\n\tif haystack[idx] == needle:\n\t\treturn idx\n\treturn recursive_search(needle, haystack, idx+1)",
"def search(self, search):\n raise NotImplementedError",
"def search(values):\n # TODO: Copy your code from the classroom to complete this function\n # First, reduce the puzzle using the previous function\n #print (\"before\")\n #display(values)\n reduce_puzzle(values)\n #print(\"after\")\n #display(values)\n \n for box in boxes:\n if len(values[box]) < 1:\n return False ## Failed earlier\n if all(len(values[s]) == 1 for s in boxes): \n return values ## Solved!\n \n # Choose one of the unfilled squares with the fewest possibilities\n n,s = min((len(values[s]), s) for s in boxes if len(values[s]) > 1)\n #print (n,s,values[s])\n \n # Now use recursion to solve each one of the resulting sudokus, and if one returns a value (not False), return that answer!\n for value in values[s]:\n values_copy = values.copy()\n values_copy[s] = value\n #print (s, \"values:\", values[s],\"=>\",value)\n #display(values_copy)\n attempt = search(values_copy)\n if attempt:\n return attempt",
"def uninformed_search(start, end, graph):\n\n class SearchNode():\n def __init__(self, step_cost, name, predecessor):\n self.path_cost = predecessor.path_cost + step_cost if predecessor is not None else 0\n self.step_cost = step_cost\n self.name = name\n self.predecessor = predecessor\n def __repr__(self):\n return self.predecessor.name + \"->\" + self.name + \"=\" + self.path_cost\n\n class Problem():\n def __init__(self, start, end, graph, goal_predicate):\n self.start = start\n self.end = end\n self.graph = graph\n self.is_goal = goal_predicate\n self.visited_nodes = []\n\n nodes_expanded = 0\n nodes_generated = 0\n max_nodes_in_memory = 0\n\n def tree_search(problem, fringe):\n nonlocal nodes_generated\n nonlocal nodes_expanded\n nonlocal max_nodes_in_memory\n\n # create the initial node\n nodes_generated = 1\n fringe = [SearchNode(0, problem.start, None)]\n\n while len(fringe) > 0:\n # keep track of some metrics\n max_nodes_in_memory = max(max_nodes_in_memory, len(fringe))\n nodes_expanded += 1\n\n node = fringe.pop(0)\n while node.name in problem.visited_nodes:\n # ran out of nodes in the fringe\n if len(fringe) == 0:\n return None\n\n node = fringe.pop(0)\n\n if problem.is_goal(node):\n return node\n \n # make sure we never visit this node again, since we'll be expanding it\n problem.visited_nodes.append(node.name)\n\n # keep the fringe sorted by the path cost\n fringe.extend(expand(node, problem))\n fringe = sorted(\n fringe, \n key=lambda node: node.path_cost\n )\n\n return None\n\n def expand(node, problem):\n nonlocal nodes_generated\n nodes = []\n for edge in problem.graph.edges(node.name):\n nodes.append(SearchNode(edge.weight, edge.destination, node))\n \n nodes_generated += len(nodes)\n return nodes\n\n initial_problem = Problem(start, end, graph, lambda x: x.name == end)\n result = tree_search(initial_problem, [])\n\n # convert the resulting nested structure into an actual path of (start, end, cost)\n def walk(node):\n pred = node.predecessor\n if pred is None:\n return []\n \n path = walk(pred)\n path.append((pred.name, node.name, node.step_cost))\n return path\n\n path = walk(result) if result is not None else None\n return (path, nodes_expanded, nodes_generated, max_nodes_in_memory)",
"def big_fun_search(game, grid_size, pokemon_locations, index):\n queue = [index]\n discovered = [index]\n visible = []\n\n if game[index] == FLAG:\n \treturn queue\n\n number = number_at_cell(game, pokemon_locations, grid_size, index)\n if number != 0:\n return queue\n\n while queue:\n node = queue.pop()\n for neighbour in neighbour_directions(node, grid_size):\n if neighbour in discovered or neighbour is None:\n continue\n\n discovered.append(neighbour)\n if game[neighbour] != FLAG:\n number = number_at_cell(game, pokemon_locations, grid_size, neighbour)\n if number == 0:\n queue.append(neighbour)\n visible.append(neighbour)\n return visible",
"def search(self, word):\n return self.subsearch(self.root, word)",
"def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n fringe = util.Stack()\n return GraphSearch(problem, 'dfs').search(fringe)",
"def customBreadthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n i = 0\n dirList = []\n closed = util.Counter()\n fringe = util.Queue()\n state = problem.getStartState()\n followPac = []\n closed[hash(state)] = 1\n\n for triple in problem.getSuccessors(state):\n fringe.push((triple, dirList.copy()))\n while not fringe.isEmpty():\n i += 1\n state = fringe.pop()\n succ = state[0][0]\n act = state[0][1]\n cost = state[0][2]\n dirList = state[1]\n dirList.append(act)\n \n if problem.isGoalState(succ):\n return dirList\n if problem.isPacman(succ):\n followPac.append(dirList.copy())\n if closed[hash(succ)] == 0:\n closed[hash(succ)] = 1\n for triple in problem.getSuccessors(succ):\n fringe.push((triple, dirList.copy()))\n if not followPac:\n return\n followPac = max(followPac, key=lambda x: len(x))\n last = followPac.pop()\n followPac.append(last)\n followPac.append('place')\n followPac.append(reverse[last])\n return followPac.copy()",
"def find_urls_with_search_text(url, depth, search_text, urls_with_search_text):\n if depth <= 0:\n return\n\n if find_search_text(url, search_text) and url not in urls_with_search_text:\n urls_with_search_text.append(url)\n\n links = find_urls(url)\n for link in links:\n find_urls_with_search_text(link, depth-1, search_text, urls_with_search_text)",
"def depth_first_search(problem):\r\n \"*** YOUR CODE HERE ***\"\r\n fringe = []\r\n path = set()\r\n final = []\r\n acts = dict()\r\n state = problem.get_start_state()\r\n fringe.append(state)\r\n\r\n while(len(fringe) > 0):\r\n state = fringe.pop()\r\n path.add(state)\r\n states = problem.get_successors(state)\r\n acts[state] = states[:]\r\n if problem.is_goal_state(state):\r\n break\r\n\r\n #states = problem.get_successors(state)\r\n for stat in states:\r\n if stat[0] not in path and stat[0] not in fringe:\r\n fringe.append(stat[0])\r\n\r\n while(True):\r\n if state == problem.get_start_state():\r\n break\r\n for key, val in acts.items():\r\n for va in val: #( x, y, z)\r\n if va[0] == state:\r\n final.append(va[1])\r\n state = key\r\n break\r\n else:\r\n continue\r\n break\r\n\r\n final.reverse()\r\n\r\n return final",
"def search(line, pattern_tree, pattern_path, result_tree, result_path):\n node = (node for node in pattern_path[:])\n pattern_path[:] = [] # Start search at root\n while not search_down(line, pattern_tree, pattern_path, result_tree, result_path):\n try:\n pattern_path.append(node.next())\n except StopIteration:\n break",
"def search(self, *args, **kwargs): # real signature unknown\n pass",
"def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n util.raiseNotDefined()",
"def depthFirstSearch(problem):\n \"*** YOUR CODE HERE ***\"\n \n util.raiseNotDefined()",
"def graph_search(problem, verbose=False, debug=False):\r\n \r\n # PriorityQueue should be used to maintain the order of the queue.\r\n frontier = PriorityQueue()\r\n \r\n frontier.append(Node(problem, problem.initial))\r\n \r\n current_node = frontier.pop()\r\n \r\n p = True\r\n #depth first search\r\n if current_node.expand(current_node.problem)[0].g < 0:\r\n \r\n frontier = deque()\r\n frontier.append(Node(problem, problem.initial))\r\n #breadth first search\r\n elif current_node.expand(current_node.problem)[0].h < 2:\r\n \r\n p = False\r\n frontier = deque()\r\n frontier.append(Node(problem, problem.initial))\r\n #manhattan\r\n else:\r\n \r\n frontier.append(current_node)\r\n\r\n f_hash = Explored()\r\n f_hash.add(problem.initial.state_tuple())\r\n done = False\r\n n_explored = 0\r\n explored = Explored()\r\n\r\n #graph_search\r\n while not done:\r\n \r\n if p:\r\n current_node = frontier.pop()\r\n else:\r\n current_node = frontier.popleft()\r\n explored.add(current_node.state.state_tuple())\r\n n_explored = n_explored + 1 #inc the number of explored nodes\r\n\r\n if current_node.state.solved():\r\n path = current_node.path()\r\n done = True\r\n return path, n_explored\r\n #if not found in the tree return none and number of nodes explored\r\n else:\r\n \r\n for child in current_node.expand(current_node.problem):\r\n if not explored.exists(child.state.state_tuple()) and not \\\r\n f_hash.exists(child.state.state_tuple()):\r\n frontier.append(child)\r\n f_hash.add(child)\r\n done = len(frontier) == 0\r\n\r\n return None, n_explored",
"def _search(dork): \n retVal = [] \n paths = [] \n\n if not dork: \n return None \n\n headers = {} \n\n headers[HTTP_HEADER.USER_AGENT] = dict(conf.httpHeaders).get(HTTP_HEADER.USER_AGENT, DUMMY_SEARCH_USER_AGENT) \n headers[HTTP_HEADER.ACCEPT_ENCODING] = HTTP_ACCEPT_ENCODING_HEADER_VALUE \n\n gpage = conf.googlePage if conf.googlePage > 1 else 1 \n\n#polluted by xi4okv QQ£º48011203 \n\n for gpage in xrange(1,10): \n logger.info(\"using search result page #%d\" % gpage) \n\n url = \"https://m.baidu.com/s?\" \n url += \"word=%s&\" % urlencode(dork, convall=True) \n url += \"&pn=%d\" % ((gpage - 1) * 10) \n\n try: \n req = urllib2.Request(url, headers=headers) \n conn = urllib2.urlopen(req) \n\n requestMsg = \"HTTP request:\\nGET %s\" % url \n requestMsg += \" %s\" % httplib.HTTPConnection._http_vsn_str \n logger.log(CUSTOM_LOGGING.TRAFFIC_OUT, requestMsg) \n\n page = conn.read() \n code = conn.code \n status = conn.msg \n\n responseHeaders = conn.info() \n page = decodePage(page, responseHeaders.get(\"Content-Encoding\"), responseHeaders.get(\"Content-Type\")) \n #print page \n\n responseMsg = \"HTTP response (%s - %d):\\n\" % (status, code) \n\n if conf.verbose <= 4: \n responseMsg += getUnicode(responseHeaders, UNICODE_ENCODING) \n elif conf.verbose > 4: \n responseMsg += \"%s\\n%s\\n\" % (responseHeaders, page) \n\n logger.log(CUSTOM_LOGGING.TRAFFIC_IN, responseMsg) \n except urllib2.HTTPError, e: \n pass \n\n urls = [urllib.unquote(match.group(0) or match.group(1)) for match in re.finditer(GOOGLE_REGEX, page, re.I)] \n #retVal = re.findall(GOOGLE_REGEX, page, re.I) \n\n import urlparse \n\n for url in urls: \n urls_pat = re.compile(r\"http://(.*)[^']\") \n aurl = re.findall(urls_pat, url) \n if \"?\" in url and \"baidu\" not in url: \n xpath = urlparse.urlparse(url).path \n if xpath not in paths: \n paths.append(xpath) \n retVal.append(aurl[0]) \n\n #print retVal \n\n return retVal",
"def search(self) -> int:\n # crete node list\n for x in range(self.n):\n for y in range(self.n):\n if not self.grid[y][x] == 0:\n self.all_nodes.append((x, y))\n # recursively create paths\n i = 0\n paths = [[(0, 0)]]\n while i < self.n * self.n:\n paths = self.generate_paths(paths)\n if isinstance(paths, int):\n return paths\n i += 1\n\n return -1",
"def search(grid, dictionary):\n neighbours = all_grid_neighbours(grid)\n paths = []\n full_words, stems = dictionary\n \n def do_search(path): # nested function\n word = path_to_word(grid, path)\n if word in full_words: # word_in_dictionary(word, dictionary): # word in dictionary: - modified again\n paths.append(path)\n if word not in stems:\n return\n for next_pos in neighbours[path[-1]]:\n if next_pos not in path:\n do_search(path + [next_pos])\n \n for position in grid:\n do_search([position])\n \n words = []\n for path in paths:\n words.append(path_to_word(grid, path))\n return set(words)",
"def _search(self, node, term, i):\n if not term:\n # No search term, no results\n return set()\n\n if i == len(term):\n # We hit the end of the search term, everything at\n # the current node is a match for the term\n return node.get_elements()\n\n char = term[i]\n children = node.get_children()\n if char not in children:\n # We're not at a leaf node or the end of the search\n # term but none of the children of the current node\n # match, no results\n return set()\n\n # Otherwise, continue searching at the next node\n return self._search(children[char], term, i + 1)",
"def search(self, word):\n def r_search(word,i,d):\n if len(word) <= i:\n return True\n \n if d == 0:\n return False\n \n return (word[i] in d) and r_search(word,i+1,d[word[i]])\n \n tri = self.root.d\n if len(word) == 0: \n return True\n \n if len(tri) == 0:\n return False\n \n return r_search(word + '$',0,tri)"
] | [
"0.6654038",
"0.65033793",
"0.6456524",
"0.6307618",
"0.6231046",
"0.62213635",
"0.6207362",
"0.61557186",
"0.6107835",
"0.6091845",
"0.6074678",
"0.60684437",
"0.60239583",
"0.5979148",
"0.59736043",
"0.5972576",
"0.5958814",
"0.5956313",
"0.5955384",
"0.5945115",
"0.5939703",
"0.5936097",
"0.5934155",
"0.5924881",
"0.59209675",
"0.5890939",
"0.5879788",
"0.587897",
"0.5858954",
"0.5851083"
] | 0.6503747 | 1 |
Create a Redis connection from a URI. | def connect_redis(uri):
puri = urlparse.urlparse(uri)
host = puri.hostname
port = puri.port
password = puri.password if puri.password else ''
db_name = puri.path.split('/')[1]
r = redis.Redis(host=host, port=port, password=password, db=db_name)
assert r.ping()
return r | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def conn_redis(host, port, db=0):\r\n r = redis.Redis(host=host, port=port, db=db)\r\n return r",
"def create_connection():\n # REDIS_URL is defined in .env and loaded into the environment by Honcho\n redis_url = os.getenv('REDIS_URL')\n # If it's not defined, use the Redis default\n if not redis_url:\n redis_url = 'redis://localhost:6379'\n urlparse.uses_netloc.append('redis')\n url = urlparse.urlparse(redis_url)\n return redis.StrictRedis(\n host=url.hostname,\n port=url.port,\n db=0,\n password=url.password\n )",
"def make_redis_store(uri):\n result = urlparse(uri)\n scheme = result.scheme.lower()\n if not result.scheme.startswith('redis'):\n raise ValueError('not a redis uri')\n host = result.hostname\n port = result.port\n database = int(result.path[1:])\n if result.password:\n password = unquote(result.password)\n else:\n password = None\n if scheme == 'redis+legacy':\n class_ = redis.Redis\n else:\n class_ = redis.StrictRedis\n store = class_(\n host, port, database, password,\n socket_timeout=SOCKET_TIMEOUT,\n socket_connect_timeout=SOCKET_CONNECT_TIMEOUT\n )\n return store",
"def connect_to_redis():\n return Redis(host=redis_host, port=redis_port, db=0)",
"def get_connection(self, params):\r\n return Redis(connection_pool=self.get_or_create_connection_pool(params))",
"def get_redis(**kwargs):\n redis_cls = kwargs.pop('redis_cls', DEFAULT_REDIS_CLS)\n url = kwargs.pop('url', None)\n if url:\n return redis_cls.from_url(url, **kwargs)\n else:\n return redis_cls(**kwargs)",
"def create_redis_connection(app=None):\n\n if app:\n app.logger.info('Instantiated new redis connection.')\n\n redis_connection = redis.StrictRedis(\n host=\"localhost\",\n port=6379,\n db=0\n )\n\n if not redis_connection.exists('last_queue_idx'):\n redis_connection.set('last_queue_idx', 0)\n\n return redis_connection",
"def __init__(self, namespace, redis_url=(\"localhost\", 6379)):\n\n self.namespace = namespace\n\n if isinstance(redis_url, tuple):\n self.redis = StrictRedis(host=redis_url[0], port=redis_url[1])\n elif isinstance(redis_url, str):\n self.redis = StrictRedis(host=redis_url)",
"def __init__(self, host, port):\n self.r = redis.StrictRedis(host=host, port=port)",
"def create_connection(loop, uri):\n\n proto_pos = uri.find('://')\n protocol_name = uri[0:proto_pos]\n\n if protocol_name not in PROTOCOL_MAP:\n raise ValueError(\"Unknown protocol %s\" % protocol_name)\n\n address_str = uri[proto_pos + 3:]\n\n protocol_cls, address_parser = PROTOCOL_MAP[protocol_name]\n\n address = address_parser(address_str)\n\n connection = protocol_cls(loop, address)\n\n return connection",
"def _conn_redis(self) -> Redis:\n return Redis(host=self._REDIS_DB_HOST, port=self._REDIS_DB_PORT, db=0,decode_responses=True)",
"def redis_from_url(url, db=None, charset='utf-8', errors='strict',\n decode_responses=False, socket_timeout=None, **kwargs):\n url = urlparse.urlparse(url)\n\n # We only support redis:// schemes.\n assert url.scheme == 'redis' or not url.scheme\n\n # Extract the database ID from the path component if hasn't been given.\n if db is None:\n try:\n db = int(url.path.replace('/', ''))\n except (AttributeError, ValueError):\n db = 0\n\n # TODO: unix domain sockets\n pool = redis.ConnectionPool(connection_class=Connection,\n host=url.hostname, port=int(url.port or 6379), db=db,\n password=url.password, decode_responses=decode_responses,\n encoding=charset, encoding_errors=errors,\n socket_timeout=socket_timeout)\n\n return redis.StrictRedis(connection_pool=pool, **kwargs)",
"def connect(self):\n self.connection = redis.Redis(\n host=self.host,\n port=self.port,\n socket_connect_timeout=self.timeout,\n socket_timeout=self.timeout\n )",
"def _connect(self):\n self.connection = RedisConnection(self.host, self.port, self.dbname)",
"def _get_conn(self):\n return redis.Redis(connection_pool=self.pool)",
"def _connect(self):\n try: \n self.r = redis.StrictRedis(host=self.host, port=self.port, db=self.db)\n except:\n raise",
"def get_redis_client():\n return redis.from_url(settings.REDIS_URI)",
"def _CreateRedisClient(self):\n try:\n redis_client = redis.from_url(self._REDIS_URL, socket_timeout=60)\n redis_client.ping()\n except redis.exceptions.ConnectionError:\n redis_client = fakeredis.FakeStrictRedis()\n\n return redis_client",
"def _CreateRedisClient(self):\n try:\n redis_client = redis.from_url(self._REDIS_URL, socket_timeout=60)\n redis_client.ping()\n except redis.exceptions.ConnectionError:\n redis_client = fakeredis.FakeStrictRedis()\n\n return redis_client",
"def connect(uri: str, *args: Any, **kwargs: Any) -> Connection:\n match = _uri_regex.match(uri)\n if match:\n scheme, path = match.groups()\n connector, engine_kls = Connection.get_connector(scheme)\n return connector(engine_kls(), path, *args, **kwargs)\n raise InvalidURI(f\"Invalid database connection URI {uri}\")",
"def get_redis_client(host='localhost', port=6379, db=0):\n host = os.environ.get('REDIS_HOST') or host\n port = os.environ.get('REDIS_PORT') or port\n return StrictRedis(host=host, port=port, db=db)",
"def dbConnect(self):\n r = redis.StrictRedis()\n try:\n r = redis.from_url(os.environ.get(\"REDIS_URL\"))\n print(\"DB Connection seems okay!\")\n except Exception as error:\n print (\"Oops! An exception has occured:\", error)\n print (\"Exception TYPE:\", type(error))\n r = None\n finally:\n return r",
"def _connect_to_redis(self):\n for name, config in settings.STREAM_REDIS_CONFIG.items():\n self._redis_client = tornadoredis.Client(host=config['host'],\n port=config['port'],\n password=config['password'],\n connection_pool=pool)\n self._redis_client.connect()",
"def __init__(self):\n try:\n config = redis_settings[\"REDIS_BACKEND\"]\n self.servers = config[\"servers\"]\n self.port = config[\"port\"]\n self.db = config[\"db\"]\n self.password = config[\"password\"]\n # r = redis.Redis('10.66.136.84', '6379', 0,password=\"xsw2CDE#vfr4\")\n #r = redis.Redis('10.66.136.84', '6379', 0)\n self.redis = Redis(self.servers, self.port, self.db,\n password=self.password, socket_timeout=1)\n except Exception, e:\n print \"Redis YAMLConfig Error :\", e\n logging.error(e)",
"def connect_redis(conn):\n # Don't pass empty password to the client\n if not conn.get('password', None):\n conn.pop('password', None)\n\n return redis.StrictRedis(**conn)",
"def __init__(self, config):\n self.r = redis.StrictRedis(host=config['REDIS_HOST'],\n port=config['REDIS_PORT'],\n db=config['REDIS_DB'])",
"def get_redis() -> redis.Redis:\n global redis_conn\n if not redis_conn:\n host = app.config.get(\"REDIS_HOST\", \"127.0.0.1\")\n port = app.config.get(\"REDIS_PORT\", \"6379\")\n db = app.config.get(\"REDIS_DB\", \"0\")\n redis_conn = redis.Redis(host=host, port=port, db=db)\n\n return redis_conn",
"def __call__(self, settings):\n self.clear() # make sure you can reconfigure the client\n db = settings.get('redis.db', 0)\n config = {'db': int(db)}\n if ('redis.unix_socket_path' in settings and\n settings['redis.unix_socket_path'] is not None):\n config['unix_socket_path'] = settings['redis.unix_socket_path']\n elif ('redis.url' in settings and\n settings['redis.url'] is not None): # should default to\n # `redis://localhost:6379`\n # Unpack.\n url = settings['redis.url']\n\n # Parse into a config dict.\n o = self.parse_url(url)\n config.update({\n 'host': o.hostname,\n 'port': o.port,\n })\n if o.password:\n config['password'] = o.password\n\n max_connections = settings.get('redis.max_connections', None)\n if max_connections is not None:\n config['max_connections'] = int(max_connections)\n config = {'connection_pool': self.pool_cls(**config)}\n else:\n raise pyramid.exceptions.ConfigurationError(\n \"\"\"To use redis with pyramid, redis.url or\n redis.unix_socket_path should be provided\"\"\"\n )\n self.update(config)\n return self",
"def create_channel(uri, loop=None):\n\n if not loop:\n loop = Mainloop()\n\n connection = create_connection(loop, uri)\n chan = Channel(connection)\n return loop, chan",
"def get_redis():\n return redis.StrictRedis(host='redis', port=6379)"
] | [
"0.7483556",
"0.74211794",
"0.7264241",
"0.72543937",
"0.68576866",
"0.67962694",
"0.6592169",
"0.65768725",
"0.6565991",
"0.6559168",
"0.65246797",
"0.6495612",
"0.6445122",
"0.64029026",
"0.63978356",
"0.6371485",
"0.6359366",
"0.6345691",
"0.6345691",
"0.6323229",
"0.62616175",
"0.62521183",
"0.62435377",
"0.6217733",
"0.6208792",
"0.6173494",
"0.61578643",
"0.6109993",
"0.6084934",
"0.6074598"
] | 0.82560194 | 0 |
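
The connect_redis document above splits the URI by hand with the Python 2 urlparse module and passes the path segment straight through as the db argument. A hedged Python 3 sketch of the same idea follows, using urllib.parse and an explicit int conversion; the function name connect_redis_py3 and the example URI in the comment are invented for illustration, and the ping only succeeds against a reachable server. redis-py also ships redis.Redis.from_url, which performs the same parsing internally.

from urllib.parse import urlparse

import redis  # third-party client: pip install redis


def connect_redis_py3(uri):
    # Manual parsing, mirroring the document above but on Python 3.
    parts = urlparse(uri)
    db = int(parts.path.lstrip("/") or 0)
    client = redis.Redis(host=parts.hostname, port=parts.port or 6379,
                         password=parts.password, db=db)
    assert client.ping()
    return client


# redis-py can also do the parsing itself:
#   client = redis.Redis.from_url("redis://:secret@localhost:6379/1")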
Update next_waypoint based on base_waypoints and current_pose. Returns True if a valid waypoint has been updated, False otherwise. | def _update_next_waypoint(self):
if not self.base_waypoints:
#rospy.logwarn("Waypoints not updated: base_waypoints not available yet.")
return False
if not self.current_pose:
#rospy.logwarn("Waypoints not updated: current_pose not available yet.")
return False
# Get ego car variables
ego_x = self.current_pose.position.x
ego_y = self.current_pose.position.y
ego_theta = math.atan2(self.current_pose.orientation.y, self.current_pose.orientation.x)
# If I do have a next_waypoint, I will start looking from it, and stop looking
# as soon as I get a local minimum. Otherwise I will do a full search across the whole track
t = time.time()
wp = None
yaw = 0
dist = 1000000 # Long number
if self.next_waypoint:
idx_offset = self.next_waypoint
full_search = False
else:
idx_offset = 0
full_search = True
num_base_wp = len(self.base_waypoints)
for i in range(num_base_wp):
idx = (i + idx_offset)%(num_base_wp)
wp_x = self.base_waypoints[idx].pose.pose.position.x
wp_y = self.base_waypoints[idx].pose.pose.position.y
wp_d = math.sqrt((ego_x - wp_x)**2 + (ego_y - wp_y)**2)
if wp_d < dist:
dist = wp_d
wp = idx
if debugging:
# Angle between car heading and waypoint heading
yaw = math.atan2(wp_y - ego_y, wp_x - ego_x) - ego_theta
elif not full_search:
# Local minimum. If the waypoint makes sense, just use it and break
if dist < max_local_distance:
break; # Found a point
else:
# Seem to have lost track. Do search again
rospy.logwarn("Waypoint updater lost track (local min at %.1f m after %d waypoints). Going back to full search.", dist, i+1)
full_search = True
if debugging:
rospy.loginfo("New next wp [%d] -> (%.1f,%.1f) after searching %d points in %fs", wp, dist * math.cos(yaw), dist * math.sin(yaw), i, time.time()-t)
if wp is None:
rospy.logwarn("Waypoint updater did not find a valid waypoint")
return False
self.next_waypoint = wp
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update(self):\n\n # If the agent has already reached the\n # last waypoint it doesn't need to update\n if self.finished:\n return True\n\n # Skip if the proxy don't have any [new] data\n if (self.pp.info.datatime == 0) or \\\n (self.pp.info.datatime == self.last_read):\n return False\n\n self.last_read = self.pp.info.datatime\n\n # If this is the first update then head toward the first waypoint\n if self.first_update:\n self.pp.set_cmd_pose(self.active_waypoint['x'],\n self.active_waypoint['y'],\n self.get_heading({'x': self.pp.px, 'y': self.pp.py}, self.active_waypoint),\n 1)\n self.first_update = False\n return False\n\n # Calculate how far the agent is from its current waypoint\n dist = math.hypot(self.pp.px - self.active_waypoint['x'],\n self.pp.py - self.active_waypoint['y'])\n\n # Has it reached it yet?\n if dist < self.waypoint_distance_tolerance:\n\n # If all waypoints have been reached, stop the agent and return True\n if (self.active_waypoint_index + 1) >= len(self.waypoints):\n self.pp.set_cmd_vel(0.0, 0.0, 0.0, 0)\n self.pp.enable(False) # redundant?\n self.finished = True\n return True\n\n # Otherwise select the next waypoint\n prev_waypoint = self.active_waypoint\n self.active_waypoint_index += 1\n self.active_waypoint = self.waypoints[self.active_waypoint_index]\n\n # ...and drive to it\n self.pp.set_cmd_pose(self.active_waypoint['x'],\n self.active_waypoint['y'],\n self.get_heading(prev_waypoint, self.active_waypoint),\n 1)\n\n # Still have waypoints to visit\n return False",
"def update_and_publish(self):\n # 1. Find next_waypoint based on ego position & orientation\n if self._update_next_waypoint():\n\n # 2. Generate the list of next LOOKAHEAD_WPS waypoints\n num_base_wp = len(self.base_waypoints)\n last_base_wp = num_base_wp-1\n waypoint_idx = [idx % num_base_wp for idx in range(self.next_waypoint,self.next_waypoint+LOOKAHEAD_WPS)]\n final_waypoints = [self.base_waypoints[wp] for wp in waypoint_idx]\n\n # 3. If there is a red light ahead, update velocity for them\n if self.stop_on_red:\n # Start from original velocities\n self.restore_velocities(waypoint_idx)\n try:\n red_idx = waypoint_idx.index(self.red_light_waypoint)\n self.decelerate(final_waypoints, red_idx, self.stop_distance)\n except ValueError:\n # No red light available: self.red_light_waypoint is None or not in final_waypoints\n red_idx = None\n if debugging:\n v = self.get_waypoint_velocity(final_waypoints, 0)\n rospy.loginfo(\"Target velocity: %.1f, RL:%s wps ahead\", v, str(red_idx))\n\n # 3b. If we are close to the end of the circuit, make sure that we stop there\n if self.force_stop_on_last_waypoint or self.base_wp_orig_v[-1] < 1e-5:\n try:\n last_wp_idx = waypoint_idx.index(last_base_wp)\n self.decelerate(final_waypoints, last_wp_idx, 0)\n except ValueError:\n # Last waypoint is not one of the next LOOKAHEAD_WPS\n pass\n\n # 4. Publish waypoints to \"/final_waypoints\"\n self.publish_msg(final_waypoints)",
"def __spur_on_if_needed(self):\n if len(self.waypoints) < 2:\n return\n next_speed = (get_waypoint_speed(self.waypoints[0]) +\n get_waypoint_speed(self.waypoints[1])) / 2.0\n set_waypoint_speed(self.waypoints[0], next_speed)",
"def update_waypoints(self, waypoints, start_time=None):\n self.control_instance.update_waypoints(waypoints)\n if start_time:\n self._last_waypoint_command = start_time",
"def nextWaypoint(self, pose):\n #DONE implement\n location = pose.position\n dist = 100000.\n dl = lambda a, b: math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2 + (a.z-b.z)**2)\n nwp = 0\n for i in range(len(self.waypoints)):\n d1 = dl(location, self.waypoints[i].pose.pose.position)\n if dist > d1:\n nwp = i\n dist = d1\n x = self.waypoints[nwp].pose.pose.position.x\n y = self.waypoints[nwp].pose.pose.position.y\n heading = np.arctan2((y-location.y), (x-location.x))\n angle = np.abs(self.theta-heading)\n if angle > np.pi/4.:\n nwp += 1\n if nwp >= len(self.waypoints):\n nwp = 0\n return nwp",
"def passed_waypoint(self, waypoint_num):\n bools = self.ros_node.get_data('/diff_drive/waypoints_achieved', simple_data = False)\n # Waits for the data\n if bools is not None:\n if len(bools.bools) >= waypoint_num:\n return bools.bools[waypoint_num -1]\n \n rospy.logerr_throttle(15, \"Checking Waypoint Failed. Did not find a waypoint with the number '%s' in the path\" %(waypoint_num))\n return False\n else:\n return False",
"def update_wp_position(self, event):\n wp = -1\n cur_pos = np.array(\n [self.global_pose.latitude, self.global_pose.longitude])\n for idx, waypoint in enumerate(self.waypoints):\n temp = np.array([waypoint['lat'], waypoint['long']])\n alt_diff = abs(self._rel_alt[-1] - waypoint['rel_alt'])\n if idx == 0 and (np.linalg.norm(cur_pos - temp) < self._radius):\n wp = idx\n break\n elif (np.linalg.norm(cur_pos - temp) <\n self._radius) and (alt_diff < self._alt_radius):\n wp = idx\n break\n self._current_wp = wp",
"def waypoints_cb(self, msg):\n t = time.time()\n waypoints = msg.waypoints\n num_wp = len(waypoints)\n\n if self.base_waypoints and self.next_waypoint is not None:\n # Normally we assume that waypoint list doesn't change (or, at least, not\n # in the position where the car is located). If that happens, just handle it.\n if not self.is_same_waypoint(self.base_waypoints[self.next_waypoint],\n waypoints[self.next_waypoint]):\n self.next_waypoint = None # We can't assume previous knowledge of waypoint\n self.base_waypoints = None # Just for debugging. Will be updated later\n rospy.logwarn(\"Base waypoint list changed\")\n else:\n # No change. We could probably return here.\n pass\n\n \"\"\"\n # -- Uncomment for debugging\n # Stamp waypoint index in PoseStamped and TwistStamped headers of internal messages\n for idx in range(len(waypoints)):\n waypoints[idx].pose.header.seq = idx\n waypoints[idx].twist.header.seq = idx\n \"\"\"\n\n self.base_wp_orig_v = [self.get_waypoint_velocity(waypoints, idx) for idx in range(num_wp)]\n\n if debugging and not self.base_waypoints:\n dist = self.distance(waypoints, 0, num_wp-1)\n rospy.loginfo(\"Received: %d waypoints, %.1f m, %.1f m/wp in t=%f\", num_wp, dist, dist/num_wp, time.time()-t)\n\n self.base_waypoints = waypoints\n\n if self.unsubscribe_base_wp:\n self.base_wp_sub.unregister()",
"def update(self):\n self.logger.debug(\" %s [GenerateNextPose::update()]\" % self.name)\n\n # This behavior will always be successfull. But if it that was not the\n # case, it would return failure\n # self.feedback_message = \"Some failure message!\"\n # return py_trees.common.Status.FAILURE\n\n # If the behavior could be unning for a while, we would have to return\n # py_trees.common.Status.RUNNING, and not block its execution.\n\n # In this example we just need to create the position and orientation\n # keys corresponding to the next desired pose.\n self.curr_waypoint = (self.curr_waypoint + 1) % len(self.waypoints)\n self.blackboard.set(self.pos_key,\n action.Move2Pos.Goal(target_position=Point(\n x=self.waypoints[self.curr_waypoint].x,\n y=self.waypoints[self.curr_waypoint].y,\n z=0.0)))\n self.blackboard.set(\n self.angle_key,\n action.Rotate2Angle.Goal(\n target_orientation=self.waypoints[self.curr_waypoint].theta))\n self.feedback_message = \"New position and orientation generated!\"\n return py_trees.common.Status.SUCCESS",
"def switch_to_next_objective(self):\n self.current_objective_idx += 1\n if self.current_objective_idx >= len(self.objectives): # track complete\n self.current_distance = 0.0\n return True\n else:\n self.current_objective = Gate(self.objectives[self.current_objective_idx])\n self.last_distance = self.last_position.distance_to(self.current_objective.gate_pose.position)\n return False",
"def get_closest_waypoint(self, pose):\n #TODO implement - Done\n # Iterate the base_waypoints' x value with current position's x value and find the closest\n # match, and pick that waypoint location index. \n min_idx = 0\n min_dist = None\n cur_x = pose.position.x\n cur_y = pose.position.y\n if self.waypoints is not None:\n for i, wp in enumerate(self.waypoints):\n wp_x = wp.pose.pose.position.x\n wp_y = wp.pose.pose.position.y\n dist = np.sqrt((cur_x - wp_x)**2 + (cur_y - wp_y)**2)\n if min_dist is None or min_dist >= dist:\n min_dist = dist\n min_idx = i\n \n # check whether the identified index is behind the current position, if so, move it by 1 index\n # https://gamedev.stackexchange.com/questions/75072/how-can-i-compare-two-quaternions-for-logical-equality\n # rospy.logwarn('min_idx before = %d', min_idx)\n eps = 1e-12\n if self.waypoints is not None:\n q1 = self.waypoints[min_idx].pose.pose.orientation\n q2 = pose.orientation\n q1_a = np.array([q1.x, q1.y, q1.z, q1.w])\n q2_a = np.array([q2.x, q2.y, q2.z, q2.w])\n direction = abs(np.dot(q1_a, q2_a))\n #rospy.logwarn('calculated direction %f', direction)\n wp_x = self.waypoints[min_idx].pose.pose.position.x\n if direction > 1-eps:\n if wp_x < cur_x:\n min_idx += 1\n else:\n min_idx -= 1\n else:\n if wp_x < cur_x:\n min_idx -= 1\n else:\n min_idx += 1\n\n # rospy.logwarn('min_idx after = %d', min_idx)\n return min_idx",
"def is_same_waypoint(self, wp1, wp2, max_d=0.5, max_v=0.5):\n dl = lambda a, b: math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2 + (a.z-b.z)**2)\n ddif = dl(wp1.pose.pose.position, wp2.pose.pose.position)\n if ddif < max_d:\n return True\n return False",
"def process_waypoint(self, waypoint: Waypoint) -> Union[Trip, None]:\n\n # ignore the first entry, just remember it for further compares\n if not self.prev_point:\n self.prev_point = waypoint\n return None\n\n if self.is_driving(self.prev_point, waypoint):\n if not self.start_point:\n # indicates trip start\n self.start_point = self.prev_point\n else:\n # indicates trip finish\n if self.start_point:\n d = self.calc_distance(self.start_point, self.prev_point)\n trip = Trip(d, self.start_point, self.prev_point)\n self.start_point = None\n return trip\n self.prev_point = waypoint\n return None",
"def move_to(self, waypoint):\n self.set_final_wp(waypoint)\n self.go()\n currPos = np.asarray(self.rexarm.get_positions())\n while(np.linalg.norm(np.asarray(waypoint) - currPos) > 0.15):\n time.sleep(0.01)",
"def try_waypoints(waypoint_data, current_point, segmented_points):\n\n # waypoint_data will be a list [waypoint_n, ... , waypoint_w]\n # where waypoint_n ... w is (lat, lng)\n\n # store the waypoints retreived and compare their crime_index\n # ret [{dicte}, {dictw}]\n waypoint_geohash_data_all = get_position_geohash(waypoint_data)\n crime_index_storage = []\n for data in waypoint_geohash_data_all:\n crime_index_storage.append(data['crime_index'])\n crime_index_storage.append(current_point['crime_index'])\n\n lowest_crime_index = min(*crime_index_storage)\n\n # check and assemble dict for lowest_crime_index waypoint\n generate_waypoint(lowest_crime_index,\n waypoint_geohash_data_all,\n segmented_points)",
"def _update_valid_directions(self, valid_directions, velocity):\n # If not preventing backtracking, all open directions are valid\n if not self._prevent_backtracking:\n return\n axis = np.argmax(np.abs(velocity))\n direction = np.sign(velocity[axis])\n\n # If velocity is zero, all open directions are valid\n if direction == 0:\n return\n \n # If hit a wall and allow wall backtracking, all open directions are\n # valid\n can_continue = valid_directions[axis, int(0.5 * (1 + direction))]\n if not can_continue and self._allow_wall_backtracking:\n return\n # If not hit a wall and only turn at wall, then continue\n if can_continue and self._only_turn_at_wall:\n valid_directions.fill(0)\n valid_directions[axis, int(0.5 * (1 + direction))] = 1\n return\n\n # If none of the above conditions are true, prevent backtracking\n valid_directions[axis, int(0.5 * (1 - direction))] = False",
"def update_movement(self):\n if self.way_idx < len(self.waypoints) and not self.moving_object.is_moving:\n self.moving_object.start_moving(self.waypoints[self.way_idx])\n self.way_idx += 1",
"def followWaypoints(self, poses):\n self.debug(\"Waiting for 'FollowWaypoints' action server\")\n while not self.follow_waypoints_client.wait_for_server(timeout_sec=1.0):\n self.info(\"'FollowWaypoints' action server not available, waiting...\")\n\n goal_msg = FollowWaypoints.Goal()\n goal_msg.poses = poses\n\n self.info('Following ' + str(len(goal_msg.poses)) + ' goals.' + '...')\n send_goal_future = self.follow_waypoints_client.send_goal_async(goal_msg,\n self._feedbackCallback)\n rclpy.spin_until_future_complete(self, send_goal_future)\n self.goal_handle = send_goal_future.result()\n\n if not self.goal_handle.accepted:\n self.error('Following ' + str(len(poses)) + ' waypoints request was rejected!')\n return False\n\n self.result_future = self.goal_handle.get_result_async()\n return True",
"def pos_updated(self,next_pos):\n #if (int(self.oldx) == int(self.x) and int(self.oldy) == int(self.y)):\n if (int(next_pos[0]) == int(self.x) and int(next_pos[1]) == int(self.y)):\n return False\n else:\n return True",
"def check_directions_find_waypoint(current_point, current_segment,\n delta_before_after, segmented_points):\n\n delta_lat_before_current = delta_before_after[0]\n delta_lng_before_current = delta_before_after[1]\n\n delta_lat_after_current = delta_before_after[2]\n delta_lng_after_current = delta_before_after[3]\n\n # check to see if the delta x's in both directions are longer\n # than the delta y's in both directions\n if (delta_lat_before_current > delta_lng_before_current) and \\\n (delta_lat_after_current > delta_lng_after_current):\n print \"inside first if\"\n # the latitudes are longer than the longitudes, get waypoints\n # in the longitude direction\n\n # don't forget to generate waypoints\n waypoint_e_w = inspect_waypoints(current_point, \"lngwise\")\n try_waypoints(waypoint_e_w, current_segment, segmented_points)\n elif (delta_lng_before_current > delta_lat_before_current) and \\\n (delta_lng_after_current > delta_lat_after_current):\n print \"inside elif, checks the north and south creation\"\n # the longitudes are longer than the latitudes, get waypoints\n # in the latitude direction\n\n # don't forget to generate waypoints\n waypoint_n_s = inspect_waypoints(current_point, \"latwise\")\n try_waypoints(waypoint_n_s, current_segment, segmented_points)\n else:\n print \"inside else, checks all directions NS-EW\"\n\n # don't forget to generate waypoints\n waypoint_all = inspect_waypoints(current_point, \"all\")\n try_waypoints(waypoint_all, current_segment, segmented_points)\n\n # return only the waypoints and start/end lat,lngs\n return segmented_points",
"def process_waypoint(self, waypoint: Waypoint) -> Union[Trip, None]:\n ...",
"def check_reached_waypoint_goal(self):\n return self.control_instance.check_reached_waypoint_goal()",
"def _get_next_waypoint(self, tolerance_step):\n print('\\nGetting new nav plan.')\n\n for i in range(4):\n try:\n self.plan = self.swarmie.get_plan(\n self.goal,\n tolerance=self.tolerance,\n use_home_layer=self.avoid_home\n )\n break # plan received\n except rospy.ServiceException:\n print('ServiceException.')\n if i < 3:\n print('Expanding tolerance.')\n self.tolerance += tolerance_step\n else:\n raise # tried 3 times, we give up\n\n print('Received nav plan.')\n pose = self.plan.plan.poses[0]\n\n return Point(x=pose.pose.position.x, y=pose.pose.position.y)",
"def update_position(steps):\n\n global position_x, position_y\n new_x = position_x\n new_y = position_y\n\n if directions[current_direction_index] == 'forward':\n new_y = new_y + steps\n elif directions[current_direction_index] == 'right':\n new_x = new_x + steps\n elif directions[current_direction_index] == 'back':\n new_y = new_y - steps\n elif directions[current_direction_index] == 'left':\n new_x = new_x - steps\n\n if is_position_allowed(new_x, new_y):\n position_x = new_x\n position_y = new_y\n return True\n return False",
"def goal_callback(self, pose: PoseStamped) -> None:\n\n # Update existing path\n #\n if self.soccerbot.robot_path is not None:\n print(\"Updating New Goal\")\n start = time.time()\n goal_position = Transformation(pose=pose.pose)\n self.soccerbot.setWalkingTorsoHeight(goal_position)\n self.new_path = copy.deepcopy(self.soccerbot.robot_path)\n\n try:\n self.t_new_path = self.new_path.dynamicallyUpdateGoalPosition(self.t, goal_position)\n except Exception as ex:\n print(ex)\n return\n\n end = time.time()\n\n self.soccerbot.publishPath(self.new_path)\n print(\"New Goal Updated, Time Taken: \", end - start)\n pass\n self.new_goal = pose",
"def __isTileGoalState(self, point):\n return point == self.goalPoint",
"def update(self, time_step, checkpoints):\n if self.at_checkpoint:\n return True\n\n if self._arrived_at_checkpoint(time_step):\n self.checkpoint_target.add_attendee(self, time_step)\n print(\"Attendee\", self.attendee_id, \"at:\", self.current_location,\\\n \"has moved to checkpoint at:\", self.checkpoint_target.get_location())\n self.current_location = self.checkpoint_target.get_location()\n self.walk_route[-1] = tuple(self.current_location)\n # print(\"Attendee Walk Route: \", self.walk_route) \n return True\n self.find_checkpoint(checkpoints, time_step)\n self.inter_step()\n return False",
"def has_uav_reached_current_waypoint(self):\n return self.drone.has_reached_waypoint()",
"def next_point(self, start_pos, goal_pos):\r\n\t\tself.shift = 0\r\n\t\tself.start_pos = start_pos\r\n\t\tself.goal_pos = goal_pos",
"def _is_all_direct_next_moves_blocked(self, reference_board=None):\n # Use untraversed board if none is specified\n if reference_board is None:\n reference_board = BoardPath._untraversed_board\n\n # Case #1 - Goal and Current Location in the Same Row\n if self._current_loc.get_row() == self._goal_loc.get_row():\n # Case 1A - Need to move left but path is blocked\n if self._current_loc.get_column() > self._goal_loc.get_column() and\\\n not self.is_move_valid(\"l\", reference_board):\n return True\n # Case 1B - Need to move left but path is blocked\n elif self._current_loc.get_column() < self._goal_loc.get_column() and\\\n not self.is_move_valid(\"r\", reference_board):\n return True\n else:\n return False\n\n # Case #2 - Goal and Current Location in the Same Row\n if self._current_loc.get_column() == self._goal_loc.get_column():\n # Case 2A - Need to move left but path is blocked\n if self._current_loc.get_row() > self._goal_loc.get_row() and\\\n not self.is_move_valid(\"u\", reference_board):\n return True\n # Case 1B - Need to move left but path is blocked\n elif self._current_loc.get_row() < self._goal_loc.get_row() and\\\n not self.is_move_valid(\"d\", reference_board):\n return True\n else:\n return False\n # Case #3 - Goal and current location are diagonal from one another\n else:\n number_invalid_conditions = 0\n # Case 3A - Check if need to move down but it is blocked\n if self._current_loc.get_row() < self._goal_loc.get_row() \\\n and not self.is_move_valid(\"d\", reference_board):\n number_invalid_conditions += 1\n # Case 3B - Check if need to move up but it is blocked\n if self._current_loc.get_row() > self._goal_loc.get_row() \\\n and not self.is_move_valid(\"u\", reference_board):\n number_invalid_conditions += 1\n # Case 3C - Check if need to move right but it is blocked\n if self._current_loc.get_column() < self._goal_loc.get_column() \\\n and not self.is_move_valid(\"r\", reference_board):\n number_invalid_conditions += 1\n # Case 3D - Check if need to move left but it is blocked\n if self._current_loc.get_column() > self._goal_loc.get_column() \\\n and not self.is_move_valid(\"l\", reference_board):\n number_invalid_conditions += 1\n # Only two direct moves when need to move diagonal. If invalid\n # count equals two, then return true as condition met.\n if number_invalid_conditions == 2:\n return True\n return False"
] | [
"0.75298524",
"0.6561931",
"0.6387742",
"0.6317193",
"0.63150394",
"0.622506",
"0.60167783",
"0.59918606",
"0.5980918",
"0.59700096",
"0.5907589",
"0.59038836",
"0.5851159",
"0.5754915",
"0.5693167",
"0.5637678",
"0.56281024",
"0.5589967",
"0.5589809",
"0.5587563",
"0.5584952",
"0.55799997",
"0.5531569",
"0.5529437",
"0.5413856",
"0.54046357",
"0.53997827",
"0.5398029",
"0.5380442",
"0.53768235"
] | 0.78717625 | 0 |
Update next_waypoint based on current_pose and base_waypoints. Generate the list of the next LOOKAHEAD_WPS waypoints. Update velocity for them. Publish them to "/final_waypoints". | def update_and_publish(self):
# 1. Find next_waypoint based on ego position & orientation
if self._update_next_waypoint():
# 2. Generate the list of next LOOKAHEAD_WPS waypoints
num_base_wp = len(self.base_waypoints)
last_base_wp = num_base_wp-1
waypoint_idx = [idx % num_base_wp for idx in range(self.next_waypoint,self.next_waypoint+LOOKAHEAD_WPS)]
final_waypoints = [self.base_waypoints[wp] for wp in waypoint_idx]
# 3. If there is a red light ahead, update velocity for them
if self.stop_on_red:
# Start from original velocities
self.restore_velocities(waypoint_idx)
try:
red_idx = waypoint_idx.index(self.red_light_waypoint)
self.decelerate(final_waypoints, red_idx, self.stop_distance)
except ValueError:
# No red light available: self.red_light_waypoint is None or not in final_waypoints
red_idx = None
if debugging:
v = self.get_waypoint_velocity(final_waypoints, 0)
rospy.loginfo("Target velocity: %.1f, RL:%s wps ahead", v, str(red_idx))
# 3b. If we are close to the end of the circuit, make sure that we stop there
if self.force_stop_on_last_waypoint or self.base_wp_orig_v[-1] < 1e-5:
try:
last_wp_idx = waypoint_idx.index(last_base_wp)
self.decelerate(final_waypoints, last_wp_idx, 0)
except ValueError:
# Last waypoint is not one of the next LOOKAHEAD_WPS
pass
# 4. Publish waypoints to "/final_waypoints"
self.publish_msg(final_waypoints) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _update_next_waypoint(self):\n if not self.base_waypoints:\n #rospy.logwarn(\"Waypoints not updated: base_waypoints not available yet.\")\n return False\n\n if not self.current_pose:\n #rospy.logwarn(\"Waypoints not updated: current_pose not available yet.\")\n return False\n\n # Get ego car variables\n ego_x = self.current_pose.position.x\n ego_y = self.current_pose.position.y\n ego_theta = math.atan2(self.current_pose.orientation.y, self.current_pose.orientation.x)\n\n # If I do have a next_waypoint, I will start looking from it, and stop looking\n # as soon as get a local minimum. Otherwise I will do a full search across the whole track\n t = time.time()\n wp = None\n yaw = 0\n dist = 1000000 # Long number\n if self.next_waypoint:\n idx_offset = self.next_waypoint\n full_search = False\n else:\n idx_offset = 0\n full_search = True\n num_base_wp = len(self.base_waypoints)\n\n for i in range(num_base_wp):\n idx = (i + idx_offset)%(num_base_wp)\n wp_x = self.base_waypoints[idx].pose.pose.position.x\n wp_y = self.base_waypoints[idx].pose.pose.position.y\n wp_d = math.sqrt((ego_x - wp_x)**2 + (ego_y - wp_y)**2)\n\n if wp_d < dist:\n dist = wp_d\n wp = idx\n if debugging:\n # Angle betwee car heading and waypoint heading\n yaw = math.atan2(wp_y - ego_y, wp_x - ego_x) - ego_theta\n elif not full_search:\n # Local minimum. If the waypoint makes sense, just use it and break\n if dist < max_local_distance:\n break; # Found a point\n else:\n # Seem to have lost track. Do search again\n rospy.logwarn(\"Waypoint updater lost track (local min at %.1f m after %d waypoints). Going back to full search.\", dist, i+1)\n full_search = True\n\n if debugging:\n rospy.loginfo(\"New next wp [%d] -> (%.1f,%.1f) after searching %d points in %fs\", wp, dist * math.cos(yaw), dist * math.sin(yaw), i, time.time()-t)\n\n if wp is None:\n rospy.logwarn(\"Waypoint updater did not find a valid waypoint\")\n return False\n\n self.next_waypoint = wp\n return True",
"def publish_waypoints(self):\n\n # Make a lane message\n lane = Lane()\n\n # Get closest waypoint index\n closest_idx = self.get_closest_waypoint_idx()\n\n # Get farthest waypoint index\n farthest_idx = closest_idx + LOOKAHEAD_WPS\n\n # Slice to get the upcoming waypoints\n upcoming_waypoints = self.waypoints.waypoints[closest_idx:farthest_idx]\n\n # If no stopline detected or stopline is beyond farthest index...\n if (self.stopline_waypoint_idx == -1) or (self.stopline_waypoint_idx >= farthest_idx):\n\n # Follow the upcoming waypoints\n lane.waypoints = upcoming_waypoints\n\n else:\n\n # Create a list to hold modified upcoming waypoints\n temp = []\n\n # Find the relative stopline index within the upcoming waypoints\n # Back off by two waypoints so that front of car stays behind\n # stopline.\n stop_idx = max(self.stopline_waypoint_idx-closest_idx-2, 0)\n\n # Get the deceleration velocities at each upcoming waypoint\n velocities = self.deceleration_velocities(upcoming_waypoints, stop_idx)\n\n # For each upcoming waypoint...\n for i, wp in enumerate(upcoming_waypoints[:-1]):\n\n # Create a new waypoint\n p = Waypoint()\n\n # Dupicate the pose of the existing waypoint\n p.pose = wp.pose\n\n # Limit current velocities to decelration velocities\n p.twist.twist.linear.x = min(velocities[i], p.twist.twist.linear.x)\n\n # Add the modified waypoint to the list\n temp.append(p)\n\n # Follow the modified upcoming waypoints\n lane.waypoints = temp\n\n # Publish the lane message\n self.final_waypoints_pub.publish(lane)",
"def update_waypoints(self, idx):\n # Create the header and set its timestamp\n header = Header()\n header.stamp = rospy.Time.now()\n\n msg = Lane()\n msg.header = header\n # Keep the copy of base_waypoints so that you don't have to recompute them\n # we are using the same base_waypoints when we get multiple messages for stopping\n # at a stopline.\n base_waypoints = self.__base_waypoints[idx: idx + LOOKAHEAD_WPS]\n msg.waypoints = base_waypoints\n # If you find out that one of the generated waypoints lies on a stop line\n # that we should be stopping at then start decelerating\n if self.__stopline_wp_idx != -1 and self.__stopline_wp_idx < (idx + LOOKAHEAD_WPS):\n rospy.logdebug('Planning to stop at '+str(self.__stopline_wp_idx)+' from total '+str(idx + LOOKAHEAD_WPS))\n msg.waypoints = self.__decelerate(base_waypoints, idx)\n\n self.final_waypoints_pub.publish(msg)",
"def waypoints_cb(self, msg):\n t = time.time()\n waypoints = msg.waypoints\n num_wp = len(waypoints)\n\n if self.base_waypoints and self.next_waypoint is not None:\n # Normally we assume that waypoint list doesn't change (or, at least, not\n # in the position where the car is located). If that happens, just handle it.\n if not self.is_same_waypoint(self.base_waypoints[self.next_waypoint],\n waypoints[self.next_waypoint]):\n self.next_waypoint = None # We can't assume previous knowledge of waypoint\n self.base_waypoints = None # Just for debugging. Will be updated later\n rospy.logwarn(\"Base waypoint list changed\")\n else:\n # No change. We could probably return here.\n pass\n\n \"\"\"\n # -- Uncomment for debugging\n # Stamp waypoint index in PoseStamped and TwistStamped headers of internal messages\n for idx in range(len(waypoints)):\n waypoints[idx].pose.header.seq = idx\n waypoints[idx].twist.header.seq = idx\n \"\"\"\n\n self.base_wp_orig_v = [self.get_waypoint_velocity(waypoints, idx) for idx in range(num_wp)]\n\n if debugging and not self.base_waypoints:\n dist = self.distance(waypoints, 0, num_wp-1)\n rospy.loginfo(\"Received: %d waypoints, %.1f m, %.1f m/wp in t=%f\", num_wp, dist, dist/num_wp, time.time()-t)\n\n self.base_waypoints = waypoints\n\n if self.unsubscribe_base_wp:\n self.base_wp_sub.unregister()",
"def nextWaypoint(self, pose):\n #DONE implement\n location = pose.position\n dist = 100000.\n dl = lambda a, b: math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2 + (a.z-b.z)**2)\n nwp = 0\n for i in range(len(self.waypoints)):\n d1 = dl(location, self.waypoints[i].pose.pose.position)\n if dist > d1:\n nwp = i\n dist = d1\n x = self.waypoints[nwp].pose.pose.position.x\n y = self.waypoints[nwp].pose.pose.position.y\n heading = np.arctan2((y-location.y), (x-location.x))\n angle = np.abs(self.theta-heading)\n if angle > np.pi/4.:\n nwp += 1\n if nwp >= len(self.waypoints):\n nwp = 0\n return nwp",
"def update_wp_position(self, event):\n wp = -1\n cur_pos = np.array(\n [self.global_pose.latitude, self.global_pose.longitude])\n for idx, waypoint in enumerate(self.waypoints):\n temp = np.array([waypoint['lat'], waypoint['long']])\n alt_diff = abs(self._rel_alt[-1] - waypoint['rel_alt'])\n if idx == 0 and (np.linalg.norm(cur_pos - temp) < self._radius):\n wp = idx\n break\n elif (np.linalg.norm(cur_pos - temp) <\n self._radius) and (alt_diff < self._alt_radius):\n wp = idx\n break\n self._current_wp = wp",
"def update_waypoints(self, waypoints, start_time=None):\n self.control_instance.update_waypoints(waypoints)\n if start_time:\n self._last_waypoint_command = start_time",
"def __spur_on_if_needed(self):\n if len(self.waypoints) < 2:\n return\n next_speed = (get_waypoint_speed(self.waypoints[0]) +\n get_waypoint_speed(self.waypoints[1])) / 2.0\n set_waypoint_speed(self.waypoints[0], next_speed)",
"def update(self):\n\n # If the agent has already reached the\n # last waypoint it doesn't need to update\n if self.finished:\n return True\n\n # Skip if the proxy don't have any [new] data\n if (self.pp.info.datatime == 0) or \\\n (self.pp.info.datatime == self.last_read):\n return False\n\n self.last_read = self.pp.info.datatime\n\n # If this is the first update then head toward the first waypoint\n if self.first_update:\n self.pp.set_cmd_pose(self.active_waypoint['x'],\n self.active_waypoint['y'],\n self.get_heading({'x': self.pp.px, 'y': self.pp.py}, self.active_waypoint),\n 1)\n self.first_update = False\n return False\n\n # Calculate how far the agent is from its current waypoint\n dist = math.hypot(self.pp.px - self.active_waypoint['x'],\n self.pp.py - self.active_waypoint['y'])\n\n # Has it reached it yet?\n if dist < self.waypoint_distance_tolerance:\n\n # If all waypoints have been reached, stop the agent and return True\n if (self.active_waypoint_index + 1) >= len(self.waypoints):\n self.pp.set_cmd_vel(0.0, 0.0, 0.0, 0)\n self.pp.enable(False) # redundant?\n self.finished = True\n return True\n\n # Otherwise select the next waypoint\n prev_waypoint = self.active_waypoint\n self.active_waypoint_index += 1\n self.active_waypoint = self.waypoints[self.active_waypoint_index]\n\n # ...and drive to it\n self.pp.set_cmd_pose(self.active_waypoint['x'],\n self.active_waypoint['y'],\n self.get_heading(prev_waypoint, self.active_waypoint),\n 1)\n\n # Still have waypoints to visit\n return False",
"def make_trajectory(self, NextwpPosition, NextwpOrientation):\n d = np.linalg.norm(self.CurrentPosition - NextwpPosition)\n inter_segment_distance = 1\n self.no_of_segments = 1+int(d//inter_segment_distance)\n \n\n # enter sequence of waypoints: no of points should be self.no_of_segments+1\n x_wp = np.linspace(self.CurrentPosition[0], NextwpPosition[0], self.no_of_segments+1)\n y_wp = np.linspace(self.CurrentPosition[1], NextwpPosition[1], self.no_of_segments+1)\n z_wp = np.linspace(self.CurrentPosition[2], NextwpPosition[2], self.no_of_segments+1)\n \n # add intial and final condiions vel, acc, jerk\n x_ic = np.array([0, 0, 0])\n x_fc = np.array([0, 0, 0])\n x0 = np.array([x_wp[0], x_ic[0], x_ic[1], x_ic[2]])\n xT = np.array([x_wp[-1], x_fc[0], x_fc[1], x_fc[2]])\n\n y_ic = np.array([0, 0, 0])\n y_fc = np.array([0, 0, 0])\n y0 = np.array([y_wp[0], y_ic[0], y_ic[1], y_ic[2]])\n yT = np.array([y_wp[-1], y_fc[0], y_fc[1], y_fc[2]])\n \n z_ic = np.array([0, 0, 0])\n z_fc = np.array([0, 0, 0])\n z0 = np.array([z_wp[0], z_ic[0], z_ic[1], z_ic[2]])\n zT = np.array([z_wp[-1], z_fc[0], z_fc[1], z_fc[2]])\n\n path = [np.sqrt((x_wp[i]-x_wp[i-1])**2 + (y_wp[i]-y_wp[i-1])**2 + (z_wp[i]-z_wp[i-1])**2) for i in range(1, self.no_of_segments+1, 1)]\n\n \n T = []; T.insert(0, 0)\n T.insert(1, T[-1] + path[0]/self.reduced_speed)\n for i in range(1, len(path)-1, 1):\n T.append(T[-1] + path[i]/self.average_speed)\n T.insert(len(T)+1, T[-1]+path[-1]/self.reduced_speed) \n\n\n\n\n #T = []; T.insert(0, 0) # insert 0 at 0 position\n #for i in range(self.no_of_segments): \n # T.append(T[-1]+path[i]/self.average_speed)\n\n r = self.r\n N = 1 + self.N # because number of terms in a polynomial = degree+1\n\n QQ = []; AA_inv = []\n\n for i in range(self.no_of_segments): \n q = self.construct_Q(N, r, T[i], T[i+1])\n a = self.construct_A(N, r, T[i], T[i+1])\n a_inv = scipy.linalg.pinv(a)\n QQ = block_diag(QQ, q)\n AA_inv = block_diag(AA_inv, a_inv)\n \n order = 2*r*self.no_of_segments\n R = np.dot(AA_inv.T, np.dot(QQ, AA_inv))\n \n bx = self.construct_b(x0, xT)\n by = self.construct_b(y0, yT)\n bz = self.construct_b(z0, zT)\n\n m = Model(\"qp\")\n order = 2*r*self.no_of_segments\n dx = m.addVars(order, lb = -GRB.INFINITY, ub = GRB.INFINITY, vtype=GRB.CONTINUOUS, name=\"dx\")\n dy = m.addVars(order, lb = -GRB.INFINITY, ub = GRB.INFINITY, vtype=GRB.CONTINUOUS, name=\"dy\") \n dz = m.addVars(order, lb = -GRB.INFINITY, ub = GRB.INFINITY, vtype=GRB.CONTINUOUS, name=\"dz\") \n\n # making objective using quicksum, takes a lot of time \n #obj1 = quicksum(dx[i] * quicksum(R[i][j] * dx[j] for j in range(order)) for i in range(order))\n #obj2 = quicksum(dy[i] * quicksum(R[i][j] * dy[j] for j in range(order)) for i in range(order))\n #obj3 = quicksum(dz[i] * quicksum(R[i][j] * dz[j] for j in range(order)) for i in range(order))\n \n # using LinExpr for the second expression is significantly faster \n obj1 = quicksum(dx[i] * LinExpr([(R[i][j], dx[j]) for j in range(order)]) for i in range(order))\n obj2 = quicksum(dy[i] * LinExpr([(R[i][j], dy[j]) for j in range(order)]) for i in range(order))\n obj3 = quicksum(dz[i] * LinExpr([(R[i][j], dz[j]) for j in range(order)]) for i in range(order))\n obj = obj1 + obj2 + obj3\n j = 0\n for i in range(order): \n if i < r: \n m.addConstr(dx[i] == bx[i])\n m.addConstr(dy[i] == by[i])\n m.addConstr(dz[i] == bz[i])\n elif i >= order-r: \n m.addConstr(dx[i] == bx[r+j])\n m.addConstr(dy[i] == by[r+j])\n m.addConstr(dz[i] == bz[r+j])\n j += 1\n \n c = 1 # counter\n for i in range(r, order-2*r, 
2*r): \n #m.addConstr(dx[i] == self.x_wp[c])\n #m.addConstr(dy[i] == self.y_wp[c])\n #m.addConstr(dz[i] == self.z_wp[c])\n m.addConstr(dx[i] <= x_wp[c] + 0.2)\n m.addConstr(dx[i] >= x_wp[c] - 0.2)\n m.addConstr(dy[i] <= y_wp[c] + 0.2)\n m.addConstr(dy[i] >= y_wp[c] - 0.2)\n m.addConstr(dz[i] <= z_wp[c] + 0.2)\n m.addConstr(dz[i] >= z_wp[c] - 0.2)\n c = c+1\n for j in range(r): \n m.addConstr(dx[i+j] == dx[i+j+r])\n m.addConstr(dy[i+j] == dy[i+j+r])\n m.addConstr(dz[i+j] == dz[i+j+r])\n #if j ==2: \n # m.addConstr(dx[i+j] == 2.0)\n\n m.setObjective(obj, GRB.MINIMIZE)\n #m.write('model.lp')\n m.setParam('OutputFlag', 0)\n m.setParam('PSDtol', 1e-1)\n m.optimize()\n\n\n runtime = m.Runtime\n\n\n x_coeff = [dx[i].X for i in range(order)]\n y_coeff = [dy[i].X for i in range(order)]\n z_coeff = [dz[i].X for i in range(order)]\n\n Dx = np.asarray(x_coeff)[np.newaxis].T\n Dy = np.asarray(y_coeff)[np.newaxis].T \n Dz = np.asarray(z_coeff)[np.newaxis].T \n pcx = np.dot(AA_inv, Dx); pcy = np.dot(AA_inv, Dy); pcz = np.dot(AA_inv, Dz)\n\n\n poly_coeff_x = pcx.T.ravel().tolist()\n poly_coeff_y = pcy.T.ravel().tolist()\n poly_coeff_z = pcz.T.ravel().tolist()\n\n return poly_coeff_x, poly_coeff_y, poly_coeff_z, T, time.time()\n #self.publish(poly_coeff_x, poly_coeff_y, poly_coeff_z)",
"def _get_next_waypoint(self, tolerance_step):\n print('\\nGetting new nav plan.')\n\n for i in range(4):\n try:\n self.plan = self.swarmie.get_plan(\n self.goal,\n tolerance=self.tolerance,\n use_home_layer=self.avoid_home\n )\n break # plan received\n except rospy.ServiceException:\n print('ServiceException.')\n if i < 3:\n print('Expanding tolerance.')\n self.tolerance += tolerance_step\n else:\n raise # tried 3 times, we give up\n\n print('Received nav plan.')\n pose = self.plan.plan.poses[0]\n\n return Point(x=pose.pose.position.x, y=pose.pose.position.y)",
"def waypoints_cb(self, waypoints):\n # This callback should be called only once, with the list of waypoints not yet initialised.\n assert self.waypoints is None\n\n for wp in waypoints.waypoints:\n wp.twist.twist.linear.x = 9.\n\n self.waypoints = waypoints.waypoints # No need to guarantee mutual exclusion in accessing this data member\n\n # Now that the waypoints describing the track have been received, it is time to subscribe to pose updates.\n rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)\n rospy.Subscriber('/current_velocity', TwistStamped, self.current_velocity_cb)\n rospy.Subscriber('/traffic_waypoint', Int32, self.traffic_cb)\n rospy.Subscriber('/vehicle/dbw_enabled', Bool, self.DBW_enabled_cb)",
"def move_to(self, waypoint):\n self.set_final_wp(waypoint)\n self.go()\n currPos = np.asarray(self.rexarm.get_positions())\n while(np.linalg.norm(np.asarray(waypoint) - currPos) > 0.15):\n time.sleep(0.01)",
"def waypoint_callback(self, wp):\n if self.trajectory_constructed == False: \n NextwpPosition = np.array([wp.position.x, wp.position.y, wp.position.z])\n NextwpOrientation = np.array([wp.orientation.x, wp.orientation.y, wp.orientation.z, wp.orientation.w])\n self.pc_x, self.pc_y, self.pc_z, self.seg_times, self.traj_t0 = self.make_trajectory(NextwpPosition, NextwpOrientation) \n self.trajectory_constructed = True",
"def translate_waypoint(self, vector: Sequence[float], n_steps: int):\n for component in range(len(self.coordinates)):\n self.waypoint_vector[component] += vector[component] * n_steps",
"def move2goal(self):\n \n global points, point, point_old, distance_tolerance, trigger, start\n\n goal_pose = Pose()\n\n # Get the input from the user.\n goal_pose.x = points[point][0] # float(input(\"Set your x goal: \"))\n goal_pose.y = points[point][1] # float(input(\"Set your y goal: \"))\n\n vel_msg = Twist()\n\n data = [['nameservers','panel'], ['nameservers','panel']]\n\n file_name2 = \"/home/kmro/wr_ws/src/zad2_package/short_distances/distances-p%d\" % point\n short_distances = open(file_name2, \"w\")\n \n file_name1 = \"/home/kmro/wr_ws/src/zad2_package/distances/distances-p%d\" % point\n all_distances_file = open(file_name1, \"w\")\n\n val = \"dx%d\\t\\t\" % (point-1) + \"dy%d\\t\\t\" % (point-1) + \"dx%d\\t\\t\" % point + \"dy%d\\n\" % point \n short_distances.write(str(val))\n\n val = \"dx\\t\\tdy\"\n for i in range(22):\n val = val + \"\\t\\tdx%d\\t\\t\" % i + \"dy%d\" % i \n all_distances_file.write(str(val))\n\n while self.euclidean_distance(goal_pose) >= distance_tolerance:\n\n # Porportional controller.\n # https://en.wikipedia.org/wiki/Proportional_control\n\n # Linear velocity in the x-axis.\n vel_msg.linear.x = self.linear_vel(goal_pose, vel_mult)\n vel_msg.linear.y = 0\n vel_msg.linear.z = 0\n\n # Angular velocity in the z-axis.\n vel_msg.angular.x = 0\n vel_msg.angular.y = 0\n vel_msg.angular.z = self.angular_vel(goal_pose, rot_mult)\n\n # Publishing our vel_msg\n self.velocity_publisher.publish(vel_msg)\n\n # Print results to files\n if point_old != point:\n print(\"point = \", point)\n point_old = point\n if point > 0:\n val = \"{:.3f}\\t\".format(points[point-1][0] - self.pose.x)\n short_distances.write(str(val))\n # print(val, end=' ')\n val = \"{:.3f}\\t\".format(points[point-1][1] - self.pose.y)\n short_distances.write(str(val))\n # print(val, end=' ')\n val = \"{:.3f}\\t\".format(points[point][0] - self.pose.x)\n short_distances.write(str(val))\n # print(val, end=' ')\n val = \"{:.3f}\\t\".format(points[point][1] - self.pose.y)\n short_distances.write(str(val))\n # print(val, end=' ')\n if trigger == True:\n smallest_distances.append(((points[point-1][0] - self.pose.x)**2 + (points[point-1][1] - self.pose.y)**2)**0.5)\n trigger = False\n short_distances.write(\"\\n\")\n\n val = \"{:.3f}\\t\".format(goal_pose.x - self.pose.x)\n all_distances_file.write(str(val))\n val = \"{:.3f}\\t\".format(goal_pose.y - self.pose.y)\n all_distances_file.write(str(val))\n for i in range(1,len(points)):\n val = \"{:.3f}\\t\".format(points[i-1][0] - self.pose.x)\n all_distances_file.write(str(val))\n # print(val, end=' ')\n val = \"{:.3f}\\t\".format(points[i-1][1] - self.pose.y)\n all_distances_file.write(str(val))\n # print(val, end=' ')\n all_distances_file.write(\"\\n\")\n\n # Publish at the desired rate.\n self.rate.sleep()\n \n short_distances.close()\n all_distances_file.close()\n\n # If it was not the last goal, then move to the second one\n if point < len(points) - 1:\n trigger = True\n point = point + 1\n goal_pose.x = points[point][0]\n goal_pose.y = points[point][1]\n vel_msg.linear.x = self.linear_vel(goal_pose, vel_mult)\n vel_msg.angular.z = self.angular_vel(goal_pose, rot_mult)\n self.move2goal()\n # Stopping our robot after the movement is over.\n else:\n vel_msg.linear.x = 0\n vel_msg.angular.z = 0\n suma = 0\n i = 0\n for j in smallest_distances:\n print(\"p%d: \" % i , \"%.3f error\" % j)\n i = i + 1\n print(\"error_sum(22) = %.3f\" % sum(smallest_distances))\n end = time.time()\n print(\"Elapsed time: \", end - start)\n exit()\n \n point = point + 1\n 
self.velocity_publisher.publish(vel_msg)\n\n # If we press control + C, the node will stop.\n rospy.spin()",
"def run_step(self, target_speed, waypoint, velocity, vehicle_location, vehicle_rotation ):\n throttle = self._lon_controller.run_step(target_speed, velocity)\n steering = self._lat_controller.run_step(waypoint, vehicle_location, vehicle_rotation)\n\n brake = 0.0 \n\n\n return steering, throttle, brake",
"def get_closest_waypoint(self, pose):\n #TODO implement - Done\n # Iterate the base_waypoints' x value with current position's x value and find the closest\n # match, and pick that waypoint location index. \n min_idx = 0\n min_dist = None\n cur_x = pose.position.x\n cur_y = pose.position.y\n if self.waypoints is not None:\n for i, wp in enumerate(self.waypoints):\n wp_x = wp.pose.pose.position.x\n wp_y = wp.pose.pose.position.y\n dist = np.sqrt((cur_x - wp_x)**2 + (cur_y - wp_y)**2)\n if min_dist is None or min_dist >= dist:\n min_dist = dist\n min_idx = i\n \n # check whether the identified index is behind the current position, if so, move it by 1 index\n # https://gamedev.stackexchange.com/questions/75072/how-can-i-compare-two-quaternions-for-logical-equality\n # rospy.logwarn('min_idx before = %d', min_idx)\n eps = 1e-12\n if self.waypoints is not None:\n q1 = self.waypoints[min_idx].pose.pose.orientation\n q2 = pose.orientation\n q1_a = np.array([q1.x, q1.y, q1.z, q1.w])\n q2_a = np.array([q2.x, q2.y, q2.z, q2.w])\n direction = abs(np.dot(q1_a, q2_a))\n #rospy.logwarn('calculated direction %f', direction)\n wp_x = self.waypoints[min_idx].pose.pose.position.x\n if direction > 1-eps:\n if wp_x < cur_x:\n min_idx += 1\n else:\n min_idx -= 1\n else:\n if wp_x < cur_x:\n min_idx -= 1\n else:\n min_idx += 1\n\n # rospy.logwarn('min_idx after = %d', min_idx)\n return min_idx",
"def __init__(self):\n\n # Set a node name - something relevant\n rospy.init_node('waypoint_updater')\n\n # Most recent pose\n self.pose = None\n\n # Map waypoint list \n self.waypoints = None\n\n # Map waypoint list xy only \n self.waypoints_2d = None\n\n # Map waypoint list xy only as KDTree\n self.waypoint_tree = None\n\n # Index at which to stop the vehicle\n # Negative one is a sentinel meaning no stop is required\n self.stopline_waypoint_idx = -1\n\n # Add subscriptions and handlers for relevant messages\n rospy.Subscriber('/base_waypoints', Lane, self.base_waypoints_cb)\n rospy.Subscriber('/current_pose', PoseStamped, self.current_pose_cb)\n rospy.Subscriber('/traffic_waypoint', Int32, self.traffic_waypoint_cb)\n\n # Create publisher for final waypoints\n self.final_waypoints_pub = rospy.Publisher('/final_waypoints', Lane, queue_size=1)\n\n # Start loop\n self.loop()",
"def update_total_speed_input_step(self,curr_v):\n \n tot_speed_input_east=np.dot(self.W_speed_east,self.speed_inputs_east)/self.N_e\n tot_speed_input_west=np.dot(self.W_speed_west,self.speed_inputs_west)/self.N_e\n tot_speed_input_north=np.dot(self.W_speed_north,self.speed_inputs_north)/self.N_e\n tot_speed_input_south=np.dot(self.W_speed_south,self.speed_inputs_south)/self.N_e\n\n self.tot_speed_input_all_padded[:self.N_e,0]=\\\n tot_speed_input_east+tot_speed_input_west+\\\n tot_speed_input_north+tot_speed_input_south\n \n if self.use_eight_directions is True:\n tot_speed_input_north_east=np.dot(self.W_speed_north_east,\n self.speed_inputs_north_east)/self.N_e\n tot_speed_input_north_west=np.dot(self.W_speed_north_west,\n self.speed_inputs_north_west)/self.N_e\n tot_speed_input_south_east=np.dot(self.W_speed_south_east,\n self.speed_inputs_south_east)/self.N_e\n tot_speed_input_south_west=np.dot(self.W_speed_south_west,\n self.speed_inputs_south_west)/self.N_e\n \n self.tot_speed_input_all_padded[:self.N_e,0]+=\\\n tot_speed_input_north_east+tot_speed_input_north_west+\\\n tot_speed_input_south_east+tot_speed_input_south_west\n \n else:\n \n # diagonal move with four directions\n if abs(curr_v[0])>0 and abs(curr_v[1])>0:\n self.tot_speed_input_all_padded[:self.N_e,0]*=.5",
"def getNextWaypoint(self, request, context):\n\n waypointNumber = self.vehicle.commands.next -1\n missionlist = self.vehicle.waypoints\n if len(missionlist)==0:\n waypointNumber = -1\n dronePosition = droneconnect_pb2.Position(lat = float(0),\n lon = float(0),\n gpsAltitude = float(0))\n else:\n waypoint = missionlist[waypointNumber]\n dronePosition = droneconnect_pb2.Position(lat = float(waypoint[0]),\n lon = float(waypoint[1]),\n gpsAltitude = float(waypoint[2]))\n \n return droneconnect_pb2.IndexedPosition(position = dronePosition, index = waypointNumber)",
"def update(self):\n self.updateCount = self.updateCount + 1\n if self.updateCount > self.updateCountMax:\n\n # update previous positions\n for i in range(self.length - 1, 0, -1):\n self.x[i] = self.x[i - 1]\n self.y[i] = self.y[i - 1]\n\n # update position of player : party lead\n if self.direction == 0:\n self.x[0] = self.x[0] + self.step\n if self.direction == 1:\n self.x[0] = self.x[0] - self.step\n if self.direction == 2:\n self.y[0] = self.y[0] - self.step\n if self.direction == 3:\n self.y[0] = self.y[0] + self.step\n\n self.updateCount = 0",
"def base_waypoints_cb(self, waypoints):\n\n # Save the waypoint list\n self.waypoints = waypoints\n\n # If waypoints_2d hasn't been initialized...\n if not self.waypoints_2d:\n\n # Extract xy coordinates from the waypoint list\n self.waypoints_2d = [[waypoint.pose.pose.position.x, waypoint.pose.pose.position.y] for waypoint in waypoints.waypoints]\n\n # Construct a KDTree from the xy coordinate list to allow fast lookup \n self.waypoint_tree = KDTree(self.waypoints_2d)",
"def base_waypoints_cb(self, waypoints):\n\n # Save the waypoint list\n self.waypoints = waypoints\n\n # If waypoints_2d hasn't been initialized...\n if not self.waypoints_2d:\n\n # Extract xy coordinates from the waypoint list\n self.waypoints_2d = [[waypoint.pose.pose.position.x, waypoint.pose.pose.position.y] for waypoint in waypoints.waypoints]\n\n # Construct a KDTree from the xy coordinate list to allow fast lookup \n self.waypoint_tree = KDTree(self.waypoints_2d)",
"def next_gps(self):\n \n return Waypoint(0.0, 0.0)",
"def waypoints_cb(self, msg):\n rospy.loginfo(rospy.get_name() + ': waypoints received')\n self.base_waypoints = msg.waypoints",
"def update(self):\n self.logger.debug(\" %s [GenerateNextPose::update()]\" % self.name)\n\n # This behavior will always be successfull. But if it that was not the\n # case, it would return failure\n # self.feedback_message = \"Some failure message!\"\n # return py_trees.common.Status.FAILURE\n\n # If the behavior could be unning for a while, we would have to return\n # py_trees.common.Status.RUNNING, and not block its execution.\n\n # In this example we just need to create the position and orientation\n # keys corresponding to the next desired pose.\n self.curr_waypoint = (self.curr_waypoint + 1) % len(self.waypoints)\n self.blackboard.set(self.pos_key,\n action.Move2Pos.Goal(target_position=Point(\n x=self.waypoints[self.curr_waypoint].x,\n y=self.waypoints[self.curr_waypoint].y,\n z=0.0)))\n self.blackboard.set(\n self.angle_key,\n action.Rotate2Angle.Goal(\n target_orientation=self.waypoints[self.curr_waypoint].theta))\n self.feedback_message = \"New position and orientation generated!\"\n return py_trees.common.Status.SUCCESS",
"def on_global_trajectory(self, msg):\n self._logger.debug('@{}: global trajectory has {} waypoints'.format(\n msg.timestamp, len(msg.data)))\n if len(msg.data) > 0:\n # The last waypoint is the goal location.\n self._goal_location = msg.data[-1][0].location\n else:\n # Trajectory does not contain any waypoints. We assume we have\n # arrived at destionation.\n self._goal_location = self._vehicle_transform.location\n assert self._goal_location, 'Planner does not have a goal'\n self._waypoints = deque()\n for waypoint_option in msg.data:\n self._waypoints.append(waypoint_option[0])\n self._prev_waypoints = self._waypoints",
"def move2goal(self):\n vel_msg = Twist()\n\n # Linear velocity in the x-axis.\n vel_msg.linear.x = 0.4 # m/s\n vel_msg.linear.y = 0\n vel_msg.linear.z = 0\n\n # Angular velocity in the z-axis.\n vel_msg.angular.x = 0\n vel_msg.angular.y = 0\n vel_msg.angular.z = 1.5 # rad/s\n\n # Starting point reference\n goal_x = 1.0 \n goal_y = 1.0\n x_ref = 1.0\n y_ref = 1.0\n\n # Previous Reference\n x_prev_ref = 0.0\n y_prev_ref = 0.0\n theta_prev_ref = self.theta\n vrefA = 0.5\n wrefA = 0.0\n \n i = 0\n tPx, tPy, tPTheta = self.initiate_trajectory(\n x_ref, y_ref, vel_msg, \n x_prev_ref, y_prev_ref, \n theta_prev_ref, vrefA, wrefA\n )\n\n x_prev_ref = tPx[0]\n y_prev_ref = tPy[0]\n theta_prev_ref = tPTheta[0]\n\n print(f'X TRAJECTORY: {tPx}\\nY TRAJECTORY: {tPy}\\nTHETA TRAJ: {tPTheta}')\n print(f'ACTUAL THETA: {self.theta}')\n\n while not rospy.is_shutdown():\n \n if i >= 8:\n i = 0\n\n x_ref = goal_x\n y_ref = goal_y\n\n tPx, tPy, tPTheta = self.initiate_trajectory(\n x_ref, y_ref, vel_msg, \n x_prev_ref, y_prev_ref, \n theta_prev_ref, vrefA, wrefA\n )\n # inputRef = ControllerInput(\n # xref=x_ref,\n # yref=y_ref,\n # RstateX=self.x_position,\n # RstateY=self.y_position,\n # RstateTheta=self.theta,\n # RstateVelocity=vel_msg.linear.x,\n # RstateW=vel_msg.angular.z,\n # xrefA=x_prev_ref,\n # yrefA=y_prev_ref,\n # thetarefA=theta_prev_ref,\n # vrefA=vrefA,\n # wrefA=wrefA\n # )\n\n # rospy.loginfo(f'X: {self.x_position} \\tY: {self.y_position}\\t Theta: {self.theta} ')\n # nmpc = NMPC_Controller(inputRef)\n # tPx, tPy, tPTheta = nmpc.test_create_mini_path()\n\n # print(f'X TRAJECTORY: {tPx}\\nY TRAJECTORY: {tPy}\\nTHETA TRAJ: {tPTheta}')\n # print(f'ACTUAL THETA: {self.theta}')\n \n # new_v, new_w = nmpc.start_optmizer()\n # new_v = round(new_v, 4)\n # new_w = round(new_w, 4)\n\n # print(new_v, new_w)\n # rospy.loginfo(\n # f'X: {self.x_position}, Y: {self.y_position}, THETA: {self.theta}')\n \n # self.velocity_publisher.publish(vel_msg)\n # x_prev_ref = self.x_position\n # y_prev_ref = self.y_position\n # theta_prev_ref = self.theta\n # vrefA = vel_msg.linear.x\n # wrefA = vel_msg.angular.z\n \n\n # theta_prev_ref = self.theta\n # vel_msg.angular.z = 0.0\n\n\n '''Update the linear & angular velocity'''\n # vel_msg.linear.x = new_v\n # vel_msg.angular.z = new_w\n\n if i < 8:\n inputRef = ControllerInput(\n xref = tPx[i],\n yref = tPy[i],\n RstateX = self.x_position,\n RstateY = self.y_position,\n RstateTheta = self.theta,\n RstateVelocity = vel_msg.linear.x,\n RstateW = vel_msg.angular.z,\n xrefA = x_prev_ref,\n yrefA = y_prev_ref,\n thetarefA = theta_prev_ref,\n vrefA = vrefA,\n wrefA = wrefA\n )\n\n nmpc = NMPC_Controller(inputRef)\n new_v, new_w = nmpc.start_optmizer()\n new_v = round(new_v, 4)\n new_w = round(new_w, 4)\n\n print(f'(actual) X: {self.x_position}, Y: {self.x_position}, THETA: {self.theta}')\n print(f'(desired) X: {tPx[i]}, Y: {tPy[i]}')\n print(f'V: {vel_msg.linear.x}\\tW: {vel_msg.angular.z}')\n\n x_prev_ref = tPx[i-1]\n y_prev_ref = tPy[i-1]\n theta_prev_ref = tPTheta[i-1]\n vrefA = vel_msg.linear.x\n wrefA = vel_msg.angular.z\n\n vel_msg.linear.x = new_v\n vel_msg.angular.z = new_w\n # vel_msg.angular.z = 0.0\n\n print(f'index: {i}')\n\n distance = math.sqrt((self.x_position - tPx[i])**2 + (self.y_position - tPy[i])**2)\n if distance < 0.3:\n print(f'Distance: {distance}')\n i+=1\n\n\n self.velocity_publisher.publish(vel_msg)\n self.rate.sleep()\n\n rospy.spin()",
"def execute_waypoint_sequence(detail_of_trip):\n\n # rets (route_line, line_points)\n sliced_route_and_line_points = chunk_user_route(detail_of_trip)\n\n sliced_route = sliced_route_and_line_points[0]\n line_points = sliced_route_and_line_points[1]\n\n # Interpolate/Break into 1/10 segments\n segmented_points = interpolate_points(sliced_route, line_points)\n waypoints = find_crime_areas(segmented_points)\n\n # print \"segmented_points\", json.dumps(segmented_points, indent=2)\n print \"\\n\\n\\n\\n\" # compensating for the giant GET request\n return waypoints"
] | [
"0.7230159",
"0.6776315",
"0.67544454",
"0.6514871",
"0.6341936",
"0.6335558",
"0.633497",
"0.6204625",
"0.61543787",
"0.6134523",
"0.6099235",
"0.6051331",
"0.59626335",
"0.5945264",
"0.5943431",
"0.59124935",
"0.5906773",
"0.5842161",
"0.5803007",
"0.5779287",
"0.57633066",
"0.57331604",
"0.571748",
"0.571748",
"0.5710706",
"0.5702599",
"0.56983984",
"0.5646975",
"0.5622844",
"0.5612903"
] | 0.8069657 | 0 |
Restore original velocities of points | def restore_velocities(self, indexes):
for idx in indexes:
self.set_waypoint_velocity(self.base_waypoints, idx, self.base_wp_orig_v[idx]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_velocities(self):\r\n self.wx = np.copy(Turbine.wzero)\r\n self.wy = np.copy(Turbine.wzero)",
"def reset(self):\n self.t = 0.0\n self.last_t = None\n self.current_y = np.copy(self.start_y)\n self.current_yd = np.copy(self.start_yd)",
"def reset(self):\n self.x_pos1 = 0\n self.x_pos2 = self.x_pos1 + self.width\n self.y_pos = self.offset_y\n self.velocity = self.origin_velocity",
"def reset(self):\n self.position = self.initial_position\n self.velocity = [0, 0, 0]",
"def new_velocity(self):\n self.velocity = self.vafter",
"def _update_positions(self):\n self._velocities += self._accelerations * self.time_step\n self._positions += self._velocities * self.time_step",
"def reset(self):\n self._previous_v = 0\n self._previous_m = 0\n self._previous_shape = 0",
"def reset_state(self):\n self.y = np.copy(self.start)\n self.dy = np.zeros(self.n_dmps)\n self.ddy = self.ay * (self.by * (self.goal - self.y) - self.dy) + self.force[0]\n self.timestep = 0",
"def revolver(self):\r\n\t\tself.__revuelto=True",
"def apply_changes(self):\n self.x = self.buff_x\n self.y = self.buff_y\n self.buff_x = None\n self.buff_y = None",
"def resetPos(self):\n self.angle = self.startangle\n self.pos = []\n self.pos.extend(self.startpos)",
"def stop(self):\n self.change_x = 0\n self.change_y = 0",
"def teardown_simulate(self):\n self.positions = self.calibrated_positions",
"def restore(self, ev):\n self.phi = self.save_phi",
"def set_points(self):\n for p in range(len(self.points)):\n self.points[p] = self.points[p] + self.speeds[p]\n if self.points[p].x > SCREEN_DIM[0] or self.points[p].x < 0:\n self.speeds[p].x = -self.speeds[p].x\n if self.points[p].y > SCREEN_DIM[1] or self.points[p].y < 0:\n self.speeds[p].y = -self.speeds[p].y",
"def decelerate(self):\n self.decelerationRate=0.99 \n self.xMomentum=self.xMomentum*self.decelerationRate\n self.yMomentum=self.yMomentum*self.decelerationRate",
"def decimate(self, valid):\r\n self.points = self.points[valid]\r\n for obj in self.data: self.data[obj] = self.data[obj][valid]",
"def reset(self):\n\t\tself.offsets = self.start_off.copy()",
"def reload_positions(self):\n self._initial_position = load_pose_from_npz(self._target_filename,\n self._actuator_name, str(self._target_number), 'initial')\n self._target_position = load_pose_from_npz(self._target_filename,\n self._actuator_name, str(self._target_number), 'target')\n self._initial_image = load_data_from_npz(self._target_filename,\n self._actuator_name, str(self._target_number), 'initial',\n 'image', default=None)\n self._target_image = load_data_from_npz(self._target_filename,\n self._actuator_name, str(self._target_number), 'target',\n 'image', default=None)",
"def _new_velocities(self):\n opts = self.options\n # has shape (100, 1), as opposed to (100,) for multiplication with (100, 2)\n rand1, rand2 = [np.random.rand(len(self._pso_data.velocities))[:, None] for _ in range(2)]\n\n current_best_position = self.positions[np.argmin(self.scores)]\n\n inertia = opts['weight_inertia'] * self._pso_data.velocities\n cognition = opts['weight_cognition'] * rand1 * (self._pso_data.best_positions - self.positions)\n social = opts['weight_social'] * rand2 * (current_best_position - self.positions)\n\n new_velocities = inertia + cognition + social\n preliminary_positions = self.positions + new_velocities\n new_positions = self._clamp_into_bounds(preliminary_positions)\n\n return new_positions - self.positions",
"def reset(self) -> None:\n self._vector = self._original_vector.copy()",
"def reset_state(self):\n self.y = self.y0.copy()\n self.dy = jnp.zeros(self.n_dmps)\n self.ddy = jnp.zeros(self.n_dmps)\n self.cs.reset_state()",
"def reset_position(self):\n import interface\n\n print(\"Start restet position...\")\n\n sign = lambda x: int(x > 0) - int(x < 0) # Renvoi le signe de x (-1, 0, 1).\n fact_speed = 0.7 # On divise les vitesses.\n\n eps_angle = np.pi*20/180 # Tolerance angulaire. (en radian)\n eps_pos = 50 # Tolerance sur le carre centre autour du point d'arrive (en pxl).\n x0, y0 = 320, 230 # Point a atteindre.(en pxl)\n\n self.position, self.orientation = interface.get_position()\n\n # Calcul de l'angle entre barycentre de la voiture et point de depart.\n def get_alpha():\n \"\"\"\n Recupere l'angle entre l'axe horizontal et le vecteur position de la voiture.\n \"\"\"\n norm = np.sqrt((self.position[1] - y0)**2 + (self.position[0] - x0)**2)\n if norm:\n return np.arccos((self.position[0] - x0)/norm) * (1 - 2*(self.position[1] > y0))\n return 0\n\n control_angle = lambda a: (a+np.pi)%(2*np.pi) - np.pi\n\n # alpha : orientation souhaitee de la voiture pour retourner au point de depart (comprise entre -pi et +pi)\n\n # As long as we are not in the direction of the center, the car rotates on itself\n print(\"angle de la voiture en degre:\", self.orientation*180/np.pi)\n print(\"angle qui reste a faire:\", control_angle(np.pi - get_alpha() + self.orientation))\n print(\"\\tOrientation vers la cible....\")\n fact_bis = fact_speed\n while abs(control_angle(np.pi - get_alpha() + self.orientation)) > eps_angle:\n # while True:\n fact_bis *= 1.01\n # interface.move_wheel(\"l\", -0.4)\n # interface.move_wheel(\"r\", 0.4)\n interface.move_wheel(\"l\", -fact_bis*control_angle(np.pi + get_alpha() - self.orientation)/np.pi)\n interface.move_wheel(\"r\", fact_bis*control_angle(np.pi + get_alpha() - self.orientation)/np.pi)\n self.position, self.orientation = interface.get_position()\n print(\"Orientation: \", control_angle(np.pi - get_alpha() + self.orientation),\n \"position actuelle: \", self.position, self.orientation)\n # print(\"fact speed : \", fact_bis)\n # As long as we are not at the center, the car goes straight\n interface.move_wheel(\"\", 0)\n\n input(\"suite\")\n\n print(\"\\tavancer vers la cible\")\n while abs(x0 - self.position[0]) > eps_pos or abs(y0 - self.position[1]) > eps_pos:\n # print(abs(x0 - self.position[0]), abs(y0 - self.position[1]))\n print(\"Avancer vers la cible - distance\", 0.5*(np.sqrt((self.position[1] - y0)**2 + (self.position[0] - x0)**2) / norm))\n interface.move_wheel(\"\", (0.5*(np.sqrt((self.position[1] - y0)**2 + (self.position[0] - x0)**2) / norm)))\n self.position, self.orientation = interface.get_position()\n print(\"Avancer vers la cible - position : \", self.position, self.orientation)\n\n # As long as the the car is not facing the chosen direction, it rotates on itself\n interface.move_wheel(\"\", 0)\n print(\"\\torientation finale\")\n while abs(np.pi/2 - self.orientation) > eps_angle:\n print(\"Orientation finale - Angle : \", abs(np.pi/2 - self.orientation))\n interface.move_wheel(\"l\", -fact_speed*(0.5+0.5*(abs(abs(self.orientation)-np.pi/2))/np.pi))\n interface.move_wheel(\"r\", fact_speed*(0.5+0.5*(abs(abs(self.orientation)-np.pi/2))/np.pi))\n self.position, self.orientation = interface.get_position()\n\n interface.move_wheel(\"\", 0)\n print(\"\\tterminated\")",
"def reset_position(self):\n self.translate_to_point_O()\n\n # inverse rotation:\n rotation_matrix = np.stack(\n (self.pcs.i_hat, self.pcs.j_hat, self.pcs.k_hat), axis=0\n )\n\n self.rotate(rotation_matrix)",
"def currentstate_callback(self, odom):\n self.CurrentPosition = np.array([odom.pose.pose.position.x, odom.pose.pose.position.y, odom.pose.pose.position.z])\n self.CurrentVelocity = np.array([odom.twist.twist.linear.x, odom.twist.twist.linear.y, odom.twist.twist.linear.z])",
"def update_pose(self, data):\n self.pose = data\n \n self.pose.y = round(self.pose.y, 6)",
"def lose_point(self, points):\n self.points -= points\n print(f\"Oh no! You have lost {points} point(s)! That means you now have {self.points} points!\")",
"def reset(self):\n self.resetPos()\n self.vx, self.vy = 0, 0\n self.accel, self.dangle = 0, 0\n self.crashed = False\n self.timeDriving, self.score, self.checkpoint, self.laps = 0, 0, 0, 0\n self.targetCheckpointPos = self.maze.checkpoints[0].getMidInt()\n self.inputColour = [sensor_colours[0] for i in range(self.dimensions[0])]\n self.scan = np.array([0 for i in range(self.dimensions[0])])\n self.cost = [0 for i in range(6)]\n #Extrapos for CTS LOS\n self.extrapos = []",
"def reduce_velocity(self):\n if self.controls[\"make_velocity_0\"]:\n # print(self.controls[\"bar_move_velocity\"])\n self.controls[\"bar_move_velocity\"] = 0",
"def reset(self):\n self.position = np.zeros(self.ndegres)\n self.velocity = np.zeros(self.ndegres)\n self.state = np.zeros(2*self.ndegres)\n self.flag = 0\n self.h_ref = np.array([self.ref for _ in range(self.horizon)])\n self.action = np.zeros(self.ACTION_DIM) \n self.h_action = np.zeros(self.ACTION_DIM*self.horizon)"
] | [
"0.7025841",
"0.6620128",
"0.63186425",
"0.62810314",
"0.6206654",
"0.61409414",
"0.6138598",
"0.6134401",
"0.6096291",
"0.60953915",
"0.6012672",
"0.5995094",
"0.59774",
"0.5973119",
"0.59690225",
"0.59552336",
"0.59322405",
"0.5886006",
"0.58640605",
"0.58620423",
"0.5855571",
"0.5839498",
"0.58289963",
"0.5823618",
"0.581656",
"0.5806353",
"0.5805807",
"0.57984924",
"0.57682216",
"0.57641155"
] | 0.6673411 | 1 |
Decelerate a list of waypoints so that they stop on stop_index | def decelerate(self, waypoints, stop_index, stop_distance):
if stop_index <= 0:
return
dist = self.distance(waypoints, 0, stop_index)
step = dist / stop_index
# Generate waypoint velocity by traversing the waypoint list backwards:
# - Everything beyond stop_index will have velocity = 0
# - Before that, constant (de)cceleration is applied until reaching
# previous waypoint velocity.
# We assume constant distance between consecutive waypoints for simplicity
v = 0.
d = 0.
for idx in reversed(range(len(waypoints))):
if idx < stop_index:
d += step
                if d > stop_distance:
v = math.sqrt(2*abs(self.accel)*(d-stop_distance))
if v < self.get_waypoint_velocity(waypoints, idx):
self.set_waypoint_velocity(waypoints, idx, v) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def plan_stop(wps, idx, min_decel, max_decel, speed_limit):\n\n if idx < 0:\n return []\n\n wps = wps[0: idx+1]\n\n # Calculate the acceleration needed to stop the car at the last waypoint in wps\n path_length = distance(wps, 0, len(wps)-1)\n a = -wps[0].twist.twist.linear.x**2/(2*path_length) # From the kinematic equations\n\n ''' Constrain the acceleration to be within min_decel and max_decel (note, a, min_decel and\n max_decel are all supposed to be negative, being decelerations) '''\n decel = max(a, max_decel)\n decel = min(decel, min_decel)\n\n wps[idx].twist.twist.linear.x = 0\n current_speed = 0\n current_i = idx-1\n while current_i >= 0 and (current_i == 0 or current_speed < wps[current_i-1].twist.twist.linear.x):\n dist = distance(wps, current_i, current_i+1)\n current_speed = (current_speed**2 - 2*decel*dist)**.5\n if current_i >= 1:\n current_speed = min(current_speed, wps[current_i-1].twist.twist.linear.x)\n else:\n current_speed = min(current_speed, speed_limit)\n wps[current_i].twist.twist.linear.x = current_speed\n current_i -= 1\n\n return wps",
"def _all_dist_to_end(args):\n idx = args[0]\n spiketrains = args[1]\n start = args[2]\n end = args[3]\n N = args[4]\n num_spiketrains = len(spiketrains)\n distances = []\n for jdx in range(idx+1, num_spiketrains):\n dist = distance(spiketrains[idx], spiketrains[jdx], start, end, N)\n times = dist[0] # should be the same for all\n distances.append(dist[1])\n return times, distances",
"def update_waypoints(self, idx):\n # Create the header and set its timestamp\n header = Header()\n header.stamp = rospy.Time.now()\n\n msg = Lane()\n msg.header = header\n # Keep the copy of base_waypoints so that you don't have to recompute them\n # we are using the same base_waypoints when we get multiple messages for stopping\n # at a stopline.\n base_waypoints = self.__base_waypoints[idx: idx + LOOKAHEAD_WPS]\n msg.waypoints = base_waypoints\n # If you find out that one of the generated waypoints lies on a stop line\n # that we should be stopping at then start decelerating\n if self.__stopline_wp_idx != -1 and self.__stopline_wp_idx < (idx + LOOKAHEAD_WPS):\n rospy.logdebug('Planning to stop at '+str(self.__stopline_wp_idx)+' from total '+str(idx + LOOKAHEAD_WPS))\n msg.waypoints = self.__decelerate(base_waypoints, idx)\n\n self.final_waypoints_pub.publish(msg)",
"def _travel_times(self, trip_list, index=0):\n\n def distance_in_travel_time(dep_secs, arr_secs):\n t_dist = arr_secs - dep_secs\n if t_dist < 0:\n t_dist = self._DUMMY_SEPARATOR # min separation\n return t_dist\n\n if not trip_list:\n return []\n\n if 0 < index < len(trip_list):\n trip = trip_list[index]\n else:\n trip = trip_list[0]\n\n t_dists2 = [distance_in_travel_time(stop[3], tail[2]) for (stop, tail)\n in zip(trip.get_time_stops(), trip.get_time_stops()[1:])]\n return t_dists2",
"def gen_stops():\r\n stop_ = list(nasal_stop)\r\n stop_.extend(voiced_stop)\r\n stop_.extend(unvoiced_stop)\r\n return stop_",
"def remove_jumps(self) -> None:\n q_diff = np.diff(self.array, axis=0)\n jumps = np.nonzero(np.where(np.linalg.norm(q_diff, axis=1)>1, 1, 0))[0]+1\n if len(jumps) % 2:\n jumps = np.append(jumps, [len(q_diff)+1])\n jump_pairs = jumps.reshape((len(jumps)//2, 2))\n for j in jump_pairs:\n self.array[j[0]:j[1]] *= -1.0",
"def create_weight_stops(breaks):\n num_breaks = len(breaks)\n weight_breaks = scale_between(0, 1, num_breaks)\n stops = []\n\n for i, b in enumerate(breaks):\n stops.append([b, weight_breaks[i]])\n return stops",
"def deceleration_velocities(self, waypoints, stop_idx):\n\n # TODO:\n # It would be cool to calculate the jerk minimizing trajectory as the velocity profile\n # here instead of using uniform accelaration. I think this might be straightforward\n # and it would give the test passengers a nice smooth ride. Consider doing this if there \n # is time.\n\n # Get waypoint xyz coordinates as np array\n xyz = np.asarray([ [wp.pose.pose.position.x, wp.pose.pose.position.y, wp.pose.pose.position.z] for wp in waypoints])\n\n # Compute the cumulative distance between points\n cumulative_distances = np.cumsum(np.sqrt(np.sum(np.square(xyz[1:,:] - xyz[:-1,:]), axis = -1)))\n\n # Compute relative distance to stopping point\n stop_distances = np.maximum(0, cumulative_distances[stop_idx] - cumulative_distances)\n\n # Compute velocity needed to cause uniform deceleration\n velocities = np.sqrt(2*MAX_DECEL*stop_distances)\n\n # Return the desired deceleration velocity at each point\n return velocities",
"def step1(ball_list, step,borders,obstacle=None):\n\n index_list = range(len(ball_list))\n for i in index_list:\n ball_list[i].compute_refl(step,borders,obstacle)\n for j in index_list:\n if i!=j:\n ball_list[i].compute_coll(ball_list[j],step)\n return ball_list",
"def set_scan_linear(starts, stops, steps, back_and_force=False, oversteps=10000):\n starts = np.array(starts)\n stops = np.array(stops)\n steps = np.array(steps)\n\n if np.any(np.abs(steps) < 1e-12) or \\\n np.any(np.sign(stops - starts) != np.sign(steps)) or \\\n np.any(starts == stops):\n return np.array([starts])\n\n else:\n axis_1_unique = linspace_step(starts[0], stops[0], steps[0])\n len1 = len(axis_1_unique)\n\n axis_2_unique = linspace_step(starts[1], stops[1], steps[1])\n len2 = len(axis_2_unique)\n # if number of steps is over oversteps, reduce both axis in the same ratio\n if len1 * len2 > oversteps:\n axis_1_unique = axis_1_unique[:int(np.ceil(np.sqrt(oversteps * len1 / len2)))]\n axis_2_unique = axis_2_unique[:int(np.ceil(np.sqrt(oversteps * len2 / len1)))]\n\n positions = []\n for ind_x, pos1 in enumerate(axis_1_unique):\n if back_and_force:\n for ind_y, pos2 in enumerate(axis_2_unique):\n if not odd_even(ind_x):\n positions.append([pos1, pos2])\n else:\n positions.append([pos1, axis_2_unique[len(axis_2_unique) - ind_y - 1]])\n else:\n for ind_y, pos2 in enumerate(axis_2_unique):\n positions.append([pos1, pos2])\n\n return np.array(positions)",
"def stops_on_routes_with_direction():\n routes_and_stops = {}\n routes = ['102y', '102z', '104y', '104z', '111y', '111z', '114y', '114z', '116y', '116z', '118y', '11y', '11z', '120y', '120z', '122y', '122z', '123y', '123z', '130y', '130z', '13y', '13z', '140y', '140z', '142y', '142z', '145y', '145z', '14Cy', '14Cz', '14y', '14z', '150y', '150z', '151y', '151z', '15Ay', '15Az', '15By', '15Bz', '15y', '15z', '161y', '161z', '16Cy', '16Cz', '16y', '16z', '17Ay', '17Az', '17y', '17z', '184y', '184z', '185y', '185z', '18y', '18z', '1y', '1z', '220y', '220z', '236y', '236z', '238y', '238z', '239y', '239z', '25Ay', '25Az', '25By', '25Bz', '25Xy', '25Xz', '25y', '25z', '26y', '26z', '270y', '270z', '27Ay', '27Az', '27By', '27Bz', '27Xy', '27Xz', '27y', '27z', '29Ay', '29Az', '31Ay', '31Az', '31By', '31Bz', '31y', '31z', '32Ay', '32Az', '32By', '32Bz', '32Xy', '32Xz', '32y', '32z', '33Ay', '33Az', '33By', '33Bz', '33Xy', '33Xz', '33y', '33z', '37y', '37z', '38Ay', '38Az', '38By', '38Bz', '38y', '38z', '39Ay', '39Az', '39y', '39z', '40By', '40Bz', '40Dy', '40Dz', '40y', '40z', '41Ay', '41By', '41Bz', '41Cy', '41Cz', '41Xy', '41Xz', '41y', '41z', '42y', '42z', '43y', '43z', '44By', '44Bz', '44y', '44z', '45Ay', '45Az', '46Ay', '46Az', '46Ey', '47y', '47z', '49y', '49z', '4y', '4z', '51Dy', '51Dz', '51Xy', '53By', '53Bz', '53y', '53z', '54Ay', '54Az', '56Ay', '56Az', '59y', '59z', '61y', '61z', '63y', '63z', '65By', '65Bz', '65y', '65z', '66Ay', '66Az', '66By', '66Bz', '66Xy', '66Xz', '66y', '66z', '67Xy', '67Xz', '67y', '67z', '68Ay', '68Az', '68y', '68z', '69Xy', '69Xz', '69y', '69z', '70y', '70z', '747y', '747z', '75y', '75z', '76Ay', '76Az', '76y', '76z', '77Ay', '77Az', '79Ay', '79Az', '79y', '79z', '7By', '7Bz', '7Dy', '7Dz', '7y', '7z', '83Ay', '83Az', '83y', '83z', '84Ay', '84Az', '84Xy', '84Xz', '84y', '84z', '8y', '8z', '9y', '9z']\n for route in routes:\n routes_and_stops[route] = [] # new array value for each route key\n reader = csv.reader(open(\"../Data/Sorted Data/stopped_bus_data.csv\"))\n for line in reader:\n try:\n current_route = extract_route_and_direction(line[3])\n if int(line[13]) not in routes_and_stops[current_route]:\n routes_and_stops[current_route].append(int(line[13]))\n except:\n continue\n return routes_and_stops",
"def longest_walk(straights, index):\n counter = range(len(index))\n acc1 = []\n longest = []\n while len(index) > 2:\n i = index[0]\n j = index[1]\n count = counter[0]\n index.pop(0)\n counter.pop(0)\n if abs(i - j) < .01:\n acc1.append(count)\n else:\n if len(acc1) >= len(longest):\n longest = list(acc1)\n acc1 = []\n else:\n acc1 = []\n return straights[[longest]]",
"def restore_velocities(self, indexes):\n for idx in indexes:\n self.set_waypoint_velocity(self.base_waypoints, idx, self.base_wp_orig_v[idx])",
"def unlink_wrap(dat, lims=[-np.pi, np.pi], thresh = 0.95):\n jump = np.nonzero(np.abs(np.diff(dat)) > ((lims[1] - lims[0]) * thresh))[0]\n lasti = 0\n for ind in jump:\n yield slice(lasti, ind + 1)\n lasti = ind + 1\n yield slice(lasti, len(dat))",
"def recalculate_turn_off(switches: List[Union[datetime, Tuple[datetime, int]]], start_index: int,\n lightbulb_num: int, auto_turn_off: datetime) -> None:\n for i in range(start_index + 1, len(switches)):\n i_time, i_lightbulb_num = parse_switch_time(switches[i])\n if i_lightbulb_num == lightbulb_num:\n if auto_turn_off < i_time:\n switches[i] = (auto_turn_off, lightbulb_num)\n switches.sort(key=lambda switch: switch if isinstance(switch, datetime) else switch[0])\n return\n else:\n switches.append((auto_turn_off, lightbulb_num))\n switches.sort(key=lambda switch: switch if isinstance(switch, datetime) else switch[0])",
"def remove_stops(tokens, stops):\n return [(token, pos) for token, pos in tokens if token not in stops.value]",
"def spiketraintimerejection(spiketrain, t_start, t_stop):\n t_start, t_stop = np.array(t_start), np.array(t_stop)\n if not t_start.size == 1:\n if t_start.size != t_stop.size:\n print('Argments t_start and t_stop must have the same size')\n return [], []\n else:\n spikerejected_ind = np.zeros(len(spiketrain), dtype=bool)\n for i in range(len(t_start)):\n _, _, spikerejected_ind_i = spiketraintimerejection(spiketrain, t_start[i], t_stop[i])\n spikerejected_ind = spikerejected_ind | spikerejected_ind_i\n spiketimesel = np.array(spiketrain).ravel()[~spikerejected_ind]\n spiketrainsel = neo.core.SpikeTrain(times=spiketimesel, units=spiketrain.units, t_start=spiketrain.t_start,\n t_stop=spiketrain.t_stop, sampling_rate=spiketrain.sampling_rate,\n file_origin=spiketrain.file_origin, name=spiketrain.name)\n return spiketrainsel, spiketimesel, spikerejected_ind\n else:\n spiketimes = np.array(spiketrain).ravel()\n spikerejected_ind = np.logical_and(spiketimes >= t_start, spiketimes <= t_stop)\n spiketimesel = spiketimes[np.logical_or(spiketimes < t_start, spiketimes > t_stop)]\n spiketrainsel = neo.core.SpikeTrain(times=spiketimesel, units=spiketrain.units, t_start=spiketrain.t_start,\n t_stop=spiketrain.t_stop, sampling_rate=spiketrain.sampling_rate,\n file_origin=spiketrain.file_origin, name=spiketrain.name)\n return spiketrainsel, spiketimesel, spikerejected_ind",
"def calcDistanceList(work_list):\n distance_list = []\n for swap in work_list: # for every work item find distance\n distance_list.append(Cluster.calcDistance(*swap))\n return distance_list",
"def test_directions_handles_more_than_max_waypoints(self):\n stops = [\n mommy.make(Stop, trips_year=self.trips_year, lat_lng=coord)\n for coord in (\n '43.705639,-72.297404',\n '43.680288,-72.527876',\n '43.779934,-72.042908',\n '43.753303,-72.124643',\n '43.703049,-72.289567',\n '43.705639,-72.297404',\n '44.831956,-71.075664',\n '44.875039,-71.05471',\n '43.736252,-72.2519',\n '43.788074,-72.099655',\n '44.227489,-71.477737',\n '43.705639,-72.297404',\n '43.680288,-72.527876',\n '43.779934,-72.042908',\n '43.753303,-72.124643',\n '43.703049,-72.289567',\n '43.705639,-72.297404',\n '44.831956,-71.075664',\n '44.875039,-71.05471',\n '43.736252,-72.2519',\n '43.788074,-72.099655',\n '44.227489,-71.477737',\n '43.705639,-72.297404',\n '44.831956,-71.075664',\n '43.753303,-72.124643',\n '43.703049,-72.289567',\n )\n ]\n directions = maps.get_directions(stops)\n self.assertEqual(len(stops), len(directions.legs) + 1)\n for i, leg in enumerate(directions.legs):\n self.assertEqual(leg.start_stop, stops[i])\n self.assertEqual(leg.end_stop, stops[i + 1])",
"def departing_flights(self):\n for i in range(self.flights.n_flights):\n if self.flights.departing(i):\n yield i, self.bay[i], self.flights.flight_schedule[i]",
"def compute_min_refills(distance: int, tank: int, stops: List[int]):\n location: int = 0\n n_stops = 0\n last_stop = 0\n max_drive = location + tank\n\n while max_drive < distance:\n counter = 0\n\n # Handle the case that stops are depleted before we reach distance\n if len(stops) == 0:\n return -1\n for s in stops:\n if s <= max_drive:\n counter += 1\n last_stop = s\n max_drive = last_stop + tank\n\n # Handle the case that wi did not reach the next stop\n if counter == 0:\n return -1\n else:\n del stops[0:counter]\n n_stops += 1\n\n return n_stops",
"def _decomposition_with_many_workers(control_wires, target_wire, work_wires):\n num_work_wires_needed = len(control_wires) - 2\n work_wires = work_wires[:num_work_wires_needed]\n\n work_wires_reversed = list(reversed(work_wires))\n control_wires_reversed = list(reversed(control_wires))\n\n gates = []\n\n for i in range(len(work_wires)):\n ctrl1 = control_wires_reversed[i]\n ctrl2 = work_wires_reversed[i]\n t = target_wire if i == 0 else work_wires_reversed[i - 1]\n gates.append(qml.Toffoli(wires=[ctrl1, ctrl2, t]))\n\n gates.append(qml.Toffoli(wires=[*control_wires[:2], work_wires[0]]))\n\n for i in reversed(range(len(work_wires))):\n ctrl1 = control_wires_reversed[i]\n ctrl2 = work_wires_reversed[i]\n t = target_wire if i == 0 else work_wires_reversed[i - 1]\n gates.append(qml.Toffoli(wires=[ctrl1, ctrl2, t]))\n\n for i in range(len(work_wires) - 1):\n ctrl1 = control_wires_reversed[i + 1]\n ctrl2 = work_wires_reversed[i + 1]\n t = work_wires_reversed[i]\n gates.append(qml.Toffoli(wires=[ctrl1, ctrl2, t]))\n\n gates.append(qml.Toffoli(wires=[*control_wires[:2], work_wires[0]]))\n\n for i in reversed(range(len(work_wires) - 1)):\n ctrl1 = control_wires_reversed[i + 1]\n ctrl2 = work_wires_reversed[i + 1]\n t = work_wires_reversed[i]\n gates.append(qml.Toffoli(wires=[ctrl1, ctrl2, t]))\n\n return gates",
"def determine_next_stop(current_location, stops):\n possible_stop = {}\n for stop in stops:\n # calculates the distance between the current location and the next possible stop\n calculated_distance = csv_reader.distance_lookup(current_location, stop)\n\n # adds a key value pair of possible trip (a tuple) and the distance of that trip\n possible_stop[(current_location, stop)] = float(calculated_distance)\n return possible_stop",
"def getBreakIndices(self):\n for i in self.raw.index[:-1]:\n if self.raw['stress'][i+1] > self.raw['stress'][i] and \\\n self.raw['stress'][i+2] < self.raw['stress'][i+1]:\n brkIdx1 = i+1 # brkIdx1: start of the first unloading\n break\n if self.reloading:\n for i in self.raw.index[brkIdx1+1:-1]:\n if self.raw['stress'][i+1] < self.raw['stress'][i] and \\\n self.raw['stress'][i+2] > self.raw['stress'][i+1]:\n brkIdx2 = i+1 # brkIdx2: end of the first unloading\n break\n # brkIdx3: Point on the NCL after the first reloading\n brkIdx3 = self.raw.query(f'stress == stress[{brkIdx1}]').index[1]\n # brkIdx4: index of the last point on the NCL\n brkIdx4 = self.raw.query('stress == stress.max()').index[0]\n self.secondUnloading = False\n else:\n brkIdx2 = self.raw.index[-1]\n brkIdx3 = None\n brkIdx4 = None\n\n self.brkIdx1 = brkIdx1\n self.brkIdx2 = brkIdx2\n self.brkIdx3 = brkIdx3\n self.brkIdx4 = brkIdx4\n return",
"def slow_dtw(base_list, test_list, extended=False):\r\n\r\n b = base_list.shape[0]\r\n t = test_list.shape[0]\r\n if (b > 0 and t > 0):\r\n DTW = np.zeros((b, t))\r\n cost = np.zeros((b, t))\r\n\r\n DTW[:, 0] = float('inf')\r\n DTW[0, :] = float('inf')\r\n DTW[0, 0] = 0.0\r\n\r\n for i in range(0, b):\r\n for j in range(0, t):\r\n dist = math.sqrt((test_list[j, 0] - base_list[i, 0]) ** 2 + (test_list[j, 1] - base_list[i, 1]) ** 2)\r\n cost[i, j] = dist\r\n if (i > 0 and j > 0):\r\n jminus2 = DTW[i - 1, j - 2] if j > 1 else float('inf')\r\n jminus1 = DTW[i - 1, j - 1]\r\n jeven = DTW[i - 1, j]\r\n minimum = min(jminus2, jminus1, jeven)\r\n DTW[i, j] = dist + minimum\r\n if (extended):\r\n return DTW[b - 1, t - 1], cost, DTW, _traceback(DTW)\r\n else:\r\n return DTW[b - 1, t - 1]",
"def step2(ball_list, step):\n index_list = range(len(ball_list))\n for i in index_list:\n ball_list[i].new_velocity()\n ball_list[i].compute_step(step)\n return ball_list",
"def tangents(amount, start, stop, truncated, sequence):\n\n for x in range(start, amount):\n y = abs(round(stop * math.tan(x)))\n if truncated and y >= stop:\n sequence.append(stop)\n elif y < start:\n sequence.append(start)\n else:\n sequence.append(y)\n return sequence",
"def adapt_target(self, target):\n\n target = target.view(-1)\n new_target = [target.clone()]\n target_idxs = []\n\n for i in range(len(self.cutoff) - 1):\n mask = target.ge(self.cutoff[i]).mul(target.lt(self.cutoff[i + 1]))\n new_target[0][mask] = self.cutoff[0] + i - self.buggy_offset\n\n if mask.any():\n target_idxs.append(mask.nonzero().squeeze(1))\n new_target.append(target[mask].add(-self.cutoff[i]))\n else:\n target_idxs.append(None)\n new_target.append(None)\n\n return new_target, target_idxs",
"def delme_list_to_rangemap(delme_verts: List[int]) -> Tuple[List[int],List[int]]:\n\tdelme_range = []\n\tstart_idx = 0\n\tfor end_idx in range(1, len(delme_verts)+1):\n\t\tif (end_idx == len(delme_verts)) or (delme_verts[end_idx] != (delme_verts[end_idx-1] + 1)):\n\t\t\t# if the next vert ID is non-contiguous, or is the end of the list, that defines a breakpoint between ranges\n\t\t\t# that means that everything from start to end IS contiguous\n\t\t\t# so save the VALUE of the start, and the LENGTH of the range (which equals the length of the block)\n\t\t\tdelme_range.append([delme_verts[start_idx], end_idx - start_idx])\n\t\t\tstart_idx = end_idx\n\t# convert from [start-length] to [start-cumulativelength]\n\tfor i in range(1, len(delme_range)):\n\t\tdelme_range[i][1] += delme_range[i-1][1]\n\t# convert from [[start,len],[start,len],[start,len]] to [[start,start,start],[len,len,len]]\n\ta,b = zip(*delme_range)\n\treturn a,b",
"def _uniform(trip_list):\n # This should not be necessary, but we are in fallback mode\n longest = max([len(t.get_time_stops()) for t in trip_list])\n return [100] * longest"
] | [
"0.69572246",
"0.5711461",
"0.56443334",
"0.5461794",
"0.5378483",
"0.5345332",
"0.5312884",
"0.52976394",
"0.5167818",
"0.51332635",
"0.51195765",
"0.50715023",
"0.5067811",
"0.50550187",
"0.50407976",
"0.50374746",
"0.50099397",
"0.4976755",
"0.49753836",
"0.49560714",
"0.49494073",
"0.4932878",
"0.4923271",
"0.4908069",
"0.48989302",
"0.48896086",
"0.48862427",
"0.48698542",
"0.48606908",
"0.48506922"
] | 0.6354519 | 1 |
Compare two waypoints to see whether they are the same (within 0.5 m and 0.5 m/s) | def is_same_waypoint(self, wp1, wp2, max_d=0.5, max_v=0.5):
dl = lambda a, b: math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2 + (a.z-b.z)**2)
ddif = dl(wp1.pose.pose.position, wp2.pose.pose.position)
if ddif < max_d:
return True
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __comparing_points(self, point1, point2) -> bool:\n return (abs(point1.x - point2.x) <= self.dirt_pos_tolerance and abs(\n point1.y - point2.y) <= self.dirt_pos_tolerance)",
"def match(uspec1, uspec2):\n \n if uspec1.is_power_onoff() and uspec2.is_power_onoff():\n return True\n \n if uspec1.number_windows() != uspec2.number_windows():\n return False\n \n if uspec1['speed'] != uspec2['speed'] or \\\n uspec1['x_bin'] != uspec2['x_bin'] or \\\n uspec1['y_bin'] != uspec2['y_bin']:\n return False\n \n if uspec1.number_window_pairs() > 0:\n \n if uspec1['x1_start'] != uspec2['x1_start'] or \\\n uspec1['x1_size'] != uspec2['x1_size'] or \\\n uspec1['y1_start'] != uspec2['y1_start'] or \\\n uspec1['y1_size'] != uspec2['y1_size']:\n return False\n \n if uspec1.number_window_pairs() > 1:\n\n if uspec1['x2_start'] != uspec2['x2_start'] or \\\n uspec1['x2_size'] != uspec2['x2_size'] or \\\n uspec1['y2_start'] != uspec2['y2_start'] or \\\n uspec1['y2_size'] != uspec2['y2_size']:\n return False\n \n return True",
"def closeTo(pointOne, pointTwo):\r\n\tif abs(pointOne.lat-pointTwo.lat) < 0.0002:\r\n\t\tif abs(pointOne.lon-pointTwo.lon) < 0.0002:\r\n\t\t\treturn True\r\n\treturn False",
"def _same(p1,p2,prec=0.0001):\n #return num.all(num.equal(p1,p2))\n t1 = num.fabs(p1[0]-p2[0]) < prec\n t2 = num.fabs(p1[1]-p2[1]) < prec\n if t1 and t2:\n #print \"same\", p1,p2\n return True",
"def match_marking_points(point_a, point_b):\n \n squared_distance_thresh = 0.000277778 # 10 pixel in 600*600 image\n direction_angle_thresh = 0.5235987755982988 # 30 degree in rad \n \n dist_square = calc_point_squre_dist(point_a, point_b)\n #if min(point_a.shape[1], point_b.shape[1]) <= 2:\n if True:\n return dist_square < squared_distance_thresh\n\n angle = calc_point_direction_angle(point_a, point_b)\n if point_a[3] > 0.5 and point_b[3] < 0.5:\n return False\n if point_a[3] < 0.5 and point_b[3] > 0.5:\n return False\n return (dist_square < squared_distance_thresh\n and angle < direction_angle_thresh)",
"def almost_equals(self, other):\n import math\n ox, oy = other\n dx = self[0] - ox\n dy = self[1] - oy\n return (dx*dx + dy*dy) < pygonal.EPSILON2",
"def are_similar(first_coords: List[Tuple[int, int]], second_coords: List[Tuple[int, int]]) -> bool:\n # Step 1: Get angles of each triangle\n # Step 2: Compare grades of two triangles\n # Step 3: If two angles are equal then first triangle is similar to second triangle\n pass",
"def comparison_test():\n for pose in SE2.interesting_points():\n se2 = se2_from_SE2(pose)\n SE2a = SE2_from_se2_slow(se2)\n SE2b = SE2_from_se2(se2)\n # printm('pose', pose, 'se2', se2)\n # printm('SE2a', SE2a, 'SE2b', SE2b)\n SE2.assert_close(SE2a, pose)\n # print('SE2a = pose Their distance is %f' % d)\n SE2.assert_close(SE2b, pose)\n # print('SE2b = pose Their distance is %f' % d)\n assert_allclose(SE2a, SE2b, atol=1e-8, err_msg=\"SE2a != SE2b\")\n assert_allclose(SE2a, pose, atol=1e-8, err_msg=\"SE2a != pose\")\n assert_allclose(SE2b, pose, atol=1e-8, err_msg=\"SE2b != pose\")",
"def are_symmetrically_related(self, point_a, point_b, tol=0.001):\n if np.allclose(self.operate(point_a), point_b, atol=tol):\n return True\n if np.allclose(self.operate(point_b), point_a, atol=tol):\n return True\n return False",
"def compare(first_point, ref_point, compare_point):\n x2, y2 = first_point\n x1, y1 = ref_point\n x3, y3 = compare_point\n m = (y2 - (y1 * 1.00001)) / (x2 - (x1 * 1.00001))\n return y3 - y1 - m * (x3 - x1)",
"def test_same_distances(self):\n \n\t\tm1 = models.vgg11(weights='VGG11_Weights.IMAGENET1K_V1')\n\t\tm2 = models.vgg11(weights='VGG11_Weights.IMAGENET1K_V1')\n\t\tavg_dW, avg_db, distances = self.watcher.distances(m1, m2)\n\t\t\n\t\tactual_mean_distance = avg_dW\n\t\texpected_mean_distance = 0.0\t \n\t\tself.assertEqual(actual_mean_distance,expected_mean_distance)\n\t\t\n\t\tactual_mean_distance = avg_db\n\t\texpected_mean_distance = 0.0\t \n\t\tself.assertEqual(actual_mean_distance,expected_mean_distance)\n\t\t\n\t\tprint(distances)",
"def _point_equal(a,b):\n return np.array_equal(a._Point__loc, b._Point__loc)",
"def are_equal(self, sp1, sp2):\n return True",
"def _point_almost_equal(a,b, rtol=RTOL, atol=ATOL):\n return np.allclose(a._Point__loc, b._Point__loc,\n rtol=rtol, atol=atol)",
"def fp_eq(x: float, y: float) -> bool:\n return fabs(x-y) < 10**-12",
"def equivalent(kls, first, second):\n if first.empty() and second.empty():\n return True\n elif first.vertices.shape[0] != second.vertices.shape[0]:\n return False\n elif first.edges.shape[0] != second.edges.shape[0]:\n return False\n\n EPSILON = 1e-7\n\n vertex1, ct1 = np.unique(first.vertices, axis=0, return_counts=True)\n vertex2, ct2 = np.unique(second.vertices, axis=0, return_counts=True)\n \n vertex_match = np.all(np.abs(vertex1 - vertex2) < EPSILON)\n ct_match = np.all(ct1 == ct2)\n if not (vertex_match and ct_match):\n return False\n\n g1 = nx.Graph()\n g1.add_edges_from(first.edges)\n g2 = nx.Graph()\n g2.add_edges_from(second.edges)\n edges_match = nx.is_isomorphic(g1, g2)\n del g1 \n del g2\n\n if not edges_match:\n return False\n\n second_verts = {}\n for i, vert in enumerate(second.vertices):\n second_verts[tuple(vert)] = i\n \n attrs = [ attr['id'] for attr in first.extra_attributes ]\n for attr in attrs:\n buf1 = getattr(first, attr)\n buf2 = getattr(second, attr)\n if len(buf1) != len(buf2):\n return False\n\n for i in range(len(buf1)):\n i2 = second_verts[tuple(first.vertices[i])]\n if buf1[i] != buf2[i2]:\n return False\n\n return True",
"def Q1_test():\n A, p1, p2 = [0,0], [2,4], [6,5]\n return (distance(A,p1) > 4.472135) and (distance(p1,p2) < 4.472136)",
"def test_find_closest_waypoints_nearest(self):\n planner = WaypointPlanner(make_example_base_waypoints())\n\n planner.position = Vector3(0, 0, 0)\n waypoints = planner.find_closest_waypoints(1)\n self.assertEqual(1, len(waypoints))\n self.assertEqual(0, waypoints[0].pose.pose.position.x)\n self.assertEqual(0, waypoints[0].pose.pose.position.y)\n self.assertEqual(0, waypoints[0].pose.pose.position.z)\n\n planner.position = Vector3(0.9, 0.9, 0)\n waypoints = planner.find_closest_waypoints(2)\n self.assertEqual(2, len(waypoints))\n self.assertEqual(1, waypoints[0].pose.pose.position.x)\n self.assertEqual(1, waypoints[0].pose.pose.position.y)\n self.assertEqual(2, waypoints[1].pose.pose.position.x)\n self.assertEqual(2, waypoints[1].pose.pose.position.y)\n\n # Check it wraps back around to the start.\n planner.position = Vector3(0.9, 0.9, 0)\n waypoints = planner.find_closest_waypoints(3)\n self.assertEqual(3, len(waypoints))\n self.assertEqual(1, waypoints[0].pose.pose.position.x)\n self.assertEqual(1, waypoints[0].pose.pose.position.y)\n self.assertEqual(2, waypoints[1].pose.pose.position.x)\n self.assertEqual(2, waypoints[1].pose.pose.position.y)\n self.assertEqual(0, waypoints[2].pose.pose.position.x)\n self.assertEqual(0, waypoints[2].pose.pose.position.y)",
"def test_distances(self):\n for p1, p2, distance in DISTANCES:\n calculated = p1.approximate_distance_meters(p2)\n self.assertAlmostEqual(distance, calculated, delta=5)",
"def are_equal(self, sp1, sp2):\n return",
"def near(a,b):\n return torch.allclose(a,b, rtol=1e-03, atol=1e-05)",
"def validate_points(a, b):\r\n\tdiff_y = b[0] - a[0]\r\n\tdiff_x = b[1] - a[1]\r\n\r\n\treturn (diff_y == 0 and diff_x != 0) or (diff_x == 0 and diff_y != 0) or abs(diff_x) == abs(diff_y)",
"def torch_the_same(X, Y, eps=1e-8):\n return (X - Y).abs().min() < eps",
"def equal_points_2d(p1, p2, tol=None):\r\n if tol is None:\r\n tol = get_tol_2d()\r\n return geometry.gmEqualPointsXY(p1, p2, tol)",
"def __eq__(self, other):\r\n return abs(self.x - other.x) + abs(self.y - other.y) < Vertex.epsilon",
"def test_gt_2():\n a = FixedPoint(1, 'Q2.8')\n b = FixedPoint(1.1, 'Q2.8')\n assert b > a",
"def compareGrids(grid1, grid2):\n if axis_utils.areAxesIdentical(grid1.getLatitude(),\n grid2.getLatitude(), check_id=False)==False:\n return False\n if axis_utils.areAxesIdentical(grid1.getLongitude(),\n grid2.getLongitude(), check_id=False)==False:\n return False\n return True",
"def assertTPsEqual(self, tp1, tp2):\n self.assertEqual(tp1, tp2, tp1.diff(tp2))\n self.assertTrue(fdrutilities.tpDiff2(tp1, tp2, 1, False))",
"def test_eq_2():\n a = FixedPoint(1, 'Q2.8')\n b = FixedPoint(1, 'Q2.8')\n assert a == b",
"def fuzzy_equals(a, b):\n epsilon = 10**(-6) \n return (abs(a - b) < epsilon)"
] | [
"0.66216546",
"0.65738994",
"0.6439483",
"0.6387838",
"0.6352702",
"0.63003606",
"0.6274246",
"0.6252439",
"0.6230331",
"0.62167144",
"0.6209074",
"0.62022907",
"0.61987966",
"0.61629647",
"0.6139927",
"0.61213946",
"0.6107632",
"0.6103632",
"0.6090313",
"0.6084919",
"0.60797465",
"0.60709864",
"0.60385954",
"0.60332483",
"0.60059434",
"0.6004413",
"0.59955597",
"0.5991775",
"0.5990935",
"0.5987294"
] | 0.73450947 | 0 |
Ensures that a capital can only belong to one country | def test_capital_unicity(self):
# Get Bangkok
bangkok = Country.objects.get(iso3="THA").capital
# Get United States
united_states = Country.objects.get(iso3="USA")
# Initialize assertRaises block
with self.assertRaises(IntegrityError):
# Set the capital of United States to Bangkok
united_states.capital = bangkok
# Try to save United States object to database
united_states.save() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_country_name_not_in_countries(self):\n\t\tcountry_code = get_country_code('Venezuela, RB')\n\t\tself.assertEqual(country_code, 've')",
"def test_valid_country():\n assert valid_country(\"Democratic Republic of Lungary\") is True\n assert valid_country(\"Kraznoviklandstan\") is True\n assert valid_country(\"kraznoviklandstan\") is True\n assert valid_country(\"KRAZNOVIKLANDSTAN\") is True\n\n assert valid_country(\"Democratic_Republic982759\") is False\n assert valid_country(\"Kraznoviklandsta\") is False\n assert valid_country(\"Principalities of Fryed\") is False\n assert valid_country(\"FRY\") is False",
"def validateCountry(self, country_name):\n if country_name in self.travel_db.countries:\n return True\n else:\n return False",
"def validate_role():\n\n target_country = DEF_ROLE_TARGET_COUNTRY\n pairs = DEF_ROLE_COUNTRY_PAIRS\n\n def wrapper(self):\n \"\"\"Wrapper method.\n \"\"\"\n\n cleaned_data = self.cleaned_data\n\n for country_field, state_field in pairs:\n country = cleaned_data.get(country_field)\n state = cleaned_data.get(state_field)\n\n if country is None or state is None:\n continue\n\n if country != target_country:\n continue\n\n if len(state) != 2:\n raise forms.ValidationError(DEF_2_LETTER_STATE_FMT % target_country)\n\n return cleaned_data\n\n return wrapper",
"def test_country_name_in_countries(self):\n\t\tcountry_code = get_country_code('Andorra')\n\t\tself.assertEqual(country_code, 'ad')",
"def _validate_country(country):\n if country == '' or country == '--': # lint-amnesty, pylint: disable=consider-using-in\n raise errors.AccountCountryInvalid(accounts.REQUIRED_FIELD_COUNTRY_MSG)",
"def _validate(self, s):\n\n billing_accounts = self.billing_client.list_billing_accounts(\n only_open_accounts=True)\n billing_account_names = [\n account['name'] for account in billing_accounts\n ]\n if s not in billing_account_names:\n raise ValueError(\n 'The provided billing account does not exist or is not eligible to use.'\n )",
"def _check_countries(countries: list) -> bool:\n countries_count = Country.objects.all() \\\n .filter(id__in=countries) \\\n .distinct().count()\n\n if countries_count != len(set(countries)):\n return False\n\n return True",
"def test_geography_unique(self):\n htown = City.objects.get(name=\"Houston\")\n CityUnique.objects.create(point=htown.point)\n duplicate = CityUnique(point=htown.point)\n msg = \"City unique with this Point already exists.\"\n with self.assertRaisesMessage(ValidationError, msg):\n duplicate.validate_unique()",
"def test_regions(self):\n for i, item in enumerate(self._letters_proto.item):\n for code in item.region:\n # Region codes should be alpha-2 (where possible) or alpha-3 codes as\n # defined by ISO 3166 standard.\n self.assertLess(1, len(code))\n self.assertGreater(4, len(code))\n self.assertTrue(code.isupper(), f'Letter {i}: Region code `{code}` '\n 'should be upper-case')\n if len(code) == 3:\n country = pycountry.countries.get(alpha_3=code)\n self.assertTrue(country, f'Failed to find country for code `{code}`')\n if hasattr(country, 'alpha_2'):\n self.fail(f'Letter {i}: Please use two-letter code '\n f'`{country.alpha_2}` instead of `{country.alpha_3}` '\n f'for {country.name}')\n else:\n country = pycountry.countries.get(alpha_2=code)\n self.assertTrue(country, f'Failed to find country for code {code}')",
"def testNormalCreate(self):\n\n canada = self.Country(\n {\"name\": \"Canada\", \"abbreviation\": \"CA\", \"languages\": [\"english\", \"french\"]}\n )\n\n canada.save()\n\n self.assertEqual(\"Canada\", canada.name)\n self.assertEqual(\"CA\", canada.abbreviation)\n self.assertEqual(2, len(canada.languages))\n self.assertTrue(\"english\" in canada.languages)\n self.assertTrue(\"french\" in canada.languages)",
"def country(alpha_2_code: str) -> None:",
"def test_create_new_address_incorrect_state_province(self):\n data = dict(\n address_line1='random address 2',\n postal_code='RAN DOM',\n city='random city',\n state_province=self.random_state_province.iso_code,\n country=self.random_country2.iso_code,\n )\n\n self.client.force_authenticate(user=self.admin)\n\n response = self.client.post(\n reverse('location:addresses'),\n data,\n format='json',\n )\n\n res = json.loads(response.content)\n\n err = {\n 'detail': 'The StateProvince should be linked to the Country'\n }\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(res, err)",
"def validate_subdivision(form, field):\n # TODO: check to see if subdivision is in selected country\n try:\n pycountry.subdivisions.lookup(field.data)\n except Exception:\n raise ValidationError(field.data + ' is not a State / Province / Region')",
"def test_only_one_active_Zosia_can_exist(self):\n with self.assertRaises(ValidationError):\n new_zosia(active=True).full_clean()",
"def is_valid_province(province):\n assert province is not None\n return province.lower() in province_complete or \\\n province.upper() in province_abbr",
"def test_clean_country_flag(self):\n # country_flag = self.cleaned_data.get('country_flag', None)\n # field = self.fields.get(self.country_field_name, None)\n # if not field and hasattr(self, 'computed_fields'):\n # field = self.computed_fields.get(self.country_field_name, None)\n # if field.initial == self.cleaned_data.get(self.country_field_name, None)\n pass",
"def clean_supply_place(self):\r\n nature_transactions_sales = self.cleaned_data['nature_transactions_sales']\r\n supply_place = self.cleaned_data['supply_place']\r\n\r\n if self.company.organisation.state == supply_place and nature_transactions_sales == 'Interstate Sales - Taxable':\r\n raise ValidationError(\r\n \"This nature of transaction in not valid for the given Place of Supply\")\r\n if self.company.organisation.state == supply_place and nature_transactions_sales == 'Deemed Exports Exempt':\r\n raise ValidationError(\r\n \"This nature of transaction in not valid for the given Place of Supply\")\r\n if self.company.organisation.state == supply_place and nature_transactions_sales == 'Deemed Exports Nil Rated':\r\n raise ValidationError(\r\n \"This nature of transaction in not valid for the given Place of Supply\")\r\n if self.company.organisation.state == supply_place and nature_transactions_sales == 'Deemed Exports Taxable':\r\n raise ValidationError(\r\n \"This nature of transaction in not valid for the given Place of Supply\")\r\n if self.company.organisation.state == supply_place and nature_transactions_sales == 'Exports Exempt':\r\n raise ValidationError(\r\n \"This nature of transaction in not valid for the given Place of Supply\")\r\n if self.company.organisation.state == supply_place and nature_transactions_sales == 'Exports LUT/Bond':\r\n raise ValidationError(\r\n \"This nature of transaction in not valid for the given Place of Supply\")\r\n if self.company.organisation.state == supply_place and nature_transactions_sales == 'Exports Nil Rated':\r\n raise ValidationError(\r\n \"This nature of transaction in not valid for the given Place of Supply\")\r\n if self.company.organisation.state == supply_place and nature_transactions_sales == 'Exports Taxable':\r\n raise ValidationError(\r\n \"This nature of transaction in not valid for the given Place of Supply\")\r\n if self.company.organisation.state == supply_place and nature_transactions_sales == 'Interstate Sales Exempt':\r\n raise ValidationError(\r\n \"This nature of transaction in not valid for the given Place of Supply\")\r\n if self.company.organisation.state == supply_place and nature_transactions_sales == 'Interstate Sales Nil Rated':\r\n raise ValidationError(\r\n \"This nature of transaction in not valid for the given Place of Supply\")\r\n if self.company.organisation.state == supply_place and nature_transactions_sales == 'Interstate Sales to Embassy / UN Body Exempt':\r\n raise ValidationError(\r\n \"This nature of transaction in not valid for the given Place of Supply\")\r\n if self.company.organisation.state == supply_place and nature_transactions_sales == 'Interstate Sales to Embassy / UN Body Nil Rated':\r\n raise ValidationError(\r\n \"This nature of transaction in not valid for the given Place of Supply\")\r\n if self.company.organisation.state == supply_place and nature_transactions_sales == 'Interstate Sales to Embassy / UN Body Taxable':\r\n raise ValidationError(\r\n \"This nature of transaction in not valid for the given Place of Supply\")\r\n if self.company.organisation.state != supply_place and nature_transactions_sales == 'Intrastate Deemed Exports Exempt':\r\n raise ValidationError(\r\n \"This nature of transaction in not valid for the given Place of Supply\")\r\n if self.company.organisation.state != supply_place and nature_transactions_sales == 'Intrastate Deemed Exports Nil Rated':\r\n raise ValidationError(\r\n \"This nature of transaction in not valid for the given Place of Supply\")\r\n if 
self.company.organisation.state != supply_place and nature_transactions_sales == 'Intrastate Deemed Exports Taxable':\r\n raise ValidationError(\r\n \"This nature of transaction in not valid for the given Place of Supply\")\r\n if self.company.organisation.state != supply_place and nature_transactions_sales == 'Sales Exempt':\r\n raise ValidationError(\r\n \"This nature of transaction in not valid for the given Place of Supply\")\r\n if self.company.organisation.state != supply_place and nature_transactions_sales == 'Sales Nil Rated':\r\n raise ValidationError(\r\n \"This nature of transaction in not valid for the given Place of Supply\")\r\n if self.company.organisation.state != supply_place and nature_transactions_sales == 'Intrastate Sales Taxable':\r\n raise ValidationError(\r\n \"This nature of transaction in not valid for the given Place of Supply\")\r\n if self.company.organisation.state != supply_place and nature_transactions_sales == 'sales to Consumer - Exempt':\r\n raise ValidationError(\r\n \"This nature of transaction in not valid for the given Place of Supply\")\r\n if self.company.organisation.state != supply_place and nature_transactions_sales == 'sales to Consumer - Nil Rated':\r\n raise ValidationError(\r\n \"This nature of transaction in not valid for the given Place of Supply\")\r\n if self.company.organisation.state != supply_place and nature_transactions_sales == 'sales to Consumer - Taxable':\r\n raise ValidationError(\r\n \"This nature of transaction in not valid for the given Place of Supply\")\r\n return supply_place",
"def checkPostalCode(self, code, country):\n if country == 'US':\n USZipCodeField().clean(code)",
"def test_city_country(self):\n santiago_chile = city_country('santiago', 'chile')\n self.assertEqual(santiago_chile, 'Santiago, Chile')",
"def test_city_country(self):\n santiago_chile = city_country('santiago', 'chile')\n self.assertEqual(santiago_chile, 'Santiago, Chile')",
"def test_city_country(self):\n santiago_chile = city_country('santiago', 'chile')\n self.assertEqual(santiago_chile, 'Santiago, Chile')",
"def test_profile_country_to_alpha3_invalid_country(self):\n with mute_signals(post_save):\n profile = ExamProfileFactory(profile__country='XXXX')\n with self.assertRaises(InvalidProfileDataException):\n CDDWriter.profile_country_to_alpha3(profile)",
"def test_country(self):\n self.assertIsInstance(self.address.country, str)\n self.assertEqual(self.address.country, \"France\")",
"def countryInput(self, prompt):\n while True:\n name = input(prompt)\n if name not in self.travel_db.countries:\n print(\"Invalid country name. Please make sure name is capitalized.\")\n else:\n return name",
"def test_CovidCase_creation(self):\n new_Covid = self.create_CovidCase()\n\n self.assertTrue(isinstance(new_Covid, CovidCase))\n self.assertEqual(new_Covid.country_id, \"TE\")",
"def valid_country(value: Any) -> str:\n value = cv.string(value)\n all_supported_countries = holidays.list_supported_countries()\n\n try:\n raw_value = value.encode(\"utf-8\")\n except UnicodeError as err:\n raise vol.Invalid(\n \"The country name or the abbreviation must be a valid UTF-8 string.\"\n ) from err\n if not raw_value:\n raise vol.Invalid(\"Country name or the abbreviation must not be empty.\")\n if value not in all_supported_countries:\n raise vol.Invalid(\"Country is not supported.\")\n return value",
"def clean(self):\n character_id = self.cleaned_data['character_id']\n character = Character.objects.get(pk=character_id)\n if character.concept_approved_flag:\n raise ValidationError(f\"The concept for {character} has already been approved.\")\n return super().clean()",
"def check_country(input_list):\n\n country_list = open(\"countries.txt\").read().splitlines()\n country_list = [x.lower() for x in country_list]\n while True:\n if not all(x in country_list for x in input_list):\n print(\"Please make sure you entered the correct country names\")\n\n input_list = create_input_list(\"Try again, each item \"\n \"separated by ', ': \")\n continue\n else:\n break\n\n return input_list",
"def test_city_country(self):\n santiago_chile = get_city_name('santiago', 'chile')\n self.assertEqual(santiago_chile, 'Santiago, Chile')"
] | [
"0.57727647",
"0.5747203",
"0.5745896",
"0.5662479",
"0.5656905",
"0.55133384",
"0.54153365",
"0.53913385",
"0.53848505",
"0.53621507",
"0.5360658",
"0.53440136",
"0.5325288",
"0.53173727",
"0.528774",
"0.5264815",
"0.5211678",
"0.5199605",
"0.5149981",
"0.5145287",
"0.5145287",
"0.5145287",
"0.5140265",
"0.51283246",
"0.50940603",
"0.50788504",
"0.50710183",
"0.50225186",
"0.50201267",
"0.5001773"
] | 0.64122915 | 0 |
Ensures that the cleaning of UN member status behaves as expected | def test_un_member_status(self):
# Get Hong Kong
hong_kong = Country.objects.get(iso3="HKG")
# Assert that is_un_member_at is None
self.assertEqual(hong_kong.is_un_member_at, None)
# Initialize assertRaises block
with self.assertRaises(ValidationError):
# Set is UN member to True
hong_kong.is_un_member = True
# Attempt to clean the Hong Kong object
hong_kong.clean()
# Should fail because no corresponding is UN member at date is set
# Get Thailand
thailand = Country.objects.get(iso3="THA")
# Assert that Thailand is a UN member state
self.assertEqual(thailand.is_un_member, True)
# Assert that Thailand is UN member at is not None
self.assertIsNotNone(thailand.is_un_member_at)
# Set Thailand is UN member at to False
thailand.is_un_member = False
# Clean Thailand object
thailand.clean()
        # Assert that Thailand is UN member at is now None
self.assertIsNone(thailand.is_un_member_at) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clean(self, uid, states=None):\n\n # doesn't change status",
"def clean(self):\n # Perform the standard ACE cleaning\n max_status = mm_ace.clean(self)\n\n # Replace bad values with NaN and remove times with no valid data\n self.data = self.data[self.data['status'] <= max_status]\n\n return",
"def clean(self):\n pass",
"def clean(self):\n\n pass",
"def clean_up(self):\n\t\tpass",
"def clean_role():",
"def test_project_user_membership_unauthorised_status(self):\n unauthorised_states = [\n ProjectUserMembership.REVOKED,\n ProjectUserMembership.SUSPENDED,\n ProjectUserMembership.DECLINED,\n ]\n for status in unauthorised_states:\n self.membership.status = status\n self.assertTrue(self.membership.unauthorised())",
"def test_kyc_delete_legal_board_member(self):\n pass",
"def _clean_up(self):",
"def clean(self):",
"def clean_up(self):\n pass",
"def clean_up(self):\n pass",
"def testResetMembers(self):\n\n self.assertEqual(\n {'uniqueId': [self.cd]},\n cdl_convert.ColorDecision.members\n )\n\n cdl_convert.ColorDecision.reset_members()\n\n self.assertEqual(\n {},\n cdl_convert.ColorDecision.members\n )",
"def clean_up_data(self):\n pass",
"def clean(c):",
"def clean(self):\n raise NotImplementedError",
"def test_X_normalization_not_raw(self):\n\n # Assign a real value to X while X_normalization is 'none'\n del self.validator.adata.raw\n self.validator.adata.uns[\"X_normalization\"] = \"none\"\n self.validator.validate_adata()\n print(\"FOO\", self.validator.warnings)\n self.assertEqual(\n self.validator.warnings,\n [\n \"WARNING: uns['X_normalization'] is 'none', there is no 'raw.X' and 'X' doesn't appear \"\n \"to have raw counts (integers)\"\n ],\n )",
"def test__clean_status(self):\n assert not dockerprettyps._clean_status(\"Exited (1) 22 minutes ago\")\n assert dockerprettyps._clean_status(\"Up 12 minutes\")",
"def test_statusClean(self):\n reposDir = self.makeRepository(self.tmpDir)\n self.assertTrue(self.createCommand.isStatusClean(reposDir))",
"def test_unassign_managing_team(self):\n pass",
"def clean_up(self) -> None:\n print('Doing some clean-up work...')",
"def clear(self):\n\t\tself.membersWithErrors.clear()",
"async def clean(self, ctx):\n pass",
"def cleanUp(self):\r\n pass",
"def clean(_context):",
"def cleanup(self):\n if self._status == 0:\n self.flag = 0\n elif self._status == 1:\n self.flag = 1\n else:\n self.flag = 2\n\n self.final_params = self._popt",
"def clean_up(self):\n # TODO: Implement if needed\n pass",
"def clean_for_commit(self):",
"def test_required_fields_X_normalization(self):\n\n del self.validator.adata.uns[\"X_normalization\"]\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors, [\"ERROR: 'X_normalization' in 'uns' is not present.\"]\n )",
"def cleanup():\n for s in [missiles, explosions, bonus]:\n\n set_to_remove = set([])\n for m in s:\n if m.isDead:\n set_to_remove.add(m)\n\n s.difference_update(set_to_remove)"
] | [
"0.6766085",
"0.60108733",
"0.5959499",
"0.5928209",
"0.59210426",
"0.5901284",
"0.5901226",
"0.586937",
"0.5864156",
"0.5860781",
"0.58541995",
"0.58541995",
"0.58360237",
"0.58315945",
"0.5801635",
"0.5786526",
"0.57726616",
"0.57681125",
"0.57417685",
"0.572516",
"0.56783324",
"0.5657457",
"0.5641488",
"0.5635325",
"0.5625691",
"0.5616406",
"0.56001997",
"0.5598132",
"0.5553602",
"0.5542623"
] | 0.73605675 | 0 |
Remove all erroneous colors, replaced by the most commonly found in the direct neighborhood | def clean(data, out, npcolors):
prev_err = 0
new_err = 0
old = data.copy()
for r in range(data.shape[0]):
for c in range(data.shape[1]):
found = -1
for i, col in enumerate(npcolors):
if data[r, c] == col:
found = i
if found == -1:
prev_err += 1
count = np.zeros(npcolors.shape[0], dtype = np.intp)
for ir in range(max(r-1, 0), min(r+2, data.shape[0])):
for ic in range(max(c-1, 0), min(c+2, data.shape[1])):
for i, col in enumerate(npcolors):
if old[ir, ic] == col:
count[i] += 1
ic = np.argmax(count)
if count[ic] != 0:
data[r, c] = npcolors[ic]
found = ic
else:
new_err += 1
if found != -1:
out[r, c] = found
return prev_err, new_err | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove_colors(images):\n images = images[:, :, :, :, 0]\n return images",
"def color_invalid(self):\n for i in self.invalid:\n self.color_cell(i, INVALID)",
"def lightness_correction(self):\n points = self.color_lookup_table_points\n lightness_max_value = math.sqrt(3 * (255**2))\n deadpool = list()\n for index, point in enumerate(points[0]):\n point = self.get_value_tuple(index)\n lightness = int(math.sqrt(point[0]**2 + point[1]**2 + point[2]**2) * 255 / lightness_max_value)\n if not self.to_dark < lightness < self.to_bright:\n deadpool.append(index)\n self.color_lookup_table_points = (np.delete(points[0], deadpool),\n np.delete(points[1], deadpool),\n np.delete(points[2], deadpool))\n self.point_count = len(self.color_lookup_table_points[0])",
"def remove_colors(string):\n color_list = ['\\x1b[0;30m', '\\x1b[0;31m', '\\x1b[0;32m', '\\x1b[0;33m', '\\x1b[0;34m', '\\x1b[0;35m', '\\x1b[0;36m', '\\x1b[0;37m', '\\x1b[0;39m', '\\x1b[0;40m', '\\x1b[0;41m', '\\x1b[0;42m', '\\x1b[0;43m', '\\x1b[0;44m', '\\x1b[0;45m', '\\x1b[0;46m', '\\x1b[0;47m', '\\x1b[0;49m', '\\x1b[0;90m', '\\x1b[0;91m', '\\x1b[0;92m', '\\x1b[0;93m', '\\x1b[0;94m', '\\x1b[0;95m', '\\x1b[0;96m', '\\x1b[0;97m', '\\x1b[0;99m', '\\x1b[0;100m', '\\x1b[0;101m', '\\x1b[0;102m', '\\x1b[0;103m', '\\x1b[0;104m', '\\x1b[0;105m', '\\x1b[0;106m', '\\x1b[0;107m', '\\x1b[0;109m', '\\x1b[1;30m', '\\x1b[1;31m', '\\x1b[1;32m', '\\x1b[1;33m', '\\x1b[1;34m', '\\x1b[1;35m', '\\x1b[1;36m', '\\x1b[1;37m', '\\x1b[1;39m', '\\x1b[1;40m', '\\x1b[1;41m', '\\x1b[1;42m', '\\x1b[1;43m', '\\x1b[1;44m', '\\x1b[1;45m', '\\x1b[1;46m', '\\x1b[1;47m', '\\x1b[1;49m', '\\x1b[1;90m', '\\x1b[1;91m', '\\x1b[1;92m', '\\x1b[1;93m', '\\x1b[1;94m', '\\x1b[1;95m', '\\x1b[1;96m', '\\x1b[1;97m', '\\x1b[1;99m', '\\x1b[1;100m', '\\x1b[1;101m', '\\x1b[1;102m', '\\x1b[1;103m', '\\x1b[1;104m', '\\x1b[1;105m', '\\x1b[1;106m', '\\x1b[1;107m', '\\x1b[1;109m']\n for x in color_list:\n string = string.replace(x, '')\n return string",
"def uniqueish_color(color_data):\n # return plt.cm.gist_ncar(color_data)\n # return plt.cm.binary(color_data)\n return plt.cm.bwr(color_data)",
"def remove_color(image):\n return image[:, :, 0]",
"def remove_color(image):\n return image[:, :, 0]",
"def remove_color(img: np.ndarray, ratio: float, neutral_color: Tuple[int, int, int] = RGB_WHITE) -> None:\n\n channels = img.shape[-1]\n assert channels == 3, \"Not a 3 channel color image\"\n\n norm = np.std(np.array(RGB_YELLOW)) # this is the same for all pure colors\n\n sd = np.std(img, axis=-1)\n img[sd > ratio * norm] = neutral_color",
"def remove_colors(ingredient):\n colors = [\"yellow\", \"purple\", \"green\", \"black\",\n \"purple\", \"white\", \"red\"]\n no_colors = [gram for gram in ingredient.split(\" \") if gram not in colors]\n colorless_string = \" \".join(no_colors)\n return colorless_string",
"def get_unsat_color(colors):\n sat = (255, 255, 255)\n for q, color in colors:\n h, s, v = color\n if s < sat[1]:\n sat = color\n return sat",
"def clean_edges(self):",
"def cleanup_passed_color_value(s):\n reo = re.compile('[0-9a-f]')\n cannotBeCleaned = ''\n if s[0] == '#' and len(s) in [4,7] and reo.match(s[1:]):\n return s\n if s in colorNamesAndCodes:\n col = colorNamesAndCodes[s]\n if reo.match(col[1:]):\n return col\n else:\n return cannotBeCleaned\n if len(s) in [3,6] and reo.match(s):\n return '#' + s\n if len(s) == 2 and reo.match(s):\n return '#' +s +s +s\n return cannotBeCleaned",
"def unique_colors(img):\n colors = {i[1] for i in img.getcolors(maxcolors=img.size[0]*img.size[1])}\n return colors",
"def recolorRegions(x,y,newColorArray,finalColorList):\r\n ## If the comparison tolerance is larger than the normalization factor then two regions can be merged even though they do not appear exactly similar.\r\n ## The purpose of this function is that if that happens then the color that appears the most often is assigned to each square in the merged region so that each merged region will be one single color.\r\n\r\n ## If the color of the current square unit is not in the reduced unique color list then the function returns whichever color of that list it is closest to.\r\n if not newColorArray[x][y] in finalColorList:\r\n distance = [0] * len(finalColorList)\r\n for i in range(0,len(finalColorList)):\r\n distance[i] = euclidianDistance(newColorArray[x][y],finalColorList[i])\r\n minIndex = distance.index(min(distance))\r\n return finalColorList[minIndex]\r\n else:\r\n return newColorArray[x][y]",
"def colorWipe(self, color):\r\n #color = Color(R,G,B)\r\n for i in range(self.strip.numPixels()):\r\n self.strip.setPixelColor(i, color)\r\n self.strip.show()",
"def strip_color(piece):\n return piece[1:]",
"def handle_colordetection(self):\n self.robot.sensormap.tank_drive.stop()\n if self.robot.sensormap.cs_l.color in self.colors and self.robot.sensormap.cs_l.color not in self.detected:\n self.detected.add(self.robot.sensormap.cs_l.color)\n elif self.robot.sensormap.cs_r.color in self.colors and self.robot.sensormap.cs_r.color not in self.detected:\n self.detected.add(self.robot.sensormap.cs_r.color)\n elif self.robot.sensormap.cs_m.color in self.colors and self.robot.sensormap.cs_m.color not in self.detected:\n self.detected.add(self.robot.sensormap.cs_m.color)",
"def remove_dark_background(self, image_array):\n\n cut_off = self.get_image_balance(image_array, False)\n if cut_off < 200:\n cut_off = 200\n new_array = image_array.copy()\n new_array.setflags(write=1)\n for row_number, each_row in enumerate(new_array):\n for pixel_number, each_pixel in enumerate(each_row):\n if reduce(lambda x, y: int(x) + int(y), each_pixel[:3]) / 3 > cut_off:\n new_array[row_number][pixel_number] = image_array[row_number][pixel_number]\n else:\n new_array[row_number][pixel_number] = [0, 0, 0] # Black\n return new_array",
"def test_nan_color_copy():\n\n data = np.zeros((16, 16))\n\n f1 = FITSFigure(data)\n f1.show_grayscale()\n f1.set_nan_color('blue')\n\n f2 = FITSFigure(data)\n f2.show_grayscale()\n f2.set_nan_color('red')\n\n assert f1.image.get_cmap()._rgba_bad == (0.0, 0.0, 1.0, 1.0)\n assert f2.image.get_cmap()._rgba_bad == (1.0, 0.0, 0.0, 1.0)",
"def shuffle_colors(mutated_genome):\n mutated_genome",
"def groupByColor_unlifted(pixmap):\n # Count the number of colors\n nb_colors = int(pixmap.max()) + 1\n # Create a pixmap for each color\n splited = [(pixmap == i) * i for i in range(1, nb_colors)]\n # Filter out empty images\n return [x for x in splited if np.any(x)]",
"def revert_color(cls, colors):\n # 0.5 is to map the color to the center of the range\n return [int((c+0.5) / cls.color_level * 256) for c in colors]",
"def refill_real(img, result, clustermask, cluster_colors):\n overall_dist = 0\n w, h, _ = img.shape\n for x in range(w):\n for y in range(h):\n cid = clustermask[x, y]\n result[x, y] = cluster_colors[cid]",
"def uniqueish_color():\n return plt.cm.gist_ncar(np.random.random())",
"def correct_artefacts(wsh):\n unique, count = np.unique(wsh, return_counts=True)\n to_remove = unique[count<=3]\n for rem in to_remove:\n rem_im = wsh==rem\n rem_cont = dilation(rem_im) & ~rem_im\n vals, val_counts = np.unique(wsh[rem_cont], return_counts=True)\n replace_val = vals[np.argmax(val_counts)]\n if replace_val != 0:\n wsh[rem_im] = int(replace_val)\n return wsh",
"def test_clearColorFormatting(self):\n self.assertAssembledEqually(\n \"\\x0301yay\\x03reset\", A.normal[A.fg.black[\"yay\"], \"reset\"]\n )\n self.assertAssembledEqually(\n \"\\x0301,02yay\\x03reset\", A.normal[A.fg.black[A.bg.blue[\"yay\"]], \"reset\"]\n )",
"def colorWipe(strip):\n for i in range(strip.numPixels()):\n strip.setPixelColor(i, GREEN_COLOR)\n strip.show()",
"def clear_colors(self):\n for r in range(0, self.maze.get_nrows()):\n for c in range(0, self.maze.get_ncols()):\n self.set_color((r, c), 'white', draw=False)\n\n self.cvs.itemconfig(self.cvs_cells[self.maze.get_start_cell()],\n fill='green')\n self.cvs.itemconfig(self.cvs_cells[self.maze.get_end_cell()],\n fill='red')\n\n self.draw()",
"def negative(img): \n for pixel in img:\n x, y, col = pixel \n r, g, b = col\n \n new_color = create_color(255 - r, 255 - g, 255 - b)\n set_color(img, x, y, new_color)",
"def get_good_colors(N):\n HSV_tuples = [(x*1.0/N, 0.5, 1) for x in range(N)]\n return(255 * np.array(map(lambda x: colorsys.hsv_to_rgb(*x), HSV_tuples)))"
] | [
"0.6484606",
"0.6377774",
"0.62030065",
"0.60673124",
"0.6038112",
"0.5998475",
"0.5998475",
"0.595649",
"0.594523",
"0.5914712",
"0.5899766",
"0.58824664",
"0.58483124",
"0.5814882",
"0.580523",
"0.5790582",
"0.5781437",
"0.57611006",
"0.575656",
"0.5732335",
"0.5723635",
"0.570608",
"0.57037044",
"0.5645009",
"0.5636778",
"0.5635042",
"0.5586749",
"0.5584295",
"0.5559628",
"0.55340666"
] | 0.6692518 | 0 |
Implement call sklearn metric on dataset. | def __call__(self, dataset: 'SklearnCompatible', dropna: bool = False) -> float:
assert hasattr(dataset, 'target'), 'Dataset should have target to calculate metric'
if self.one_dim:
assert dataset.shape[1] == 1, 'Dataset should have single column if metric is one_dim'
# TODO: maybe refactor this part?
dataset = dataset.to_numpy()
y_true = dataset.target
y_pred = dataset.data
sample_weight = dataset.weights
if dropna:
sl = ~np.isnan(y_pred).any(axis=1)
y_pred = y_pred[sl]
y_true = y_true[sl]
if sample_weight is not None:
sample_weight = sample_weight[sl]
if self.one_dim:
y_pred = y_pred[:, 0]
value = self.metric(y_true, y_pred, sample_weight=sample_weight)
sign = 2 * float(self.greater_is_better) - 1
return value * sign | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def evaluate(self, dataset):\n\t\tpass",
"def evaluate(self, data, metric, classes=None):\n func_dict = {\n 'mutual_information': sklearn.metrics.mutual_info_score,\n 'normed_mutual_information': sklearn.metrics.normalized_mutual_info_score,\n 'square_error': sklearn.metrics.mean_squared_error,\n 't-test': scipy.stats.ttest_ind,\n 'wilcoxon': scipy.stats.wilcoxon,\n 'correlation': np.corrcoef\n }\n self.make_signature(data, classes)\n try:\n if metric in {'mutual_information', 'normed_mutual_information'}:\n self.score = func_dict[metric](classes, self.digit_signature()) \n elif metric == 'square_error':\n self.score = func_dict[metric](classes, self.signatures)\n elif metric in {'t-test', 'wilcoxon'} :\n self.score = np.abs(func_dict[metric](self.signatures[classes==1], \\\n self.signatures[classes==0])[0])\n \n elif metric == 'correlation':\n self.score = func_dict[metric](classes, self.signatures)[1,0]\n \n except: KeyError(\"no such a function\") \n \n return self.score",
"def __call__(self, dataset: 'LAMLDataset', dropna: bool = False):\n assert hasattr(dataset, 'target'), 'Dataset should have target to calculate metric'\n raise NotImplementedError",
"def evaluate(self, dataset, metric='auto', verbose=True, batch_size=64):\n if(batch_size < 1):\n raise ValueError(\"'batch_size' must be greater than or equal to 1\")\n\n extracted_features = self._extract_features(dataset, verbose=verbose, batch_size=batch_size)\n extracted_features[self.target] = dataset[self.target]\n return self.classifier.evaluate(extracted_features, metric = metric)",
"def calculate_dataset_metrics(self):\n pass",
"def compute(cls, dataset):\n return dataset",
"def _fit(self, dataset):\n raise NotImplementedError()",
"def evaluate(self, test_data, test_labels):\n raise NotImplementedError",
"def evaluate(self, dataset, *args, **kwargs):\n\n losses = []\n for sample in dataset:\n output = self.predict(sample, *args, **kwargs)\n losses.append(self.metric_loss(output, sample, *args, **kwargs))\n\n return losses",
"def evaluate(self, dataset, metric='auto', missing_value_action='auto'):\n\n _raise_error_evaluation_metric_is_valid(metric,\n ['auto', 'accuracy', 'confusion_matrix', 'roc_curve', 'auc',\n 'log_loss', 'precision', 'recall', 'f1_score'])\n return super(_Classifier, self).evaluate(dataset,\n missing_value_action=missing_value_action,\n metric=metric)",
"def evaluate(self, dataset):\n return self.model.evaluate(dataset.X_val, dataset.y_val)",
"def _train(self,\n Xs: Array,\n Ys: Array,\n metric: Callable = None,\n **kwargs):\n self.model.fit(Xs, Ys, **kwargs)\n return None",
"def __evaluate_metric(dataset, y_act, y_pred):\n if dataset.metric == 'specific':\n if dataset.best_is_min:\n return return_specific_metrics(y_act, y_pred)\n else:\n return -return_specific_metrics(y_act, y_pred)\n else:\n return evaluate_metric(y_act, y_pred, dataset.metric, dataset.y_n_classes)",
"def calc_metric(output, metrics):\n score = []\n for metric in metrics:\n metric_mod = __import__(\"sklearn.metrics\", fromlist=[metric])\n metric_func = getattr(metric_mod, metric)\n score.append(metric_func(output[0], output[1]))\n return score, output",
"def fit(self, X):",
"def __call__(self, estimator, X, y_true, sample_weight=None, offsets=None):\n return self._score(\n partial(_cached_call, None),\n estimator,\n X,\n y_true,\n sample_weight=sample_weight,\n offsets=offsets\n )",
"def __evaluate_other_metrics(dataset, m, y_act, y_pred):\n return evaluate_metric(y_act, y_pred, m, dataset.y_n_classes)",
"def nnRegression(data):",
"def classify(self, data):\n \"*** YOUR CODE HERE ***\"\n return self.sklearn_classifier.predict(data)",
"def test(self,dataset):\n outputs = self.use(dataset)\n \n costs = np.ones((len(outputs),1))\n # Compute classification error\n for xy,pred,cost in zip(dataset,outputs,costs):\n x,y = xy\n if y == pred[0]:\n cost[0] = 0\n\n return outputs,costs",
"def test_call(self, MetricClass, seed):\n m = MetricClass()\n strategy = RandomTrader(seed=seed).run(make_randomwalk(seed=seed))\n result0 = np.array(m.result(strategy)) # from `result` method\n result1 = np.array(m(strategy)) # from __call__\n assert np.equal(result0, result1).all()",
"def evaluate_performance(data_loader, model):\n acc = mx.metric.Accuracy()\n\n for idx, (data, label) in enumerate(data_loader):\n data = data.as_in_context(model.ctx)\n label = label.as_in_context(model.ctx)\n pred = model(data)\n pred = mx.nd.argmax(pred, axis=1)\n acc.update(label, pred)\n return acc.get()",
"def score(\n self,\n data,\n metric=\"accuracy\",\n break_ties=\"random\",\n verbose=True,\n print_confusion_matrix=True,\n **kwargs,\n ):\n Y_p, Y, Y_s = self._get_predictions(\n data, break_ties=break_ties, return_probs=True, **kwargs\n )\n\n # Evaluate on the specified metrics\n return_list = isinstance(metric, list)\n metric_list = metric if isinstance(metric, list) else [metric]\n scores = []\n for metric in metric_list:\n score = metric_score(Y, Y_p, metric, probs=Y_s, ignore_in_gold=[0])\n scores.append(score)\n if verbose:\n print(f\"{metric.capitalize()}: {score:.3f}\")\n\n # Optionally print confusion matrix\n if print_confusion_matrix and verbose:\n confusion_matrix(Y, Y_p, pretty_print=True)\n\n # If a single metric was given as a string (not list), return a float\n if len(scores) == 1 and not return_list:\n return scores[0]\n else:\n return scores",
"def get_metric_fn(self, sklearn_fn, kwargs=None):\n kwargs = kwargs if kwargs else dict()\n if self.data_content is not None and self.task_id is not None:\n predictions_arff = self._generate_arff_dict()\n elif \"predictions\" in self.output_files:\n predictions_file_url = openml._api_calls._file_id_to_url(\n self.output_files[\"predictions\"],\n \"predictions.arff\",\n )\n response = openml._api_calls._download_text_file(predictions_file_url)\n predictions_arff = arff.loads(response)\n # TODO: make this a stream reader\n else:\n raise ValueError(\n \"Run should have been locally executed or \" \"contain outputfile reference.\"\n )\n\n # Need to know more about the task to compute scores correctly\n task = get_task(self.task_id)\n\n attribute_names = [att[0] for att in predictions_arff[\"attributes\"]]\n if (\n task.task_type_id in [TaskType.SUPERVISED_CLASSIFICATION, TaskType.LEARNING_CURVE]\n and \"correct\" not in attribute_names\n ):\n raise ValueError('Attribute \"correct\" should be set for ' \"classification task runs\")\n if task.task_type_id == TaskType.SUPERVISED_REGRESSION and \"truth\" not in attribute_names:\n raise ValueError('Attribute \"truth\" should be set for ' \"regression task runs\")\n if task.task_type_id != TaskType.CLUSTERING and \"prediction\" not in attribute_names:\n raise ValueError('Attribute \"predict\" should be set for ' \"supervised task runs\")\n\n def _attribute_list_to_dict(attribute_list):\n # convenience function: Creates a mapping to map from the name of\n # attributes present in the arff prediction file to their index.\n # This is necessary because the number of classes can be different\n # for different tasks.\n res = OrderedDict()\n for idx in range(len(attribute_list)):\n res[attribute_list[idx][0]] = idx\n return res\n\n attribute_dict = _attribute_list_to_dict(predictions_arff[\"attributes\"])\n\n repeat_idx = attribute_dict[\"repeat\"]\n fold_idx = attribute_dict[\"fold\"]\n predicted_idx = attribute_dict[\"prediction\"] # Assume supervised task\n\n if (\n task.task_type_id == TaskType.SUPERVISED_CLASSIFICATION\n or task.task_type_id == TaskType.LEARNING_CURVE\n ):\n correct_idx = attribute_dict[\"correct\"]\n elif task.task_type_id == TaskType.SUPERVISED_REGRESSION:\n correct_idx = attribute_dict[\"truth\"]\n has_samples = False\n if \"sample\" in attribute_dict:\n sample_idx = attribute_dict[\"sample\"]\n has_samples = True\n\n if (\n predictions_arff[\"attributes\"][predicted_idx][1]\n != predictions_arff[\"attributes\"][correct_idx][1]\n ):\n pred = predictions_arff[\"attributes\"][predicted_idx][1]\n corr = predictions_arff[\"attributes\"][correct_idx][1]\n raise ValueError(\n \"Predicted and Correct do not have equal values:\"\n \" %s Vs. 
%s\" % (str(pred), str(corr))\n )\n\n # TODO: these could be cached\n values_predict = {}\n values_correct = {}\n for line_idx, line in enumerate(predictions_arff[\"data\"]):\n rep = line[repeat_idx]\n fold = line[fold_idx]\n if has_samples:\n samp = line[sample_idx]\n else:\n samp = 0 # No learning curve sample, always 0\n\n if task.task_type_id in [\n TaskType.SUPERVISED_CLASSIFICATION,\n TaskType.LEARNING_CURVE,\n ]:\n prediction = predictions_arff[\"attributes\"][predicted_idx][1].index(\n line[predicted_idx]\n )\n correct = predictions_arff[\"attributes\"][predicted_idx][1].index(line[correct_idx])\n elif task.task_type_id == TaskType.SUPERVISED_REGRESSION:\n prediction = line[predicted_idx]\n correct = line[correct_idx]\n if rep not in values_predict:\n values_predict[rep] = OrderedDict()\n values_correct[rep] = OrderedDict()\n if fold not in values_predict[rep]:\n values_predict[rep][fold] = OrderedDict()\n values_correct[rep][fold] = OrderedDict()\n if samp not in values_predict[rep][fold]:\n values_predict[rep][fold][samp] = []\n values_correct[rep][fold][samp] = []\n\n values_predict[rep][fold][samp].append(prediction)\n values_correct[rep][fold][samp].append(correct)\n\n scores = []\n for rep in values_predict.keys():\n for fold in values_predict[rep].keys():\n last_sample = len(values_predict[rep][fold]) - 1\n y_pred = values_predict[rep][fold][last_sample]\n y_true = values_correct[rep][fold][last_sample]\n scores.append(sklearn_fn(y_true, y_pred, **kwargs))\n return np.array(scores)",
"def compute_metrics(self):\n pass",
"def fit_score(estimator, train_data, test_data):\n estimator.fit(*train_data)\n return estimator.score(*test_data)",
"def fit_test(self):",
"def test_evaluate():\n X_train, X_test, y_train, y_test = src.load()\n clf, score = src.train(X_train, y_train)\n test_score = src.evaluate(clf, X_test, y_test)\n assert isinstance(test_score, float)",
"def call(self, x, training):\n raise NotImplementedError",
"def learn(self, Xtrain, ytrain):"
] | [
"0.69129854",
"0.66921866",
"0.6664506",
"0.6658557",
"0.65358526",
"0.64022624",
"0.63196886",
"0.6217403",
"0.62092805",
"0.61501414",
"0.61377925",
"0.6125066",
"0.6089111",
"0.6083159",
"0.60741395",
"0.6060244",
"0.6057991",
"0.6048644",
"0.6021093",
"0.5998874",
"0.59755945",
"0.5958711",
"0.5955522",
"0.59547466",
"0.594027",
"0.5933909",
"0.59223825",
"0.5914577",
"0.5911423",
"0.5910324"
] | 0.6836997 | 1 |
Create metric for dataset. Get LAMLMetric that is called on dataset. | def get_dataset_metric(self) -> LAMLMetric:
# for now - case of sklearn metric only
one_dim = self.name in _one_dim_output_tasks
dataset_metric = SkMetric(self.metric_func, name=self.metric_name,
one_dim=one_dim, greater_is_better=self.greater_is_better)
return dataset_metric | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_metric(self) -> EvalMetric:\n pass",
"def create_metric(self) -> 'LossMetric':\n raise NotImplementedError()",
"def __call__(self, dataset: 'LAMLDataset', dropna: bool = False):\n assert hasattr(dataset, 'target'), 'Dataset should have target to calculate metric'\n raise NotImplementedError",
"def Create(cls, group_key, machine_id, timestamp, payload):\n sort_key = util.CreateSortKeyPrefix(timestamp, randomness=False) + machine_id\n metric = Metric(group_key, sort_key)\n metric.machine_id = machine_id\n metric.timestamp = timestamp\n metric.payload = payload\n return metric",
"def createMLData(self):\n\n if self._verbose:\n print('\\nCreate ML Data')\n\n # Minimum required number of input data for ML training under the\n # current implemented setup.\n if len(self._input_data.index) < 60:\n raise NotEnoughDataForMachineLearningTraining(\n len(self._input_data.index), 60)\n\n # Add features column\n for indicator, feature in zip(self._indicators_set, self._ti_features):\n feature_data = indicator.getTiData()\n # Because there are some inf values\n feature_data = feature_data.replace([np.inf, -np.inf], np.nan)\n if self._verbose:\n print('- adding feature: ', feature['ti'], ', columns: ',\n str([feature['ti'] + '_' + c\n for c in feature_data.columns]), sep='')\n\n for c in feature_data.columns:\n self._ml_data[feature['ti'] + '_' + c] = feature_data[[c]]\n\n if self._include_close_feature:\n self._ml_data['close'] = self._input_data[['close']]\n\n if self._include_volume_feature:\n self._ml_data['volume'] = self._input_data[['volume']]\n\n # Add label column\n self._ml_data['label'] = np.roll(\n a=self._input_data['close'].values, shift=-1, axis=0\n ) - self._input_data['close'].values\n\n self._ml_data.loc[\n self._ml_data.label > 0, 'label'] = ML_CLASSES['UP']\n self._ml_data.loc[\n self._ml_data.label <= 0, 'label'] = ML_CLASSES['DOWN']\n\n self._ml_data['label'] = self._ml_data['label'].apply(lambda x: int(x))\n\n # Remove last row, since it cannot include a label. Future value is not\n # known\n self._ml_data = self._ml_data.iloc[:-1, :]\n\n # Fill missing values\n self._ml_data = fillMissingValues(input_data=self._ml_data)\n\n return self._ml_data",
"def create_metric(self) -> 'LossMetric':\n return PerplexityMetric(prefix=self._metric_prefix)",
"def create_metric(self) -> 'LossMetric':\n return PerplexityMetric(prefix=self._metric_prefix)",
"def calculate_dataset_metrics(self):\n pass",
"def build_metric_func(dataset_split_name, add_summary=True):\n\n def metric_func(labels, logits):\n \"\"\"Evaluation metric function that runs on CPU.\"\"\"\n accuracy_metric_name = 'Eval/Accuracy/%s' % dataset_split_name\n metric_map = {\n accuracy_metric_name: tf.metrics.accuracy(labels, tf.argmax(logits, 1)),\n }\n if add_summary:\n for name, value in metric_map.items():\n tf.summary.scalar(name, value)\n return metric_map\n\n return metric_func",
"def getMeasure(unique_name):",
"def getMeasure(unique_name):",
"def create_metric(self, metric, metric_name=None):\n metric_name = metric_name or metric.name\n with self._accessor_lock:\n self._accessor.create_metric(metric)\n self._cache_set(metric_name, metric)",
"def make_metric(self, name, metadata=None, **kwargs):\n return make_metric(name, metadata=metadata, accessor=self.accessor, **kwargs)",
"def make_metric(self, name, metadata=None, **kwargs):\n return make_metric(name, metadata=metadata, accessor=self.accessor, **kwargs)",
"def get_metric(self) -> mt.Metric:\n return mt.CategoricalAccuracy()",
"def get_metric(self) -> mt.Metric:\n return mt.CategoricalAccuracy()",
"def make_metric(name):\n return {\n \"type\": \"Metric\",\n \"name\": name,\n \"value\": \"\",\n \"units\": \"\",\n \"rating\": \"\",\n \"notes\": \"\",\n \"comment\": \"\",\n }",
"def get_metric(self):\n assert self._metric in self._metrics, 'Unsupported metric! Check the _metrics attribute for a list of supported metrics.'\n if self._metric == 'Euclidean':\n metric = torch.eye(self.parameters.shape[0])\n elif self._metric == 'Banana':\n n = self.dataset.shape[0]\n fish = torch.zeros(2,2)\n fish[0,0] = n/self.prior_var + 1\n fish[0,1] = n*2*self.parameters[1]/self.prior_var\n fish[1,0] = n*2*self.parameters[1]/self.prior_var\n fish[1,1] = n*4*self.parameters[1]**2/self.prior_var + 1\n metric = fish\n elif self._metric == 'Hessian':\n metric = self.get_hessian()\n elif self._metric == 'Softabs':\n hessian = self.get_hessian()\n if self._potential == 'funnel':\n hessian += torch.diag(self.jitters)\n eigs, vects = hessian.symeig(eigenvectors = True)\n softabs = (1./torch.tanh(self.softabs * eigs)) * eigs\n metric = vects @ softabs.diag() @ vects.t()\n elif self._metric == 'Fisher':\n metric = torch.zeros(self.parameters.shape[0],self.parameters.shape[0])\n grads = torch.zeros(self.parameters.shape[0])\n grads[0] = 0.5*torch.sum(self.parameters[1:]**2)*torch.exp(self.parameters[0]) + self.parameters[0]/9.\n grads[1:] = self.parameters[1:]*torch.exp(self.parameters[0])\n metric = torch.ger(grads,grads) + torch.eye(self.parameters.shape[0])/self.softabs\n return metric",
"def compute(cls, dataset):\n return dataset",
"def test_create_derived_metric(self):\n pass",
"def get_metric(self) -> mt.Metric:\n return mt.BinaryAccuracy()",
"def getMeasures(unique_name=None):",
"def test_get_metric_is_independent_from_metric_learner(estimator,\n build_dataset):\n input_data, labels, _, X = build_dataset()\n model = clone(estimator)\n set_random_state(model)\n\n # we fit the metric learner on it and then we compute the metric on some\n # points\n model.fit(input_data, labels)\n metric = model.get_metric()\n score = metric(X[0], X[1])\n\n # then we refit the estimator on another dataset\n model.fit(np.sin(input_data), labels)\n\n # we recompute the distance between the two points: it should be the same\n score_bis = metric(X[0], X[1])\n assert score_bis == score",
"def getMeasures():",
"def get_metric(self, data_row: pd.Series) -> float:",
"def get(metric_name: Text, dataset_info=None):\n # Register your metric by adding an entry in the dictionary below.\n return base.registry.get_instance(metric_name, dataset_info=dataset_info)",
"def _get_eval_metric(self):\n raise NotImplementedError",
"def metric(self):\n\n if not self._metric_cache:\n # Select an appropriate statistic\n cls = utils.import_class_or_module(self._metric)\n self._metric_cache = cls(self.additional)\n\n return self._metric_cache",
"def metric(self, metric_id):\r\n return Metric(self, metric_id)",
"def create(self,\n metric_type,\n metric_kind='GAUGE',\n value_type='DOUBLE',\n description='N/A'):\n descriptor = ga_metric.MetricDescriptor()\n if metric_type.startswith('custom.googleapis.com/'):\n descriptor.type = metric_type\n else:\n descriptor.type = 'custom.googleapis.com/%s' % metric_type\n descriptor.metric_kind = (getattr(ga_metric.MetricDescriptor.MetricKind,\n metric_kind))\n descriptor.value_type = (getattr(ga_metric.MetricDescriptor.ValueType,\n value_type))\n descriptor.description = description\n LOGGER.info(f'Creating metric descriptor \"{descriptor.type}\" ...')\n return self.client.create_metric_descriptor(\n name=self.project, metric_descriptor=descriptor)"
] | [
"0.666398",
"0.60399914",
"0.5974834",
"0.5590214",
"0.55357915",
"0.55043375",
"0.55043375",
"0.5482854",
"0.54779565",
"0.5451086",
"0.5451086",
"0.54114413",
"0.5354682",
"0.5354682",
"0.51812303",
"0.51812303",
"0.51733845",
"0.5146675",
"0.5142152",
"0.5129933",
"0.51012015",
"0.5090002",
"0.50766927",
"0.5075793",
"0.50717336",
"0.5049763",
"0.5019419",
"0.50163084",
"0.49926704",
"0.49641666"
] | 0.7164863 | 0 |
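A minimal, runnable sketch of the idea behind the record above: wrapping a sklearn metric function with a name and a greater_is_better flag so a single callable can be handed to a pipeline. The SklearnMetricWrapper class below is a hypothetical stand-in, not LightAutoML's actual SkMetric.

import numpy as np
from sklearn.metrics import mean_squared_error

class SklearnMetricWrapper:
    # Hypothetical stand-in for an SkMetric-style adapter.
    def __init__(self, metric_func, name, one_dim=True, greater_is_better=True):
        self.metric_func = metric_func
        self.name = name
        self.one_dim = one_dim
        self.greater_is_better = greater_is_better

    def __call__(self, y_true, y_pred):
        y_pred = np.ravel(y_pred) if self.one_dim else y_pred
        score = self.metric_func(y_true, y_pred)
        # Flip the sign so "larger is better" holds for every wrapped metric.
        return score if self.greater_is_better else -score

mse = SklearnMetricWrapper(mean_squared_error, 'mse', greater_is_better=False)
print(mse(np.array([1.0, 2.0]), np.array([[1.5], [2.5]])))  # -0.25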
Collects entries in rootdir's basedir directory, which is always relative to rootdir. | def _collect_entries(rootdir: str, basedir: str):
files = []
dirs = []
for entry in os.listdir(os.path.join(rootdir, basedir)):
rel_path = os.path.join(basedir, entry)
full_path = os.path.join(rootdir, rel_path)
isdir = os.path.isdir(full_path)
if isdir and (rel_path in ('./.git', './.pytest_cache') or entry == '__pycache__'):
continue
st = os.stat(full_path, follow_symlinks=False)
(dirs if isdir else files).append((rel_path, dict(isdir=isdir, path=rel_path, size=(0 if isdir else st.st_size),
mode=st.st_mode, omode=f'{st.st_mode:04o}',
mtime=int(st.st_mtime))))
for rel_path, entry in sorted(dirs):
yield entry
yield from _collect_entries(rootdir, rel_path)
for _, entry in sorted(files):
yield entry | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_final_dirs(self, root=\"\"):\n _updated = int(self.stats()[\"db_update\"])\n _hash = uhash(root)\n return self._get_final_dirs(_updated=_updated, _hash=_hash, root=root)",
"def getImmediateSubdirectories(dir):",
"def _load_dirs(self):\n rootdirs = self._docset.get_compounds(xml.Directory,\n lambda x: x.get_parent() is None)\n for dirdoc in rootdirs:\n self._load_dir(dirdoc, None)",
"def _walk_dir(self, rootpath):\n assert os.path.isabs(rootpath)\n assert rootpath not in self._dirs\n relpath = self._get_rel_path(rootpath)\n self._dirs[relpath] = Directory(rootpath, relpath, None)\n for dirpath, dirnames, filenames in os.walk(rootpath):\n if 'refdata' in dirnames:\n dirnames.remove('refdata')\n currentdir = self._dirs[self._get_rel_path(dirpath)]\n # Loop through a copy so that we can modify dirnames.\n for dirname in list(dirnames):\n fullpath = os.path.join(dirpath, dirname)\n if fullpath == self._build_root:\n dirnames.remove(dirname)\n continue\n relpath = self._get_rel_path(fullpath)\n self._dirs[relpath] = Directory(fullpath, relpath, currentdir)\n extensions = ('.h', '.cuh', '.hpp', '.c', '.cc', '.cpp', '.cu', '.bm')\n for filename in filenames:\n basename, extension = os.path.splitext(filename)\n if extension in extensions:\n fullpath = os.path.join(dirpath, filename)\n relpath = self._get_rel_path(fullpath)\n self._files[relpath] = File(fullpath, relpath, currentdir)\n elif extension == '.cmakein':\n extension = os.path.splitext(basename)[1]\n if extension in extensions:\n fullpath = os.path.join(dirpath, filename)\n relpath = self._get_rel_path(fullpath)\n sourcefile = GeneratorSourceFile(fullpath, relpath, currentdir)\n self._files[relpath] = sourcefile\n fullpath = os.path.join(dirpath, basename)\n relpath = self._get_rel_path(fullpath)\n fullpath = os.path.join(self._build_root, relpath)\n generatedfile = GeneratedFile(fullpath, relpath, currentdir)\n self._files[relpath] = generatedfile\n generatedfile.set_generator_source(sourcefile)\n elif extension in ('.l', '.y', '.pre'):\n fullpath = os.path.join(dirpath, filename)\n relpath = self._get_rel_path(fullpath)\n self._files[relpath] = GeneratorSourceFile(fullpath, relpath, currentdir)",
"def finddirs(root):\n retval = []\n for root, dirs, files in os.walk(root):\n for d in dirs:\n retval.append(os.path.join(root, d))\n return retval",
"def path_generator(initial_root):\n for root, dirs, files in os.walk(initial_root):\n paths = [os.path.join(root, name) for name in files]\n return paths",
"def _walk_dirs(self):\n for project_name in self.new_source_paths.keys():\n # print \"-------- Now mapping ---- \" + project_name\n search_path = self.root + project_name + '\\\\Data'\n for dirpath, subdirs, files in os.walk(search_path):\n for file in files:\n self.new_source_paths[project_name][file] = dirpath\n # print \"------------ Finished mapping ------- \" + project_name\n return self.new_source_paths",
"def root_directory_list(self) -> str:\n return self.__root_directory_list",
"def rebalance_root(self):\n split_dirs = [d.split('/') for d in self.directories]\n new_root = []\n for level in zip(*split_dirs):\n if not(all([d == level[0] for d in level])):\n break\n new_root.append(level[0])\n self.root = '/'.join(new_root)",
"def list_all_files(root):\n local_files = []\n for path, dirs, files in os.walk(os_path(root), followlinks=False):\n if len(files) > 0:\n path_wo_root = path[(len(root) + len(slash)):] # remove root part\n local_files.extend([os.path.join(path_wo_root, f) for f in files])\n return local_files",
"def get_root_pack_directory_list(self):\n basedir = self.get_installdir()\n packdirectories = [basedir + '/' + dir for dir in mconst.DEF_PACK_subdirlist]\n return packdirectories",
"def walk(rootdir):\n flist = []\n for root, dirs, files in os.walk(rootdir):\n flist = flist + [os.path.join(root, x) for x in files]\n return flist",
"def scan_buildfiles(root_dir, base_path=None):\r\n\r\n buildfiles = []\r\n for root, dirs, files in os.walk(base_path if base_path else root_dir):\r\n for filename in files:\r\n if BuildFile._is_buildfile_name(filename):\r\n buildfile_relpath = os.path.relpath(os.path.join(root, filename), root_dir)\r\n buildfiles.append(BuildFile(root_dir, buildfile_relpath))\r\n return OrderedSet(sorted(buildfiles, key=lambda buildfile: buildfile.full_path))",
"def __init__(self, root):\n FileHelper.ALL_PATHS = [os.path.join(dp, f) for dp, dn, filenames in os.walk(root) for f in filenames if os.path.splitext(f)[1] in Enums.App.VALID_FILE_TYPES]",
"def get_realization_paths(rootdir, folder_prefix): #{{{\n fnames = []\n for root, dirs, files in os.walk(rootdir):\n if(root=='.'):\n dirs.sort()\n for adir in dirs:\n if(adir.startswith(folder_prefix)):\n fnames.append(rootdir + '/' + adir)\n\n return fnames #}}}",
"def get_all_files(self):\n\t\tfiles_list = []\n\t\tfor path, subdirs, files in os.walk(self.root):\n\t\t for name in files:\n\t\t \tfiles_list.append(os.path.join(self.root, name))\n\t\treturn files_list[0:-1]",
"def build_files_list(root_dir):\n return [\n os.path.join(dirpath, file_path)\n for dirpath, subdirs, files in os.walk(root_dir)\n for file_path in files\n ]",
"def build_files_list(root_dir):\n return [\n os.path.join(dirpath, file_path)\n for dirpath, subdirs, files in os.walk(root_dir)\n for file_path in files\n ]",
"def get_search_dirs(rootdirs_to_search, afile):\n verbose(\"Entering get_search_dirs for \" + afile + \" dirs: \" + str(rootdirs_to_search), LEVEL_4)\n dirs = []\n search_subdirs = get_basic_search_subdirs(afile)\n\n # Add usr/local sub-directories to search list\n subdirs2 = get_search_subdirs(\"usr/local\", afile)\n search_subdirs.extend(subdirs2)\n\n ### Add platform specific search sub-directories\n custom_subdirs = get_customized_search_subdirs(afile)\n search_subdirs.extend(custom_subdirs)\n verbose(\"Final search_subdirs for \" + afile + \" is: \" + str(search_subdirs), LEVEL_4)\n\n for adir in rootdirs_to_search:\n for subdir in search_subdirs:\n path = os.path.join(adir, subdir)\n # non-existent path will be pruned\n if os.path.exists(path):\n dirs.append(path)\n return dirs",
"def files_in_dir(root_dir):\n file_set = set()\n\n for dir_, _, files in os.walk(root_dir):\n for file_name in files:\n rel_dir = os.path.relpath(dir_, root_dir)\n rel_file = os.path.join(rel_dir, file_name)\n file_set.add(rel_file)\n\n return [Path(PureWindowsPath(f)) for f in file_set]",
"def my_root_listdir(root_dir):\n root_listdir = [\n images_dir\n for images_dir in os.listdir(root_dir)\n if not any(\n characters in images_dir for characters in [\".\", \"test\", \"train\", \"valid\"]\n )\n ]\n summ = 0\n for images_dir in root_listdir:\n summ += len(os.listdir(root_dir + \"/\" + images_dir)) / 2 - 2\n print(\"Sum of images in directories: \", int(summ))\n return root_listdir",
"def _walk_to_root(path):\n if not os.path.exists(path):\n raise IOError('Starting path not found')\n\n if os.path.isfile(path):\n path = os.path.dirname(path)\n\n last_dir = None\n current_dir = os.path.abspath(path)\n while last_dir != current_dir:\n yield current_dir\n parent_dir = os.path.abspath(os.path.join(current_dir, os.path.pardir))\n last_dir, current_dir = current_dir, parent_dir",
"def find_root_thrifts(basedirs, sources, log=None):\r\n\r\n root_sources = set(sources)\r\n for source in sources:\r\n root_sources.difference_update(find_includes(basedirs, source, log=log))\r\n return root_sources",
"def walkdirs(root):\r\n scriptype_paths = collections.defaultdict(set)\r\n for root, subdirs, files in os.walk(root):\r\n\r\n # Filter subdirs\r\n tmpdir = []\r\n for i in subdirs:\r\n if i.startswith(EXCLUDE_PATTERNS):\r\n continue\r\n if '__init__.py' in os.listdir(os.path.join(root, i)):\r\n scriptype_paths['python'].add(root)\r\n continue\r\n tmpdir.append(i)\r\n subdirs[:] = tmpdir\r\n\r\n # If files with extension exists add to right source type.\r\n if ext_exists('.py', files):\r\n scriptype_paths['python'].add(root)\r\n if ext_exists('.mel', files):\r\n scriptype_paths['mel'].add(root)\r\n return scriptype_paths",
"def process_dir(pool, topdir):\n for root, dirs, files in os.walk(topdir):\n # Not really needed, but makes things consistent.\n dirs.sort()\n files.sort()\n\n for path in files:\n process_file(pool, os.path.join(root, path))",
"def parse_paths():\r\n sources = get_source_paths()\r\n results = collections.defaultdict(list)\r\n for root_dir in sources:\r\n\r\n for script_type, dirs in walkdirs(root_dir).iteritems():\r\n\r\n for d in dirs:\r\n logger.debug(d)\r\n\r\n # Add paths to environments\r\n if os.path.basename(d).lower().startswith(ICONS):\r\n results['XBMLANGPATH'].append(d)\r\n os.environ['XBMLANGPATH'] += os.pathsep + d\r\n\r\n if script_type == 'mel':\r\n results['MAYA_SCRIPT_PATH'].append(d)\r\n os.environ['MAYA_SCRIPT_PATH'] += os.pathsep + d\r\n else:\r\n results['PYTHONPATH'].append(d)\r\n site.addsitedir(d)\r\n return results",
"def get_all_dirs(dirpath, base_dir=None):\n\tif not base_dir:\n\t\tpost = os.path.normpath(dirpath)\n\telif base_dir in dirpath:\n\t\t(pre, post) = dirpath.split(os.path.normpath(base_dir))\n\t\tpost = os.path.normpath(post)\n\telse:\n\t\treturn\n\tdirs = []\n\t(head, tail) = os.path.split(post)\n\twhile tail:\n\t\tdirs.append(tail)\n\t\t(head, tail) = os.path.split(head)\n\tdirs.reverse()\n\treturn dirs",
"def index_all_files(self, root_dir):\n pass",
"def _local_dir(self):\n return []",
"def all_image_paths(self):\n self.labels = [i for i in (self.get_immediate_subdirectories(self.root_dir))\n if not i.startswith('.')]\n\n for root, subFolders, files in os.walk(self.root_dir):\n files = [i for i in files if not i.startswith('.')]\n files = files[:self.img_num] # hard coded - will not read in\n for i in files:\n self.all_files.append(os.path.abspath(root) + '/'.join(subFolders) + '/' + i)"
] | [
"0.66336405",
"0.6516187",
"0.63490784",
"0.6312109",
"0.6306998",
"0.6250683",
"0.61746",
"0.6130482",
"0.6113366",
"0.6081825",
"0.60749775",
"0.6051456",
"0.6035353",
"0.602858",
"0.60019743",
"0.59748715",
"0.59671766",
"0.59671766",
"0.5945204",
"0.593414",
"0.5931592",
"0.5883774",
"0.5883281",
"0.58664435",
"0.58641124",
"0.58640337",
"0.5859335",
"0.58384943",
"0.582366",
"0.5800468"
] | 0.76479506 | 0 |
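A self-contained sketch in the spirit of _collect_entries above: walk a tree relative to a root, skip VCS and cache directories, and yield one metadata dict per entry. The ordering here is simplified (plain sorted names) rather than the dirs-first ordering of the original.

import os

SKIP = {'.git', '.pytest_cache', '__pycache__'}

def walk_entries(rootdir, basedir='.'):
    for name in sorted(os.listdir(os.path.join(rootdir, basedir))):
        if name in SKIP:
            continue
        rel_path = os.path.join(basedir, name)
        full_path = os.path.join(rootdir, rel_path)
        st = os.stat(full_path, follow_symlinks=False)
        isdir = os.path.isdir(full_path)
        yield {'path': rel_path, 'isdir': isdir,
               'size': 0 if isdir else st.st_size,
               'omode': f'{st.st_mode:04o}', 'mtime': int(st.st_mtime)}
        if isdir:
            yield from walk_entries(rootdir, rel_path)

if __name__ == '__main__':
    for entry in walk_entries('.'):
        print(entry)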
Return the MD5 hash's hexdigest based on non-git, non-pycache entries of the root_dir. The purpose is to check whether two directories are identical apart from the modification dates. The two directories can be on different machines where transferring the files would be costly. | def python_repo_hash_md5(root_dir: str, *, verbose: bool = False):
m = hashlib.md5()
for e in _collect_entries(root_dir, '.'):
if verbose:
log_info('Processing e', e)
m.update(
f"path={e['path']}\tisdir={e['isdir']}\tsize={e['size']}\tmode={e['mode']:03o}\tmtime={e['mtime']}\n"
.encode('UTF-8'))
return m.hexdigest() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calculate_md5_of_dir(self, verbose=0):\n directory = self.cfg['sharing_path']\n if verbose:\n start = time.time()\n md5Hash = hashlib.md5()\n if not os.path.exists(directory):\n self.stop(1, 'Error during calculate md5! Impossible to find \"{}\" in user folder'.format(directory))\n\n for root, dirs, files in os.walk(directory, followlinks=False):\n for names in files:\n filepath = os.path.join(root, names)\n rel_path = self.relativize_path(filepath)\n if rel_path in self.client_snapshot:\n md5Hash.update(self.client_snapshot[rel_path][1])\n md5Hash.update(hashlib.md5(filepath).hexdigest())\n else:\n hashed_file = self.hash_file(filepath)\n if hashed_file:\n md5Hash.update(hashed_file)\n md5Hash.update(hashlib.md5(filepath).hexdigest())\n else:\n print \"can't hash file: \", filepath\n\n if verbose:\n stop = time.time()\n print stop - start\n return md5Hash.hexdigest()",
"def _hash_cache_dir():\n import hashlib\n\n out = OrderedDict(python_version=python_version)\n\n try:\n import shapely\n out['shapely_version'] = shapely.__version__\n out['shapely_file'] = shapely.__file__\n except ImportError:\n pass\n try:\n import fiona\n out['fiona_version'] = fiona.__version__\n out['fiona_file'] = fiona.__file__\n except ImportError:\n pass\n try:\n import pandas\n out['pandas_version'] = pandas.__version__\n out['pandas_file'] = pandas.__file__\n except ImportError:\n pass\n try:\n import geopandas\n out['geopandas_version'] = geopandas.__version__\n out['geopandas_file'] = geopandas.__file__\n except ImportError:\n pass\n try:\n import osgeo\n out['osgeo_version'] = osgeo.__version__\n out['osgeo_file'] = osgeo.__file__\n except ImportError:\n pass\n try:\n import pyproj\n out['pyproj_version'] = pyproj.__version__\n out['pyproj_file'] = pyproj.__file__\n except ImportError:\n pass\n try:\n import salem\n out['salem_version'] = salem.__version__\n out['salem_file'] = salem.__file__\n except ImportError:\n pass\n\n # ok, now make a dummy str that we will hash\n strout = ''\n for k, v in out.items():\n strout += k + v\n strout = 'salem_hash_' + hashlib.md5(strout.encode()).hexdigest()\n dirout = os.path.join(cache_dir, 'cache', strout)\n return dirout",
"def _calculate_hash(files: Iterable[str], root: str) -> str:\n file_hash = hashlib.md5()\n for file_name in sorted(files):\n file_path = os.path.join(root, file_name)\n file_hash.update((file_name + \"\\0\").encode())\n with open(file_path, \"rb\") as file_:\n # pylint: disable=cell-var-from-loop\n for chunk in iter(lambda: file_.read(4096), \"\"):\n if not chunk:\n break\n file_hash.update(chunk)\n file_hash.update(\"\\0\".encode())\n\n return file_hash.hexdigest()",
"def generate_hash(self):\r\n\r\n hash_list = []\r\n for root, dirs, files in os.walk(self.options['source']):\r\n for f in sorted([f for f in files if not f.startswith('.')]):\r\n hash_list.append(os.path.join(root, f))\r\n hash_list.append(str(os.path.getmtime(os.path.join(root, f))))\r\n hash_list = ''.join(hash_list)\r\n\r\n if sys.version < '3':\r\n return hashlib.sha1(hash_list).hexdigest()\r\n return hashlib.sha1(hash_list.encode('utf-8')).hexdigest()",
"def hash_directory(path):\n if not os.path.isdir(path):\n raise ValueError(\n \"The given path `{}` is not a directory.\".format(path))\n\n md5 = hashlib.md5()\n\n for root, _, files in os.walk(path):\n for fil in files:\n md5.update(hash_file(os.path.join(root, fil)).encode(\"utf-8\"))\n\n return \"{}\".format(md5.hexdigest())",
"def _hash_dir(directory: Union[str, Path], md5: Hash) -> Hash:\n if not Path(directory).is_dir():\n raise ValueError(str(directory) + \" is not a valid directory\")\n for path in sorted(Path(directory).iterdir()):\n md5.update(path.name.encode())\n if path.is_file():\n md5 = _hash_file(path, md5)\n elif path.is_dir():\n md5 = _hash_dir(path, md5)\n return md5",
"def directory_hash(root_path, verbose, hash_format, ignore_list, ignore_spec_file):\n if not os.path.isabs(root_path):\n root_path = os.path.join(os.getcwd(), root_path)\n\n # store the directory hashes of sub folders so we can use it when calculating the hash of the parent folder\n dir_hash_mappings = {}\n\n ignore_spec = ignore.MHLIgnoreSpec(None, ignore_list, ignore_spec_file)\n\n for folder_path, children in post_order_lexicographic(root_path, ignore_spec.get_path_spec()):\n dir_hash_context = DirectoryHashContext(hash_format)\n for item_name, is_dir in children:\n item_path = os.path.join(folder_path, item_name)\n if is_dir:\n if not dir_hash_context:\n continue\n hash_string = dir_hash_mappings.pop(item_path)\n else:\n hash_string = create_filehash(hash_format, item_path)\n dir_hash_context.append_hash(hash_string, item_name)\n dir_hash = dir_hash_context.final_hash_str()\n dir_hash_mappings[folder_path] = dir_hash\n if folder_path == root_path:\n logger.info(f' calculated root hash: {hash_format}: {dir_hash}')\n elif verbose:\n logger.info(f'directory hash for: {folder_path} {hash_format}: {dir_hash}')",
"def dir_hash(cls, d):\r\n names = sorted(f for f in cls._iter_files(d) if not f.endswith('.pyc'))\r\n def stream_factory(name):\r\n return open(os.path.join(d, name), 'rb')\r\n return cls._compute_hash(names, stream_factory)",
"def hash_files_or_dirs(paths: List[str]) -> str:\n md5 = hashlib.md5()\n for path in sorted(paths):\n md5 = _hash_file_or_dir(path, md5)\n return md5.hexdigest()",
"def changed(dirname, filename='.md5', args=None, glob=None):\n root = Path(dirname)\n if not root.exists():\n # if dirname doesn't exist it is changed (by definition)\n return True\n\n cachefile = root / filename\n current_digest = cachefile.open().read() if cachefile.exists() else \"\"\n \n _digest = digest(dirname, glob=glob)\n if args and args.verbose: # pragma: nocover\n print(\"md5:\", _digest)\n has_changed = current_digest != _digest\n\n if has_changed:\n with open(os.path.join(dirname, filename), 'w') as fp:\n fp.write(_digest)\n\n return has_changed",
"def MD5(self) -> _n_0_t_3[_n_0_t_9]:",
"def digest(dirname, glob=None):\n md5 = hashlib.md5()\n if glob is None:\n fnames = [fname for _, fname in list_files(Path(dirname))]\n for fname in sorted(fnames):\n fname = os.path.join(dirname, fname)\n md5.update(open(fname, 'rb').read())\n else:\n fnames = Path(dirname).glob(glob)\n for fname in sorted(fnames):\n md5.update(fname.open('rb').read())\n return md5.hexdigest()",
"def find_identical_files(directory):\n # go to the directory\n os.chdir(directory)\n \n # the problem wiht the md5 in our scan is that it causes the access time to be\n # updated. This renders future scans of the directory when looking for old files\n # to see them no older than the last scan. An approach to get around this would\n # be to retrieve the access times for all the files using the stat command\n # then use touch reset the access time to the original. This may change other\n # time stats too need to look in that. Here is a command set example for\n # changing the access times using touch:\n\n # addressing access times\n \n # 1 - fetch all the previous accesstimes\n try:\n find_stat = subprocess.Popen(\"find * -exec stat '{}' \\;\",shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)\n except:\n raise\n # get the standard output\n out, err = find_stat.communicate() # get the standard output\n fstats = out.decode().split(\"\\n\") # split the text into a list\n fdates = {}\n for s in fstats:\n # parse stat output lines appear as follows:\n #16777220 1001760 -rw-r--r-- 1 todd staff 0 7 \"Jan 25 22:07:00 2015\" \"Jan 25 22:00:07 2015\" \"Jan 25 22:09:51 2015\" \"Jan 25 22:00:07 2015\" 4096 8 0 bar.txt\n if s == \"\":\n continue\n at = re.search(\"\\\"[^\\\"]+\\\"\",s).group(0)\n at = at.strip('\"')\n dspec = file_date_to_spec(at)\n #ss = s.split(\" \")\n ss = re.split(\"\\s+\",s)\n fn = \" \".join(ss[27:])\n fdates[fn] = dspec\n \n\n # get the md5 sums for each file...the side effect is the access time changes...but we repair these \n file_by_md5 = {}\n for fn in fdates.keys():\n \n # run md5 sum and get the value in a dict\n try:\n cmd_md5 = subprocess.Popen(\"md5 \"+fn,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)\n except:\n raise\n out, err = cmd_md5.communicate() # get the standard output\n md5 = out.decode() # split the text into a list\n md5 = md5.rstrip()\n if md5 == '':\n continue\n p = re.split(\"\\) = \",md5)\n if len(p) < 2:\n print(\"Failed to split \"+f)\n fnn = re.sub(\"MD5 \\(\",\"\",p[0])\n if fnn != fn:\n print(\"The file returned by md5 was not was not what was expected: \"+fnn)\n print(\"Expected: \"+fn)\n if file_by_md5.__contains__(p[1]):\n file_by_md5[p[1]] += [ fn ]\n else:\n file_by_md5[p[1]] = [ fn ]\n \n # repair access time using touch command e.g.:\n # /usr/bin/touch -a -t 201501252207.30 bar.txt\n tch = \"/usr/bin/touch -a -t \"+fdates[fn]+\" \"+fn\n return_signal = subprocess.call(tch.split())\n if return_signal != 0:\n print(\"Could not run command \"+tch)\n sys.exit()\n \n # create our dict of list of files keyed by md5 sums\n identical = {}\n for md5 in file_by_md5.keys():\n if len(file_by_md5[md5]) == 1:\n continue\n identical[md5] = file_by_md5[md5]\n \n # go back to our starting directory \n os.chdir(iwd)\n \n return(identical)",
"def check_md5sum(file1: str, file2: str) -> bool:\n return get_md5_hash(file1) == get_md5_hash(file2)",
"def create_hash(tree_string):\n return hashlib.md5(tree_string.encode()).hexdigest()",
"def find_duplicates(directories):\n md = sha1sums(directories)\n # prune multidict, only keep files that are duplicates\n # use list() to iterate first so dict doesnt change size while pop()ing\n for digest,paths in list(md.iteritems()):\n if len(paths) < 2:\n md.pop(digest)\n \n return md",
"def get_md5(location: str, ignore_hidden_files: bool=True) -> Optional[str]:\n if not os.path.exists(location):\n return None\n if os.path.isfile(location):\n with open(location, \"rb\") as file:\n content = file.read()\n return hashlib.md5(content).hexdigest()\n else:\n return dirhash(location, \"md5\", ignore_hidden=ignore_hidden_files)",
"def md5(self):\n return md5file(self.abspath)",
"def check_md5(file1, file2):\r\n with open(file1, \"rb\") as f1:\r\n h1 = hashlib.md5(f1.read()).digest()\r\n with open(file2, \"rb\") as f2:\r\n h2 = hashlib.md5(f2.read()).digest()\r\n return h1 == h2",
"def equal_file_sum(file1_paht, file2_paht):\n md5_sum1 = generate_sum(file1_path)\n md5_sum2 = generate_sum(file2_path)\n return (md5_sum1 == md5_sum2)",
"def hash(self):\r\n hash_list = []\r\n for image in self.images:\r\n hash_list.append(os.path.relpath(image.path))\r\n hash_list.append(image._image_data)\r\n\r\n for key, value in self.config.iteritems():\r\n hash_list.append(key)\r\n hash_list.append(value)\r\n\r\n if sys.version < '3':\r\n return hashlib.sha1(''.join(map(str, hash_list))).hexdigest()[:10]\r\n return hashlib.sha1(''.join(map(str, hash_list)).encode('utf-8')).hexdigest()[:10]",
"def _get_local_md5(self, blocksize=2**20):\n m = hashlib.md5()\n with open(self.dst, \"rb\") as f:\n buf = f.read(blocksize)\n while buf:\n m.update(buf)\n buf = f.read(blocksize)\n return m.hexdigest()",
"def md5(dir):\n\n # ugly way to avoid circular imports\n from . import settings\n\n files = [ \n settings.DATA['nation']['file_name'],\n settings.DATA['regions']['file_name'],\n settings.DATA['provinces']['file_name'],\n ]\n\n hash_md5 = hashlib.md5()\n for f in files:\n with open(dir+'/'+f, \"rb\") as f:\n for chunk in iter(lambda: f.read(4096), b\"\"):\n hash_md5.update(chunk)\n \n return hash_md5.hexdigest()",
"def compute_fingerprint(path_list):\r\n\r\n hasher = hashlib.sha1()\r\n\r\n for path in path_list:\r\n\r\n # For directories, create a hash based on the modification times\r\n # of first-level subdirectories\r\n if os.path.isdir(path):\r\n for dirname in sorted(os.listdir(path)):\r\n p = os.path.join(path, dirname)\r\n if os.path.isdir(p):\r\n hasher.update(str(os.stat(p).st_mtime))\r\n\r\n # For files, hash the contents of the file\r\n if os.path.isfile(path):\r\n with open(path, \"rb\") as file_handle:\r\n hasher.update(file_handle.read())\r\n\r\n return hasher.hexdigest()",
"def md5sum(filename_list):\n md5 = hashlib.md5()\n for filename in filename_list:\n if os.path.isfile(filename):\n _count_md5_for_file(md5, filename)\n elif os.path.isdir(filename):\n for base, dirs, files in os.walk(filename):\n dirs.sort() # ensure that directories will be traversed in same order on all platforms\n for name in sorted(files):\n _count_md5_for_file(md5, os.path.join(base, name))\n return md5.hexdigest()",
"def _check_md5(self):\n\n self.log.info('-' * 80)\n self.log.info('Check md5 sum')\n\n self.log.info(self._ref_value)\n self.log.info(self._output_file)\n\n code, out = cmd_exec(['md5sum', self._output_file], shell=False, log=self.log)\n if code:\n self.log.error(out)\n return False\n self.log.info(out)\n\n md5sum, _ = out.split(' ')\n\n self.log.info(f'reference md5: {self._ref_value}')\n self.log.info(f'actual md5: {md5sum}')\n\n if self._ref_value != md5sum:\n return False\n\n return True",
"def svn_fs_file_md5_checksum(*args):\r\n return _fs.svn_fs_file_md5_checksum(*args)",
"def calculate(d):\r\n\r\n # Set correct slashes for the OS\r\n if sys.platform == 'windows':\r\n slash = '\\\\'\r\n elif sys.platform == 'linux':\r\n slash = '/'\r\n else:\r\n print('#Error. Unknown platform.')\r\n return\r\n\r\n print('Files in the current directory and their md5-hashes:\\n')\r\n i = 0\r\n assert i == 0, '#Error. Variable i != 0.'\r\n\r\n for i in range(len(d[2])): # Go through the list of files\r\n full_path = d[0]+slash+d[2][i]\r\n print(full_path) # Get the list of files with full paths\r\n print(md5(full_path))\r\n size(full_path)",
"def hash_files(verbose, debug):\n\n found = []\n h = hashlib.new('md5')\n for pattern in FILES_PATTERNS:\n for f in glob.iglob(pattern, flags=FLAGS):\n name = f.replace('\\\\', '/')\n found.append(name)\n if verbose:\n print('FILES:')\n for f in sorted(found):\n if verbose:\n print(f)\n h.update(f.encode('ascii'))\n with open(f, 'rb') as f:\n h.update(f.read().replace(b'\\r\\n', b'\\n'))\n result = h.hexdigest()\n print('HASH: ', result)\n return result",
"def md5(self):\n\t\tfrom utils import get_md5\n\t\t# from hashlib import md5\n\t\t# m = md5()\n\t\t# m.update(str(self.html))\n\t\t# return m.hexdigest()\n\t\treturn get_md5(str(self.html))"
] | [
"0.6838967",
"0.6250359",
"0.6099796",
"0.5913261",
"0.5903527",
"0.5851177",
"0.5848145",
"0.5792256",
"0.5689556",
"0.56635755",
"0.56403613",
"0.5586936",
"0.5534521",
"0.5526404",
"0.5467155",
"0.5463636",
"0.54520786",
"0.5427325",
"0.5422579",
"0.54110044",
"0.5407061",
"0.54044783",
"0.5395575",
"0.53726864",
"0.53637904",
"0.53573763",
"0.5348888",
"0.53187734",
"0.53079337",
"0.52986985"
] | 0.7109552 | 0 |
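A stdlib-only sketch of the same metadata-hashing idea: compare two trees that live on different machines by exchanging a single hex digest instead of the files themselves. Whether mtime belongs in the digest depends on what "identical" means for your use case, so it is optional here.

import hashlib
import os

def tree_digest(rootdir, include_mtime=False):
    m = hashlib.md5()
    for dirpath, dirnames, filenames in os.walk(rootdir):
        dirnames[:] = sorted(d for d in dirnames
                             if d not in ('.git', '__pycache__', '.pytest_cache'))
        for name in sorted(filenames):
            full = os.path.join(dirpath, name)
            st = os.stat(full, follow_symlinks=False)
            rel = os.path.relpath(full, rootdir)
            line = f"path={rel}\tsize={st.st_size}\tmode={st.st_mode:03o}"
            if include_mtime:
                line += f"\tmtime={int(st.st_mtime)}"
            m.update((line + "\n").encode('UTF-8'))
    return m.hexdigest()

# Same digest on both machines => same paths, sizes and modes.
print(tree_digest('.'))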
Deactivate an ApiOAuth2Application. Does not delete the database record, but revokes all tokens and sets a flag that hides this instance from the API. | def deactivate(self, save=False):
client = cas.get_client()
# Will raise a CasHttpError if deletion fails, which will also stop setting of active=False.
resp = client.revoke_application_tokens(self.client_id, self.client_secret) # noqa
self.is_active = False
if save:
self.save()
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def revoke_api_access(application):\n try:\n file = open(PATH + '/../DB/access.json', 'r')\n accessData = json.load(file)\n if (application in accessData):\n accessData.pop(application, None)\n\n with open(PATH + '/../DB/access.json', 'w') as f:\n f.write(json.dumps(accessData, indent=4, sort_keys=True)) \n except:\n raise",
"def deactivate(self, save=False):\n client = cas.get_client()\n # Will raise a CasHttpError if deletion fails for any reason other than the token\n # not yet being created. This will also stop setting of active=False.\n try:\n resp = client.revoke_tokens({'token': self.token_id}) # noqa\n except cas.CasHTTPError as e:\n if e.code == 400:\n pass # Token hasn't been used yet, so not created in cas\n else:\n raise e\n\n self.is_active = False\n\n if save:\n self.save()\n return True",
"def deactivate(self):\n pass",
"def deactivate(self):\n pass",
"def stop_application_mode(self) -> None:\n # Nothing to do",
"def deactivate(self):\n self._is_active = False",
"def deactivate(self):\r\n self.update_enrollment(is_active=False)",
"def deactivate(self) -> None:\n self._bot.remove_flows_from(self)\n self.is_activated = False",
"def deactivate(self):\n self.active = False",
"def deactivate(self):\n self.active = False",
"def deactivate(self):\r\n self.activated = False",
"def deactivate(self):\n pass",
"def deactivate(self, util):\n return self._deactivate(util, persist=True)",
"def deactivate():\n deactivate_connection_with_mainloop(get_uuid())",
"def deactivate(self) -> bool:\n pass",
"def deactivate(self):\n super(Pixiv_bot, self).deactivate()",
"def disconnect(self):\r\n self._apiSession.close()\r\n self._oAuthSession.close()\r\n \r\n # Check the access token and refresh if expired\r",
"def set_inactive(self):\n self.active = False",
"def perform_destroy(self, instance):\n instance.is_active = False\n instance.save()",
"def delete_auth_token():\n data = get_request_data(request)\n address = data.get(\"address\")\n token = data.get(\"token\")\n\n valid, message = is_token_valid(token, address)\n if not valid:\n return jsonify(error=message), 400\n\n force_expire_token(token)\n\n return jsonify(success=\"Token has been deactivated.\")",
"def stop():\n app = get_vistrails_application()\n app.finishSession()\n app.save_configuration()\n app.destroy()",
"def deactivate(self):\n raise NotImplementedError('Subclasses must implement deactivate()')",
"def cancel(self):\n self.is_active = False\n self.save()",
"def cancel(self):\n self.is_active = False\n self.save()",
"def cmd_disable(self, app_name=None):\n rc = self.socket_command_with_project('disable', app_name)\n return rc",
"def shutdown(api, settings):\n if api.check_token():\n update_tokenfile(api, settings)\n else:\n delete_tokenfile(settings)",
"def deactivate(self) -> None:\n return self.stop()",
"def revoke(self):\n # Set the application as unsucessful with the current datetime\n self.status = self.Status.REVOKED\n self.revoked_datetime = timezone.now()\n\n # Removes credentialing from the user\n self.user.is_credentialed = False\n self.user.credential_datetime = None\n\n with transaction.atomic():\n self.user.save()\n self.save()\n\n logger.info('Credentialing for user {0} has been removed.'.format(\n self.user.email))",
"def deactivate_user_setup():\n\n app.active_users.pop(current_user.get_id(), None)\n try:\n requests.post(\"http://localhost:9090\",\n json={\"active_users\": app.active_users})\n except:\n logger.info('Unable to send updated list of active users.')\n return Response('500')\n logger.info('Deactivated messages for user '\n + current_user.get_id() + '.')\n return Response('200')",
"async def async_turn_off_when_active(self, **kwargs: Any) -> None:\n await self._data.controller.programs.stop(self.entity_description.uid)\n self._update_activities()"
] | [
"0.6420284",
"0.64088273",
"0.6401829",
"0.6401829",
"0.6192459",
"0.6173356",
"0.6137181",
"0.6090798",
"0.6071947",
"0.6071947",
"0.60594076",
"0.5915336",
"0.58965695",
"0.5862787",
"0.5858027",
"0.58575356",
"0.58293414",
"0.5747239",
"0.5745004",
"0.57321835",
"0.57201946",
"0.5705499",
"0.57000935",
"0.57000935",
"0.56785685",
"0.5654742",
"0.5644072",
"0.56083983",
"0.5601154",
"0.5585542"
] | 0.7059044 | 0 |
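A self-contained sketch of the soft-deactivation pattern in the record above: revoke remote tokens first, and only then flip is_active, so a failed revocation never leaves a hidden-but-still-usable application. FakeCasClient is a stand-in, not the real cas.get_client() API.

class FakeCasClient:
    # Stand-in for a remote token-service client.
    def revoke_application_tokens(self, client_id, client_secret):
        print(f'revoked all tokens for {client_id}')

class Application:
    def __init__(self, client_id, client_secret):
        self.client_id = client_id
        self.client_secret = client_secret
        self.is_active = True

    def deactivate(self, save=False):
        # Raises if revocation fails, so is_active is never cleared spuriously.
        FakeCasClient().revoke_application_tokens(self.client_id, self.client_secret)
        self.is_active = False  # hide from the API; the record itself is kept
        if save:
            print('record saved with is_active=False')
        return True

app = Application('abc123', 's3cr3t')
app.deactivate(save=True)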
Reset the secret of an ApiOAuth2Application. Revokes all tokens. | def reset_secret(self, save=False):
client = cas.get_client()
client.revoke_application_tokens(self.client_id, self.client_secret)
self.client_secret = generate_client_secret()
if save:
self.save()
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def resetSecret(self):\n self.secret = str(uuid())\n self.put()",
"def _clear_secret_token_map():\n global _secret_token_map\n _secret_token_map = None",
"def manage_clearSecrets(self, REQUEST):\n manager = getUtility(IKeyManager)\n manager.clear()\n manager.rotate()\n response = REQUEST.response\n response.redirect(\n '%s/manage_secret?manage_tabs_message=%s' %\n (self.absolute_url(), 'All+secrets+cleared.')\n )",
"def refresh_token():\n try:\n deserialized_message = peek_app_token()\n app_id = deserialized_message.get('app_id')\n installation_id = deserialized_message.get('installation_id')\n store_token(get_token(app_id, installation_id))\n\n except Exception as exc:\n log.error(f'Could not refresh token.\\n{exc}')\n traceback.print_exc(file=sys.stderr)",
"def revoke_api_access(application):\n try:\n file = open(PATH + '/../DB/access.json', 'r')\n accessData = json.load(file)\n if (application in accessData):\n accessData.pop(application, None)\n\n with open(PATH + '/../DB/access.json', 'w') as f:\n f.write(json.dumps(accessData, indent=4, sort_keys=True)) \n except:\n raise",
"def shutdown(api, settings):\n if api.check_token():\n update_tokenfile(api, settings)\n else:\n delete_tokenfile(settings)",
"def refresh_access_token(self):\n self._access_token = self.generate_access_token()",
"def invalidate_existing_tokens(self, client_id, user):\n\n app = Application.objects.get(client_id=client_id)\n tokens = AccessToken.objects.filter(user=user, application=app)\n tokens.delete()",
"def refresh_credentials():\n global auth_token\n auth_token = get_oauth_token()",
"def revoke_token(token):\n token.delete_instance()",
"def __del__(self):\n self.token_revoke()",
"def revoke_token():\n return server.create_endpoint_response(RevocationEndpoint.ENDPOINT_NAME)",
"def disconnect(self):\r\n self._apiSession.close()\r\n self._oAuthSession.close()\r\n \r\n # Check the access token and refresh if expired\r",
"def reset(ctx, force):\n\n force or click.confirm(\n \"WARNING! This will delete all stored OATH accounts and restore factory \"\n \"settings. Proceed?\",\n abort=True,\n err=True,\n )\n\n session = ctx.obj[\"session\"]\n click.echo(\"Resetting OATH data...\")\n old_id = session.device_id\n session.reset()\n\n keys = ctx.obj[\"oath_keys\"]\n if old_id in keys:\n del keys[old_id]\n keys.write()\n logger.info(\"Deleted remembered access key\")\n\n click.echo(\"Success! All OATH accounts have been deleted from the YubiKey.\")",
"def reset(ctx):\n\n controller = ctx.obj['controller']\n click.echo('Resetting OATH data...')\n old_id = controller.id\n controller.reset()\n\n settings = ctx.obj['settings']\n keys = settings.setdefault('keys', {})\n if old_id in keys:\n del keys[old_id]\n settings.write()\n\n click.echo(\n 'Success! All OATH credentials have been cleared from your YubiKey.')",
"def renew_access_token(self):\n self._access_token = self._get_access_token()",
"def revoke_token():\n json_request = request.json\n refresh_token = json_request.get('refresh_token')\n if not refresh_token:\n return msg.errors.bad_request(\n 'You should provide refresh token for this call')\n RefreshToken.revoke(refresh_token)\n db.session.commit()\n return msg.success('Token is successfully revoked')",
"def clean_user_tokens() -> None:\n asyncio.run(clean_old_user_tokens())",
"def refresh_auth_token(self):\n self._auth_token = self.generate_auth_token()",
"def reset_api_key(request):\r\n user = request.user\r\n # Generate new api key and assign it to user's api key\r\n user.api_key = User.gen_api_key()\r\n return _api_response(request, {\r\n 'api_key': user.api_key,\r\n 'message': 'Api Key was successfully changed',\r\n })",
"def UpdateSecretKey():\n _LOG.info('Updating webapp2_secret_key.')\n webapp2_secret_key = Webapp2SecretKey(id='current_secret_key')\n webapp2_secret_key.secret_key = os.urandom(16).encode('hex')\n webapp2_secret_key.put()\n return True",
"def reset_api_key(name):\r\n user = User.query.filter_by(name=name).first()\r\n if not user:\r\n return abort(404)\r\n if current_user.name != user.name:\r\n return abort(403)\r\n\r\n title = (\"User: %s · Settings\"\r\n \"- Reset API KEY\") % current_user.fullname\r\n user = db.session.query(model.user.User).get(current_user.id)\r\n user.api_key = model.make_uuid()\r\n db.session.commit()\r\n cached_users.delete_user_summary(user.name)\r\n msg = gettext('New API-KEY generated')\r\n flash(msg, 'success')\r\n return redirect(url_for('account.profile', name=name))",
"def _delete_all_secrets(self):\n for secret_ref in self.created_entities['secret']:\n self.barbicanclient.secrets.delete(secret_ref, True)",
"def refresh():\n current_user = get_jwt_identity()\n ret = {\n 'access_token': create_access_token(identity=current_user)\n }\n return jsonify(ret), 200",
"def reset_api_key(self):\r\n url = '{0}/{1}'.format(self.get_url(), 'api_key')\r\n\r\n return http.Request('POST', url, None), parsers.parse_json",
"def test_authtoken_refresh(self):\n hagrid = models.User(username='hagrid', fullname='Rubeus Hagrid')\n auth_token = models.AuthToken(user=hagrid, algorithm='hmac-sha-1')\n existing_token = auth_token.token\n existing_secret = auth_token.secret\n auth_token.refresh()\n self.assertNotEqual(existing_token, auth_token.token)\n self.assertNotEqual(existing_secret, auth_token.secret)",
"def _refresh_access_token(self) -> None:\n response = httpx.post(\n f\"{self._base_url}/oauth2/token\",\n proxies=self._proxies,\n data={\n \"grant_type\": \"client_credentials\",\n \"client_id\": self._api_key,\n \"client_secret\": self._api_secret,\n },\n )\n response.raise_for_status()\n token = response.json()[\"access_token\"]\n c = httpx.Client()\n c.close()\n self._authorization_headers = {\"Authorization\": f\"Bearer {token}\"}",
"def refresh(self):\n self._request_token(grant_type='password', username=self._username,\n password=self._password)",
"def refresh(self):\n self._request_token(grant_type='client_credentials')",
"def __init__(self):\n self.application_id = None\n self.secret = None\n self.token = {}"
] | [
"0.699256",
"0.6872917",
"0.6153157",
"0.6107559",
"0.59967816",
"0.59598404",
"0.5934698",
"0.5888643",
"0.58882296",
"0.5865433",
"0.5845431",
"0.5785667",
"0.57682496",
"0.5740121",
"0.5738762",
"0.57173806",
"0.5665614",
"0.5649307",
"0.56228507",
"0.56142944",
"0.55869836",
"0.5579306",
"0.5557085",
"0.55158216",
"0.55051273",
"0.55004776",
"0.5495246",
"0.54911983",
"0.5470276",
"0.546352"
] | 0.702098 | 0 |
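A small sketch of the revoke-then-rotate flow, using the stdlib secrets module as a plausible implementation of a generate_client_secret() helper; the real helper used in the record above is not shown, so this is an assumption.

import secrets

def generate_client_secret(nbytes=32):
    # Plausible stand-in for the generate_client_secret() helper.
    return secrets.token_urlsafe(nbytes)

def reset_secret(app, revoke_tokens):
    revoke_tokens(app['client_id'], app['client_secret'])  # old secret is now useless
    app['client_secret'] = generate_client_secret()
    return True

app = {'client_id': 'abc123', 'client_secret': 'old-secret'}
reset_secret(app, lambda cid, sec: print(f'revoked tokens for {cid}'))
print(app['client_secret'] != 'old-secret')  # True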
Deactivate an ApiOAuth2PersonalToken. Does not delete the database record, but hides this instance from the API. | def deactivate(self, save=False):
client = cas.get_client()
# Will raise a CasHttpError if deletion fails for any reason other than the token
# not yet being created. This will also stop setting of active=False.
try:
resp = client.revoke_tokens({'token': self.token_id}) # noqa
except cas.CasHTTPError as e:
if e.code == 400:
pass # Token hasn't been used yet, so not created in cas
else:
raise e
self.is_active = False
if save:
self.save()
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete_auth_token():\n data = get_request_data(request)\n address = data.get(\"address\")\n token = data.get(\"token\")\n\n valid, message = is_token_valid(token, address)\n if not valid:\n return jsonify(error=message), 400\n\n force_expire_token(token)\n\n return jsonify(success=\"Token has been deactivated.\")",
"def deactivate(self, save=False):\n client = cas.get_client()\n # Will raise a CasHttpError if deletion fails, which will also stop setting of active=False.\n resp = client.revoke_application_tokens(self.client_id, self.client_secret) # noqa\n\n self.is_active = False\n\n if save:\n self.save()\n return True",
"def revoke_token(token):\n token.delete_instance()",
"async def revoke_token(self, request: Request, token: str) -> None:\n token_record = ...\n token_record.revoked = True\n token_record.save()",
"def delete_token(self):\n config.update(outlook_token=None)",
"def deauth(request):\n\n if(request.token):\n request.token.delete()\n return JsonResponse({'message': 'Your token is revoked'}) \n else:\n return HttpResponseBadRequest('It does not make sense to revoke a token ' +\n 'if no token are supplied to the request')",
"def revoke_token(decoded_token):\n jti = decoded_token['jti']\n user_identity = decoded_token[current_app.config['JWT_IDENTITY_CLAIM']]\n expires = datetime.fromtimestamp(decoded_token['exp'])\n\n db_token = BlacklistedToken(\n jti=jti,\n user_identity=user_identity,\n expires=expires\n )\n db.session.add(db_token)\n prune_if_necessary()\n db.session.commit()",
"def post(self, request):\n if 'person_id' in self.request.POST:\n user = User.objects.get(person__id=self.request.POST['person_id'])\n if AccessToken.objects.filter(user=user).exists():\n tokens = AccessToken.objects.filter(user=user)\n for token in tokens:\n token.revoke()\n logout(request)\n return Response({'status': True})\n return Response({'status': False})",
"def deactivate(self):\n pass",
"def deactivate(self):\n pass",
"def revoke_token():\n return server.create_endpoint_response(RevocationEndpoint.ENDPOINT_NAME)",
"def removeToken(self, token):\n self.__require_privilaged_access()\n with DBSession(self.__config_db) as session:\n # Check if the given token is a personal access token so it can be\n # removed.\n user = self.getLoggedInUser()\n num_of_removed = session.query(Session) \\\n .filter(Session.user_name == user) \\\n .filter(Session.token == token) \\\n .filter(Session.can_expire.is_(False)) \\\n .delete(synchronize_session=False)\n session.commit()\n\n if not num_of_removed:\n raise codechecker_api_shared.ttypes.RequestFailed(\n codechecker_api_shared.ttypes.ErrorCode.DATABASE,\n \"Personal access token {0} was not found in the \"\n \"database.\".format(token))\n\n # Invalidate the local session by token.\n self.__manager.invalidate_local_session(token)\n\n LOG.info(\"Personal access token '%s...' has been removed by '%s'.\",\n token[:5], self.getLoggedInUser())\n\n return True",
"def __del__(self):\n self.token_revoke()",
"def deactivate(self):\r\n self.update_enrollment(is_active=False)",
"def deactivate(self):\r\n self.activated = False",
"def deactivate_resident(email):\n query = \"\"\"\n mutation deactivateUser($email: String!){\n deactivateUser(userEmail: $email){\n user{\n email\n }\n }\n }\n \"\"\"\n\n variables = {\n 'email': email\n }\n\n headers = {\n 'Authorization': 'JWT %s' % API_TOKEN\n }\n\n response = requests.post(PATH, headers=headers, json={'query':query, 'variables':variables})\n\n return response.json()",
"def deactivate(self):\n self._is_active = False",
"def revoke_token(self, subid):\n from expfactory.database.models import Participant\n\n p = Participant.query.filter(Participant.id == subid).first()\n if p is not None:\n p.token = \"revoked\"\n self.session.commit()\n return p",
"def disconnect(self):\r\n self._apiSession.close()\r\n self._oAuthSession.close()\r\n \r\n # Check the access token and refresh if expired\r",
"def deactivate(self) -> None:\n self._bot.remove_flows_from(self)\n self.is_activated = False",
"def deauthorize():\n\tPAYLOAD_HEADERS.pop('Authorization', None)",
"def deactivate(self):\n self.active = False",
"def deactivate(self):\n self.active = False",
"def deactivate(self):\n super(Pfsense, self).deactivate()",
"def deactivate(self, util):\n return self._deactivate(util, persist=True)",
"def blacklist_token(token, user):\r\n user = User.query.filter_by(username=user).first()\r\n user.login_status = False\r\n token = Token.query.filter_by(token=token).first()\r\n token.blacklist = True\r\n db.session.commit()\r\n return {'Message': 'You have successfully logged out', \"Status\": \"Success\"}, 201",
"def shutdown(api, settings):\n if api.check_token():\n update_tokenfile(api, settings)\n else:\n delete_tokenfile(settings)",
"def deactivate(self):\n pass",
"def test_revoke_inactive(self):\n self.invite.active = False\n self.invite.save()\n url = reverse(\n 'projectroles:api_invite_revoke',\n kwargs={'projectinvite': self.invite.sodar_uuid},\n )\n response = self.request_knox(url, method='POST')\n self.assertEqual(response.status_code, 400, msg=response.content)",
"def revoke(self, token):\n client = self.connect(VAULT_TOKEN)\n client.revoke_token(token)"
] | [
"0.68359965",
"0.68096167",
"0.65469694",
"0.6422617",
"0.6284022",
"0.6237951",
"0.61001736",
"0.60755134",
"0.60667545",
"0.60667545",
"0.6003061",
"0.59748185",
"0.59192157",
"0.5878691",
"0.5849091",
"0.5848954",
"0.58168155",
"0.5810158",
"0.5800938",
"0.57993716",
"0.5776381",
"0.57720834",
"0.57720834",
"0.5764816",
"0.57554555",
"0.5747779",
"0.57382774",
"0.5734721",
"0.5711402",
"0.5701068"
] | 0.7050593 | 0 |
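A sketch of the error-tolerance rule in the record above: a 400 from the revocation endpoint means the token was never created remotely and can be ignored, while any other failure must propagate before the local flag is cleared. CasHTTPError here is a local stand-in for the real exception class.

class CasHTTPError(Exception):
    def __init__(self, code):
        super().__init__(f'CAS error {code}')
        self.code = code

def deactivate_token(token, revoke):
    try:
        revoke({'token': token['token_id']})
    except CasHTTPError as e:
        if e.code != 400:       # 400: token never created remotely -> safe to ignore
            raise
    token['is_active'] = False
    return True

def fake_revoke(payload):
    raise CasHTTPError(400)     # simulate "token not yet used / not created in CAS"

tok = {'token_id': 't-1', 'is_active': True}
print(deactivate_token(tok, fake_revoke), tok['is_active'])  # True False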
Calls the csvfileUsage function to start parsing | def __call__(self):
return self.csvfileUsage() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def main():\n parse_file(sys.argv[1])",
"def main():\n parser = ArgumentParser(usage='%(prog)s [options] ecommonsMetadata.csv')\n parser.add_argument(\"-d\", \"--date\", dest=\"date\",\n help=\"Date on or after that an ETD was published for \\\n creating DOIs. Put in format YYYY-MM\")\n parser.add_argument(\"datafile\", help=\"eCommons metadata worked from.\")\n\n args = parser.parse_args()\n\n if not len(sys.argv) > 0:\n parser.print_help()\n parser.exit()\n\n workingdir = csvparse(args.datafile, args.date)\n doiparse(workingdir)\n print('ANVL files available in: ' + workingdir)",
"def main():\n processSetOfCerFiles(sys.argv[1:])",
"def _parse(self, infile):\n raise NotImplementedError()",
"def __init__(self, *, csv_file_path: str = ''):\n self.__csv_file_path = csv_file_path\n self._parse_csv()",
"def parse_args(self):\n assert os.path.isfile(self.params.csv_input_file), \\\n \"Input CSV file %s not found\" % self.params.csv_input_file",
"def buildFromCSV(self, filepath):\r\n\t\t# TODO: Implement\r\n\t\traise NotImplementedError('This function has not yet been implemented.')\r\n\t\t# with open(filepath, 'r') as scheduleFile:\r\n\t\t# \t# Reusing Parser.parseCSVs(), but not in the intended way; ok because validation is not yet implemented\r\n\t\t# \t# TODO: Split Parser.parseCSVs() into separate people/set file parsers \r\n\t\t# \tn, people, setConstraints = Parser.parseCSVs(-1, scheduleFile, [])\r",
"def parse(self, infile):\r\n raise NotImplementedError()",
"def process_csvs(conn: Connection, basedir: Path) -> None:\n process_files(conn, basedir/\"files.csv\")\n process_notes(conn, basedir/\"notes.csv\")\n process_links(conn, basedir/\"links.csv\")\n process_clusters(conn, basedir/\"clusters.csv\")\n process_bibliography(conn, basedir/\"bibliography.csv\")\n process_citations(conn, basedir/\"citations.csv\")",
"def _read_csvs(self):\n self.data = pd.read_csv(self.path+self.name, index_col=0)",
"def __init__(self):\r\n self.parent_directory = \"..\\csv\\\\\"\r\n self.file_parse_error_msg = \"An error occurred while paring the file\"",
"def main():\n filename = \"data/exercise.csv\"\n analyze(filename)",
"def parse_cat_file(filename, src_num):\n # Loads csv file\n line = np.genfromtxt(open(filename, \"r\"), names=True, delimiter=',', dtype=None)\n\n # Arrays Function\n # MCSNR\n MCSNR = arrays(0, line)\n MCSNR = MCSNR[src_num]\n MCSNR = MCSNR.decode(\"utf-8\")\n\n # RA\n RA = arrays(1, line)\n RA = RA[src_num]\n\n # DE\n DE = arrays(2, line)\n DE = DE[src_num]\n\n # Radius\n Rad = arrays(8, line)\n Rad = Rad[src_num]\n\n # kT\n kT = arrays(12, line)\n kT = kT[src_num]\n\n # VShock\n VShock = arrays(16, line)\n VShock = VShock[src_num]\n\n # Age\n Age = arrays(18, line)\n Age = Age[src_num]\n\n # LX\n LX = arrays(4, line)\n LX = LX[src_num]\n\n # LIR\n LIR = arrays(7, line)\n LIR = LIR[src_num]\n\n return MCSNR, RA, DE, Rad, kT, VShock, Age, LX, LIR",
"def process_file():\n global distances_between_cities\n global number_of_cities\n global unvisited_cities\n\n text_file = open(sys.argv[1].strip('\\r'))\n distances_between_cities = [[int(i) for i in line.strip(\"\\r\\n\").split()[1:]] for line in text_file.readlines()[1:]]\n number_of_cities = len(distances_between_cities)\n\n # set the initial conditions of the problem (you have already visited madrid)\n unvisited_cities = range(number_of_cities)\n visit_city(0)",
"def main():\n\t\n\tglobal debug\n\tct = 0\n\tfor opt in sys.argv[1:]:\n\t\tif opt[0] != \"-\": break\n\t\tct = ct + 1\n\t\tif opt == \"-d\": debug = True\n\tif len(sys.argv) < 2+ct:\n\t\tprint (\"Usage: %s filename\" % sys.argv[0])\n\t\treturn\n\tparse(\"\".join(mklines(sys.argv[1+ct])))\n\treturn",
"def parse_file():\n\tfile_lines = []\n\n\t## For each line in the file, if it's not empty, store it\n\tfor line in fileinput.input():\n\t\tif len(line) > 1:\n\t\t\tfile_lines.append(line.strip())\n\t\n\trun_algorithms(file_lines)",
"def __init__(self, args):\n self.verbose = args.verbose\n self.force = args.force\n self.extra = args.extra\n self.master_csv = args.master\n self.new_files = args.new_files\n self.df_mas_lab_data = None # Master Lab data\n self.df_new_lab_data = None # Aggregated new Lab data\n self.columns = [\n \"CLIA\",\n \"FACILITY_TYPE\",\n \"CERTIFICATE_TYPE\",\n \"LAB_NAME\",\n \"STREET\",\n \"CITY\",\n \"STATE\",\n \"ZIP\",\n \"PHONE\",\n ]",
"def parse(self, filename: str, input_format='csv', **kwargs):\n if 'delimiter' not in kwargs:\n kwargs['delimiter'] = self._extention_types[input_format]\n if filename.endswith('.tar'):\n with tarfile.open(filename) as tar:\n for member in tar.getmembers():\n f = tar.extractfile(member)\n df = pd.read_csv(f, comment='#', **kwargs) # type: pd.DataFrame\n if member.name == 'nodes.csv':\n self.load_nodes(df)\n elif member.name == 'edges.csv':\n self.load_edges(df)\n else:\n raise Exception('Tar file contains unrecognized member {}'.format(member.name))\n else:\n df = pd.read_csv(filename, comment='#', **kwargs) # type: pd.DataFrame\n self.load(df)",
"def __init__(self,\n fileName,\n realFileName=None,\n prequelFileName=None,\n preErrorMessages=(), # Type to be checked\n doNotReadFiles=False,\n allowedFeatures=()):\n #type: (Text, Optional[Text], Optional[Text], List[Any], bool, List[Text]) -> None\n\n assert fileName is not None\n\n self.fileName=fileName #type: Text\n \"\"\" The filename as given when creating the source file\"\"\"\n\n self.prequelFileName=(\n fileName if prequelFileName is None\n else prequelFileName\n )\n \"\"\" \n The named of the unprocessed file or the filename.\n This is useful when a preprocessor is used. \n \"\"\"\n\n self.realFileName=(\n None if doNotReadFiles # filled later\n else (\n fileName if realFileName is None\n else realFileName))\n \"\"\" \n The name of the actual file name that is parsed.\n This is almost never used so don't use it unless\n you know what you are doing. \n \"\"\"\n\n # This should be after the definition of\n # filenames\n super(SourceFile, self).__init__(parents=[])\n\n\n if len(preErrorMessages) >= 1:\n for msg in preErrorMessages:\n Issue(\n origin=self,\n level=Levels.Error,\n message=msg\n )\n return\n\n self.sourceLines=[] #type: List[Text]\n \"\"\"\n The source lines of the 'logical' file.\n It will be the same as realSourceLines \n if not isBasedInHiddenFile. \n Filled by doReadFile but if doReadFile raise \n an exception, the sourceLines will still be of the\n appropriate type (no lines)\n The caller must call doReadFile explictely\n if doNotReadFiles.\n \"\"\"\n\n self.realSourceLines=[] #type: List[Text]\n \"\"\"\n The source lines of the 'real' file.\n It will be the same as sourceLines \n if not isBasedInHiddenFile. \n Filled by doReadFile but if doReadFile raise \n an exception, the sourceLines will still be of the\n appropriate type (no lines)\n The caller must call doReadFile explictely\n if doNotReadFiles.\n \"\"\"\n\n self.allowedFeatures=allowedFeatures #type: List[Text]\n \"\"\"\n A list of feature names that could be issued\n in the parser.\n \"\"\"\n\n\n\n\n if not doNotReadFiles:\n self.doReadFiles(\n logicalFileName=self.fileName,\n realFileName=self.realFileName)",
"def main():\n # openfile allows for CSV files with stored data of two columns\n # data = openfile(\"filename\")\n data = get_data()\n abtest = AB_test(data)\n abtest.stats()\n abtest.print_stats()",
"def run(self, file):\n self.loadReport(file)\n self.findCountryCode()\n self.reformatAndSave()",
"def main():\n Tkinter.Tk().withdraw()\n try:\n #Ask user for the file and open the file in read mode\n file=tkFileDialog.askopenfile()\n\n try:\n #Read the file, split contents by lines, then split lines into words using \",\" as separator and\n # use the words to create carEvaluation object\n carevaluation_list=[CarEvaluation(*lines.split(\",\")) for lines in file.read().splitlines()]\n process_records(carevaluation_list)\n except Exception, e:\n print \"Error: Parsing file. Terminating the program! \" + str(e)\n sys.exit(e)\n finally:\n file.close()\n except Exception, e:\n print \"Error: Could not open the file to read. Terminating the program! \" + str(e)\n sys.exit(e)",
"def run_parser(self, path, app=None, renderer=None, failed=False):\n self._stats = RenderStats()\n\n filters = {'app': app,\n 'renderer': renderer,\n 'failed': failed}\n\n # Read all files that are properly named\n filenames = [os.path.join(path, rf) for rf in os.listdir(path) if re.match(r'renders_\\d{4}-\\d{2}-\\d{2}.csv', rf)]\n\n for fn in filenames:\n for render in filter_rows(fn, filters):\n # Update stats based on this render\n self._stats.update(render)",
"def process_file(file_name):\n pass # delete this line and replace with your code here",
"def main():\n parser = argparse.ArgumentParser(description='investigate code health and random statistics')\n sub_parsers = parser.add_subparsers(dest='command_name', title='Commands', help='', metavar='<command>')\n\n sub = sub_parsers.add_parser('line-count', help='list line counts')\n sub.add_argument('files', nargs='+', help='files or folders to look in')\n sub.add_argument('--each', type=int, default=1)\n sub.add_argument('--show', action='store_true')\n sub.add_argument('--include-empty', dest='discard_empty', action='store_false')\n sub.set_defaults(func=handle_line_count)\n\n sub = sub_parsers.add_parser('include-list', help='list headers from files')\n cc.add_argument(sub)\n sub.add_argument('files', nargs='+')\n sub.add_argument('--print', dest='print_files', action='store_true')\n sub.add_argument('--print-stats', dest='print_stats', action='store_true')\n sub.add_argument('--print-max', dest='print_max', action='store_true')\n sub.add_argument('--no-list', dest='print_list', action='store_false')\n sub.add_argument('--count', default=2, type=int, help=\"only print includes that are more or equal to <count>\")\n sub.add_argument('--limit', nargs='+', help=\"limit search to theese files and folders\")\n sub.set_defaults(func=handle_list)\n\n sub = sub_parsers.add_parser('include-gv', help='generate a graphviz of the includes')\n cc.add_argument(sub)\n sub.add_argument('files', nargs='+')\n sub.add_argument('--limit', nargs='+', help=\"limit search to theese files and folders\")\n sub.add_argument('--group', action='store_true', help=\"group output\")\n sub.add_argument('--cluster', action='store_true', help=\"group output into clusters\")\n sub.set_defaults(func=handle_gv)\n\n sub = sub_parsers.add_parser('list-indents', help='list the files with the maximum indents')\n sub.add_argument('files', nargs='+')\n sub.add_argument('--each', type=int, default=1, help='group counts')\n sub.add_argument('--show', action='store_true', help='include files in list')\n sub.add_argument('--hist', action='store_true', help='show simple histogram')\n sub.add_argument('--include-empty', dest='discard_empty', action='store_false')\n sub.set_defaults(func=handle_list_indents)\n\n sub = sub_parsers.add_parser('missing-pragma-once', help='find headers with missing include guards')\n sub.add_argument('files', nargs='+')\n sub.set_defaults(func=handle_missing_include_guards)\n\n sub = sub_parsers.add_parser('missing-in-cmake', help='find files that existis on disk but missing in cmake')\n sub.add_argument('files', nargs='+')\n cc.add_argument(sub)\n sub.set_defaults(func=handle_missing_in_cmake)\n\n sub = sub_parsers.add_parser('list-no-project-folders', help='find projects that have not set the solution folder')\n sub.add_argument('files', nargs='+')\n cc.add_argument(sub)\n sub.set_defaults(func=handle_list_no_project_folder)\n\n sub = sub_parsers.add_parser('check-files', help=\"find files that doesn't match the name style\")\n sub.add_argument('files', nargs='+')\n cc.add_argument(sub)\n sub.set_defaults(func=handle_check_files)\n\n args = parser.parse_args()\n if args.command_name is not None:\n args.func(args)\n else:\n parser.print_help()",
"def load_csv(self):\n options = QFileDialog.Options()\n options |= QFileDialog.DontUseNativeDialog\n files, _ = QFileDialog.getOpenFileNames(\n self,\n \"Select one or more files\",\n \"\",\n \"csv files (*.csv);;All Files (*)\",\n options=options,\n )\n self.show()\n\n if files:\n self.files_now = files\n else:\n self.files_now = None\n\n if self.files_now:\n self.lineEdit_file_name.setText(self.files_now[0])\n self.update_gui_from_csv()",
"def parse_source_file(self, filepath):\n raise NotImplementedError('Not Implemented')",
"def parse_design(self, detailed_design_file):",
"def compile_file(self, filename):\n if self.basepath is None:\n self.basepath = os.path.split(filename)\n\n i = 1\n txt = \"\"\n with open(filename, \"r\") as reader:\n for line in reader:\n if line != \"\\n\":\n txt += line\n debug(logger, \"*** [%d] %s\" % (i, line))\n if balanced(txt) == 0:\n print(self.parseit(txt))\n txt = \"\"\n i = i + 1\n\n if len(txt):\n print(\"Error: missing ()'s, %s\" % txt)",
"def process_file_import(self):\r\n directory_csv = [file for file in os.listdir() if file.endswith(\".csv\")]\r\n self.print_options(directory_csv,2)\r\n\r\n \"\"\"\r\n Asks for user input. Then imports csv file based on user's input.\r\n \"\"\"\r\n n = (input(\"Which csv would you like to import? Please input the corresponding integer:\"))\r\n\r\n try:\r\n n = int(n)\r\n except:\r\n pass\r\n\r\n if isinstance(n, int) is True and n <= len(directory_csv):\r\n self.population.import_csv(directory_csv[int(n)-1])\r\n print(self.population)\r\n self.file_import()\r\n elif n == 'q':\r\n quit()\r\n elif n == 'b':\r\n self.menu_page()\r\n else:\r\n raise InputError(\"\\nPlease input a valid digit, 'q' or 'b'\")"
] | [
"0.58680946",
"0.584178",
"0.56904954",
"0.55948025",
"0.55103976",
"0.54529536",
"0.5447621",
"0.5420857",
"0.5396767",
"0.5378413",
"0.5375421",
"0.5359897",
"0.53025967",
"0.5297627",
"0.52868927",
"0.52581596",
"0.52294654",
"0.5224852",
"0.5216758",
"0.51756084",
"0.51752704",
"0.51541066",
"0.5153563",
"0.5129914",
"0.5121719",
"0.511627",
"0.5110112",
"0.51087254",
"0.5107346",
"0.5094554"
] | 0.6263405 | 0 |
Check for valid csv data | def check_valid_csv_data(self, row):
obj = re.match(re.compile('^[0-9]{4}\,[A-Z]{1}[a-z]{2}\,.'),
','.join(row))
if not obj:
raise Exception("Invalid Data String must be like `1990` `Jan` Check Sample file") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_valid_csvformat(self, csv_path):\n with open(self.csv_path, \"rb+\") as file_obj:\n reader = csv.reader(file_obj, delimiter=',') # CSV DictReader object\n self.check_valid_csv_header(reader.next())\n self.check_valid_csv_data(reader.next())",
"def validate_csv_seq(sequence):\n if sequence.find(',') != -1 or sequence.find(';') != -1:\n return True\n else:\n return False",
"def validate_csv(filename: str) -> bool:\n # From: https://stackoverflow.com/questions/2984888/check-if-file-has-a-csv-format-with-python\n try:\n with open(filename, newline='') as csvfile:\n start = csvfile.read(4096)\n\n # isprintable does not allow newlines, printable does not allow umlauts...\n if not all([c in string.printable or c.isprintable() for c in start]):\n return False\n dialect = csv.Sniffer().sniff(start)\n return True\n except csv.Error:\n # Could not get a csv dialect -> probably not a csv.\n return False\n except UnicodeError:\n return False",
"def validate_csv(filename, header, cols, rows):\n\n # open file\n data = pd.read_csv(filename, delimiter='|')\n\n # validate header\n assert header == '|'.join(list(data.columns.values))\n\n # validate column count\n assert data.shape[1] == cols\n\n # validate row count\n assert data.shape[0] == rows\n\n # return (header_result == column_result == row_result) is True",
"def test_schema_invalid_format(self):\n bad_schema = [int, int, float, float, str]\n with self.assertRaisesRegexp(Exception, \"more than one char\"):\n self.context.frame.import_csv(self.dataset, bad_schema)",
"def test_valid_csv(self):\n url=\"http://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data\"\n readerobject=requester.url_to_csv(url, fname='wine2')\n self.assertTrue(str(type(readerobject)),\"_csv.reader\")",
"def check_valid_csv_header(self, row):\n obj = re.match(re.compile('^Year\\,Month\\,.'),\n ','.join(row))\n if not obj:\n raise Exception(\"Invalid Headers must be `Year` `Month` Check Sample file\")",
"def test_validate_file_extension_csv(self):\n data_contacts = open(self.test_dir + 'contacts.csv',\n encoding='utf-8')\n data_contacts_false = open(self.test_dir + 'mannheim_short.json',\n encoding='utf-8')\n a = validate_file_extension_csv(data_contacts)\n self.assertEqual(a, None)\n with self.assertRaises(ValidationError) as context:\n validate_file_extension_csv(data_contacts_false)\n data_contacts.close()\n data_contacts_false.close()\n self.assertTrue(\"Keine gültige CSV-Datei\" or \"No valid CSV file\" in\n str(context.exception))",
"def test_schema_invalid_type(self):\n bad_schema = -77\n with self.assertRaisesRegexp(Exception, \"more than one char\"):\n self.context.frame.import_csv(self.dataset, bad_schema)",
"def csv_parser_test():\n data = csv_parser(myspreadsheet)\n print 'Your data object:'\n pp = pprint.PrettyPrinter(indent=4)\n pp.pprint(data) \n # Did your parser work?\n for row_num, row in enumerate(data):\n try:\n assert len(row) == 3\n except AssertionError:\n print \"Row %s seems to be misparsed; its length is %s\" % (row_num, len(row))\n # Check on one of the values:\n try:\n assert data[4][2] == 'Linguist'\n except AssertionError:\n print \"Error: data[4][2] should equal 'Linguist'; actual value is %s\" % data[4][2]\n # Did you remember your int conversions?\n try:\n assert isinstance(data[0][0], int)\n except AssertionError:\n print \"Error: data[0][0] should be an int\"\n # Did you remember your float conversions?\n try:\n assert isinstance(data[6][1], float)\n except AssertionError:\n print \"Error: data[6][1] should be a float\"",
"def csv_parser_test():\r\n data = csv_parser(myspreadsheet)\r\n print('Your data object:')\r\n pp = pprint.PrettyPrinter(indent=4)\r\n pp.pprint(data) \r\n # Did your parser work?\r\n for row_num, row in enumerate(data):\r\n try:\r\n assert len(row) == 3\r\n except AssertionError:\r\n print ((\"Row %s seems to be misparsed; its length is %s\") % (row_num, len(row)))\r\n # Check on one of the values:\r\n try:\r\n assert data[4][2] == 'Linguist'\r\n except AssertionError:\r\n print ((\"Error: data[4][2] should equal 'Linguist'; actual value is %s\") % data[4][2])\r\n # Did you remember your int conversions?\r\n try:\r\n assert isinstance(data[0][0], int)\r\n except AssertionError:\r\n print (\"Error: data[0][0] should be an int\")\r\n # Did you remember your float conversions?\r\n try:\r\n assert isinstance(data[6][1], float)\r\n except AssertionError:\r\n print (\"Error: data[6][1] should be a float\")",
"def __is_csv(self):\n try:\n # just open to check if there is the file\n with open(self.__csv_file_name, 'r') as file:\n file.close()\n return True\n # if it do not exists the exception will returns false\n except IOError:\n return False",
"def open_convert_and_clean_csv(csv_data_file):\n imported_data = tablib.Dataset().load(open(csv_data_file).read())\n dataset = []\n for row in imported_data:\n if float(row[1]) > 0 and float(row[2]) > 0:\n dataset.append((row[0], float(row[1]), float(row[2])))\n return dataset",
"def test_invalid_header(self, tmpdir):\n path1 = tmpdir.join(\"invalid.csv\")\n path1.write(\"not,a,valid,header,row\")\n with pytest.raises(ValueError):\n parse_file(str(path1))\n\n path2 = tmpdir.join(\"valid.csv\")\n path2.write(\",\".join(HEADER_ROW))\n try:\n parse_file(str(path2))\n except ValueError:\n assert False, \"Unexpected ValueError\"",
"def test_csv_reader_data_contents(process_data):\n data = process_data(file_name_or_type='clean_map.csv')\n\n # Check row types\n for row in data:\n assert(isinstance(row['Country'], str))\n assert(isinstance(row['City'], str))\n assert(isinstance(row['State_Or_Province'], str))\n assert(isinstance(row['Lat'], float))\n assert(isinstance(row['Long'], float))\n assert(isinstance(row['Altitude'], float))\n\n # Basic data checks\n assert len(data) == 180 # We have collected 180 rows\n assert data[0]['Country'] == 'Andorra'\n assert data[106]['Country'] == 'Japan'",
"def _verify_csv_file_report(self, report_store, expected_data):\n report_csv_filename = report_store.links_for(self.course.id)[0][0]\n report_path = report_store.path_to(self.course.id, report_csv_filename)\n with report_store.storage.open(report_path) as csv_file:\n csv_file_data = csv_file.read()\n # Removing unicode signature (BOM) from the beginning\n csv_file_data = csv_file_data.decode(\"utf-8-sig\")\n for data in expected_data:\n self.assertIn(data, csv_file_data)",
"def test_validate_csv():\n duplicate_keys_file_path = os.path.join(\n TEST_DATA_DIR, \"clubs_invalid_duplicate_keys.csv\"\n )\n\n invalid_headers_file_path = os.path.join(\n TEST_DATA_DIR, \"membership_invalid_syntax.csv\"\n )\n\n # Test duplicate keys\n with open(duplicate_keys_file_path) as test_file:\n test_file = test_file.read()\n\n rows = list(csv.DictReader(StringIO(test_file)))\n\n with pytest.raises(ValidationFailed) as v_error:\n validate_csv(rows)\n\n validation_resp = v_error.value.errors[0]\n assert \"error\" in validation_resp\n duplicate_keys = validation_resp[\"detail\"]\n assert \"5\" in duplicate_keys\n assert \"2\" in duplicate_keys\n\n # Test invalid syntax\n with open(invalid_headers_file_path) as test_file:\n test_file = test_file.read()\n\n rows = list(csv.DictReader(StringIO(test_file)))\n with pytest.raises(ValidationFailed) as v_error:\n validate_csv(rows)\n\n validation_resp = v_error.value.errors[0]\n invalid_rows = [x[\"row\"] for x in validation_resp[\"detail\"]]\n assert \"error\" in validation_resp\n assert 3 in invalid_rows\n assert 4 in invalid_rows\n assert 5 in invalid_rows\n\n # Test unicode decode errors\n test_data = b\"\\xff\\xfe_\\x00k\\x00e\\x00y\\x00,\\x00n\\x00a\\x00m\\x00e\\x00\\n\"\n pytest.raises(DecodeFailed, decode_data, test_data)",
"def handle_csv(self):\n try:\n reader = csv.reader(open(self.options.datafile, 'r'))\n except IOError:\n errormsg(_('Cannot read \"{}\"'.format(self.options.datafile)))\n raise Exception(_('Cannot read \"{}\"'.format(self.options.datafile)))\n if self.options.var_type == 'name':\n try:\n self.header = reader.next()\n except StopIteration:\n errormsg(_('Data file \"{}\" contains no data'.format(\n self.options.datafile)))\n raise Exception(_('Data file \"{}\" contains no data'.format(\n self.options.datafile)))\n self.data = []\n for row in reader:\n self.data.append(row)",
"def validate_data(self, row, col, value):\n\n return True",
"def test_parse_invalid_file(self):\n with pytest.raises(ParserError):\n self.parser.parse(\"invalid csv\")",
"def test_load_csv_file():\n data = loader.load_csv_file(\"buildup/reference/comsol_solution/lofi/voltage.csv.bz2\")\n\n assert data.any()",
"def check_meatadata_row(validated, input_validate_dict, row, idx):\n\n if row['RealCrystalName'].isspace() or row['RealCrystalName'] == 'nan':\n add_tset_warning(input_validate_dict, 'Metadata.csv', 'RealCrystalName spaces or null', idx + 2)\n validated = False\n if row['crystal_name'].isspace() or row['RealCrystalName'] == 'nan':\n add_tset_warning(input_validate_dict, 'Metadata.csv', 'Crystal name spaces or null', idx + 2)\n validated = False\n if row['RealCrystalName'] not in row['crystal_name']:\n add_tset_warning(input_validate_dict, 'Metadata.csv', 'Crystal name does not contain RealCrystalName', idx + 2)\n validated = False\n if row['smiles'] == 'nan':\n add_tset_warning(input_validate_dict, 'Metadata.csv', 'Smiles null', idx + 2)\n validated = False\n\n return validated, input_validate_dict",
"def process(self, record):\n is_data = True\n if self.file_path.split('.')[-1] == 'csv':\n if self.header_skip:\n logging.info('Skipping header data... {}'.format(record))\n self.header_skip = False\n is_data = False\n return [(record, None, None, is_data)]\n record_attributes = list(csv.reader([record]))[0]\n if len(record_attributes) != len(self.schema[FIELDS_KEY]):\n if len(record_attributes) > 1 or not record_attributes[0].strip().isdigit():\n IS_VALID_FILE = False\n is_data = None\n return [(record, None, None, is_data)]\n for record_attribute, attribute_schema in zip(\n record_attributes, self.schema[FIELDS_KEY]):\n is_valid_datatype_check = self.__datatype_check(record_attribute, attribute_schema)\n is_valid_null_check = self.__null_check(record_attribute, attribute_schema)\n return [(record, is_valid_datatype_check, is_valid_null_check, is_data)]",
"def read_csv():",
"def is_valid(self, dataset):\n pass",
"def validate_dataset(self):\n pass",
"def test_csvfile_get_data_impossible_filter(fs: FakeFilesystem) -> None:\n fs.create_file(\"test.csv\", contents=CONTENTS)\n\n adapter = CSVFile(\"test.csv\")\n assert list(adapter.get_data({\"index\": Impossible()}, [])) == []",
"def test_csvfile_different_types(fs: FakeFilesystem) -> None:\n contents = '''\"a\"\n1\n2.0\n\"test\"'''\n fs.create_file(\"test.csv\", contents=contents)\n\n adapter = CSVFile(\"test.csv\")\n\n assert adapter.get_columns() == {\n \"a\": String(\n filters=[Range, Equal, NotEqual, IsNull, IsNotNull],\n order=Order.NONE,\n exact=True,\n ),\n }",
"def filterLines(weatherRDDRecord):\n fieldsList = weatherRDDRecord.split(\",\")\n #return len(fieldsList)\n if any(i.isdigit() for i in fieldsList[0]):\n return True\n else:\n return False",
"def _validate_row(self, row):\n\n # assume value.\n is_valid = True\n\n # test if each field in @row has the correct data type.\n tests = []\n for field, value in row.items():\n value_type, header_type = (type(value).__name__, \n self.required_headers[field].__name__)\n test = value_type == header_type\n if not test:\n err = \"Field '{}' not valid; expected '{}', got '{}'.\".format(field,\n header_type, value_type)\n self.logger.debug(err)\n tests.append(test)\n\n # if any test failed, set @is_valid to False.\n if False in tests:\n is_valid = False\n \n return is_valid"
] | [
"0.80091965",
"0.75875276",
"0.75167954",
"0.741095",
"0.71336776",
"0.70634043",
"0.6948182",
"0.68665993",
"0.6859504",
"0.67613226",
"0.67475",
"0.6734457",
"0.67083627",
"0.6599125",
"0.6583989",
"0.65218884",
"0.6469705",
"0.64656794",
"0.6436142",
"0.6427924",
"0.64077896",
"0.63980997",
"0.63969994",
"0.63700426",
"0.63189805",
"0.63122225",
"0.6297279",
"0.6287425",
"0.6284053",
"0.6282969"
] | 0.7886953 | 1 |
Check if the csv is in a valid format and contains valid data | def check_valid_csvformat(self, csv_path):
with open(self.csv_path, "rb+") as file_obj:
            reader = csv.reader(file_obj, delimiter=',') # csv.reader object
self.check_valid_csv_header(reader.next())
self.check_valid_csv_data(reader.next()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_valid_csv_data(self, row):\n obj = re.match(re.compile('^[0-9]{4}\\,[A-Z]{1}[a-z]{2}\\,.'),\n ','.join(row))\n if not obj:\n raise Exception(\"Invalid Data String must be like `1990` `Jan` Check Sample file\")",
"def validate_csv(filename: str) -> bool:\n # From: https://stackoverflow.com/questions/2984888/check-if-file-has-a-csv-format-with-python\n try:\n with open(filename, newline='') as csvfile:\n start = csvfile.read(4096)\n\n # isprintable does not allow newlines, printable does not allow umlauts...\n if not all([c in string.printable or c.isprintable() for c in start]):\n return False\n dialect = csv.Sniffer().sniff(start)\n return True\n except csv.Error:\n # Could not get a csv dialect -> probably not a csv.\n return False\n except UnicodeError:\n return False",
"def validate_csv_seq(sequence):\n if sequence.find(',') != -1 or sequence.find(';') != -1:\n return True\n else:\n return False",
"def validate_csv(filename, header, cols, rows):\n\n # open file\n data = pd.read_csv(filename, delimiter='|')\n\n # validate header\n assert header == '|'.join(list(data.columns.values))\n\n # validate column count\n assert data.shape[1] == cols\n\n # validate row count\n assert data.shape[0] == rows\n\n # return (header_result == column_result == row_result) is True",
"def test_schema_invalid_format(self):\n bad_schema = [int, int, float, float, str]\n with self.assertRaisesRegexp(Exception, \"more than one char\"):\n self.context.frame.import_csv(self.dataset, bad_schema)",
"def test_valid_csv(self):\n url=\"http://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data\"\n readerobject=requester.url_to_csv(url, fname='wine2')\n self.assertTrue(str(type(readerobject)),\"_csv.reader\")",
"def check_valid_csv_header(self, row):\n obj = re.match(re.compile('^Year\\,Month\\,.'),\n ','.join(row))\n if not obj:\n raise Exception(\"Invalid Headers must be `Year` `Month` Check Sample file\")",
"def csv_parser_test():\n data = csv_parser(myspreadsheet)\n print 'Your data object:'\n pp = pprint.PrettyPrinter(indent=4)\n pp.pprint(data) \n # Did your parser work?\n for row_num, row in enumerate(data):\n try:\n assert len(row) == 3\n except AssertionError:\n print \"Row %s seems to be misparsed; its length is %s\" % (row_num, len(row))\n # Check on one of the values:\n try:\n assert data[4][2] == 'Linguist'\n except AssertionError:\n print \"Error: data[4][2] should equal 'Linguist'; actual value is %s\" % data[4][2]\n # Did you remember your int conversions?\n try:\n assert isinstance(data[0][0], int)\n except AssertionError:\n print \"Error: data[0][0] should be an int\"\n # Did you remember your float conversions?\n try:\n assert isinstance(data[6][1], float)\n except AssertionError:\n print \"Error: data[6][1] should be a float\"",
"def csv_parser_test():\r\n data = csv_parser(myspreadsheet)\r\n print('Your data object:')\r\n pp = pprint.PrettyPrinter(indent=4)\r\n pp.pprint(data) \r\n # Did your parser work?\r\n for row_num, row in enumerate(data):\r\n try:\r\n assert len(row) == 3\r\n except AssertionError:\r\n print ((\"Row %s seems to be misparsed; its length is %s\") % (row_num, len(row)))\r\n # Check on one of the values:\r\n try:\r\n assert data[4][2] == 'Linguist'\r\n except AssertionError:\r\n print ((\"Error: data[4][2] should equal 'Linguist'; actual value is %s\") % data[4][2])\r\n # Did you remember your int conversions?\r\n try:\r\n assert isinstance(data[0][0], int)\r\n except AssertionError:\r\n print (\"Error: data[0][0] should be an int\")\r\n # Did you remember your float conversions?\r\n try:\r\n assert isinstance(data[6][1], float)\r\n except AssertionError:\r\n print (\"Error: data[6][1] should be a float\")",
"def test_schema_invalid_type(self):\n bad_schema = -77\n with self.assertRaisesRegexp(Exception, \"more than one char\"):\n self.context.frame.import_csv(self.dataset, bad_schema)",
"def isFormatOk(self, row):\n try:\n date = datetime.strptime(row[0], \"%m/%d/%Y\").date()\n state = fix_text(row[1])\n impressions = int(row[2])\n if impressions < 0:\n raise ValueError\n CTR = float(row[3].replace(\"%\",\"\"))\n if CTR < 0 or CTR > 1:\n raise ValueError\n except ValueError as e:\n print(f\"Wrong format of provided data {row}\", file=sys.stderr)\n return False\n return Record(date=date, state=state, impressions=impressions, CTR=CTR)",
"def test_validate_file_extension_csv(self):\n data_contacts = open(self.test_dir + 'contacts.csv',\n encoding='utf-8')\n data_contacts_false = open(self.test_dir + 'mannheim_short.json',\n encoding='utf-8')\n a = validate_file_extension_csv(data_contacts)\n self.assertEqual(a, None)\n with self.assertRaises(ValidationError) as context:\n validate_file_extension_csv(data_contacts_false)\n data_contacts.close()\n data_contacts_false.close()\n self.assertTrue(\"Keine gültige CSV-Datei\" or \"No valid CSV file\" in\n str(context.exception))",
"def __is_csv(self):\n try:\n # just open to check if there is the file\n with open(self.__csv_file_name, 'r') as file:\n file.close()\n return True\n # if it do not exists the exception will returns false\n except IOError:\n return False",
"def validate_bed_format(row):\n assert len(row) >= 3, 'Bed Files must have at least 3 tab separated fields.'\n\n return True",
"def test_validate_csv():\n duplicate_keys_file_path = os.path.join(\n TEST_DATA_DIR, \"clubs_invalid_duplicate_keys.csv\"\n )\n\n invalid_headers_file_path = os.path.join(\n TEST_DATA_DIR, \"membership_invalid_syntax.csv\"\n )\n\n # Test duplicate keys\n with open(duplicate_keys_file_path) as test_file:\n test_file = test_file.read()\n\n rows = list(csv.DictReader(StringIO(test_file)))\n\n with pytest.raises(ValidationFailed) as v_error:\n validate_csv(rows)\n\n validation_resp = v_error.value.errors[0]\n assert \"error\" in validation_resp\n duplicate_keys = validation_resp[\"detail\"]\n assert \"5\" in duplicate_keys\n assert \"2\" in duplicate_keys\n\n # Test invalid syntax\n with open(invalid_headers_file_path) as test_file:\n test_file = test_file.read()\n\n rows = list(csv.DictReader(StringIO(test_file)))\n with pytest.raises(ValidationFailed) as v_error:\n validate_csv(rows)\n\n validation_resp = v_error.value.errors[0]\n invalid_rows = [x[\"row\"] for x in validation_resp[\"detail\"]]\n assert \"error\" in validation_resp\n assert 3 in invalid_rows\n assert 4 in invalid_rows\n assert 5 in invalid_rows\n\n # Test unicode decode errors\n test_data = b\"\\xff\\xfe_\\x00k\\x00e\\x00y\\x00,\\x00n\\x00a\\x00m\\x00e\\x00\\n\"\n pytest.raises(DecodeFailed, decode_data, test_data)",
"def open_convert_and_clean_csv(csv_data_file):\n imported_data = tablib.Dataset().load(open(csv_data_file).read())\n dataset = []\n for row in imported_data:\n if float(row[1]) > 0 and float(row[2]) > 0:\n dataset.append((row[0], float(row[1]), float(row[2])))\n return dataset",
"def test_csv_reader_data_contents(process_data):\n data = process_data(file_name_or_type='clean_map.csv')\n\n # Check row types\n for row in data:\n assert(isinstance(row['Country'], str))\n assert(isinstance(row['City'], str))\n assert(isinstance(row['State_Or_Province'], str))\n assert(isinstance(row['Lat'], float))\n assert(isinstance(row['Long'], float))\n assert(isinstance(row['Altitude'], float))\n\n # Basic data checks\n assert len(data) == 180 # We have collected 180 rows\n assert data[0]['Country'] == 'Andorra'\n assert data[106]['Country'] == 'Japan'",
"def process(self, record):\n is_data = True\n if self.file_path.split('.')[-1] == 'csv':\n if self.header_skip:\n logging.info('Skipping header data... {}'.format(record))\n self.header_skip = False\n is_data = False\n return [(record, None, None, is_data)]\n record_attributes = list(csv.reader([record]))[0]\n if len(record_attributes) != len(self.schema[FIELDS_KEY]):\n if len(record_attributes) > 1 or not record_attributes[0].strip().isdigit():\n IS_VALID_FILE = False\n is_data = None\n return [(record, None, None, is_data)]\n for record_attribute, attribute_schema in zip(\n record_attributes, self.schema[FIELDS_KEY]):\n is_valid_datatype_check = self.__datatype_check(record_attribute, attribute_schema)\n is_valid_null_check = self.__null_check(record_attribute, attribute_schema)\n return [(record, is_valid_datatype_check, is_valid_null_check, is_data)]",
"def test_invalid_header(self, tmpdir):\n path1 = tmpdir.join(\"invalid.csv\")\n path1.write(\"not,a,valid,header,row\")\n with pytest.raises(ValueError):\n parse_file(str(path1))\n\n path2 = tmpdir.join(\"valid.csv\")\n path2.write(\",\".join(HEADER_ROW))\n try:\n parse_file(str(path2))\n except ValueError:\n assert False, \"Unexpected ValueError\"",
"def _validate_row(self, row):\n\n # assume value.\n is_valid = True\n\n # test if each field in @row has the correct data type.\n tests = []\n for field, value in row.items():\n value_type, header_type = (type(value).__name__, \n self.required_headers[field].__name__)\n test = value_type == header_type\n if not test:\n err = \"Field '{}' not valid; expected '{}', got '{}'.\".format(field,\n header_type, value_type)\n self.logger.debug(err)\n tests.append(test)\n\n # if any test failed, set @is_valid to False.\n if False in tests:\n is_valid = False\n \n return is_valid",
"def test_csvfile_different_types(fs: FakeFilesystem) -> None:\n contents = '''\"a\"\n1\n2.0\n\"test\"'''\n fs.create_file(\"test.csv\", contents=contents)\n\n adapter = CSVFile(\"test.csv\")\n\n assert adapter.get_columns() == {\n \"a\": String(\n filters=[Range, Equal, NotEqual, IsNull, IsNotNull],\n order=Order.NONE,\n exact=True,\n ),\n }",
"def test_is_valid_manifest_format_with_csv(caplog):\n assert is_valid_manifest_format(\"tests/test_manifest.csv\") == True\n assert caplog.text == \"\"",
"def isLineData(self, line):\n\n if line is None or line.strip().startswith('#'):\n return False, None, 0\n\n dataType = self.getDataType()\n\n if dataType == 'Y':\n # Y with 1 column\n try:\n yValue = float(line)\n\n return True, 'Y', 1\n except:\n pass\n\n # Y with comma 2 to 5 column\n try:\n yValueList = []\n yValueList = line.split(',')\n\n if len(yValueList) > 1 and len(yValueList) <= 5:\n newYValues = []\n for yValue in yValueList:\n try:\n yValue = float(yValue)\n newYValues.append(yValue)\n except ValueError:\n pass\n\n return True, 'Y', len(newYValues)\n except:\n pass\n\n # Y with space 2 to 5 column\n try:\n yValueList = []\n yValueList = line.split()\n\n if len(yValueList) > 1 and len(yValueList) <= 5:\n for yValue in yValueList:\n yValue = float(yValue)\n\n return True, 'Y', len(yValueList)\n except:\n pass\n elif dataType == 'XY':\n # XY with comma\n try:\n (xValue, yValue) = line.split(',')\n\n xValue = float(xValue)\n yValue = float(yValue)\n\n return True, 'XY', 2\n except:\n pass\n\n # XY with comma\n try:\n xValue, yValue, dummy = line.split(',')\n\n xValue = float(xValue)\n yValue = float(yValue)\n\n return True, 'XY', 2\n except:\n pass\n\n # XY with space\n try:\n (xValue, yValue) = line.split()\n\n xValue = float(xValue)\n yValue = float(yValue)\n\n return True, 'XY', 2\n except:\n pass\n else:\n # Y with 1 column\n try:\n yValue = float(line)\n\n return True, 'Y', 1\n except:\n pass\n\n # Y with comma 2 to 5 column\n try:\n yValueList = []\n yValueList = line.split(',')\n\n if len(yValueList) > 1 and len(yValueList) <= 5:\n numberValues = 0\n for yValue in yValueList:\n try:\n yValue = float(yValue)\n numberValues += 1\n except ValueError:\n pass\n\n return True, 'Y', numberValues\n except:\n pass\n\n # Y with space 2 to 5 column\n try:\n yValueList = []\n yValueList = line.split()\n\n if len(yValueList) > 1 and len(yValueList) <= 5:\n for yValue in yValueList:\n yValue = float(yValue)\n\n return True, 'Y', len(yValueList)\n except:\n pass\n\n # XY with comma\n try:\n (xValue, yValue) = line.split(',')\n\n xValue = float(xValue)\n yValue = float(yValue)\n\n return True, 'XY', 2\n except:\n pass\n\n # XY with comma\n try:\n xValue, yValue, dummy = line.split(',')\n\n xValue = float(xValue)\n yValue = float(yValue)\n\n return True, 'XY', 2\n except:\n pass\n\n # XY with space\n try:\n (xValue, yValue) = line.split()\n\n xValue = float(xValue)\n yValue = float(yValue)\n\n return True, 'XY', 2\n except:\n pass\n\n return False, None, 0",
"def check_meatadata_row(validated, input_validate_dict, row, idx):\n\n if row['RealCrystalName'].isspace() or row['RealCrystalName'] == 'nan':\n add_tset_warning(input_validate_dict, 'Metadata.csv', 'RealCrystalName spaces or null', idx + 2)\n validated = False\n if row['crystal_name'].isspace() or row['RealCrystalName'] == 'nan':\n add_tset_warning(input_validate_dict, 'Metadata.csv', 'Crystal name spaces or null', idx + 2)\n validated = False\n if row['RealCrystalName'] not in row['crystal_name']:\n add_tset_warning(input_validate_dict, 'Metadata.csv', 'Crystal name does not contain RealCrystalName', idx + 2)\n validated = False\n if row['smiles'] == 'nan':\n add_tset_warning(input_validate_dict, 'Metadata.csv', 'Smiles null', idx + 2)\n validated = False\n\n return validated, input_validate_dict",
"def test_with_no_specified_or_inferred_schema(self):\n # should default to creating a schema of all strings\n frame = self.context.frame.import_csv(self.dataset, infer_schema=False)\n expected_schema = [(\"C0\", str), (\"C1\", str), (\"C2\", str)]\n self.assertEqual(frame.schema, expected_schema)",
"def test_parse_invalid_file(self):\n with pytest.raises(ParserError):\n self.parser.parse(\"invalid csv\")",
"def test_misc_csv_read():\n r = csv_reader(\"../test/test.csv\")\n fields = r.hdr()\n data = str(fields)\n while True:\n row = r.row()\n if not row: break\n data += '\\n' + str(row)\n\n assert(data == \"\"\"\n['EVT_CODE*', 'EVT_DATE.DE', 'CODE', 'AGE', 'FRST', 'LST', 'SPEC', 'de.id']\n['tea', '2018/01/01', 'X', '35', 'PRE', 'WHO', 'BUG', '1']\n['coffee', '2018/05/05', 'X', '35', 'JAN,Z', 'WHO', 'FRG', '1']\n['water', '2018/01/01', 'Y', '35', 'TAN', 'POST', 'CAT', '2']\n \"\"\".strip())",
"def test_load_csv_file():\n data = loader.load_csv_file(\"buildup/reference/comsol_solution/lofi/voltage.csv.bz2\")\n\n assert data.any()",
"def filterLines(weatherRDDRecord):\n fieldsList = weatherRDDRecord.split(\",\")\n #return len(fieldsList)\n if any(i.isdigit() for i in fieldsList[0]):\n return True\n else:\n return False",
"def test_itercsv_emits_data_lines():\n expected = [\n b'Hello,World\\r\\n',\n b'1,2\\r\\n',\n b'3,4\\r\\n'\n ]\n assert list(itercsv(['Hello', 'World'], [[1, 2], [3, 4]])) == expected"
] | [
"0.79888827",
"0.77903473",
"0.7676916",
"0.73605514",
"0.72107303",
"0.69361657",
"0.6886044",
"0.68570256",
"0.68204045",
"0.67923874",
"0.67035025",
"0.65973264",
"0.65804535",
"0.6500226",
"0.6483696",
"0.644907",
"0.6433415",
"0.64274734",
"0.63892037",
"0.6351961",
"0.63333106",
"0.62869",
"0.62831384",
"0.62623185",
"0.62588614",
"0.62510127",
"0.6190187",
"0.61790985",
"0.61496836",
"0.61432505"
] | 0.8132414 | 0 |
Prepare the company's data | def prepare_company_data(self, month, year, row, company_data):
for key, value in row.items():
if not company_data[key]:
company_data[key] = {'year':year, 'month':month, 'value':value}
else:
"""main operation updating the company's data per year
                and month wise """
company_data[key].update({'year':year, 'month':month, 'value':value})\
if company_data[key]['value'] < value else None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __getCompaniesData(self, schema):\n try:\n self.cursor.execute(\"\"\"SELECT id, twitter, proven_score, slug FROM {schema}.vendors_vendor WHERE\n twitter <> ''\"\"\".format(schema=schema))\n data = self.cursor.fetchall()\n\n companies = []\n for entry in data:\n self.cursor.execute('SELECT location_id FROM {schema}.vendors_vendorlocation WHERE vendor_id = {vendor}'.format(schema=schema, vendor=entry[0]))\n cities = self.cursor.fetchall()\n\n if cities is None:\n continue\n\n city = ''\n\n for cityId in cities:\n self.cursor.execute('SELECT city FROM {schema}.locations_location WHERE id = {city}'.format(schema=schema, city=cityId[0]))\n cityName = self.cursor.fetchone()\n\n if cityName is not None:\n city += cityName[0]\n\n self.cursor.execute('SELECT category_id, rank FROM {schema}.vendors_vendorcustomkind WHERE vendor_id = {vendor} AND \"primary\" is true'.format(schema=schema, vendor=entry[0]))\n customKind = self.cursor.fetchone()\n\n if customKind is None:\n catId = rank = None\n else:\n catId, rank = customKind\n\n if catId is not None:\n self.cursor.execute('SELECT name, slug FROM {schema}.categories_category WHERE id = {cat}'.format(schema=schema, cat=catId))\n catData = self.cursor.fetchone()\n else:\n catData = None\n\n companies.append(DBItemCompany(\n _id = entry[0],\n tweeter = entry[1],\n category = catData[0] if catData is not None else None,\n categoryUrl = self.__buildCategoryUrl(catId, schema) if catId is not None else None,\n provenScore = entry[2],\n ranking = rank,\n location = city,\n url = self.__buildProfileUrl(catData[1], entry[3], schema) if catData is not None else self.__buildProfileUrlWOCategory(entry[3], schema),\n categoryId = catId\n ))\n\n self.__companies[schema] = companies\n\n except psycopg2.DatabaseError as err:\n raise DBException(err.args[0])",
"def prepare_data(self):",
"def __load_company_data(self):\n\n for ticker_type, ticker_list in self.tickers.items():\n # yfinance only has sector, industry and country for stocks\n if ticker_type == \"STOCK\":\n for ticker in ticker_list:\n # Only gets fields for tickers with missing data\n # TODO: Should only get field missing for tickers with missing data\n # now it's taking the 4 of them\n if (\n self.__transactions.loc[\n self.__transactions[\"Ticker\"] == ticker,\n [\"Sector\", \"Industry\", \"Country\", \"Region\"],\n ]\n .isnull()\n .values.any()\n ):\n # Get ticker info in list [\"Sector\", \"Industry\", \"Country\", \"Region\"] from isin/ticker\n info_list = get_info_from_ticker(ticker)\n\n # Replace fields in transactions\n self.__transactions.loc[\n self.__transactions.Ticker == ticker,\n [\"Sector\", \"Industry\", \"Country\", \"Region\"],\n ] = info_list\n\n elif ticker_type == \"CRYPTO\":\n for ticker in ticker_list:\n if (\n self.__transactions.loc[\n self.__transactions[\"Ticker\"] == ticker,\n [\"Sector\", \"Industry\", \"Country\", \"Region\"],\n ]\n .isnull()\n .values.any()\n ):\n # Get ticker info in list [\"Sector\", \"Industry\", \"Country\", \"Region\"]\n info_list = [\"Crypto\", \"Crypto\", \"Crypto\", \"Crypto\"]\n\n # Replace fields in transactions\n self.__transactions.loc[\n self.__transactions.Ticker == ticker,\n [\"Sector\", \"Industry\", \"Country\", \"Region\"],\n ] = info_list\n\n else:\n for ticker in ticker_list:\n if (\n self.__transactions.loc[\n self.__transactions[\"Ticker\"] == ticker,\n [\"Sector\", \"Industry\", \"Country\", \"Region\"],\n ]\n .isnull()\n .values.any()\n ):\n # Get ticker info in list [\"Sector\", \"Industry\", \"Country\", \"Region\"]\n info_list = [\"-\", \"-\", \"-\", \"-\"]\n\n # Replace fields in transactions\n self.__transactions.loc[\n self.__transactions.Ticker == ticker,\n [\"Sector\", \"Industry\", \"Country\", \"Region\"],\n ] = info_list",
"def prepare_data(self):\n\n # Get days abbrs and month names lists\n self.month_names = cal_data.get_month_names()\n self.month_names_eng = cal_data.get_month_names_eng()\n self.days_abrs = cal_data.get_days_abbrs()\n\n # Today date\n self.active_date = cal_data.today_date_list()\n # Set title\n self.title = \"%s - %s\" % (self.month_names[self.active_date[1] - 1],\n self.active_date[2])\n\n # Quarter where current month in the self.quarter[1]\n self.get_quarter()",
"def _load_companies(self):\n if os.path.exists(self.PATH_TO_COMPANY_FILES + '/Companies.csv'):\n df = pd.read_csv(self.PATH_TO_COMPANY_FILES + '/Companies.csv')\n self.Symbols = list(df['Symbol'])\n self.FullNames = list(df['FullName'])\n self.CSVNames = list(df['CSVName'])\n self.sectors = list(df['Sector'])\n self.companies = df\n \n return",
"def feed_company_from_db1(output_data, domain):\n companyfl = CompanyFromdb1.objects.using('db1').filter(\n company_domain__iexact=domain,\n active=1\n )[0]\n\n if companyfl.company_name:\n output_data['name'] = companyfl.company_name\n\n if companyfl.company_phone:\n output_data['phone'] = companyfl.company_phone\n\n if companyfl.company_website:\n analyzed_url = urlparse(companyfl.company_website)\n if analyzed_url.netloc and analyzed_url.scheme:\n website_url = \"%s://%s\" % (\n analyzed_url.scheme,\n analyzed_url.netloc\n )\n elif analyzed_url.netloc and not analyzed_url.scheme:\n website_url = analyzed_url.netloc\n else:\n website_url = analyzed_url.path\n output_data['website_url'] = website_url\n\n if (companyfl.company_size and\n company_size_mapping_dict.get(companyfl.company_size)):\n output_data['size'] = company_size_mapping_dict.get(\n companyfl.company_size\n )\n\n if companyfl.company_remarks:\n output_data['description'] = (\n companyfl.\n company_remarks.\n replace('\\n', ' ').\n replace('\\r', '')\n )\n\n if companyfl.company_social:\n output_data['linkedin_url'] = companyfl.company_social\n\n if companyfl.sectors:\n output_data['industries'] = companyfl.sectors.split(u'§')\n\n if companyfl.profiles:\n output_data['types'] = companyfl.profiles.split(u'§')\n\n if companyfl.updated_on:\n output_data['last_updated'] = companyfl.updated_on\n\n # only retrieving email if email_status=VAL and row was updated less than\n # 365days ago\n if companyfl.company_email_status == \"VAL\" and companyfl.updated_on:\n duration_in_days = (timezone.now() - companyfl.updated_on).days\n if duration_in_days <= 365:\n output_data['email'] = companyfl.company_email\n\n if companyfl.street_name and companyfl.city and companyfl.country:\n # TODO: if street_number or postcode are None, we do not add it but it\n # leaves 2 spaces...find a way to solve it intelligently\n output_data['formatted_address'] = \"%s %s, %s %s, %s\" % (\n companyfl.street_number if companyfl.street_number else '',\n companyfl.street_name,\n companyfl.postcode if companyfl.postcode else '',\n companyfl.city,\n companyfl.country.country_name\n )\n\n return output_data",
"def prepare_data(self):\n import subprocess\n # Download coco data set into dir specified by config then /data/coco\n subprocess.call([f\"{get_original_cwd()}/bin/fetch_dataset.sh\", f\"{self.dir}/data/coco\", f\"{get_original_cwd()}\"])\n # subprocess.call([f\"bin/fetch_dataset.sh\", f\"{self.dir}/data/coco\"])\n task = \"instances\" if self.instance else \"person_keypoints\"\n register_coco_instances(\"train\", {}, f\"{self.dir}/data/coco/{task}_train2014.json\",\n f\"{self.dir}/data/coco/train2014\")\n register_coco_instances(\"val\", {}, f\"{self.dir}/data/coco/{task}_minival2014.json\",\n f\"{self.dir}/data/coco/val2014\")\n register_coco_instances(\"test\", {}, f\"{self.dir}/data/coco/{task}_valminusminival2014.json\",\n f\"{self.dir}/data/coco/val2014\")",
"def setup_data(es_with_collector):\n country_uk = constants.Country.united_kingdom.value.id\n country_us = constants.Country.united_states.value.id\n uk_region = constants.UKRegion.south_east.value.id\n CompanyFactory(\n name='abc defg ltd',\n trading_names=['helm', 'nop'],\n address_1='1 Fake Lane',\n address_town='Downtown',\n address_country_id=country_uk,\n uk_region_id=uk_region,\n )\n CompanyFactory(\n name='abc defg us ltd',\n trading_names=['helm', 'nop', 'qrs'],\n address_1='1 Fake Lane',\n address_town='Downtown',\n address_country_id=country_us,\n registered_address_country_id=country_us,\n )\n es_with_collector.flush_and_refresh()",
"def make_companies():\n logging.info(\"Making CH\")\n companies_address = get_address()\n companies_sector = get_sector()\n\n companies = (\n companies_address[[\"company_number\", \"postcode\"]]\n .merge(\n companies_sector.query(\"rank==1\")[[\"company_number\", \"SIC4_code\"]],\n on=\"company_number\",\n )\n .assign(division=lambda x: [c[:2] for c in x[\"SIC4_code\"]])\n .assign(division_name=lambda x: x[\"division\"].map(_DIV_NAME_LOOKUP))\n .merge(nspl, left_on=\"postcode\", right_on=\"pcds\")\n )\n\n return companies",
"def _store_company_info(self, company_info):\n def add_node_company(tx, *args, **kwargs):\n props = ', '.join('c.' + prop + ' = $' + prop for prop in kwargs.keys())\n tx.run('MERGE (c: Company {{rc: $rc}}) '\n f'ON CREATE SET {props}', *args, **kwargs)\n\n def add_node_person(tx, *args, **kwargs):\n props = ', '.join('(p: Person {name: \"' + name + '\"}' + ')' for name in kwargs['p_name'])\n tx.run(f'MERGE {props}', *args, **kwargs)\n\n def add_rela_works_in(tx, *args, **kwargs):\n tx.run('MATCH (c: Company {name: $c_name}), (p: Person) '\n 'WHERE p.name in $p_name '\n 'MERGE (p)-[:WORKS_IN]->(c)', *args, **kwargs)\n\n with self._DRIVER.session() as session:\n representatives = company_info.pop('repr')\n session.write_transaction(add_node_company, **company_info)\n session.write_transaction(add_node_person, p_name=representatives)\n session.write_transaction(add_rela_works_in, p_name=representatives, c_name=company_info['name'])",
"def retrieve_company_data(self):\n self.set_stock_sym_append_str('')\n self.set_stock_retrieval_type('all') #'all', watcher\n self.load_stock_symbol_fr_file()",
"def create_company(list_of_data):\n ID = common.generate_random(list_of_data)\n user_input = ui.get_inputs(list_labels, title)\n user_input.insert(0, ID)\n list_of_data.append(user_input)\n with open(\"company/company_data.csv\",\"w\") as f:\n for i in range(len(list_of_data)):\n row = ','.join(list_of_data[i])\n f.write(row + '\\n')",
"def __init__(self, name, **company_data):\n self.name = name\n self.__dict__.update(company_data)",
"def _match_fields(self):\n\n data = {\n 'user': self.user,\n 'name': self.data['Company Name'].lower(),\n 'address': self.data['Mailing Address'].lower(),\n 'city': self.data['Mailing City'].lower(),\n 'state': self.data['Mailing State'].lower(),\n 'zipcode': self.__parse__('zipcode'),\n 'phone': self.__parse__('phone'),\n 'first_name': self.data['Executive First Name'].lower(),\n 'last_name': self.data['Executive Last Name'].lower(),\n 'title': self.data['Executive Title'].lower(),\n 'profile': self._get_profile(),\n 'sales_volume': self.data['Location Sales Volume Range'],\n 'employee_count': self.__parse__('employee_count'),\n 'origin': 'C' # cold call\n }\n Company(**data).save()\n return len(data);",
"def __init__(self, path):\n self.csv_path = path\n # check if csv format is valid or not\n self.check_valid_csvformat(self.csv_path)\n \"\"\" empty dict to store all company names\n prepare initial company data in dictionary format \"\"\"\n self.company_data = dict()",
"def generate_company_library():\n\n companyids = generate_company_list()\n counter = 0\n companyids = companyids[counter:]\n\n for id in companyids:\n print(counter,\"generating\", id + \".csv\")\n try:\n update_data(id)\n except:\n pass\n counter += 1",
"def create_company_df(companies):\n\n companies = list(set(companies)) # removes duplicates\n\n symbols = []\n exchanges = []\n ynames = []\n is_us = []\n\n for company in companies:\n sym, exch, yco, usa = check_usa_mkts(get_company_info(company))\n symbols.append(sym)\n exchanges.append(exch)\n ynames.append(yco)\n is_us.append(usa)\n\n marketcaps = []\n sizes = []\n urls = []\n urls_pr = []\n\n for sym, co in zip(symbols, companies):\n if sym == \"n/a\":\n print(f\"Skipping {co}\\n\")\n marketcaps.append(\"n/a\")\n sizes.append(\"n/a\")\n urls.append(\"n/a\")\n urls_pr.append(\"n/a\")\n continue\n\n print(f\"Checking {co} [{sym}]\")\n marketcap = get_market_cap(sym)\n size = id_company_size(marketcap)\n url = get_company_url(sym)\n url_pr = get_press_release_page(url)\n\n marketcaps.append(marketcap)\n sizes.append(size)\n urls.append(url)\n urls_pr.append(url_pr[0])\n\n print(\"Search complete\")\n\n df = pd.DataFrame(\n {\n \"Company\": companies,\n \"Yahoo Listed Co.\": ynames,\n \"Symbol\": symbols,\n \"Exchange\": exchanges,\n \"Market Cap\": marketcaps,\n \"Company Size\": sizes,\n \"Is American\": is_us,\n \"Home URL\": urls,\n \"Press Release URL\": urls_pr,\n }\n )\n\n return df",
"def _prepare_data(self):\n #TODO hardcoded values need to change\n print_info(\"Preprocessing the train data...\")\n self._place_dataset(os.path.join(self._hparams[\"temp-data\"], \"train\"),\n self.TRAIN_OUT_PATH)\n\n print_info(\"Preprocessing the test data...\")\n self._place_dataset(os.path.join(self._hparams[\"temp-data\"], \"test\"),\n self.TEST_OUT_PATH)\n\n print_info(\"Preprocessing the validation data...\")\n self._place_dataset(os.path.join(self._hparams[\"temp-data\"], \"val\"),\n self.VAL_OUT_PATH)",
"def __init__(self):\n\n\n f = open(datapath + '/Data/companylist.csv', 'r')\n\n\n for line in f:\n reg = line.split(',')\n if reg[0] != 'Symbol':\n if reg[0] not in self.cnames:\n self.cnames[reg[0]] = [reg[1], reg[2], reg[3], reg[4].strip()]\n else:\n if reg[4].strip() != 'ASX':\n self.cnames[reg[0]] = [reg[1], reg[2], reg[3], reg[4].strip()]",
"def fill_record_for_company(conn, symbol):\n # call APIs\n info = get_company_info_for(symbol)\n # insert new record into db\n insert_company(conn=conn,\n symbol=symbol,\n name=info[0][\"name\"],\n exchange=info[0][\"exchangeShortName\"])",
"def __init__(self, client_id, login_details, data_set=None):\n self.schema = login_details['company_schema']\n data = self.get_details(client_id)\n self.name = data[1]\n self.id = data[0]\n self.sort_code = data[2]\n self.data_set = data_set\n self.default_email = self.get_default_email()",
"def company_v1(request):\n domain = request.GET.get(\"domain\")\n output_data = {}\n feed_company_from_db1_is_ok = 0\n\n if domain:\n\n domain = domain.lower()\n if domain.startswith(\"www.\"):\n domain = domain.replace(\"www.\", \"\")\n\n try:\n\n output_data = feed_company_from_db1(output_data, domain)\n feed_company_from_db1_is_ok = 1\n\n try:\n\n output_data = company_addition_from_db2(output_data, domain)\n\n except IndexError:\n\n if not feed_company_from_db1_is_ok:\n\n try:\n\n output_data = feed_company_from_clearbit(\n output_data=output_data,\n domain=domain\n )\n\n except IndexError:\n\n try:\n\n clearbit_company = clearbit_api.get_company(domain)\n\n except:\n # Not sure which exceptions I could get from\n # Clearbit's Python lib.\n # I know I could get a KeyError if I'm trying\n # to access a json field that Clearbit put in\n # his docs but forgets to put in the response\n # (actually not anymore because I'm retrieving\n # the dict values with .get() now).\n # But I don't know which error it would give me\n # if api call gives me an error like a http 500\n # error.\n # Sometimes if Clearbit does not find a company\n # it raises a 422 http error (validation error\n # which should only happend for malformed domain\n # names) instead of just returning none...\n\n api_access_logging(\n request,\n \"company\",\n domain,\n \"404\",\n \"2\",\n \"1\"\n )\n return Response(\n {\n \"error_code\": \"2\",\n \"detail\": errors_for_customers[\"2\"]\n },\n status=status.HTTP_404_NOT_FOUND\n )\n\n if clearbit_company:\n\n output_data = feed_company_from_clearbit(\n output_data=output_data,\n cbcompany=clearbit_company\n )\n\n else:\n\n api_access_logging(\n request,\n \"company\",\n domain,\n \"404\",\n \"2\",\n \"1\"\n )\n return Response(\n {\n \"error_code\": \"2\",\n \"detail\": errors_for_customers[\"2\"]\n },\n status=status.HTTP_404_NOT_FOUND\n )\n\n except IndexError:\n\n try:\n\n output_data = feed_company_from_clearbit(\n output_data=output_data,\n domain=domain\n )\n\n except IndexError:\n\n try:\n\n clearbit_company = clearbit_api.get_company(domain)\n\n except:\n\n api_access_logging(\n request,\n \"company\",\n domain,\n \"404\",\n \"2\",\n \"1\"\n )\n return Response(\n {\n \"error_code\": \"2\",\n \"detail\": errors_for_customers[\"2\"]\n },\n status=status.HTTP_404_NOT_FOUND\n )\n\n if clearbit_company:\n\n output_data = feed_company_from_clearbit(\n output_data=output_data,\n cbcompany=clearbit_company\n )\n\n else:\n\n api_access_logging(\n request,\n \"company\",\n domain,\n \"404\",\n \"2\",\n \"1\"\n )\n return Response(\n {\n \"error_code\": \"2\",\n \"detail\": errors_for_customers[\"2\"]\n },\n status=status.HTTP_404_NOT_FOUND\n )\n\n else:\n\n api_access_logging(\n request,\n \"company\",\n domain,\n \"400\",\n \"3\",\n \"1\"\n )\n return Response(\n {\n \"error_code\": \"3\",\n \"detail\": errors_for_customers[\"3\"]\n },\n status=status.HTTP_400_BAD_REQUEST\n )\n\n # Check that user plan allows this request.\n if not customer_throttling_checked(request, domain):\n\n api_access_logging(\n request,\n \"company\",\n domain,\n \"402\",\n \"5\",\n \"1\"\n )\n return Response(\n {\n \"error_code\": \"5\",\n \"detail\": errors_for_customers[\"5\"]\n },\n status=status.HTTP_402_PAYMENT_REQUIRED\n )\n\n api_access_logging(\n request,\n \"company\",\n domain,\n \"200\",\n None,\n \"1\"\n )\n return Response(output_data)",
"def data_preparation(self) -> None:\n self.logger.info('data cleaning')\n self.logger.info('num of secs: {}, num of ipo_dates: {}, num of secs with prices: {}'.format(\n len(self.data),\n len(self.ipo_dates),\n len(self.prices)\n ))\n excluded = []\n excluded = [i.lower() for i in excluded]\n self.logger.info(f'number of excluded: {len(excluded)}')\n for i in excluded:\n self.data.pop(i)\n for s in self.data:\n # columns with empty assets sum (empty columns and other situations)\n self.data[s].dropna(axis='columns', how='any', subset=['A_0'], inplace=True)\n # columns with descriptions (polish and english names of values)\n self.data[s].drop(self.data[s].columns[[0, 1]], inplace=True, axis=1)\n\n self.logger.info(f'number of secs after cleaning: {len(self.data)}')\n data_list = [k for k in self.data.values()]\n self.uber_data = pd.concat(data_list, ignore_index=True, axis=1)\n self.uber_data = self.uber_data.transpose()\n self.uber_data = self.uber_data.loc[:, pd.notnull(self.uber_data.columns)]",
"def _company(self, uid=1):\r\n company = self.env['res.company'].browse(uid)\r\n return {\r\n 'journal': company.pledge_journal.id,\r\n 'product': company.pledge_product,\r\n 'account': company.pledge_receipt_account.id,\r\n 'property_account_income': company.pledge_product.property_account_income_id,\r\n }",
"def __init__(self, company_id):\n self.company_id = company_id",
"def test_website_companies_get_details(self):\n pass",
"def get_data_from_individual_company_pages(soup):\n individual_company_data = []\n usd_roe = get_usd_roe()\n company_code = (\n soup.find(\"meta\", {\"name\": \"description\"}).get(\"content\").split(\":\")[0]\n )\n current_price_usd = float(\n soup.find(\"span\", {\"class\": \"price-section__current-value\"}).text.replace(\n \",\", \"\"\n )\n )\n current_price = round(current_price_usd * usd_roe)\n try:\n p_e_ratio = float(\n soup.find(\n \"div\", {\"class\": \"snapshot__header\"}, string=\"P/E Ratio\"\n ).previous_sibling.replace(\",\", \"\")\n )\n except AttributeError:\n p_e_ratio = 0\n\n try:\n week_52_low = float(\n soup.find(\"div\", {\"class\": \"snapshot__header\"}, string=\"52 Week Low\")\n .previous_sibling.strip()\n .replace(\",\", \"\")\n )\n except AttributeError:\n week_52_low = 1\n\n try:\n week_52_high = float(\n soup.find(\"div\", {\"class\": \"snapshot__header\"}, string=\"52 Week High\")\n .previous_sibling.strip()\n .replace(\",\", \"\")\n )\n except AttributeError:\n week_52_high = 0\n\n unreal_profit_per_year_percent = round((week_52_high / week_52_low - 1) * 100, 2)\n\n individual_company_data.append(\n [company_code, current_price, p_e_ratio, unreal_profit_per_year_percent]\n )\n\n company_df = pd.DataFrame(\n columns=[\"company_code\", \"current_price\", \"P_E\", \"potential_profit_percent\"]\n )\n company_df = company_df.append(\n {\n \"company_code\": company_code,\n \"current_price\": current_price,\n \"P_E\": p_e_ratio,\n \"potential_profit_percent\": unreal_profit_per_year_percent,\n },\n ignore_index=True,\n )\n\n return company_df",
"def setup_dd(self, company):\n self.company = company\n self.apr = .03\n self.minimum_balance = 1000.0",
"def prepare_data(self):\n try:\n self.train_dataset = self.datasets['train']\n self.val_dataset = self.datasets['val']\n try:\n self.test_dataset = self.datasets['test']\n except:\n pass\n except Exception as e:\n print('Data was not succesfully prepared:', e)",
"def company():\n\n company = Company.objects.create(name='Tre G.M.B.H.', country='Germany')\n return company"
] | [
"0.6938614",
"0.67131525",
"0.67055386",
"0.6524873",
"0.63706404",
"0.6255161",
"0.6225849",
"0.6220218",
"0.62027955",
"0.6075558",
"0.5998771",
"0.5954136",
"0.59276706",
"0.5911707",
"0.5911581",
"0.5889178",
"0.5853898",
"0.58073986",
"0.5801998",
"0.5798251",
"0.57885075",
"0.5787987",
"0.574923",
"0.5744494",
"0.5724451",
"0.5703551",
"0.57026386",
"0.5687978",
"0.5684356",
"0.56805325"
] | 0.71914417 | 0 |
Takes a WARC record and returns the domain of the target URI plus a Counter of the domains of outlinked pages, if these exist. | def parse_links(record):
try:
page_url = record['WARC-Header-Metadata']['WARC-Target-URI']
page_domain = urlparse.urlparse(page_url).netloc
links = record['Payload-Metadata']['HTTP-Response-Metadata']['HTML-Metadata']['Links']
out_links = Counter([urlparse.urlparse(url['url']).netloc for url in links])
return (page_domain, out_links)
except:
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def count_domains(urls, screen_name, domains):\n\n def add_domain_to_dict(domains, domain_string):\n \"\"\" helper function\"\"\"\n domain = urlparse(unquote(domain_string)).netloc.replace('www.', '')\n domain = domain.split(':')[0]\n try:\n new_domain = get_domain(domain)\n except ValueError:\n with open('erroneous_domains.txt', 'a') as f:\n f.write(domain)\n return domains\n\n if not domains.get(new_domain):\n domains[new_domain] = {}\n domains[new_domain].get(screen_name, {})\n domains[new_domain][screen_name] = domains[new_domain].get(screen_name, 0) + 1\n\n for u in urls:\n long_url = 'http://api.longurl.org/v2/expand'\n params = {'url': u, 'format': 'json'}\n\n r = do_a_request(long_url, params)\n if r is None:\n continue\n \n json = simplejson.loads(r.text)\n r.close()\n if json.has_key('long-url'):\n domain = get_domain(urlparse(u).netloc.replace('www.', ''))\n if json['long-url'] and domain not in json['long-url']:\n add_domain_to_dict(domains, json['long-url'])\n continue\n if json.has_key('messages') and json['messages'] and \\\n json['messages'][0]['message'] == 'Input is oversize: NOT_SHORTURL.':\n add_domain_to_dict(domains, u)\n else:\n request = do_a_request(u)\n if request is None:\n continue\n \n if request.status_code == 200:\n add_domain_to_dict(domains, request.url)\n else:\n with open('log.txt', 'a') as f:\n f.write(u + ',' + screen_name + '\\n')\n request.close()\n\n return domains",
"def parse_urls(record):\n url_list = []\n try:\n page_url = record['WARC-Header-Metadata']['WARC-Target-URI']\n x = urlparse.urlparse(page_url)\n url_list += [(x.netloc, x.path)]\n except:\n pass\n try: \n links = record['Payload-Metadata']['HTTP-Response-Metadata']['HTML-Metadata']['Links']\n for url in links:\n x = urlparse.urlparse(url['url'])\n url_list += [(x.netloc, x.path)]\n except:\n pass\n \n return url_list",
"def get_most_surfed_page(records):\n uris = {}\n for r in records:\n if r.code != 408:\n uris[r.uri] = uris.get(r.uri, 0) + 1\n max_req = 0\n max_uri = None\n for k,v in uris.items():\n if v > max_req:\n max_req, max_uri = v, k\n print(max_req)\n return max_uri",
"def get_hits_by_domain():\n \n #PUT THIS HERE TO TRY AND RUN THE PYTHON SCRIPT AS ROOT, DIDN'T WORK\n uid = pwd.getpwnam('root')[2]\n os.setuid(uid)\n\n #DEFINE VARS\n nlogdir = '/var/log/nginx/'\n alogdir = '/var/log/httpd/'\n hits_by_domain = {}\n\n try:\n #GET NGINX LOGS\n nfiles = [f for f in os.listdir(nlogdir) if re.match(r'^(?!default)(.+access.log)$', f)]\n\n for i, s in enumerate(nfiles):\n nfiles[i] = os.path.join(nlogdir, s)\n\n #GET APACHE LOGS\n afiles = [f for f in os.listdir(alogdir) if re.match(r'^(?!.+_)(.+\\.access.log)$', f)]\n\n for i, s in enumerate(afiles):\n afiles[i] = os.path.join(alogdir, s)\n\n #COUNT NGINX HITS\n for f in nfiles:\n nregex = \"(\"+str(nlogdir)+\")(.+?)(.access.log)\"\n domain = re.search(nregex, f)\n count = 0\n for line in open(f).xreadlines():\n count +=1\n hits_by_domain[domain.group(2)] = count\n\n #COUNT APACHE HITS\n for f in afiles:\n aregex = \"(\"+str(alogdir)+\")(.+?)(.access.log)\"\n domain = re.search(aregex, f)\n count = 0\n for line in open(f).xreadlines():\n count +=1\n hits_by_domain[domain.group(2)] = count\n\n return hits_by_domain\n\n except Exception as e:\n collectd.info(str(e))\n return",
"def count_by_domain(\n bookmarks: Iterator[sqlite3.Row]\n ) -> Dict[str, int]:\n counts = {}\n\n for bookmark in bookmarks:\n key = bookmark[\"url\"].display_domain\n if key not in counts:\n counts[key] = cherrypy.engine.publish(\n \"bookmarks:domaincount\",\n bookmark[\"url\"]\n ).pop()\n\n return counts",
"def proxy_ref_info(request):\n ref = request.headers.get('referer')\n if ref:\n _, _, uri = split_url(ref)\n if uri.find(\"/\") < 0:\n return None\n first, rest = uri.split(\"/\", 1)\n if first in \"pd\":\n parts = rest.split(\"/\", 1)\n r = (parts[0], parts[1]) if len(parts) == 2 else (parts[0], \"\")\n print(\"Referred by proxy host, uri: %s, %s\", r[0], r[1])\n return r\n return None",
"def find_domain_range(record):\n response = {\"domain\": [], \"range\": []}\n if \"http://schema.org/domainIncludes\" in record:\n if isinstance(record[\"http://schema.org/domainIncludes\"], dict):\n response[\"domain\"] = [record[\"http://schema.org/domainIncludes\"][\"@id\"]]\n elif isinstance(record[\"http://schema.org/domainIncludes\"], list):\n response[\"domain\"] = [\n _item[\"@id\"] for _item in record[\"http://schema.org/domainIncludes\"]\n ]\n if \"http://schema.org/rangeIncludes\" in record:\n if isinstance(record[\"http://schema.org/rangeIncludes\"], dict):\n response[\"range\"] = [record[\"http://schema.org/rangeIncludes\"][\"@id\"]]\n elif isinstance(record[\"http://schema.org/rangeIncludes\"], list):\n response[\"range\"] = [\n _item[\"@id\"] for _item in record[\"http://schema.org/rangeIncludes\"]\n ]\n return (response[\"domain\"], response[\"range\"])",
"def domain_count(self, url: Url) -> int:\n\n sql = \"\"\"SELECT count(*) FROM bookmarks WHERE domain=?\"\"\"\n\n if \"reddit.com\" not in url.domain:\n count = self._selectFirst(sql, (url.domain,))\n else:\n sql += \" AND url LIKE ?\"\n count = self._selectFirst(\n sql,\n (url.domain, f\"%{url.display_domain}%\")\n )\n\n return int(count)",
"def by_domains(self):\n\t\t\n\t\t# TODO: use urllib instead\n\t\turl_format = r'^\\s*(?:(?P<protocol>\\w+)://)?(?P<domain>[\\w\\d\\-\\.]+)(?::(?P<port>\\d+))?/?(?P<everything_else>.*)$'\n\t\tsites = {}\n\t\tfor line in self.source.lines:\n\t\t\ttry:\n\t\t\t\tif self.filter(line):\n\t\t\t\t\tresult = re.match(url_format, line.content.url)\n\t\t\t\t\tif result.group('domain') not in sites.keys():\n\t\t\t\t\t\tsites[result.group('domain')] = 0\n\t\t\t\t\tsites[result.group('domain')] += int(line.content.size)\n\t\t\texcept AttributeError:\n\t\t\t\tpass\n\t\t\n\t\t# TODO: sort; convert to lists is even better\n\t\t\n\t\treturn sites",
"def process_response(self, response):\n\n if response.status == 404:\n return set()\n\n # store response HTTP code if not redirect\n if not (response.status == 301 or response.status == 302):\n if response.url not in self.crawled_urls:\n self.crawled_urls[response.url] = response.status\n\n # some colorful printing\n if self.verbose:\n code = str(response.status)\n extra_print = \"\"\n if code[0] == \"2\":\n color = util.GREEN\n elif code[0] == \"3\":\n color = util.BRIGHT_CYAN\n extra_print = (util.BRIGHT_CYAN + \" --> \" + util.SANE +\n response.headers[\"Location\"].decode())\n elif code[0] == \"4\":\n color = util.RED\n elif code[0] == \"5\":\n color = util.MAGENTA\n else:\n color = util.SANE\n print_str = \" [\" + color + str(response.status) + util.SANE + \"] \" + response.url + extra_print\n util.printit(print_str)\n\n # extract cookies and their paths from HTTP response header\n cookie_paths = self.extract_cookies(response.headers.getlist(\"Set-Cookie\"), response.url)\n cookie_urls = set()\n for path in cookie_paths:\n cookie_urls.add(self.to_absolute_url(path, response.urljoin))\n\n\n # use scrapy's lxml linkextractor to extract links / URLs\n scrapy_urls = set()\n try:\n # extract <base> URL's domain if a <base> tag exists\n base_domain = \"\"\n base_tag_sels = response.xpath(\"//base\")\n for base_tag_sel in base_tag_sels:\n href_sels = base_tag_sel.xpath(\"@href\")\n if href_sels:\n href = href_sels.extract_first()\n base_domain = urllib.parse.urlparse(href).netloc\n break\n\n # setup allowed domains and extract new links\n allowed_domains = [self.domain, \"%s:%s\" % (self.domain, self.port)]\n if base_domain:\n allowed_domains.append(base_domain)\n raw_scrapy_links = LinkExtractor(allow_domains=allowed_domains,\n tags=(\"a\", \"area\", \"script\", \"link\", \"source\", \"img\"),\n attrs=(\"src\", \"href\"),\n deny_extensions=set()).extract_links(response)\n raw_scrapy_urls = [link.url for link in raw_scrapy_links]\n\n # copy discovered URLs and additionally insert initial network location\n scrapy_urls = raw_scrapy_urls.copy()\n if base_domain and base_domain != allowed_domains[0] and base_domain != allowed_domains[1]:\n orig_netloc = urllib.parse.urlparse(response.url).netloc\n for scrapy_url in raw_scrapy_urls:\n parsed_scrapy_url = list(urllib.parse.urlsplit(scrapy_url))\n parsed_scrapy_url[1] = orig_netloc\n scrapy_urls.append(urllib.parse.urlunsplit(parsed_scrapy_url))\n scrapy_urls = set(scrapy_urls)\n except (AttributeError, scrapy.exceptions.NotSupported) as e:\n if str(e) == \"Response content isn't text\":\n # stop processing and return no new URLs\n return set()\n raise e\n\n\n # run the different URL / link discovery mechanisms\n linkfinder_urls, dynamic_urls, form_urls, sub_urls = set(), set(), set(), set()\n if self.config[\"use_linkfinder\"].lower() == \"true\":\n linkfinder_urls = self.run_linkfinder(response.text, response.urljoin)\n if self.config[\"use_selenium\"].lower() == \"true\":\n dynamic_urls = self.extract_dynamic_urls(response.url)\n if self.config[\"extract_info_from_forms\"].lower() == \"true\":\n form_data = extract_form_data(response)\n # extract new URLs and HTTP parameters from parsed form data\n form_urls = self.process_form_data(form_data, response.urljoin)\n\n # extract sub URLs, i.e. 
URLs with parent paths\n sub_urls = extract_sub_urls(response.url)\n\n # extract comments if configured\n if self.config[\"extract_comments\"].lower() == \"true\":\n self.extract_comments(response)\n\n # unite discovered URLs\n urls = set()\n urls |= cookie_urls\n urls |= scrapy_urls\n urls |= linkfinder_urls\n urls |= dynamic_urls\n urls |= form_urls\n urls |= sub_urls\n\n # store info about redirect and add redirect URL to discovered URLs\n if response.status == 301 or response.status == 302:\n location = response.headers[\"Location\"].decode()\n self.redirects[response.url] = {\"code\": response.status, \"to\": location}\n urls.add(self.to_absolute_url(location, response.urljoin))\n\n # process all the discovered URLs, i.e. extract new information and decide which to crawl\n yield_urls = set()\n for url in urls:\n # strip anchor\n if \"#\" in url:\n url = url[:url.rfind(\"#\")]\n\n # replace entities and parse URL\n url = url.replace(\"&\", \"&\")\n url = url.replace(\"&\", \"&\")\n parsed_url = urllib.parse.urlparse(url)\n\n # disregard information about directory listing sorting\n if parsed_url.path.endswith(\"/\") and re.match(\"C=[A-Z];O=[A-Z]\", parsed_url.query):\n continue\n\n # extract GET parameters and cut URL if option is configured\n params = {}\n if parsed_url.query:\n if self.config[\"crawl_parameter_links\"].lower() != \"true\":\n url = \"%s://%s/%s\" % (parsed_url.scheme, parsed_url.netloc, parsed_url.path)\n params = get_query_params(parsed_url.query)\n elif url.endswith(\"?\"):\n url = url[:-1]\n\n # add URL as instance of its path\n if self.url_has_netloc(url) and params:\n self.add_path_instance(parsed_url.path, params, {}, {})\n\n # skip already crawled URLs\n if url in self.found_urls:\n continue\n self.found_urls.add(url)\n\n # skip URLs with different network location\n if not self.url_has_netloc(url):\n continue\n if url == response.url:\n continue\n\n # skip paths that are excluded from crawling\n if self.exclude_paths and url.count(\"/\") > 2:\n check_str = \"/\" + \"/\".join(url.split(\"/\")[3:])\n if any(re_path.match(check_str) for re_path in self.exclude_paths):\n continue\n\n # check whether to add this URL to the to-be-crawled URLs\n if url not in yield_urls:\n # limit the crawling depth\n max_depth = int(self.config[\"max_depth\"])\n if max_depth > 0:\n depth = parsed_url.path.count(\"/\")\n if depth > max_depth:\n continue\n\n # limit the number of times a path can be crawled to avoid endless\n # crawling upon GET parameter variation\n if parsed_url.path not in self.crawled_paths:\n self.crawled_paths[parsed_url.path] = 0\n self.crawled_paths[parsed_url.path] += 1\n if self.crawled_paths[parsed_url.path] > int(self.config[\"max_path_visits\"]):\n continue\n\n yield_urls.add(url)\n\n return yield_urls",
"def redirect(prefix):\n\tif len(prefix)==2:\n\t\tstart=1000\n\t\tend=9999\n\telif len(prefix)==1:\n\t\tstart=10000\n\t\tend=99999\n\twhile start<end+1:\n\t\tcount=start\n\t\ttry:\n\t\t\turl='http://www.adidas.com/us/adidas-/'+prefix+str(count)+'.html'\n\t\t\treq = urllib2.Request(url)\n\t\t\treq.add_header('User-Agent', 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/37.0.2062.120 Chrome/37.0.2062.120 Safari/537.36')\n\t\t\treq.add_header('Accept-Language','en-US,en;q=0.8')\n\t\t\treq.add_header('Connection','keep-alive')\n\t\t\treq.add_header('Accept-Encoding','gzip,deflate,sdch')\n\t\t\tres = urllib2.urlopen(req)\n\t\t\tredirectResult = res.geturl()\n\t\t\ttry:\n\t\t\t\tres = urllib2.urlopen(\"http://demandware.edgesuite.net/sits_pod20-adidas/dw/image/v2/aaqx_prd/on/demandware.static/Sites-adidas-US-Site/Sites-adidas-products/en_US/v1460455685655/zoom/\"+prefix+str(count)+\"_01_standard.jpg?sw=500&sfrm=jpg\")\n\t\t\t\tif res.getcode() == 200:\n\t\t\t\t\tprint prefix+str(count)+' - '+redirectResult\t\t\t\t\n\t\t\t\t\turllib.urlretrieve(\"http://demandware.edgesuite.net/sits_pod20-adidas/dw/image/v2/aaqx_prd/on/demandware.static/Sites-adidas-US-Site/Sites-adidas-products/en_US/v1460455685655/zoom/\"+prefix+str(count)+\"_01_standard.jpg?sw=500&sfrm=jpg\", \"images/\"+prefix+str(count)+\".jpg\")\n\t\t\texcept Exception as e:\n\t\t\t\tprint prefix+str(count)+' - '+str(e)\n\t\texcept Exception as e:\n\t\t\t\tprint prefix+str(count)+' - '+str(e)\n\t\tstart+=1",
"def count_extracted(j_data):\n count = 0\n for record in j_data:\n tmp = {}\n desc = record['lcr_desc'].lower().split('/')\n title = desc[0]\n cat = category(title)\n if cat and 'location' in record:\n count += 1\n return count",
"def get_unreviewed_redirects(site, namespace=\"0\"):\n result = []\n\n def _submit_and_parse(request):\n \"\"\"Divide the answer to the list of values and continue info.\"\"\"\n answer = request.submit()\n if \"pages\" not in answer[\"query\"]:\n return ([], {})\n values = list(answer[\"query\"][\"pages\"].values())\n if \"query-continue\" in answer:\n contin = answer[\"query-continue\"]\n else:\n contin = {}\n return (values, contin)\n\n kwargs = {\n \"action\": \"query\",\n \"prop\": \"links\",\n \"pllimit\": \"5000\",\n \"generator\": \"unreviewedpages\",\n \"gurnamespace\": namespace,\n \"gurfilterredir\": \"redirects\",\n \"gurlimit\": \"5000\"\n }\n\n while True:\n # iterate for gurstart, get list of redirects\n request = Request(site=site, **kwargs)\n (values, contin) = _submit_and_parse(request)\n chunk = [{\"title\": value[\"title\"], \"links\": []} for value in values]\n\n while True:\n # iterate for plcontinue, get list of links (ie target candidates)\n for key, value in enumerate(values):\n if \"links\" in value:\n chunk[key][\"links\"] += [links[\"title\"] for links in value[\"links\"]]\n if \"links\" in contin:\n request[\"plcontinue\"] = contin[\"links\"][\"plcontinue\"]\n (values, contin) = _submit_and_parse(request)\n continue\n else:\n break\n result += chunk\n\n if \"unreviewedpages\" in contin:\n kwargs[\"gurstart\"] = contin[\"unreviewedpages\"][\"gurstart\"]\n continue\n else:\n break\n\n # filter result: redirects with two or more links aren't any interesting for us\n result = [(x[\"title\"], x[\"links\"][0]) for x in filter(lambda x: len(x[\"links\"]) == 1, result)]\n return result",
"def extract_page_urls(self, _):\n url = \"https://mossadams.taleo.net/careersection/rest/jobboard/searchjobs?lang=en&portal=4160751617\"\n page_num = 1\n last_count = 0\n this_count = 0\n\n while True:\n last_count = len(self.urls_to_scrape)\n payload = PAYLOAD + '\"pageNo\":' + str(page_num) + \"}\"\n json_data = self.post_request(url, out_format='json', headers=HEADERS, data=payload)\n\n for job in json_data['requisitionList']:\n job_url = \"https://mossadams.taleo.net/careersection/6/jobdetail.ftl?job=\" + job['contestNo']\n self.urls_to_scrape.add(job_url)\n\n # check to see if any new records were scraped; if not, I've reach the end\n this_count = len(self.urls_to_scrape)\n if last_count == this_count:\n break\n else:\n last_count = this_count\n page_num += 1",
"def count_urls(self):\n return self.request(\"count:Message_Url\", [ None ])",
"def create_results(self):\n\n webhost_map = {}\n # put the found paths into the webhost map\n self.process_crawled_urls(webhost_map)\n # put the found param infos into the webhost map\n self.process_param_infos(webhost_map)\n # put the found cookies into the webhost map and get as by product new domains\n new_domains = self.process_cookies(webhost_map)\n # get the newly discovered network locations\n new_netlocs = self.get_new_netlocs()\n\n # append new domains to the new network locations\n for new_domain in new_domains:\n if self.port != 80 and self.port != 443:\n new_netlocs.add(new_domains + \":\" + self.port)\n else:\n new_netlocs.add(new_domain)\n\n # process discovered redirects and put into webhost map if appropriate\n for url, redirect in self.redirects.items():\n path = urllib.parse.urlparse(url).path\n if url + \"/\" == redirect[\"to\"]:\n continue\n else:\n if not self.url_has_netloc(redirect[\"to\"]):\n redirect_to = redirect[\"to\"]\n else:\n redirect_to = urllib.parse.urlparse(redirect[\"to\"]).path\n\n code = str(redirect[\"code\"])\n if code not in webhost_map:\n webhost_map[code] = {}\n if path not in webhost_map[code]:\n webhost_map[code][path] = {}\n\n redirect_info = \"redirect to %s\" % redirect_to\n if \"misc_info\" not in webhost_map[code][path]:\n webhost_map[code][path][\"misc_info\"] = redirect_info\n elif not any(val == redirect_info for val in webhost_map[code][path].values()):\n for i in range(10):\n alt_key = \"misc_info_%d\" % i\n if alt_key not in webhost_map[code][path]:\n webhost_map[code][path][alt_key] = redirect_info\n break\n\n return webhost_map, new_netlocs, self.comments",
"def read_callback(data=None):\n\n hits_by_domain = get_hits_by_domain()\n\n if not hits_by_domain:\n collectd.info('hits_by_domain not collected successfully')\n pass\n else:\n for key in hits_by_domain:\n metric = collectd.Values()\n metric.plugin = 'hits_by_domain'\n metric.type = 'count'\n metric.type_instance = key\n metric.values = [hits_by_domain[key]]\n metric.dispatch()",
"def queue_domain(event, context):\n\n domain = event['domain']\n fetch_limit = int(os.environ['PAGE_FETCH_LIMIT'])\n if 'limit' in event:\n fetch_limit = int(event['limit'])\n\n index = os.environ['CC_INDEX']\n if 'index' in event:\n index = event['index']\n\n # pull all entries for this domain from index\n indices = list(get_warc_indices_for_domain(domain, index))\n\n # sample returned indices to 'limit' (where they exceed 'limit')\n sampled_indices = indices\n if fetch_limit < len(indices):\n sampled_indices = random.sample(indices, fetch_limit)\n\n # for each sampled index, get stored page text by URL\n lambda_client = boto3.client('lambda')\n\n results = list()\n\n for index in sampled_indices:\n results.append(\n lambda_client.invoke(\n FunctionName='fetch_wet_entry',\n Payload=json.dumps(index),\n InvocationType='Event'\n )\n )\n\n return {\n \"total_index_count\": len(indices),\n \"requested_indices\": sampled_indices\n }",
"def domain_parser():\n\tinput_handle=\"blast_hits.fasta\"\n\t# File with: (1) the domains of each hit and (2) domains information\n\tdomains= \"domains_hits.txt\"\n\toutput_handle = open(domains, \"w\")\n\toutput_handle.write(\"#This file contains the domains of each hit.\\n#At the bottom, you will find detail information of all the domains detected.\\n\")\n\toutput_handle.write(\"#We strongly recommend to open this file with Visual Studio Code.\\n#Because when the names of the domains are too large, in regular editors the table looks awful.\\n\")\n\toutput_handle.write(\"#Here it is only showed how many times a pattern is present.\\n#In the figure of the domains you will find the position of each domain.\\n\\n\")\n\taccession_list=[] \t# List of prosite.doc accessions of the domains that had been found\n\tdomains_dict=dict() # dictionary that saves matches\n\tcount=1\n\tmax_seq_len=0 # Keep larger sequence to plot x-axe\n\t# Loop to go through hits\n\tfor seq_record in SeqIO.parse(input_handle, \"fasta\"):\n\t\toutput_handle.write(str(seq_record.id)+\"\\n\") # print identifier of the hit\n\t\toutput_handle.write(str(seq_record.seq)+\"\\n\") # print sequence of the hit\n\t\tif len(seq_record.seq)>max_seq_len:\n\t\t\tmax_seq_len=len(seq_record.seq)\n\t\t# Make a table for each hit with the domains, that contains the following fields: name, accession, description and pattern\n\t\tx=PrettyTable()\n\t\tx.field_names=[\"name\",\"accession\",\"description\",\"pattern\",\"repetitions\"]\n\n\t\t# Loop to go through prosite domains\n\t\thandle = open(\"prosite.dat\",\"r\")\n\t\trecords = Prosite.parse(handle)\n\t\tfor record in records:\n\t\t\t# prosite.dat preparation for parsing\n\t\t\t# {} -> [^]\n\t\t\tpattern = record.pattern.upper()\n\t\t\tpattern = pattern.replace(\"{\", \"[^\")\n\t\t\tpattern = pattern.replace(\"}\", \"]\")\t\n\t\t\t# - -> \"\"\n\t\t\tpattern = pattern.replace(\"-\", \"\")\t\n\t\t\t# . 
-> \"\"\n\t\t\tpattern = pattern.replace(\".\", \"\")\t\n\t\t\t# X|x -> \"[ARNDCQEGHILKMFPSTWYV]\"\n\t\t\tAAS=\"[ARNDCQEGHILKMFPSTWYV]\"\n\t\t\tpattern = pattern.replace(\"x\", AAS)\n\t\t\tpattern = pattern.replace(\"X\", AAS)\t\n\t\t\t# () -> {}\n\t\t\tpattern = pattern.replace(\"(\", \"{\")\n\t\t\tpattern = pattern.replace(\")\", \"}\")\t\n\n\t\t\t# >] -> ]?$\n\t\t\tpattern = pattern.replace(\">]\", \"]?$\")\t\n\n\t\t\t# < -> ^\n\t\t\t# > -> $\n\t\t\tpattern = pattern.replace(\"<\", \"^\")\t\n\t\t\tpattern = pattern.replace(\">\", \"$\")\t\n\t\t\tif pattern != \"\":\n\t\t\t\t# Look if the hit contains the current patter\n\t\t\t\tif re.search(r\"\"+str(pattern), str(seq_record.seq).upper()): # if found\n\t\t\t\t\tif record.pdoc not in accession_list:\n\t\t\t\t\t\t# Save pdoc accession in the list of prosite.doc accessions\n\t\t\t\t\t\t# if it is not already\n\t\t\t\t\t\taccession_list.append(record.pdoc)\n\t\t\t\t\tmatches = re.finditer(r\"\"+str(pattern), str(seq_record.seq).upper())\n\t\t\t\t\treps=0\n\t\t\t\t\tfor match in matches: # save all matches in a dictionary to plot them later\n\t\t\t\t\t\tdomains_dict[count]=[seq_record.id, len(seq_record.seq),record.name,match.start(),match.end()]\n\t\t\t\t\t\tcount=count+1\n\t\t\t\t\t\treps=reps+1\n\t\t\t\t\tx.add_row([record.name,record.accession,record.description,record.pattern, reps]) # add found domain to table\n\n\t\toutput_handle.write(str(x)+\"\\n\") # add table of hit to domains_hits.txt\n\n\t# At the end of the tables, print information of all the domains that had been found\n\toutput_handle.write(\"\\n\")\n\trecord_text_list=DocParser(accession_list)\n\tfor text in record_text_list:\n\t\toutput_handle.write(text)\n\treturn (domains_dict,max_seq_len,accession_list)",
"def extract_urls(genome):\n itemid = genome.get('metadata').get('identifier')\n urls = set([url for url in genome['urls'] if 'archive.org' not in url])\n db_urls_found(itemid, urls)",
"def get_domain_of_page(page_url):\r\n db = connect()\r\n cursor = db.cursor()\r\n sql_statement = \"\"\"\r\n SELECT on_progress_domains.url\r\n FROM on_progress_pages INNER JOIN on_progress_domains ON on_progress_pages.domain_id = on_progress_domains.domain_id\r\n WHERE on_progress_pages.page_url = %(d)s\r\n \"\"\"\r\n try:\r\n cursor.execute(sql_statement, {'d':page_url})\r\n page = cursor.fetchone()\r\n except:\r\n cursor.close()\r\n db.close()\r\n raise RuntimeError(\"An Exception happened with the Database, make sure you are connected\")\r\n cursor.close()\r\n db.close()\r\n return page",
"def report_domain():\n req_data = request.get_json()\n\n # is authentication token correct?\n\n try:\n auth_token = Token.query.filter_by(auth_token=request.headers.get('Authorization')).first()\n except:\n return {\"alternatives\" : \"Database Error with token!\"}\n if not auth_token:\n return {\"alternatives\": \"Unauthorized!\"}\n\n now = datetime.datetime.now()\n\n # Have we seen this domain before?\n try:\n domain = Domain.query.filter_by(domain=req_data['domain']).first()\n except:\n return {\"report\" : \"Database Error with domain query!\"}\n\n if domain: # we've seen it before\n domain_id = domain.id\n # Have we seen the mirror before?\n try:\n mirror = Mirror.query.filter_by(mirror_url=req_data['mirror_url']).first()\n except:\n return {\"report\" : \"Database Error with mirror query!\"}\n if mirror:\n mirror_id = mirror.id\n else:\n mirror = False\n else: # Let's add it\n try:\n domain = Domain(domain=req_data['domain'])\n db.session.add(domain)\n db.session.commit()\n except:\n return {\"report\" : \"Database Error with mirror addition!\"}\n domain_id = domain.id\n mirror = False # No domain, no mirror\n \n # Add mirror\n if not mirror:\n mirror = Mirror(\n mirror_url=req_data['mirror_url'],\n domain_id=domain_id)\n try:\n db.session.add(mirror)\n db.session.commit()\n except:\n return {\"report\" : \"Database Error with mirror addition!\"}\n mirror_id = mirror.id\n\n # check values for lat/long/accuracy\n try:\n float(req_data['latitude'])\n except ValueError:\n req_data['latitude'] = None\n \n try:\n float(req_data['longitude'])\n except ValueError:\n req_data['longitude'] = None\n try:\n int(req_data['accuracy'])\n except ValueError:\n req_data['accuracy'] = None\n\n # Make the report\n req_data['auth_token'] = auth_token.auth_token\n req_data['date_reported'] = now\n req_data['domain_id'] = domain_id\n req_data['mirror_id'] = mirror_id\n req_data.pop('domain')\n req_data.pop('mirror_url')\n try:\n report = Report(**req_data)\n db.session.add(report)\n db.session.commit()\n except:\n return {\"report\" : \"Database Error with report!\"}\n\n\n return {\"report\": \"Successfully reported.\"}",
"def process_url(self, url):\n LogParser.increment_count(self.urls, url)",
"def orphaned_pages(self):\n linked = set()\n total = {p for p in self.storage}\n for doc in self.index.run_query(self.name, query.Every('has_links'), limit=10000):\n for link in doc['links'].split():\n link = link.split(':', 1)[0]\n linked.add(link.replace('%20', ' '))\n return sorted(total - linked)",
"def _findRedirect(self, page):\n htmlRes = [('koreascience', re.compile(\"<script>location.href='(.+)'</script>\"))]\n for domainTag, htmlRe in htmlRes:\n if domainTag not in page['url']:\n continue\n logging.debug('redirect: domain match')\n match = htmlRe.search(page['data'])\n if match != None:\n url = match.group(1)\n url = urlparse.urljoin(page['url'], url)\n logging.debug('redirect: found URL %s' % url)\n page = self._httpGetDelay(url, 1)\n page = parseHtmlLinks(page)\n\n return page",
"def crawler(website_url):\n\n try:\n\n # open and read the website\n pageFile = urllib2.urlopen(website_url)\n pageHtml = pageFile.read()\n pageFile.close()\n\n # call BeautifulSoup on an array of lines in string format\n soup = BeautifulSoup(\"\".join(pageHtml), \"html.parser\")\n # print soup.prettify()[0:1000]\n\n # find all links with hashtag cat, limit to 100 results\n # FIXME add hashtag cat requirement (string = \"#cat\")\n pageLinks = soup.findAll(\"a\", {\"href\": True}, limit=100)\n # import pdb; pdb.set_trace()\n page_URLs = []\n\n for pageLink in pageLinks:\n pageLink = pageLink['href']\n\n # if URL does not have a domain, add the main page's domain'\n if pageLink[0] == '/' and pageLink[:1] != '//':\n pageLink = website_url + pageLink\n\n # check if item in db, if not - add to db and commit\n existing_page = session.query(Page).filter_by(page_URL=pageLink).first()\n\n # add to array of link strings\n page_URLs.append(pageLink)\n\n if not existing_page:\n page_URL = Page(page_URL=pageLink)\n session.add(page_URL)\n session.commit()\n\n # import pdb; pdb.set_trace()\n return page_URLs\n\n except urllib2.URLError as e:\n # exception handling for URLError\n if hasattr(e, 'reason'):\n print \"We failed to reach a server.\"\n print \"Reason: \", e.reason\n # exception handling for HTTPError\n elif hasattr(e, 'code'):\n print 'The server couldn\\'t fulfill the request.'\n print 'Error code; ', e.code\n else:\n print 'Everything is fine.'",
"def get_domain_id_by_domainurl(domain_url):\r\n db = connect()\r\n cursor = db.cursor()\r\n sql_statement = \"\"\"\r\n SELECT domain_id FROM `domains` WHERE domain_url = %(d)s\r\n \"\"\"\r\n try:\r\n cursor.execute(sql_statement, {'d':domain_url})\r\n page = cursor.fetchone()\r\n except:\r\n cursor.close()\r\n db.close()\r\n raise RuntimeError(\"An Exception happened with the Database, make sure you are connected\")\r\n cursor.close()\r\n db.close()\r\n return page",
"def analyze(url):\n\n #Note : Using the function to count repeated words and sorted by value\n\n print('\\n\\nVisiting',url)\n print('The most 25 common word')\n print('\\n{:30} {:6}\\n'.format('Word','Count'))\n\n content = urlopen(url).read().decode()\n collector = Collector(url)\n collector.feed(content)\n urls = collector.getLinks()\n\n words_lst = collector.getdata()\n print(words_lst)\n # word_count = Counter(words_lst) # use collection\n # most_25_common = word_count.most_common(25) #\n\n word_count = frequency(words_lst)\n sorted_word_count = sorted(word_count.items(), key = lambda x : x[1],reverse= True)\n\n for word,count in sorted_word_count[:25]:\n print ('{:30}{:5}'.format(word,count))\n\n #return word_count\n\n # for word,count in most_25_common:\n # print('{:30} {:5}'.format(word,count))\n # return urls",
"def get_pages_count(link_headers):\n last_header = link_headers.get('last')\n if last_header:\n return int(parse_qs(urlparse(last_header['url']).query)['page'][0])\n return 1",
"def hash_entries(entries):\n d = dict()\n for e in entries:\n uri = e[\"uri\"]\n domain = re.match(\"^/view\\d*/(.*)$\", uri).group(1)\n if domain:\n visitor_id = e[\"visitor_id\"]\n if d.has_key(domain):\n store_page_entries = d[domain]\n store_page_entries.append(visitor_id)\n else:\n d[domain] = [visitor_id]\n print \"Retrieved {0} unique domains.\".format(len(d))\n return d"
] | [
"0.5832478",
"0.5743405",
"0.5526221",
"0.54275036",
"0.5161741",
"0.51254606",
"0.5115564",
"0.50487834",
"0.5029015",
"0.49536353",
"0.49229515",
"0.49164706",
"0.48799717",
"0.48791456",
"0.4861495",
"0.48533532",
"0.48137897",
"0.4801171",
"0.47810617",
"0.4773569",
"0.47550088",
"0.4747353",
"0.46952662",
"0.46906394",
"0.46851605",
"0.4673478",
"0.46612337",
"0.4660336",
"0.46552795",
"0.46540755"
] | 0.68593603 | 0 |
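Illustrative usage sketch, not part of the dataset rows: it assumes the record is Common Crawl WAT-style JSON metadata (as the key names above suggest) and swaps the Python 2 urlparse module used in parse_links for Python 3's urllib.parse; the function name parse_links_py3 and the sample_record dict are hypothetical.

# A Python 3 variant of parse_links applied to a hand-built WAT-style record;
# the dict nesting mirrors the keys the function above reads.
from collections import Counter
from urllib.parse import urlparse

def parse_links_py3(record):
    try:
        page_url = record['WARC-Header-Metadata']['WARC-Target-URI']
        page_domain = urlparse(page_url).netloc
        links = record['Payload-Metadata']['HTTP-Response-Metadata']['HTML-Metadata']['Links']
        out_links = Counter(urlparse(url['url']).netloc for url in links)
        return (page_domain, out_links)
    except KeyError:
        return None

sample_record = {  # hypothetical record, trimmed to the fields the function reads
    'WARC-Header-Metadata': {'WARC-Target-URI': 'http://example.com/index.html'},
    'Payload-Metadata': {'HTTP-Response-Metadata': {'HTML-Metadata': {'Links': [
        {'url': 'http://example.org/a'},
        {'url': 'http://example.org/b'},
        {'url': 'http://example.com/about'},
    ]}}},
}

print(parse_links_py3(sample_record))
# ('example.com', Counter({'example.org': 2, 'example.com': 1}))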
Takes a WARC record and outputs all (domain, path) pairs from its URIs, if these exist. It searches both the target URI and the outlinks and does not distinguish between them. | def parse_urls(record):
url_list = []
try:
page_url = record['WARC-Header-Metadata']['WARC-Target-URI']
x = urlparse.urlparse(page_url)
url_list += [(x.netloc, x.path)]
except:
pass
try:
links = record['Payload-Metadata']['HTTP-Response-Metadata']['HTML-Metadata']['Links']
for url in links:
x = urlparse.urlparse(url['url'])
url_list += [(x.netloc, x.path)]
except:
pass
return url_list | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse_links(record):\n try:\n page_url = record['WARC-Header-Metadata']['WARC-Target-URI']\n page_domain = urlparse.urlparse(page_url).netloc\n links = record['Payload-Metadata']['HTTP-Response-Metadata']['HTML-Metadata']['Links']\n out_links = Counter([urlparse.urlparse(url['url']).netloc for url in links])\n return (page_domain, out_links)\n except:\n return None",
"def _filter_return_url_from_list(self, paths, har=None):\r\n if not har:\r\n har = self.har\r\n \r\n matches = []\r\n for entry in har[\"log\"][\"entries\"]:\r\n for path in paths:\r\n if path in entry[\"request\"][\"url\"]:\r\n temp = entry[\"request\"][\"url\"].encode('ascii', 'ignore')\r\n matches.append(temp)\r\n return matches",
"def extract_urls(genome):\n itemid = genome.get('metadata').get('identifier')\n urls = set([url for url in genome['urls'] if 'archive.org' not in url])\n db_urls_found(itemid, urls)",
"def by_navigations(self):\n\t\t\n\t\turl_format = r'^\\s*(?:(?P<protocol>\\w+)://)?(?P<domain>[\\w\\d\\-\\.]+)(?::(?P<port>\\d+))?/?(?P<everything_else>.*)$'\n\t\tnavigations = {}\n\t\tfor line in self.source.lines:\n\t\t\ttry:\n\t\t\t\tif self.filter(line):\n\t\t\t\t\tresult = re.match(url_format, line.content.url)\n\t\t\t\t\tif line.content.source_address not in navigations.keys():\n\t\t\t\t\t\tnavigations[line.content.source_address] = {}\n\t\t\t\t\tif line.content.user not in navigations[line.content.source_address]:\n\t\t\t\t\t\tnavigations[line.content.source_address][line.content.user] = {}\n\t\t\t\t\tif result.group('domain') not in navigations[line.content.source_address][line.content.user]:\n\t\t\t\t\t\tnavigations[line.content.source_address][line.content.user][result.group('domain')] = 0\n\t\t\t\t\tnavigations[line.content.source_address][line.content.user][result.group('domain')] += int(line.content.size)\n\t\t\texcept AttributeError:\n\t\t\t\tpass\n\t\t\n\t\tflat_navigations = []\n\t\tfor address in navigations.keys():\n\t\t\t# node = socket.getfqdn(address)\n\t\t\tnode = address\n\t\t\tfor user in navigations[address].keys():\n\t\t\t\tfor domain in navigations[address][user].keys():\n\t\t\t\t\tflat_navigations.append([user, domain, node, address, navigations[address][user][domain]])\n\t\t\n\t\t# What happend here? Why an exception handler is needed?\n\t\ttry:\n\t\t\tflat_navigations.sort(key = lambda t: t[4], reverse = True)\n\t\texcept (e):\n\t\t\tpass\n\t\t\n\t\treturn flat_navigations",
"def reformat(array):\n global searched_domain\n response = []\n for tag in array:\n link = tag.get(\"href\", None)\n if link is not None:\n p = parse.urlparse(link)\n if re.match(searched_netloc, p.netloc):\n if p.scheme == \"\":\n link = parse.ParseResult(\"http\", *p[1:]).geturl()\n response.append(link)\n return response",
"def create_results(self):\n\n webhost_map = {}\n # put the found paths into the webhost map\n self.process_crawled_urls(webhost_map)\n # put the found param infos into the webhost map\n self.process_param_infos(webhost_map)\n # put the found cookies into the webhost map and get as by product new domains\n new_domains = self.process_cookies(webhost_map)\n # get the newly discovered network locations\n new_netlocs = self.get_new_netlocs()\n\n # append new domains to the new network locations\n for new_domain in new_domains:\n if self.port != 80 and self.port != 443:\n new_netlocs.add(new_domains + \":\" + self.port)\n else:\n new_netlocs.add(new_domain)\n\n # process discovered redirects and put into webhost map if appropriate\n for url, redirect in self.redirects.items():\n path = urllib.parse.urlparse(url).path\n if url + \"/\" == redirect[\"to\"]:\n continue\n else:\n if not self.url_has_netloc(redirect[\"to\"]):\n redirect_to = redirect[\"to\"]\n else:\n redirect_to = urllib.parse.urlparse(redirect[\"to\"]).path\n\n code = str(redirect[\"code\"])\n if code not in webhost_map:\n webhost_map[code] = {}\n if path not in webhost_map[code]:\n webhost_map[code][path] = {}\n\n redirect_info = \"redirect to %s\" % redirect_to\n if \"misc_info\" not in webhost_map[code][path]:\n webhost_map[code][path][\"misc_info\"] = redirect_info\n elif not any(val == redirect_info for val in webhost_map[code][path].values()):\n for i in range(10):\n alt_key = \"misc_info_%d\" % i\n if alt_key not in webhost_map[code][path]:\n webhost_map[code][path][alt_key] = redirect_info\n break\n\n return webhost_map, new_netlocs, self.comments",
"def getURLs():",
"def _get_records(self, domain, domain_id, record):\n for needed in [\"type\", \"source\", \"target\"]:\n if needed not in record:\n raise ValueError(\"{} not provided in record dict\".format(needed))\n\n if record[\"source\"] == \".\":\n fqdn = domain\n else:\n fqdn = \"{source}.{domain}\".format(source=record[\"source\"], domain=domain)\n return list(\n filter(\n lambda x: (\n x[\"source_idn\"] == fqdn\n and x[\"type\"] == record[\"type\"]\n and x[\"target\"] == record[\"target\"]\n ),\n self._get_request(\"/1/domain/{domain_id}/dns/record\".format(domain_id=domain_id)),\n )\n )",
"def _filter_return_url(self, url, har=None):\r\n if not har:\r\n har = self.har\r\n \r\n matches = []\r\n for entry in har[\"log\"][\"entries\"]:\r\n if url in entry[\"request\"][\"url\"]:\r\n temp = entry[\"request\"][\"url\"].encode('ascii', 'ignore')\r\n matches.append(temp)\r\n return matches",
"def find_domain_range(record):\n response = {\"domain\": [], \"range\": []}\n if \"http://schema.org/domainIncludes\" in record:\n if isinstance(record[\"http://schema.org/domainIncludes\"], dict):\n response[\"domain\"] = [record[\"http://schema.org/domainIncludes\"][\"@id\"]]\n elif isinstance(record[\"http://schema.org/domainIncludes\"], list):\n response[\"domain\"] = [\n _item[\"@id\"] for _item in record[\"http://schema.org/domainIncludes\"]\n ]\n if \"http://schema.org/rangeIncludes\" in record:\n if isinstance(record[\"http://schema.org/rangeIncludes\"], dict):\n response[\"range\"] = [record[\"http://schema.org/rangeIncludes\"][\"@id\"]]\n elif isinstance(record[\"http://schema.org/rangeIncludes\"], list):\n response[\"range\"] = [\n _item[\"@id\"] for _item in record[\"http://schema.org/rangeIncludes\"]\n ]\n return (response[\"domain\"], response[\"range\"])",
"def get_redirect_uris(\n domains: List[str], redirect_path_sign_in: str, redirect_path_sign_out: str\n) -> Dict[str, List[str]]:\n return {\n \"sign_in\": [f\"{domain}{redirect_path_sign_in}\" for domain in domains],\n \"sign_out\": [f\"{domain}{redirect_path_sign_out}\" for domain in domains],\n }",
"def lookup(redirects, request):\n\n # Compute and cache `request.path` once, rather than recomputing for each\n # redirect rule that the path is matched against.\n path = request.path\n\n for redirect in redirects:\n if redirect.prefix and path.startswith(redirect.src):\n suffix = path.replace(redirect.src, \"\", 1)\n return _dst_root(request, redirect) + suffix\n if not redirect.prefix and path == redirect.src:\n return _dst_root(request, redirect)\n return None",
"def process_trace(self, trace):\n # type: (List[Span]) -> Optional[List[Span]]\n for span in trace:\n if span.parent_id is None and span.get_tag(http.URL) is not None:\n url = span.get_tag(http.URL)\n for regexp in self._regexps:\n if regexp.match(url):\n return None\n return trace",
"def get_urls(self, queries):\n domains = defaultdict(list)\n for q in queries:\n q = \"\\\"\" + q + \"\\\"\"\n results = self.engine.search(q)\n\n for result in results: \n url = result.url\n domain = self.get_domain(url)\n domains[domain].append(q) \n return domains",
"def routes():\n import urllib.request, urllib.parse, urllib.error\n output = []\n for rule in app.url_map.iter_rules():\n options = {}\n for arg in rule.arguments:\n options[arg] = \"[{0}]\".format(arg)\n methods = ','.join(rule.methods)\n url = url_for(rule.endpoint, **options)\n line = urllib.parse.unquote(\"{:50s} {:20s} {}\".format(rule.endpoint, methods, url))\n output.append(line)\n for line in sorted(output):\n print (line)",
"def hash_entries(entries):\n d = dict()\n for e in entries:\n uri = e[\"uri\"]\n domain = re.match(\"^/view\\d*/(.*)$\", uri).group(1)\n if domain:\n visitor_id = e[\"visitor_id\"]\n if d.has_key(domain):\n store_page_entries = d[domain]\n store_page_entries.append(visitor_id)\n else:\n d[domain] = [visitor_id]\n print \"Retrieved {0} unique domains.\".format(len(d))\n return d",
"def url_map():\n from operator import itemgetter\n\n rules = []\n for rule in app.url_map.iter_rules():\n # Generate a human-readable path for the url\n tmp = []\n for is_dynamic, data in rule._trace:\n if is_dynamic:\n tmp.append(u'<%s>' % data)\n else:\n tmp.append(data)\n path = ''.join(tmp).lstrip('|')\n\n # Figure out what auth decorator was used for the endpoint\n view_func = app.view_functions[rule.endpoint]\n if hasattr(view_func, '__authblueprint_authfunc__'):\n auth = view_func.__authblueprint_authfunc__.__name__\n else:\n auth = 'NONE'\n\n rules.append(dict(\n path = path,\n endpoint = rule.endpoint,\n methods = rule.methods,\n auth = auth,\n ))\n\n # Emit them nicely\n last_blueprint = None\n for rule in sorted(rules, key=itemgetter('endpoint')):\n blueprint = rule['endpoint'].split('.')[0]\n if not last_blueprint or blueprint != last_blueprint:\n if last_blueprint:\n print\n print blueprint\n print '-' * len(blueprint)\n last_blueprint = blueprint\n\n print '%-75s %-30s -> %s (%s)' % (rule['path'], rule['auth'], rule['endpoint'], '/'.join(list(rule['methods'])))",
"def _get_urls_drs(file_type, id, reference_name = None, start = None, end = None):\n\n file_exists = file_exists_drs(id)\n if file_exists:\n file_format = \"VCF\" \n urls = _create_slices(CHUNK_SIZE, id, reference_name, start, end)\n response = {\n 'htsget': {\n 'format': file_format,\n 'urls': urls \n }\n }\n return {\"response\": response, \"http_status_code\": 200}\n else:\n err = f\"No {file_type} found for id: {id}\" \n return {\"response\": err, \"http_status_code\": 404}",
"def check_uri_redirects(df, column, replace=True, custom_name_postfix=None, redirection_property=\"http://dbpedia.org/ontology/wikiPageRedirects\", endpoint=DBpedia, regex_filter=\"dbpedia\", bundled_mode=True, uri_data_model=False, progress=True, caching=True):\n\n if custom_name_postfix == \"\":\n\n raise ValueError(\"'custom_name_postfix' can't be an empty string. If you don't want to use a custom_name_postfix, please set the attribute to None\")\n\n df = df.copy()\n\n if bundled_mode and not uri_data_model:\n\n values = \" ( <\"+df[column].str.cat(sep=\"> ) ( <\")+\"> ) \"\n\n query = \"SELECT DISTINCT ?value ?redirect WHERE {VALUES (?value) {\" +values+\"} ?value <\"+redirection_property+\"> ?redirect . }\"\n\n result_df = endpoint_wrapper(query, endpoint, caching=caching).drop_duplicates().reset_index(drop=True)\n\n else: \n \n result_df = pd.DataFrame()\n \n if uri_data_model:\n \n query = \"SELECT DISTINCT ?value ?redirect WHERE {VALUES (?value) {(<**URI**>)} ?value <\"+redirection_property+\"> ?redirect . }\"\n\n result_df = uri_querier(df, column, query, regex_filter=regex_filter, progress=progress, caching=caching)\n \n else:\n\n for uri in df[column].iteritems():\n\n if pd.notna(uri[1]):\n\n query = \"SELECT DISTINCT ?value ?redirect WHERE {?value <\"+redirection_property+\"> ?redirect . FILTER (?value = <\"+uri[1]+\">) }\"\n\n result = endpoint_wrapper(query, endpoint, caching=caching)\n\n result_df = result_df.append(result)\n\n else:\n pass\n\n result_df = result_df.rename({\"callret-0\": \"value\"}, axis=\"columns\").drop_duplicates().reset_index(drop=True)\n\n if result_df.empty:\n\n return df\n\n else:\n\n if custom_name_postfix == None:\n\n new_attribute_name = column+\"_redirect\"\n\n else:\n\n new_attribute_name = column+custom_name_postfix\n\n result_df = pd.merge(df, result_df, how=\"left\", left_on=column, right_on=\"value\").drop(\"value\",axis=1).rename(columns={\"redirect\":new_attribute_name})\n\n if replace:\n\n result_df.loc[(pd.isnull(result_df[new_attribute_name])), new_attribute_name] = result_df[column]\n result_df.drop(column, axis=1, inplace=True)\n result_df.rename(columns={new_attribute_name: column}, inplace=True)\n\n return result_df",
"def service_urls(records, service='odp:url'):\n service_string = 'urn:x-esri:specification:ServiceType:' + service\n urls = []\n for key, rec in records.items():\n # Create a generator object, and iterate through it until the match is\n # found if not found, gets the default value (here \"none\").\n url = next((d['url'] for d in rec.references if\n d['scheme'] == service_string), None)\n if url is not None:\n urls.append(url)\n return urls",
"def get_record_urls(split):\n\n stream = os.popen(f'gsutil ls {WAYMO_DATASET_BUCKET}/{split}')\n urls = list(filter(None, stream.read().split('\\n')))\n return urls",
"def site_map(line):\n args = shlex.split(line)\n if len(args) > 0 and args[0] == 'p':\n paths = True\n else:\n paths = False\n ids = yield async_main_context_ids()\n paths_set = set()\n for reqid in ids:\n req = yield Request.load_request(reqid)\n if req.response and req.response.response_code != 404:\n paths_set.add(req.path_tuple)\n tree = sorted(list(paths_set))\n if paths:\n for p in tree:\n print ('/'.join(list(p)))\n else:\n print_tree(tree)",
"def copyurls(door):\n return {name: Url(url.path) for name, url in door.urls.items()}",
"def read_gobuster_output(url: str, output_path: str) -> set :\n regex = re.compile(r\"^(\\/.*?) .*\\(Status: ([0-9]{3})\\).*\\[Size: [0-9]+\\](?: \\[--> (.*)\\])?$\")\n results = set(())\n with open(output_path, \"r\") as file_pointer:\n raw_output = file_pointer.readlines()\n for line in raw_output:\n line = line.strip()\n matches = regex.match(line)\n if matches:\n path, status, redirect_url = matches.groups()\n if redirect_url and redirect_url.startswith(\"/\"):\n results.add(urljoin(url, redirect_url))\n elif redirect_url:\n results.add(redirect_url)\n else:\n results.add(urljoin(url, path))\n return results",
"def collectLinks(self, output):\n pass",
"def _filter_entries_by_response(self, urls, har=None):\r\n if not har:\r\n har = self.har\r\n \r\n matches = []\r\n if len(har[\"log\"][\"entries\"]) > 1:\r\n for entry in har[\"log\"][\"entries\"]:\r\n for url in urls:\r\n if url in entry[\"request\"][\"url\"]:\r\n tempObject = {}\r\n if entry[\"response\"][\"status\"] == 200 and entry[\"response\"][\"content\"].get(\"text\") and entry[\"response\"][\"content\"][\"text\"] != \"\":\r\n tempObject['url'] = entry[\"request\"][\"url\"]\r\n tempObject['response'] = entry[\"response\"][\"content\"][\"text\"].encode('ascii', 'ignore')\r\n matches.append(tempObject)\r\n return matches",
"def get_request_url(rule):\n rule_result = []\n for key, value in rule.items():\n if (key == 'destination' or\n key == 'destination_nat_address' or\n key == 'source' or\n key == 'source_nat_address') and value:\n result = nat_network.get_request_network(value)\n data = {\n key:result\n }\n rule_result.append(data)\n elif (key == 'service' or\n key == 'destination_nat_service') and value:\n result = nat_service.get_request_service(value)\n data = {\n key:result\n }\n rule_result.append(data)\n else:\n data = {\n key:value\n }\n rule_result.append(data)\n \n final_result = []\n final_result.append(dict(ChainMap(*rule_result)))\n \n return final_result",
"def extract_domains(self, resp):\n return",
"def traverse_uris(uri):\n seen = set()\n uris_to_check = [uri]\n while len(uris_to_check) > 0: \n uri = uris_to_check.pop()\n if uri not in seen:\n seen.add(uri)\n for key in keys_for_uri[uri]:\n for uri2 in uris_for_key[key]:\n if uri2 not in seen:\n uris_to_check.append(uri2)\n \n return seen",
"def __get_urls(self):\n self.__valid_servers = {\n \"qa\": {\n \"server_url\": \"https://qa.api.deepaffex.ai:9443\",\n \"websocket_url\": \"wss://qa.api.deepaffex.ai:9080\"\n },\n \"dev\": {\n \"server_url\": \"https://dev.api.deepaffex.ai:9443\",\n \"websocket_url\": \"wss://dev.api.deepaffex.ai:9080\"\n },\n \"demo\": {\n \"server_url\": \"https://demo.api.deepaffex.ai:9443\",\n \"websocket_url\": \"wss://demo.api.deepaffex.ai:9080\"\n },\n \"prod\": {\n \"server_url\": \"https://api.deepaffex.ai:9443\",\n \"websocket_url\": \"wss://api.deepaffex.ai:9080\"\n },\n \"prod-cn\": {\n \"server_url\": \"https://api.deepaffex.cn:9443\",\n \"websocket_url\": \"wss://api.deepaffex.cn:9080\"\n },\n \"demo-cn\": {\n \"server_url\": \"https://demo.api.deepaffex.cn:9443\",\n \"websocket_url\": \"wss://demo.api.deepaffex.cn:9080\"\n }\n }\n try:\n self.server_url = self.__valid_servers[self.server][\"server_url\"]\n self.websocket_url = self.__valid_servers[self.server][\"websocket_url\"]\n except KeyError:\n raise KeyError(\"Invalid server ID given\")"
] | [
"0.648302",
"0.58565474",
"0.57382894",
"0.56618035",
"0.55831033",
"0.55718136",
"0.5427328",
"0.5408748",
"0.53590286",
"0.53027636",
"0.526104",
"0.52590644",
"0.5210608",
"0.5188143",
"0.5162076",
"0.5156036",
"0.51295507",
"0.51037467",
"0.50811",
"0.5069011",
"0.50665647",
"0.506475",
"0.5036145",
"0.50289774",
"0.49940535",
"0.49844387",
"0.49792275",
"0.49578828",
"0.49511132",
"0.49468654"
] | 0.6712598 | 0 |
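Illustrative aggregation sketch, not part of the dataset rows: it groups the (domain, path) pairs that parse_urls emits into a set of paths per domain — the shape that domain_string in the next row expects; the pairs list is hypothetical.

# Plain-Python grouping of (domain, path) pairs; in a full crawl pipeline this
# step would normally be a keyed reduction over many records.
from collections import defaultdict

pairs = [  # hypothetical output of parse_urls over a few records
    ('example.com', '/index.html'),
    ('example.com', '/about'),
    ('example.org', '/a'),
    ('example.com', '/about'),  # duplicates collapse in the set below
]

paths_by_domain = defaultdict(set)
for domain, path in pairs:
    paths_by_domain[domain].add(path)

print(dict(paths_by_domain))
# e.g. {'example.com': {'/index.html', '/about'}, 'example.org': {'/a'}} (set order varies)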
Takes a domain and concatenates it with its path URIs, separated by newlines. | def domain_string(domain, path_set):
out = domain + '\n' + '\n'.join(list(path_set)) + '\n\n\n'
return out | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def merge_link(url_domain, url_path):\n\n # Ensure domain is not empty\n if url_domain.strip() == \"\":\n return url_path\n\n # Strip / at end of domain\n if url_domain[-1] == \"/\":\n url_domain = url_domain[0:-1]\n\n # Strip / at beginning of path\n if url_path[0] == \"/\":\n url_path = url_path[1:]\n\n url_full = \"/\".join([url_domain, url_path])\n\n return url_full",
"def baseDomain(domain, includeScheme=True):\n result = ''\n url = urlparse(domain)\n if includeScheme:\n if len(url.scheme) > 0:\n result = '%s://' % url.scheme\n if len(url.netloc) == 0:\n result += url.path\n else:\n result += url.netloc\n return result",
"def _urljoin(self, *args):\r\n\t\treturn \"/\".join(map(lambda x: str(x).rstrip('/'), args))",
"def urljoin(*args):\n\n return \"/\".join(map(lambda x: str(x).rstrip('/'), args))",
"def createCompleteLink(link, domain):\n if link is not None and len(link) > 0:\n if re.match('^http', link) is not None:\n return link\n else:\n #Remove the first / to avoid //\n if link[0] == '/':\n link = link[1:]\n return domain + link\n return domain",
"def get_url(domain, path):\n\n return f\"https://{domain}.freshservice.com/api/v2/{path}\"",
"def str_join(paths: []):\n return \"/\".join(paths)",
"def join_path(base, *args):\n\tfilepath = base\n\tfor arg in args:\n\t\tfilepath = filepath + cfg.SEP_COMM + arg\n\tfilepath = filepath.replace( '//', cfg.SEP_COMM)\n\treturn filepath",
"def join(self, *parts):\n if parts:\n parts = list(parts)\n if len(parts) > 1:\n for i, p in enumerate(parts[:-1]):\n parts[i] = p.strip('/')\n parts[-1] = parts[-1].lstrip('/')\n return '/'.join(parts)",
"def join_urls(*urls):\r\n if not urls:\r\n return\r\n \r\n url = urls[0]\r\n for u in urls[1:]:\r\n if not url.endswith('/'):\r\n url += '/'\r\n while u.startswith('/'):\r\n u = utils.lstrips(u, '/')\r\n url += u\r\n return url",
"def path_to_url(path):\r\n if os.sep == '/':\r\n return path\r\n else:\r\n return '/'.join(split_all(path))",
"def join_url(*args): # type: (*str) -> str\n parts = [part[:-1] if part and part[-1] == '/' else part for part in args]\n parts.append('')\n return '/'.join(parts)",
"def concat_url(endpoint, url):\n u = \"%s/%s\" % (endpoint.rstrip(\"/\"), url.strip(\"/\"))\n return u.rstrip('/')",
"def test_url_concat(self):\n assert ct.url_join(\"www.bad-actor.services\", \"api\") == \"http://www.bad-actor.services/api\"\n assert ct.url_concat(\"https://www.bad-actor.services\", \"api\") == \"https://www.bad-actor.services/api\"\n assert ct.url_concat(\"https://www.bad-actor.services\", \"/api\") == \"https://www.bad-actor.services/api\"\n assert ct.url_concat(\n \"https://www.bad-actor.services\", \"/api\", \"new//one\") == \"https://www.bad-actor.services/api/new/one\"\n assert ct.url_concat(\"https://www.bad-actor.services\", \"/\") == \"https://www.bad-actor.services/\"\n assert ct.url_concat(\"https://www.bad-actor.services/\", \"/\") == \"https://www.bad-actor.services/\"",
"def add_email_domain(email, domain):\n if not domain:\n return email\n if '@' in email:\n return email\n at_domain = domain if domain.startswith('@') else '@' + domain\n if email.endswith(at_domain):\n return email\n if email.endswith(at_domain + '>'):\n return email\n return email + at_domain",
"def urljoin(*args):\n return '/'.join(str(a or '').strip('/') for a in args)",
"def _construct_url(parts):\n results = []\n last_idx = len(parts) - 1\n for n, part in enumerate(parts):\n if n > 0:\n part = part.lstrip('/')\n if n < last_idx:\n part = part.rstrip('/')\n if part:\n results.append(part)\n return '/'.join(results)",
"def urljoin(*atoms):\n url = \"/\".join([x for x in atoms if x])\n while \"//\" in url:\n url = url.replace(\"//\", \"/\")\n # Special-case the final url of \"\", and return \"/\" instead.\n return url or \"/\"",
"def join(path, *paths):\n\n for p in paths:\n if p.startswith(\"/\"):\n path = p\n elif p != \"\":\n path += (\"\" if path == \"\" or path.endswith(\"/\") else \"/\") + p\n return path",
"def _url_join(self, *parts):\n return \"/\".join(map(lambda fragment: fragment.rstrip('/'), parts))",
"def join(path, *paths: str) -> str:\n pass",
"def domain_args(domains):\n return ' ' + ' '.join(['-d {0}'.format(domain) for domain in domains])",
"def join(*paths):\r\n path = \"\"\r\n for component in paths:\r\n path += (\"/\" if path and not path.endswith(\"/\") else \"\") + component.replace(\r\n \"\\\\\", \"/\"\r\n )\r\n return path",
"def urljoin(*parts):\n def _gen(parts):\n prev = None\n for part in parts:\n if not part:\n continue\n if not prev:\n prev = part\n elif (prev[-1] == '/') != (part[0] == '/'): # Exactly one slash was present\n prev = part\n # At this point, either zero or two slashes are present. Which is it?\n elif part[0] == '/': # Two slashes.\n prev = part[1:]\n else: # No slashes.\n yield '/'\n prev = part\n yield prev\n\n return \"\".join(part for part in _gen(parts))",
"def ends_slash(url):\n return url if url.endswith(\"/\") else url + \"/\"",
"def generate_url(domainname = None):\n path_length = random.choice([1,2,3,4,5])\n path = ''\n for i in range(path_length):\n path = path + '/' + ''.join(generate_string(5, valid_domain_name_chars))\n if domainname:\n return 'http://www.'+domainname+path\n else: \n return 'http://www.'+generate_domainname()+path",
"def append_links(self, lines, lang):\n lines.append(\"verbatim \")\n lines.append(\"section Links\")\n lines.append(\"external http://polcasaglia.blogspot.com Blog\")\n lines.append(\"external http://www.uisp-fe.it/calcio.php UISP\" )\n lines.append(\"verbatim \")\n return lines",
"def full_uri(path):\n protocol = 'https' if settings.USE_HTTPS else 'http'\n domain = Site.objects.get_current().domain\n return \"{}://{}{}\".format(protocol, domain, path)",
"def test_url_domain(self):\n assert ct.url_domain(\"http://www.google.com\") == \"google.com\"\n assert ct.url_domain(\"http://localhost\") == \"localhost\"\n assert ct.url_domain(\"http://192.168.1.19:5010\") == \"192.168.1.19\"",
"def get_url_end_string(url_extras: [str]) -> str:\n if len(url_extras) == 0:\n return \"\"\n else:\n url_end_string = \"\"\n for i in url_extras:\n url_end_string += \"/\" + i\n return url_end_string"
] | [
"0.6588488",
"0.6037963",
"0.59932935",
"0.58680135",
"0.5814077",
"0.58129615",
"0.5769635",
"0.5683123",
"0.566781",
"0.56072015",
"0.55921346",
"0.55277026",
"0.5518288",
"0.5502831",
"0.5482606",
"0.5472744",
"0.545825",
"0.54528075",
"0.5439585",
"0.5436488",
"0.54126805",
"0.54007715",
"0.5369596",
"0.53551173",
"0.534428",
"0.5336937",
"0.53326494",
"0.5322084",
"0.5320789",
"0.5317493"
] | 0.71544874 | 0 |
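The joiners in the negatives list above all revolve around the same slash-normalization idea; a minimal sketch of that pattern (the helper name `join_url_parts` is chosen here for illustration and is not one of the functions above):

```python
def join_url_parts(*parts):
    # Strip redundant slashes between segments; the scheme's "//" and a
    # trailing slash on the final segment are left untouched.
    if not parts:
        return ""
    head = [p.strip("/") for p in parts[:-1] if p]
    tail = parts[-1].lstrip("/")
    return "/".join([p for p in head if p] + [tail])


print(join_url_parts("https://example.com/", "/api", "v1/"))  # https://example.com/api/v1/
```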
Creates a DataFrame with polygons and IDs for all tax zones. | def createEmptyMapData():
with open('data/taxzone.json', 'r') as f:
taxzones = json.load(f)
polygons_shape = [shape(feature['geometry']) for feature in taxzones['features']]
names = [feature['properties']['id'] for feature in taxzones['features']]
map_data = pd.DataFrame({'poly': polygons_shape, 'id': names})
return map_data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def taxa_data_frame(self):\n cols = list(self._taxa.keys())\n cols.remove(\"uid\")\n cols.remove(\"object\")\n df = DataFrame(self._taxa, columns=cols, index=self._taxa[\"uid\"])\n df.index.name = \"uid\"\n\n return df",
"def taxi_zones(path, storage_options=None):\n zdf = pd.read_csv(path, storage_options=storage_options)\n zdf = zdf.drop(\"OBJECTID\", axis=\"columns\")\n zdf = zdf.set_index(\"LocationID\")\n return zdf",
"def _regions(self, voronoi_diagram, unique_id, ids, crs):\n # generate DataFrame of results\n regions = pd.DataFrame()\n regions[unique_id] = ids # add unique id\n regions[\"region\"] = voronoi_diagram.point_region # add region id for each point\n\n # add vertices of each polygon\n vertices = []\n for region in regions.region:\n vertices.append(voronoi_diagram.regions[region])\n regions[\"vertices\"] = vertices\n\n # convert vertices to Polygons\n polygons = []\n for region in tqdm(regions.vertices, desc=\"Vertices to Polygons\"):\n if -1 not in region:\n polygons.append(Polygon(voronoi_diagram.vertices[region]))\n else:\n polygons.append(None)\n # save polygons as geometry column\n regions[\"geometry\"] = polygons\n\n # generate GeoDataFrame\n regions_gdf = gpd.GeoDataFrame(regions.dropna(), geometry=\"geometry\")\n regions_gdf = regions_gdf.loc[\n regions_gdf[\"geometry\"].length < 1000000\n ] # delete errors\n regions_gdf = regions_gdf.loc[\n regions_gdf[unique_id] != -1\n ] # delete hull-based cells\n regions_gdf.crs = crs\n return regions_gdf",
"def createTerritoryGeometries(config, start_time):\n # get the correct names for all of the provinces within each territory\n file_name = config['shape_files_path'] + config['county_shape_file_name']\n names_df = gpd.read_file(file_name)\n names_df.rename(columns={'NAMELSAD':'NAME'})\n names_df = names_df[['GEOID', 'NAME']]\n\n df_holder = []\n # read in block files for the 4 excluded US territories\n for territory in ['60','66','69','78']:\n try:\n temp_time = time.localtime()\n # open the appropriate block file for the given territory\n file_name = config['shape_files_path'] +\\\n \"block/tl_%s_%s_tabblock%s.shp\" %\\\n (config['census_vintage'],territory,config['census_vintage'][2:])\n temp_df = gpd.read_file(file_name)\n # modify the column names so they match what we expect in the tract and \n # county geojson files\n change_columns = { 'STATEFP%s' % config['census_vintage'][2:]:'state_fips', \n 'COUNTYFP%s' % config['census_vintage'][2:]: 'county_fips',\n 'GEOID%s' % config['census_vintage'][2:]:'block_fips',\n 'ALAND%s' % config['census_vintage'][2:]:'aland'}\n temp_df.rename(columns=change_columns, inplace=True)\n\n # create the tract file for the given territory\n tract_df = temp_df[['block_fips', 'aland', 'geometry']]\n tract_df['GEOID'] = tract_df['block_fips'].str[:11]\n tract_df['NAME']=tract_df['GEOID'].str[5:11]\n tract_df['NAME'] = np.where(tract_df['NAME'].str[4:6] != '00', \n tract_df['NAME'].str[:4] + \".\" + tract_df['NAME'].str[4:6], \n tract_df['NAME'].str[:4])\n\n # dissolve the blocks into tract level detail\n tract_df=tract_df[['GEOID', 'NAME', 'geometry']].loc[tract_df['aland']>0].dissolve(by='GEOID')\n tract_df.reset_index(inplace=True)\n\n # save the newly created tracts for the territory into a shape file\n # for later use by processes\n file_name = config['shape_files_path'] +\\\n \"tract/gz_%s_%s_140_00_500k.shp\" %\\\n (config['census_vintage'],territory)\n tract_df.to_file(file_name)\n\n # provide status or data processing\n my_message = \"\"\"\n INFO - STEP 0 (MASTER): TASK 3 OF 13 - FINISHED WRITING TRACT SHAPE FILE\n FOR US TERRITORY %s\n \"\"\" % territory\n my_message = ' '.join(my_message.split()) \n print(nbmf.logMessage(my_message, temp_time, time.localtime(),\n time.mktime(time.localtime())-time.mktime(start_time)))\n except:\n # there was an error in processing. Capture the error and output the\n # stacktrace to the screen\n my_message = \"\"\"\n ERROR - STEP 0 (MASTER): TASK 3 OF 13 - FAILED WRITING TRACT SHAPE FILE\n FOR US TERRITORY %s\n \"\"\" % territory \n my_message += \"\\n\" + traceback.format_exc()\n print(nbmf.logMessage(my_message, temp_time, time.localtime(),\n time.mktime(time.localtime())-time.mktime(start_time)))\n return False\n\n try:\n # create the dataframe for capturing county level data\n temp_time = time.localtime()\n county_df = temp_df[['state_fips', 'county_fips', 'aland', 'geometry']]\n county_df['GEOID'] = county_df['state_fips'] + county_df['county_fips']\n\n # merge the block level data at the county level to get the geometry\n county_df=county_df[['GEOID', 'geometry']].loc[county_df['aland']>0].dissolve(by='GEOID')\n\n # the county records for US states include names. The names cannot\n # be easily constructed following a set of rules, so instead we just\n # merge the names of the territories that are listed in the tiger line\n # files with the geometries we just calculated. 
This ends up giving\n # us the information we need to create the equivalent of a fully \n # populated 2010 county cartographic file that includes territories\n county_df = county_df.merge(names_df, left_on='GEOID', right_on='GEOID')\n county_df = county_df[['GEOID', 'NAME', 'geometry']]\n\n # append the information to a list that we will process later\n df_holder.append(county_df)\n\n # provide the status on the data processing for this task\n my_message = \"\"\"\n INFO - STEP 0 (MASTER): TASK 3 OF 13 - PROCESSED COUNTY DATA FOR\n US TERRITORY %s\n \"\"\" % territory\n my_message = ' '.join(my_message.split()) \n print(nbmf.logMessage(my_message, temp_time, time.localtime(),\n time.mktime(time.localtime())-time.mktime(start_time)))\n except:\n # there was an error in processing. Capture the error and output the\n # stacktrace to the screen \n my_message = \"\"\"\n ERROR - STEP 0 (MASTER): TASK 3 OF 13 - FAILED PROCESSING COUNTY DATA\n FOR US TERRITORY %s\n \"\"\" % territory \n my_message += \"\\n\" + traceback.format_exc()\n print(nbmf.logMessage(my_message, temp_time, time.localtime(),\n time.mktime(time.localtime())-time.mktime(start_time)))\n return False \n\n try:\n # now that we have the county level data for the territories, we need to merge\n # it with the US county data and create a single file for subsequent processing\n # open the county cartographic bounday file\n file_name = config['shape_files_path'] + config['county_cb_shape_file_name']\n county = gpd.read_file(file_name)\n\n # the cartographic boundary files do not have full names, so concatenate the \n # name and lsad columns and overwrite the original name\n county['NAME']=county['NAME'] + ' ' + county['LSAD']\n\n # extract the county fips from the non-standard county fips identifier in the\n # 2010 cartographic boundary file and then preserve only the necessary columns\n county['GEOID']=county['GEO_ID'].str[9:]\n county = county[['GEOID', 'NAME','geometry']]\n\n # append the county data to the list to be used to build the single file\n df_holder.append(county)\n\n # merge all of the dataframes into a single dataframe, sort it, and then \n # write the file out as a shape file so it can be used later for subsequent\n # data processing\n counties = pd.concat([x for x in df_holder])\n counties.sort_values(by='GEOID',inplace=True)\n file_name = config['shape_files_path'] + config['county_gzm_shape_file_name']\n counties.to_file(file_name)\n \n # provide the status on the data processing for this task\n my_message = \"\"\"\n INFO - STEP 0 (MASTER): TASK 3 OF 13 - COMPLETED UPDATING COUNTY \n CARTOGRAPHIC SHAPE FILE\n \"\"\" \n my_message = ' '.join(my_message.split()) \n print(nbmf.logMessage(my_message, temp_time, time.localtime(),\n time.mktime(time.localtime())-time.mktime(start_time))) \n return True \n\n except:\n # there was an error in processing. Capture the error and output the\n # stacktrace to the screen \n my_message = \"\"\"\n ERROR - STEP 0 (MASTER): TASK 3 OF 13 - FAILED UPDATING COUNTY \n CARTOGRAPHIC SHAPE FILE\n \"\"\" \n my_message += \"\\n\" + traceback.format_exc()\n print(nbmf.logMessage(my_message, temp_time, time.localtime(),\n time.mktime(time.localtime())-time.mktime(start_time)))\n return False",
"def makeGeoDf(self, arr: dict):\n geometry_points = [Point(x, y) for x, y in zip(arr[\"X\"], arr[\"Y\"])]\n elevetions = arr[\"Z\"]\n df = gpd.GeoDataFrame(columns=[\"elevation\", \"geometry\"])\n df['elevation'] = elevetions\n df['geometry'] = geometry_points\n df = df.set_geometry(\"geometry\")\n df.set_crs(self.output_epsg, inplace=True)\n return df",
"def _regions(self, voronoi_diagram, unique_id, ids, crs):\n vertices = pd.Series(voronoi_diagram.regions).take(voronoi_diagram.point_region)\n polygons = []\n for region in vertices:\n if -1 not in region:\n polygons.append(pygeos.polygons(voronoi_diagram.vertices[region]))\n else:\n polygons.append(None)\n\n regions_gdf = gpd.GeoDataFrame(\n {unique_id: ids}, geometry=polygons, crs=crs\n ).dropna()\n regions_gdf = regions_gdf.loc[\n regions_gdf[unique_id] != -1\n ] # delete hull-based cells\n\n return regions_gdf",
"def to_frame(self):\n return gpd.GeoDataFrame(\n data=range(0, self.nleaves),\n geometry=self.to_geom(),\n crs=self.crs,\n columns=['id'],\n )",
"def create_airports(data):\n \n airport_cities = pd.DataFrame(data)\n geometry = [Point(xy) for xy in zip(airport_cities.lon, airport_cities.lat)]\n airport_cities = airport_cities.drop(['lon', 'lat'], axis=1)\n crs = {'init': 'epsg:4326'}\n geo_airport_cities = gpd.GeoDataFrame(airport_cities, crs=crs, geometry=geometry)\n return geo_airport_cities",
"def _load_county_geometry() -> geopandas.GeoDataFrame:\n\tfilename = shapefiles_folder / \"cb_2016_us_county_500k\"\n\ttable = read_geometry(filename)\n\n\ttable['regionCode'] = [f\"{i:>02}{j:>03}\" for i, j in zip(table['STATEFP'].values, table['COUNTYFP'].values)]\n\treturn table",
"def to_frame(self):\n return gpd.GeoDataFrame(\n data=self.tree_ids,\n geometry=self.to_geom(),\n crs=self.crs,\n columns=['id'],\n )",
"def construct_polygon(self, polygon_longs: List, polygon_lats: List) -> gpd.GeoDataFrame:\n\n polygon_geom = Polygon(zip(polygon_longs, polygon_lats))\n\n crs = {'init': 'epsg:4326'}\n polygon = gpd.GeoDataFrame(index=[0], crs=crs, geometry=[polygon_geom])\n\n polygon.to_file(filename=f'{self.polygon_path}/polygon_{self.postfix}.geojson', driver='GeoJSON')\n polygon.to_file(filename=f'{self.polygon_path}/polygon_{self.postfix}.shp', driver=\"ESRI Shapefile\")\n\n self.monitor.info(\"-> Created area polygon.\")\n return polygon",
"def data_with_fips(self) -> pd.DataFrame:\n return self.data",
"def generatePolygons():",
"def create_table(f, geoinfo):\n bounds_cols = xb_points + yb_points\n df = pd.read_csv(f, delimiter=\";\", index_col=\"INDEX_RC\")\n df[duration_name] = parse_duration_level(f)\n df = df.join(geoinfo[[\"X_CENT_GEO\", \"Y_CENT_GEO\", \"Col\", \"Row\"]])\n df = df.rename(columns={\"Col\": x, \"Row\": y, \"X_CENT_GEO\": lon, \"Y_CENT_GEO\": lat})\n return df",
"def hexlist_to_geodataframe(list_hexagons):\n df=pd.DataFrame(list_hexagons, columns=['hexid'])\n def f(x):\n #return h3.h3_to_geo_boundary(h3_address=x['hexid'],geo_json=False)\n return hex_to_polygon(x['hexid'])\n\n gdf = gpd.GeoDataFrame(df, geometry=df.apply(f, axis=1))\n return gdf",
"def convert_shapefiles_to_dataframe(self, shapefiles: List[Shapefile]):\n # Avoid needless encoding warnings\n os.environ['CPL_ZIP_ENCODING'] = 'UTF-8'\n subsets = []\n # Drop null values (e.g., for not-yet-released shapefiles) from list of filepaths\n filepaths = filter(None, shapefiles)\n for filepath in filepaths:\n try:\n subset = load_geodataframe(filepath)\n except BadZipFile:\n logger.warning(\n f'Warning: Failed to load zip file {filepath}. It may be corrupted. You might '\n 'try clearing your autocensus cache by calling autocensus.clear_cache() or '\n f'manually deleting the cache folder at {CACHE_DIRECTORY_PATH}. Continuing…'\n )\n continue\n subsets.append(subset)\n dataframe = pd.concat(subsets, ignore_index=True, sort=True)\n\n # Geometry columns\n if self.geometry == 'polygons':\n dataframe['geometry'] = (\n dataframe['geometry'].map(coerce_polygon_to_multipolygon).map(flatten_geometry)\n )\n\n # Clean up\n affgeoid_field = identify_affgeoid_field(dataframe.columns)\n columns_to_keep = [affgeoid_field, 'year', 'geometry']\n dataframe = dataframe.loc[:, columns_to_keep]\n return dataframe",
"def convert_data (data_taxi,density):\n \n n_trips = len(data_taxi)\n \n min_longitude = min(min(list(data_taxi.loc[:,'pickup_longitude'])),\n min(list(data_taxi.loc[:,'dropoff_longitude'])))\n max_longitude = max(max(list(data_taxi.loc[:,'pickup_longitude'])),\n max(list(data_taxi.loc[:,'dropoff_longitude'])))\n min_latitude = min(min(list(data_taxi.loc[:,'pickup_latitude'])),\n min(list(data_taxi.loc[:,'dropoff_latitude'])))\n max_latitude = max(max(list(data_taxi.loc[:,'pickup_latitude'])),\n max(list(data_taxi.loc[:,'dropoff_latitude'])))\n \n e_longitude = max_longitude - min_longitude\n \n e_latitude = max_latitude - min_latitude\n \n scale =np.sqrt( n_trips/( e_longitude* e_latitude * density) )\n\n taxis = []\n \n for i in range(n_trips):\n selected_taxi = data_taxi.iloc[i]\n departure = [int((selected_taxi.pickup_longitude - min_longitude) * scale),\n int((selected_taxi.pickup_latitude - min_latitude) * scale),\n ]\n \n arrival = [\n int((selected_taxi.dropoff_longitude - min_longitude) * scale),\n int((selected_taxi.dropoff_latitude - min_latitude) * scale)]\n \n taxis.append(taxi(departure,arrival,departure))\n return taxis,int(scale*(e_latitude))+1,int(scale*(e_longitude))+1",
"def df_customers_features_build(self):\n\n df_customers_rfm = self._df_customers_rfm.copy()\n df_customers_timeFeature = self._df_customers_timeFeature.copy()\n df_customers_nlp = self._df_customers_pca_nlp.copy()\n\n #-------------------------------------------------------------------------\n # Dataframe are aggregated; note that indexes are customerID.\n #-------------------------------------------------------------------------\n df_customers = pd.DataFrame()\n\n df_customers = pd.concat([df_customers,df_customers_rfm], axis=1)\n\n df_customers = pd.concat([df_customers,df_customers_timeFeature]\\\n , join='inner', axis=1)\n\n df_customers = pd.concat([df_customers,df_customers_nlp]\\\n , join='inner', axis=1)\n \n self.strprint(\"All features : \"+str(df_customers.shape))\n self._df_customers = df_customers.copy()\n return",
"def get_usa_states_geo_df() -> geopandas.GeoDataFrame:\n\n geo_df: geopandas.GeoDataFrame = geopandas.read_file(\n GEO_DATA_DIR / \"cb_2017_us_state_20m\" / \"cb_2017_us_state_20m.shp\"\n ).rename(columns={\"STUSPS\": CODE}, errors=\"raise\")\n\n geo_df = geo_df[\n [\n \"STATEFP\",\n # \"STATENS\",\n # \"AFFGEOID\",\n # \"GEOID\",\n CODE,\n # \"NAME\",\n \"LSAD\",\n # \"ALAND\",\n # \"AWATER\",\n \"geometry\",\n ]\n ]\n\n return geo_df",
"def join_with_zones(taxi_rides_df, taxi_zones_df):\n # inner join on pickup location id, drop duplicate columns\n taxi_rides_df = taxi_rides_df.merge(taxi_zones_df, how=\"inner\",\n left_on=\"PULocationID\",\n right_on=\"LocationID\",\n right_index=True)\n taxi_rides_df = taxi_rides_df.drop(\"PULocationID\", axis=\"columns\")\n # inner join on drop off location id, drop duplicate columns\n taxi_rides_df = taxi_rides_df.merge(taxi_zones_df, how=\"inner\",\n left_on=\"DOLocationID\",\n right_on=\"LocationID\",\n suffixes=[\"_PU\", \"_DO\"],\n right_index=True)\n taxi_rides_df = taxi_rides_df.drop(\"DOLocationID\", axis=\"columns\")\n return taxi_rides_df",
"def get_zone_list(self, building_id: str) -> DataFrame:\n raw = self._get_zone_list(building_id)\n if raw:\n return ensure_camel_columns(read_json(json.dumps(raw)).set_index(\"id\"))\n raise IOError(\"Empty response from web request.\")",
"def get_places() -> DataFrame:\n df = pd.read_csv('./data/geoplaces2.csv', encoding='utf-8')\n\n # drop useless columns\n df.drop(columns=['the_geom_meter', 'name', 'address',\n 'city', 'state', 'country', 'fax',\n 'zip', 'url', 'accessibility', 'franchise',\n 'other_services'],\n inplace=True)\n\n # select categorical column names\n categorical_columns = [column for column in df.columns\n if df[column].dtype.name == 'object'\n if column not in ['userID', 'smoker']]\n\n # replace categorical columns with one hot encoding\n for column_name in categorical_columns:\n dummies = pd.get_dummies(df[column_name])\n\n for dummy_column_name in dummies.columns:\n df[column_name + \"_\" + dummy_column_name] = dummies[dummy_column_name]\n\n df.drop(columns=[column_name], inplace=True)\n\n categorical_columns = [column for column in df.columns if df[column].dtype.name == 'object']\n\n for column in categorical_columns:\n df[column] = df[column].astype('category')\n\n df_cuisine = get_place_secondary_df('cuisine', 'Rcuisine')\n df_payment = get_place_secondary_df('accepts', 'Rpayment')\n df_hours = get_place_hours()\n\n payment_columns = list(filter(lambda x: x.startswith(\"Raccepts_\"), df_payment.columns))\n\n # some restaurants don't have specified payment ... but why\n # left join payment options and set cash option\n new_df = df.merge(df_payment, on='placeID', how='left')\n new_df[payment_columns] = new_df[payment_columns].fillna(0)\n new_df['Raccepts_cash'] = 1\n\n # left join cuisines and fill missing values with 0\n new_df = new_df.merge(df_cuisine, on='placeID', how='left')\n cuisine_columns = list(filter(lambda x: \"Rcuisine\" in x, new_df.columns))\n new_df[cuisine_columns] = new_df[cuisine_columns].fillna(0)\n\n new_df = new_df.merge(df_hours, on='placeID', how='inner')\n\n return new_df",
"def create_geodata(x):\n list_len = len(x)\n pilot_log = pd.concat(x[i][['time','Cn0DbHz','svid','geometry']] for i in range(list_len))\n \n return pilot_log",
"def createDataFrames(self):\n self._atmDF = pd.DataFrame.from_dict(self._atmDict, orient='index')\n \n self._clientDF = pd.DataFrame.from_dict(self._clientDict, orient='index')\n self._clientDF['longAccount'] = self._clientDF.client.map(str) +\\\n '_' + self._clientDF.account.map(str)\n \n self._transactionDF = pd.DataFrame.from_dict(self._transactionDict, orient='index')",
"def _extract_geographical_patterns(self):\n # take onehot encoding of zipcodes\n onehot = pd.get_dummies(self.df_transaction['zipcode'], prefix='zipcode')\n rider_id = pd.DataFrame(data={'riderID': self.df_transaction['riderID']})\n frames = [rider_id, onehot]\n df_onehot = pd.concat(frames, axis=1)\n\n # count zipcodes\n df_rider_geo_count = df_onehot.groupby(['riderID'])[list(onehot.columns.values)].sum().reset_index()\n df_rider_geo_count['geo_row_sum'] = df_rider_geo_count.iloc[:, 1:].sum(axis=1)\n\n return df_rider_geo_count",
"def get_countries_geo_df() -> geopandas.GeoDataFrame:\n\n geo_df: geopandas.GeoDataFrame = geopandas.read_file(\n GEO_DATA_DIR / \"ne_110m_admin_0_map_units\" / \"ne_110m_admin_0_map_units.shp\"\n )\n\n geo_df = geo_df.rename(columns={\"ADMIN\": CODE}, errors=\"raise\")\n\n # Keys are what's in the geo df, values are what we want to rename them to\n # Values must match the names in the original data source. If you don't like those\n # names, change them there and then come back and change the values here.\n geo_df[CODE] = (\n geo_df[CODE]\n .map(\n {\n \"Central African Republic\": \"Central African Rep.\",\n \"Democratic Republic of the Congo\": \"Dem. Rep. Congo\",\n \"Equatorial Guinea\": \"Eq. Guinea\",\n \"eSwatini\": \"Eswatini\",\n \"Georgia (Country)\": \"Georgia\",\n \"Republic of Serbia\": \"Serbia\",\n \"United Arab Emirates\": \"UAE\",\n \"United Kingdom\": \"Britain\",\n \"United Republic of Tanzania\": \"Tanzania\",\n \"Western Sahara\": \"W. Sahara\",\n \"United States of America\": \"United States\",\n }\n )\n .fillna(geo_df[CODE])\n )\n geo_df = geo_df[geo_df[CODE] != \"Antarctica\"]\n\n colonial_power_main_countries = {\n \"Britain\": \"England\",\n \"France\": \"France, Metropolitan\",\n \"Norway\": \"Norway\",\n \"Papua New Guinea\": \"Papua New Guinea\",\n }\n\n is_main_country_idx = geo_df[CODE].map(colonial_power_main_countries).isna() | (\n geo_df[\"NAME_SORT\"] == geo_df[CODE].map(colonial_power_main_countries)\n )\n\n geo_df[CODE] = geo_df[CODE].where(\n is_main_country_idx, geo_df[CODE].str.cat(geo_df[\"NAME_SORT\"], sep=\" - \"),\n )\n geo_df[\"name\"] = geo_df[CODE]\n\n geo_df = geo_df[\n [\n \"featurecla\",\n \"scalerank\",\n \"LABELRANK\",\n # \"SOVEREIGNT\",\n # \"SOV_A3\",\n # \"ADM0_DIF\",\n \"LEVEL\",\n # \"TYPE\",\n CODE,\n \"name\",\n # \"ADM0_A3\",\n # \"GEOU_DIF\",\n # \"GEOUNIT\",\n # \"GU_A3\",\n # \"SU_DIF\",\n # \"SUBUNIT\",\n # \"SU_A3\",\n # \"BRK_DIFF\",\n # \"NAME\",\n # \"NAME_LONG\",\n # \"BRK_A3\",\n # \"BRK_NAME\",\n # \"BRK_GROUP\",\n \"ABBREV\",\n # \"POSTAL\",\n # \"FORMAL_EN\",\n # \"FORMAL_FR\",\n # \"NAME_CIAWF\",\n # \"NOTE_ADM0\",\n # \"NOTE_BRK\",\n \"NAME_SORT\",\n # \"NAME_ALT\",\n # \"MAPCOLOR7\",\n # \"MAPCOLOR8\",\n # \"MAPCOLOR9\",\n # \"MAPCOLOR13\",\n # \"POP_EST\",\n # \"POP_RANK\",\n # \"GDP_MD_EST\",\n # \"POP_YEAR\",\n # \"LASTCENSUS\",\n # \"GDP_YEAR\",\n \"ECONOMY\",\n \"INCOME_GRP\",\n # \"WIKIPEDIA\",\n # \"FIPS_10_\",\n # \"ISO_A2\",\n # \"ISO_A3\",\n # \"ISO_A3_EH\",\n # \"ISO_N3\",\n # \"UN_A3\",\n # \"WB_A2\",\n # \"WB_A3\",\n # \"WOE_ID\",\n # \"WOE_ID_EH\",\n # \"WOE_NOTE\",\n # \"ADM0_A3_IS\",\n # \"ADM0_A3_US\",\n # \"ADM0_A3_UN\",\n # \"ADM0_A3_WB\",\n \"CONTINENT\",\n \"REGION_UN\",\n \"SUBREGION\",\n \"REGION_WB\",\n # \"NAME_LEN\",\n # \"LONG_LEN\",\n # \"ABBREV_LEN\",\n # \"TINY\",\n # \"HOMEPART\",\n # \"MIN_ZOOM\",\n # \"MIN_LABEL\",\n # \"MAX_LABEL\",\n # \"NE_ID\",\n # \"WIKIDATAID\",\n # \"NAME_AR\",\n # \"NAME_BN\",\n # \"NAME_DE\",\n # \"NAME_EN\",\n # \"NAME_ES\",\n # \"NAME_FR\",\n # \"NAME_EL\",\n # \"NAME_HI\",\n # \"NAME_HU\",\n # \"NAME_ID\",\n # \"NAME_IT\",\n # \"NAME_JA\",\n # \"NAME_KO\",\n # \"NAME_NL\",\n # \"NAME_PL\",\n # \"NAME_PT\",\n # \"NAME_RU\",\n # \"NAME_SV\",\n # \"NAME_TR\",\n # \"NAME_VI\",\n # \"NAME_ZH\",\n \"geometry\",\n ]\n ]\n\n return geo_df",
"def planes(self):\n\n results = []\n for sz in self.shearzones:\n point_cloud = self.get_shearzone(sz=sz, coords=\"gts\")\n n_pts = point_cloud.shape[1]\n centroid = np.sum(point_cloud, axis=1) / n_pts\n normal = fit_normal_to_points(point_cloud)\n\n data = np.atleast_2d(np.hstack((centroid, normal)))\n columns = (\"x_c\", \"y_c\", \"z_c\", \"n_x\", \"n_y\", \"n_z\")\n frame = pd.DataFrame(data=data, columns=columns)\n frame[\"shearzone\"] = sz\n results.append(frame)\n\n df = pd.concat(results, ignore_index=True)\n return df",
"def generateTripsAndZoneDist(filename, numDataSets, percentUsing):\n\n data = pd.read_csv(filename).values\n zoneData = pd.read_csv(\"taxi_zones/zone_lookup.csv\").values\n\n # Making time distribution and zone distributions\n timeDistribution = np.zeros(24)\n zoneDistribution = {k: 0 for k in range(1,zoneData[-1][0] + 1)}\n for d in data:\n timeDistribution[d[0]] += 1\n zoneDistribution[d[2]] += 1\n zoneDistribution[d[3]] += 1\n\n total = len(data)\n for i in range(24):\n timeDistribution[i] /= total\n for i in range(1,zoneData[-1][0] + 1):\n zoneDistribution[i] /= (2 * total)\n\n # n = number of data points divided by (30 * number of datasets) * 0.02 (2% of population uses it)\n n = int(len(data) / (30 * numDataSets) * percentUsing)\n\n # Sample trips according to time distribution\n sample = np.random.choice(np.arange(0, 24), size=n, p=timeDistribution)\n\n # Randomly sample existing trips and add them to new list\n trips = []\n for time in range(24):\n indices = np.random.choice(len(data[data[:, 0] == time]), size=len(sample[sample == time]))\n trips.extend(data[data[:, 0] == time][indices])\n\n return np.array(trips), zoneDistribution",
"def get_individual_df(nombre):\n clusters = []\n contadores = []\n for k, v in mydic[nombre][\"contador\"].items():\n clusters.append(k)\n contadores.append(v)\n return pd.DataFrame({\"CODIGO_POSTAL\": clusters, f\"contadores_{nombre}\": contadores})",
"def creat_df(urlist):\n country = []\n head = []\n for i in range(len(urlist)):\n country.append(urlist[i][2])\n head.append(urlist[i][4])\n df = pd.DataFrame.from_dict({\"Country\": country, \"Head\": head})\n return df"
] | [
"0.6282922",
"0.62614584",
"0.6177866",
"0.58757657",
"0.58594614",
"0.57724375",
"0.5746732",
"0.5704685",
"0.57044125",
"0.5677378",
"0.56272644",
"0.55792403",
"0.5492265",
"0.5476538",
"0.54143095",
"0.53428125",
"0.53372324",
"0.5313981",
"0.52794796",
"0.52559406",
"0.52483755",
"0.52481127",
"0.5225919",
"0.5224178",
"0.52220714",
"0.519711",
"0.51942855",
"0.5187383",
"0.518501",
"0.51816314"
] | 0.74269277 | 0 |
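The top-ranked snippet builds the zone table directly with shapely and pandas; a parameterized sketch of the same idea, where the file path and the name of the identifier property become arguments (the helper name and its defaults are illustrative, not taken from the dataset):

```python
import json

import pandas as pd
from shapely.geometry import shape


def zones_from_geojson(path, id_property="id"):
    # Load a GeoJSON FeatureCollection and return one row per feature:
    # its shapely geometry plus the chosen identifier property.
    with open(path) as f:
        features = json.load(f)["features"]
    return pd.DataFrame({
        "poly": [shape(feat["geometry"]) for feat in features],
        "id": [feat["properties"][id_property] for feat in features],
    })
```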
Appends a new column named 'field_name' to map_data. The data is read from json_file. If the flag single_point_per_zone is set to True, only a single count per polygon is read. | def addJsonFileToMapData(json_file, field_name, map_data, single_point_per_zone=False):
# Read the json file
json_data = pd.io.json.read_json(json_file)
json_data['points'] = json_data.apply(lambda row: Point(row.coords), axis=1)
# Loop over all polygons in the map.
poly_counts = []
for polygon in map_data['poly']:
counts = 0
# Loop over all points in the json data.
for point_count in json_data[['points', 'count']].values:
point = point_count[0]
count = point_count[1]
if polygon.contains(point):
counts += float(count)
if single_point_per_zone:
break
poly_counts.append(counts)
map_data = pd.merge(map_data,
pd.DataFrame({'poly': map_data['poly'],
field_name: poly_counts}),
on='poly')
return map_data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def geojson2postgis(self, filepath, table_name, geo_type):\n map_data = gpd.GeoDataFrame.from_file(filepath)\n # Maybe you want to change link address\n link = \"postgresql://{0}:{1}@{3}:5432/{2}\".format(self.username, self.password, self.dbname, self.host)\n engine = create_engine(link, encoding='utf-8')\n map_data = self.dict_to_json(map_data)\n map_data['geometry'] = map_data['geometry'].apply(lambda x: WKTElement(x.wkt, 4326))\n # Maybe you want to change 'replace' to 'append' in the future\n map_data.to_sql(\n name=table_name,\n con=engine,\n if_exists='replace',\n dtype={'geometry': Geometry(geometry_type=geo_type, srid=4326)}\n )",
"def _parse_and_store_geojson(filename):\n ds = DataSource(filename)\n _sanity_check_datasource(ds)\n\n logger.info('Data file %s was opened', ds.name)\n lm = LayerMapping(WegStuk, ds, MAPPING)\n\n with transaction.atomic():\n WegStuk.objects.all().delete()\n lm.save(strict=True, verbose=False)\n\n logger.info('Travel time dataset was updated.')",
"def add_geo_shape(self, field, point, distance):\n from haystack.utils.geo import ensure_point, ensure_distance\n self.geo_shape = {\n 'field': field,\n 'point': ensure_point(point),\n 'distance': ensure_distance(distance),\n }",
"def array2D_to_geoJson(geoJsonFileName, array2d,\n layerName=\"BuildingID\",fieldName=\"BuildingID\"):\n \n memdrv = gdal.GetDriverByName('MEM')\n src_ds = memdrv.Create('', array2d.shape[1], array2d.shape[0], 1)\n band = src_ds.GetRasterBand(1)\n band.WriteArray(array2d)\n\n drv = ogr.GetDriverByName(\"geojson\")\n dst_ds = drv.CreateDataSource(geoJsonFileName)\n dst_layer = dst_ds.CreateLayer(layerName, srs=None)\n\n fd = ogr.FieldDefn(fieldName, ogr.OFTInteger)\n dst_layer.CreateField(fd)\n dst_field = 0\n\n gdal.Polygonize(band, None, dst_layer, dst_field, [], callback=None)\n \n return True",
"def to_geojson(self, filename, varname='data'):\n\n geojsondata = {\n \"type\": \"FeatureCollection\",\n \"features\": [\n {\n \"type\": \"Feature\",\n \"geometry\": {\n \"type\": \"Point\",\n \"coordinates\": [lon, lat],\n },\n \"properties\": {\"field\": field, \"weight\": weight},\n } for lon, lat, field, weight in zip(self.x, self.y, self.field, self.weight)]\n }\n\n try:\n with open(os.path.join(filename), 'w') as f:\n f.write(\"\".join((\"var \", varname, \" = \")))\n out = json.dumps(geojsondata, indent=2, separators=(',', ': '))\n f.write(out)\n\n except FileNotFoundError:\n logger.error(\"Directory {} does not exist\".format(os.path.basename(filename)))\n raise FileNotFoundError('Directory does not exist')",
"def save_to_geojson(self, topology_map, filename):",
"def populateNewFields(nadPoints):\n with arcpy.da.UpdateCursor(nadPoints,\n ['SHAPE@X', 'SHAPE@Y', 'longitude', 'latitude', 'Source'],\n spatial_reference=arcpy.SpatialReference(4326)) as cursor:\n for row in cursor:\n row[2] = row[0]\n row[3] = row[1]\n row[4] = 'Utah AGRC'\n cursor.updateRow(row)",
"def emptyGeoJson( filename):\r\n\r\n filename = dataDir + filename\r\n with open(filename) as data_file:\r\n gjdata = json.load(data_file)\r\n\r\n gjdata['features'] = []\r\n\r\n with open(filename, \"w\") as outfile:\r\n json.dump(gjdata, outfile)",
"def geo_split(file_geo1):\n with open(file_geo1, 'r') as inp, open(new_geo_out, 'w') as outp:\n json_decode = json.load(inp)\n result = []\n for loca in json_decode:\n lat_lon = loca['geo'].split(\",\")\n result.append({\n \"ipv4\": loca[\"ipv4\"],\n \"lat\": lat_lon[0],\n \"lon\": lat_lon[1]\n })\n data = json.dumps(result)\n outp.write(data)\n with open(new_geo_out, 'r') as f:\n geosplit = json.load(f)\n return geosplit",
"def newColumn (layer,FieldName,DataType):\n # Check if field already exists\n if layer.fields().indexFromName(FieldName)==-1:\n caps = layer.dataProvider().capabilities()\n if caps & QgsVectorDataProvider.AddAttributes:\n res = layer.dataProvider().addAttributes([QgsField(FieldName,DataType)])\n print(\"New field \\\"{}\\\" added\".format(FieldName))\n # Update to propagate the changes\n layer.updateFields()\n else:\n print(\"Field \\\"{}\\\" already exists.\".format(FieldName))",
"def createEmptyMapData():\n with open('data/taxzone.json', 'r') as f:\n taxzones = json.load(f)\n\n polygons_shape = [shape(feature['geometry']) for feature in taxzones['features']]\n names = [feature['properties']['id'] for feature in taxzones['features']]\n map_data = pd.DataFrame({'poly': polygons_shape, 'id': names})\n\n return map_data",
"def writeGeoJson(self, file):\n with open(file, 'w') as f:\n json.dump(self._geojson, f)",
"def to_geojson(self, filename, varname='results', levels=None):\n\n llon, llat = np.meshgrid(self.x, self.y)\n contoursfield = cntr.Cntr(llon, llat, self.analysis)\n if levels is None:\n # By default we represent 10 levels from min to max\n levels = np.linspace(self.analysis.min(), self.analysis.max(), 10)\n\n geojsonfield = {\n \"type\": \"FeatureCollection\",\n \"features\": [\n {\n \"type\": \"Feature\",\n \"geometry\": {\n \"type\": \"MultiPolygon\",\n \"coordinates\": [[[[lon, lat] for lon, lat in seg] for seg in\n contoursfield.trace(level)[:len(contoursfield.trace(level)) // 2]]],\n },\n \"properties\": {\"field\": str(level)},\n } for level in levels]\n }\n\n try:\n with open(os.path.join(filename), 'w') as f:\n f.write(\"\".join((\"var \", varname, \" = \")))\n out = json.dumps(geojsonfield, indent=2, separators=(',', ': '))\n f.write(out)\n\n except FileNotFoundError:\n logger.error(\"Directory {} does not exist\".format(os.path.basename(filename)))\n raise FileNotFoundError('Directory does not exist')",
"def add_geojson(map, geojson, style_function, name='states' ):\n\n folium.GeoJson(\n geojson,\n name=name,\n style_function=style_function\n ).add_to(map)",
"def map_csv_fields(self):\n etod_csv_fields = {\n 'ctry_id': None,\n 'obst_identifier': None,\n 'obst_name': None,\n 'lon_src': None,\n 'lat_src': None,\n 'agl': None,\n 'amsl': None,\n 'vert_uom': None,\n 'hor_acc': None,\n 'hor_acc_uom': None,\n 'vert_acc': None,\n 'vert_acc_uom': None,\n 'obst_type': None,\n 'lighting': None,\n 'marking': None,\n 'is_group': None,\n }\n\n for field in etod_csv_fields:\n try:\n etod_csv_fields[field] = etod_map[self.ctry_short_name]['fields'][field]\n except KeyError:\n etod_csv_fields[field] = None\n\n self.field_map = etod_csv_fields",
"def prepare(dp: frictionless.package.Package, name: str):\n data = read_datapackage(dp)\n data[\"fid\"] = name + \"_\" + data[ID].astype(str)\n\n spatial = gpd.GeoDataFrame(\n data[\"fid\"],\n columns=[\"fid\"],\n geometry=gpd.points_from_xy(data.longitude, data.latitude),\n crs=\"EPSG:4326\",\n )\n\n # Other fields to json\n def np_encoder(object):\n \"\"\"Source: https://stackoverflow.com/a/65151218.\"\"\"\n if isinstance(object, np.generic):\n return object.item()\n\n other_cols = [\n x for x in data.columns if x not in VALUE_VARS + SPATIAL_VARS + ID_VARS\n ]\n\n # Int64 to int\n data.loc[:, other_cols].loc[:, data[other_cols].dtypes == \"int64\"] = (\n data.loc[:, other_cols].loc[:, data[other_cols].dtypes == \"int64\"].astype(int)\n )\n data = data.replace({np.nan: None})\n data[\"fields\"] = data[other_cols].to_dict(orient=\"records\")\n data[\"fields\"] = data[\"fields\"].apply(lambda x: json.dumps(x, default=np_encoder))\n\n # Unpivoting\n data = data.melt(id_vars=ID_VARS, value_vars=VALUE_VARS)\n\n # Remove nan\n data = data.dropna()\n\n # Conversion\n enermaps_data = utilities.ENERMAPS_DF\n enermaps_data[\"fid\"] = data[\"fid\"]\n enermaps_data[\"value\"] = data[\"value\"]\n enermaps_data[\"variable\"] = data[\"variable\"]\n enermaps_data[\"fields\"] = data[\"fields\"]\n enermaps_data[\"unit\"] = UNIT\n enermaps_data[\"israster\"] = ISRASTER\n\n return enermaps_data, spatial",
"def _fast_append_profile_in_gdf(geodataframe, raster_path, force_crs_match=True):\n\n _check_presence_of_crs(geodataframe)\n if force_crs_match:\n with rio.open(raster_path) as raster:\n geodataframe = geodataframe.to_crs(crs=raster.crs.data)\n else:\n warnings.warn(\n \"The GeoDataFrame is not being reprojected. The clipping might be being performing on unmatching polygon to the raster.\"\n )\n\n zonal_gjson = rs.zonal_stats(\n geodataframe, raster_path, prefix=\"Type_\", geojson_out=True, categorical=True\n )\n\n zonal_ppt_gdf = gpd.GeoDataFrame.from_features(zonal_gjson)\n\n return zonal_ppt_gdf",
"def __geo_interface__(self):\r\n if HASARCPY:\r\n template = {\r\n \"type\": \"FeatureCollection\",\r\n \"features\": []\r\n }\r\n geom_type = self.geometry_type\r\n if geom_type.lower() == \"point\":\r\n geom_type = \"Point\"\r\n elif geom_type.lower() == \"polyline\":\r\n geom_type = \"LineString\"\r\n elif geom_type.lower() == \"polygon\":\r\n geom_type = \"Polygon\"\r\n df_copy = self.copy(deep=True)\r\n df_copy['geom_json'] = self.geometry.JSON\r\n df_copy['SHAPE'] = df_copy['geom_json']\r\n del df_copy['geom_json']\r\n for index, row in df_copy.iterrows():\r\n geom = row['SHAPE']\r\n del row['SHAPE']\r\n template['features'].append(\r\n {\"type\" : geom_type,\r\n \"geometry\" : pd.io.json.loads(geom),\r\n \"attributes\":row}\r\n )\r\n return pd.io.json.dumps(template)",
"def _add_field_feature_dtype(self, field_path, field_feature_dtype):\n feature_dtype_str = json.dumps(field_feature_dtype.descr)\n dtypes_grp = self._h5['{}/{}'.format(SETTINGS, FIELD_FEATURE_DTYPES_STR)]\n dtypes_grp.create_dataset(field_path, data=feature_dtype_str)",
"def make_map(filename, datadir):\n items = json.load(open(filename))\n geojson_items = []\n for item in items:\n geojson_items.append(get_linestring(item))\n with open(os.path.join(datadir, 'waze.geojson'), 'w') as outfile:\n geojson.dump(geojson.FeatureCollection(geojson_items), outfile)",
"def writeCountryCodeFile(self):\n try:\n geojson = requests.get(self.GEOJSON_URL).json()\n except:\n sys.exit('GeoJSON data unavailable at source.')\n \n country_mapping = {}\n for country in geojson['features']:\n iso_2 = country['properties']['ISO_A2']\n country_name = country['properties']['ADMIN']\n country_mapping.update({country_name: iso_2})\n \n with open('countryNameISO2.json', 'w') as file:\n json.dump(country_mapping, file)",
"def write_geojson(vec:gpd.GeoDataFrame, dest):\n\t\tdest = str(dest)\n\n\t\t# WGS 84\n\t\t#vec = vec.to_crs({'init': 'epsg:4326'})\n\n\t\tif os.path.isfile(dest):\n\t\t\tos.remove(dest)\n\t\t\t\n\t\tvec.to_file(dest, driver='GeoJSON', encoding='utf-8')",
"def load_from_geojson(self, filename_or_url):",
"def get_geojson(self, sql, context):\n result = self.db.query(sql).getresult()\n geo_objects = []\n\n for poly in result:\n poly = poly[0]\n geo_objects.append(geojson.Feature(geometry=geojson.loads(poly)))\n\n crs = {\n \"type\": \"name\",\n \"properties\": {\n \"name\": \"EPSG:\" + str(context[\"crs\"])\n }\n }\n collection = geojson.FeatureCollection(geo_objects, crs=crs)\n\n return {\n 'type': 'result',\n 'result': geojson.dumps(collection)\n }",
"def addproperties_json(source, mortspd):\n with open(source, encoding=\"utf-8\",mode=\"r\") as f: # load boundaries\n boundaries = json.load(f)\n \n\n for regionBoundary in boundaries['features']: # get nb murdered by region\n del regionBoundary['properties']['admin1Pcod']\n del regionBoundary['properties']['admin1RefN']\n \n regionBoundary['properties']['Departement'] = regionBoundary['properties']['admin1Name']\n \n currentRegion = regionBoundary['properties']['Departement']\n if currentRegion in mortspd:\n regionBoundary['properties']['Morts'] = mortspd[currentRegion]\n \n else: \n regionBoundary['properties']['Morts'] = 0 \n continue\n return boundaries",
"def create_map(data_file):\n\n\t# Define a type of GeoJSON\n\tgeo_map = {\"type\": \"FeatureCollection\"}\n\t# Define list to collect each point to graph\n\titem_list = []\n\n\t# Iterate over our data to create GeoJSON doc\n\tfor index, line in enumerate(data_file):\n\t\t# Skip any zero coordinates\n\t\tif line['X'] == '0' or line['Y'] == '0':\n\t\t\tcontinue\n\t\t# New dict for every iteration\n\t\tdata = {}\n\t\t# Assign line items to json fields\n\t\tdata['type'] = 'Feature'\n\t\tdata['id'] = index\n\t\tdata['properties'] = {'title': line['Category'],\n\t\t 'description': line['Descript'],\n\t\t 'date': line['Date']}\n\t\tdata['geometry'] = {'type': 'Point',\n\t\t 'coordinates': (line['X'], line['Y'])}\n\t\t# Add data dict to our itemlist\n\t\titem_list.append(data)\n\n\t# for each point in our item list we add a point to dict\n\tfor point in item_list:\n\t\tgeo_map.setdefault('features', []).append(point)\n\t# write a file, upload to gist.github.com\n\twith open('file_sf.geojson', 'w') as f:\n\t\tf.write(geojson.dumps(geo_map))",
"def data_geojson(self):\n coordinates = self.value\n if not coordinates:\n return\n\n title = getattr(self.context, 'title', '') or ''\n description = getattr(self.context, 'description', '') or ''\n\n geo_json = {\n 'type': 'FeatureCollection',\n 'features': [\n {\n 'type': 'Feature',\n 'properties': {\n 'popup': u'<h3>{0}</h3><p>{1}</p>'.format(\n safe_unicode(title),\n safe_unicode(description)\n )\n },\n 'geometry': {\n 'type': 'Point',\n 'coordinates': [\n coordinates[1], # lng\n coordinates[0] # lat\n ]\n }\n },\n ]\n }\n\n if self.mode == 'input':\n properties = geo_json['features'][0]['properties']\n properties['editable'] = True\n properties['no_delete'] = True\n properties['latinput'] = u'#{0}'.format(self.id_input_lat)\n properties['lnginput'] = u'#{0}'.format(self.id_input_lng)\n\n return json.dumps(geo_json)",
"def add_property(path, key, value):\n with open(path) as fp:\n features = geojson.loads(fp.read())\n\n for feature in features.features:\n feature.properties[key] = value\n\n with open(path, 'w') as fp:\n fp.write(geojson.dumps(features))",
"def test_write_json_object_properties():\n data = \"\"\"\n{\n \"type\": \"FeatureCollection\",\n \"features\": [\n {\n \"geometry\": {\n \"type\": \"Polygon\",\n \"coordinates\": [\n [\n [\n 87.33588,\n 43.53139\n ],\n [\n 87.33588,\n 45.66894\n ],\n [\n 90.27542,\n 45.66894\n ],\n [\n 90.27542,\n 43.53139\n ],\n [\n 87.33588,\n 43.53139\n ]\n ]\n ]\n },\n \"type\": \"Feature\",\n \"properties\": {\n \"upperLeftCoordinate\": {\n \"latitude\": 45.66894,\n \"longitude\": 87.91166\n },\n \"tricky\": \"{gotcha\"\n }\n }\n ]\n}\n\"\"\"\n data = Feature.from_dict(**json.loads(data)[\"features\"][0])\n tmpdir = tempfile.mkdtemp()\n filename = os.path.join(tmpdir, \"test.json\")\n with fiona.open(\n filename,\n \"w\",\n driver=\"GeoJSON\",\n schema={\n \"geometry\": \"Polygon\",\n \"properties\": {\"upperLeftCoordinate\": \"str\", \"tricky\": \"str\"},\n },\n ) as dst:\n dst.write(data)\n\n with fiona.open(filename) as src:\n ftr = next(iter(src))\n props = ftr[\"properties\"]\n assert props[\"upperLeftCoordinate\"][\"latitude\"] == 45.66894\n assert props[\"upperLeftCoordinate\"][\"longitude\"] == 87.91166\n assert props[\"tricky\"] == \"{gotcha\"",
"def geojson_zillow_fc(rows):\n feature_collection = []\n for row in rows:\n if row[2] == \"Boston\":\n f = Feature(geometry=Polygon(\\\n [parse_zillow_coordinates(row[6])]),\n id = row[4],\n properties={\"state\": str(row[0]),\n \"county\": str(row[1]),\n \"city\": str(row[2]),\n \"neighborhood\": \\\n str(row[3]),\n \"regionid\": str(row[4]),\n \"total_potins\": \\\n str(row[5]),\n \"mean_interior_score\": \\\n row[7],\n \"sd_interior_score\":\\\n row[8],\n \"max_int_score\":\\\n row[9],\n \"min_int_score\":\\\n row[10],\n \"region_property_count\":\\\n row[11],\n \"projectedfor2018\":\\\n row[17],\n })\n feature_collection.append(f)\n\n else:\n print(\"City: {}\".format(row[2]))\n\n fc = FeatureCollection(feature_collection)\n return fc"
] | [
"0.561637",
"0.55059373",
"0.5413647",
"0.52885896",
"0.5211857",
"0.5193927",
"0.51066226",
"0.5084411",
"0.50838536",
"0.50032544",
"0.49953464",
"0.49931327",
"0.49827933",
"0.4979779",
"0.4963164",
"0.49228954",
"0.49089125",
"0.4892675",
"0.48825735",
"0.48774529",
"0.48758775",
"0.48755366",
"0.48737693",
"0.4842182",
"0.48389238",
"0.4833909",
"0.48143587",
"0.48137662",
"0.48111278",
"0.4809574"
] | 0.8169456 | 0 |
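The matched function aggregates point counts into whichever polygon contains them via a nested loop; the same point-in-polygon bookkeeping on toy data, kept deliberately small (zone names and coordinates are made up for the example):

```python
from shapely.geometry import Point, Polygon

# Two toy zones and three weighted points; counts are summed per zone,
# mirroring the polygon.contains(point) loop in the snippet above.
zones = {
    "A": Polygon([(0, 0), (2, 0), (2, 2), (0, 2)]),
    "B": Polygon([(2, 0), (4, 0), (4, 2), (2, 2)]),
}
points = [(Point(1, 1), 5.0), (Point(3, 1), 2.0), (Point(1.5, 0.5), 1.0)]

totals = {name: sum(c for p, c in points if poly.contains(p)) for name, poly in zones.items()}
print(totals)  # {'A': 6.0, 'B': 2.0}
```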
A message handler method may simply be a method with some kwargs. The kwargs will be given all incoming pipeline data, the bus and the incoming payload. | def MessageHandlerMethod(**kwargs):
data: dict = kwargs['data']
bus: AbstractPikaBus = kwargs['bus']
payload: dict = kwargs['payload']
print(payload)
if payload['reply']:
payload['reply'] = False
bus.Reply(payload=payload) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def handle_message(**payload):\n handler_instance = message.MessageHandler(payload)\n handler_instance.handle()",
"def _incoming_handler(self, context, message, fake_reply):\r\n return self._map[message.method](context, fake_reply, *message.args, **message.kwargs)",
"def _handler(self, message):\n\n data = pickle.loads(message['data'])\n\n if not data[2]:\n # empty method call; bail out\n return\n\n # call the function and respond to the proxy object with return value\n uuid = data[0]\n proxy = data[1]\n func = getattr(self, data[2])\n result = (uuid, func(*data[3], **data[4]))\n self._redis.publish('proxy:%s' % proxy, pickle.dumps(result))",
"def process(self, message: Message, **kwargs: Any) -> None:",
"def processMessage(self, *args, **kwargs):\r\n pass",
"def __call__(self, *args, **kwargs):\n return self.method(self.receiver, *args, **kwargs)",
"def __call__(self, *args, **kwargs):\n\t\treturn self.handler()(self.request(kwargs))",
"def handle(self, body):\n event_type = body['event_type']\n method_name = event_type.replace('.', '_')\n try:\n method = getattr(self, method_name)\n method(body)\n except AttributeError:\n LOG.debug('%s needs a method called `%s` to handle %s' %\n (self.__class__.__name__, method_name, event_type))",
"def process_method(self, method, args, kwargs, request_id=None, **context):\n return method(*([] if args is None else args), **({} if kwargs is None else kwargs))",
"def __call__(self,data):\n\n log.debug('got data: %s' % (len(data)))\n\n # if we don't have args yet, these must be them\n if not self.args:\n self.parse_args(data)\n\n else:\n # we've already got args, must\n # be a message\n self.handle_send(data)",
"def call(self, method, name, params=None, payload=None, **kwds):",
"def incoming(self, msg):\n hdr = msg.header\n\n # Signals:\n if hdr.message_type is MessageType.signal:\n key = (hdr.fields.get(HeaderFields.path, None),\n hdr.fields.get(HeaderFields.interface, None),\n hdr.fields.get(HeaderFields.member, None)\n )\n cb = self.signal_callbacks.get(key, None)\n if cb is not None:\n cb(msg.body)\n return\n\n # Method returns & errors\n reply_serial = hdr.fields.get(HeaderFields.reply_serial, -1)\n reply_handle = self.awaiting_reply.pop(reply_serial, None)\n if reply_handle is not None:\n if hdr.message_type is MessageType.method_return:\n reply_handle.set_result(msg.body)\n return\n elif hdr.message_type is MessageType.error:\n reply_handle.set_exception(DBusErrorResponse(msg))\n return\n\n if self.on_unhandled:\n self.on_unhandled(msg)",
"def process(self, message: Message, **kwargs: Any) -> None:\n pass",
"def messageHandler(self, source, message, messageId):\n try:\n type, params, data = message.split(':',2)\n except:\n # Not a real message\n return\n \n try:\n getattr(self, \"thive_%s\" % type)(messageId, params.split(), data)\n except exceptions.AttributeError, c:\n raise c\n print \"[HIVE] No method bound for command '%s'\" % type",
"def __call__(self, *args, **params):\n\t\treturn self.send(params)",
"def handle(self, message):",
"def handle(self, args):\n\n logger.debug('ARGS: %s', args)\n args = json.loads(args)\n\n try:\n logger.info('Handling %s request.' % args['method'])\n method = 'handle_' + args['method'].lower()\n if callable(getattr(self, method, None)):\n return operator.methodcaller(method, args)(self)\n else:\n return self.error('Invalid method for this endpoint', httplib.METHOD_NOT_ALLOWED)\n except ValueError as e:\n msg = 'ValueError: %s' % e.message\n return self.error(msg, httplib.BAD_REQUEST)\n except splunk.RESTException as e:\n return self.error('RESTexception: %s' % e, httplib.INTERNAL_SERVER_ERROR)\n except Exception as e:\n msg = 'Unknown exception: %s' % e\n logger.exception(msg)\n return self.error(msg, httplib.INTERNAL_SERVER_ERROR)",
"def handle(self, *args, **kwargs):\n raise NotImplementedError()",
"def make_new_handler(self, *args, **kwargs):",
"def on_bus_message(self, channel, method_frame, header_frame, body):\n\n try:\n # there are two messages that get broadcast that we really\n # don\"t care about. They have to do with feed synchronization\n # and other internal book-keeping\n if method_frame.routing_key in self.capture_events:\n event = {\n \"content_type\": header_frame.content_type,\n \"routing_key\": method_frame.routing_key,\n \"body\": body\n }\n self.logger.debug(\"Received Message: %s - %s\" % (header_frame.content_type, method_frame.routing_key))\n self.processor_pool.apply_async(process_event, (self.event_processor, event))\n\n else:\n self.logger.debug(\"Unknown message info: %s\" % method_frame.routing_key)\n\n except:\n self.logger.exception(\"Error processing bus message\")",
"def call(self, *args, **kwargs):",
"def on_message(client, userdata, msg):\n TOPIC_DISPATCH_DICTIONARY[msg.topic][\"method\"](msg)",
"def handle_message(self, message):\n\n\t\tself.log.debug(\"%s handle_message %s\", self.name, message)\n\n\t\tif message[\"Type\"] == \"command\":\n\t\t\ttry:\n\t\t\t\tcommand_callable = \"command_%s\" % message[\"Message\"][\"command\"]\n\t\t\t\tif hasattr(self, command_callable) and callable(getattr(self, command_callable)):\n\t\t\t\t\tcall = getattr(self, command_callable)\n\t\t\t\t\tcall(message[\"Message\"][\"arguments\"])\n\t\t\texcept Exception as e:\n\t\t\t\tself.log.error(\"%s invalid command %s %s\", self.name, message, e)",
"def invoke(self, event_args, *args, **kwargs):\n pass # pragma: no cover",
"def _dispatch(self, body):\n pass",
"def Message(self, *args, **kwargs):\n pass",
"def message_handler(self, dest, source, message):\n pass",
"def __call__(self, data, **kwargs):",
"def __call__(self, details: CallableDetails, arg: CallableArg, obj: Any) -> Handler:\n if not details.is_async:\n raise HandlerFactoryError(f\"Object {details.obj!r} is not async callable\")\n\n subject_name: Optional[str]\n if self.subject_as_keyword or not arg.is_positional:\n subject_name = arg.name\n else:\n subject_name = None\n\n if self.arg_strict or details.has_kwargs:\n allow_args = None\n else:\n allow_args = {arg.name for arg in details.args}\n\n return CallableHandler(\n obj=obj,\n fn=details.obj,\n key=arg.type,\n subject_name=subject_name,\n arg_map=self.arg_map,\n allow_args=allow_args,\n )",
"def on_message(self, handler: Callable[[Request], Coroutine[Any, Any, Any]]):\n self.on_message_handler = handler"
] | [
"0.70364314",
"0.68466866",
"0.67132837",
"0.65455157",
"0.65276337",
"0.6499453",
"0.64698917",
"0.64215446",
"0.64198",
"0.63106513",
"0.6194863",
"0.61529726",
"0.6119505",
"0.6105504",
"0.6038387",
"0.60153407",
"0.59553987",
"0.59527606",
"0.5943922",
"0.5909026",
"0.5869171",
"0.5828906",
"0.5810934",
"0.5807503",
"0.5804422",
"0.5797844",
"0.57904345",
"0.5768044",
"0.5763654",
"0.5753036"
] | 0.7705003 | 0 |
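The handler above relies on being invoked with keyword arguments only; a bus-agnostic sketch of that calling convention (the `dispatch` helper and `log_handler` are illustrative — the real PikaBus API is not reproduced here):

```python
def dispatch(handler, data, bus, payload):
    # Invoke a handler purely by keyword, as the message bus above does.
    handler(data=data, bus=bus, payload=payload)


def log_handler(**kwargs):
    print(kwargs["payload"])


dispatch(log_handler, data={}, bus=None, payload={"reply": False})
```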
derivative of tanh(x) = 1 - tanh(x)^2 | def d_tanh(x):
return 1. - np.power(np.tanh(x), 2) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def d_tanh(x):\n\n return 1 - x.tanh().pow(2)",
"def d_tanh(x:float)->float:\n if not isinstance(x, numbers.Real):\n raise TypeError(\"Input value of invalid type\")\n\n return(1 - math.pow(math.tanh(x), 2))",
"def tanh(x):\n return (1 - e ** (-2*x))/ (1 + e ** (-2*x))",
"def tanh(x):\n return (1- power(e,(-2*x))) / (1 + power(e,(-2*x)))",
"def tanh(x):\n return 0.0",
"def derived_tanh(x):\n return 1 - tanh(x)",
"def derived_tanh(x):\n return 1 - tanh(x)",
"def tanh(x):\r\n ex = np.exp(x)\r\n emx = np.exp(-x)\r\n return (ex - emx) / (ex + emx)",
"def activ_fn_derivative(z):\n return 1 - np.square(np.tanh(z))",
"def grad_tanh(self):\r\n return 1 - np.square(self.tanh(self.x))",
"def tanh(x):\n raise NotImplementedError",
"def grad_tanh(self):\n grad = 1 - self.tanh(self.x) ** 2\n return grad",
"def tanh_grad(self, X):\n return 1-self.tanh(X)**2",
"def tanh(X):\n\tif isinstance(X,np.ndarray):\n\t\treturn (2.0/(1.0+np.exp(-(2*X))))-1\n\telse:\n\t\tX=np.array(X)\n\t\treturn tanh(X)",
"def sigmoid_derivative(x):\n return x * (1-x)",
"def tanh(self, X):\n return (np.exp(X)-np.exp(-X))/(np.exp(X)+np.exp(-X))",
"def sigmoid_derivative(x):\n return x * (1.0 - x)",
"def sigmoid_derivative(x):\n\n return sigmoid(x) * (1 - sigmoid(x))",
"def tanh(x):\r\n # see decorator for function body\r",
"def tanh(self, z, deriv=False):\n return np.tanh(z) if not deriv else 1 - np.square(np.tanh(z))",
"def derivative_sigmoid(x):\n return x * (1 - x)",
"def derivative_sigmoid(x):\n return x * (1 - x)",
"def tanh_grad(z):\n return 1.7159 * 2 / 3.0 * (1 - (np.tanh(2 / 3.0 * z)) ** 2)",
"def grad_tanh(self):\n return (1-np.tanh(self.x)*np.tanh(self.x))\n raise NotImplementedError(\"tanh gradient not implemented\")",
"def tanh(x:float)->float:\n if not isinstance(x, numbers.Real):\n raise TypeError(\"Input value of invalid type\")\n\n return(math.tanh(x))",
"def tan(x):\n return 0.0",
"def tanh(data):\n return _make.tanh(data)",
"def _derivative_(self, x, diff_param=None):\n return 2*exp(-x**2)/sqrt(pi)",
"def tanh(d: D) -> NumDict:\n\n return (2 * sigmoid(d)) - 1",
"def derivative_activation(z):\n return activation(z) * (1 - activation(z))"
] | [
"0.83941805",
"0.80225915",
"0.7937077",
"0.78862315",
"0.78078306",
"0.7777195",
"0.7777195",
"0.7744556",
"0.75239104",
"0.7510693",
"0.74799824",
"0.746419",
"0.74618053",
"0.7429449",
"0.73665565",
"0.7362666",
"0.7334647",
"0.72786546",
"0.727234",
"0.7268931",
"0.7219215",
"0.7219215",
"0.7165723",
"0.71402425",
"0.7101533",
"0.70927775",
"0.70315456",
"0.70066506",
"0.700157",
"0.69966966"
] | 0.8256705 | 1 |
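Since tanh'(x) = 1 - tanh(x)^2, the matched implementation can be sanity-checked against a central finite difference; a quick numerical check (the test points and step size are arbitrary choices for the example):

```python
import numpy as np


def d_tanh(x):
    return 1.0 - np.power(np.tanh(x), 2)


x = np.array([-2.0, -0.5, 0.0, 0.5, 2.0])
h = 1e-6
numeric = (np.tanh(x + h) - np.tanh(x - h)) / (2 * h)
print(np.max(np.abs(numeric - d_tanh(x))))  # on the order of 1e-10
```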
Gets the operational_state of this ConnectionEndPoint. | def operational_state(self) -> str:
return self._operational_state | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_status(self):\n return self._conn_state",
"def get_connection_state(self):\n return self.connection_state",
"def state(self):\n return pn_connection_state(self._impl)",
"def connection_status(self):\n return self._connection_status",
"def status(self) -> Optional[pulumi.Input[Union[str, 'PrivateEndpointServiceConnectionStatus']]]:\n return pulumi.get(self, \"status\")",
"def status(self) -> Optional[pulumi.Input[Union[str, 'PrivateEndpointServiceConnectionStatus']]]:\n return pulumi.get(self, \"status\")",
"def status(self) -> Optional[pulumi.Input[Union[str, 'PrivateEndpointServiceConnectionStatus']]]:\n return pulumi.get(self, \"status\")",
"def status(self) -> Optional[pulumi.Input[Union[str, 'PrivateEndpointServiceConnectionStatus']]]:\n return pulumi.get(self, \"status\")",
"def state(self):\n return self.device.status(station=self.station_number)",
"def state(self):\n return STATE_ON if self.is_on else STATE_OFF",
"def state(self):\n return STATE_ON if self.is_on else STATE_OFF",
"def state(self):\n return STATE_ON if self.is_on else STATE_OFF",
"def get_state(self):\n return self._env.get_state()",
"def GetAdbConnectionStatus(self):\n if not self._adb_port:\n return None\n\n return self._device_information[\"adb_status\"]",
"def operational_state(self, operational_state: str):\n allowed_values = [\"DISABLED\", \"ENABLED\"] # noqa: E501\n if operational_state not in allowed_values:\n raise ValueError(\n \"Invalid value for `operational_state` ({0}), must be one of {1}\"\n .format(operational_state, allowed_values)\n )\n\n self._operational_state = operational_state",
"def state(self) -> Any:\n return self._state",
"def get_state(self):\n return self._state",
"def get_state(self):\n return self._state",
"def get_state(self):\n return self._state",
"def get_state(self):\n return self._state",
"def get_state(self):\n return self._state",
"def private_link_service_connection_state(self) -> Optional['outputs.PrivateLinkServiceConnectionStateResponse']:\n return pulumi.get(self, \"private_link_service_connection_state\")",
"def private_link_service_connection_state(self) -> Optional[pulumi.Input['PrivateLinkServiceConnectionStateArgs']]:\n return pulumi.get(self, \"private_link_service_connection_state\")",
"def state(self):\n return self._state",
"def state(self):\n return self._state",
"def state(self):\n return self._state",
"def state(self):\n return self._state",
"def state(self):\n return self._state",
"def state(self):\n return self._state",
"def state(self):\n return self._state"
] | [
"0.68319815",
"0.68308073",
"0.6515286",
"0.64101964",
"0.6124702",
"0.6124702",
"0.6124702",
"0.6124702",
"0.6071668",
"0.5983941",
"0.5983941",
"0.5983941",
"0.59587127",
"0.59099543",
"0.5899447",
"0.58726335",
"0.5847335",
"0.5847335",
"0.5847335",
"0.5847335",
"0.5847335",
"0.58449984",
"0.5816909",
"0.58005977",
"0.58005977",
"0.58005977",
"0.58005977",
"0.58005977",
"0.58005977",
"0.58005977"
] | 0.75950426 | 0 |
Sets the operational_state of this ConnectionEndPoint. | def operational_state(self, operational_state: str):
allowed_values = ["DISABLED", "ENABLED"] # noqa: E501
if operational_state not in allowed_values:
raise ValueError(
"Invalid value for `operational_state` ({0}), must be one of {1}"
.format(operational_state, allowed_values)
)
self._operational_state = operational_state | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def operational_status(self, operational_status):\n\n self._operational_status = operational_status",
"def operational_status(self, operational_status):\n\n self._operational_status = operational_status",
"def operation_state(self, operation_state):\n\n self._operation_state = operation_state",
"def SetConnectionStatus(self, state, info):\n self.connection_state = state\n self.connection_info = info",
"def set_state(self, state):\n self.state = state",
"def set_state(self, state):\n #print(\"ComponentBase.set_state\")\n for k,v in state.items():\n #print(\" Set {:14s} to {:s}\".format(k,str(v)))\n if k == \"connectors\":\n for con_state in v:\n self.add_connector() \n self.connectors[-1].set_state(con_state)\n else:\n setattr(self, k, v)",
"def set_working_state(self):\n self.state = 0\n self.port = None",
"def setstate(self, state=None):\n self.state = state or Pdod(self.datadir + os.sep + 'state')\n if self.state and not 'joinedchannels' in self.state.data: self.state.data.joinedchannels = []",
"def set_state(self,state):\n self.__state = state",
"def state(self, state):\n\n self._state = state",
"def state(self, state):\n\n self._state = state",
"def state(self, state):\n\n self._state = state",
"def state(self, state):\n\n self._state = state",
"def state(self, state):\n\n self._state = state",
"def state(self, state):\n\n self._state = state",
"def state(self, state):\n\n self._state = state",
"def state(self, state):\n\n self._state = state",
"def state(self, state):\n\n self._state = state",
"def state(self, state):\n\n self._state = state",
"def state(self, state):\n\n self._state = state",
"def state(self, state):\n\n self._state = state",
"def state(self, state):\n\n self._state = state",
"def set_state(self, state):\n self._env.set_state(state)",
"def setState(self, state):\n self.state = state",
"def set_state(self, state: int):\n self.state = state",
"def state(self, state):\n self._state = state",
"def set_state(self, state: Any) -> None:\n raise NotImplementedError(\n 'This environment has not implemented `set_state()`.'\n )",
"def setState(self, state):\n assert self.isValidState(state)\n self._state = state",
"async def async_set_state(self, state):\n self._state = state",
"def state(self, state):\n allowed_values = [\"lost\", \"negotiating\", \"connected\", \"operational\", \"decommissioned\"]\n if state not in allowed_values:\n raise ValueError(\n \"Invalid value for `state`, must be one of {0}\"\n .format(allowed_values)\n )\n self._state = state"
] | [
"0.6623259",
"0.6623259",
"0.6101698",
"0.5693205",
"0.56924415",
"0.5691022",
"0.56134856",
"0.5601487",
"0.5581799",
"0.5563682",
"0.5563682",
"0.5563682",
"0.5563682",
"0.5563682",
"0.5563682",
"0.5563682",
"0.5563682",
"0.5563682",
"0.5563682",
"0.5563682",
"0.5563682",
"0.5563682",
"0.55585635",
"0.55497503",
"0.55466264",
"0.55304515",
"0.5529952",
"0.54998195",
"0.5490826",
"0.5488242"
] | 0.70021296 | 0 |
Gets the termination_direction of this ConnectionEndPoint. | def termination_direction(self) -> str:
return self._termination_direction | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def termination_direction(self, termination_direction: str):\n allowed_values = [\"BIDIRECTIONAL\", \"SINK\", \"SOURCE\", \"UNDEFINED_OR_UNKNOWN\"] # noqa: E501\n if termination_direction not in allowed_values:\n raise ValueError(\n \"Invalid value for `termination_direction` ({0}), must be one of {1}\"\n .format(termination_direction, allowed_values)\n )\n\n self._termination_direction = termination_direction",
"def getDirection(self):\n return self.listener.direction",
"def direction(self) -> int:\n return self._direction",
"def get_direction(self):\r\n return self.__direction",
"def get_direction(self):\n return self.direction",
"def direction(self):\n return self._direction.copy()",
"def direction(self) -> Optional[str]:\n return self._direction",
"def get_direction(self) -> int: \r\n if time.time() > self.stop_timer:\r\n return Directions.stop\r\n else:\r\n return self.direction",
"def connection_port_direction(self) -> str:\n return self._connection_port_direction",
"def direction(self):\n return self._dir",
"def direction(self):\n return None if not bool(self.relation) else (self.s_end <= self.o_start)",
"def direction(self) -> np.ndarray:\n return self._direction",
"def direction(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"direction\")",
"def direction(self) -> str:\n return pulumi.get(self, \"direction\")",
"def get_direction(self):\n return self.actual_coordinates[2]",
"def directionRight(self):\n return self.__directionRight",
"def termination(self):\n return self.__termination",
"def getDirection(self):\n return self.ray.direction",
"def observation_direction(self) -> Optional[ObservationDirection]:\n return map_opt(\n ObservationDirection, self._get_property(OBSERVATION_DIRECTION_PROP, str)\n )",
"def direction(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"direction\")",
"def current_direction(self):\n return self._attributes.get(\"current_direction\")",
"def direction(self):\n return self.cfg.direction",
"def termination_state(self) -> str:\n return self._termination_state",
"def getDirection(self, direction: str):\n return direction",
"def get_direction_to_right(self, direction):\r\n return direction_to_right[direction]",
"def direction(self):\n _direction = self._custom.get(\"direction\")\n if _direction is not None:\n return _direction\n\n _direction = self._infer_direction()\n self._custom[\"direction\"] = _direction\n\n return _direction",
"def traffic_direction(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"traffic_direction\")",
"def get_direction(self, start_direction):\n # get all visually connected links\n if not self.directions:\n directions = {}\n neighbors = self.get_linked_neighbors()\n nodes = [\n direction\n for direction, neighbor in neighbors.items()\n if hasattr(neighbor, \"node_index\")\n ]\n\n if len(nodes) == 2:\n # prefer link to these two nodes\n for direction in nodes:\n directions[direction] = REVERSE_DIRECTIONS[direction]\n elif len(neighbors) - len(nodes) == 1:\n for direction in neighbors:\n directions[direction] = REVERSE_DIRECTIONS[direction]\n else:\n raise MapParserError(\n \"must have exactly two connections - either directly to \"\n \"two nodes or connecting directly to one node and with exactly one other \"\n f\"link direction. The neighbor(s) in directions {list(neighbors.keys())} do \"\n \"not fulfill these criteria.\",\n self,\n )\n\n self.directions = directions\n return self.directions.get(start_direction)",
"def getDirection(self):\n if 'N' in str(self.trip_update.trip.trip_id):\n direction = 'northbound'\n if 'S' in str(self.trip_update.trip.trip_id):\n direction = 'southbound'\n return direction",
"def direction(self):\n return atan2d(self.y, self.x)"
] | [
"0.6747025",
"0.6339703",
"0.6330633",
"0.6314831",
"0.6202958",
"0.61843795",
"0.61089504",
"0.6067965",
"0.60548645",
"0.5998903",
"0.5955845",
"0.59224766",
"0.58646476",
"0.5852054",
"0.5817432",
"0.5813375",
"0.5800287",
"0.57266474",
"0.5713044",
"0.57085085",
"0.57036185",
"0.567866",
"0.5619289",
"0.5587586",
"0.55626535",
"0.55186117",
"0.5492344",
"0.548159",
"0.5477178",
"0.54695094"
] | 0.8100644 | 0 |
Sets the termination_direction of this ConnectionEndPoint. | def termination_direction(self, termination_direction: str):
allowed_values = ["BIDIRECTIONAL", "SINK", "SOURCE", "UNDEFINED_OR_UNKNOWN"] # noqa: E501
if termination_direction not in allowed_values:
raise ValueError(
"Invalid value for `termination_direction` ({0}), must be one of {1}"
.format(termination_direction, allowed_values)
)
self._termination_direction = termination_direction | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_termination(self, termination):\n # FIXME should be internally accessible only?\n self.__termination = termination",
"def termination_direction(self) -> str:\n return self._termination_direction",
"def direction(self, direction):\n\n self._direction = direction",
"def set_direction(self, direction: str) -> None:\n if direction == \"forward\":\n self._bond.setDirection(self._deviceId, Directions.FORWARD)\n elif direction == \"reverse\":\n self._bond.setDirection(self._deviceId, Directions.REVERSE)\n self._attributes['current_direction'] = direction",
"def direction(self, direction):\n allowed_values = [\"supports\", \"does_not_support\"] # noqa: E501\n if direction not in allowed_values:\n raise ValueError(\n \"Invalid value for `direction` ({0}), must be one of {1}\" # noqa: E501\n .format(direction, allowed_values)\n )\n\n self._direction = direction",
"def terminated(self, terminated):\n\n self._terminated = terminated",
"def terminated(self, terminated):\n\n self._terminated = terminated",
"def terminated(self, terminated):\n\n self._terminated = terminated",
"async def async_set_direction(self, direction: str) -> None:\n if direction == DIRECTION_FORWARD:\n self._device.fan_dir = SENSEME_DIRECTION_FORWARD\n else:\n self._device.fan_dir = SENSEME_DIRECTION_REVERSE",
"def direction(self, direction):\n _api.check_in_list(['horizontal', 'vertical'], direction=direction)\n if hasattr(self, '_direction') and direction != self._direction:\n # remove previous artists\n self._selection_artist.remove()\n if self._interactive:\n self._edge_handles.remove()\n self._direction = direction\n self.new_axes(self.ax)\n if self._interactive:\n self._setup_edge_handles(self._handle_props)\n else:\n self._direction = direction",
"def setRobotDirection(self, direction):\n self.direction = direction\n #raise NotImplementedError",
"def setRobotDirection(self, direction):\n self.direction = direction\n #raise NotImplementedError",
"def setRobotDirection(self, direction):\n self.direction = direction",
"def setRobotDirection(self, direction):\n self.direction = direction",
"def set_port_direction(self, port, direction):\n\n if port == 1:\n self.__bus.write_byte_data(\n self.__ioaddress, self.IODIRB, direction)\n self.__port_b_direction = direction\n else:\n self.__bus.write_byte_data(\n self.__ioaddress, self.IODIRA, direction)\n self.__port_a_direction = direction\n return",
"def set_direction(self, new_dir):\n self.__direction = new_dir",
"def termination_state(self, termination_state: str):\n allowed_values = [\"LP_CAN_NEVER_TERMINATE\", \"LT_NOT_TERMINATED\", \"TERMINATED_SERVER_TO_CLIENT_FLOW\", \"TERMINATED_CLIENT_TO_SERVER_FLOW\", \"TERMINATED_BIDIRECTIONAL\", \"LT_PERMENANTLY_TERMINATED\", \"TERMINATION_STATE_UNKNOWN\"] # noqa: E501\n if termination_state not in allowed_values:\n raise ValueError(\n \"Invalid value for `termination_state` ({0}), must be one of {1}\"\n .format(termination_state, allowed_values)\n )\n\n self._termination_state = termination_state",
"def setDirection(self,stepDir = 2):\n pass",
"def optimization_force_direction(self, optimization_force_direction):\n\n self._optimization_force_direction = optimization_force_direction",
"def setdirection(self, *args, **kwargs):\n return _coordsys.coordsys_setdirection(self, *args, **kwargs)",
"def set_direction(self, direction: int) -> None: \r\n self.direction = direction\r\n if (direction == Directions.turn_left or\r\n direction == Directions.turn_right):\r\n self.stop_timer = time.time() + self.driving_time_turning\r\n else:\r\n self.stop_timer = time.time() + self.driving_time",
"def set_end(self, end_line):\n self.__end_line = end_line",
"def setDirection (self, ra, dec):\n self._response.setDirection(ra, dec)",
"def set_terminator (self, term):\r\n self.terminator = term",
"def setEnd(self, *args):\n return _libsbml.LineSegment_setEnd(self, *args)",
"def connection_port_direction(self, connection_port_direction: str):\n allowed_values = [\"BIDIRECTIONAL\", \"INPUT\", \"OUTPUT\", \"UNIDENTIFIED_OR_UNKNOWN\"] # noqa: E501\n if connection_port_direction not in allowed_values:\n raise ValueError(\n \"Invalid value for `connection_port_direction` ({0}), must be one of {1}\"\n .format(connection_port_direction, allowed_values)\n )\n\n self._connection_port_direction = connection_port_direction",
"def set_direction(self, direction):\n\n def same_axis(direction1, direction2):\n y_axis = [Direction.Y_POSITIVE, Direction.Y_NEGATIVE]\n x_axis = [Direction.X_POSITIVE, Direction.X_NEGATIVE]\n return ((direction1 in x_axis and direction2 in x_axis)\n or (direction1 in y_axis and direction2 in y_axis))\n\n if direction is None:\n return\n elif not same_axis(self.direction, direction):\n self.direction = direction",
"def set_direction(self, direction: str) -> None:\n self.wink.set_fan_direction(direction)",
"def set_terminating_ray(self, location):\n self._terminating_ray = location",
"def terminating_on(self, terminating_on):\n\n self._terminating_on = terminating_on"
] | [
"0.640824",
"0.63931453",
"0.56420517",
"0.5394109",
"0.53315324",
"0.52557427",
"0.52557427",
"0.52557427",
"0.5211987",
"0.51994586",
"0.5056376",
"0.5056376",
"0.5051297",
"0.5051297",
"0.5045381",
"0.5027594",
"0.4985935",
"0.4943825",
"0.49381512",
"0.49232072",
"0.48966873",
"0.48898247",
"0.48413134",
"0.47601172",
"0.47515503",
"0.47479823",
"0.47464487",
"0.47200117",
"0.467411",
"0.4655515"
] | 0.7977831 | 0 |
Gets the termination_state of this ConnectionEndPoint. | def termination_state(self) -> str:
return self._termination_state | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def termination(self):\n return self.__termination",
"def get_ssl_termination(self):\n return self.manager.get_ssl_termination(self)",
"def terminating_on(self):\n return self._terminating_on",
"def terminated(self):\n return self._terminated",
"def terminated(self):\n return self._terminated",
"def get_connection_state(self):\n return self.connection_state",
"def termination_direction(self) -> str:\n return self._termination_direction",
"def terminated_on(self):\n return self._terminated_on",
"def get_ssl_termination(self, loadbalancer):\n return loadbalancer.get_ssl_termination()",
"def get_state(self):\n return self._env.get_state()",
"def termination_status(self):\n res = {}\n for i in range(len(self)):\n res[i] = self.kernels[i].stop()\n return res",
"def get_status(self):\n return self._conn_state",
"def state(self):\n return pn_connection_state(self._impl)",
"def get_ssl_termination(self, loadbalancer):\n uri = \"/loadbalancers/%s/ssltermination\" % utils.get_id(loadbalancer)\n try:\n resp, body = self.api.method_get(uri)\n except exc.NotFound:\n # For some reason, instead of returning an empty dict like the\n # other API GET calls, this raises a 404.\n return {}\n return body.get(\"sslTermination\", {})",
"def private_link_service_connection_state(self) -> Optional[pulumi.Input['PrivateLinkServiceConnectionStateArgs']]:\n return pulumi.get(self, \"private_link_service_connection_state\")",
"def termination_issued_on(self):\n return self._termination_issued_on",
"def termination_state(self, termination_state: str):\n allowed_values = [\"LP_CAN_NEVER_TERMINATE\", \"LT_NOT_TERMINATED\", \"TERMINATED_SERVER_TO_CLIENT_FLOW\", \"TERMINATED_CLIENT_TO_SERVER_FLOW\", \"TERMINATED_BIDIRECTIONAL\", \"LT_PERMENANTLY_TERMINATED\", \"TERMINATION_STATE_UNKNOWN\"] # noqa: E501\n if termination_state not in allowed_values:\n raise ValueError(\n \"Invalid value for `termination_state` ({0}), must be one of {1}\"\n .format(termination_state, allowed_values)\n )\n\n self._termination_state = termination_state",
"def private_link_service_connection_state(self) -> 'outputs.PrivateLinkServiceConnectionStateResponse':\n return pulumi.get(self, \"private_link_service_connection_state\")",
"def getShutdownFlag(self):\n return self._shutdownFlag",
"def private_link_service_connection_state(self) -> Optional['outputs.PrivateLinkServiceConnectionStateResponse']:\n return pulumi.get(self, \"private_link_service_connection_state\")",
"def connection_status(self):\n return self._connection_status",
"def lifecycle_state(self):\n return self._lifecycle_state",
"def lifecycle_state(self):\n return self._lifecycle_state",
"def lifecycle_state(self):\n return self._lifecycle_state",
"def lifecycle_state(self):\n return self._lifecycle_state",
"def lifecycle_state(self):\n return self._lifecycle_state",
"def is_terminated(self):\n self._lock_terminate.acquire()\n terminated = self._terminated\n self._lock_terminate.release()\n return terminated",
"def get_termination_command_state(instance: Dict[str, str]) -> Optional[str]:\n invocations = ssm.list_command_invocations(\n InstanceId=instance[\"InstanceId\"], Filters=[{\"key\": \"DocumentName\", \"value\": SSM_TERMINATION_DOCUMENT_NAME}]\n )[\"CommandInvocations\"]\n\n if len(invocations) == 0:\n return None\n\n invocations.sort(key=lambda invocation: invocation[\"RequestedDateTime\"], reverse=True)\n\n return invocations[0][\"Status\"] # type: ignore",
"def exit_status(self):\n return self._exit_status",
"def get_state(self):\n return self._state"
] | [
"0.65559936",
"0.6262929",
"0.62271124",
"0.6188171",
"0.6188171",
"0.6125202",
"0.60600793",
"0.6005115",
"0.5923498",
"0.56676537",
"0.5665788",
"0.56531096",
"0.5590717",
"0.557181",
"0.5468976",
"0.5463126",
"0.5419899",
"0.5410675",
"0.5404743",
"0.5395988",
"0.5296206",
"0.5288655",
"0.5288655",
"0.5288655",
"0.5288655",
"0.5288655",
"0.526083",
"0.5260363",
"0.5239485",
"0.52191544"
] | 0.7544638 | 0 |
Sets the termination_state of this ConnectionEndPoint. | def termination_state(self, termination_state: str):
allowed_values = ["LP_CAN_NEVER_TERMINATE", "LT_NOT_TERMINATED", "TERMINATED_SERVER_TO_CLIENT_FLOW", "TERMINATED_CLIENT_TO_SERVER_FLOW", "TERMINATED_BIDIRECTIONAL", "LT_PERMENANTLY_TERMINATED", "TERMINATION_STATE_UNKNOWN"] # noqa: E501
if termination_state not in allowed_values:
raise ValueError(
"Invalid value for `termination_state` ({0}), must be one of {1}"
.format(termination_state, allowed_values)
)
self._termination_state = termination_state | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_termination(self, termination):\n # FIXME should be internally accessible only?\n self.__termination = termination",
"def terminated(self, terminated):\n\n self._terminated = terminated",
"def terminated(self, terminated):\n\n self._terminated = terminated",
"def terminated(self, terminated):\n\n self._terminated = terminated",
"def terminating_on(self, terminating_on):\n\n self._terminating_on = terminating_on",
"def termination_direction(self, termination_direction: str):\n allowed_values = [\"BIDIRECTIONAL\", \"SINK\", \"SOURCE\", \"UNDEFINED_OR_UNKNOWN\"] # noqa: E501\n if termination_direction not in allowed_values:\n raise ValueError(\n \"Invalid value for `termination_direction` ({0}), must be one of {1}\"\n .format(termination_direction, allowed_values)\n )\n\n self._termination_direction = termination_direction",
"def termination_state(self) -> str:\n return self._termination_state",
"def set_state(self, state):\n self._env.set_state(state)",
"def terminated_on(self, terminated_on):\n\n self._terminated_on = terminated_on",
"def lifecycle_state(self, lifecycle_state):\n self._lifecycle_state = lifecycle_state",
"def lifecycle_state(self, lifecycle_state):\n self._lifecycle_state = lifecycle_state",
"def set_device_state(self, nDeviceState):\n\t\tcall_sdk_function('PrlSrvCfgDev_SetDeviceState', self.handle, nDeviceState)",
"def __setstate__(self, state):\n\n self.set(DER = state)",
"def lifecycle_state(self, lifecycle_state):\n allowed_values = [\"PROVISIONING\", \"AVAILABLE\", \"TERMINATING\", \"TERMINATED\"]\n if lifecycle_state not in allowed_values:\n raise ValueError(\n \"Invalid value for `lifecycle_state`, must be one of {0}\"\n .format(allowed_values)\n )\n self._lifecycle_state = lifecycle_state",
"def set_termination_protection(self, jobflow_id, termination_protection_status):\r\n assert termination_protection_status in (True, False)\r\n\r\n params = {}\r\n params['TerminationProtected'] = (termination_protection_status and \"true\") or \"false\"\r\n self.build_list_params(params, [jobflow_id], 'JobFlowIds.member')\r\n\r\n return self.get_status('SetTerminationProtection', params, verb='POST')",
"def terminate(self):\n self.terminated = True",
"def _shutdown(self):\n self.control_socket.send(zmqmessage.IPC_END)\n self.end_threads = True\n self.timeout = 1",
"def shutdown(self):\n\n self.active = False\n\n try:\n self.listen_socket.shutdown(socket.SHUT_RDWR)\n except:\n self.logger.info(\"Ignoring listen soc shutdown error\")\n self.listen_socket = None\n\n with self.connect_cv:\n self.connect_cv.notifyAll()\n\n self.wakeup()\n self.dbg_state = \"down\"",
"def set_state(self, state):\n self.state = state",
"def set_state(self,state):\n self.__state = state",
"async def async_set_state(self, state):\n self._state = state",
"def terminate(self):\r\n for call in self._deathCandidates.itervalues():\r\n call.cancel()\r\n\r\n self._deathCandidates = {}\r\n\r\n for connection in self._connections.copy():\r\n connection.destroy()\r\n assert len(self._connections) == 0\r\n\r\n Endpoint.terminate(self)",
"def update_ssl_termination(self, loadbalancer, securePort=None, enabled=None,\n secureTrafficOnly=None):\n return loadbalancer.update_ssl_termination(securePort=securePort,\n enabled=enabled, secureTrafficOnly=secureTrafficOnly)",
"def update_ssl_termination(self, loadbalancer, securePort=None, enabled=None,\n secureTrafficOnly=None):\n ssl_info = self.get_ssl_termination(loadbalancer)\n if not ssl_info:\n raise exc.NoSSLTerminationConfiguration(\"You must configure SSL \"\n \"termination on this load balancer before attempting \"\n \"to update it.\")\n if securePort is None:\n securePort = ssl_info[\"securePort\"]\n if enabled is None:\n enabled = ssl_info[\"enabled\"]\n if secureTrafficOnly is None:\n secureTrafficOnly = ssl_info[\"secureTrafficOnly\"]\n uri = \"/loadbalancers/%s/ssltermination\" % utils.get_id(loadbalancer)\n req_body = {\"sslTermination\": {\n \"enabled\": enabled,\n \"secureTrafficOnly\": secureTrafficOnly,\n \"securePort\": securePort,\n }}\n resp, body = self.api.method_put(uri, body=req_body)\n return body",
"def set_state(self, state: int):\n self.state = state",
"def setState(self, state):\n assert self.isValidState(state)\n self._state = state",
"def set_state(self, state: Any) -> None:\n raise NotImplementedError(\n 'This environment has not implemented `set_state()`.'\n )",
"def terminate(self):\n self._running = False",
"def health_state(self, health_state):\n\n self._health_state = health_state",
"def set_working_state(self):\n self.state = 0\n self.port = None"
] | [
"0.6452473",
"0.5807092",
"0.5807092",
"0.5807092",
"0.5624698",
"0.55612886",
"0.54337186",
"0.5364221",
"0.5222837",
"0.4969389",
"0.4969389",
"0.49535656",
"0.48852345",
"0.4882801",
"0.48434332",
"0.48371",
"0.48204172",
"0.47924957",
"0.47783226",
"0.47641855",
"0.47625518",
"0.47515357",
"0.47490737",
"0.4742783",
"0.47196737",
"0.47155845",
"0.46756157",
"0.4668573",
"0.4666526",
"0.46533087"
] | 0.6799398 | 0 |
Gets the layer_protocol_name of this ConnectionEndPoint. | def layer_protocol_name(self) -> str:
return self._layer_protocol_name | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def layer_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"layer_name\")",
"def layer_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"layer_name\")",
"def protocol(self) -> str:\n return self.__parameters.protocol",
"def layer_protocol_name(self, layer_protocol_name: str):\n allowed_values = [\"OTSiA\", \"OCH\", \"OTU\", \"ODU\", \"ETH\", \"ETY\", \"DSR\"] # noqa: E501\n if layer_protocol_name not in allowed_values:\n raise ValueError(\n \"Invalid value for `layer_protocol_name` ({0}), must be one of {1}\"\n .format(layer_protocol_name, allowed_values)\n )\n\n self._layer_protocol_name = layer_protocol_name",
"def layer_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"layer_name\")",
"def getProtocol(self, _):\r\n return self._protocol",
"def protocol(self):\n return self._protocol",
"def protocol(self):\n return self._host[CONF_PROTOCOL]",
"def protocol(self) -> str:\n return __name__",
"def protocol(self) -> str:\n return pulumi.get(self, \"protocol\")",
"def getProtocol(self) -> str:\n ...",
"def protocol(self):\n return helpers.get_protocol()",
"def protocol(self):\n self._recv_protocol()\n return self._protocol",
"def protocol_name(self):\n self._protocol_name = 'kerberos'\n return self._protocol_name",
"def __ip_protocol(self, proto_num):\n if proto_num in self.protocols:\n return self.protocols[proto_num]\n return str(proto_num)",
"def proto(self):\n return self.sock.proto",
"def protocol(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"protocol\")",
"def get_name(self):\n \n return 'Socket/IP'",
"def protocol(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"protocol\")",
"def protocol(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"protocol\")",
"def protocol(self) -> Optional[pulumi.Input[Union[str, 'Protocol']]]:\n return pulumi.get(self, \"protocol\")",
"def protocol(self) -> Optional[pulumi.Input['TargetServerProtocol']]:\n return pulumi.get(self, \"protocol\")",
"def in_protocol(self) -> str:\n return pulumi.get(self, \"in_protocol\")",
"def v_protocol(self):\n return self._protocol",
"def v_protocol(self):\n return self._protocol",
"def ip_protocol(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"ip_protocol\")",
"def protocol(self) -> Optional['ListenerProtocol']:\n return pulumi.get(self, \"protocol\")",
"def protocol(self) -> Optional[pulumi.Input[Union[str, 'GatewayRouteConfigProtocol']]]:\n return pulumi.get(self, \"protocol\")",
"def getsockname(self):\n return self.sock.getsockname()",
"def getsockname(self):\r\n return self._fd.getsockname()"
] | [
"0.6991893",
"0.6737981",
"0.6655885",
"0.66094315",
"0.6580004",
"0.6526617",
"0.64756644",
"0.64686406",
"0.6449779",
"0.6433681",
"0.6416226",
"0.64074725",
"0.63770485",
"0.63522774",
"0.63161236",
"0.62349397",
"0.61921996",
"0.618826",
"0.6171238",
"0.6171238",
"0.6143952",
"0.60964465",
"0.6087765",
"0.6067266",
"0.6067266",
"0.6062247",
"0.5993879",
"0.59627205",
"0.5954461",
"0.592667"
] | 0.87370723 | 0 |
Sets the layer_protocol_name of this ConnectionEndPoint. | def layer_protocol_name(self, layer_protocol_name: str):
allowed_values = ["OTSiA", "OCH", "OTU", "ODU", "ETH", "ETY", "DSR"] # noqa: E501
if layer_protocol_name not in allowed_values:
raise ValueError(
"Invalid value for `layer_protocol_name` ({0}), must be one of {1}"
.format(layer_protocol_name, allowed_values)
)
self._layer_protocol_name = layer_protocol_name | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def layer_protocol_name(self) -> str:\n return self._layer_protocol_name",
"def layer(self, layer):\n self._layer = layer",
"def protocol_id(self, protocol_id):\n self._protocol_id = protocol_id",
"def protocol_id(self, protocol_id):\n\n self._protocol_id = protocol_id",
"def protocol_version(self, protocol_version):\n\n self._protocol_version = protocol_version",
"def protocol_name(self):\n self._protocol_name = 'kerberos'\n return self._protocol_name",
"def protocol(self, protocol):\n\n self._protocol = protocol",
"def protocol(self, protocol):\n\n self._protocol = protocol",
"def protocol(self, protocol):\n\n self._protocol = protocol",
"def protocol(self, protocol):\n\n self._protocol = protocol",
"def auth_protocol(self, auth_protocol):\n\n self._auth_protocol = auth_protocol",
"def layer_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"layer_name\")",
"def set_prev_layer(self, layer):\n self._prev_layer = layer",
"def set_next_layer(self, layer):\n self._next_layer = layer",
"def layer_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"layer_name\")",
"def topology_name(self, topology_name):\n\n self._topology_name = topology_name",
"def setLayer( self, layer ):\n if ( layer == self._layer ):\n return False\n \n self._layer = layer\n self.syncLayerData()\n \n return True",
"def transportprotocol(self, transportprotocol) :\n\t\ttry :\n\t\t\tself._transportprotocol = transportprotocol\n\t\texcept Exception as e:\n\t\t\traise e",
"def set_protocol(cls, interface_name, proto='provision'): # pragma: no cover\n if proto not in cls.supported_proto:\n return\n try:\n ret = cls.get_logical_ifname(interface_name, proto)\n if not ret:\n return\n os.system('uci set network.%s.proto=%s' % (ret, proto))\n os.system('uci commit network')\n os.system('/etc/init.d/network reload')\n if proto == cls.supported_proto[1]:\n os.system('sysctl -w net.ipv6.conf.%s.autoconf=0' % interface_name)\n os.system('sysctl -w net.ipv6.conf.%s.use_tempaddr=2' % interface_name)\n cls.logger.debug(\"set %s[%s] DCHP protocol to %s\", interface_name, ret, proto)\n except OSError as e:\n cls.logger.error(\"Got exception:%s\" % str(e))",
"def protocol_in(self, protocol_in):\n\n self._protocol_in = protocol_in",
"def SetLayer(self, *args):\n return _XCAFDoc.XCAFDoc_LayerTool_SetLayer(self, *args)",
"def layer_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"layer_name\")",
"def switch_protocol(self):\n with self._lock:\n if self.protocol == 'rtmp':\n self._protocol = 'hls'\n else:\n self._protocol = 'rtmp'",
"def _get_layer_name(self, layer):\n label = '{}-{}'.format(layer.label, layer.rank)\n if label not in self.naming_map:\n self.naming_map[label] = {}\n\n if layer not in self.naming_map[label].keys():\n self.naming_map[label][layer] = len(self.naming_map[label]) + 1\n return '{}-{}'.format(label, self.naming_map[label][layer])",
"def team_name(self, team_name):\n\n self._team_name = team_name",
"def set_protocol(name):\n\n global global_serializer, global_deserializer\n global_serializer = get_serializer(name)\n global_deserializer = get_deserializer(name)",
"def fill_protocol(self, data):\n self.protocol = get_optional_value(data, self.PROTOCOL, \"http\")\n self.protocol = self.protocol or \"http\"",
"def auth_protocol_in(self, auth_protocol_in):\n\n self._auth_protocol_in = auth_protocol_in",
"def team_set_name(self, team_type: TeamType, team_name):\r\n\t\tself._teams[team_type].team_name = team_name\r\n\t\t_logger.info(\"Set the name of team {0} to \\\"{1}\\\".\" \\\r\n\t\t\t.format(team_type, team_name))",
"def __ip_protocol(self, proto_num):\n if proto_num in self.protocols:\n return self.protocols[proto_num]\n return str(proto_num)"
] | [
"0.70598274",
"0.5782147",
"0.57290965",
"0.57289034",
"0.5708312",
"0.5688233",
"0.56085056",
"0.56085056",
"0.56085056",
"0.56085056",
"0.5562985",
"0.54273206",
"0.5392024",
"0.53690857",
"0.53411305",
"0.52844375",
"0.5257484",
"0.52147466",
"0.5196867",
"0.50834507",
"0.50757086",
"0.5049594",
"0.50476027",
"0.49269477",
"0.4910125",
"0.48815018",
"0.4868808",
"0.48294768",
"0.48057765",
"0.4765683"
] | 0.79915446 | 0 |
Gets the connectivity_service_end_point of this ConnectionEndPoint. | def connectivity_service_end_point(self) -> str:
return self._connectivity_service_end_point | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def connectivity_service_end_point(self, connectivity_service_end_point: str):\n\n self._connectivity_service_end_point = connectivity_service_end_point",
"def get_endpoint(self):\r\n return self._endpoint",
"def __get_endpoint(self):\n return self._endpoint",
"def connected_endpoint(self):\n try:\n if self._connected_interface:\n return self._connected_interface\n except ObjectDoesNotExist:\n pass\n try:\n if self._connected_circuittermination:\n return self._connected_circuittermination\n except ObjectDoesNotExist:\n pass\n return None",
"def service_endpoint(self) -> str:\n return pulumi.get(self, \"service_endpoint\")",
"def endpoint(self):\r\n return self._endpoint",
"def GetEndPoint(self) -> Optional[str]:\n if self._end_point:\n return self._end_point\n\n cmd = util.GcloudCommand(self, 'config', 'get-value',\n 'api_endpoint_overrides/spanner')\n stdout, _, retcode = cmd.Issue(raise_on_failure=False)\n if retcode != 0:\n logging.warning('Fail to retrieve cloud spanner end point.')\n return None\n self._end_point = json.loads(stdout)\n return self._end_point",
"def endpoint(self):\n return self.Endpoint",
"def get_service(self):\n return self.__service",
"def service(self):\n return self._service",
"def service(self):\n return self._service",
"def end_point(self) -> PointValue:\n return ops.GeoEndPoint(self).to_expr()",
"def endpoint(self):\n return f'Endpoint = {self._peer.endpoint}'",
"def getService(self):\n return self.serviceClass",
"def acs_endpoint(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"acs_endpoint\")",
"def get_service(self):\n if 'service' in self._data:\n return self._data['service']\n else:\n raise ClskError('Network %s does not have service confgiured' % \n self.name)",
"def connected_endpoint(self):\n try:\n if self._connected_poweroutlet:\n return self._connected_poweroutlet\n except ObjectDoesNotExist:\n pass\n try:\n if self._connected_powerfeed:\n return self._connected_powerfeed\n except ObjectDoesNotExist:\n pass\n return None",
"def service_connection_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service_connection_id\")",
"def endpoint_reference(self) -> Optional[str]:\n return pulumi.get(self, \"endpoint_reference\")",
"def endpoint_reference(self) -> Optional[str]:\n return pulumi.get(self, \"endpoint_reference\")",
"def endpoint_reference(self) -> Optional[str]:\n return pulumi.get(self, \"endpoint_reference\")",
"def endpoint_reference(self) -> Optional[str]:\n return pulumi.get(self, \"endpoint_reference\")",
"def endpoint_reference(self) -> Optional[str]:\n return pulumi.get(self, \"endpoint_reference\")",
"def endpoint_reference(self) -> Optional[str]:\n return pulumi.get(self, \"endpoint_reference\")",
"def _getEndpointConnection(self, epA, epB):\r\n if epA not in self._endpoints or epB not in self._endpoints:\r\n raise InternalError('Endpoint is not part of this network.')\r\n\r\n if epA == epB:\r\n return epA.getLoopback()\r\n else:\r\n connectionsA = self._endpoints[epA]\r\n connectionsB = self._endpoints[epB]\r\n\r\n candidates = connectionsA.intersection(connectionsB)\r\n\r\n if candidates:\r\n if len(candidates) != 1:\r\n raise InternalError('There are more than one possible '\r\n 'endpoint connections.')\r\n\r\n return candidates.pop()\r\n else:\r\n connection = EndpointConnection(epA, epB)\r\n connectionsA.add(connection)\r\n connectionsB.add(connection)\r\n return connection",
"def transport(self) -> AppConnectionsServiceTransport:\n return self._client.transport",
"def service(self) -> Optional['outputs.ServiceReference']:\n return pulumi.get(self, \"service\")",
"def get_service_url():\n return get_config_handler().get_service_url()",
"def endpoint_uri(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"endpoint_uri\")",
"def get_service_endpoint(service_name):\n service = Config.get_service_info(service_name)\n return 'http://%s:%s' % (service['url'], service['port'])"
] | [
"0.70261055",
"0.63055533",
"0.62776625",
"0.6240966",
"0.61877346",
"0.6100025",
"0.60089487",
"0.60035914",
"0.59879875",
"0.5943736",
"0.5943736",
"0.56929696",
"0.5629272",
"0.5618605",
"0.56028825",
"0.55782425",
"0.5570696",
"0.5567195",
"0.5560139",
"0.5560139",
"0.5560139",
"0.5560139",
"0.5560139",
"0.5560139",
"0.55195624",
"0.54671526",
"0.54464173",
"0.54249823",
"0.5413622",
"0.5408024"
] | 0.825556 | 0 |
Sets the connectivity_service_end_point of this ConnectionEndPoint. | def connectivity_service_end_point(self, connectivity_service_end_point: str):
self._connectivity_service_end_point = connectivity_service_end_point | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def connectivity_service_end_point(self) -> str:\n return self._connectivity_service_end_point",
"def graph_endpoint(self, graph_endpoint):\n\n self._graph_endpoint = graph_endpoint",
"def setEndpoint(self, endpoint):\n self.__lockobj.acquire()\n self.__endpoints[endpoint.getEndpoint()] = endpoint\n self.__lockobj.acquire()",
"def with_custom_connectivity( # type: ignore\n self, connectivity_service: ConnectivityService\n ):\n self.logger.debug(f\"Connectivity service: {connectivity_service}\")\n if not isinstance(connectivity_service, ConnectivityService):\n raise ValueError(\"Invalid connectivity service provided\")\n self.connectivity_service = connectivity_service\n self.connectivity_service.set_inbound_message_listener(\n self._on_inbound_message\n )\n\n return self",
"def set_endpoint(endpoint_url):\n log.info(\"Called set_endpoint with args %s\", locals())\n if 'cb/api' in endpoint_url:\n log.debug(\"Setting Cloudbreak endpoint to %s\", endpoint_url)\n this_config = config.cb_config\n elif ':7189' in endpoint_url:\n log.debug(\"Setting Altus Director endpoint to %s\", endpoint_url)\n this_config = config.cd_config\n else:\n raise ValueError(\"Unrecognised API Endpoint\")\n try:\n if this_config.api_client:\n log.debug(\"Found Active API Client, updating...\")\n this_config.api_client.host = endpoint_url\n except AttributeError:\n log.debug(\"No Active API Client found to update\")\n this_config.host = endpoint_url\n if this_config.host == endpoint_url:\n return True\n return False",
"def set_bindpoint(self, bindpoint):\n self.options['bindpoint'] = bindpoint",
"def defaultEndpoint(self, end_point=None):\n if(end_point is not None):\n self.end_point = end_point\n return self.end_point",
"def service(self, service):\n \n self._service = service",
"def secondary_endpoint(self, secondary_endpoint):\n\n self._secondary_endpoint = secondary_endpoint",
"def set_service(self):\n\n if self.service:\n self.service = self.service(\n json=self.json,\n google_user=self.google_user,\n endpoint=self\n )",
"def completion_endpoint(self, completion_endpoint):\n\n self._completion_endpoint = completion_endpoint",
"def service_account(self, service_account):\n\n self._service_account = service_account",
"def service_connection_id(self, service_connection_id):\n if service_connection_id is None:\n raise ValueError(\"Invalid value for `service_connection_id`, must not be `None`\") # noqa: E501\n\n self._service_connection_id = service_connection_id",
"def endpointuuid(self, endpointuuid):\n\n self._endpointuuid = endpointuuid",
"def connect_datacenter(self, dc):\n self.compute.dc = dc\n for ep in self.openstack_endpoints.values():\n ep.manage = self.manage\n logging.info \\\n (\"Connected DC(%s) to API endpoint %s(%s:%d)\" % (dc.label, self.__class__.__name__, self.ip, self.port))",
"def update_endpoint(self, endpoint_id, endpoint_ref):\n raise exception.NotImplemented() # pragma: no cover",
"def __set_endpoint(self, endpoint):\n self._endpoint = endpoint\n\n host, port = endpoint.get_address()\n if __debug__: dprint(\"update LAN address \", self._lan_address[0], \":\", self._lan_address[1], \" -> \", self._lan_address[0], \":\", port, force=True)\n self._lan_address = (self._lan_address[0], port)\n\n # at this point we do not yet have a WAN address, set it to the LAN address to ensure we\n # have something\n assert self._wan_address == (\"0.0.0.0\", 0)\n if __debug__: dprint(\"update WAN address \", self._wan_address[0], \":\", self._wan_address[1], \" -> \", self._lan_address[0], \":\", self._lan_address[1], force=True, level='error')\n self._wan_address = self._lan_address\n\n if not self.is_valid_address(self._lan_address):\n if __debug__: dprint(\"update LAN address \", self._lan_address[0], \":\", self._lan_address[1], \" -> \", host, \":\", self._lan_address[1], force=True)\n self._lan_address = (host, self._lan_address[1])\n\n if not self.is_valid_address(self._lan_address):\n if __debug__: dprint(\"update LAN address \", self._lan_address[0], \":\", self._lan_address[1], \" -> \", self._wan_address[0], \":\", self._lan_address[1], force=True)\n self._lan_address = (self._wan_address[0], self._lan_address[1])\n\n # our address may not be a bootstrap address\n if self._lan_address in self._bootstrap_candidates:\n del self._bootstrap_candidates[self._lan_address]\n\n # our address may not be a candidate\n if self._lan_address in self._candidates:\n del self._candidates[self._lan_address]",
"def service_selector(self, service_selector: ConfigNodePropertyString):\n\n self._service_selector = service_selector",
"def setSetpoint(self, point):\n\n\t\tself._setpoint = point",
"def set_Endpoint(self, value):\n super(AddressValidationInputSet, self)._set_input('Endpoint', value)",
"def service_account(self, service_account: str):\n\n self._service_account = service_account",
"def _bind_to_service(self):\n if self._service_dn:\n # bind with the service_dn\n self._server.simple_bind_s(self._service_dn, self._service_password)\n else:\n # force a connection without binding\n self._server.whoami_s()",
"def service_vm_ovf_url(self, service_vm_ovf_url):\n\n self._service_vm_ovf_url = service_vm_ovf_url",
"def authentication_endpoint(self, authentication_endpoint):\n\n self._authentication_endpoint = authentication_endpoint",
"def contact_point(self, contact_point: object):\n\n self._contact_point = contact_point",
"def service_area(self, service_area: object):\n\n self._service_area = service_area",
"def update_endpoint(self, endpoint_id, service_id=None, interface=None,\n url=None, region=None, enabled=None, **kwargs):\n doc = common.Document()\n endpoint = common.Element(\"endpoint\")\n doc.append(endpoint)\n\n if service_id:\n endpoint.add_attr(\"service_id\", service_id)\n if interface:\n endpoint.add_attr(\"interface\", interface)\n if url:\n endpoint.add_attr(\"url\", url)\n if region:\n endpoint.add_attr(\"region\", region)\n\n if 'force_enabled' in kwargs:\n endpoint.add_attr(\"enabled\", kwargs['force_enabled'])\n elif enabled is not None:\n endpoint.add_attr(\"enabled\", str(enabled).lower())\n\n resp, body = self.patch('endpoints/%s' % str(endpoint_id), str(doc))\n body = self._parse_body(etree.fromstring(body))\n return resp, body",
"def service_code(self, service_code):\n \n self._service_code = service_code",
"def service_status(self, service_status):\n\n self._service_status = service_status",
"def management_endpoint(self, management_endpoint):\n\n self._management_endpoint = management_endpoint"
] | [
"0.635812",
"0.5773693",
"0.56269103",
"0.53948677",
"0.53651756",
"0.53116417",
"0.525991",
"0.5178036",
"0.5159906",
"0.5135377",
"0.50487715",
"0.50400984",
"0.49996492",
"0.4988267",
"0.4931792",
"0.48966828",
"0.48765537",
"0.4869402",
"0.48534063",
"0.48342404",
"0.4833629",
"0.47994652",
"0.47701412",
"0.47626632",
"0.47335273",
"0.4716113",
"0.47107357",
"0.47006774",
"0.46905866",
"0.46893582"
] | 0.85730124 | 0 |
Gets the parent_node_edge_point of this ConnectionEndPoint. | def parent_node_edge_point(self) -> List[str]:
return self._parent_node_edge_point | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getParent(self):\n return self.parent_edge",
"def edges_parent(self):\n return self._edges_parent",
"def get_parent_id(self):\n return self._parent_id",
"def get_parent(self):\n return BinaryNode.or_none(self.parent)",
"def parent_id(self):\n return self._parent_id",
"def parent_id(self):\n return self._parent_id",
"def parent(self, p):\n node = self._validate(p)\n return self._make_position(node.parent)",
"def parent_location(self):\n return self._parent_location",
"def get_parent(self):\n return self._parent",
"def parent(self, p):\n node = self._validate(p)\n return self._make_position(node._parent)",
"def parent(self, p):\n node = self._validate(p)\n return self._make_position(node._parent)",
"def parent(self, p):\n node = self._validate(p)\n return self._make_position(node._parent)",
"def parent(self, node):\n self._validate_node(node)\n idx = node._index\n if idx == 0:\n return None # Root node has no parent\n if idx % 2 == 0:\n return self._array[(idx-2)//2] # Right child (even number)\n return self._array[(idx-1)//2] # left child (odd number)",
"def parent_node_edge_point(self, parent_node_edge_point: List[str]):\n\n self._parent_node_edge_point = parent_node_edge_point",
"def get_parent(self) :\n return self.parent",
"def parent(self, p):\n node = self._validate(p)\n return self._make_position(node._parent)",
"def get_parent(self):\n if self.parent:\n return self.parent()\n else:\n return None",
"def parent_id(self) -> int:\n return self._parent_id",
"def get_parent(self):\n return self.__parent",
"def get_parent(self):\n return self.__parent",
"def parent(self, p):\n node = self._validate_position(p)\n return self._make_position(node)",
"def parent(self,p):\n node = self._validate(p)\n return self._make_position(node._parent)",
"def parent(self, node):\r\n return self.find_node(node).parent.content",
"def get_parent(self):\n return self.parent",
"def get_parent(self):\n return self.parent",
"def get_parent(self):\n return self.parent",
"def parent(self):\n \n return self._parent",
"def parent(self) -> Union[\"ExpressionNode\", None]:\n return self.__parent",
"def GetParent(self):\r\n\r\n return self._parent",
"def parent(self):\n if not self._parents:\n return None\n elif len(self._parents) == 1:\n return tuple(self._parents)[0]\n else:\n raise RuntimeError('Ambiguous parent: there are multiple parents.')"
] | [
"0.7909449",
"0.7849635",
"0.72225994",
"0.71083087",
"0.6985489",
"0.6985489",
"0.69378465",
"0.6932805",
"0.6929256",
"0.6897707",
"0.6897707",
"0.6897707",
"0.68958336",
"0.686817",
"0.68395025",
"0.68314517",
"0.6809207",
"0.6806697",
"0.6785802",
"0.6785802",
"0.67726624",
"0.6768616",
"0.6763231",
"0.6756571",
"0.6756571",
"0.6756571",
"0.67324257",
"0.6699666",
"0.6691139",
"0.6690608"
] | 0.7929555 | 0 |
Sets the parent_node_edge_point of this ConnectionEndPoint. | def parent_node_edge_point(self, parent_node_edge_point: List[str]):
self._parent_node_edge_point = parent_node_edge_point | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setParent(self, edge):\n self.parent_edge = edge",
"def set_parent(self, parent_node):\n self.set_parent = parent_node",
"def set_parent(self, parent: \"BaseSegment\") -> None:\n self._parent = weakref.ref(parent)",
"def set_parent(self, parent):\n self._parent = parent",
"def parent_id(self, parent_id):\n\n self._parent_id = parent_id",
"def parent_id(self, parent_id):\n\n self._parent_id = parent_id",
"def parent_id(self, parent_id):\n\n self._parent_id = parent_id",
"def parent_id(self, parent_id):\n\n self._parent_id = parent_id",
"def _set_parent(self, parent):\n self.__parent = parent",
"def set_parent(self, new_parent):\n node = BinaryNode.or_none(new_parent)\n self.parent = node",
"def _set_parent(self, parent):\n assert self._parent == None # implementing reparenting requires more work\n self._parent = parent",
"def parent_connect(self, node):\n if self.parent.get() >= self.data:\n self.parent.set_left(node)\n if node and node.left is not None:\n node.set_parent(self.parent)\n else:\n self.parent.set_right(node)\n if node and node.left is not None:\n node.set_parent(self.parent)",
"def parent_location(self, parent_location):\n\n self._parent_location = parent_location",
"def setParent(self, parent):\n if parent is None:\n self.__parent = None\n else:\n self.__parent = weakref.ref(parent)",
"def set_parent(self, parent):\n self.parent = parent",
"def set_parent(self, parent):\n self.parent = parent",
"def parent(self, parent):\n if parent is None:\n raise ValueError(\"Invalid value for `parent`, must not be `None`\")\n\n self._parent = parent",
"def _set_parent(self, parent):\n assert self._parent == None # implementing reparenting requires more work\n # Set the parent silently to dodge setattr parent handling.\n object.__setattr__(self, '_parent', parent)",
"def set_task_parent(self, parent: \"Task\"):\n self.parent_uid = parent.uid\n self.root_uid = parent.root_uid",
"def parent_id(self, parent_id: int):\n if parent_id is None:\n raise ValueError(\"Invalid value for `parent_id`, must not be `None`\")\n\n self._parent_id = parent_id",
"def setParent(self, parent):\n self.parent = parent",
"def set_parent(self, parent):\n self._parent = parent\n\n if self._parent is not None:\n self._compress_path()",
"def set_parent(self, new_parent):\n self.__parent = new_parent",
"def parent_change_id(self, parent_change_id):\n\n self._parent_change_id = parent_change_id",
"def setparent(self, parent):\n\t\tself._setparent(parent)",
"def setParent(self, parent):\n self.parent = parent\n self.position = parent.positionCount",
"def client_node_edge_point(self, client_node_edge_point: List[str]):\n\n self._client_node_edge_point = client_node_edge_point",
"def parent(self, parent: AbstractPaths):\r\n self._parent = parent",
"def parent(self, parent):\n\n self._parent = parent",
"def parent(self, parent):\n\n self._parent = parent"
] | [
"0.77046937",
"0.7487442",
"0.6870154",
"0.65892845",
"0.6562548",
"0.6562548",
"0.6562548",
"0.6562548",
"0.65572673",
"0.6549818",
"0.65178555",
"0.64919025",
"0.6487479",
"0.6479882",
"0.64650625",
"0.64650625",
"0.64604414",
"0.63897806",
"0.63681376",
"0.63280916",
"0.6316669",
"0.63014454",
"0.62920254",
"0.62810856",
"0.62543243",
"0.61787546",
"0.6126904",
"0.61217755",
"0.6115599",
"0.6115599"
] | 0.8324284 | 0 |
Gets the client_node_edge_point of this ConnectionEndPoint. | def client_node_edge_point(self) -> List[str]:
return self._client_node_edge_point | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def client_node_edge_point(self, client_node_edge_point: List[str]):\n\n self._client_node_edge_point = client_node_edge_point",
"def parent_node_edge_point(self) -> List[str]:\n return self._parent_node_edge_point",
"def edges_parent(self):\n return self._edges_parent",
"def get_edge_coords(self):\n return self.coords",
"def getParent(self):\n return self.parent_edge",
"def getClientIP(self):\n if isinstance(self.client, IPv4Address):\n return self.client.host\n return None",
"def edge(self) -> EdgeConfig:\n return self._edge",
"def client_ip(self):\n return self._client_ip",
"def getAdjacent(self, node):\n return self.graph[node]",
"def get_merkle_edge(self):\n return self._get_merkle_edge()",
"def get_node(self):\r\n return self._node",
"def get_node(self):\n return self.__node",
"def parent_node_edge_point(self, parent_node_edge_point: List[str]):\n\n self._parent_node_edge_point = parent_node_edge_point",
"def get_start_vertex(self):\n\n return self._start_vertex",
"def client_affinity(self) -> Optional['ListenerClientAffinity']:\n return pulumi.get(self, \"client_affinity\")",
"def get_head_vertex(self):\n return self.graph.vertices[self.head_vertex.vertex_number]",
"def get_node_ip(self):\n return ray.services.get_node_ip_address()",
"def get_start_node(self) -> MazeCell:\n return self._start_node",
"def node(self):\n return self._node",
"def node(self):\n return self._node",
"def get_current_edge(self):\r\n edge = self.get_selected_part()\r\n if edge is None:\r\n edge = self.get_part(type=\"edge\", sub_type=\"h\", row=1, col=1)\r\n return edge",
"def getNode(self):\n node = Element.getNode(self)\n node.tag = 'edge'\n node.attrib['bgn'] = self.bgn.id\n node.attrib['end'] = self.end.id\n return(node)",
"def edge_attribute(self):\n return self._edge_attribute",
"def getFromNode(self):\n return self.from_node",
"def node_fwd(self):\n return self.grid.edges['nodes'][self.j, 1-self.orient]",
"def getNode(self):\n node = Edge.getNode(self)\n node.tag = 'depedge'\n return(node)",
"def client(self):\n return self._thread._client",
"def node_id(self) -> int:\n return self.data[\"nodeId\"]",
"def node_id(self) -> int:\n return self.data[\"nodeId\"]",
"def contact_point(self) -> object:\n return self._contact_point"
] | [
"0.7040182",
"0.6694296",
"0.6014033",
"0.579763",
"0.5779725",
"0.5725222",
"0.5720722",
"0.56671035",
"0.566444",
"0.5613887",
"0.56126505",
"0.5602985",
"0.55665207",
"0.5564498",
"0.5508583",
"0.55030704",
"0.54853576",
"0.54559284",
"0.545324",
"0.545324",
"0.54242283",
"0.5357227",
"0.535184",
"0.5341127",
"0.5337543",
"0.5325889",
"0.52919865",
"0.5283881",
"0.5283881",
"0.5277322"
] | 0.78359306 | 0 |
Sets the client_node_edge_point of this ConnectionEndPoint. | def client_node_edge_point(self, client_node_edge_point: List[str]):
self._client_node_edge_point = client_node_edge_point | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parent_node_edge_point(self, parent_node_edge_point: List[str]):\n\n self._parent_node_edge_point = parent_node_edge_point",
"def setParent(self, edge):\n self.parent_edge = edge",
"def client_node_edge_point(self) -> List[str]:\n return self._client_node_edge_point",
"def set_node(self, node):\n self.__node = node",
"def set_node_id(self, node_id):\n self._node_id = node_id",
"def edge(self, edge: EdgeConfig):\n\n self._edge = edge",
"def node_id(self, node_id):\n\n self._node_id = node_id",
"def graph_endpoint(self, graph_endpoint):\n\n self._graph_endpoint = graph_endpoint",
"def node_id(self, node_id: int):\r\n self._node_id = node_id",
"def node_version(self, node_version):\n\n self._node_version = node_version",
"def define_edge(self):\n\n self.canvas_edge = Line(\n points=[\n self.canvas_nodes[0].pos[0] + self.nodesize[0] / 2,\n self.canvas_nodes[0].pos[1] + self.nodesize[1] / 2,\n self.canvas_nodes[1].pos[0] + self.nodesize[0] / 2,\n self.canvas_nodes[1].pos[1] + self.nodesize[1] / 2\n ],\n joint='round',\n cap='round',\n width=3\n )",
"def setCooperationClient(self, client):\n self.__cooperationClient = client",
"def set_parent(self, parent_node):\n self.set_parent = parent_node",
"def set_node_pos(self, node, pos):\n if not node in self:\n raise NodeNotInGraph(node)\n self.node_positions[node] = pos",
"def setPrev(self, prev_half_edge):\n self.prev = prev_half_edge",
"def setPeer (self, peer):\n\t\tself.peer = peer",
"def node_id(self, node_id):\n if node_id is None:\n raise ValueError(\"Invalid value for `node_id`, must not be `None`\") # noqa: E501\n\n self._node_id = node_id",
"def client_id(self, client_id):\n\n self._client_id = client_id",
"def client_id(self, client_id):\n\n self._client_id = client_id",
"def client_id(self, client_id):\n\n self._client_id = client_id",
"def client_id(self, client_id):\n\n self._client_id = client_id",
"def ConnectByEdge(self, edge, arrow=False):\n return self.Connect(edge.node1.index, edge.node2.index,arrow, edge.weight)",
"def set_nodeset(self, nodeset):\n self.nodeset = set(nodeset) # overwrite the existing nodeset with the input nodeset\n\n self.__check_validity() # check if graph is valid - throws exception if not",
"def __init__(self, edgelist):\n self.edge = edgelist\n if edgelist:\n self.update_node2edge()",
"def edge(self, viz_edge: VizEdge) -> None:\n self._digraph.edge(viz_edge.start, viz_edge.end)",
"def set_enode(self) -> None:\r\n method = 'admin_nodeInfo'\r\n result = self.rpc_call(method) # result from rpc call\r\n enode = result['enode'].split('@')[0]\r\n self.enode = '{}@{}:{}'.format(enode, self.ip.address, self.ethereum_network_port)",
"def edges(self, e):\n self._edges = e",
"def set_node(self, index, node):\r\n self.loc.coord[index] = node",
"def setContextNode(self, node):\n if node is None: node__o = None\n else: node__o = node._o\n libxml2mod.xmlXPathSetContextNode(self._o, node__o)",
"def setSetpoint(self, point):\n\n\t\tself._setpoint = point"
] | [
"0.64545894",
"0.5764588",
"0.5657335",
"0.5621235",
"0.56201965",
"0.5616962",
"0.54884785",
"0.54075307",
"0.53285813",
"0.5285194",
"0.527346",
"0.52295053",
"0.50200117",
"0.49891022",
"0.4982168",
"0.49503762",
"0.49278897",
"0.49262947",
"0.49262947",
"0.49262947",
"0.49262947",
"0.49253324",
"0.4918428",
"0.48821616",
"0.48635104",
"0.48408765",
"0.48327228",
"0.48154062",
"0.4785559",
"0.47831938"
] | 0.84220517 | 0 |
Gets the connection_port_direction of this ConnectionEndPoint. | def connection_port_direction(self) -> str:
    return self._connection_port_direction | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_port_direction(self, port):\n if port == 1:\n self.__port_b_direction = self.__bus.read_byte_data(\n self.__ioaddress, self.IODIRB)\n return self.__port_b_direction\n else:\n self.__port_a_direction = self.__bus.read_byte_data(\n self.__ioaddress, self.IODIRA)\n return self.__port_a_direction\n return",
"def getDirection(self):\n return self.listener.direction",
"def direction(self) -> int:\n return self._direction",
"def connection_port_direction(self, connection_port_direction: str):\n allowed_values = [\"BIDIRECTIONAL\", \"INPUT\", \"OUTPUT\", \"UNIDENTIFIED_OR_UNKNOWN\"] # noqa: E501\n if connection_port_direction not in allowed_values:\n raise ValueError(\n \"Invalid value for `connection_port_direction` ({0}), must be one of {1}\"\n .format(connection_port_direction, allowed_values)\n )\n\n self._connection_port_direction = connection_port_direction",
"def get_direction(self):\r\n return self.__direction",
"def get_direction(self):\n return self.direction",
"def direction(self):\n return self._direction.copy()",
"def getDirection(self):\n return self.ray.direction",
"def direction(self) -> np.ndarray:\n return self._direction",
"def direction(self):\n return self.cfg.direction",
"def direction(self):\n return self._dir",
"def get_direction(self):\n return self.actual_coordinates[2]",
"def get_direction(self) -> int: \r\n if time.time() > self.stop_timer:\r\n return Directions.stop\r\n else:\r\n return self.direction",
"def direction(self) -> Optional[str]:\n return self._direction",
"def direction(self) -> str:\n return pulumi.get(self, \"direction\")",
"def direction(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"direction\")",
"def read_direction(self):\n global motor_direction\n with self._lock:\n return motor_direction",
"def get_direction(self):\n\n return -1 if self.curr_player == self.PLAYER1 else 1",
"def direction(self):\n _direction = self._custom.get(\"direction\")\n if _direction is not None:\n return _direction\n\n _direction = self._infer_direction()\n self._custom[\"direction\"] = _direction\n\n return _direction",
"def direction(self):\n return atan2d(self.y, self.x)",
"def get_port(self):\n \n return self._port",
"def get_port(self):\n return self.__port",
"def connection_port_role(self) -> str:\n return self._connection_port_role",
"def getRobotDirection(self):\n return self.direction\n #raise NotImplementedError",
"def getRobotDirection(self):\n return self.direction\n #raise NotImplementedError",
"def traffic_direction(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"traffic_direction\")",
"def getPort(self):\n return self._port",
"def get_port(self) -> int:\n return self._port",
"def getPort(self):\n return self._port",
"def comm_port(self):\r\n return self._comm_port"
] | [
"0.7422299",
"0.7100193",
"0.6954227",
"0.6906184",
"0.6850313",
"0.6784414",
"0.6750329",
"0.6648726",
"0.6547236",
"0.65026873",
"0.64180374",
"0.6364987",
"0.63177645",
"0.6307946",
"0.6284133",
"0.6239046",
"0.62192076",
"0.6218952",
"0.6201423",
"0.6183298",
"0.61322004",
"0.61203665",
"0.61045444",
"0.6085407",
"0.6085407",
"0.60699",
"0.6059426",
"0.6043886",
"0.60401857",
"0.60152787"
] | 0.8515388 | 0 |
Sets the connection_port_direction of this ConnectionEndPoint. | def connection_port_direction(self, connection_port_direction: str):
    allowed_values = ["BIDIRECTIONAL", "INPUT", "OUTPUT", "UNIDENTIFIED_OR_UNKNOWN"] # noqa: E501
    if connection_port_direction not in allowed_values:
        raise ValueError(
            "Invalid value for `connection_port_direction` ({0}), must be one of {1}"
            .format(connection_port_direction, allowed_values)
        )
    self._connection_port_direction = connection_port_direction | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_port_direction(self, port, direction):\n\n if port == 1:\n self.__bus.write_byte_data(\n self.__ioaddress, self.IODIRB, direction)\n self.__port_b_direction = direction\n else:\n self.__bus.write_byte_data(\n self.__ioaddress, self.IODIRA, direction)\n self.__port_a_direction = direction\n return",
"def connection_port_direction(self) -> str:\n return self._connection_port_direction",
"def changePort(self, to_port, from_port=None, direction='CW'):\n if not 0 < to_port <= self.num_ports:\n raise(ValueError('`in_port` [{0}] must be between 1 and '\n '`num_ports` [{1}]'.format(to_port,\n self.num_ports)))\n if not from_port:\n if self.sim_state['port']:\n from_port = self.sim_state['port']\n else:\n from_port = 1\n diff = to_port - from_port\n if abs(diff) >= 7: diff = -diff\n if diff < 0: direction = 'CCW'\n else: direction = 'CW'\n cmd_string = '{0}{1}'.format(self.__class__.DIR_DICT[direction][0],\n to_port)\n self.sim_state['port'] = to_port\n self.cmd_chain += cmd_string\n self.exec_time += 0.2",
"def direction(self, direction):\n\n self._direction = direction",
"def set_direction(self, direction: str) -> None:\n if direction == \"forward\":\n self._bond.setDirection(self._deviceId, Directions.FORWARD)\n elif direction == \"reverse\":\n self._bond.setDirection(self._deviceId, Directions.REVERSE)\n self._attributes['current_direction'] = direction",
"def connection_port_role(self, connection_port_role: str):\n allowed_values = [\"SYMMETRIC\", \"ROOT\", \"LEAF\", \"TRUNK\", \"UNKNOWN\"] # noqa: E501\n if connection_port_role not in allowed_values:\n raise ValueError(\n \"Invalid value for `connection_port_role` ({0}), must be one of {1}\"\n .format(connection_port_role, allowed_values)\n )\n\n self._connection_port_role = connection_port_role",
"def set_pin_direction(self, pin, direction):\n pin = pin - 1\n if pin < 8:\n self.__port_a_direction = self.__helper.updatebyte(\n self.__port_a_direction, pin, direction)\n self.__bus.write_byte_data(\n self.__ioaddress, self.IODIRA, self.__port_a_direction)\n else:\n self.__port_b_direction = self.__helper.updatebyte(\n self.__port_b_direction, pin - 8, direction)\n self.__bus.write_byte_data(\n self.__ioaddress, self.IODIRB, self.__port_b_direction)\n return",
"def setdirection(self, *args, **kwargs):\n return _coordsys.coordsys_setdirection(self, *args, **kwargs)",
"def setRobotDirection(self, direction):\n self.direction = direction\n #raise NotImplementedError",
"def setRobotDirection(self, direction):\n self.direction = direction\n #raise NotImplementedError",
"def setRobotDirection(self, direction):\n self.direction = direction",
"def setRobotDirection(self, direction):\n self.direction = direction",
"def get_port_direction(self, port):\n if port == 1:\n self.__port_b_direction = self.__bus.read_byte_data(\n self.__ioaddress, self.IODIRB)\n return self.__port_b_direction\n else:\n self.__port_a_direction = self.__bus.read_byte_data(\n self.__ioaddress, self.IODIRA)\n return self.__port_a_direction\n return",
"def sendDirection(self,direction):\n x,y = direction\n data = _RobotCommunicator.DIRECTION_HEADER + \\\n pack(_RobotCommunicator.DIRECTION_FORMAT,x,y)\n self.udpSock.sendto(data,self.addr)",
"def direction(self, direction):\n allowed_values = [\"supports\", \"does_not_support\"] # noqa: E501\n if direction not in allowed_values:\n raise ValueError(\n \"Invalid value for `direction` ({0}), must be one of {1}\" # noqa: E501\n .format(direction, allowed_values)\n )\n\n self._direction = direction",
"def port_mapping(self, port_mapping):\n\n self._port_mapping = port_mapping",
"def setDirection(self,stepDir = 2):\n pass",
"async def async_set_direction(self, direction: str) -> None:\n if direction == DIRECTION_FORWARD:\n self._device.fan_dir = SENSEME_DIRECTION_FORWARD\n else:\n self._device.fan_dir = SENSEME_DIRECTION_REVERSE",
"def set_direction(self, new_dir):\n self.__direction = new_dir",
"def SetLayoutDirection(*args, **kwargs):\n return _gdi_.DC_SetLayoutDirection(*args, **kwargs)",
"def port_in(self, port_in):\n\n self._port_in = port_in",
"def port_in(self, port_in):\n\n self._port_in = port_in",
"def setDirection (self, ra, dec):\n self._response.setDirection(ra, dec)",
"def dock_direction_set(self, value):\r\n \r\n self._dock_direction = value",
"def setport(self, port):\n self.__port = port",
"def set_direction(self, dir):\n if dir == 0:\n self.direction = [0, -1]\n elif dir == 1:\n self.direction = [1, 0]\n elif dir == 2:\n self.direction = [0, 1]\n elif dir == 3:\n self.direction = [-1, 0]",
"def set_port(self, party_port) -> None:\n\n self._port = party_port",
"def set_direction(self, direction: int) -> None: \r\n self.direction = direction\r\n if (direction == Directions.turn_left or\r\n direction == Directions.turn_right):\r\n self.stop_timer = time.time() + self.driving_time_turning\r\n else:\r\n self.stop_timer = time.time() + self.driving_time",
"def port(self, port):\n\n self._port = port",
"def port(self, port):\n\n self._port = port"
] | [
"0.72491103",
"0.6883171",
"0.61560464",
"0.59813505",
"0.5803918",
"0.5802289",
"0.5751801",
"0.56792367",
"0.56702006",
"0.56702006",
"0.5623771",
"0.5623771",
"0.5603514",
"0.55927914",
"0.558962",
"0.5573132",
"0.55627877",
"0.5492916",
"0.5368065",
"0.53601414",
"0.53387684",
"0.53387684",
"0.53256667",
"0.53254306",
"0.52949286",
"0.5279159",
"0.52074665",
"0.5205922",
"0.5122478",
"0.5122478"
] | 0.77216226 | 0 |
Gets the connection_port_role of this ConnectionEndPoint. | def connection_port_role(self) -> str:
    return self._connection_port_role | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def connection_port_role(self, connection_port_role: str):\n allowed_values = [\"SYMMETRIC\", \"ROOT\", \"LEAF\", \"TRUNK\", \"UNKNOWN\"] # noqa: E501\n if connection_port_role not in allowed_values:\n raise ValueError(\n \"Invalid value for `connection_port_role` ({0}), must be one of {1}\"\n .format(connection_port_role, allowed_values)\n )\n\n self._connection_port_role = connection_port_role",
"def get_port(self):\n \n return self._port",
"def get_port(self):\n return self.__port",
"def receiver_port(self):\n return self._receiver_port",
"def get_port(self):\n return self.port",
"def getPort(self):\n return self._port",
"def getPort(self):\n return self._port",
"def get_port(self) -> int:\n return self._port",
"def connection_port_direction(self) -> str:\n return self._connection_port_direction",
"def role(self):\n\n return self._role",
"def role(self):\n return self._role",
"def role(self):\n return self._role",
"def role(self):\n return self._role",
"def get_role(self):\n return self.role",
"def comm_port(self):\r\n return self._comm_port",
"def _get_port(self):\n return self.__port",
"def role(self) -> aws_cdk.aws_iam.IRole:\n return self._values.get('role')",
"def port(self):\n return self._port",
"def port(self):\n return self._port",
"def port(self):\n return self._port",
"def port(self):\n return self._port",
"def port(self):\n return self._port",
"def port(self):\n return self._port",
"def role(self) -> typing.Optional[aws_cdk.aws_iam.IRole]:\n return self._values.get('role')",
"def role(self) -> typing.Optional[aws_cdk.aws_iam.IRole]:\n return self._values.get(\"role\")",
"def port(self):\n\n return self._port",
"def remoteport(self) :\n\t\ttry :\n\t\t\treturn self._remoteport\n\t\texcept Exception as e:\n\t\t\traise e",
"def port(self) -> int:\n if hasattr(self, \"_port\"):\n return self._port\n _args: list[Arg] = []\n _ctx = self._select(\"port\", _args)\n return _ctx.execute_sync(int)",
"def port(self) -> int:\n return self._port",
"def port(self, rel_id=None):\n rel = self.framework.model.get_relation(self.relation_name, rel_id)\n\n return rel.data[rel.app].get(\"port\")"
] | [
"0.6675221",
"0.6536682",
"0.64771336",
"0.643482",
"0.63028085",
"0.62838185",
"0.62733996",
"0.62726057",
"0.6268773",
"0.61741924",
"0.61579907",
"0.61579907",
"0.61579907",
"0.6105567",
"0.6075074",
"0.5997939",
"0.59857833",
"0.59406775",
"0.59406775",
"0.59406775",
"0.59406775",
"0.59406775",
"0.59406775",
"0.592843",
"0.590334",
"0.5894983",
"0.5886195",
"0.5843209",
"0.5831595",
"0.58259475"
] | 0.8586469 | 0 |
Sets the connection_port_role of this ConnectionEndPoint. | def connection_port_role(self, connection_port_role: str):
    allowed_values = ["SYMMETRIC", "ROOT", "LEAF", "TRUNK", "UNKNOWN"] # noqa: E501
    if connection_port_role not in allowed_values:
        raise ValueError(
            "Invalid value for `connection_port_role` ({0}), must be one of {1}"
            .format(connection_port_role, allowed_values)
        )
    self._connection_port_role = connection_port_role | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def connection_port_role(self) -> str:\n return self._connection_port_role",
"def set_port(self, party_port) -> None:\n\n self._port = party_port",
"def setport(self, port):\n self.__port = port",
"def set_task_role(self, task_role):\n self._task_role = task_role",
"def port(self, port):\n\n self._port = port",
"def port(self, port):\n\n self._port = port",
"def port(self, port):\n\n self._port = port",
"def role(self, role):\n\n self._role = int(role)",
"def role(self, role):\n\n self._role = role",
"def role(self, role):\n\n self._role = role",
"def __setRole(self, session):\r\n self.__role = session.role\r\n if self._config.has_key('purpose'):\r\n co_role = ccm.get_role_for_purpose(session, self._config['purpose'])\r\n _logger.info(\"Switching user to role: %s\" % co_role)\r\n session.role = co_role\r\n _logger.info(\"Switched user to role: %s\" % session.role)",
"def port(self, port):\n if port is not None and port > 65535:\n raise ValueError(\"Invalid value for `port`, must be a value less than or equal to `65535`\")\n if port is not None and port < 1:\n raise ValueError(\"Invalid value for `port`, must be a value greater than or equal to `1`\")\n\n self._port = port",
"def port_mapping(self, port_mapping):\n\n self._port_mapping = port_mapping",
"def setPort(self, port):\n libxml2mod.xmlURISetPort(self._o, port)",
"def port_lte(self, port_lte):\n\n self._port_lte = port_lte",
"def port_lte(self, port_lte):\n\n self._port_lte = port_lte",
"def cloud_port(self, cloud_port):\n\n self._cloud_port = cloud_port",
"def port_in(self, port_in):\n\n self._port_in = port_in",
"def port_in(self, port_in):\n\n self._port_in = port_in",
"def authorize_role(self, role, from_port, to_port, cidr_ip):\n role_group_name = self.group_name_for_role(role)\n # Revoke first to avoid InvalidPermission.Duplicate error\n self.ec2Connection.revoke_security_group(role_group_name, ip_protocol=\"tcp\", from_port=from_port, to_port=to_port, cidr_ip=cidr_ip)\n self.ec2Connection.authorize_security_group(role_group_name, ip_protocol=\"tcp\", from_port=from_port, to_port=to_port, cidr_ip=cidr_ip)",
"def connect_icache(self, port: Port) -> None:\n self.port_end.req_ports = port",
"def port(self, port: int):\n if port is not None and port < 0: # noqa: E501\n raise ValueError(\"Invalid value for `port`, must be a value greater than or equal to `0`\") # noqa: E501\n\n self._port = port",
"def server_port(self, server_port):\n\n self._server_port = server_port",
"def changeRole(self, node, role):",
"async def set_port(self, port: int) -> None:\n self.port = port\n _LOGGER.info(\"Setting port to %s\", port)\n if self._server:\n self._server.stop()\n await self._start_server()",
"def setRole(self, *args):\n return _libsbml.ReferenceGlyph_setRole(self, *args)",
"def set_login_port(self, port: int):\n assert 0 < port < 65535\n self.login_udp_port = port\n return self",
"def connection_port_direction(self, connection_port_direction: str):\n allowed_values = [\"BIDIRECTIONAL\", \"INPUT\", \"OUTPUT\", \"UNIDENTIFIED_OR_UNKNOWN\"] # noqa: E501\n if connection_port_direction not in allowed_values:\n raise ValueError(\n \"Invalid value for `connection_port_direction` ({0}), must be one of {1}\"\n .format(connection_port_direction, allowed_values)\n )\n\n self._connection_port_direction = connection_port_direction",
"def modify_ports(self, ports, **kwargs):\n pass",
"def connection_port_direction(self) -> str:\n return self._connection_port_direction"
] | [
"0.7050339",
"0.5935539",
"0.58579373",
"0.5754634",
"0.56839955",
"0.56839955",
"0.56839955",
"0.55978996",
"0.5477563",
"0.5477563",
"0.540866",
"0.5401389",
"0.5297533",
"0.5271658",
"0.5263875",
"0.5263875",
"0.5228858",
"0.51936316",
"0.51936316",
"0.5122609",
"0.511871",
"0.50765187",
"0.5058898",
"0.5051977",
"0.5043132",
"0.5029945",
"0.50271964",
"0.50074357",
"0.4961915",
"0.48959112"
] | 0.7690575 | 0 |
Resolve the free type variables of a constructor given the argument types we instantiate the class with. This means we need to match up argument types with variables from the class layout. In the most general case this means we need to fixpoint infer all methods called from the constructor. | def infer_constructor_application(classtype, argtypes):
    # Figure out the list of argtypes
    cls = classtype.impl
    init = cls.__init__.py_func
    argtypes = fill_missing_argtypes(init, tuple(argtypes))
    # Determine __init__ argnames
    argspec = inspect.getargspec(init)
    assert not argspec.varargs
    assert not argspec.keywords
    argnames = argspec.args
    assert len(argtypes) == len(argnames)
    return infer_type_from_layout(classtype, zip(argnames, argtypes)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, *args):\n self.types = tuple([trait_from(arg) for arg in args])\n self.fast_validate = (9, self.types)",
"def resolve_types(cls, types: dict) -> Callable:\n return functools.partial(resolve_annotations, annotations=cls.annotations(types))",
"def __init__(self):\n\n # Dictionary of types seen so far. Builtin types always available.\n # Values : list of constructors which the type defines\n # This is a smartdict, so keys can be retrieved.\n self.knownTypes = smartdict.Smartdict()\n for typecon in ast.builtin_types_map.values():\n self.knownTypes[typecon()] = None\n\n # Dictionary of constructors encountered so far.\n # Value: Type which the constructor produces.\n # This is a smartdict, so keys can be retrieved.\n self.knownConstructors = smartdict.Smartdict()",
"def resolver(cls) -> Callable:\n annotations = {}\n for subclass in cls.subclasses():\n name = subclass.__name__.split(cls.__name__)[0].lower() # type: ignore\n argument = strawberry.argument(description=subclass._type_definition.description)\n annotations[name] = Annotated[List[subclass], argument] # type: ignore\n defaults = dict.fromkeys(annotations, []) # type: dict\n return functools.partial(resolve_annotations, annotations=annotations, defaults=defaults)",
"def get_init_arguments_and_types(cls) -> List[Tuple[str, Tuple, Any]]:\n return get_init_arguments_and_types(cls)",
"def resolve_constructor(loader, node):\n global workspace\n arg = loader.construct_mapping(node, deep=True)\n return resolve_pointer( workspace, arg )",
"def test_class():\n class TestClass1(object):\n arg1 = None # type: int\n arg2 = None # type: str\n\n assert get_type_hints(TestClass1) == {\n 'arg1': int,\n 'arg2': str\n }",
"def _create_args(self, func_args):\n self.llvm_ret_type = self._from_ctype(self.signature.ret_type)\n self.llvm_arg_types = \\\n [self._from_ctype(a) for a in self.signature.arg_ctypes]",
"def test_method():\n class TestClass(object):\n\n def typed(self, arg1):\n # type: (TestClass, int) -> None\n pass\n\n def untyped(self, arg1):\n # type: (int) -> None\n pass\n\n assert get_type_hints(TestClass.typed, globals(), locals()) == {\n 'return': type(None),\n 'self': TestClass,\n 'arg1': int\n }\n assert get_type_hints(TestClass.untyped) == {\n 'return': type(None),\n 'arg1': int\n }",
"def get_init_arguments_and_types(cls) -> List[Tuple[str, Tuple, Any]]:\n trainer_default_params = inspect.signature(cls).parameters\n name_type_default = []\n for arg in trainer_default_params:\n arg_type = trainer_default_params[arg].annotation\n arg_default = trainer_default_params[arg].default\n try:\n arg_types = tuple(arg_type.__args__)\n except AttributeError:\n arg_types = (arg_type,)\n\n name_type_default.append((arg, arg_types, arg_default))\n\n return name_type_default",
"def _init_parametric_base(cls) -> None:\n\n # Direct subclasses of ParametricType must declare\n # ClassVar attributes corresponding to the Generic type vars.\n # For example:\n # class P(ParametricType, Generic[T, V]):\n # t: ClassVar[Type[T]]\n # v: ClassVar[Type[V]]\n\n params = getattr(cls, '__parameters__', None)\n\n if not params:\n raise TypeError(\n f'{cls} must be declared as Generic'\n )\n\n mod = sys.modules[cls.__module__]\n annos = get_type_hints(cls, mod.__dict__)\n param_map = {}\n\n for attr, t in annos.items():\n if not typing_inspect.is_classvar(t):\n continue\n\n args = typing_inspect.get_args(t)\n # ClassVar constructor should have the check, but be extra safe.\n assert len(args) == 1\n\n arg = args[0]\n if typing_inspect.get_origin(arg) != type:\n continue\n\n arg_args = typing_inspect.get_args(arg)\n # Likewise, rely on Type checking its stuff in the constructor\n assert len(arg_args) == 1\n\n if not typing_inspect.is_typevar(arg_args[0]):\n continue\n\n if arg_args[0] in params:\n param_map[arg_args[0]] = attr\n\n for param in params:\n if param not in param_map:\n raise TypeError(\n f'{cls.__name__}: missing ClassVar for'\n f' generic parameter {param}'\n )\n\n cls._type_param_map = param_map",
"def _init_parametric_user(cls) -> None:\n\n # For ParametricType grandchildren we have to deal with possible\n # TypeVar remapping and generally check for type sanity.\n\n ob = getattr(cls, '__orig_bases__', ())\n generic_params: list[type] = []\n\n for b in ob:\n if (\n isinstance(b, type)\n and not isinstance(b, GenericAlias)\n and issubclass(b, ParametricType)\n and b is not ParametricType\n ):\n raise TypeError(\n f'{cls.__name__}: missing one or more type arguments for'\n f' base {b.__name__!r}'\n )\n\n if not typing_inspect.is_generic_type(b):\n continue\n\n org = typing_inspect.get_origin(b)\n if not isinstance(org, type):\n continue\n if not issubclass(org, ParametricType):\n generic_params.extend(getattr(b, '__parameters__', ()))\n continue\n\n base_params = getattr(org, '__parameters__', ())\n base_non_type_params = getattr(org, '_non_type_params', {})\n args = typing_inspect.get_args(b)\n expected = len(base_params)\n if len(args) != expected:\n raise TypeError(\n f'{b.__name__} expects {expected} type arguments'\n f' got {len(args)}'\n )\n\n base_map = dict(cls._type_param_map)\n subclass_map = {}\n\n for i, arg in enumerate(args):\n if i in base_non_type_params:\n continue\n if not typing_inspect.is_typevar(arg):\n raise TypeError(\n f'{b.__name__} expects all arguments to be'\n f' TypeVars'\n )\n\n base_typevar = base_params[i]\n attr = base_map.get(base_typevar)\n if attr is not None:\n subclass_map[arg] = attr\n\n if len(subclass_map) != len(base_map):\n raise TypeError(\n f'{cls.__name__}: missing one or more type arguments for'\n f' base {org.__name__!r}'\n )\n\n cls._type_param_map = subclass_map\n\n cls._non_type_params = {\n i: p for i, p in enumerate(generic_params)\n if p not in cls._type_param_map\n }",
"def __init__(self, fields):\n self.__init_handle_by_constructor__(_make.TupleType, fields)",
"def infer(self):\n # Update ext_type.symtab\n self.type_infer_init_method()\n\n # Type infer the rest of the methods (with fixed attribute table!)\n self.type_infer_methods()",
"def prepare_arguments(self, ftyp, args):\n # Determine fixed and variable arguments:\n if ftyp.is_vararg:\n fixed_amount = len(ftyp.arguments)\n fixed_args = args[:fixed_amount]\n var_args = args[fixed_amount:]\n else:\n fixed_args = args\n var_args = []\n\n # Evaluate arguments:\n ir_arguments = []\n\n # If return value is complex, reserve room for it an pass pointer\n if ftyp.return_type.is_struct:\n size, alignment = self.data_layout(ftyp.return_type)\n rval_alloc = self.emit(ir.Alloc(\"rval_alloc\", size, alignment))\n rval_ptr = self.emit(ir.AddressOf(rval_alloc, \"rval_ptr\"))\n ir_arguments.append(rval_ptr)\n else:\n rval_alloc = None\n\n # Place other arguments:\n for argument in fixed_args:\n value = self.gen_expr(argument, rvalue=True)\n ir_arguments.append(value)\n\n # Handle variable arguments:\n if ftyp.is_vararg:\n vararg_ptr = self.gen_fill_varargs(var_args)\n ir_arguments.append(vararg_ptr)\n else:\n assert not var_args\n\n return ir_arguments, rval_alloc",
"def __init__(self, aType):\n if not isinstance(aType, TypeType):\n aType = type(aType)\n self.aType = aType\n try:\n self.fast_validate = CoercableTypes[aType]\n except:\n self.fast_validate = (11, aType)",
"def _infer_variable_types_from_data(raw_data):\n raise NotImplementedError()",
"def resolver(cls) -> Callable:\n annotations = dict(cls.__annotations__)\n annotations.pop('apply', None)\n defaults = {name: getattr(cls, name) for name in annotations}\n return functools.partial(resolve_annotations, annotations=annotations, defaults=defaults)",
"def __init__(self, **kwargs):\n for type_hint in self.__fields_types__.values():\n if type_hint is ForwardRef or ForwardRef in get_args(type_hint):\n raise Warning(\"Not all type hints were evaluated.\")\n errors = []\n for name in kwargs:\n if ((getattr(self, name, None) is not None\n and name not in self.__fields_types__)\n or name in self._forbidden_fields):\n errors.append(f\" This attribute name is reserved: '{name}'.\")\n if errors:\n raise ValueError(\"\\n\" + \"\\n\".join(errors))\n for k, v in kwargs.items():\n setattr(self, k, v)",
"def instantiate_classes(self, node):\n clslist = []\n for cls in node.classes:\n if cls.wrap_as == \"struct\":\n clslist.append(cls)\n options = cls.options\n if cls.wrap.python and options.PY_struct_arg == \"class\":\n self.add_struct_ctor(cls)\n self.process_class(node, cls)\n elif cls.template_arguments:\n orig_typemap = cls.typemap\n if orig_typemap.cxx_instantiation is None:\n orig_typemap.cxx_instantiation = {}\n # Replace class with new class for each template instantiation.\n # targs -> ast.TemplateArgument\n for i, targs in enumerate(cls.template_arguments):\n newcls = cls.clone()\n clslist.append(newcls)\n\n # If single template argument, use its name; else sequence.\n # XXX - maybe change to names\n # i.e. _int_double However <std::string,int> is a problem.\n if targs.fmtdict and 'template_suffix' in targs.fmtdict:\n class_suffix = targs.fmtdict['template_suffix']\n elif len(targs.asts) == 1:\n ntypemap = targs.asts[0].typemap\n if ntypemap.template_suffix:\n class_suffix = ntypemap.template_suffix\n else:\n class_suffix = \"_\" + ntypemap.flat_name\n else:\n class_suffix = \"_\" + str(i)\n\n # Update name of class.\n # name_api - vector_0 or vector_int (Fortran and C names)\n # name_instantiation - vector<int>\n if targs.fmtdict and \"cxx_class\" in targs.fmtdict:\n newcls.name_api = targs.fmtdict[\"cxx_class\"]\n else:\n newcls.name_api = cls.name + class_suffix\n newcls.name_instantiation = cls.name + targs.instantiation\n newcls.scope_file[-1] += class_suffix\n\n if targs.fmtdict:\n newcls.user_fmt.update(targs.fmtdict)\n if targs.options:\n newcls.options.update(targs.options)\n \n # Remove defaulted attributes then reset with current values.\n newcls.delete_format_templates()\n newcls.default_format()\n\n newcls.typemap = typemap.create_class_typemap(newcls)\n if targs.instantiation in orig_typemap.cxx_instantiation:\n print(\"instantiate_classes: {} already in \"\n \"typemap.cxx_instantiation\".format(targs.instantiation))\n orig_typemap.cxx_instantiation[targs.instantiation] = newcls.typemap\n\n self.template_typedef(newcls, targs)\n\n self.push_instantiate_scope(newcls, targs)\n self.process_class(newcls, newcls)\n self.pop_instantiate_scope()\n else:\n clslist.append(cls)\n self.process_class(cls, cls)\n\n node.classes = clslist",
"def _type_def_helper(name, args, env: Env) -> typing.Tuple[Basic, typing.Dict[str, Undecided]]:\n\n new_basic = make_basic(name)\n env.set_named_type(name, new_basic)\n _ty_args = OrderedDict((arg, Undecided()) for arg in args)\n env.undecided_types.update(_ty_args)\n return new_basic, _ty_args",
"def _make_constructor(name, type_, attrs, kwargs):\n d = dict(attrs)\n d['_sumtype_attribs'] = [x for x in attrs]\n t = type(name, (type_,), d)\n t = attr.s(t, repr_ns=type_.__name__, **kwargs)\n return t",
"def resolve_type_ids(self):\n if self.general_class_id:\n if self.general_class_id in class_ids:\n self.general_class_name = class_ids[self.general_class_id]\n else:\n if self.general_class_id in type_ids:\n self.general_class_name = type_ids[self.general_class_id]\n\n self.general_class = self.find_general_class(self.general_class_id)\n\n for attribute_ in self.attributes:\n attribute_.resolve_type_ids()\n\n if len(self.attributes) == 1 :\n attribute_ = self.attributes[0]\n attribute_.resolve_type_ids()\n self.general_class_name = attribute_.base_type_name + \" # one attribute, no need for seperate type, just use base type of the attribute\"",
"def parse_class_init_keys(cls) -> Tuple[str, Optional[str], Optional[str]]:\n init_parameters = inspect.signature(cls.__init__).parameters\n # docs claims the params are always ordered\n # https://docs.python.org/3/library/inspect.html#inspect.Signature.parameters\n init_params = list(init_parameters.values())\n # self is always first\n n_self = init_params[0].name\n\n def _get_first_if_any(\n params: List[inspect.Parameter],\n param_type: Literal[\n inspect._ParameterKind.VAR_POSITIONAL, inspect._ParameterKind.VAR_KEYWORD\n ],\n ) -> Optional[str]:\n for p in params:\n if p.kind == param_type:\n return p.name\n return None\n\n n_args = _get_first_if_any(init_params, inspect.Parameter.VAR_POSITIONAL)\n n_kwargs = _get_first_if_any(init_params, inspect.Parameter.VAR_KEYWORD)\n\n return n_self, n_args, n_kwargs",
"def post_init(cls: Type[U]) -> Type[U]:\n if not isinstance(cls, type):\n raise TypeError(\"Can only decorate classes\")\n if not hasattr(cls, \"__post_init__\"):\n raise TypeError(\"The class must have a __post_init__() method\")\n # Ignore the first argument which is the \"self\" argument\n sig = init_sig = _sig_without(inspect.signature(cls.__init__), 0)\n previous = [(cls, \"__init__\", sig)]\n for parent in reversed(cls.__mro__):\n if hasattr(parent, \"__post_init__\"):\n post_sig = _sig_without(\n inspect.signature(getattr(parent, \"__post_init__\")), 0\n )\n try:\n sig = _sig_merge(sig, post_sig)\n except Exception as err:\n # find the incompatibility\n for parent, method, psig in previous:\n try:\n _sig_merge(psig, post_sig)\n except Exception:\n break\n else:\n raise TypeError(\n \"__post_init__ signature is incompatible with the class\"\n ) from err\n raise TypeError(\n f\"__post_init__() is incompatible with {parent.__qualname__}{method}()\"\n ) from err\n # No exception\n previous.append((parent, \"__post_init__\", post_sig))\n # handles type annotations and defaults\n # inspired by the dataclasses modules\n params = list(sig.parameters.values())\n localns = (\n {\n f\"__type_{p.name}\": p.annotation\n for p in params\n if p.annotation is not inspect.Parameter.empty\n }\n | {\n f\"__default_{p.name}\": p.default\n for p in params\n if p.default is not inspect.Parameter.empty\n }\n | cls.__dict__\n )\n for i, p in enumerate(params):\n if p.default is not inspect.Parameter.empty:\n p = p.replace(default=Variable(f\"__default_{p.name}\"))\n if p.annotation is not inspect.Parameter.empty:\n p = p.replace(annotation=f\"__type_{p.name}\")\n params[i] = p\n new_sig = inspect.Signature(params)\n # Build the new __init__ source code\n self_ = \"self\" if \"self\" not in sig.parameters else \"__post_init_self\"\n init_lines = [\n f\"def __init__({self_}, {_sig_to_def(new_sig)}) -> None:\",\n f\"__original_init({self_}, {_sig_to_call(init_sig)})\",\n ]\n for parent, method, psig in previous[1:]:\n if hasattr(parent, \"__post_init__\"):\n if parent is not cls:\n init_lines.append(\n f\"super({parent.__qualname__}, {self_}).{method}({_sig_to_call(psig)})\"\n )\n else:\n init_lines.append(f\"{self_}.{method}({_sig_to_call(psig)})\")\n init_src = \"\\n \".join(init_lines)\n # Build the factory function source code\n local_vars = \", \".join(localns.keys())\n factory_src = (\n f\"def __make_init__(__original_init, {local_vars}):\\n\"\n f\" {init_src}\\n\"\n \" return __init__\"\n )\n # Create new __init__ with the factory\n globalns = inspect.getmodule(cls).__dict__\n ns: dict[str, Any] = {}\n exec(factory_src, globalns, ns)\n init = ns[\"__make_init__\"](cls.__init__, **localns)\n self_param = inspect.Parameter(self_, inspect.Parameter.POSITIONAL_ONLY)\n init.__signature__ = inspect.Signature(\n parameters=[self_param] + list(sig.parameters.values()), return_annotation=None\n )\n setattr(cls, \"__init__\", init)\n return cls",
"def _cast_types(args):\n\targs.x_val = None if args.x_val == 'None' else int(args.x_val)\n\targs.test_size = float(args.test_size)\n\targs.alpha = float(args.alpha)\n\targs.fit_prior = (args.fit_prior in ['True', \"True\", 'true', \"true\"])\n\n\t# class_prior - array like type (problem to convert)\n\tif args.class_prior == \"None\" or args.class_prior == 'None':\n\t\targs.class_prior = None\n\n\t# --------- #\n\treturn args",
"def _preprocess_typecheck(argSig, argspecs, slf_or_clsm=False):\n # todo: Maybe move also slf-logic here\n vargs = argspecs.varargs\n try:\n kw = argspecs.keywords\n except AttributeError:\n kw = argspecs.varkw\n try:\n kwonly = argspecs.kwonlyargs\n except AttributeError:\n kwonly = None\n if not vargs is None or not kw is None:\n arg_type_lst = list(get_Tuple_params(argSig))\n if not vargs is None:\n vargs_pos = (len(argspecs.args)-1) \\\n if slf_or_clsm else len(argspecs.args)\n # IndexErrors in this section indicate that a child-method was\n # checked against a parent's type-info with the child featuring\n # a more wider type on signature level (e.g. adding vargs)\n try:\n vargs_type = typing.Sequence[arg_type_lst[vargs_pos]]\n except IndexError:\n vargs_type = typing.Sequence[typing.Any]\n try:\n arg_type_lst[vargs_pos] = vargs_type\n except IndexError:\n arg_type_lst.append(vargs_type)\n if not kw is None:\n kw_pos = len(argspecs.args)\n if slf_or_clsm:\n kw_pos -= 1\n if not vargs is None:\n kw_pos += 1\n if not kwonly is None:\n kw_pos += len(kwonly)\n try:\n kw_type = typing.Dict[str, arg_type_lst[kw_pos]]\n except IndexError:\n kw_type = typing.Dict[str, typing.Any]\n try:\n arg_type_lst[kw_pos] = kw_type\n except IndexError:\n arg_type_lst.append(kw_type)\n return typing.Tuple[tuple(arg_type_lst)]\n else:\n return argSig",
"def __call__(self, *args):\n return TypeCall(self, args)",
"def test_nested_class():\n class TestClass2(object):\n arg1 = None # type: int\n arg2 = None # type: str\n\n class TestClass3(object):\n pass\n\n def func(self, value):\n # type: (int) -> None\n\n class TestClass4(object):\n arg4 = None # type: int\n\n def func(self, value):\n # type: (int) -> None\n\n class TestClass5(object):\n arg5 = None # type: int\n\n arg6 = None # type: bool\n\n arg3 = None # type: bool\n\n assert get_type_hints(TestClass2) == {\n 'arg1': int,\n 'arg2': str,\n 'arg3': bool\n }",
"def __init__(self, aType):\n if not isinstance(aType, TypeType):\n aType = type(aType)\n self.aType = aType\n self.fast_validate = (12, aType)"
] | [
"0.61208606",
"0.59640104",
"0.5496596",
"0.5320413",
"0.5247586",
"0.52279824",
"0.5196888",
"0.5152734",
"0.510851",
"0.50768083",
"0.50480545",
"0.50430393",
"0.5022698",
"0.4972331",
"0.4889604",
"0.48878282",
"0.48712242",
"0.48436376",
"0.48328483",
"0.4810371",
"0.48076242",
"0.48028493",
"0.47890443",
"0.47866452",
"0.47587466",
"0.46991077",
"0.46972626",
"0.46949744",
"0.46689403",
"0.46600604"
] | 0.6475425 | 0 |
Creates a new boto assignment mock class with the given fields supplied with the specified values. | def make_boto_assignment(values):
    assignment = mock.MagicMock()
    assignment.AssignmentId = str(uuid.uuid4())
    assignment.HITId = str(uuid.uuid4())
    assignment.WorkerId = str(uuid.uuid4())
    assignment.answers = [[]]
    for key, value in values.items():
        answer_mock = mock.MagicMock()
        answer_mock.qid = key
        answer_mock.fields = [value]
        assignment.answers[0].append(answer_mock)
    return assignment | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, **attributes):\n self.set(**attributes)",
"def __init__(self, **kwargs):\n default_values = {\n 'name': 'Organization Name',\n 'ubi': 'Unified Business Identifier',\n 'address_line_1': '',\n 'address_line_2': '',\n 'city': '',\n 'state': '',\n 'zipcode': '',\n 'county': '',\n 'phone': '',\n 'license_id': '',\n 'license_type': '',\n 'license_status': '',\n 'license_creation_date': ''\n }\n\n # Set instance properties from keyword arguments or default values\n for (attr, default) in default_values.items():\n setattr(self, attr, kwargs.get(attr, default))",
"def __init__(self, field_names=..., **kwargs) -> None:\n ...",
"def build(cls, **kwargs):\n new_object = cls()\n fields = get_fields(cls)\n fields = dict((field.field_name, field) for field in fields)\n for name, value in kwargs.items():\n object.__setattr__(new_object, name, value)\n \n return new_object",
"def __init__(self, **kwargs):\n # loop over the given kwargs\n for key, value in kwargs.items():\n # treat them like attribute assignments\n setattr(self, key, value)",
"def __init__(self, a=\"a\", b=\"b\"):\n self.a = a\n self.b = b",
"def __init__(__self__, *,\n attributes: Optional[pulumi.Input[Mapping[str, Any]]] = None,\n description: Optional[pulumi.Input[str]] = None,\n disable_status_check: Optional[pulumi.Input[bool]] = None,\n email: Optional[pulumi.Input[str]] = None,\n masters: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n name: Optional[pulumi.Input[str]] = None,\n project_id: Optional[pulumi.Input[str]] = None,\n region: Optional[pulumi.Input[str]] = None,\n ttl: Optional[pulumi.Input[int]] = None,\n type: Optional[pulumi.Input[str]] = None,\n value_specs: Optional[pulumi.Input[Mapping[str, Any]]] = None):\n if attributes is not None:\n pulumi.set(__self__, \"attributes\", attributes)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if disable_status_check is not None:\n pulumi.set(__self__, \"disable_status_check\", disable_status_check)\n if email is not None:\n pulumi.set(__self__, \"email\", email)\n if masters is not None:\n pulumi.set(__self__, \"masters\", masters)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if project_id is not None:\n pulumi.set(__self__, \"project_id\", project_id)\n if region is not None:\n pulumi.set(__self__, \"region\", region)\n if ttl is not None:\n pulumi.set(__self__, \"ttl\", ttl)\n if type is not None:\n pulumi.set(__self__, \"type\", type)\n if value_specs is not None:\n pulumi.set(__self__, \"value_specs\", value_specs)",
"def __init__(__self__, *,\n attributes: Optional[pulumi.Input[Mapping[str, Any]]] = None,\n description: Optional[pulumi.Input[str]] = None,\n disable_status_check: Optional[pulumi.Input[bool]] = None,\n email: Optional[pulumi.Input[str]] = None,\n masters: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n name: Optional[pulumi.Input[str]] = None,\n project_id: Optional[pulumi.Input[str]] = None,\n region: Optional[pulumi.Input[str]] = None,\n ttl: Optional[pulumi.Input[int]] = None,\n type: Optional[pulumi.Input[str]] = None,\n value_specs: Optional[pulumi.Input[Mapping[str, Any]]] = None):\n if attributes is not None:\n pulumi.set(__self__, \"attributes\", attributes)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if disable_status_check is not None:\n pulumi.set(__self__, \"disable_status_check\", disable_status_check)\n if email is not None:\n pulumi.set(__self__, \"email\", email)\n if masters is not None:\n pulumi.set(__self__, \"masters\", masters)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if project_id is not None:\n pulumi.set(__self__, \"project_id\", project_id)\n if region is not None:\n pulumi.set(__self__, \"region\", region)\n if ttl is not None:\n pulumi.set(__self__, \"ttl\", ttl)\n if type is not None:\n pulumi.set(__self__, \"type\", type)\n if value_specs is not None:\n pulumi.set(__self__, \"value_specs\", value_specs)",
"def make(cls, **kwargs):\r\n return cls().fill(**kwargs)",
"def __init__(self, tableValues=None, json=None):\n if tableValues is not None:\n self.class_id = tableValues[0]\n self.crop_id = tableValues[1]\n self.target = tableValues[2]\n self.type = tableValues[3]\n self.latitude = tableValues[4]\n self.longitude = tableValues[5]\n self.orientation = tableValues[6]\n self.shape = tableValues[7]\n self.background_color = tableValues[8]\n self.alphanumeric = tableValues[9]\n self.alphanumeric_color = tableValues[10]\n self.description = tableValues[11]\n self.submitted = tableValues[12]\n elif json is not None:\n for prop in self.allProps():\n if prop in json:\n setattr(self, prop, json[prop])",
"def __init__(self, **kwargs: Any):\n for name, value in kwargs.items():\n setattr(self, name, value)",
"def __init__(self, a, b):\n self.a = a\n self.b = b",
"def __init__(self, *, bucket_arn: typing.Optional[str]=None, bucket_domain_name: typing.Optional[str]=None, bucket_dual_stack_domain_name: typing.Optional[str]=None, bucket_name: typing.Optional[str]=None, bucket_regional_domain_name: typing.Optional[str]=None, bucket_website_new_url_format: typing.Optional[bool]=None, bucket_website_url: typing.Optional[str]=None, encryption_key: typing.Optional[aws_cdk.aws_kms.IKey]=None):\n self._values = {\n }\n if bucket_arn is not None: self._values[\"bucket_arn\"] = bucket_arn\n if bucket_domain_name is not None: self._values[\"bucket_domain_name\"] = bucket_domain_name\n if bucket_dual_stack_domain_name is not None: self._values[\"bucket_dual_stack_domain_name\"] = bucket_dual_stack_domain_name\n if bucket_name is not None: self._values[\"bucket_name\"] = bucket_name\n if bucket_regional_domain_name is not None: self._values[\"bucket_regional_domain_name\"] = bucket_regional_domain_name\n if bucket_website_new_url_format is not None: self._values[\"bucket_website_new_url_format\"] = bucket_website_new_url_format\n if bucket_website_url is not None: self._values[\"bucket_website_url\"] = bucket_website_url\n if encryption_key is not None: self._values[\"encryption_key\"] = encryption_key",
"def __init__(self, **kwargs):\n for key, val in kwargs.items():\n setattr(self, key, val)",
"def __init__(self, **kwargs):\n for key, val in kwargs.items():\n setattr(self, key, val)",
"def __init__(self, **kwargs):\n for key, val in kwargs.items():\n setattr(self, key, val)",
"def __init__(self, keys, values):\n self.keys = keys\n self.values = values",
"def mock_amazon():\n amazon = Amazon()\n amazon.carrot1 = 'cenoura normal'\n amazon.carrot2 = 'cenoura radioativa'\n amazon.carrot_number = 575\n return amazon",
"def __init__(self, **kwargs):\n for key, value in kwargs.items():\n setattr(self, key, value)",
"def __init__(self, **kwargs):\n for key, value in kwargs.items():\n setattr(self, key, value)",
"def test_attributes_set_from_passed_values(self):\n\n expected_attributes = {\n \"columns\": [\"a\", \"b\", \"c\"],\n \"copy\": False,\n \"verbose\": True,\n }\n\n x = BaseTransformer(**expected_attributes)\n\n h.test_object_attributes(\n obj=x,\n expected_attributes=expected_attributes,\n msg=\"Attributes set in init from passed values\",\n )",
"def __init__(self, value_module=None, value_class=None, **kwargs):\n kwc=kwargs.copy()\n AbstractIdentifiable.__init__(self, **kwc)\n self._template_attrs = {}\n self._value_module = value_module or 'coverage_model.parameter_values'\n self._value_class = value_class or 'NumericValue'",
"def __init__(\n self,\n values: Mapping[str, ATTRIBUTE_TYPES],\n data_model: Optional[DataModel] = None,\n data_model_name: str = \"\",\n ):\n _values = deepcopy(values)\n self._values = _values\n if data_model is not None:\n self.data_model = data_model\n else:\n self.data_model = generate_data_model(data_model_name, values)\n self._check_consistency()",
"def __init__(self, **kwargs):\n self.subscriberid = None # kwargs.get('subscriberid', str(uuid.uuid4()))\n self.email = kwargs['email']\n self.first_name = kwargs.get('first_name', \"Feedback\")\n self.last_name = kwargs.get('last_name', \"Test\")\n self.company = kwargs.get('company', \"SmartBrief\")\n self.title = kwargs.get('title', \"Selenium Tester\")\n self.city = kwargs.get('city', \"Washington\")\n self.state = kwargs.get('state', \"DC\")\n self.country = kwargs.get('country', \"United States\")\n self.zipcode = kwargs.get('zipcode', \"20004\")\n self.mail_format_id = 1\n self.marketing_message = 'true'\n # self.position_level = kwargs.get('positionLevel')\n # etc",
"def test_constructor_fill_fields(self):\r\n builder = IndicatorBuilder(\"url\")\r\n\r\n self.assertEqual(builder.url, \"url\")\r\n self.assertEqual(builder.data, {})",
"def __init__(self, *args, **kwargs):\r\n if kwargs:\r\n for key, value in kwargs.items():\r\n\r\n if key == \"created_at\" or key == \"updated_at\":\r\n setattr(self, key, datetime.strptime(value,\r\n \"%Y-%m-%dT%H:%M:%S.%f\"))\r\n\r\n elif key != \"__class__\":\r\n setattr(self, key, value)\r\n\r\n else:\r\n self.id = str(uuid.uuid4())\r\n self.created_at = datetime.now()\r\n self.updated_at = datetime.now()\r\n models.storage.new(self)",
"def __init__(self, *args, **kwargs):\n for key, value in kwargs.items():\n if key == \"created_at\":\n value = datetime.strptime(value, \"%Y-%m-%dT%H:%M:%S.%f\")\n setattr(self, key, value)\n\n if key == \"updated_at\":\n value = datetime.strptime(value, \"%Y-%m-%dT%H:%M:%S.%f\")\n setattr(self, key, value)\n\n if key == \"__class__\":\n continue\n else:\n setattr(self, key, value)\n\n if len(kwargs) == 0:\n self.id = str(uuid.uuid4())\n self.created_at = datetime.now()\n self.updated_at = datetime.now()\n storage.new(self)",
"def test_configure_override_field_values(ExampleComponentClass):\n\n x = ExampleComponentClass()\n configure(x, {\"a\": 0, \"b\": \"bar\"})\n assert x.a == 0\n assert x.b == \"bar\"",
"def __init__(self, a, b, c):\r\n self.a = a\r\n self.b = b\r\n self.c = c",
"def __init__(self, **attributes):\n for key, value in attributes.items():\n setattr(self, key, value)"
] | [
"0.5649793",
"0.56196696",
"0.561045",
"0.55781025",
"0.54637",
"0.5424052",
"0.53738207",
"0.53738207",
"0.5343687",
"0.53170776",
"0.526791",
"0.52672625",
"0.5237828",
"0.52287626",
"0.52287626",
"0.52287626",
"0.5224568",
"0.52163136",
"0.52153605",
"0.52097565",
"0.5206123",
"0.52055746",
"0.5191246",
"0.5189696",
"0.5184643",
"0.5184514",
"0.5176637",
"0.51754993",
"0.51725864",
"0.5162071"
] | 0.73545164 | 0 |
Adds some html content after the first plugin from a specific placeholder gets rendered | def add_extra_html(instance, placeholder, rendered_content, original_context):
    html_before = getattr(placeholder, '_extra_html_before', '')
    html_after = getattr(placeholder, '_extra_html_after', '')
    if not html_before and not html_after:
        return rendered_content
    template_data = ['{{rendered_content|safe}}']
    context = Context({'rendered_content': rendered_content})
    if html_before:
        template_data.insert(0, '{{html_before|safe}}')
        context.update({'html_before': html_before})
        del placeholder._extra_html_before
    if html_after:
        template_data.append('{{html_after|safe}}')
        context.update({'html_after': html_after})
        del placeholder._extra_html_after
    return Template(''.join(template_data)).render(context) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_render_placeholder_cache(self):\n invalidate_cms_page_cache()\n ex = Example1(\n char_1='one',\n char_2='two',\n char_3='tree',\n char_4='four'\n )\n ex.save()\n ph1 = ex.placeholder\n ###\n # add the test plugin\n ##\n test_plugin = add_plugin(ph1, u\"TextPlugin\", u\"en\", body=\"Some text\")\n test_plugin.save()\n\n # asserting initial text\n context = SekizaiContext()\n context['request'] = self.get_request()\n text = render_placeholder(ph1, context)\n self.assertEqual(text, \"Some text\")\n\n # deleting local plugin cache\n del ph1._plugins_cache\n test_plugin.body = 'Other text'\n test_plugin.save()\n\n # plugin text has changed, so the placeholder rendering\n text = render_placeholder(ph1, context)\n self.assertEqual(text, \"Other text\")",
"def add_cloud_plugin_content(self, content):",
"def handle_data(self, data):\n if data.strip():\n self._content_list.append((self._current_tag, data))\n self._html += f\"{{{'placeholder_'+str(self._index)}}}\"\n self._index += 1",
"def test_cms_plugins_htmlsitemap_static_placeholder(self):\n self.create_page_tree()\n\n page = PageFactory(title__title=\"Sitemap\")\n self.assertEqual(StaticPlaceholder.objects.count(), 1)\n placeholder = StaticPlaceholder.objects.get()\n\n context = self.get_practical_plugin_context({\"current_page\": page})\n parent_instance = add_plugin(placeholder.draft, HTMLSitemapPlugin, \"en\")\n add_plugin(\n placeholder.draft,\n plugin_type=\"HTMLSitemapPagePlugin\",\n language=\"en\",\n target=parent_instance,\n )\n\n html = context[\"cms_content_renderer\"].render_placeholder(\n placeholder.draft, context=context, language=\"en\"\n )\n self.assertHTMLEqual(\n html,\n \"\"\"\n <div class=\"sitemap\">\n <ul>\n <li><a href=\"/en/root/\">Root</a>\n <ul>\n <li><a href=\"/en/root/parent/\">Parent</a>\n <ul>\n <li><a href=\"/en/root/parent/page/\">Page</a></li>\n <li><a href=\"/en/root/parent/sibling/\">Sibling</a></li>\n </ul>\n </li>\n <li><a href=\"/en/root/uncle/\">Uncle</a></li>\n </ul>\n </li>\n <li><a href=\"/en/sitemap/\">Sitemap</a></li>\n </ul>\n </div>\n \"\"\",\n )",
"def set_content(self, widget):\n\t\tpass",
"def test_placeholder_inherit_content(self):\n from pages import settings as pages_settings\n setattr(pages_settings, \"PAGE_USE_SITE_ID\", False)\n author = User.objects.all()[0]\n p1 = self.new_page(content={'inher':'parent-content'})\n p2 = self.new_page()\n template = django.template.loader.get_template('pages/tests/test7.html')\n context = Context({'current_page': p2, 'lang':'en-us'})\n self.assertEqual(template.render(context), '')\n \n p2.move_to(p1, position='first-child')\n self.assertEqual(template.render(context), 'parent-content')",
"def _build_plugin_content(self):\n return Step(\n name=\"plugin_content\",\n tipe=\"content\",\n value=self.steps.current.value,\n index=self.steps.current.index,\n )",
"def on_first_registration(self):\n self.main.tabify_plugins(self.main.help, self)\n self.dockwidget.hide()",
"def add_cloud_plugin_content(self, content):\r\n self._cloud_plugin_content.append(content)",
"def addMarkupcallback(self):\n if self.currentSeedNameComboBox.currentIndex == self.currentSeedNameComboBox.count - 1:\n self.currentSeedNameComboBox.setCurrentIndex( self.lastLabelIndex )\n interactionNode = slicer.app.applicationLogic().GetInteractionNode()\n interactionNode.SetCurrentInteractionMode(interactionNode.Place)",
"def plugin_first_label():\n return \"first\"",
"def add_default_content(self):\n data = get_default_eventpage_data()\n\n for i, section in enumerate(data):\n section[\"position\"] = i\n section[\"content\"] = render_to_string(section[\"template\"])\n del section[\"template\"]\n self.content.create(**section)",
"def save(self, placeholder, slide_obj):\n placeholder.text = self.text",
"def plugin_second_label():\n return \"second\"",
"def prepost_hook_one(self) -> None:\n self.poutput(\"one\")",
"def reload_placeholder(update):\n pass",
"def add_widget():\n\n return render_template('add_widget.html', widget=None)",
"def __searchNext(self):\n if self.__replaceWidget.isVisible():\n self.__replaceWidget.findNext()\n else:\n self.__searchWidget.findNext()",
"def addContent(text):",
"def test_copy_textplugin(self):\n page = create_page(\"page\", \"nav_playground.html\", \"en\")\n \n placeholder = page.placeholders.get(slot='body')\n\n plugin_base = CMSPlugin(\n plugin_type='TextPlugin',\n placeholder=placeholder,\n position=1,\n language=self.FIRST_LANG)\n plugin_base.insert_at(None, position='last-child', save=False)\n\n plugin = Text(body='')\n plugin_base.set_base_attr(plugin)\n plugin.save()\n\n plugin_ref_1_base = CMSPlugin(\n plugin_type='TextPlugin',\n placeholder=placeholder,\n position=1,\n language=self.FIRST_LANG)\n plugin_ref_1_base.insert_at(plugin_base, position='last-child', save=False)\n\n plugin_ref_1 = Text(body='')\n plugin_ref_1_base.set_base_attr(plugin_ref_1)\n plugin_ref_1.save()\n\n plugin_ref_2_base = CMSPlugin(\n plugin_type='TextPlugin',\n placeholder=placeholder,\n position=2,\n language=self.FIRST_LANG)\n plugin_ref_2_base.insert_at(plugin_base, position='last-child', save=False)\n\n plugin_ref_2 = Text(body='')\n plugin_ref_2_base.set_base_attr(plugin_ref_2)\n\n plugin_ref_2.save()\n\n plugin.body = plugin_tags_to_admin_html(' {{ plugin_object %s }} {{ plugin_object %s }} ' % (str(plugin_ref_1.pk), str(plugin_ref_2.pk)))\n plugin.save()\n self.assertEquals(plugin.pk, 1)\n page_data = self.get_new_page_data()\n\n #create 2nd language page\n page_data.update({\n 'language': self.SECOND_LANG,\n 'title': \"%s %s\" % (page.get_title(), self.SECOND_LANG),\n })\n response = self.client.post(URL_CMS_PAGE_CHANGE % page.pk + \"?language=%s\" % self.SECOND_LANG, page_data)\n self.assertRedirects(response, URL_CMS_PAGE)\n\n self.assertEquals(CMSPlugin.objects.filter(language=self.FIRST_LANG).count(), 3)\n self.assertEquals(CMSPlugin.objects.filter(language=self.SECOND_LANG).count(), 0)\n self.assertEquals(CMSPlugin.objects.count(), 3)\n self.assertEquals(Page.objects.all().count(), 1)\n\n copy_data = {\n 'placeholder': placeholder.pk,\n 'language': self.SECOND_LANG,\n 'copy_from': self.FIRST_LANG,\n }\n response = self.client.post(URL_CMS_PAGE + \"copy-plugins/\", copy_data)\n self.assertEquals(response.status_code, 200)\n self.assertEqual(response.content.count('<li '), 3)\n # assert copy success\n self.assertEquals(CMSPlugin.objects.filter(language=self.FIRST_LANG).count(), 3)\n self.assertEquals(CMSPlugin.objects.filter(language=self.SECOND_LANG).count(), 3)\n self.assertEquals(CMSPlugin.objects.count(), 6)\n\n new_plugin = Text.objects.get(pk=6)\n self.assertEquals(plugin_tags_to_id_list(new_plugin.body), [u'4', u'5'])",
"def remove_new_talk_placeholder_text(self):\r\n self.newTalkWidget.talkDetailsWidget.titleLineEdit.setPlaceholderText(\"\")\r\n self.newTalkWidget.talkDetailsWidget.presenterLineEdit.setPlaceholderText(\"\")\r\n self.newTalkWidget.talkDetailsWidget.categoryLineEdit.setPlaceholderText(\"\")\r\n self.newTalkWidget.talkDetailsWidget.eventLineEdit.setPlaceholderText(\"\")\r\n self.newTalkWidget.talkDetailsWidget.roomLineEdit.setPlaceholderText(\"\")",
"def make_placeholder(fig: Figure) -> None:\n fig.add_artist(FancyBboxPatch(\n xy = (0.35, 0.45),\n width = 0.3,\n height = 0.1,\n boxstyle = 'Round, pad=0.015',\n linewidth = 3,\n edgecolor = 'red',\n facecolor = 'lightpink',\n alpha = 0.5\n ))\n fig.text(\n x = 0.5,\n y = 0.5,\n s = \"Placeholder\",\n ha = \"center\",\n va = \"center\",\n fontsize = 'xx-large',\n fontweight = 'bold',\n alpha = 0.5\n )",
"def add_after ( self ):\n self.add_item( 1 )",
"def addBgMarkupcallback(self):\n if self.currentSeedNameComboBox.currentIndex != self.currentSeedNameComboBox.count - 1:\n self.lastLabelIndex = self.currentSeedNameComboBox.currentIndex\n self.currentSeedNameComboBox.setCurrentIndex( self.currentSeedNameComboBox.count - 1 )\n interactionNode = slicer.app.applicationLogic().GetInteractionNode()\n interactionNode.SetCurrentInteractionMode(interactionNode.Place)",
"def render_content(tab):\n # rander_holder = True\n for index in range(len(figs)):\n \"\"\"Render by start and callback.\"\"\"\n tabbi = f'tab-{index+1}'\n print(tabbi, 'in tabbi')\n if tab == tabbi:\n # returns the complete content for the browser\n return get_content(key_list[index], index)",
"def test_placeholders(self):\n page = create_page('page', 'page.html', 'en', published=True)\n url = reverse('api:placeholder-list')\n response = self.client.get(url, formst='json')\n self.assertEqual(len(response.data), 1)\n self.assertEqual(response.data[0]['slot'], 'content')\n page2 = create_page('page2', 'feature.html', 'en', published=True)\n response = self.client.get(url, formst='json')\n self.assertEqual(len(response.data), 3)\n self.assertEqual(response.data[1]['slot'], 'feature')\n self.assertEqual(response.data[2]['slot'], 'content')",
"def plugin_final_label():\n return \"final\"",
"def do(self, jQuery):",
"def handleContentComplete():",
"def prepost_hook_two(self) -> None:\n self.poutput(\"two\")"
] | [
"0.6580603",
"0.6060545",
"0.58660084",
"0.5402581",
"0.5378079",
"0.53726566",
"0.53519636",
"0.5350145",
"0.5341262",
"0.5288138",
"0.52814656",
"0.5248689",
"0.52272385",
"0.52205735",
"0.5199112",
"0.51644737",
"0.5150898",
"0.5070091",
"0.5054961",
"0.4986646",
"0.4973687",
"0.49724028",
"0.49486512",
"0.49431232",
"0.49342793",
"0.49091172",
"0.48945612",
"0.4880249",
"0.4873762",
"0.48525217"
] | 0.61032104 | 1 |
Test task with error in command. | def test_cmd_error(self):
task = Task("uid", False, False, "does_not_exist", None, ".")
task._checkpoint_dir = tmp_checkpoint_dir()
with self.assertRaisesRegexp(RuntimeError, ".*executing Task's command:.*"):
task.run()
task.shell = True
with self.assertRaisesRegexp(RuntimeError, ".*executing Task's command:.*"):
task.run()
task._dry_run = True
task.run() # No longer raises RuntimeError | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_verify_error(self):\n task = Task(\"uid\", False, False, \"echo\", \"does_not_exist\", \".\", \"A\")\n task._checkpoint_dir = tmp_checkpoint_dir()\n with self.assertRaisesRegexp(RuntimeError, \".*executing Task's verification:.*\"):\n task.run()\n task.shell = True\n with self.assertRaisesRegexp(RuntimeError, \".*executing Task's verification:.*\"):\n task.run()\n task._dry_run = True\n task.run() # No longer raises RuntimeError",
"def test_error(self) -> None:\n context: Dict[str, ArtifactDescriptor] = dict()\n cmd = ModuleCommand(\n package_id='error', \n command_id='error',\n arguments=[],\n packages=None\n )\n controller = FakeWorkflowController()\n self.backend.execute_async(\n task=TaskHandle(\n task_id='000',\n project_id=self.PROJECT_ID,\n controller=controller\n ),\n command=cmd,\n artifacts=context\n )\n time.sleep(2)\n self.assertEqual(controller.task_id, '000')\n self.assertEqual(controller.state, 'ERROR')\n self.assertEqual(len(controller.outputs.stdout), 0)\n self.assertNotEqual(len(controller.outputs.stderr), 0)",
"def error_check(command):\r\n\r\n # TODO\r",
"def _test_run_with_short_error_msg(self, task_class):\r\n task_entry = self._create_input_entry()\r\n self.define_option_problem(PROBLEM_URL_NAME)\r\n expected_message = \"x\" * 900\r\n with self.assertRaises(TestTaskFailure):\r\n self._run_task_with_mock_celery(task_class, task_entry.id, task_entry.task_id, expected_message)\r\n # compare with entry in table:\r\n entry = InstructorTask.objects.get(id=task_entry.id)\r\n self.assertEquals(entry.task_state, FAILURE)\r\n self.assertGreater(1023, len(entry.task_output))\r\n output = json.loads(entry.task_output)\r\n self.assertEquals(output['exception'], 'TestTaskFailure')\r\n self.assertEquals(output['message'], expected_message)\r\n self.assertEquals(output['traceback'][-3:], \"...\")",
"def _test_run_with_long_error_msg(self, task_class):\r\n task_entry = self._create_input_entry()\r\n self.define_option_problem(PROBLEM_URL_NAME)\r\n expected_message = \"x\" * 1500\r\n with self.assertRaises(TestTaskFailure):\r\n self._run_task_with_mock_celery(task_class, task_entry.id, task_entry.task_id, expected_message)\r\n # compare with entry in table:\r\n entry = InstructorTask.objects.get(id=task_entry.id)\r\n self.assertEquals(entry.task_state, FAILURE)\r\n self.assertGreater(1023, len(entry.task_output))\r\n output = json.loads(entry.task_output)\r\n self.assertEquals(output['exception'], 'TestTaskFailure')\r\n self.assertEquals(output['message'], expected_message[:len(output['message']) - 3] + \"...\")\r\n self.assertTrue('traceback' not in output)",
"def test_example(self, _, cmd):\n out = subprocess.run(cmd, shell=True)\n self.assertFalse(out.returncode)",
"def task_error(t_msg):\n print 'ERROR: ' + t_msg + ': ' + traceback.format_exc()\n TaskComm.set_state('ERROR', t_msg + ': ' + traceback.format_exc())",
"def test_norn(task, result):\n if type(result) != str:\n c_print(f\"*** {task.host}: ERROR running Nornir task ***\")",
"def test_task_add():\n pytest.fail('Not implemented yet.')",
"def test_command(self):\r\n with self.assertRaises(SystemExit) as ex:\r\n with self.assertRaisesRegexp(CommandError, 'This script requires.*'):\r\n call_command('git_export', 'blah', 'blah', 'blah',\r\n stderr=StringIO.StringIO())\r\n self.assertEqual(ex.exception.code, 1)\r\n\r\n with self.assertRaises(SystemExit) as ex:\r\n with self.assertRaisesRegexp(CommandError, 'This script requires.*'):\r\n call_command('git_export', stderr=StringIO.StringIO())\r\n self.assertEqual(ex.exception.code, 1)\r\n\r\n # Send bad url to get course not exported\r\n with self.assertRaises(SystemExit) as ex:\r\n with self.assertRaisesRegexp(CommandError, GitExportError.URL_BAD):\r\n call_command('git_export', 'foo/bar/baz', 'silly',\r\n stderr=StringIO.StringIO())\r\n self.assertEqual(ex.exception.code, 1)\r\n # Send bad course_id to get course not exported\r\n with self.assertRaises(SystemExit) as ex:\r\n with self.assertRaisesRegexp(CommandError, GitExportError.BAD_COURSE):\r\n call_command('git_export', 'foo/bar:baz', 'silly',\r\n stderr=StringIO.StringIO())\r\n self.assertEqual(ex.exception.code, 1)",
"def test_shell_bad_command():\n out, err = shell_command(\"ls adasdasdas\")\n assert out is None\n assert \"adasdasdas\" in err",
"def test_non_existant_command(self):\n with self.assertRaises(AttributeError):\n self.executor.this_command_doesnt_exist.install().batch()",
"def _test_run_with_failure(self, task_class, expected_message):\r\n task_entry = self._create_input_entry()\r\n self.define_option_problem(PROBLEM_URL_NAME)\r\n with self.assertRaises(TestTaskFailure):\r\n self._run_task_with_mock_celery(task_class, task_entry.id, task_entry.task_id, expected_message)\r\n # compare with entry in table:\r\n entry = InstructorTask.objects.get(id=task_entry.id)\r\n self.assertEquals(entry.task_state, FAILURE)\r\n output = json.loads(entry.task_output)\r\n self.assertEquals(output['exception'], 'TestTaskFailure')\r\n self.assertEquals(output['message'], expected_message)",
"def test_run_cmd_simple_negative(caplog):\n caplog.set_level(logging.DEBUG)\n cmd = \"ls /tmp/this/file/isindeednotthereatall\"\n with pytest.raises(CommandFailed) as excinfo:\n utils.run_cmd(cmd)\n assert \"No such file or directory\" in str(excinfo.value)\n # check that run_cmd logged the run as expected\n assert caplog.records[0].levelname == 'INFO'\n assert caplog.records[0].message == f'Executing command: {cmd}'\n assert caplog.records[1].levelname == 'DEBUG'\n assert caplog.records[1].message == 'Command stdout is empty'\n assert caplog.records[2].levelname == 'WARNING'\n assert caplog.records[2].message.startswith(\"Command stderr: ls:\")\n assert \"No such file or directory\" in caplog.records[2].message\n assert caplog.records[3].levelname == 'DEBUG'\n assert caplog.records[3].message == 'Command return code: 2'",
"def run_check_errors(cmd):\n if type(cmd) == str:\n cmd = cmd.split()\n output = subprocess.run(cmd, capture_output=True, text=True)\n if output.stderr != \"\":\n print_cmd = \" \".join(map(str, cmd))\n sys.exit(\n f\"The error {output.stderr} was generated when running {print_cmd}. Exiting.\"\n )\n return",
"def add_failure(self, task: Task, exception: Any) -> None: # noqa: DAR101\n super().add_failure(task, exception)\n self._add_summary(task, _TaskExitCode.FAIL)",
"def test_failed():\n build()\n sh(\"%s %s --last-failed\" % (PYTHON, RUNNER_PY))",
"def test_runFailed(self):\n builder = BookBuilder()\n exc = self.assertRaises(\n CommandFailed, builder.run,\n [sys.executable, '-c', 'print \"hi\"; raise SystemExit(1)'])\n self.assertEquals(exc.exitStatus, 1)\n self.assertEquals(exc.exitSignal, None)\n self.assertEquals(exc.output, \"hi\\n\")",
"def test_task_finder_exception(test_operator, task_name, exception):\n with pytest.raises(exception):\n test_operator.find_task(task_name)",
"def test_run_and_check_result(self):\n # Run a successful command.\n result = build_cmake_project.run_and_check_result('echo hello world')\n self.assertTrue(result)\n\n # Run a failure command.\n try:\n result = build_cmake_project.run_and_check_result('unexistent --command')\n except subprocess.CalledProcessError:\n self.fail('Exception thrown when running unexistent command.')\n self.assertFalse(result)",
"def assert_cmd_fail(self, cmds):\n print('@ %s [supposed to fail]' % cmds)\n try:\n self.cc.batch_command(cmds)\n except CommandFailed:\n pass\n else:\n msg = 'Command %r did not fail.' % cmds\n raise Exception(msg)",
"def test_no_args(self):\n errstring = \"Error: the following arguments are required: course_id\"\n with self.assertRaisesRegex(CommandError, errstring):\n call_command('export_olx')",
"def fail(msg):\n\n # Not sure if simply raising the exception is clearer.\n raise CommandFailed(msg)",
"def test_task_failed(self):\n\n task1 = FailedTask(mock.Mock(), total_retries=0)\n task2 = mock.Mock(execute_after=0)\n\n g = TaskDependencyGraph(MockWorkflowContext())\n seq = g.sequence()\n seq.add(task1, task2)\n\n with limited_sleep_mock():\n self.assertRaisesRegex(WorkflowFailed, 'failtask', g.execute)\n self.assertTrue(task1.is_terminated)\n self.assertFalse(task2.apply_async.called)",
"def _runCommandRaiseIfFail (self, command, killTimeout = DEAFULT_KILL_TIMEOUT, warningTimeout = DEAFULT_WARNING_TIMEOUT, shell=False):\n (rc,outText,errText) = self._runCommand(command, killTimeout = killTimeout, warningTimeout = warningTimeout, shell = shell)\n if rc != 0:\n self._log(\"run-command-raising\").warning(\"Command returned '%s', raising exception\", rc)\n raise SdUtilsError(\"Failed running command %s\" % command)\n return (outText,errText)",
"def assert_console_raises(self, exception, **kwargs):\n with self.assertRaises(exception):\n self.execute_console_task(**kwargs)",
"def test_traffic_analysis_command_for_invalid_arguments(\n args, err_msg, err_type, mock_client\n):\n from IllumioCore import traffic_analysis_command\n\n with pytest.raises(err_type) as err:\n traffic_analysis_command(mock_client, args)\n assert str(err.value) == err_msg",
"def cantRunTask(self, task):\n canRun = False\n reason = \"\"\n try:\n if task._errors:\n canRun = False\n reason = \"could not create task\"\n return\n if task.dryRun and not task.configurator.canDryRun(task):\n canRun = False\n reason = \"dry run not supported\"\n return\n missing = []\n skipDependencyCheck = False\n if not skipDependencyCheck:\n dependencies = list(task.target.getOperationalDependencies())\n missing = [\n dep for dep in dependencies if not dep.operational and dep.required\n ]\n if missing:\n reason = \"required dependencies not operational: %s\" % \", \".join(\n [\"%s is %s\" % (dep.name, dep.status.name) for dep in missing]\n )\n else:\n errors = task.configSpec.findInvalidateInputs(task.inputs)\n if errors:\n reason = \"invalid inputs: %s\" % str(errors)\n else:\n preErrors = task.configSpec.findInvalidPreconditions(task.target)\n if preErrors:\n reason = \"invalid preconditions: %s\" % str(preErrors)\n else:\n errors = task.configurator.canRun(task)\n if not errors or not isinstance(errors, bool):\n reason = \"configurator declined: %s\" % str(errors)\n else:\n canRun = True\n except Exception:\n UnfurlTaskError(task, \"cantRunTask failed unexpectedly\")\n reason = \"unexpected exception in cantRunTask\"\n canRun = False\n\n if canRun:\n return False\n else:\n logger.info(\"could not run task %s: %s\", task, reason)\n return \"could not run: \" + reason",
"def test_handles_error(self):\n with self.assertRaises(ForcedExit):\n self.run_command(mkdtemp())\n\n self.assertResults(\n result_with_hint(\n u'This repository has not been initialized.',\n GIT_REPO_NOT_INITIALIZED),\n self.error)",
"def test_no_args(self):\r\n errstring = \"export requires two arguments\"\r\n with self.assertRaisesRegexp(CommandError, errstring):\r\n self.command.handle()"
] | [
"0.753934",
"0.7510016",
"0.7355977",
"0.7223339",
"0.68849444",
"0.67912555",
"0.67883474",
"0.6771038",
"0.67178786",
"0.67099845",
"0.6694828",
"0.669401",
"0.6689271",
"0.6554793",
"0.6538062",
"0.6508295",
"0.647738",
"0.64662874",
"0.6459306",
"0.6447651",
"0.64386034",
"0.6428404",
"0.64185244",
"0.64182675",
"0.6412859",
"0.6407723",
"0.64031583",
"0.6372469",
"0.636119",
"0.635749"
] | 0.8290416 | 0 |
Test task with error in verification. | def test_verify_error(self):
task = Task("uid", False, False, "echo", "does_not_exist", ".", "A")
task._checkpoint_dir = tmp_checkpoint_dir()
with self.assertRaisesRegexp(RuntimeError, ".*executing Task's verification:.*"):
task.run()
task.shell = True
with self.assertRaisesRegexp(RuntimeError, ".*executing Task's verification:.*"):
task.run()
task._dry_run = True
task.run() # No longer raises RuntimeError | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _test_run_with_short_error_msg(self, task_class):\r\n task_entry = self._create_input_entry()\r\n self.define_option_problem(PROBLEM_URL_NAME)\r\n expected_message = \"x\" * 900\r\n with self.assertRaises(TestTaskFailure):\r\n self._run_task_with_mock_celery(task_class, task_entry.id, task_entry.task_id, expected_message)\r\n # compare with entry in table:\r\n entry = InstructorTask.objects.get(id=task_entry.id)\r\n self.assertEquals(entry.task_state, FAILURE)\r\n self.assertGreater(1023, len(entry.task_output))\r\n output = json.loads(entry.task_output)\r\n self.assertEquals(output['exception'], 'TestTaskFailure')\r\n self.assertEquals(output['message'], expected_message)\r\n self.assertEquals(output['traceback'][-3:], \"...\")",
"def test_task_add():\n pytest.fail('Not implemented yet.')",
"def _test_run_with_failure(self, task_class, expected_message):\r\n task_entry = self._create_input_entry()\r\n self.define_option_problem(PROBLEM_URL_NAME)\r\n with self.assertRaises(TestTaskFailure):\r\n self._run_task_with_mock_celery(task_class, task_entry.id, task_entry.task_id, expected_message)\r\n # compare with entry in table:\r\n entry = InstructorTask.objects.get(id=task_entry.id)\r\n self.assertEquals(entry.task_state, FAILURE)\r\n output = json.loads(entry.task_output)\r\n self.assertEquals(output['exception'], 'TestTaskFailure')\r\n self.assertEquals(output['message'], expected_message)",
"def test_cmd_error(self):\n task = Task(\"uid\", False, False, \"does_not_exist\", None, \".\")\n task._checkpoint_dir = tmp_checkpoint_dir()\n with self.assertRaisesRegexp(RuntimeError, \".*executing Task's command:.*\"):\n task.run()\n task.shell = True\n with self.assertRaisesRegexp(RuntimeError, \".*executing Task's command:.*\"):\n task.run()\n task._dry_run = True\n task.run() # No longer raises RuntimeError",
"def test_task_add_invalid_form():\n pytest.fail('Not implemented yet.')",
"def _test_run_with_long_error_msg(self, task_class):\r\n task_entry = self._create_input_entry()\r\n self.define_option_problem(PROBLEM_URL_NAME)\r\n expected_message = \"x\" * 1500\r\n with self.assertRaises(TestTaskFailure):\r\n self._run_task_with_mock_celery(task_class, task_entry.id, task_entry.task_id, expected_message)\r\n # compare with entry in table:\r\n entry = InstructorTask.objects.get(id=task_entry.id)\r\n self.assertEquals(entry.task_state, FAILURE)\r\n self.assertGreater(1023, len(entry.task_output))\r\n output = json.loads(entry.task_output)\r\n self.assertEquals(output['exception'], 'TestTaskFailure')\r\n self.assertEquals(output['message'], expected_message[:len(output['message']) - 3] + \"...\")\r\n self.assertTrue('traceback' not in output)",
"def test_error(self) -> None:\n context: Dict[str, ArtifactDescriptor] = dict()\n cmd = ModuleCommand(\n package_id='error', \n command_id='error',\n arguments=[],\n packages=None\n )\n controller = FakeWorkflowController()\n self.backend.execute_async(\n task=TaskHandle(\n task_id='000',\n project_id=self.PROJECT_ID,\n controller=controller\n ),\n command=cmd,\n artifacts=context\n )\n time.sleep(2)\n self.assertEqual(controller.task_id, '000')\n self.assertEqual(controller.state, 'ERROR')\n self.assertEqual(len(controller.outputs.stdout), 0)\n self.assertNotEqual(len(controller.outputs.stderr), 0)",
"def test_verification_failed(self):\n pass",
"def test_task_failed(self):\n\n task1 = FailedTask(mock.Mock(), total_retries=0)\n task2 = mock.Mock(execute_after=0)\n\n g = TaskDependencyGraph(MockWorkflowContext())\n seq = g.sequence()\n seq.add(task1, task2)\n\n with limited_sleep_mock():\n self.assertRaisesRegex(WorkflowFailed, 'failtask', g.execute)\n self.assertTrue(task1.is_terminated)\n self.assertFalse(task2.apply_async.called)",
"def _test_undefined_problem(self, task_class):\r\n task_entry = self._create_input_entry()\r\n with self.assertRaises(ItemNotFoundError):\r\n self._run_task_with_mock_celery(task_class, task_entry.id, task_entry.task_id)",
"def test_solve_task(self):\n pass",
"def test_failed():\n assert False",
"def test_fail(make_runner: Callable[..., TargetFunctionRunner]) -> None:\n runner = make_runner(target_failed, use_instances=True)\n run_info = TrialInfo(config=2, instance=\"test\", seed=0, budget=0.0)\n\n runner.submit_trial(run_info)\n run_info, run_value = next(runner.iter_results())\n\n # Make sure the traceback message is included\n assert \"traceback\" in run_value.additional_info\n assert \"RuntimeError\" in run_value.additional_info[\"traceback\"]",
"def add_failure(self, task: Task, exception: Any) -> None: # noqa: DAR101\n super().add_failure(task, exception)\n self._add_summary(task, _TaskExitCode.FAIL)",
"def task_is_failure(task):\n\n if task and task.state == 'FAILURE':\n return True\n return False",
"def test_task_errors(self):\r\n user = User(\r\n email_addr=\"[email protected]\",\r\n name=\"johndoe\",\r\n fullname=\"John Doe\",\r\n locale=\"en\")\r\n db.session.add(user)\r\n db.session.commit()\r\n user = db.session.query(User).first()\r\n app = App(\r\n name='Application',\r\n short_name='app',\r\n description='desc',\r\n owner_id=user.id)\r\n db.session.add(app)\r\n db.session.commit()\r\n\r\n task = Task(app_id=None)\r\n db.session.add(task)\r\n assert_raises(IntegrityError, db.session.commit)\r\n db.session.rollback()",
"def _assert_task_failure(self, entry_id, task_type, problem_url_name, expected_message):\r\n instructor_task = InstructorTask.objects.get(id=entry_id)\r\n self.assertEqual(instructor_task.task_state, FAILURE)\r\n self.assertEqual(instructor_task.requester.username, 'instructor')\r\n self.assertEqual(instructor_task.task_type, task_type)\r\n task_input = json.loads(instructor_task.task_input)\r\n self.assertFalse('student' in task_input)\r\n self.assertEqual(task_input['problem_url'], InstructorTaskModuleTestCase.problem_location(problem_url_name).to_deprecated_string())\r\n status = json.loads(instructor_task.task_output)\r\n self.assertEqual(status['exception'], 'ZeroDivisionError')\r\n self.assertEqual(status['message'], expected_message)\r\n # check status returned:\r\n status = InstructorTaskModuleTestCase.get_task_status(instructor_task.task_id)\r\n self.assertEqual(status['message'], expected_message)",
"def test_norn(task, result):\n if type(result) != str:\n c_print(f\"*** {task.host}: ERROR running Nornir task ***\")",
"def test_task_finder_exception(test_operator, task_name, exception):\n with pytest.raises(exception):\n test_operator.find_task(task_name)",
"def test_failure_result(self):\n dr = EventualResult(fail(RuntimeError()), None)\n self.assertRaises(RuntimeError, dr.wait, 0.1)",
"def on_task_failure(task, exc, task_id, args, kwargs, einfo):\n message = 'Task {} failed w/ args: {}, {}\\n{}'\n log.error(message.format(task.name, args, kwargs, einfo.traceback))",
"def test_case_01(self):\n if True:\n self.fail()",
"def test_errback(self):\n def callable():\n raise TestException()\n\n clock = task.Clock()\n d = task.deferLater(clock, 1, callable)\n clock.advance(1)\n return self.assertFailure(d, TestException)",
"def test_failure(self):\n\n @sync_performer\n def fail(dispatcher, intent):\n raise intent\n\n dispatcher = lambda _: fail\n self.assertThat(\n sync_perform(\n dispatcher, Effect(ValueError(\"oh dear\")).on(error=lambda e: e)\n ),\n MatchesException(ValueError(\"oh dear\")),\n )",
"def test_task_with_two_int_validation_parameters_validate_exceptions_task87(data, expected_value):\r\n\r\n with pytest.raises(expected_value):\r\n algo.Task87.validate_data(data)",
"def task_error(t_msg):\n print 'ERROR: ' + t_msg + ': ' + traceback.format_exc()\n TaskComm.set_state('ERROR', t_msg + ': ' + traceback.format_exc())",
"def test_task_run_errors(self):\r\n user = User(\r\n email_addr=\"[email protected]\",\r\n name=\"johndoe\",\r\n fullname=\"John Doe\",\r\n locale=\"en\")\r\n db.session.add(user)\r\n db.session.commit()\r\n\r\n user = db.session.query(User).first()\r\n app = App(\r\n name='Application',\r\n short_name='app',\r\n description='desc',\r\n owner_id=user.id)\r\n db.session.add(app)\r\n db.session.commit()\r\n\r\n task = Task(app_id=app.id)\r\n db.session.add(task)\r\n db.session.commit()\r\n\r\n task_run = TaskRun(app_id=None, task_id=task.id)\r\n db.session.add(task_run)\r\n assert_raises(IntegrityError, db.session.commit)\r\n db.session.rollback()\r\n\r\n task_run = TaskRun(app_id=app.id, task_id=None)\r\n db.session.add(task_run)\r\n assert_raises(IntegrityError, db.session.commit)\r\n db.session.rollback()",
"def cantRunTask(self, task):\n canRun = False\n reason = \"\"\n try:\n if task._errors:\n canRun = False\n reason = \"could not create task\"\n return\n if task.dryRun and not task.configurator.canDryRun(task):\n canRun = False\n reason = \"dry run not supported\"\n return\n missing = []\n skipDependencyCheck = False\n if not skipDependencyCheck:\n dependencies = list(task.target.getOperationalDependencies())\n missing = [\n dep for dep in dependencies if not dep.operational and dep.required\n ]\n if missing:\n reason = \"required dependencies not operational: %s\" % \", \".join(\n [\"%s is %s\" % (dep.name, dep.status.name) for dep in missing]\n )\n else:\n errors = task.configSpec.findInvalidateInputs(task.inputs)\n if errors:\n reason = \"invalid inputs: %s\" % str(errors)\n else:\n preErrors = task.configSpec.findInvalidPreconditions(task.target)\n if preErrors:\n reason = \"invalid preconditions: %s\" % str(preErrors)\n else:\n errors = task.configurator.canRun(task)\n if not errors or not isinstance(errors, bool):\n reason = \"configurator declined: %s\" % str(errors)\n else:\n canRun = True\n except Exception:\n UnfurlTaskError(task, \"cantRunTask failed unexpectedly\")\n reason = \"unexpected exception in cantRunTask\"\n canRun = False\n\n if canRun:\n return False\n else:\n logger.info(\"could not run task %s: %s\", task, reason)\n return \"could not run: \" + reason",
"def test_task_with_one_int_validation_parameter_validate_exceptions(number, expected_value):\r\n\r\n with pytest.raises(expected_value):\r\n algo.TaskWithOneIntValidationParameter.validate_data(number)",
"def test_task_error_handler(self, mocked_find):\n\n setup_identity_cache()\n\n mocked_find.side_effect = KeyError(\"Error forced for testing\")\n\n url = \"/v1/actions/CreateProjectAndUser\"\n data = {\"project_name\": \"test_project\", \"email\": \"[email protected]\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_503_SERVICE_UNAVAILABLE)\n\n self.assertEqual(\n response.json(),\n {\"errors\": [\"Service temporarily unavailable, try again later.\"]},\n )\n\n new_task = Task.objects.all()[0]\n new_notification = Notification.objects.all()[0]\n\n self.assertTrue(new_notification.error)\n self.assertEqual(\n new_notification.notes,\n {\n \"errors\": [\n \"Error: KeyError('Error forced for testing') while setting up \"\n \"task. See task itself for details.\"\n ]\n },\n )\n self.assertEqual(new_notification.task, new_task)"
] | [
"0.73252994",
"0.73072",
"0.73054326",
"0.7165724",
"0.7095742",
"0.70697004",
"0.70417243",
"0.69686437",
"0.6939323",
"0.67261666",
"0.6702976",
"0.66879827",
"0.661982",
"0.6608488",
"0.6605837",
"0.6578812",
"0.6557815",
"0.65573394",
"0.650568",
"0.6491746",
"0.64669424",
"0.64587617",
"0.64054585",
"0.6397519",
"0.6389861",
"0.6387753",
"0.6385746",
"0.63714516",
"0.6369253",
"0.63578093"
] | 0.817874 | 0 |
List the iDRAC configuration settings | def list_idrac_settings(self):
return self._idrac_cfg.list_idrac_settings() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def list_config():\n console = Console()\n _config = loadConfig()\n json_data = richJSON.from_data({**asdict(_config)})\n console.print(Panel(json_data, title=\"SubmarineCliConfig\"))",
"def list_conf(self, kwargs):\n self.display(\n self.engine.query(\n self.engine.ALL_FILTER(),\n ALL, base=','.join([\"CN=Configuration\", self.engine.base_dn])\n ),\n True\n )",
"def list(self):\n for item in self._config:\n item.list()",
"def list_configuration(config_file = CONFIG_FILE):\n conf = get_configuration(config_file)\n display_configuration(config_file, 'secret wallet configuration is located', conf)",
"def config_list_options(section):\n return __CONFIG.items(section)",
"def _list_settings(self, settings=None):\n if settings == None:\n settings = fileIO.load_json(\"settings.json\")\n print(\"The list of settings is: \")\n for i in settings:\n print(\"{0}: {1}\".format(i, settings[i]))\n return(None)",
"def antenny_list_configs(self):\n return self.antenny_config.list_configs()",
"def print_config(self):\n for key in CONFIG_KEYS:\n print('--- ' + key + ' ---')\n print(CONFIG_KEYS[key])",
"def test_config_list():\n client = TestClient()\n client.run('config list')\n assert \"Supported Conan *experimental* global.conf and [conf] properties:\" in client.out\n for key, description in BUILT_IN_CONFS.items():\n assert \"{}: {}\".format(key, description) in client.out",
"def antenny_config_print_values(self):\n return self.antenny_config.print_values()",
"def print_settings(config):\n print(\"----------------------------------------\")\n print(\"SETTINGS\")\n print(\"----------------------------------------\")\n for key, value in config:\n print(\"%s=%s\" % (key, value))\n print(\"----------------------------------------\")",
"def get_settings():\n settings = {}\n for setting in cfg.displayable_setting:\n settings[setting] = getattr(cfg, setting)\n return settings",
"def get_settings():\n settings = {}\n for setting in cfg.displayable_setting:\n settings[setting] = getattr(cfg, setting)\n return settings",
"def list_nic_settings(self, nic_id):\n return self._nic_cfg.list_nic_settings(nic_id)",
"def print_config(self):\n for key in self._config.keys():\n print('[{0}] = {1}'.format(key, self._config[key]))",
"def list_configurations(ctx):\n config_set = __ensure_configuration_exists(ctx)\n formatter = ConfigSetListFormatter.build(config_set, format='plain')\n out = formatter.format()\n\n click.echo(out)",
"def printSettings(self, value=None):\n\t\tout = []\n\t\tif value:\n\t\t\tfor item in self.listMatchingSettings(value):\n\t\t\t\tout.append(str(item[0]) + ' : ' + str(item[1]) + '\\nDesc: ' + str(item[2]))\n\t\telse:\n\t\t\tfor key in sorted(self.settings.iterkeys()):\n\t\t\t\tout.append(str(key) + ' : ' + str(self.settings[key][0]) + '\\nDesc: ' + str(self.settings[key][1]))\n\t\treturn out",
"def list_configurations(MaxResults=None, NextToken=None):\n pass",
"def list(obj):\n # lists pf9-express config files\n pf9_exp_conf_dir = obj['pf9_exp_conf_dir']\n\n if os.path.exists(pf9_exp_conf_dir):\n count = 1\n result = PrettyTable()\n result.field_names = [\"#\",\"Active\", \"Conf\", \"Management Plane\", \"Region\"]\n files = [f for f in os.listdir(pf9_exp_conf_dir) if os.path.isfile(os.path.join(pf9_exp_conf_dir, f))]\n\n for f in files:\n active = False\n if f == 'express.conf':\n active = True\n with open(pf9_exp_conf_dir + f, 'r') as config_file:\n config = Utils().config_to_dict(config_file)\n if active:\n result.add_row([count,'*', config[\"name\"], config[\"du_url\"], config[\"os_region\"]])\n else:\n result.add_row([count,' ', config[\"name\"], config[\"du_url\"], config[\"os_region\"]])\n count = count + 1\n\n click.echo(result)\n\n else:\n click.echo('No Platform9 management plane configs exist')",
"def list(self):\n\n config = self.get_config()\n client = config['client']\n default_config = config[client]\n\n msg.run('Saved options for client %s' % client)\n msg.inf('Default application (%s)' % default_config.get('defapp'))\n msg.inf('environment (%s)' % default_config['environment'])\n msg.inf('databases prod (%s) test (%s)' %\n (default_config['database'],\n default_config['test_database']))\n msg.inf('Image (%s)' % default_config['image'])\n msg.inf('Nginx (%s) Debug (%s) Verbose (%s)' %\n (default_config['nginx'],\n default_config['debug'],\n default_config['verbose'])\n )\n msg.run('\\nOther clients in this environment')\n clients = [item for item in config if item != 'client']\n\n msg.inf(', '.join(clients))",
"def default_controls(self):\n\t\tcontrol_list = []\n\t\tconfig = ConfigParser.ConfigParser()\n\t\tconfig.read(\"./config.ini\")\n\t\tcontrols = config.options(\"default_controls\")\n\t\tfor c in controls:\n\t\t\ttry: control_list.append( config.get(\"default_controls\", c) )\n\t\t\texcept:\n\t\t\t\tprint \"ERROR: missing control settings. Check config.ini.\"\n\t\t\t\traise(SystemExit)\n\t\treturn control_list",
"def get_acls():\n return config.get_cfg_storage(ID_ACL)",
"def get_settings(self):\n return AlgoliaUtils_request(self.client.headers, self.read_hosts, \"GET\", \"/1/indexes/%s/settings\" % self.url_index_name, self.client.timeout)",
"def config_show(cibfile=None):\n return item_show(item=\"config\", item_id=None, extra_args=None, cibfile=cibfile)",
"def showConf(self):\n\n return json.dumps(\n self.config, sort_keys=True, indent=4, separators=(',', ': '))",
"def get_settings_from_config(self):\n return [self.wiki.config.parts_enabled,\n self.wiki.config.pages_per_filepart_history,\n self.wiki.config.revs_per_filepart_history,\n self.wiki.config.numparts_for_abstract,\n self.wiki.config.numparts_for_pagelogs,\n self.wiki.config.pages_per_filepart_abstract,\n self.wiki.config.recombine_metacurrent,\n self.wiki.config.recombine_history,\n self.wiki.config.checkpoint_time]",
"def config(self):\n return \"\\n\".join([ c.config(True) for p, c in self.configs_ ])",
"def vinet_configs(connection):\n assert connection\n query = \"\"\"select * from configs()\"\"\"\n return [item.strip() for item in sqlio.read_sql_query(query, connection)['name']]",
"def _config_list(res, ctx):\n\n if _has_error_code(res):\n return print_errors(res, ctx)\n\n lines = []\n for config in res['configs']:\n line = '* ' if config['current'] else ' '\n\n if ctx.verbose:\n line += config['mtime'] + ' '\n\n line += config['name']\n lines.append(line)\n\n return \"\\n\".join(lines)",
"def list(cls):\n\n db = get_db_handle()\n\n secret_basic_configs = []\n for secret in db.secret_table.select():\n secret_basic_configs.append(secret.get_detail_dict())\n\n return secret_basic_configs"
] | [
"0.6941069",
"0.68482906",
"0.6822821",
"0.6753772",
"0.63854384",
"0.6331615",
"0.6239787",
"0.6238235",
"0.62348956",
"0.62288743",
"0.61923677",
"0.6163021",
"0.6163021",
"0.61327124",
"0.61241364",
"0.60989195",
"0.6065206",
"0.6030643",
"0.599483",
"0.59766704",
"0.5961531",
"0.59536994",
"0.59427685",
"0.59334767",
"0.59307057",
"0.5925711",
"0.5899213",
"0.58384365",
"0.58379215",
"0.58106333"
] | 0.7821567 | 0 |
Creates a configuration job for applying all pending changes to an iDRAC. | def commit_pending_idrac_changes(
self,
idrac_fqdd='iDRAC.Embedded.1',
reboot=False,
start_time='TIME_NOW'):
return self._job_mgmt.create_config_job(
resource_uri=uris.DCIM_iDRACCardService,
cim_creation_class_name='DCIM_iDRACCardService',
cim_name='DCIM:iDRACCardService',
target=idrac_fqdd,
reboot=reboot,
start_time=start_time) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def commit_pending_raid_changes(self, raid_controller, reboot=False,\n start_time='TIME_NOW'):\n return self._job_mgmt.create_config_job(\n resource_uri=ironic_uris.DCIM_RAIDService,\n cim_creation_class_name='DCIM_RAIDService',\n cim_name='DCIM:RAIDService',\n target=raid_controller,\n reboot=reboot,\n start_time=start_time)",
"def apply(self):\n changed = False\n job_schedule_exists = False\n results = netapp_utils.get_cserver(self.server)\n cserver = netapp_utils.setup_ontap_zapi(\n module=self.module, vserver=results)\n netapp_utils.ems_log_event(\"na_ontap_job_schedule\", cserver)\n job_details = self.get_job_schedule()\n if job_details:\n job_schedule_exists = True\n if self.state == 'absent': # delete\n changed = True\n elif self.state == 'present': # modify\n if job_details['job_minutes'] != str(self.job_minutes):\n changed = True\n else:\n if self.state == 'present': # create\n changed = True\n if changed:\n if self.module.check_mode:\n pass\n else:\n if self.state == 'present': # execute create\n if not job_schedule_exists:\n self.create_job_schedule()\n else: # execute modify minute\n self.modify_minute_job_schedule()\n elif self.state == 'absent': # execute delete\n self.delete_job_schedule()\n self.module.exit_json(changed=changed)",
"def apply(self) -> None:\n _ba.apply_config()",
"def create_job_configuration(start_time: str) -> ItemsJobConfig:\n # Create job configuration\n config = {\n 'source_url': os.getenv(\"ITEMS_SOURCE_URL\", default=\"\"),\n 'dest_new_url': os.getenv(\"ITEMS_DEST_NEW_URL\", default=\"\"),\n 'dest_updates_url': os.getenv(\"ITEMS_DEST_UPDATES_URL\", default=\"\"),\n 'caiasoft_api_key': os.getenv('CAIASOFT_API_KEY', default=\"\"),\n 'storage_dir': os.getenv('ITEMS_STORAGE_DIR', default=\"\"),\n 'last_success_lookup': os.getenv('ITEMS_LAST_SUCCESS_LOOKUP', default=\"\")\n }\n\n job_id_prefix = \"caia.items\"\n\n job_config = ItemsJobConfig(config, job_id_prefix, start_time)\n logger.info(f\"Job Id: {job_config['job_id']}\")\n logger.debug(f\"job_config={job_config}\")\n\n return job_config",
"def apply_config(dts, acg, xact, action, scratch):\n self.log.debug(\"Apply Config\")\n return rwtypes.RwStatus.SUCCESS",
"def abandon_pending_idrac_changes(self, idrac_fqdd):\n self._job_mgmt.delete_pending_config(\n resource_uri=uris.DCIM_iDRACCardService,\n cim_creation_class_name='DCIM_iDRACCardService',\n cim_name='DCIM:iDRACCardService',\n target=idrac_fqdd)",
"def backup(ctx):\n config_path = ctx.obj['config_path']\n logger = ctx.obj['logger']\n\n config = Config(config_path)\n scheduler = BlockingScheduler(\n executors={'default': ThreadPoolExecutor(max_workers=1)}\n )\n\n for job in config.jobs.values():\n logger.info(f'filesystem={job.filesystem} '\n f'cron=\"{job.cron}\" '\n 'msg=\"Adding job.\"')\n scheduler.add_job(job.start, 'cron', **job.cron, coalesce=True)\n\n try:\n scheduler.start()\n except (KeyboardInterrupt, SystemExit):\n pass",
"def configure_queued_job(self, job):\n\n config = ExecutionConfiguration()\n data = job.get_job_data()\n\n # Add input file meta-data\n input_files_dict = self._create_input_file_dict(data)\n config.set_input_files(input_files_dict)\n\n # Set up env vars for job's input data\n input_values = data.get_injected_input_values(input_files_dict)\n interface = job.job_type_rev.get_input_interface()\n\n env_vars = {}\n if isinstance(data, JobData):\n # call job.data.job_data.JobData.get_injected_env_vars\n env_vars = data.get_injected_env_vars(input_files_dict, interface)\n else:\n # call old job.configuration.data.job_data.get_injected_env_vars\n # TODO: remove once old JobData class is no longer used\n env_vars = data.get_injected_env_vars(input_files_dict)\n\n task_workspaces = {}\n if job.job_type.is_system:\n # Add any workspaces needed for this system job\n task_workspaces = QueuedExecutionConfigurator._system_job_workspaces(job)\n else:\n # Set any output workspaces needed\n output_workspaces = {}\n if job.input and 'version' in job.input and job.input['version'] == '1.0':\n # Set output workspaces using legacy job data\n self._cache_workspace_names(data.get_output_workspace_ids())\n output_workspaces = {}\n for output, workspace_id in data.get_output_workspaces().items():\n output_workspaces[output] = self._cached_workspace_names[workspace_id]\n config.set_output_workspaces(output_workspaces)\n if not output_workspaces:\n # Set output workspaces from job configuration\n output_workspaces = {}\n job_config = job.get_job_configuration()\n interface = SeedManifest(job.job_type_rev.manifest, do_validate=False)\n for output_name in interface.get_file_output_names():\n output_workspace = job_config.get_output_workspace(output_name)\n if output_workspace:\n output_workspaces[output_name] = output_workspace\n config.set_output_workspaces(output_workspaces)\n\n # Create main task with fields populated from input data\n args = job.get_job_interface().get_injected_command_args(input_values, env_vars)\n config.create_tasks(['main'])\n config.add_to_task('main', args=args, env_vars=env_vars, workspaces=task_workspaces)\n return config",
"def commit_pending_bios_changes(self, reboot=False, start_time='TIME_NOW'):\n return self._job_mgmt.create_config_job(\n resource_uri=ironic_uris.DCIM_BIOSService,\n cim_creation_class_name='DCIM_BIOSService',\n cim_name='DCIM:BIOSService',\n target=self.BIOS_DEVICE_FQDD,\n reboot=reboot,\n start_time=start_time)",
"def commit_pending_nic_changes(self, nic_id, reboot=False):\n return self._job_mgmt.create_config_job(\n resource_uri=uris.DCIM_NICService,\n cim_creation_class_name='DCIM_NICService',\n cim_name='DCIM:NICService',\n target=nic_id,\n reboot=reboot)",
"def update_from_config(self, job_configs, factory, reconfigure, namespace_to_reconfigure=None):\n self.jobs.filter_by_name(job_configs)\n\n def map_to_job_and_schedule(job_schedulers):\n for job_scheduler in job_schedulers:\n if reconfigure:\n job_scheduler.schedule()\n yield job_scheduler.get_job()\n\n def reconfigure_filter(config):\n if not reconfigure or not namespace_to_reconfigure:\n return True\n else:\n return config.namespace == namespace_to_reconfigure\n\n seq = (factory.build(config) for config in job_configs.values() if reconfigure_filter(config))\n return map_to_job_and_schedule(filter(self.add, seq))",
"def submit_janitor_cronjob(cfg: ElasticBlastConfig):\n dry_run = cfg.cluster.dry_run\n\n janitor_schedule = ELB_DFLT_JANITOR_SCHEDULE_GCP\n if ELB_JANITOR_SCHEDULE in os.environ:\n janitor_schedule = os.environ[ELB_JANITOR_SCHEDULE]\n logging.debug(f'Overriding janitor schedule to \"{janitor_schedule}\"')\n\n subs = {\n 'ELB_DOCKER_IMAGE' : ELB_JANITOR_DOCKER_IMAGE_GCP,\n 'ELB_GCP_PROJECT' : cfg.gcp.project,\n 'ELB_GCP_REGION' : cfg.gcp.region,\n 'ELB_GCP_ZONE' : cfg.gcp.zone,\n 'ELB_RESULTS' : cfg.cluster.results,\n 'ELB_CLUSTER_NAME' : cfg.cluster.name,\n 'ELB_JANITOR_SCHEDULE' : janitor_schedule\n }\n logging.debug(f\"Submitting ElasticBLAST janitor cronjob: {ELB_JANITOR_DOCKER_IMAGE_GCP}\")\n with TemporaryDirectory() as d:\n set_extraction_path(d)\n cronjob_yaml = os.path.join(d, 'elb-cronjob.yaml')\n with open(cronjob_yaml, 'wt') as f:\n f.write(substitute_params(resource_string('elastic_blast', 'templates/elb-janitor-cronjob.yaml.template').decode(), subs))\n cmd = f\"kubectl --context={cfg.appstate.k8s_ctx} apply -f {cronjob_yaml}\"\n if dry_run:\n logging.info(cmd)\n else:\n safe_exec(cmd)",
"def build_job_configs(self, args):\n job_configs = {}\n\n ttype = args['ttype']\n (targets_yaml, sim) = NAME_FACTORY.resolve_targetfile(\n args, require_sim_name=True)\n if targets_yaml is None:\n return job_configs\n\n specconfig = NAME_FACTORY.resolve_specconfig(args)\n\n astro_priors = args['astro_priors']\n write_full = args.get('write_full', False)\n\n targets = load_yaml(targets_yaml)\n base_config = dict(nsims=args['nsims'],\n seed=args['seed'],\n specconfig=specconfig)\n\n for target_name, profile_list in list(targets.items()):\n for profile in profile_list:\n for astro_prior in astro_priors:\n if is_null(astro_prior):\n astro_prior = 'none'\n full_key = \"%s:%s:%s:%s\" % (\n target_name, profile, sim, astro_prior)\n name_keys = dict(target_type=ttype,\n target_name=target_name,\n sim_name=sim,\n profile=profile,\n astro_prior=astro_prior,\n fullpath=True)\n limitfile = NAME_FACTORY.sim_dmlimitsfile(**name_keys)\n first = args['seed']\n last = first + args['nsims'] - 1\n outfile = limitfile.replace(\n '_SEED.fits', '_collected_%06i_%06i.fits' %\n (first, last))\n logfile = make_nfs_path(outfile.replace('.fits', '.log'))\n if not write_full:\n outfile = None\n summaryfile = limitfile.replace(\n '_SEED.fits', '_summary_%06i_%06i.fits' %\n (first, last))\n job_config = base_config.copy()\n job_config.update(dict(limitfile=limitfile,\n astro_prior=astro_prior,\n outfile=outfile,\n summaryfile=summaryfile,\n logfile=logfile))\n job_configs[full_key] = job_config\n\n return job_configs",
"def create_config_job(self,\n resource_uri,\n cim_creation_class_name,\n cim_name,\n target,\n cim_system_creation_class_name='DCIM_ComputerSystem',\n cim_system_name='DCIM:ComputerSystem',\n reboot=False,\n start_time='TIME_NOW'):\n return self._job_mgmt.create_config_job(\n resource_uri,\n cim_creation_class_name,\n cim_name,\n target,\n cim_system_creation_class_name,\n cim_system_name,\n reboot,\n start_time)",
"def setup_schedule():\n for project in Project.select():\n if (project.schedule_interval is not None) and (project.schedule_interval > 0):\n schedule.add_job(pull_build_project, \"interval\", id=\"building_\" + str(project.id),\n hours=project.schedule_interval,\n args=[project, \"master\"])",
"def build_job_configs(self, args):\n job_configs = {}\n\n ttype = args['ttype']\n (roster_yaml, sim) = NAME_FACTORY.resolve_rosterfile(\n args, require_sim_name=True)\n if roster_yaml is None:\n return job_configs\n\n specconfig = NAME_FACTORY.resolve_specconfig(args)\n\n astro_priors = args['astro_priors']\n write_full = args['write_full']\n first = args['seed']\n last = first + args['nsims'] - 1\n\n base_config = dict(nsims=args['nsims'],\n seed=args['seed'])\n\n roster_dict = load_yaml(roster_yaml)\n for roster_name in list(roster_dict.keys()):\n for astro_prior in astro_priors:\n if is_null(astro_prior):\n astro_prior = 'none'\n full_key = \"%s:%s:%s\" % (roster_name, sim, astro_prior)\n name_keys = dict(target_type=ttype,\n roster_name=roster_name,\n sim_name=sim,\n astro_prior=astro_prior,\n fullpath=True)\n\n limitfile = NAME_FACTORY.sim_stackedlimitsfile(**name_keys)\n outfile = limitfile.replace(\n '_SEED.fits', '_collected_%06i_%06i.fits' %\n (first, last))\n logfile = make_nfs_path(outfile.replace('.fits', '.log'))\n if not write_full:\n outfile = None\n summaryfile = limitfile.replace('_SEED.fits', '_summary.fits')\n\n job_config = base_config.copy()\n job_config.update(dict(limitfile=limitfile,\n specconfig=specconfig,\n astro_prior=astro_prior,\n outfile=outfile,\n summaryfile=summaryfile,\n logfile=logfile))\n job_configs[full_key] = job_config\n\n return job_configs",
"def setup_jobs(self):\n transfer_args = [\"analysis_type\", \"perturbation\", \"num_permutations\", \"permutation_test_statistic\", \"loss_function\",\n \"importance_significance_level\", \"window_search_algorithm\", \"window_effect_size_threshold\"]\n jobs = [None] * self.num_jobs\n for idx in range(self.num_jobs):\n # Create and launch condor job\n features_filename = constants.INPUT_FEATURES_FILENAME.format(self.args.output_dir, idx)\n input_files = [features_filename, self.args.model_filename, self.args.model_loader_filename, self.args.data_filename]\n job_dir = f\"{self.args.output_dir}/outputs_{idx}\"\n cmd = f\"python3 -m anamod.core.worker -worker_idx {idx}\"\n for arg in transfer_args:\n if hasattr(self.args, arg):\n cmd += f\" -{arg} {getattr(self.args, arg)}\"\n # Relative file paths for non-shared FS, absolute for shared FS\n for name, path in dict(output_dir=job_dir, features_filename=features_filename, model_filename=self.args.model_filename,\n model_loader_filename=self.args.model_loader_filename, data_filename=self.args.data_filename).items():\n cmd += f\" -{name} {os.path.abspath(path)}\" if self.args.shared_filesystem else f\" -{name} {os.path.basename(path)}\"\n job = CondorJobWrapper(cmd, input_files, job_dir, shared_filesystem=self.args.shared_filesystem,\n memory=f\"{self.args.memory_requirement}GB\", disk=f\"{self.args.disk_requirement}GB\",\n avoid_bad_hosts=self.args.avoid_bad_hosts, retry_arbitrary_failures=self.args.retry_arbitrary_failures,\n cleanup=self.args.cleanup)\n jobs[idx] = job\n return jobs",
"def multiple_apply_config(self):\n\n self.show_step(1, initialize=True)\n self.env.revert_snapshot(\"reconfiguration_scalability\")\n\n self.show_step(2)\n cluster_id = self.fuel_web.get_last_created_cluster()\n computes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n cluster_id, ['compute'])\n target_compute = computes[0]\n config = utils.get_config_template('nova_disk')\n structured_config_old = get_structured_config_dict(config)\n\n config['nova_config'][\n 'DEFAULT/default_ephemeral_format']['value'] = 'ext3'\n structured_config_new = get_structured_config_dict(config)\n self.fuel_web.client.upload_configuration(config,\n cluster_id,\n node_id=target_compute['id'])\n\n self.show_step(3)\n service_name = 'nova-compute'\n uptimes = self.get_service_uptime([target_compute], service_name)\n\n self.show_step(4)\n task = self.fuel_web.client.apply_configuration(\n cluster_id,\n node_id=target_compute['id'])\n self.fuel_web.assert_task_success(task, timeout=900, interval=5)\n\n self.show_step(5)\n self.check_service_was_restarted([target_compute],\n uptimes, service_name)\n\n self.show_step(6)\n for compute in computes:\n if compute == target_compute:\n self.check_config_on_remote([compute], structured_config_new)\n target_hypervisor_name = compute['fqdn']\n else:\n hypervisor_name = compute['fqdn']\n self.check_config_on_remote([compute], structured_config_old)\n\n self.show_step(7)\n os_conn = os_actions.OpenStackActions(\n self.fuel_web.get_public_vip(cluster_id))\n\n self.show_step(8)\n self.show_step(9)\n self.show_step(10)\n self.show_step(11)\n self.check_nova_ephemeral_disk(os_conn, cluster_id,\n hypervisor_name=target_hypervisor_name,\n fs_type='ext3')\n self.show_step(12)\n self.show_step(13)\n self.show_step(14)\n self.show_step(15)\n self.check_nova_ephemeral_disk(os_conn, cluster_id,\n hypervisor_name=hypervisor_name)\n\n self.env.make_snapshot(\"multiple_apply_config\")",
"def update_fcoe_configs(self):\n # Nothing to be done if no reordering has occurred.\n reordered = self.udev.reordered_devices\n if not reordered:\n return\n\n # Skip if we have already completed this stage\n if self.fcoe_confs:\n return\n\n # Generate candidate list of fcoe conf files, with\n # associated rule, that need to be processed\n reordered_files = tuple((r, os.path.join(self.syspaths.fcoe_dir,\n \"cfg-%s\" % r['from']))\n for r in reordered)\n\n # At this stage changes have been prepared but are not yet\n # committed to disk\n self._fcoe_confs = self._process_candidate_conf_files(reordered_files)",
"def apply_config(\n hostname: str, config: str, dry_run: bool, job_id: Optional[int] = None, scheduled_by: Optional[str] = None\n) -> NornirJobResult:\n logger = get_logger()\n\n with sqla_session() as session:\n dev: Device = session.query(Device).filter(Device.hostname == hostname).one_or_none()\n if not dev:\n raise Exception(\"Device {} not found\".format(hostname))\n elif not (dev.state == DeviceState.MANAGED or dev.state == DeviceState.UNMANAGED):\n raise Exception(\"Device {} is in invalid state: {}\".format(hostname, dev.state))\n\n nr = cnaas_init()\n nr_filtered, _, _ = inventory_selector(nr, hostname=hostname)\n\n try:\n nrresult = nr_filtered.run(task=push_static_config, config=config, dry_run=dry_run, job_id=job_id)\n except Exception as e:\n logger.exception(\"Exception in apply_config: {}\".format(e))\n else:\n if not dry_run:\n with sqla_session() as session:\n dev: Device = session.query(Device).filter(Device.hostname == hostname).one_or_none()\n dev.state = DeviceState.UNMANAGED\n dev.synchronized = False\n\n return NornirJobResult(nrresult=nrresult)",
"def apply(self, cleanup=False, activate=True):\n logger.info('applying network configs...')\n restart_interfaces = []\n restart_bridges = []\n update_files = {}\n all_file_names = []\n\n for interface_name, iface_data in self.interface_data.iteritems():\n route_data = self.route_data.get(interface_name, '')\n interface_path = ifcfg_config_path(interface_name)\n route_path = route_config_path(interface_name)\n all_file_names.append(interface_path)\n all_file_names.append(route_path)\n if (utils.diff(interface_path, iface_data) or\n utils.diff(route_path, route_data)):\n restart_interfaces.append(interface_name)\n restart_interfaces.extend(self.child_members(interface_name))\n update_files[interface_path] = iface_data\n update_files[route_path] = route_data\n logger.info('No changes required for interface: %s' %\n interface_name)\n\n for bridge_name, bridge_data in self.bridge_data.iteritems():\n route_data = self.route_data.get(bridge_name, '')\n bridge_path = bridge_config_path(bridge_name)\n bridge_route_path = route_config_path(bridge_name)\n all_file_names.append(bridge_path)\n all_file_names.append(bridge_route_path)\n if (utils.diff(bridge_path, bridge_data) or\n utils.diff(bridge_route_path, route_data)):\n restart_bridges.append(bridge_name)\n restart_interfaces.extend(self.child_members(bridge_name))\n update_files[bridge_path] = bridge_data\n update_files[bridge_route_path] = route_data\n logger.info('No changes required for bridge: %s' % bridge_name)\n\n if cleanup:\n for ifcfg_file in glob.iglob(cleanup_pattern()):\n if ifcfg_file not in all_file_names:\n interface_name = ifcfg_file[len(cleanup_pattern()) - 1:]\n if interface_name != 'lo':\n logger.info('cleaning up interface: %s'\n % interface_name)\n self.ifdown(interface_name)\n self.remove_config(ifcfg_file)\n\n if activate:\n for interface in restart_interfaces:\n self.ifdown(interface)\n\n for bridge in restart_bridges:\n self.ifdown(bridge, iftype='bridge')\n\n for oldname, newname in self.renamed_interfaces.iteritems():\n self.ifrename(oldname, newname)\n\n for location, data in update_files.iteritems():\n self.write_config(location, data)\n\n if activate:\n for bridge in restart_bridges:\n self.ifup(bridge, iftype='bridge')\n\n for interface in restart_interfaces:\n self.ifup(interface)\n\n return update_files",
"def _initJobs(self):\n super(DigestManager, self)._initJobs()\n conf = self.config.container_manager\n\n job4 = LoopingCall(self.performRequestedScan)\n job4.start(float(conf.activescan_interval))\n self.jobs.append(job4)",
"def configure_aaa_accounting_update_periodic(device,interval):\n logger.info(f\"Configuring aaa accounting update newinfo periodic\")\n\n configs=f\"aaa accounting update newinfo periodic {interval}\"\n try:\n device.configure(configs)\n except SubCommandFailure as e:\n raise SubCommandFailure(f\"Could not configure aaa accounting update newinfo periodic. Error:\\n{e}\")",
"def prepare_rw_jobs(self, repeats):\n \n \n #The tasks we need to go through to append the report output\n taskList = [\n 'steadyState',\n 'timeCourse',\n 'scan',\n 'metabolicControlAnalysis',\n 'optimization',\n 'parameterFitting',\n 'fluxMode',\n 'lyapunovExponents',\n 'timeScaleSeparationAnalysis',\n 'sensitivities',\n 'moieties'\n ]\n \n \n task_report_targets = {} #Store the report output targets \n #Create a new COPASI file for each repeat\n #Keep a note of the output files we're creating\n model_files = []\n output_files = []\n for i in range(repeats):\n #For each task, if the report output is set, append it with '_i'\n for taskName in taskList:\n try:\n task = self._getTask(taskName)\n report = task.find(xmlns + 'Report')\n if i==0:\n task_report_targets[taskName] = report.attrib['target']\n report.attrib['target'] = str(i) + '_' + task_report_targets[taskName]\n if i==0:\n if task.attrib['scheduled'] == 'true':\n output_files.append(task_report_targets[taskName])\n \n except:\n pass #It's possible not every task has a report set. If this is the case, ignore it!\n \n filename = 'auto_copasi_1.%d.cps'%i\n target = os.path.join(self.path, filename)\n model_files.append(filename)\n \n self.write(target)\n \n return model_files, output_files",
"def schedule_deploy():\n\n logger.info(\"Scheduling deploy\")\n scheduler.schedule_job(\"op_deploy\", {}, \"#general\", 60)",
"def compile_cron_jobs(self, **kwargs):\n\n def get_cron_schedule(minutes: int, day: int) -> str:\n \"\"\"Returns the cron schedule expression for the specified parameters.\"\"\"\n return ( # day + 1, since 0 == Sunday\n f\"{minutes % 60} {minutes // 60} * * {day + 1}\"\n )\n\n def get_next_course_message(i: int, courses: list) -> str:\n \"\"\"Returns the string of the cron job that should be ran for the upcoming course.\"\"\"\n course = (\n None\n if i + 1 >= len(courses)\n or courses[i].time.day != courses[i + 1].time.day\n else courses[i + 1]\n )\n\n return (\n notify_no_more_courses\n if course is None\n else notify_next_course_message.format(\n course.name,\n course.type,\n course.time.start - courses[i].time.end,\n \"?\" if course.classroom is None else course.classroom.number,\n )\n )\n\n # check if the script is running as root; if not, call itself as root\n if not os.geteuid() == 0:\n call([\"sudo\", \"-E\", *sys.argv])\n sys.exit()\n\n courses = self.get_sorted_courses(include_unscheduled=False)\n\n cron_file = \"/etc/crontab\"\n user = os.getlogin()\n\n # comments to encapsulate the generated cron jobs\n cron_file_comments = {\n \"beginning\": (\n \"# BEGINNING: course schedule crons (autogenerated, do not change)\"\n ),\n \"end\": \"# END: course schedule crons\",\n }\n\n # if it doesn't exist, create it\n if not os.path.exists(cron_file):\n open(cron_file, \"w\")\n\n with open(cron_file, \"r+\") as f:\n contents = f.readlines()\n f.seek(0)\n\n # write to file till we reach the end or the comment section is skipped, so we\n # can add the new course-related cron jobs\n i = 0\n while i < len(contents):\n if contents[i].strip() == cron_file_comments[\"beginning\"]:\n while contents[i].strip() != cron_file_comments[\"end\"]:\n i += 1\n\n i += 1\n break\n else:\n f.write(contents[i])\n\n i += 1\n\n f.write(cron_file_comments[\"beginning\"] + \"\\n\")\n\n for j, course in enumerate(courses):\n # the messages regarding the course\n messages = [\n (\n get_cron_schedule(course.time.end - 5, course.weekday()),\n get_next_course_message(j, courses),\n ),\n (\n get_cron_schedule(course.time.start, course.weekday()),\n f\"{notify_started_message} <i>{course.name}\"\n f\" ({course.type})</i>.\",\n ),\n ]\n\n for cron_schedule, body in messages:\n f.write(f\"{cron_schedule} {user}{hack} {notify_command} '{body}'\\n\")\n\n f.write(cron_file_comments[\"end\"] + \"\\n\")\n\n # write the rest of the file\n while i < len(contents):\n f.write(contents[i])\n i += 1\n\n # cut whatever is left\n f.truncate()\n\n print(f\"Course messages generated and saved to {cron_file}.\")",
"def task_generate_job_batch():\n return {\n # force doit to always mark the task\n # as not up-to-date (unless target removed)\n 'uptodate': [False],\n 'file_dep': ['generate_job_batch.py'],\n 'task_dep': ['create_folders'],\n #'targets': ['.running_jobs/list_of_jobs.txt'],\n 'actions': ['python generate_job_batch.py'],\n }",
"def create_nic_config_job(\n self,\n nic_id,\n reboot=False,\n start_time='TIME_NOW'):\n return self._job_mgmt.create_config_job(\n resource_uri=uris.DCIM_NICService,\n cim_creation_class_name='DCIM_NICService',\n cim_name='DCIM:NICService',\n target=nic_id,\n reboot=reboot,\n start_time=start_time)",
"def restore_running_jobs(self):\n jobs = self.restart_dict['running_jobs']\n if not jobs or not any([job for job in jobs.values()]):\n del self.restart_dict['running_jobs']\n self.running_jobs = dict()\n logger.debug('It seems that there are no running jobs specified in the ARC restart file. '\n 'Assuming all jobs have finished.')\n else:\n for spc_label in jobs.keys():\n if spc_label not in self.running_jobs.keys():\n self.running_jobs[spc_label] = list()\n for job_description in jobs[spc_label]:\n if ('conformer' not in job_description or job_description['conformer'] is None) \\\n and ('tsg' not in job_description or job_description['tsg'] is None):\n self.running_jobs[spc_label].append(job_description['job_name'])\n elif 'conformer' in job_description:\n self.running_jobs[spc_label].append(f'conformer{job_description[\"conformer\"]}')\n elif 'tsg' in job_description:\n self.running_jobs[spc_label].append(f'tsg{job_description[\"tsg\"]}')\n for species in self.species_list:\n if species.label == spc_label:\n break\n else:\n raise SchedulerError(f'Could not find species {spc_label} in the restart file')\n job_description['species'] = [self.species_dict[label] for label in job_description['species_labels']] \\\n if 'species_labels' in job_description else None\n if 'species_labels' in job_description:\n del job_description['species_labels']\n job_description['reactions'] = [self.rxn_dict[i] for i in job_description['reaction_indices']] \\\n if 'reaction_indices' in job_description else None\n if 'reaction_indices' in job_description:\n del job_description['reaction_indices']\n job = job_factory(**job_description)\n if spc_label not in self.job_dict.keys():\n self.job_dict[spc_label] = dict()\n if job_description['job_type'] not in self.job_dict[spc_label].keys():\n if ('conformer' not in job_description or job_description['conformer'] is None) \\\n and ('tsg' not in job_description or job_description['tsg'] is None):\n self.job_dict[spc_label][job_description['job_type']] = dict()\n elif 'conformers' not in self.job_dict[spc_label].keys():\n self.job_dict[spc_label]['conformers'] = dict()\n elif 'tsg' not in self.job_dict[spc_label].keys():\n self.job_dict[spc_label]['tsg'] = dict()\n if ('conformer' not in job_description or job_description['conformer'] is None) \\\n and ('tsg' not in job_description or job_description['tsg'] is None):\n self.job_dict[spc_label][job_description['job_type']][job_description['job_name']] = job\n elif 'conformer' in job_description and job_description['conformer'] is not None:\n if 'conformers' not in self.job_dict[spc_label].keys():\n self.job_dict[spc_label]['conformers'] = dict()\n self.job_dict[spc_label]['conformers'][int(job_description['conformer'])] = job\n # don't generate additional conformers for this species\n self.dont_gen_confs.append(spc_label)\n elif 'tsg' in job_description and job_description['tsg'] is not None:\n if 'tsg' not in self.job_dict[spc_label].keys():\n self.job_dict[spc_label]['tsg'] = dict()\n self.job_dict[spc_label]['tsg'][int(job_description['tsg'])] = job\n self.server_job_ids.append(job.job_id)\n if self.job_dict:\n content = 'Restarting ARC, tracking the following jobs spawned in a previous session:'\n for spc_label in self.job_dict.keys():\n content += f'\\n{spc_label}: '\n for job_type in self.job_dict[spc_label].keys():\n for job_name in self.job_dict[spc_label][job_type].keys():\n if job_type not in ['conformers', 'tsg']:\n content += job_name + ', '\n elif job_type == 'conformers':\n content += 
self.job_dict[spc_label][job_type][job_name].job_name \\\n + f' (conformer{job_name}), '\n elif job_type == 'tsg':\n content += self.job_dict[spc_label][job_type][job_name].job_name \\\n + f' (tsg{job_name}), '\n content += '\\n\\n'\n logger.info(content)",
"def build_job_configs(self, args):\n job_configs = {}\n\n components = Component.build_from_yamlfile(args['comp'])\n\n datafile = args['data']\n if datafile is None or datafile == 'None':\n return job_configs\n NAME_FACTORY.update_base_dict(args['data'])\n outdir_base = os.path.join(NAME_FACTORY.base_dict['basedir'], 'counts_cubes')\n\n inputfiles = create_inputlist(args['ft1file'])\n num_files = len(inputfiles)\n\n for comp in components:\n zcut = \"zmax%i\" % comp.zmax\n\n mktimelist = copy.copy(comp.mktimefilters)\n if not mktimelist:\n mktimelist.append('none')\n evtclasslist_keys = copy.copy(comp.evtclasses)\n if not evtclasslist_keys:\n evtclasslist_vals = [NAME_FACTORY.base_dict['evclass']]\n else:\n evtclasslist_vals = copy.copy(evtclasslist_keys)\n\n for mktimekey in mktimelist:\n for evtclassval in evtclasslist_vals:\n fullkey = comp.make_key(\n '%s_%s_{ebin_name}_%s_{evtype_name}' %\n (evtclassval, zcut, mktimekey))\n\n name_keys = dict(zcut=zcut,\n ebin=comp.ebin_name,\n psftype=comp.evtype_name,\n coordsys=comp.coordsys,\n irf_ver=NAME_FACTORY.irf_ver(),\n mktime=mktimekey,\n evclass=evtclassval,\n fullpath=True)\n\n ccube_name = os.path.basename(NAME_FACTORY.ccube(**name_keys))\n outfile = os.path.join(outdir_base, ccube_name)\n infiles = _make_input_file_list(outfile, num_files)\n logfile = make_nfs_path(outfile.replace('.fits', '.log'))\n job_configs[fullkey] = dict(args=infiles,\n output=outfile,\n logfile=logfile)\n\n return job_configs"
] | [
"0.5808297",
"0.57016754",
"0.5615414",
"0.55733514",
"0.5548497",
"0.53404003",
"0.52409655",
"0.5188993",
"0.51454985",
"0.51232415",
"0.51201946",
"0.51161766",
"0.5100845",
"0.50881994",
"0.5061088",
"0.50133616",
"0.50033385",
"0.49992248",
"0.49333566",
"0.4930864",
"0.49250102",
"0.48988807",
"0.48837668",
"0.48595187",
"0.48486647",
"0.48260432",
"0.48188353",
"0.48108566",
"0.4795413",
"0.47926793"
] | 0.6341404 | 0 |
Applies all pending changes to the BIOS by creating a config job | def commit_pending_bios_changes(self, reboot=False, start_time='TIME_NOW'):
return self._job_mgmt.create_config_job(
resource_uri=ironic_uris.DCIM_BIOSService,
cim_creation_class_name='DCIM_BIOSService',
cim_name='DCIM:BIOSService',
target=self.BIOS_DEVICE_FQDD,
reboot=reboot,
start_time=start_time) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def apply(self) -> None:\n _ba.apply_config()",
"def update(cfg, jobs):\n server = jenkins_utils.server_factory(cfg)\n libjobs.updateJobs(server, jobs)",
"def work(self):\n self.config_file = self.args.config\n self.init_config()\n self.init_db()\n\n self.kickoff()",
"def apply(self):\n changed = False\n job_schedule_exists = False\n results = netapp_utils.get_cserver(self.server)\n cserver = netapp_utils.setup_ontap_zapi(\n module=self.module, vserver=results)\n netapp_utils.ems_log_event(\"na_ontap_job_schedule\", cserver)\n job_details = self.get_job_schedule()\n if job_details:\n job_schedule_exists = True\n if self.state == 'absent': # delete\n changed = True\n elif self.state == 'present': # modify\n if job_details['job_minutes'] != str(self.job_minutes):\n changed = True\n else:\n if self.state == 'present': # create\n changed = True\n if changed:\n if self.module.check_mode:\n pass\n else:\n if self.state == 'present': # execute create\n if not job_schedule_exists:\n self.create_job_schedule()\n else: # execute modify minute\n self.modify_minute_job_schedule()\n elif self.state == 'absent': # execute delete\n self.delete_job_schedule()\n self.module.exit_json(changed=changed)",
"def multiple_apply_config(self):\n\n self.show_step(1, initialize=True)\n self.env.revert_snapshot(\"reconfiguration_scalability\")\n\n self.show_step(2)\n cluster_id = self.fuel_web.get_last_created_cluster()\n computes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n cluster_id, ['compute'])\n target_compute = computes[0]\n config = utils.get_config_template('nova_disk')\n structured_config_old = get_structured_config_dict(config)\n\n config['nova_config'][\n 'DEFAULT/default_ephemeral_format']['value'] = 'ext3'\n structured_config_new = get_structured_config_dict(config)\n self.fuel_web.client.upload_configuration(config,\n cluster_id,\n node_id=target_compute['id'])\n\n self.show_step(3)\n service_name = 'nova-compute'\n uptimes = self.get_service_uptime([target_compute], service_name)\n\n self.show_step(4)\n task = self.fuel_web.client.apply_configuration(\n cluster_id,\n node_id=target_compute['id'])\n self.fuel_web.assert_task_success(task, timeout=900, interval=5)\n\n self.show_step(5)\n self.check_service_was_restarted([target_compute],\n uptimes, service_name)\n\n self.show_step(6)\n for compute in computes:\n if compute == target_compute:\n self.check_config_on_remote([compute], structured_config_new)\n target_hypervisor_name = compute['fqdn']\n else:\n hypervisor_name = compute['fqdn']\n self.check_config_on_remote([compute], structured_config_old)\n\n self.show_step(7)\n os_conn = os_actions.OpenStackActions(\n self.fuel_web.get_public_vip(cluster_id))\n\n self.show_step(8)\n self.show_step(9)\n self.show_step(10)\n self.show_step(11)\n self.check_nova_ephemeral_disk(os_conn, cluster_id,\n hypervisor_name=target_hypervisor_name,\n fs_type='ext3')\n self.show_step(12)\n self.show_step(13)\n self.show_step(14)\n self.show_step(15)\n self.check_nova_ephemeral_disk(os_conn, cluster_id,\n hypervisor_name=hypervisor_name)\n\n self.env.make_snapshot(\"multiple_apply_config\")",
"def update_db(self):\n for tool in self.watchdb:\n if 'jobs' not in self.watchdb[tool]:\n continue\n for jname in self.watchdb[tool]['jobs']:\n job = self.watchdb[tool]['jobs'][jname]\n if 'timeout' in job:\n # Waiting on a restart or throttled,\n # leave the current state alone\n continue\n # Mark as dead pending verification of state from qstat\n job['state'] = 'DEAD'\n\n # Update the known state of all jobs from qstat data\n xml = ET.fromstring(subprocess.check_output(\n ['/usr/bin/qstat', '-u', '*', '-xml']))\n for j in xml.iter('job_list'):\n tool = j.find('JB_owner').text\n try:\n self.read_config(tool)\n except IOError:\n logger.exception('Failed to read config for %s', tool)\n continue\n\n if tool not in self.watchdb or 'jobs' not in self.watchdb[tool]:\n # Not watching any jobs for this tool\n continue\n\n jname = j.find('JB_name').text\n if jname not in self.watchdb[tool]['jobs']:\n # Not watching this job for this tool\n continue\n\n # Update the watched job's state\n job = self.watchdb[tool]['jobs'][jname]\n job['jname'] = jname\n job['state'] = j.find('state').text\n\n since_xml = j.find('JAT_start_time')\n if since_xml is None:\n since_xml = j.find('JB_submission_time')\n job['since'] = datetime.datetime.strptime(\n since_xml.text, '%Y-%m-%dT%H:%M:%S')\n\n if 'timeout' in job:\n del job['timeout']",
"def apply_config(dts, acg, xact, action, scratch):\n self.log.debug(\"Apply Config\")\n return rwtypes.RwStatus.SUCCESS",
"def _run_system_update(args):\n mem_types = set([\"memory\", \"jvm_opts\"])\n args = defaults.update_check_args(args, \"Could not do upgrade of bcbio_system.yaml\")\n system_file = os.path.join(args.datadir, \"galaxy\", \"bcbio_system.yaml\")\n with open(system_file) as in_handle:\n config = yaml.safe_load(in_handle)\n out = copy.deepcopy(config)\n mems = []\n for attrs in config.get(\"resources\", {}).itervalues():\n for key, value in attrs.iteritems():\n if key in mem_types:\n mems.append((key, value))\n common_mem = _calculate_common_memory(mems)\n for prog, attrs in config.get(\"resources\", {}).iteritems():\n for key, value in attrs.iteritems():\n if key == \"cores\":\n out['resources'][prog][key] = int(args.cores)\n elif key in mem_types:\n out[\"resources\"][prog][key] = _update_memory(key, value, args.memory,\n common_mem)\n bak_file = system_file + \".bak%s\" % datetime.datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\")\n shutil.move(system_file, bak_file)\n with open(system_file, \"w\") as out_handle:\n yaml.safe_dump(out, out_handle, default_flow_style=False, allow_unicode=False)",
"def bootstrap_config(self):\n self.logger.info(\"applying bootstrap configuration\")\n self.wait_write(\"\\r\", None)\n # Wait for the prompt\n time.sleep(1)\n self.wait_write(\"system-view\", \"<HPE>\")\n self.wait_write(\"ssh server enable\", \"[HPE]\")\n self.wait_write(\"user-interface class vty\", \"[HPE]\")\n self.wait_write(\"authentication-mode scheme\", \"[HPE-line-class-vty]\")\n self.wait_write(\"protocol inbound ssh\", \"[HPE-line-class-vty]\")\n self.wait_write(\"quit\", \"[HPE-line-class-vty]\")\n self.wait_write(\"local-user %s\" % (self.username), \"[HPE]\")\n self.wait_write(\"password simple %s\" % (self.password), \"[HPE-luser-manage-%s]\" % (self.username))\n self.wait_write(\"service-type ssh\", \"[HPE-luser-manage-%s]\" % (self.username))\n self.wait_write(\"authorization-attribute user-role network-admin\", \"[HPE-luser-manage-%s]\" % (self.username))\n self.wait_write(\"quit\", \"[HPE-luser-manage-%s]\" % (self.username))\n self.wait_write(\"interface GigabitEthernet%s/0\" % (self.num_nics + 1), \"[HPE]\")\n self.wait_write(\"ip address 10.0.0.15 255.255.255.0\", \"[HPE-GigabitEthernet%s/0]\" % (self.num_nics + 1))\n self.wait_write(\"quit\", \"[HPE-GigabitEthernet%s/0]\" % (self.num_nics + 1))\n self.wait_write(\"quit\", \"[HPE]\")\n self.wait_write(\"quit\", \"<HPE>\")\n self.logger.info(\"completed bootstrap configuration\")",
"def update_preprocessing_gmwmi(self, new):\n self.stages[\"Preprocessing\"].config.gmwmi_seeding = new\n self.stages[\"Registration\"].config.gmwmi_seeding = new",
"def process_system(self):\n if self.already_processed or self.dont_run or not self.system_valid:\n return\n\n self.reorder_udev_rules()\n self.update_fcoe_configs()\n self.update_ifaces_configs()\n\n self.commit()",
"def preservation_config_after_reset_and_preconfigured_deploy(self):\n\n self.show_step(1, initialize=True)\n self.env.revert_snapshot(\"reconfigure_ml2_vlan_range\")\n\n self.show_step(2)\n cluster_id = self.fuel_web.get_last_created_cluster()\n self.fuel_web.stop_reset_env_wait(cluster_id)\n\n self.show_step(3)\n config = utils.get_config_template('nova_cpu')\n structured_config_nova = get_structured_config_dict(config)\n self.fuel_web.client.upload_configuration(config,\n cluster_id,\n role='controller')\n config = utils.get_config_template('neutron')\n structured_config_neutron = get_structured_config_dict(config)\n\n self.show_step(4)\n self.fuel_web.wait_nodes_get_online_state(\n self.env.d_env.nodes().slaves[:4], timeout=10 * 60)\n\n self.fuel_web.deploy_cluster_wait(cluster_id)\n\n self.show_step(5)\n self.fuel_web.run_ostf(\n cluster_id=cluster_id)\n\n self.show_step(6)\n controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n cluster_id, ['controller'])\n structured_config = {}\n structured_config.update(structured_config_neutron)\n structured_config.update(structured_config_nova)\n self.check_config_on_remote(controllers, structured_config)\n\n self.show_step(7)\n self.show_step(8)\n os_conn = os_actions.OpenStackActions(\n self.fuel_web.get_public_vip(cluster_id))\n self.check_ml2_vlan_range(os_conn)\n\n self.show_step(9)\n self.show_step(10)\n self.check_overcommit_ratio(os_conn, cluster_id)\n\n snapshot = \"preservation_config_after_reset_and_preconfigured_deploy\"\n self.env.make_snapshot(snapshot, is_make=True)",
"def bootstrap_post():\n\timport os\n\tprint('status','running bootstrap_post from bootstrap.py')\n\tos.system('make config')\n\treturn",
"def _configure_all_tasks(self, config, job_exe, job_type):\n\n config.set_task_ids(job_exe.get_cluster_id())\n\n for task_type in config.get_task_types():\n # Configure env vars describing allocated task resources\n env_vars = {}\n nvidia_docker_label = None\n\n for resource in config.get_resources(task_type).resources:\n env_name = 'ALLOCATED_%s' % normalize_env_var_name(resource.name)\n env_vars[env_name] = '%.1f' % resource.value # Assumes scalar resources\n if resource.name == \"gpus\" and int(resource.value) > 0:\n gpu_list = GPUManager.get_nvidia_docker_label(job_exe.node_id, job_exe.job_id)\n nvidia_docker_label = DockerParameter('env','NVIDIA_VISIBLE_DEVICES={}'.format(gpu_list.strip(',')))\n\n # Configure env vars for Scale meta-data\n env_vars['SCALE_JOB_ID'] = unicode(job_exe.job_id)\n env_vars['SCALE_EXE_NUM'] = unicode(job_exe.exe_num)\n if job_exe.recipe_id:\n env_vars['SCALE_RECIPE_ID'] = unicode(job_exe.recipe_id)\n if job_exe.batch_id:\n env_vars['SCALE_BATCH_ID'] = unicode(job_exe.batch_id)\n\n # Configure workspace volumes\n workspace_volumes = {}\n for task_workspace in config.get_workspaces(task_type):\n logger.debug(self._workspaces)\n workspace_model = self._workspaces[task_workspace.name]\n # TODO: Should refactor workspace broker to return a Volume object and remove BrokerVolume\n if workspace_model.volume:\n vol_name = get_workspace_volume_name(job_exe, task_workspace.name)\n cont_path = get_workspace_volume_path(workspace_model.name)\n if workspace_model.volume.host:\n host_path = workspace_model.volume.remote_path\n volume = Volume(vol_name, cont_path, task_workspace.mode, is_host=True, host_path=host_path)\n else:\n driver = workspace_model.volume.driver\n driver_opts = {}\n # TODO: Hack alert for nfs broker, as stated above, we should return Volume from broker\n if driver == 'nfs':\n driver_opts = {'share': workspace_model.volume.remote_path}\n volume = Volume(vol_name, cont_path, task_workspace.mode, is_host=False, driver=driver,\n driver_opts=driver_opts)\n workspace_volumes[task_workspace.name] = volume\n\n config.add_to_task(task_type, env_vars=env_vars, wksp_volumes=workspace_volumes)\n\n # Labels for metric grouping\n job_id_label = DockerParameter('label', 'scale-job-id={}'.format(job_exe.job_id))\n job_execution_id_label = DockerParameter('label', 'scale-job-execution-id={}'.format(job_exe.exe_num))\n job_type_name_label = DockerParameter('label', 'scale-job-type-name={}'.format(job_type.name))\n job_type_version_label = DockerParameter('label', 'scale-job-type-version={}'.format(job_type.version))\n main_label = DockerParameter('label', 'scale-task-type=main')\n if nvidia_docker_label:\n nvidia_runtime_param = DockerParameter('runtime', 'nvidia')\n config.add_to_task('main', docker_params=[job_id_label, job_type_name_label, job_type_version_label,\n job_execution_id_label, main_label, nvidia_docker_label, nvidia_runtime_param])\n else:\n config.add_to_task('main', docker_params=[job_id_label, job_type_name_label, job_type_version_label,\n job_execution_id_label, main_label])\n\n if not job_type.is_system:\n pre_label = DockerParameter('label', 'scale-task-type=pre')\n post_label = DockerParameter('label', 'scale-task-type=post')\n config.add_to_task('pre', docker_params=[job_id_label, job_type_name_label, job_type_version_label,\n job_execution_id_label, pre_label])\n config.add_to_task('post', docker_params=[job_id_label, job_type_name_label, job_type_version_label,\n job_execution_id_label, post_label])\n\n # Configure tasks for logging\n if 
settings.LOGGING_ADDRESS is not None:\n log_driver = DockerParameter('log-driver', 'fluentd')\n fluent_precision = DockerParameter('log-opt', 'fluentd-sub-second-precision=true')\n log_address = DockerParameter('log-opt', 'fluentd-address=%s' % settings.LOGGING_ADDRESS)\n if not job_type.is_system:\n pre_task_tag = DockerParameter('log-opt', 'tag=%s|%s|%s|%s|%s' % (config.get_task_id('pre'),\n job_type.name,\n job_type.version,\n job_exe.job_id,\n job_exe.exe_num))\n config.add_to_task('pre', docker_params=[log_driver, fluent_precision, log_address, pre_task_tag])\n post_task_tag = DockerParameter('log-opt', 'tag=%s|%s|%s|%s|%s' % (config.get_task_id('post'),\n job_type.name,\n job_type.version,\n job_exe.job_id,\n job_exe.exe_num))\n config.add_to_task('post', docker_params=[log_driver, fluent_precision, log_address, post_task_tag])\n # TODO: remove es_urls parameter when Scale no longer supports old style job types\n\n # Post task needs ElasticSearch URL to grab logs for old artifact registration\n es_param = DockerParameter('env', 'ELASTICSEARCH_URL=%s' % settings.ELASTICSEARCH_URL)\n config.add_to_task('post', docker_params=[es_param])\n main_task_tag = DockerParameter('log-opt', 'tag=%s|%s|%s|%s|%s' % (config.get_task_id('main'),\n job_type.name,\n job_type.version,\n job_exe.job_id,\n job_exe.exe_num))\n config.add_to_task('main', docker_params=[log_driver, fluent_precision, log_address, main_task_tag])",
"def _apply_workload_editing(self):\n\n # call functions corresponding to fill_in types\n for wl_edit_config in self.config.model[\"workload_editing\"]:\n\n # select the appropriate workload_filling strategy\n editor = workload_editing_types[wl_edit_config[\"type\"]](self.workload_set.workloads)\n editor.apply(wl_edit_config)",
"def update_job_state(self, job):",
"def apply_config(\n hostname: str, config: str, dry_run: bool, job_id: Optional[int] = None, scheduled_by: Optional[str] = None\n) -> NornirJobResult:\n logger = get_logger()\n\n with sqla_session() as session:\n dev: Device = session.query(Device).filter(Device.hostname == hostname).one_or_none()\n if not dev:\n raise Exception(\"Device {} not found\".format(hostname))\n elif not (dev.state == DeviceState.MANAGED or dev.state == DeviceState.UNMANAGED):\n raise Exception(\"Device {} is in invalid state: {}\".format(hostname, dev.state))\n\n nr = cnaas_init()\n nr_filtered, _, _ = inventory_selector(nr, hostname=hostname)\n\n try:\n nrresult = nr_filtered.run(task=push_static_config, config=config, dry_run=dry_run, job_id=job_id)\n except Exception as e:\n logger.exception(\"Exception in apply_config: {}\".format(e))\n else:\n if not dry_run:\n with sqla_session() as session:\n dev: Device = session.query(Device).filter(Device.hostname == hostname).one_or_none()\n dev.state = DeviceState.UNMANAGED\n dev.synchronized = False\n\n return NornirJobResult(nrresult=nrresult)",
"def apply_configs(task):\n\n if \"3750X\" in task.host[\"sw_model\"]:\n # run 3750X function\n aaa_3750x(task)\n\n # apply global config file for each host\n task.run(task=napalm_configure, filename=f\"configs/{task.host}_dot1x_global.txt\")\n # print completed hosts\n c_print(f\"*** {task.host}: dot1x global configuration applied ***\")\n # apply snmp config file for each host\n task.run(task=napalm_configure, filename=f\"configs/{task.host}_snmp.txt\")\n # print completed hosts\n c_print(f\"*** {task.host}: SNMP configuration applied ***\")\n # apply interface config file for each host\n task.run(task=napalm_configure, filename=f\"configs/{task.host}_dot1x_intf.txt\")\n # print completed hosts\n c_print(f\"*** {task.host}: dot1x interface configuration applied ***\")",
"def backup(ctx):\n config_path = ctx.obj['config_path']\n logger = ctx.obj['logger']\n\n config = Config(config_path)\n scheduler = BlockingScheduler(\n executors={'default': ThreadPoolExecutor(max_workers=1)}\n )\n\n for job in config.jobs.values():\n logger.info(f'filesystem={job.filesystem} '\n f'cron=\"{job.cron}\" '\n 'msg=\"Adding job.\"')\n scheduler.add_job(job.start, 'cron', **job.cron, coalesce=True)\n\n try:\n scheduler.start()\n except (KeyboardInterrupt, SystemExit):\n pass",
"def build(cfg, jobs, watch):\n libjobs.buildJobs(cfg, jobs, watch)",
"def refresh_queue_status(self):\n \n # Get the jobid and state for all jobs pending/running/completed for the current user\n qacct_stdout=self.run_grid_command_resubmit([\"qacct\",\"-o\",getpass.getuser(),\"-j\",\"*\"])\n \n # info list should include jobid, state, cpus, time, and maxrss\n info=[]\n job_status=[]\n for line in qacct_stdout.split(\"\\n\"):\n if line.startswith(\"jobnumber\") or line.startswith(\"job_number\"):\n if job_status:\n info.append(job_status)\n job_status=[line.rstrip().split()[-1],\"NA\",\"NA\",\"NA\",\"NA\"]\n # get the states for completed jobs\n elif line.startswith(\"failed\"):\n failed_code = line.rstrip().split()[1]\n if failed_code != \"0\":\n if failed_code in [\"37\",\"100\"]:\n job_status[1]=self.job_code_terminated\n else:\n job_status[1]=self.job_code_error\n elif line.startswith(\"deleted_by\"):\n if line.rstrip().split()[-1] != \"NONE\" and job_status[1] == self.job_code_terminated:\n job_status[1]=self.job_code_deleted\n elif line.startswith(\"exit_status\"):\n # only record if status has not yet been set\n if job_status[1] == \"NA\":\n exit_status = line.rstrip().split()[-1]\n if exit_status == \"0\":\n job_status[1]=self.job_code_completed\n elif exit_status == \"137\":\n job_status[1]=self.job_code_terminated\n else:\n job_status[1]=self.job_code_error\n # get the current state for running jobs\n elif line.startswith(\"job_state\"):\n job_status[1]=line.rstrip().split()[-1]\n elif line.startswith(\"slots\"):\n job_status[2]=line.rstrip().split()[-1]\n elif line.startswith(\"ru_wallclock\"):\n try:\n # get the elapsed time in minutes\n job_status[3]=str(float(line.rstrip().split()[-1])/60.0)\n except ValueError:\n job_status[3]=\"NA\"\n elif line.startswith(\"ru_maxrss\"):\n job_status[4]=line.rstrip().split()[-1]+\"K\"\n \n if job_status:\n info.append(job_status)\n\n return info",
"def commit_pending_nic_changes(self, nic_id, reboot=False):\n return self._job_mgmt.create_config_job(\n resource_uri=uris.DCIM_NICService,\n cim_creation_class_name='DCIM_NICService',\n cim_name='DCIM:NICService',\n target=nic_id,\n reboot=reboot)",
"def update_fcoe_configs(self):\n # Nothing to be done if no reordering has occurred.\n reordered = self.udev.reordered_devices\n if not reordered:\n return\n\n # Skip if we have already completed this stage\n if self.fcoe_confs:\n return\n\n # Generate candidate list of fcoe conf files, with\n # associated rule, that need to be processed\n reordered_files = tuple((r, os.path.join(self.syspaths.fcoe_dir,\n \"cfg-%s\" % r['from']))\n for r in reordered)\n\n # At this stage changes have been prepared but are not yet\n # committed to disk\n self._fcoe_confs = self._process_candidate_conf_files(reordered_files)",
"def defaultconfig(self):\r\n\r\n config_data = {\r\n \"path_to_database\": \"FUDB/FOLLOWUP.DB\",\r\n \"path_to_frontend\": \"FUDB/\",\r\n \"path_to_dcs_info\": \"FUDB/\",\r\n \"path_to_bin\": \"bin/\",\r\n \"path_to_excels_exported_from_database\": \"excels exported/\",\r\n \"path_to_excels_to_be_imported_in_database\": \"excels to be imported/\",\r\n \"path_to_new_opfiles\": \"DC BATCHES IN WORK/0 NEW/\",\r\n \"path_to_batches_unassigned\": \"DC BATCHES IN WORK/1 UNASSIGNED/\",\r\n \"path_to_batches_prepfiles\": \"DC BATCHES IN WORK/2 PREPARED FILES/\",\r\n \"path_to_batches_assigned\": \"DC BATCHES IN WORK/3 ASSIGNED/\",\r\n \"path_to_batches_tobechecked\": \"DC BATCHES IN WORK/4 TO BE CHECKED/\",\r\n \"path_to_batches_tbimported\": \"DC BATCHES IN WORK/5 TO BE IMPORTED/\",\r\n \"path_to_batches_finished\": \"DC BATCHES IN WORK/6 FINISHED/\",\r\n \"path_to_batches_instandby\": \"DC BATCHES IN WORK/7 IN STANDBY/\",\r\n \"path_to_batches_unrecordable\": \"DC BATCHES IN WORK/8 UNRECORDABLE/\",\r\n \"batch_status_options_responsible\": \"PREP. OP FILE, IMPORTATION & SPLIT FILE, RELIABILITY & DATA UPGRADE, CHECK OP FILE, CHECK SPLIT FILE, CHECK FRONT END, **TO BE CHECKED\",\r\n \"batch_status_options_proofreader\": \"OP FILE OK, SPLIT FILE OK, FRONT END OK, **TO BE IMPORTED, **FINISHED, **REWORK, **STANDBY, **UNRECORDABLE\",\r\n \"batch_status_options_overall\": \"ONGOING, STANDBY, FINISHED, UNRECORDABLE\",\r\n \"aircrafts\": \"A300, A300-600, A310, A320, A330, A340, A350, A380\",\r\n \"split_batch_factor\": \"2, 3, 4, 5, 6, 7, 8, 9\",\r\n \"IDlentgh\": \"6\",\r\n \"port\": \"5000\"\r\n }\r\n \r\n if not os.path.isfile(os.path.join(self.cwd, \"config.json\")):\r\n self.func.write_json(config_data, self.cwd, fname=\"config.json\")",
"def build_and_boot(self, config_path):\n try:\n onlinecpus = int(self.cv_HOST.host_run_command(\"lscpu --online -e|wc -l\")[-1])\n except Exception:\n onlinecpus = 20\n\n self.cv_HOST.host_run_command(f\"cd {config_path}\")\n self.cv_HOST.host_run_command(\"make olddefconfig\")\n try:\n self.cv_HOST.host_run_command(\"make -j %s\" %onlinecpus , timeout=self.host_cmd_timeout)\n cmd = f\"make -j modules_install && make install\"\n if not self.cv_HOST.host_run_command(cmd):\n self.fail(\"module installation failed\")\n except Exception:\n self.fail(\"compile and build of gcov kernel failed\")",
"def refresh_config(self):\n\t\treturn Job(SDK.PrlVm_RefreshConfig(self.handle)[0])",
"def _configure_regular_job(config, job_exe, job_type, system_logging_level):\n config.create_tasks(['pull', 'pre', 'main', 'post'])\n config.add_to_task('pull', args=create_pull_command(job_exe.docker_image))\n config.add_to_task('pre', args=PRE_TASK_COMMAND_ARGS)\n config.add_to_task('post', args=POST_TASK_COMMAND_ARGS)\n\n # Configure input workspaces\n ro_input_workspaces = {}\n rw_input_workspaces = {}\n for input_workspace in config.get_input_workspace_names():\n ro_input_workspaces[input_workspace] = TaskWorkspace(input_workspace, MODE_RO)\n rw_input_workspaces[input_workspace] = TaskWorkspace(input_workspace, MODE_RW)\n config.add_to_task('pre', workspaces=ro_input_workspaces)\n config.add_to_task('main', workspaces=ro_input_workspaces)\n # Post tasks have access to input workspaces in case input files need moved as part of parse results\n config.add_to_task('post', workspaces=rw_input_workspaces)\n\n # Configure output workspaces\n output_workspaces = {}\n for output_workspace in config.get_output_workspace_names():\n output_workspaces[output_workspace] = TaskWorkspace(output_workspace, MODE_RW)\n config.add_to_task('post', workspaces=output_workspaces)\n\n # Configure input/output mounts\n input_mnt_name = 'scale_input_mount'\n output_mnt_name = 'scale_output_mount'\n input_vol_name = get_job_exe_input_vol_name(job_exe)\n output_vol_name = get_job_exe_output_vol_name(job_exe)\n input_vol_ro = Volume(input_vol_name, SCALE_JOB_EXE_INPUT_PATH, MODE_RO, is_host=False)\n input_vol_rw = Volume(input_vol_name, SCALE_JOB_EXE_INPUT_PATH, MODE_RW, is_host=False)\n output_vol_ro = Volume(output_vol_name, SCALE_JOB_EXE_OUTPUT_PATH, MODE_RO, is_host=False)\n output_vol_rw = Volume(output_vol_name, SCALE_JOB_EXE_OUTPUT_PATH, MODE_RW, is_host=False)\n\n config.add_to_task('pre', mount_volumes={input_mnt_name: input_vol_rw, output_mnt_name: output_vol_rw},\n env_vars={'SYSTEM_LOGGING_LEVEL': system_logging_level})\n config.add_to_task('main', mount_volumes={input_mnt_name: input_vol_ro, output_mnt_name: output_vol_rw})\n config.add_to_task('post', mount_volumes={output_mnt_name: output_vol_ro},\n env_vars={'SYSTEM_LOGGING_LEVEL': system_logging_level})\n\n\n # Configure output directory\n env_vars = {'OUTPUT_DIR': SCALE_JOB_EXE_OUTPUT_PATH, 'INPUT_METADATA': SCALE_INPUT_METADATA_PATH}\n args = config._get_task_dict('main')['args']\n\n args = environment_expansion(env_vars, args)\n\n config.add_to_task('main', args=args, env_vars=env_vars)\n\n # Configure task resources\n resources = job_exe.get_resources()\n # Pull-task and pre-task require full amount of resources\n config.add_to_task('pull', resources=resources)\n config.add_to_task('pre', resources=resources)\n # Main-task no longer requires the input file space\n resources.subtract(NodeResources([Disk(job_exe.input_file_size)]))\n config.add_to_task('main', resources=resources)\n # Post-task no longer requires any disk space\n resources.remove_resource('disk')\n config.add_to_task('post', resources=resources)",
"def _update_deploy_specs(self):\n for cluster in self.CLUSTERS:\n deployspec_name = PushUtil.get_deployspec_name(cluster)\n QueueClusterConfigUpdates.update_deployspec(\n deployspec_name, cluster, self._release_name)",
"def pibooth_configure(cfg):",
"def finish_provisioning():\n # Disable the Bluetooth interface.\n cfg = config.load(writable=True)\n cfg.set(\"bluetooth.enable\", False)\n cfg.commit()"
] | [
"0.6116701",
"0.58427554",
"0.5800472",
"0.57430637",
"0.5740192",
"0.57239777",
"0.5652123",
"0.5632307",
"0.550247",
"0.54999334",
"0.54730254",
"0.54384893",
"0.5424404",
"0.5407469",
"0.53721374",
"0.535191",
"0.53240836",
"0.53215873",
"0.5314885",
"0.52854794",
"0.5280034",
"0.5264259",
"0.5259454",
"0.52416724",
"0.5238246",
"0.52376163",
"0.5198769",
"0.5169443",
"0.5168661",
"0.51681054"
] | 0.61689615 | 0 |
Abandon all pending changes to a NIC. | def abandon_pending_nic_changes(self, nic_id):
self._job_mgmt.delete_pending_config(
resource_uri=uris.DCIM_NICService,
cim_creation_class_name='DCIM_NICService',
cim_name='DCIM:NICService',
target=nic_id) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def forget(self):\n self.ingress_tbl.clear()\n self.rootsw_tbl.clear()",
"def wifi_off(self):\n self._clear_read_buffer()\n self._write_cmd(\"PE00\")\n time.sleep(100e-3)",
"def off_all(self):\n self._set_status(\"off\", \"11111111\")",
"def test_cancel_changes(self):\n with InterfacesSettings() as s:\n ActionChains(browser.driver).drag_and_drop(\n s.interfaces[0].networks['public'],\n s.interfaces[1].networks_box).perform()\n ActionChains(browser.driver).drag_and_drop(\n s.interfaces[0].networks['storage'],\n s.interfaces[2].networks_box).perform()\n\n s.cancel_changes.click()\n time.sleep(1)\n self.assertIn(\n 'storage', s.interfaces[0].networks,\n 'storage at eht0')\n self.assertIn(\n 'public', s.interfaces[0].networks,\n 'public at eht0')",
"def reset_working_packets(self):\n self._write_packet = None\n self._send_packet = None",
"def unblockAll():\n result = subprocess.Popen(\"/sbin/iptables -F INPUT 2>&1\", shell=True, stdout=subprocess.PIPE).stdout.read()\n if result.strip() != \"\":\n logger.error(\"Could not flush INPUT chain. Error: %s.\" % (result))\n result = subprocess.Popen(\"/usr/sbin/ipset destroy 2>&1\", shell=True, stdout=subprocess.PIPE).stdout.read()\n if result.strip() != \"\":\n logger.error(\"Could not destroy all ipsets. Error: %s.\" % (result))\n sys.exit(255)",
"def stop_network_nat(self):\n\t\tcmd = [\"/sbin/iptables\",\"-t\",\"nat\",\"-F\"]\n\t\toutput = self.check_output_safe(cmd)\n\t\tself.log.info(\"iptalbes fllushed.\")",
"async def async_turn_off(self):\n path = \"/ip/firewall/nat\"\n param = \".id\"\n value = None\n for uid in self._ctrl.data[\"nat\"]:\n if (\n self._ctrl.data[\"nat\"][uid][\"name\"]\n == f\"{self._data['protocol']}:{self._data['dst-port']}\"\n ):\n value = self._ctrl.data[\"nat\"][uid][\".id\"]\n\n mod_param = \"disabled\"\n mod_value = True\n self._ctrl.set_value(path, param, value, mod_param, mod_value)\n await self._ctrl.async_update()",
"def ethernet_off(self):\n if not self.healthy:\n self.health_check()\n if not self._ethernet_switch:\n raise errors.CapabilityNotReadyError(\n device_name=self._device_name,\n msg=\"Not set up for ethernet switching.\")\n self._ethernet_switch.switch_power.power_off(self.ethernet_port_number)",
"def killCAN(mIface, bbid):\n for i in xrange(100):\n mIface.can_pass(bbid, ModuleIface.CAN_NONE)",
"def _drop_io_state(self, state):\n if self._state & state:\n self._state = self._state & (~state)\n self._update_handler(self._state)",
"def rc_off(self):\n # reset control values\n channels = [1500] * 8\n controlout = OverrideRCIn(channels=channels)\n self.contolp.publish(controlout)\n self.rate.sleep()\n # send twice to make sure\n controlout = OverrideRCIn(channels=channels)\n self.contolp.publish(controlout)",
"def forget_unicast_address(self):\n self.send_packet('\\xb3')",
"def bcp_reset(self):\n self.machine.bcp.transport.send_to_all_clients(\"reset\")",
"def turn_off(self):\n self.set_pin(0, -1)\n self.set_pin(1, -1)\n self.set_pin(2, -1)",
"def cancelUnblockVis(self):\n if self.__nextSetZoneDoneEvent is not None:\n self.ignore(self.__nextSetZoneDoneEvent)\n self.__nextSetZoneDoneEvent = None",
"def kill_pending(self):\n for req in self._outbox:\n if not req.Test():\n req.Cancel()\n self._outbox = []",
"def reset(self):\n print('Network reset to its original copy')\n self.net = self.copy.copy()\n self.current_threshold = None\n self.method = None",
"def turn_off(self):\n self.handleCommand(1)\n self._state = STATE_OFF",
"async def async_turn_off(self):\n path = \"/interface\"\n param = \"default-name\"\n if \"-\" in self._data[\"port-mac-address\"]:\n param = \"name\"\n value = self._data[param]\n mod_param = \"disabled\"\n mod_value = True\n self._ctrl.set_value(path, param, value, mod_param, mod_value)\n\n if self._data[\"poe-out\"] == \"auto-on\":\n path = \"/interface/ethernet\"\n self._ctrl.set_value(path, param, value, \"poe-out\", \"off\")\n\n await self._ctrl.async_update()",
"def unmask_all(self):\n self.Active.mask = False\n self.Confirmed.mask = False\n self.Deaths.mask = False\n self.NewDeaths.mask = False\n self.NewCases.mask = False",
"def reset(self):\n for layer in self.network:\n layer.clean()",
"def off(self):\n self._set_state(on=False)",
"def unrequest_changes(self):\n self._check_if_open()\n return super(BitbucketCloudBase, self).delete(\"request-changes\")",
"def disconnect(self):\n \n self.net.active(False)",
"def turn_off(self):\n self._state = False\n if(self._device['type'] == '_DT-PLUG' or self._device['type'] == '_THIMR'):\n self._send_cmd(self._device, 'cmd=ctrl&devices={[' + self._device[\"sid\"] + ']}&op={\"cmd\":5,\"op\":0 }', 5)\n if(self._device['type'] == '_REALY2' or self._device['type'] == '_REALY4'): \n self._send_cmd(self._device, 'cmd=ctrl&devices={[' + self._device[\"sid\"] + ']}&op={\"cmd\":5,\"'+ self._data_key +'\":0 }', 5)",
"def interrupt(v):\n print(\" \" + bcolors.OKBLUE + \"[!] Detected CTRL+C ! restoring setting, please wait...\" + bcolors.ENDC)\n bash = \"ip link delete dummy type dummy\"\n os.system(bash)\n if v.spoof:\n restoreSpoof(v)\n if v.ntpStatus:\n ntpToggle(v)\n print(\" \" + bcolors.OKGREEN + \"Done\")\n print(\" --------------------------------------------------------\" + bcolors.ENDC)\n exit()",
"def soft_reset():",
"def off(self):\n self.rs485.write_command('#{}bf'.format(self.address))\n time.sleep(1)",
"def setOff(self, command):\r\n self.setDriver('ST', 0)"
] | [
"0.6279971",
"0.6219855",
"0.5948782",
"0.5866468",
"0.5858203",
"0.58529437",
"0.5757653",
"0.5757473",
"0.57033193",
"0.5649227",
"0.5646219",
"0.5580135",
"0.5578004",
"0.5567007",
"0.5552631",
"0.5535985",
"0.5527143",
"0.5524624",
"0.5520514",
"0.5509373",
"0.5508314",
"0.55065256",
"0.5501202",
"0.54924554",
"0.5461233",
"0.5451795",
"0.54501617",
"0.54468596",
"0.5440032",
"0.54355395"
] | 0.6980403 | 0 |
Apply all pending changes to a NIC by creating a configuration job. | def commit_pending_nic_changes(self, nic_id, reboot=False):
return self._job_mgmt.create_config_job(
resource_uri=uris.DCIM_NICService,
cim_creation_class_name='DCIM_NICService',
cim_name='DCIM:NICService',
target=nic_id,
reboot=reboot) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def apply(self, cleanup=False, activate=True):\n logger.info('applying network configs...')\n restart_interfaces = []\n restart_bridges = []\n update_files = {}\n all_file_names = []\n\n for interface_name, iface_data in self.interface_data.iteritems():\n route_data = self.route_data.get(interface_name, '')\n interface_path = ifcfg_config_path(interface_name)\n route_path = route_config_path(interface_name)\n all_file_names.append(interface_path)\n all_file_names.append(route_path)\n if (utils.diff(interface_path, iface_data) or\n utils.diff(route_path, route_data)):\n restart_interfaces.append(interface_name)\n restart_interfaces.extend(self.child_members(interface_name))\n update_files[interface_path] = iface_data\n update_files[route_path] = route_data\n logger.info('No changes required for interface: %s' %\n interface_name)\n\n for bridge_name, bridge_data in self.bridge_data.iteritems():\n route_data = self.route_data.get(bridge_name, '')\n bridge_path = bridge_config_path(bridge_name)\n bridge_route_path = route_config_path(bridge_name)\n all_file_names.append(bridge_path)\n all_file_names.append(bridge_route_path)\n if (utils.diff(bridge_path, bridge_data) or\n utils.diff(bridge_route_path, route_data)):\n restart_bridges.append(bridge_name)\n restart_interfaces.extend(self.child_members(bridge_name))\n update_files[bridge_path] = bridge_data\n update_files[bridge_route_path] = route_data\n logger.info('No changes required for bridge: %s' % bridge_name)\n\n if cleanup:\n for ifcfg_file in glob.iglob(cleanup_pattern()):\n if ifcfg_file not in all_file_names:\n interface_name = ifcfg_file[len(cleanup_pattern()) - 1:]\n if interface_name != 'lo':\n logger.info('cleaning up interface: %s'\n % interface_name)\n self.ifdown(interface_name)\n self.remove_config(ifcfg_file)\n\n if activate:\n for interface in restart_interfaces:\n self.ifdown(interface)\n\n for bridge in restart_bridges:\n self.ifdown(bridge, iftype='bridge')\n\n for oldname, newname in self.renamed_interfaces.iteritems():\n self.ifrename(oldname, newname)\n\n for location, data in update_files.iteritems():\n self.write_config(location, data)\n\n if activate:\n for bridge in restart_bridges:\n self.ifup(bridge, iftype='bridge')\n\n for interface in restart_interfaces:\n self.ifup(interface)\n\n return update_files",
"def apply(self):\n changed = False\n job_schedule_exists = False\n results = netapp_utils.get_cserver(self.server)\n cserver = netapp_utils.setup_ontap_zapi(\n module=self.module, vserver=results)\n netapp_utils.ems_log_event(\"na_ontap_job_schedule\", cserver)\n job_details = self.get_job_schedule()\n if job_details:\n job_schedule_exists = True\n if self.state == 'absent': # delete\n changed = True\n elif self.state == 'present': # modify\n if job_details['job_minutes'] != str(self.job_minutes):\n changed = True\n else:\n if self.state == 'present': # create\n changed = True\n if changed:\n if self.module.check_mode:\n pass\n else:\n if self.state == 'present': # execute create\n if not job_schedule_exists:\n self.create_job_schedule()\n else: # execute modify minute\n self.modify_minute_job_schedule()\n elif self.state == 'absent': # execute delete\n self.delete_job_schedule()\n self.module.exit_json(changed=changed)",
"def create_nic_config_job(\n self,\n nic_id,\n reboot=False,\n start_time='TIME_NOW'):\n return self._job_mgmt.create_config_job(\n resource_uri=uris.DCIM_NICService,\n cim_creation_class_name='DCIM_NICService',\n cim_name='DCIM:NICService',\n target=nic_id,\n reboot=reboot,\n start_time=start_time)",
"def abandon_pending_nic_changes(self, nic_id):\n self._job_mgmt.delete_pending_config(\n resource_uri=uris.DCIM_NICService,\n cim_creation_class_name='DCIM_NICService',\n cim_name='DCIM:NICService',\n target=nic_id)",
"def _ApplyNicMods(self, idx, nic, params, private):\n changes = []\n\n for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:\n if key in params:\n changes.append((\"nic.%s/%d\" % (key, idx), params[key]))\n setattr(nic, key, params[key])\n\n new_net = params.get(constants.INIC_NETWORK, nic.network)\n new_net_uuid = self.cfg.LookupNetwork(new_net)\n if new_net_uuid != nic.network:\n changes.append((\"nic.network/%d\" % idx, new_net))\n nic.network = new_net_uuid\n\n if private.filled:\n nic.nicparams = private.filled\n\n for (key, val) in nic.nicparams.items():\n changes.append((\"nic.%s/%d\" % (key, idx), val))\n\n if self.op.hotplug:\n msg = self._HotplugDevice(constants.HOTPLUG_ACTION_MODIFY,\n constants.HOTPLUG_TARGET_NIC,\n nic, None, idx)\n changes.append((\"nic/%d\" % idx, msg))\n\n return changes",
"def apply(self) -> None:\n _ba.apply_config()",
"def multiple_apply_config(self):\n\n self.show_step(1, initialize=True)\n self.env.revert_snapshot(\"reconfiguration_scalability\")\n\n self.show_step(2)\n cluster_id = self.fuel_web.get_last_created_cluster()\n computes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n cluster_id, ['compute'])\n target_compute = computes[0]\n config = utils.get_config_template('nova_disk')\n structured_config_old = get_structured_config_dict(config)\n\n config['nova_config'][\n 'DEFAULT/default_ephemeral_format']['value'] = 'ext3'\n structured_config_new = get_structured_config_dict(config)\n self.fuel_web.client.upload_configuration(config,\n cluster_id,\n node_id=target_compute['id'])\n\n self.show_step(3)\n service_name = 'nova-compute'\n uptimes = self.get_service_uptime([target_compute], service_name)\n\n self.show_step(4)\n task = self.fuel_web.client.apply_configuration(\n cluster_id,\n node_id=target_compute['id'])\n self.fuel_web.assert_task_success(task, timeout=900, interval=5)\n\n self.show_step(5)\n self.check_service_was_restarted([target_compute],\n uptimes, service_name)\n\n self.show_step(6)\n for compute in computes:\n if compute == target_compute:\n self.check_config_on_remote([compute], structured_config_new)\n target_hypervisor_name = compute['fqdn']\n else:\n hypervisor_name = compute['fqdn']\n self.check_config_on_remote([compute], structured_config_old)\n\n self.show_step(7)\n os_conn = os_actions.OpenStackActions(\n self.fuel_web.get_public_vip(cluster_id))\n\n self.show_step(8)\n self.show_step(9)\n self.show_step(10)\n self.show_step(11)\n self.check_nova_ephemeral_disk(os_conn, cluster_id,\n hypervisor_name=target_hypervisor_name,\n fs_type='ext3')\n self.show_step(12)\n self.show_step(13)\n self.show_step(14)\n self.show_step(15)\n self.check_nova_ephemeral_disk(os_conn, cluster_id,\n hypervisor_name=hypervisor_name)\n\n self.env.make_snapshot(\"multiple_apply_config\")",
"def update_ifaces_configs(self):\n # Nothing to be done if no reordering has occurred.\n reordered = self.udev.reordered_devices\n if not reordered:\n return\n\n # Skip if we have already completed this stage\n if self.ifaces_confs:\n return\n\n # Generate candidate list of iface conf files, with\n # associated rule, that need to be processed.\n reordered_files = tuple((r, os.path.join(self.syspaths.ifaces_dir,\n r['from']))\n for r in reordered)\n\n ifaces_confs = self._process_candidate_conf_files(reordered_files)\n\n # Process the main interfaces file, and if it was modified, then\n # include it in the list of interface conf objects to be tracked\n conf = ConfFile(self.syspaths.ifaces_file, self.syspaths)\n conf.replace(self.remap_renamer)\n if conf.dirty:\n ifaces_confs.append(conf)\n\n # At this stage changes have been prepared but are not yet\n # committed to disk\n self._ifaces_confs = ifaces_confs",
"def apply_config(\n hostname: str, config: str, dry_run: bool, job_id: Optional[int] = None, scheduled_by: Optional[str] = None\n) -> NornirJobResult:\n logger = get_logger()\n\n with sqla_session() as session:\n dev: Device = session.query(Device).filter(Device.hostname == hostname).one_or_none()\n if not dev:\n raise Exception(\"Device {} not found\".format(hostname))\n elif not (dev.state == DeviceState.MANAGED or dev.state == DeviceState.UNMANAGED):\n raise Exception(\"Device {} is in invalid state: {}\".format(hostname, dev.state))\n\n nr = cnaas_init()\n nr_filtered, _, _ = inventory_selector(nr, hostname=hostname)\n\n try:\n nrresult = nr_filtered.run(task=push_static_config, config=config, dry_run=dry_run, job_id=job_id)\n except Exception as e:\n logger.exception(\"Exception in apply_config: {}\".format(e))\n else:\n if not dry_run:\n with sqla_session() as session:\n dev: Device = session.query(Device).filter(Device.hostname == hostname).one_or_none()\n dev.state = DeviceState.UNMANAGED\n dev.synchronized = False\n\n return NornirJobResult(nrresult=nrresult)",
"def modif_network(self):\n print \"preparation du fichier network interfaces\"\n if version_os[\"OS\"] == \"CentOS\":\n self.exec_cmd(\"cp %s/etc/sysconfig/network_scripts/ifcfg-eth0 %s/etc/sysconfig/network_scripts/ifcfg-eth0.pre.p2v\" % (self.rep_vhosts_vm,self.rep_vhosts_vm))\n else:\n self.exec_cmd(\"cp %s/etc/network/interfaces %s/etc/network/interfaces.post.p2v\" % (self.rep_vhosts_vm,self.rep_vhosts_vm))\n self.exec_cmd(\"cp %s/etc/network/interfaces.pre.p2v %s/etc/network/interfaces\" % (self.rep_vhosts_vm,self.rep_vhosts_vm))",
"def commit_pending_bios_changes(self, reboot=False, start_time='TIME_NOW'):\n return self._job_mgmt.create_config_job(\n resource_uri=ironic_uris.DCIM_BIOSService,\n cim_creation_class_name='DCIM_BIOSService',\n cim_name='DCIM:BIOSService',\n target=self.BIOS_DEVICE_FQDD,\n reboot=reboot,\n start_time=start_time)",
"def set_new_configuration(self):\r\n with open('new_config.json', 'rt') as jsonfile:\r\n configuration = jsonfile.read()\r\n configuration_data = json.loads(configuration)\r\n ip = IPRoute()\r\n index = ip.link_lookup(ifname='eth0')[0]\r\n ip.link('set', index=index, state='up')\r\n ip.addr('add', index, address=configuration_data[0][0], mask=24)\r\n ip.close()",
"def update_interfaces_config(self):\n\n for i in self._nodes.items():\n node = i[1]\n devices = node[\"devices\"]\n all_devices = devices[\"other_devices\"]\n all_devices.update(devices[\"dpdk_devices\"])\n all_devices.update(devices[\"kernel_devices\"])\n\n current_ifcs = {}\n interfaces = {}\n if \"interfaces\" in node:\n current_ifcs = node[\"interfaces\"]\n if current_ifcs:\n for ifc in current_ifcs.values():\n dvid = ifc[\"pci_address\"]\n if dvid in all_devices:\n VppPCIUtil.vpp_create_interface(\n interfaces, dvid, all_devices[dvid]\n )\n node[\"interfaces\"] = interfaces\n\n self.updateconfig()",
"def run_config(self, device, command, *argv, **kwarg):\n ############# Implement me ################\n if not IxnetworkIxiaClientImpl.ixnet:\n return 0, \"Ixia not connected\"\n params = kwarg[\"params\"]\n if not params or not params[0]:\n return 0, \"Need to specify config file name\"\n param = params[0]\n fname = param[\"config_file_name\"]\n name = os.path.basename(fname)\n if command == \"load_config\":\n files = IxnetworkIxiaClientImpl.session.GetFileList()\n found = False\n for f in files[\"files\"]:\n if f[\"name\"] == name:\n found = True\n break\n if not found:\n out = IxnetworkIxiaClientImpl.session.UploadFile(fname, name)\n out = IxnetworkIxiaClientImpl.ixnet.LoadConfig(Files(name))\n # get the traffic items back\n IxnetworkIxiaClientImpl.tis = IxnetworkIxiaClientImpl.ixnet.Traffic.TrafficItem.find()\n elif command == \"save_config\":\n out = IxnetworkIxiaClientImpl.ixnet.SaveConfig(Files(name))\n out += IxnetworkIxiaClientImpl.session.DownloadFile(name, fname)\n return 0, out",
"def _apply(self):\n s = [(iptables_save, iptables_restore, self.ipv4)]\n if self.use_ipv6:\n s += [(ip6tables_save, ip6tables_restore, self.ipv6)]\n\n for save, restore, tables in s:\n all_tables, _err = save()\n all_lines = all_tables.split('\\n')\n for table_name, table in six.iteritems(tables):\n start, end = self._find_table(all_lines, table_name)\n all_lines[start:end] = self._modify_rules(\n all_lines[start:end], table, table_name)\n table.dirty = False\n restore('\\n'.join(all_lines))",
"def apply_config(dts, acg, xact, action, scratch):\n self.log.debug(\"Apply Config\")\n return rwtypes.RwStatus.SUCCESS",
"def iface_config(self, iface, *args, **kwargs):\n if not set(kwargs).issubset({'intf_ip_addr', 'netns', 'adminMode'}):\n raise NotImplementedError(\"Method is not implemented for current kwargs.\")\n if kwargs.get('netns', False):\n # Create network namespaces for current iface\n self.create_namespaces(iface)\n del kwargs['netns']\n if 'intf_ip_addr' in kwargs:\n kwargs['ipAddr'] = \"{}/24\".format(kwargs['intf_ip_addr'])\n if iface in self.namespaces:\n self._lhost.ui.enter_namespace(self.namespaces[iface])\n self._lhost.ui.modify_ports([iface], **kwargs)\n if iface in self.namespaces:\n self._lhost.ui.exit_namespace()",
"def commit_pending_idrac_changes(\n self,\n idrac_fqdd='iDRAC.Embedded.1',\n reboot=False,\n start_time='TIME_NOW'):\n return self._job_mgmt.create_config_job(\n resource_uri=uris.DCIM_iDRACCardService,\n cim_creation_class_name='DCIM_iDRACCardService',\n cim_name='DCIM:iDRACCardService',\n target=idrac_fqdd,\n reboot=reboot,\n start_time=start_time)",
"def deploy_net(self, desired_config): # pylint: disable=too-many-locals\n self._bigip.refresh_net()\n\n # Get the list of route tasks\n LOGGER.debug(\"Getting route tasks...\")\n existing = self._bigip.get_routes()\n desired = desired_config.get('routes', dict())\n\n (create_routes, update_routes, delete_routes) = (\n self._get_resource_tasks(existing, desired)[0:3])\n\n # Get the list of arp tasks\n LOGGER.debug(\"Getting arp tasks...\")\n existing = self._bigip.get_arps()\n desired = desired_config.get('arps', dict())\n\n (create_arps, update_arps, delete_arps) = (\n self._get_resource_tasks(existing, desired)[0:3])\n\n # Get the list of tunnel tasks\n LOGGER.debug(\"Getting tunnel tasks...\")\n existing = self._bigip.get_fdb_tunnels()\n desired = desired_config.get('fdbTunnels', dict())\n (create_tunnels, update_tunnels, delete_tunnels) = (\n self._get_resource_tasks(existing, desired)[0:3])\n\n # If there are pre-existing (user-created) tunnels that we are\n # managing, we want to only update these tunnels.\n LOGGER.debug(\"Getting pre-existing tunnel update tasks...\")\n desired = desired_config.get('userFdbTunnels', dict())\n update_existing_tunnels = self._get_user_tunnel_tasks(desired)\n\n LOGGER.debug(\"Building task lists...\")\n create_tasks = create_arps + create_tunnels + create_routes\n update_tasks = update_arps + update_tunnels + update_existing_tunnels + update_routes\n delete_tasks = delete_arps + delete_tunnels + delete_routes\n\n taskq_len = len(create_tasks) + len(update_tasks) + len(delete_tasks)\n\n return self._run_tasks(\n taskq_len, create_tasks, update_tasks, delete_tasks)",
"def do_nic_update(cc, args):\n\n patch = utils.args_array_to_patch(args.attributes[0])\n result = cc.nic.update(args.uuid, patch)\n cliutils.print_dict(result)",
"def ipv4_interface_setup(self):\n\n for i in self._nodes.items():\n node = i[1]\n\n # Show the current interfaces with IP addresses\n current_ints = VPPUtil.get_int_ip(node)\n if current_ints != {}:\n print(\"\\nThese are the current interfaces with IP addresses:\")\n for items in sorted(current_ints.items()):\n name = items[0]\n value = items[1]\n if \"address\" not in value:\n address = \"Not Set\"\n else:\n address = value[\"address\"]\n print(\"{:30} {:20} {:10}\".format(name, address, value[\"state\"]))\n question = \"\\nWould you like to keep this configuration \" \"[Y/n]? \"\n answer = self._ask_user_yn(question, \"y\")\n if answer == \"y\":\n continue\n else:\n print(\"\\nThere are currently no interfaces with IP \" \"addresses.\")\n\n # Create a script that add the ip addresses to the interfaces\n # and brings the interfaces up\n ints_with_addrs = self._ipv4_interface_setup_questions(node)\n content = \"\"\n for ints in ints_with_addrs:\n name = ints[\"name\"]\n addr = ints[\"addr\"]\n setipstr = \"set int ip address {} {}\\n\".format(name, addr)\n setintupstr = \"set int state {} up\\n\".format(name)\n content += setipstr + setintupstr\n\n # Write the content to the script\n rootdir = node[\"rootdir\"]\n filename = rootdir + \"/vpp/vpp-config/scripts/set_int_ipv4_and_up\"\n with open(filename, \"w+\") as sfile:\n sfile.write(content)\n\n # Execute the script\n cmd = \"vppctl exec {}\".format(filename)\n (ret, stdout, stderr) = VPPUtil.exec_command(cmd)\n if ret != 0:\n logging.debug(stderr)\n\n print(\"\\nA script as been created at {}\".format(filename))\n print(\"This script can be run using the following:\")\n print(\"vppctl exec {}\\n\".format(filename))",
"def iptables_apply():\n\n with settings(warn_only=True):\n run(\"sudo iptables-restore < /etc/iptables.rules\")",
"def network_config(self):\n\n if self._network_config:\n return self._network_config\n\n interfaces = self.metadata.get('interfaces')\n\n if not interfaces:\n raise Exception(\"Unable to get meta-data from server....\")\n\n # Convert Vultr network configuration to cloudinit.net format\n\n # Example JSON:\n # [\n # {\n # \"ipv4\": {\n # \"additional\": [\n # {\n # \"address\": \"192.0.2.3\",\n # \"netmask\": \"255.255.255.0\"\n # }\n # ],\n # \"address\": \"192.0.2.2\",\n # \"gateway\": \"192.0.2.1\",\n # \"netmask\": \"255.255.255.0\"\n # },\n # \"ipv6\": {\n # \"additional\": [\n # {\n # \"network\": \"2001:0db8:0:2::\",\n # \"prefix\": \"64\"\n # }\n # ],\n # \"address\": \"2001:0db8:0:1:5428:d5ff:fe28:1910\",\n # \"network\": \"2001:0db8:0:1::\",\n # \"prefix\": \"64\"\n # },\n # \"mac\": \"00:00:00:00:00:00\",\n # \"network-type\": \"public\"\n # },\n # ......\n # ]\n\n nic_configs = []\n macs_to_nics = cloudnet.get_interfaces_by_mac()\n LOG.debug(\"nic mapping: %s\", macs_to_nics)\n\n config = []\n for vultr_ip_dict in interfaces:\n mac = vultr_ip_dict[\"mac\"]\n\n if mac not in macs_to_nics:\n raise ValueError(\"Did not find network interface on system \"\n \"with mac '%s'. Cannot apply configuration: %s\"\n % (mac_address, nic))\n if_name = macs_to_nics[mac] # if_name = string 'eth0', ...\n if_config= {\n 'type': 'physical',\n 'mac_address': mac,\n 'name': if_name,\n 'subnets': [{\n 'type': 'dhcp',\n 'control': 'auto',\n }\n ]\n }\n config.append(if_config)\n\n LOG.debug(\"nic '%s' configuration: %s\", if_name, if_config)\n\n LOG.debug(\"added dns servers: %s\", self.dns_servers)\n config.append({'type': 'nameserver', 'address': self.dns_servers})\n\n return {'version': 1, 'config': config}",
"def update(cfg, jobs):\n server = jenkins_utils.server_factory(cfg)\n libjobs.updateJobs(server, jobs)",
"def create_network_postcommit(self, context):\n for _switch in self.switches:\n self._add_to_switch(_switch, context)",
"def main():\n\n # endpdoint = \"restconf/data/ietf-interfaces:interfaces\"\n # endpoint = f\"restconf/data/ietf-interfaces:interfaces/interface={name}\"\n\n if len(argv) > 1:\n try:\n inventory = load_inventory(argv[1])\n except FileExistsError as err:\n print(\"FileExistsError: \", err)\n else:\n print(\"You must provide a path to your inventory file.\")\n sys.exit()\n\n r1 = inventory['dev-r1']\n loop = [interface for interface in r1[\"interface\"] if interface[\"name\"] == \"Loopback0\"][0]\n\n payload = render_payload(\n loop,\n \"interface.j2\"\n )\n\n session = create_session(r1[\"username\"], r1[\"password\"])\n endpoint = f\"restconf/data/ietf-interfaces:interfaces/interface=Loopback0\"\n results = put_request(r1[\"host\"],session, endpoint, payload)\n print(results)\n\n save_endpoint = \"restconf/operations/cisco-ia:save-config/\"\n saved = save_config(r1[\"host\"], session, save_endpoint)\n\n # target_routers = [\"dev-r1\"]\n\n # for host_key, attribs in inventory.items():\n\n # if host_key in target_routers:\n # print(f\"configuring interfaces on {host_key}\")\n\n # # create a session imported from restconf_api\n # session = create_session(attribs)\n\n # # get all interfaces\n # results = get_interface(attribs, session, \"Loopback0\")\n\n # interface = results[\"ietf-interfaces:interface\"]\n\n # print(json.dumps(interface))\n # # convert to yaml\n # # yaml_output = yaml.safe_dump(results)\n # # with open(\"vars/interfaces.yml\", \"w\") as file:\n # # file.write(yaml_output)\n\n # # results = update_interfaces(attribs, session)\n # # print(results.text, results.status_code)\n\n # # print(get_interfaces(attribs, session))",
"def pre_network_ipam_update(self, resource_id, resource_dict):\n pass",
"def test_iosxr_netconf_edit_config(nornir, iosxr_config_payload):\n nr = nornir.filter(name=DEVICE_NAME)\n result = nr.run(netconf_edit_config, config=iosxr_config_payload, target=\"candidate\", xmldict=True)\n assert not result[DEVICE_NAME].result[\"errors\"]\n assert result[DEVICE_NAME].result[\"ok\"]\n\n # print_result(result)\n\n # Commit Config\n result = nr.run(netconf_commit, xmldict=True)\n assert result[DEVICE_NAME].result[\"ok\"]\n print_result(result)",
"def save(self, context=None):\n pass\n # updates = self.obj_get_changes()\n # updated_nic = self.dbapi.update_nic(self.uuid, updates)\n # self._from_db_object(self, updated_nic)",
"def _update_addresses(self, real_ifname, interface, old_interface):\n\n def _gen_cmd(cmd, address):\n \"\"\"\n Generates an `ip addr (add|del) <cidr> dev <ifname>` command.\n \"\"\"\n family = {4: 'inet', 6: 'inet6'}[address[0].version]\n args = ['addr', cmd, '%s/%s' % (address[0], address[1])]\n if family == 'inet' and cmd == 'add':\n args += ['brd', '+']\n args += ['dev', real_ifname]\n if family == 'inet6':\n args = ['-6'] + args\n return args\n\n add = functools.partial(_gen_cmd, 'add')\n delete = functools.partial(_gen_cmd, 'del')\n mutator = lambda a: (a.ip, a.prefixlen)\n\n self._update_set(real_ifname, interface, old_interface,\n 'all_addresses', add, delete, mutator)"
] | [
"0.61803114",
"0.61700237",
"0.61677605",
"0.59301126",
"0.5694504",
"0.56244195",
"0.5596565",
"0.5453252",
"0.54015476",
"0.52718323",
"0.5243111",
"0.52348137",
"0.523452",
"0.51573193",
"0.5134133",
"0.51217157",
"0.5073438",
"0.50459903",
"0.5040554",
"0.49998853",
"0.49991626",
"0.49981514",
"0.49698335",
"0.49542814",
"0.494143",
"0.49223545",
"0.4918528",
"0.4899333",
"0.48955724",
"0.48434287"
] | 0.7144519 | 0 |
Creates a configuration job. In CIM (Common Information Model), weak association is used to name an instance of one class in the context of an instance of another class. SystemName and SystemCreationClassName are the attributes of the scoping system, while Name and CreationClassName are the attributes of the instance of the class on which the CreateTargetedConfigJob method is invoked. | def create_config_job(self,
resource_uri,
cim_creation_class_name,
cim_name,
target,
cim_system_creation_class_name='DCIM_ComputerSystem',
cim_system_name='DCIM:ComputerSystem',
reboot=False,
start_time='TIME_NOW'):
return self._job_mgmt.create_config_job(
resource_uri,
cim_creation_class_name,
cim_name,
target,
cim_system_creation_class_name,
cim_system_name,
reboot,
start_time) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def createJob(self, joboptions, previousId=None):\n root = self.manifest.getRootResource()\n assert self.manifest.tosca\n job = Job(self, root, joboptions, previousId)\n\n if (\n self.manifest.localEnv\n and not joboptions.parentJob\n and not joboptions.startTime\n ):\n logPath = self.manifest.getJobLogPath(job.getStartTime(), \".log\")\n if not os.path.isdir(os.path.dirname(logPath)):\n os.makedirs(os.path.dirname(logPath))\n initLogging(logfile=logPath)\n path = self.manifest.path\n if joboptions.planOnly:\n logger.info(\"creating %s plan for %s\", joboptions.workflow, path)\n else:\n logger.info(\"starting %s job for %s\", joboptions.workflow, path)\n\n WorkflowPlan = Plan.getPlanClassForWorkflow(joboptions.workflow)\n if not WorkflowPlan:\n raise UnfurlError(\"unknown workflow: %s\" % joboptions.workflow)\n job.plan = WorkflowPlan(root, self.manifest.tosca, joboptions)\n return job",
"def create_labeling_job(LabelingJobName=None, LabelAttributeName=None, InputConfig=None, OutputConfig=None, RoleArn=None, LabelCategoryConfigS3Uri=None, StoppingConditions=None, LabelingJobAlgorithmsConfig=None, HumanTaskConfig=None, Tags=None):\n pass",
"def create_compilation_job(CompilationJobName=None, RoleArn=None, InputConfig=None, OutputConfig=None, StoppingCondition=None):\n pass",
"def setup_classic_job(self, create_job_path=True, upload_id=None):\n upload = self.setup_upload(upload_id)\n oqp = OqParams()\n oqp.job_type = \"classical\"\n oqp.upload = upload\n oqp.region_grid_spacing = 0.01\n oqp.min_magnitude = 5.0\n oqp.investigation_time = 50.0\n oqp.component = \"gmroti50\"\n oqp.imt = \"pga\"\n oqp.truncation_type = \"twosided\"\n oqp.truncation_level = 3\n oqp.reference_vs30_value = 760\n oqp.imls = [\n 0.005, 0.007, 0.0098, 0.0137, 0.0192, 0.0269, 0.0376, 0.0527,\n 0.0738, 0.103, 0.145, 0.203, 0.284, 0.397, 0.556, 0.778]\n oqp.poes = [0.01, 0.10]\n oqp.realizations = 1\n from django.contrib.gis import geos\n oqp.region = geos.Polygon(\n ((-122.2, 38.0), (-121.7, 38.0), (-121.7, 37.5),\n (-122.2, 37.5), (-122.2, 38.0)))\n oqp.save()\n job = OqJob(oq_params=oqp, owner=upload.owner, job_type=\"classical\")\n job.save()\n if create_job_path:\n job.path = os.path.join(upload.path, str(job.id))\n os.mkdir(job.path)\n os.chmod(job.path, 0777)\n job.save()\n return job",
"def create_config(self) -> None:\n pass",
"def create_config(self) -> None:\n pass",
"def build_job_configs(self, args):\n job_configs = {}\n\n ttype = args['ttype']\n (targets_yaml, sim) = NAME_FACTORY.resolve_targetfile(args)\n if sim is not None:\n raise ValueError(\"Found 'sim' argument on AnalyzeExtension_SG config.\")\n if targets_yaml is None:\n return job_configs\n\n targets = load_yaml(targets_yaml)\n config_yaml = 'config.yaml'\n\n base_config = dict(roi_baseline=args['roi_baseline'],\n make_plots=args['make_plots'])\n\n for target_name, target_list in targets.items():\n name_keys = dict(target_type=ttype,\n target_name=target_name,\n fullpath=True)\n target_dir = NAME_FACTORY.targetdir(**name_keys)\n config_path = os.path.join(target_dir, config_yaml)\n logfile = make_nfs_path(os.path.join(\n target_dir, \"%s_%s.log\" % (self.linkname, target_name)))\n job_config = base_config.copy()\n job_config.update(dict(config=config_path,\n logfile=logfile))\n job_configs[target_name] = job_config\n\n return job_configs",
"def configure_queued_job(self, job):\n\n config = ExecutionConfiguration()\n data = job.get_job_data()\n\n # Add input file meta-data\n input_files_dict = self._create_input_file_dict(data)\n config.set_input_files(input_files_dict)\n\n # Set up env vars for job's input data\n input_values = data.get_injected_input_values(input_files_dict)\n interface = job.job_type_rev.get_input_interface()\n\n env_vars = {}\n if isinstance(data, JobData):\n # call job.data.job_data.JobData.get_injected_env_vars\n env_vars = data.get_injected_env_vars(input_files_dict, interface)\n else:\n # call old job.configuration.data.job_data.get_injected_env_vars\n # TODO: remove once old JobData class is no longer used\n env_vars = data.get_injected_env_vars(input_files_dict)\n\n task_workspaces = {}\n if job.job_type.is_system:\n # Add any workspaces needed for this system job\n task_workspaces = QueuedExecutionConfigurator._system_job_workspaces(job)\n else:\n # Set any output workspaces needed\n output_workspaces = {}\n if job.input and 'version' in job.input and job.input['version'] == '1.0':\n # Set output workspaces using legacy job data\n self._cache_workspace_names(data.get_output_workspace_ids())\n output_workspaces = {}\n for output, workspace_id in data.get_output_workspaces().items():\n output_workspaces[output] = self._cached_workspace_names[workspace_id]\n config.set_output_workspaces(output_workspaces)\n if not output_workspaces:\n # Set output workspaces from job configuration\n output_workspaces = {}\n job_config = job.get_job_configuration()\n interface = SeedManifest(job.job_type_rev.manifest, do_validate=False)\n for output_name in interface.get_file_output_names():\n output_workspace = job_config.get_output_workspace(output_name)\n if output_workspace:\n output_workspaces[output_name] = output_workspace\n config.set_output_workspaces(output_workspaces)\n\n # Create main task with fields populated from input data\n args = job.get_job_interface().get_injected_command_args(input_values, env_vars)\n config.create_tasks(['main'])\n config.add_to_task('main', args=args, env_vars=env_vars, workspaces=task_workspaces)\n return config",
"def create_configuration(EngineType=None, EngineVersion=None, Name=None, Tags=None):\n pass",
"def write_config_for_job(job_script, filename, include_defaults, fail_on_missing):\n j = Job()\n j.script = job_script\n j._load_script()\n component_names = [c.__class__.__name__ for c in j.components]\n write_config('job.cfg', component_names, include_defaults, fail_on_missing)",
"def create(cls,configuration):\n raise NotImplementedError('Abstract method has not been implemented')",
"def __init__(self, namespace_model_instance=None, nexus=None,\n remote_user=None, remote_pass=None, private_key_file=None,\n delegate=None, default_task_role=None, default_run_from=None):\n \n super(ConfigModel, self).__init__(nexus=nexus)\n self.namespace_model_instance = namespace_model_instance\n self.remote_user = remote_user\n self.remote_pass = remote_pass\n self.private_key_file = private_key_file\n self.default_task_role = default_task_role\n self.default_run_from = default_run_from\n self.delegate = delegate\n clone_dict = {}\n #NOTE! _node_dict is an inverted dictionary (the string keys are\n #stored as values\n for v, k in self._node_dict.items():\n if not isinstance(v, _ConfigTask):\n raise ConfigException(\"'%s' is not a task\" % k)\n clone = v.clone()\n clone._set_delegate(self)\n clone._set_model_instance(self)\n clone_dict[v] = clone\n for etan in v._embedded_exittask_attrnames():\n clone_dict[getattr(v, etan)] = getattr(clone, etan)\n setattr(self, k, clone)\n _ = getattr(self, k) #this primes the reference machinery\n self.dependencies = [d.clone(clone_dict)\n for d in self.get_class_dependencies()]\n #default option values\n opts = object.__getattribute__(self, _config_options)\n for k, v in opts.items():\n if k == _default_task_role and self.default_task_role is None:\n self.default_task_role = v\n elif k == _remote_user and self.remote_user is None:\n self.remote_user = v\n elif k == _private_key_file and self.private_key_file is None:\n self.private_key_file = v\n elif k == _default_run_from and self.default_run_from is None:\n self.default_run_from = v",
"def createCfg_project(self, jobOptions):\n last_line = '%s %s %s %s' % (jobOptions['projection_module'], self.era, jobOptions['histName'], jobOptions['outputFile'])\n if self.projection_module != 'puHist':\n last_line += ' %.6e' % jobOptions['ref_genWeight']\n lines = jobOptions['inputFiles'] + [ '', last_line ]\n assert(len(lines) >= 3)\n createFile(jobOptions['cfgFile_path'], lines, nofNewLines = 1)",
"def _create_job(self,\n name,\n environment_string,\n description='',\n platform='LINUX'):\n job = data_types.Job()\n job.name = name\n if environment_string.strip():\n job.environment_string = environment_string\n job.platform = platform\n job.descripton = description\n job.put()\n\n return job",
"def create_job_configuration(start_time: str) -> ItemsJobConfig:\n # Create job configuration\n config = {\n 'source_url': os.getenv(\"ITEMS_SOURCE_URL\", default=\"\"),\n 'dest_new_url': os.getenv(\"ITEMS_DEST_NEW_URL\", default=\"\"),\n 'dest_updates_url': os.getenv(\"ITEMS_DEST_UPDATES_URL\", default=\"\"),\n 'caiasoft_api_key': os.getenv('CAIASOFT_API_KEY', default=\"\"),\n 'storage_dir': os.getenv('ITEMS_STORAGE_DIR', default=\"\"),\n 'last_success_lookup': os.getenv('ITEMS_LAST_SUCCESS_LOOKUP', default=\"\")\n }\n\n job_id_prefix = \"caia.items\"\n\n job_config = ItemsJobConfig(config, job_id_prefix, start_time)\n logger.info(f\"Job Id: {job_config['job_id']}\")\n logger.debug(f\"job_config={job_config}\")\n\n return job_config",
"def _create_job(self, tjc, machine, build_url, project, revision, build_type, build_abi,\n build_platform, build_sdk, builder_type, t):\n logger = utils.getLogger()\n logger.debug('AutophoneTreeherder.create_job: %s', t)\n assert self.url and revision, 'AutophoneTreeherder.create_job: no url/revision'\n\n if len(revision) != 40:\n logger.warning('AutophoneTreeherder using revision with length %d: %s',\n len(revision), revision)\n\n logger.info('creating Treeherder job %s for %s %s, revision: %s',\n t.job_guid, t.name, project, revision)\n if not t.job_guid:\n logger.error(\n '_create_job: invalid job_guid %s for test %s, '\n 'machine: %s, build_url: %s, project: %s, revision: %s, '\n 'build_type: %s, build_abi: %s, build_platform: %s, '\n 'build_sdk: %s, builder_type: %s',\n t.name, t.job_guid, machine, build_url, project,\n revision, build_type, build_abi, build_platform,\n build_sdk, builder_type)\n raise Exception('Can not create Treeherder Job with invalid test job_guid')\n\n logger.debug('AutophoneTreeherder.create_job: test config_file=%s, config sections=%s',\n t.config_file, t.cfg.sections())\n\n tj = tjc.get_job()\n tj.add_tier(self.options.treeherder_tier)\n tj.add_revision(revision)\n tj.add_project(project)\n tj.add_job_guid(t.job_guid)\n tj.add_job_name(t.job_name)\n tj.add_job_symbol(t.job_symbol)\n tj.add_group_name(t.group_name)\n tj.add_group_symbol(t.group_symbol)\n tj.add_product_name('fennec')\n\n tj.add_machine(machine)\n build_platform = platform(architecture(build_abi),\n build_platform,\n build_sdk)\n build_architecture = architecture(build_abi)\n machine_platform = platform(architecture(t.phone.abi),\n t.phone.os,\n build_sdk)\n machine_architecture = architecture(t.phone.abi)\n tj.add_build_info('android', build_platform, build_architecture)\n tj.add_machine_info('android', machine_platform, machine_architecture)\n tj.add_option_collection({build_type: True})\n\n # Add job details for storing information regarding the build (so we can\n # retrigger them)\n job_details = [\n {'title': title, 'value': str(value)} for (title, value) in [\n ('config_file', t.config_file),\n ('chunk', t.chunk),\n ('builder_type', builder_type)\n ]\n ]\n job_details.append({'title': 'build_url',\n 'value': 'build_url',\n 'url': build_url})\n tj.add_artifact('Job Info', 'json', {\n 'job_details': job_details\n })\n\n return tj",
"def createConfiguration(self, sourceType, **params):\n loader = CMSSWAPILoader(self.cmssw['ScramArch'],\n self.cmssw['CMSSWVersion'],\n self.cmssw['CMSPath'])\n\n try:\n loader.load()\n except Exception, ex:\n logging.error(\"Couldn't load CMSSW libraries: %s\" % ex)\n return None\n\n import FWCore.ParameterSet.Config as cms\n\n # building process\n # either from config file, from the framework(release) or from scratch\n if params.has_key('configFile'):\n\n if params.has_key('outputModuleDetails'):\n\n self.process = self.createProcessFromFile(params['configFile'],\n stripOutputModules = True)\n\n elif params.has_key('outputModuleTemplate'):\n\n self.process = self.createProcessFromFile(params['configFile'],\n stripOutputModules = False)\n\n moduleTemplate = params['outputModuleTemplate']\n\n # override primary dataset\n if moduleTemplate.has_key('primaryDataset'):\n for outputModule in self.process.outputModules.values():\n outputModule.dataset.primaryDataset = cms.untracked.string(moduleTemplate['primaryDataset'])\n\n # override compression level\n## if moduleTemplate.has_key('compressionLevel'):\n## for outputModule in self.process.outputModules.values():\n## outputModule.compressionLevel = cms.untracked.int32(moduleTemplate['compressionLevel'])\n\n else:\n logging.error(\"Neither output module details or template specified\")\n \n elif params.has_key('processName'):\n\n if params.has_key('outputModuleDetails'):\n\n self.process = self.createProcessFromScratch(params['processName'],\n configName = params.get('configName', 'auto-config'),\n configVersion = params.get(\"configVersion\", time.strftime(\"%d-%b-%Y-%H:%M:%S\")))\n\n else:\n logging.error(\"No output module details specified\")\n\n else:\n logging.error(\"Neither config file, framework config code or process name specified\")\n\n # check if it worked\n if self.process == None:\n logging.error(\"Cannot build process, bailing out\")\n loader.unload()\n return None\n\n # recreate source\n self.process.source = cms.Source(sourceType, fileNames = cms.untracked.vstring())\n\n # configure firstFreeID (works around a bug processing 2_0_X streamer files)\n if ( self.cmssw['version1'] == 2 and self.cmssw['version2'] == 0 ) \\\n and sourceType == 'NewEventStreamFileReader':\n\n self.process.source.firstFreeID = cms.untracked.uint32(65536)\n\n # configure lazy download\n # (supported earlier than 2_1_8, but we don't use these releases anymore)\n if ( self.cmssw['version1'] > 2 ) \\\n or ( self.cmssw['version1'] == 2 and self.cmssw['version2'] > 1 ) \\\n or ( self.cmssw['version1'] == 2 and self.cmssw['version2'] == 1 and self.cmssw['version3'] >= 8 ):\n\n self.configureLazyDownload(\n params.get(\"enableLazyDownload\", None) == True\n )\n\n # configure fastCloning and noEventSort\n # (supported earlier than 2_1_8, but we don't use these releases anymore)\n fastCloning = False\n if ( ( self.cmssw['version1'] > 2 ) \\\n or ( self.cmssw['version1'] == 2 and self.cmssw['version2'] > 1 ) \\\n or ( self.cmssw['version1'] == 2 and self.cmssw['version2'] == 1 and self.cmssw['version3'] >= 8 ) ) \\\n and sourceType == 'PoolSource':\n\n fastCloning = True\n if params.get(\"noEventSort\", None) == True:\n self.process.source.noEventSort = cms.untracked.bool(True)\n\n # add output modules\n if params.has_key('outputModuleDetails'):\n\n for moduleName, moduleDetails in params['outputModuleDetails'].items():\n logging.debug(\"Adding output module %s to workflow\" % moduleName)\n self.addOutputModule(moduleName,\n moduleDetails['dataTier'],\n 
primaryDataset = moduleDetails.get(\"primaryDataset\", None),\n selectEvents = moduleDetails.get(\"SelectEvents\", None),\n setEventContentInOutput = params.get(\"setEventContentInOutput\", False),\n compressionLevel = moduleDetails.get(\"compressionLevel\", None))\n\n # apply generic modifiers to output modules\n # at the moment only fastCloning\n self.modifyOutputModules(fastCloning = fastCloning)\n\n cfgInterface = CMSSWConfig()\n loadedConfig = cfgInterface.loadConfiguration(self.process)\n loadedConfig.validateForProduction()\n\n # complete the output module info in workflow\n for moduleName, outMod in cfgInterface.outputModules.items():\n\n # easy for output modules we added\n if params.has_key(\"outputModuleDetails\"):\n outMod.update(params[\"outputModuleDetails\"][moduleName])\n\n # if we kept the output modules from the configs it's harder\n # need to combine info from template and config (processed dataset)\n elif params.has_key('outputModuleTemplate'):\n\n template = params[\"outputModuleTemplate\"]\n outMod.update(template)\n\n if outMod.has_key(\"processingString\"):\n processingString = str(outMod[\"processingString\"])\n elif outMod.has_key(\"filterName\"):\n processingString = str(outMod[\"filterName\"])\n else:\n processingString = None\n\n if processingString == None:\n outMod['processedDataset'] = \"%s-%s\" % (template[\"acquisitionEra\"],\n template[\"processingVersion\"])\n else:\n outMod['processedDataset'] = \"%s-%s-%s\" % (template[\"acquisitionEra\"],\n processingString,\n template[\"processingVersion\"])\n\n if params.has_key(\"configFile\"):\n cfgInterface.originalCfg = file(params['configFile']).read()\n\n loader.unload()\n \n return cfgInterface",
"def build_configuration(\n cls,\n class_name,\n module_name=\"great_expectations.datasource\",\n data_asset_type=None,\n batch_kwargs_generators=None,\n **kwargs,\n ):\n verify_dynamic_loading_support(module_name=module_name)\n class_ = load_class(class_name=class_name, module_name=module_name)\n configuration = class_.build_configuration(\n data_asset_type=data_asset_type,\n batch_kwargs_generators=batch_kwargs_generators,\n **kwargs,\n )\n return configuration",
"def Create(self,\n parent,\n specs=None,\n config_path=None,\n display_name=None,\n python_package_uri=None,\n args=None,\n command=None,\n kms_key_name=None,\n network=None,\n service_account=None):\n if not python_package_uri:\n python_package_uri = []\n\n job_spec = self.messages.GoogleCloudAiplatformV1beta1CustomJobSpec()\n job_spec.network = network\n job_spec.serviceAccount = service_account\n if config_path:\n data = yaml.load_path(config_path)\n if data:\n job_spec = messages_util.DictToMessageWithErrorCheck(\n data, self.messages.GoogleCloudAiplatformV1beta1CustomJobSpec)\n\n worker_pool_specs = []\n if specs is not None:\n for spec in specs:\n machine_type = spec.get('machine-type')\n if not spec.get('replica-count'):\n replica_count = 1\n else:\n replica_count = int(spec.get('replica-count'))\n container_image_uri = spec.get('container-image-uri')\n python_image_uri = spec.get('python-image-uri')\n python_module = spec.get('python-module')\n machine_spec = (\n self.messages.GoogleCloudAiplatformV1beta1MachineSpec(\n machineType=machine_type))\n\n worker_pool_spec = (\n self.messages.GoogleCloudAiplatformV1beta1WorkerPoolSpec(\n replicaCount=replica_count, machineSpec=machine_spec))\n if container_image_uri:\n worker_pool_spec.containerSpec = (\n self.messages.GoogleCloudAiplatformV1beta1ContainerSpec(\n imageUri=container_image_uri))\n if args is not None:\n worker_pool_spec.containerSpec.args = args\n if command is not None:\n worker_pool_spec.containerSpec.command = command\n\n if python_package_uri or python_image_uri or python_module:\n worker_pool_spec.pythonPackageSpec = (\n self.messages.GoogleCloudAiplatformV1beta1PythonPackageSpec(\n executorImageUri=python_image_uri,\n packageUris=python_package_uri,\n pythonModule=python_module))\n if args is not None:\n worker_pool_spec.pythonPackageSpec.args = args\n\n worker_pool_specs.append(worker_pool_spec)\n\n if worker_pool_specs:\n job_spec.workerPoolSpecs = worker_pool_specs\n validation.ValidateWorkerPoolSpec(job_spec.workerPoolSpecs)\n\n custom_job = (\n self.messages.GoogleCloudAiplatformV1beta1CustomJob(\n displayName=display_name,\n jobSpec=job_spec))\n\n if kms_key_name is not None:\n custom_job.encryptionSpec = self.messages.GoogleCloudAiplatformV1beta1EncryptionSpec(\n kmsKeyName=kms_key_name)\n\n return self._service.Create(\n self.messages.AiplatformProjectsLocationsCustomJobsCreateRequest(\n parent=parent, googleCloudAiplatformV1beta1CustomJob=custom_job))",
"def build_job_configs(self, args):\n job_configs = {}\n\n ttype = args['ttype']\n (targets_yaml, sim) = NAME_FACTORY.resolve_targetfile(\n args, require_sim_name=True)\n if targets_yaml is None:\n return job_configs\n\n specconfig = NAME_FACTORY.resolve_specconfig(args)\n\n astro_priors = args['astro_priors']\n write_full = args.get('write_full', False)\n\n targets = load_yaml(targets_yaml)\n base_config = dict(nsims=args['nsims'],\n seed=args['seed'],\n specconfig=specconfig)\n\n for target_name, profile_list in list(targets.items()):\n for profile in profile_list:\n for astro_prior in astro_priors:\n if is_null(astro_prior):\n astro_prior = 'none'\n full_key = \"%s:%s:%s:%s\" % (\n target_name, profile, sim, astro_prior)\n name_keys = dict(target_type=ttype,\n target_name=target_name,\n sim_name=sim,\n profile=profile,\n astro_prior=astro_prior,\n fullpath=True)\n limitfile = NAME_FACTORY.sim_dmlimitsfile(**name_keys)\n first = args['seed']\n last = first + args['nsims'] - 1\n outfile = limitfile.replace(\n '_SEED.fits', '_collected_%06i_%06i.fits' %\n (first, last))\n logfile = make_nfs_path(outfile.replace('.fits', '.log'))\n if not write_full:\n outfile = None\n summaryfile = limitfile.replace(\n '_SEED.fits', '_summary_%06i_%06i.fits' %\n (first, last))\n job_config = base_config.copy()\n job_config.update(dict(limitfile=limitfile,\n astro_prior=astro_prior,\n outfile=outfile,\n summaryfile=summaryfile,\n logfile=logfile))\n job_configs[full_key] = job_config\n\n return job_configs",
"def create_task(self, name, target, config=None, comment=\"\"):\n\n if not config:\n config = \"Full and fast\"\n\n request = \"\"\"<create_task>\n <name>%s</name>\n <comment>%s</comment>\n <config id=\"%s\"/>\n <target id=\"%s\"/>\n </create_task>\"\"\" % (name, comment, config, target)\n\n return self.make_xml_request(request, xml_result=True).get(\"id\")",
"def generate_config(self):\n\n cfgmgr = ConfigManager()\n\n script_dir = os.path.join(cfgmgr.getRoot(), 'rules')\n\n if not os.path.exists(script_dir):\n print('Creating rules directory \\\"{0}\\\"'.format(script_dir))\n\n os.makedirs(script_dir)\n else:\n if not self.getArgs().force:\n sys.stderr.write('Script directory \\\"{0}\\\" already exists.\\n'\n 'Use --force to overwrite current'\n ' scripts\\n'.format(script_dir))\n\n sys.exit(1)\n\n print('Overwriting any scripts in directory \\\"{0}\\\"'.format(\n script_dir))\n\n # Determine UGE cell directory from environment\n if not os.getenv('SGE_ROOT') or not os.getenv('SGE_CELL'):\n print('Error: UGE environment is not sourced', file=sys.stderr)\n\n sys.exit(1)\n\n cell_dir = os.path.join(os.getenv('SGE_ROOT'), os.getenv('SGE_CELL'))\n\n template_vars = {\n 'tortuga_root': cfgmgr.getRoot(),\n 'uge_cell_dir': cell_dir,\n 'script_dir': script_dir,\n 'burst_swprofile': self.getArgs().software_profile,\n 'burst_hwprofile': self.getArgs().hardware_profile,\n 'burst_queue': 'burst.q',\n 'polling_interval': self.getArgs().polling_interval,\n 'slots_per_host': self.getArgs().slots_per_host,\n }\n\n env = Environment(loader=FileSystemLoader('templates'),\n undefined=StrictUndefined)\n\n for filename in glob.glob('templates/*.j2'):\n# print('Processing template {0}'.format(\n# os.path.basename(filename)))\n\n template = env.get_template(os.path.basename(filename))\n\n dstfile = os.path.join(\n script_dir,\n os.path.splitext(os.path.basename(filename))[0])\n\n print(' - writing {0}'.format(os.path.basename(dstfile)))\n\n with open(dstfile, 'w') as outfp:\n template.stream(template_vars).dump(outfp)",
"def test_config_step_create(self):\n\n config_step = self.ConfigStep.create({\n 'name': 'test_step',\n 'job_type': 'create_build',\n 'number_builds': 2,\n 'make_orphan': True,\n })\n\n config = self.Config.create({'name': 'test_config'})\n config_step.create_config_ids = [config.id]\n\n config_step._run_create_build(self.parent_build, '/tmp/essai')\n self.assertEqual(len(self.parent_build.children_ids), 2, 'Two sub-builds should have been generated')\n\n # check that the result will be ignored by parent build\n for child_build in self.parent_build.children_ids:\n self.assertTrue(child_build.orphan_result, 'An orphan result config step should mark the build as orphan_result')\n child_build.local_result = 'ko'\n\n self.assertFalse(self.parent_build.global_result)",
"def create_job(self, context=None):\n return self._client.call_method(\n 'UserAndJobState.create_job',\n [], self._service_ver, context)",
"def __init__(self, name, cfg_class, init_args=None, **kwargs):\n if not issubclass(cfg_class, ConfigModel):\n raise ConfigException(\"The cfg_class parameter isn't a subclass of ConfigModel\")\n super(ConfigClassTask, self).__init__(name, **kwargs)\n self.cfg_class = cfg_class\n self.init_args = None\n self._init_args = init_args if init_args else ()\n self.instance = None\n self.dependencies = []\n self.rendezvous = RendezvousTask(\"{}-rendezvous\".format(name))\n self.graph = None",
"def create_training_job(TrainingJobName=None, HyperParameters=None, AlgorithmSpecification=None, RoleArn=None, InputDataConfig=None, OutputDataConfig=None, ResourceConfig=None, VpcConfig=None, StoppingCondition=None, Tags=None, EnableNetworkIsolation=None):\n pass",
"def create(cfg, jobs):\n server = jenkins_utils.server_factory(cfg)\n libjobs.createJobs(server, jobs)",
"def create_instance(\n self, base_config: dict, labels: dict, wait_for_operation: bool = True\n ) -> Tuple[dict, str]:\n return",
"def _create_job_config(\n self,\n experiment_id: str,\n params: Optional[dict],\n pipeline_package_path: Optional[str],\n pipeline_id: Optional[str],\n version_id: Optional[str],\n enable_caching: Optional[bool],\n ):\n\n class JobConfig:\n\n def __init__(self, spec, resource_references):\n self.spec = spec\n self.resource_references = resource_references\n\n params = params or {}\n pipeline_json_string = None\n if pipeline_package_path:\n pipeline_obj = self._extract_pipeline_yaml(pipeline_package_path)\n\n # Caching option set at submission time overrides the compile time settings.\n if enable_caching is not None:\n self._override_caching_options(pipeline_obj, enable_caching)\n\n pipeline_json_string = json.dumps(pipeline_obj)\n api_params = [\n kfp_server_api.V1Parameter(\n name=sanitize_k8s_name(name=k, allow_capital_underscore=True),\n value=str(v) if type(v) not in (list, dict) else json.dumps(v))\n for k, v in params.items()\n ]\n resource_references = []\n key = kfp_server_api.models.V1ResourceKey(\n id=experiment_id,\n type=kfp_server_api.models.V1ResourceType.EXPERIMENT)\n reference = kfp_server_api.models.V1ResourceReference(\n key=key, relationship=kfp_server_api.models.V1Relationship.OWNER)\n resource_references.append(reference)\n\n if version_id:\n key = kfp_server_api.models.V1ResourceKey(\n id=version_id,\n type=kfp_server_api.models.V1ResourceType.PIPELINE_VERSION)\n reference = kfp_server_api.models.V1ResourceReference(\n key=key,\n relationship=kfp_server_api.models.V1Relationship.CREATOR)\n resource_references.append(reference)\n\n spec = kfp_server_api.models.V1PipelineSpec(\n pipeline_id=pipeline_id,\n workflow_manifest=pipeline_json_string,\n parameters=api_params)\n return JobConfig(spec=spec, resource_references=resource_references)",
"def job_create(self, sender, name=None):\n self._require_running()\n name = name or self.DEFAULT_JOB_NAME\n job_id = uuid.uuid4().hex\n assert job_id not in self._jobs\n assert sender is not None\n assert sender.connection\n job = Job(\n job_id,\n name,\n self._session_root.joinpath(job_id),\n sender,\n self._loop\n )\n self._jobs[job_id] = job\n self._jobs_by_connection[sender.connection][job_id] = job\n self._log.debug('Created job %s', job)\n return job_id"
] | [
"0.5764026",
"0.5601463",
"0.55534786",
"0.55200636",
"0.5493523",
"0.5493523",
"0.54148227",
"0.5414623",
"0.5357444",
"0.5340673",
"0.52934015",
"0.52929705",
"0.5279595",
"0.5178549",
"0.51695275",
"0.515132",
"0.5135416",
"0.51059264",
"0.50979036",
"0.50941366",
"0.5064611",
"0.5034969",
"0.50033766",
"0.49995923",
"0.4997571",
"0.4994028",
"0.497905",
"0.49726704",
"0.49679685",
"0.4951854"
] | 0.7400147 | 0 |
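The row above documents a thin wrapper that delegates a targeted configuration job to a job-management helper. A minimal usage sketch follows, assuming `client` is an object exposing the create_config_job method exactly as shown; the RAID service resource URI, class names, and controller FQDD are illustrative assumptions, not values taken from this data.

    # Sketch only: `client`, the service identifiers and the FQDD are assumed.
    def apply_pending_raid_settings(client):
        # Queue a targeted configuration job against a storage controller and
        # reboot immediately so the pending settings are applied.
        return client.create_config_job(
            resource_uri='http://schemas.dell.com/wbem/wscim/1/cim-schema/2/DCIM_RAIDService',  # assumed URI
            cim_creation_class_name='DCIM_RAIDService',  # CreationClassName of the targeted service (assumed)
            cim_name='DCIM:RAIDService',                 # Name attribute of that service instance (assumed)
            target='RAID.Integrated.1-1',                # FQDD whose pending changes the job applies (assumed)
            reboot=True,
            start_time='TIME_NOW')
    # The scoping-system arguments keep their defaults (DCIM_ComputerSystem /
    # DCIM:ComputerSystem), matching the weak-association naming described in the row above.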
Creates a configuration job for applying all pending changes to a NIC. | def create_nic_config_job(
self,
nic_id,
reboot=False,
start_time='TIME_NOW'):
return self._job_mgmt.create_config_job(
resource_uri=uris.DCIM_NICService,
cim_creation_class_name='DCIM_NICService',
cim_name='DCIM:NICService',
target=nic_id,
reboot=reboot,
start_time=start_time) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def commit_pending_nic_changes(self, nic_id, reboot=False):\n return self._job_mgmt.create_config_job(\n resource_uri=uris.DCIM_NICService,\n cim_creation_class_name='DCIM_NICService',\n cim_name='DCIM:NICService',\n target=nic_id,\n reboot=reboot)",
"def apply(self):\n changed = False\n job_schedule_exists = False\n results = netapp_utils.get_cserver(self.server)\n cserver = netapp_utils.setup_ontap_zapi(\n module=self.module, vserver=results)\n netapp_utils.ems_log_event(\"na_ontap_job_schedule\", cserver)\n job_details = self.get_job_schedule()\n if job_details:\n job_schedule_exists = True\n if self.state == 'absent': # delete\n changed = True\n elif self.state == 'present': # modify\n if job_details['job_minutes'] != str(self.job_minutes):\n changed = True\n else:\n if self.state == 'present': # create\n changed = True\n if changed:\n if self.module.check_mode:\n pass\n else:\n if self.state == 'present': # execute create\n if not job_schedule_exists:\n self.create_job_schedule()\n else: # execute modify minute\n self.modify_minute_job_schedule()\n elif self.state == 'absent': # execute delete\n self.delete_job_schedule()\n self.module.exit_json(changed=changed)",
"def abandon_pending_nic_changes(self, nic_id):\n self._job_mgmt.delete_pending_config(\n resource_uri=uris.DCIM_NICService,\n cim_creation_class_name='DCIM_NICService',\n cim_name='DCIM:NICService',\n target=nic_id)",
"def apply(self, cleanup=False, activate=True):\n logger.info('applying network configs...')\n restart_interfaces = []\n restart_bridges = []\n update_files = {}\n all_file_names = []\n\n for interface_name, iface_data in self.interface_data.iteritems():\n route_data = self.route_data.get(interface_name, '')\n interface_path = ifcfg_config_path(interface_name)\n route_path = route_config_path(interface_name)\n all_file_names.append(interface_path)\n all_file_names.append(route_path)\n if (utils.diff(interface_path, iface_data) or\n utils.diff(route_path, route_data)):\n restart_interfaces.append(interface_name)\n restart_interfaces.extend(self.child_members(interface_name))\n update_files[interface_path] = iface_data\n update_files[route_path] = route_data\n logger.info('No changes required for interface: %s' %\n interface_name)\n\n for bridge_name, bridge_data in self.bridge_data.iteritems():\n route_data = self.route_data.get(bridge_name, '')\n bridge_path = bridge_config_path(bridge_name)\n bridge_route_path = route_config_path(bridge_name)\n all_file_names.append(bridge_path)\n all_file_names.append(bridge_route_path)\n if (utils.diff(bridge_path, bridge_data) or\n utils.diff(bridge_route_path, route_data)):\n restart_bridges.append(bridge_name)\n restart_interfaces.extend(self.child_members(bridge_name))\n update_files[bridge_path] = bridge_data\n update_files[bridge_route_path] = route_data\n logger.info('No changes required for bridge: %s' % bridge_name)\n\n if cleanup:\n for ifcfg_file in glob.iglob(cleanup_pattern()):\n if ifcfg_file not in all_file_names:\n interface_name = ifcfg_file[len(cleanup_pattern()) - 1:]\n if interface_name != 'lo':\n logger.info('cleaning up interface: %s'\n % interface_name)\n self.ifdown(interface_name)\n self.remove_config(ifcfg_file)\n\n if activate:\n for interface in restart_interfaces:\n self.ifdown(interface)\n\n for bridge in restart_bridges:\n self.ifdown(bridge, iftype='bridge')\n\n for oldname, newname in self.renamed_interfaces.iteritems():\n self.ifrename(oldname, newname)\n\n for location, data in update_files.iteritems():\n self.write_config(location, data)\n\n if activate:\n for bridge in restart_bridges:\n self.ifup(bridge, iftype='bridge')\n\n for interface in restart_interfaces:\n self.ifup(interface)\n\n return update_files",
"def multiple_apply_config(self):\n\n self.show_step(1, initialize=True)\n self.env.revert_snapshot(\"reconfiguration_scalability\")\n\n self.show_step(2)\n cluster_id = self.fuel_web.get_last_created_cluster()\n computes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(\n cluster_id, ['compute'])\n target_compute = computes[0]\n config = utils.get_config_template('nova_disk')\n structured_config_old = get_structured_config_dict(config)\n\n config['nova_config'][\n 'DEFAULT/default_ephemeral_format']['value'] = 'ext3'\n structured_config_new = get_structured_config_dict(config)\n self.fuel_web.client.upload_configuration(config,\n cluster_id,\n node_id=target_compute['id'])\n\n self.show_step(3)\n service_name = 'nova-compute'\n uptimes = self.get_service_uptime([target_compute], service_name)\n\n self.show_step(4)\n task = self.fuel_web.client.apply_configuration(\n cluster_id,\n node_id=target_compute['id'])\n self.fuel_web.assert_task_success(task, timeout=900, interval=5)\n\n self.show_step(5)\n self.check_service_was_restarted([target_compute],\n uptimes, service_name)\n\n self.show_step(6)\n for compute in computes:\n if compute == target_compute:\n self.check_config_on_remote([compute], structured_config_new)\n target_hypervisor_name = compute['fqdn']\n else:\n hypervisor_name = compute['fqdn']\n self.check_config_on_remote([compute], structured_config_old)\n\n self.show_step(7)\n os_conn = os_actions.OpenStackActions(\n self.fuel_web.get_public_vip(cluster_id))\n\n self.show_step(8)\n self.show_step(9)\n self.show_step(10)\n self.show_step(11)\n self.check_nova_ephemeral_disk(os_conn, cluster_id,\n hypervisor_name=target_hypervisor_name,\n fs_type='ext3')\n self.show_step(12)\n self.show_step(13)\n self.show_step(14)\n self.show_step(15)\n self.check_nova_ephemeral_disk(os_conn, cluster_id,\n hypervisor_name=hypervisor_name)\n\n self.env.make_snapshot(\"multiple_apply_config\")",
"def commit_pending_bios_changes(self, reboot=False, start_time='TIME_NOW'):\n return self._job_mgmt.create_config_job(\n resource_uri=ironic_uris.DCIM_BIOSService,\n cim_creation_class_name='DCIM_BIOSService',\n cim_name='DCIM:BIOSService',\n target=self.BIOS_DEVICE_FQDD,\n reboot=reboot,\n start_time=start_time)",
"def create_config_job(self,\n resource_uri,\n cim_creation_class_name,\n cim_name,\n target,\n cim_system_creation_class_name='DCIM_ComputerSystem',\n cim_system_name='DCIM:ComputerSystem',\n reboot=False,\n start_time='TIME_NOW'):\n return self._job_mgmt.create_config_job(\n resource_uri,\n cim_creation_class_name,\n cim_name,\n target,\n cim_system_creation_class_name,\n cim_system_name,\n reboot,\n start_time)",
"def apply(self) -> None:\n _ba.apply_config()",
"def apply_config(\n hostname: str, config: str, dry_run: bool, job_id: Optional[int] = None, scheduled_by: Optional[str] = None\n) -> NornirJobResult:\n logger = get_logger()\n\n with sqla_session() as session:\n dev: Device = session.query(Device).filter(Device.hostname == hostname).one_or_none()\n if not dev:\n raise Exception(\"Device {} not found\".format(hostname))\n elif not (dev.state == DeviceState.MANAGED or dev.state == DeviceState.UNMANAGED):\n raise Exception(\"Device {} is in invalid state: {}\".format(hostname, dev.state))\n\n nr = cnaas_init()\n nr_filtered, _, _ = inventory_selector(nr, hostname=hostname)\n\n try:\n nrresult = nr_filtered.run(task=push_static_config, config=config, dry_run=dry_run, job_id=job_id)\n except Exception as e:\n logger.exception(\"Exception in apply_config: {}\".format(e))\n else:\n if not dry_run:\n with sqla_session() as session:\n dev: Device = session.query(Device).filter(Device.hostname == hostname).one_or_none()\n dev.state = DeviceState.UNMANAGED\n dev.synchronized = False\n\n return NornirJobResult(nrresult=nrresult)",
"def update_ifaces_configs(self):\n # Nothing to be done if no reordering has occurred.\n reordered = self.udev.reordered_devices\n if not reordered:\n return\n\n # Skip if we have already completed this stage\n if self.ifaces_confs:\n return\n\n # Generate candidate list of iface conf files, with\n # associated rule, that need to be processed.\n reordered_files = tuple((r, os.path.join(self.syspaths.ifaces_dir,\n r['from']))\n for r in reordered)\n\n ifaces_confs = self._process_candidate_conf_files(reordered_files)\n\n # Process the main interfaces file, and if it was modified, then\n # include it in the list of interface conf objects to be tracked\n conf = ConfFile(self.syspaths.ifaces_file, self.syspaths)\n conf.replace(self.remap_renamer)\n if conf.dirty:\n ifaces_confs.append(conf)\n\n # At this stage changes have been prepared but are not yet\n # committed to disk\n self._ifaces_confs = ifaces_confs",
"def commit_pending_idrac_changes(\n self,\n idrac_fqdd='iDRAC.Embedded.1',\n reboot=False,\n start_time='TIME_NOW'):\n return self._job_mgmt.create_config_job(\n resource_uri=uris.DCIM_iDRACCardService,\n cim_creation_class_name='DCIM_iDRACCardService',\n cim_name='DCIM:iDRACCardService',\n target=idrac_fqdd,\n reboot=reboot,\n start_time=start_time)",
"def setup_schedule():\n for project in Project.select():\n if (project.schedule_interval is not None) and (project.schedule_interval > 0):\n schedule.add_job(pull_build_project, \"interval\", id=\"building_\" + str(project.id),\n hours=project.schedule_interval,\n args=[project, \"master\"])",
"def _ApplyNicMods(self, idx, nic, params, private):\n changes = []\n\n for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:\n if key in params:\n changes.append((\"nic.%s/%d\" % (key, idx), params[key]))\n setattr(nic, key, params[key])\n\n new_net = params.get(constants.INIC_NETWORK, nic.network)\n new_net_uuid = self.cfg.LookupNetwork(new_net)\n if new_net_uuid != nic.network:\n changes.append((\"nic.network/%d\" % idx, new_net))\n nic.network = new_net_uuid\n\n if private.filled:\n nic.nicparams = private.filled\n\n for (key, val) in nic.nicparams.items():\n changes.append((\"nic.%s/%d\" % (key, idx), val))\n\n if self.op.hotplug:\n msg = self._HotplugDevice(constants.HOTPLUG_ACTION_MODIFY,\n constants.HOTPLUG_TARGET_NIC,\n nic, None, idx)\n changes.append((\"nic/%d\" % idx, msg))\n\n return changes",
"def configure_queued_job(self, job):\n\n config = ExecutionConfiguration()\n data = job.get_job_data()\n\n # Add input file meta-data\n input_files_dict = self._create_input_file_dict(data)\n config.set_input_files(input_files_dict)\n\n # Set up env vars for job's input data\n input_values = data.get_injected_input_values(input_files_dict)\n interface = job.job_type_rev.get_input_interface()\n\n env_vars = {}\n if isinstance(data, JobData):\n # call job.data.job_data.JobData.get_injected_env_vars\n env_vars = data.get_injected_env_vars(input_files_dict, interface)\n else:\n # call old job.configuration.data.job_data.get_injected_env_vars\n # TODO: remove once old JobData class is no longer used\n env_vars = data.get_injected_env_vars(input_files_dict)\n\n task_workspaces = {}\n if job.job_type.is_system:\n # Add any workspaces needed for this system job\n task_workspaces = QueuedExecutionConfigurator._system_job_workspaces(job)\n else:\n # Set any output workspaces needed\n output_workspaces = {}\n if job.input and 'version' in job.input and job.input['version'] == '1.0':\n # Set output workspaces using legacy job data\n self._cache_workspace_names(data.get_output_workspace_ids())\n output_workspaces = {}\n for output, workspace_id in data.get_output_workspaces().items():\n output_workspaces[output] = self._cached_workspace_names[workspace_id]\n config.set_output_workspaces(output_workspaces)\n if not output_workspaces:\n # Set output workspaces from job configuration\n output_workspaces = {}\n job_config = job.get_job_configuration()\n interface = SeedManifest(job.job_type_rev.manifest, do_validate=False)\n for output_name in interface.get_file_output_names():\n output_workspace = job_config.get_output_workspace(output_name)\n if output_workspace:\n output_workspaces[output_name] = output_workspace\n config.set_output_workspaces(output_workspaces)\n\n # Create main task with fields populated from input data\n args = job.get_job_interface().get_injected_command_args(input_values, env_vars)\n config.create_tasks(['main'])\n config.add_to_task('main', args=args, env_vars=env_vars, workspaces=task_workspaces)\n return config",
"def create_config(self):\n\n #FIXME: Try to do it over loops ie. self[attr].set_config()\n for attr, value in self.__dict__.items():\n if attr == \"connection\":\n self.connection.set_config(self.cfg)\n if attr == \"ipv4\":\n self.ipv4.set_config(self.cfg)\n if attr == \"ipv6\":\n self.ipv6.set_config(self.cfg)\n if attr == \"_802_3_ethernet\" and not value == \"none\":\n self._802_3_ethernet.set_config(self.cfg)\n if attr == \"_802_11_wireless\" and not value == \"none\":\n self._802_11_wireless.set_config(self.cfg)\n if attr == \"_802_11_wireless_security\" and not value == \"none\":\n self._802_11_wireless_security.set_config(self.cfg)",
"def iface_config(self, iface, *args, **kwargs):\n if not set(kwargs).issubset({'intf_ip_addr', 'netns', 'adminMode'}):\n raise NotImplementedError(\"Method is not implemented for current kwargs.\")\n if kwargs.get('netns', False):\n # Create network namespaces for current iface\n self.create_namespaces(iface)\n del kwargs['netns']\n if 'intf_ip_addr' in kwargs:\n kwargs['ipAddr'] = \"{}/24\".format(kwargs['intf_ip_addr'])\n if iface in self.namespaces:\n self._lhost.ui.enter_namespace(self.namespaces[iface])\n self._lhost.ui.modify_ports([iface], **kwargs)\n if iface in self.namespaces:\n self._lhost.ui.exit_namespace()",
"def network_config(self):\n\n if self._network_config:\n return self._network_config\n\n interfaces = self.metadata.get('interfaces')\n\n if not interfaces:\n raise Exception(\"Unable to get meta-data from server....\")\n\n # Convert Vultr network configuration to cloudinit.net format\n\n # Example JSON:\n # [\n # {\n # \"ipv4\": {\n # \"additional\": [\n # {\n # \"address\": \"192.0.2.3\",\n # \"netmask\": \"255.255.255.0\"\n # }\n # ],\n # \"address\": \"192.0.2.2\",\n # \"gateway\": \"192.0.2.1\",\n # \"netmask\": \"255.255.255.0\"\n # },\n # \"ipv6\": {\n # \"additional\": [\n # {\n # \"network\": \"2001:0db8:0:2::\",\n # \"prefix\": \"64\"\n # }\n # ],\n # \"address\": \"2001:0db8:0:1:5428:d5ff:fe28:1910\",\n # \"network\": \"2001:0db8:0:1::\",\n # \"prefix\": \"64\"\n # },\n # \"mac\": \"00:00:00:00:00:00\",\n # \"network-type\": \"public\"\n # },\n # ......\n # ]\n\n nic_configs = []\n macs_to_nics = cloudnet.get_interfaces_by_mac()\n LOG.debug(\"nic mapping: %s\", macs_to_nics)\n\n config = []\n for vultr_ip_dict in interfaces:\n mac = vultr_ip_dict[\"mac\"]\n\n if mac not in macs_to_nics:\n raise ValueError(\"Did not find network interface on system \"\n \"with mac '%s'. Cannot apply configuration: %s\"\n % (mac_address, nic))\n if_name = macs_to_nics[mac] # if_name = string 'eth0', ...\n if_config= {\n 'type': 'physical',\n 'mac_address': mac,\n 'name': if_name,\n 'subnets': [{\n 'type': 'dhcp',\n 'control': 'auto',\n }\n ]\n }\n config.append(if_config)\n\n LOG.debug(\"nic '%s' configuration: %s\", if_name, if_config)\n\n LOG.debug(\"added dns servers: %s\", self.dns_servers)\n config.append({'type': 'nameserver', 'address': self.dns_servers})\n\n return {'version': 1, 'config': config}",
"def build_job_configs(self, args):\n job_configs = {}\n\n ttype = args['ttype']\n (targets_yaml, sim) = NAME_FACTORY.resolve_targetfile(\n args, require_sim_name=True)\n if targets_yaml is None:\n return job_configs\n\n specconfig = NAME_FACTORY.resolve_specconfig(args)\n\n astro_priors = args['astro_priors']\n write_full = args.get('write_full', False)\n\n targets = load_yaml(targets_yaml)\n base_config = dict(nsims=args['nsims'],\n seed=args['seed'],\n specconfig=specconfig)\n\n for target_name, profile_list in list(targets.items()):\n for profile in profile_list:\n for astro_prior in astro_priors:\n if is_null(astro_prior):\n astro_prior = 'none'\n full_key = \"%s:%s:%s:%s\" % (\n target_name, profile, sim, astro_prior)\n name_keys = dict(target_type=ttype,\n target_name=target_name,\n sim_name=sim,\n profile=profile,\n astro_prior=astro_prior,\n fullpath=True)\n limitfile = NAME_FACTORY.sim_dmlimitsfile(**name_keys)\n first = args['seed']\n last = first + args['nsims'] - 1\n outfile = limitfile.replace(\n '_SEED.fits', '_collected_%06i_%06i.fits' %\n (first, last))\n logfile = make_nfs_path(outfile.replace('.fits', '.log'))\n if not write_full:\n outfile = None\n summaryfile = limitfile.replace(\n '_SEED.fits', '_summary_%06i_%06i.fits' %\n (first, last))\n job_config = base_config.copy()\n job_config.update(dict(limitfile=limitfile,\n astro_prior=astro_prior,\n outfile=outfile,\n summaryfile=summaryfile,\n logfile=logfile))\n job_configs[full_key] = job_config\n\n return job_configs",
"def set_new_configuration(self):\r\n with open('new_config.json', 'rt') as jsonfile:\r\n configuration = jsonfile.read()\r\n configuration_data = json.loads(configuration)\r\n ip = IPRoute()\r\n index = ip.link_lookup(ifname='eth0')[0]\r\n ip.link('set', index=index, state='up')\r\n ip.addr('add', index, address=configuration_data[0][0], mask=24)\r\n ip.close()",
"def build_job_configs(self, args):\n job_configs = {}\n\n ttype = args['ttype']\n (roster_yaml, sim) = NAME_FACTORY.resolve_rosterfile(\n args, require_sim_name=True)\n if roster_yaml is None:\n return job_configs\n\n specconfig = NAME_FACTORY.resolve_specconfig(args)\n\n astro_priors = args['astro_priors']\n write_full = args['write_full']\n first = args['seed']\n last = first + args['nsims'] - 1\n\n base_config = dict(nsims=args['nsims'],\n seed=args['seed'])\n\n roster_dict = load_yaml(roster_yaml)\n for roster_name in list(roster_dict.keys()):\n for astro_prior in astro_priors:\n if is_null(astro_prior):\n astro_prior = 'none'\n full_key = \"%s:%s:%s\" % (roster_name, sim, astro_prior)\n name_keys = dict(target_type=ttype,\n roster_name=roster_name,\n sim_name=sim,\n astro_prior=astro_prior,\n fullpath=True)\n\n limitfile = NAME_FACTORY.sim_stackedlimitsfile(**name_keys)\n outfile = limitfile.replace(\n '_SEED.fits', '_collected_%06i_%06i.fits' %\n (first, last))\n logfile = make_nfs_path(outfile.replace('.fits', '.log'))\n if not write_full:\n outfile = None\n summaryfile = limitfile.replace('_SEED.fits', '_summary.fits')\n\n job_config = base_config.copy()\n job_config.update(dict(limitfile=limitfile,\n specconfig=specconfig,\n astro_prior=astro_prior,\n outfile=outfile,\n summaryfile=summaryfile,\n logfile=logfile))\n job_configs[full_key] = job_config\n\n return job_configs",
"def _configure_regular_job(config, job_exe, job_type, system_logging_level):\n config.create_tasks(['pull', 'pre', 'main', 'post'])\n config.add_to_task('pull', args=create_pull_command(job_exe.docker_image))\n config.add_to_task('pre', args=PRE_TASK_COMMAND_ARGS)\n config.add_to_task('post', args=POST_TASK_COMMAND_ARGS)\n\n # Configure input workspaces\n ro_input_workspaces = {}\n rw_input_workspaces = {}\n for input_workspace in config.get_input_workspace_names():\n ro_input_workspaces[input_workspace] = TaskWorkspace(input_workspace, MODE_RO)\n rw_input_workspaces[input_workspace] = TaskWorkspace(input_workspace, MODE_RW)\n config.add_to_task('pre', workspaces=ro_input_workspaces)\n config.add_to_task('main', workspaces=ro_input_workspaces)\n # Post tasks have access to input workspaces in case input files need moved as part of parse results\n config.add_to_task('post', workspaces=rw_input_workspaces)\n\n # Configure output workspaces\n output_workspaces = {}\n for output_workspace in config.get_output_workspace_names():\n output_workspaces[output_workspace] = TaskWorkspace(output_workspace, MODE_RW)\n config.add_to_task('post', workspaces=output_workspaces)\n\n # Configure input/output mounts\n input_mnt_name = 'scale_input_mount'\n output_mnt_name = 'scale_output_mount'\n input_vol_name = get_job_exe_input_vol_name(job_exe)\n output_vol_name = get_job_exe_output_vol_name(job_exe)\n input_vol_ro = Volume(input_vol_name, SCALE_JOB_EXE_INPUT_PATH, MODE_RO, is_host=False)\n input_vol_rw = Volume(input_vol_name, SCALE_JOB_EXE_INPUT_PATH, MODE_RW, is_host=False)\n output_vol_ro = Volume(output_vol_name, SCALE_JOB_EXE_OUTPUT_PATH, MODE_RO, is_host=False)\n output_vol_rw = Volume(output_vol_name, SCALE_JOB_EXE_OUTPUT_PATH, MODE_RW, is_host=False)\n\n config.add_to_task('pre', mount_volumes={input_mnt_name: input_vol_rw, output_mnt_name: output_vol_rw},\n env_vars={'SYSTEM_LOGGING_LEVEL': system_logging_level})\n config.add_to_task('main', mount_volumes={input_mnt_name: input_vol_ro, output_mnt_name: output_vol_rw})\n config.add_to_task('post', mount_volumes={output_mnt_name: output_vol_ro},\n env_vars={'SYSTEM_LOGGING_LEVEL': system_logging_level})\n\n\n # Configure output directory\n env_vars = {'OUTPUT_DIR': SCALE_JOB_EXE_OUTPUT_PATH, 'INPUT_METADATA': SCALE_INPUT_METADATA_PATH}\n args = config._get_task_dict('main')['args']\n\n args = environment_expansion(env_vars, args)\n\n config.add_to_task('main', args=args, env_vars=env_vars)\n\n # Configure task resources\n resources = job_exe.get_resources()\n # Pull-task and pre-task require full amount of resources\n config.add_to_task('pull', resources=resources)\n config.add_to_task('pre', resources=resources)\n # Main-task no longer requires the input file space\n resources.subtract(NodeResources([Disk(job_exe.input_file_size)]))\n config.add_to_task('main', resources=resources)\n # Post-task no longer requires any disk space\n resources.remove_resource('disk')\n config.add_to_task('post', resources=resources)",
"def operation_cost(tau=0):\n iface_on = 0.2\n iface_off = 0.2 \n ipconfig = 2\n reconf_ip_bin = tau\n op_cost = 2 * (iface_on + iface_off) + reconf_ip_bin * ipconfig\n return op_cost",
"def _configure_all_tasks(self, config, job_exe, job_type):\n\n config.set_task_ids(job_exe.get_cluster_id())\n\n for task_type in config.get_task_types():\n # Configure env vars describing allocated task resources\n env_vars = {}\n nvidia_docker_label = None\n\n for resource in config.get_resources(task_type).resources:\n env_name = 'ALLOCATED_%s' % normalize_env_var_name(resource.name)\n env_vars[env_name] = '%.1f' % resource.value # Assumes scalar resources\n if resource.name == \"gpus\" and int(resource.value) > 0:\n gpu_list = GPUManager.get_nvidia_docker_label(job_exe.node_id, job_exe.job_id)\n nvidia_docker_label = DockerParameter('env','NVIDIA_VISIBLE_DEVICES={}'.format(gpu_list.strip(',')))\n\n # Configure env vars for Scale meta-data\n env_vars['SCALE_JOB_ID'] = unicode(job_exe.job_id)\n env_vars['SCALE_EXE_NUM'] = unicode(job_exe.exe_num)\n if job_exe.recipe_id:\n env_vars['SCALE_RECIPE_ID'] = unicode(job_exe.recipe_id)\n if job_exe.batch_id:\n env_vars['SCALE_BATCH_ID'] = unicode(job_exe.batch_id)\n\n # Configure workspace volumes\n workspace_volumes = {}\n for task_workspace in config.get_workspaces(task_type):\n logger.debug(self._workspaces)\n workspace_model = self._workspaces[task_workspace.name]\n # TODO: Should refactor workspace broker to return a Volume object and remove BrokerVolume\n if workspace_model.volume:\n vol_name = get_workspace_volume_name(job_exe, task_workspace.name)\n cont_path = get_workspace_volume_path(workspace_model.name)\n if workspace_model.volume.host:\n host_path = workspace_model.volume.remote_path\n volume = Volume(vol_name, cont_path, task_workspace.mode, is_host=True, host_path=host_path)\n else:\n driver = workspace_model.volume.driver\n driver_opts = {}\n # TODO: Hack alert for nfs broker, as stated above, we should return Volume from broker\n if driver == 'nfs':\n driver_opts = {'share': workspace_model.volume.remote_path}\n volume = Volume(vol_name, cont_path, task_workspace.mode, is_host=False, driver=driver,\n driver_opts=driver_opts)\n workspace_volumes[task_workspace.name] = volume\n\n config.add_to_task(task_type, env_vars=env_vars, wksp_volumes=workspace_volumes)\n\n # Labels for metric grouping\n job_id_label = DockerParameter('label', 'scale-job-id={}'.format(job_exe.job_id))\n job_execution_id_label = DockerParameter('label', 'scale-job-execution-id={}'.format(job_exe.exe_num))\n job_type_name_label = DockerParameter('label', 'scale-job-type-name={}'.format(job_type.name))\n job_type_version_label = DockerParameter('label', 'scale-job-type-version={}'.format(job_type.version))\n main_label = DockerParameter('label', 'scale-task-type=main')\n if nvidia_docker_label:\n nvidia_runtime_param = DockerParameter('runtime', 'nvidia')\n config.add_to_task('main', docker_params=[job_id_label, job_type_name_label, job_type_version_label,\n job_execution_id_label, main_label, nvidia_docker_label, nvidia_runtime_param])\n else:\n config.add_to_task('main', docker_params=[job_id_label, job_type_name_label, job_type_version_label,\n job_execution_id_label, main_label])\n\n if not job_type.is_system:\n pre_label = DockerParameter('label', 'scale-task-type=pre')\n post_label = DockerParameter('label', 'scale-task-type=post')\n config.add_to_task('pre', docker_params=[job_id_label, job_type_name_label, job_type_version_label,\n job_execution_id_label, pre_label])\n config.add_to_task('post', docker_params=[job_id_label, job_type_name_label, job_type_version_label,\n job_execution_id_label, post_label])\n\n # Configure tasks for logging\n if 
settings.LOGGING_ADDRESS is not None:\n log_driver = DockerParameter('log-driver', 'fluentd')\n fluent_precision = DockerParameter('log-opt', 'fluentd-sub-second-precision=true')\n log_address = DockerParameter('log-opt', 'fluentd-address=%s' % settings.LOGGING_ADDRESS)\n if not job_type.is_system:\n pre_task_tag = DockerParameter('log-opt', 'tag=%s|%s|%s|%s|%s' % (config.get_task_id('pre'),\n job_type.name,\n job_type.version,\n job_exe.job_id,\n job_exe.exe_num))\n config.add_to_task('pre', docker_params=[log_driver, fluent_precision, log_address, pre_task_tag])\n post_task_tag = DockerParameter('log-opt', 'tag=%s|%s|%s|%s|%s' % (config.get_task_id('post'),\n job_type.name,\n job_type.version,\n job_exe.job_id,\n job_exe.exe_num))\n config.add_to_task('post', docker_params=[log_driver, fluent_precision, log_address, post_task_tag])\n # TODO: remove es_urls parameter when Scale no longer supports old style job types\n\n # Post task needs ElasticSearch URL to grab logs for old artifact registration\n es_param = DockerParameter('env', 'ELASTICSEARCH_URL=%s' % settings.ELASTICSEARCH_URL)\n config.add_to_task('post', docker_params=[es_param])\n main_task_tag = DockerParameter('log-opt', 'tag=%s|%s|%s|%s|%s' % (config.get_task_id('main'),\n job_type.name,\n job_type.version,\n job_exe.job_id,\n job_exe.exe_num))\n config.add_to_task('main', docker_params=[log_driver, fluent_precision, log_address, main_task_tag])",
"def update_interfaces_config(self):\n\n for i in self._nodes.items():\n node = i[1]\n devices = node[\"devices\"]\n all_devices = devices[\"other_devices\"]\n all_devices.update(devices[\"dpdk_devices\"])\n all_devices.update(devices[\"kernel_devices\"])\n\n current_ifcs = {}\n interfaces = {}\n if \"interfaces\" in node:\n current_ifcs = node[\"interfaces\"]\n if current_ifcs:\n for ifc in current_ifcs.values():\n dvid = ifc[\"pci_address\"]\n if dvid in all_devices:\n VppPCIUtil.vpp_create_interface(\n interfaces, dvid, all_devices[dvid]\n )\n node[\"interfaces\"] = interfaces\n\n self.updateconfig()",
"def build(cfg, jobs, watch):\n libjobs.buildJobs(cfg, jobs, watch)",
"def update_from_config(self, job_configs, factory, reconfigure, namespace_to_reconfigure=None):\n self.jobs.filter_by_name(job_configs)\n\n def map_to_job_and_schedule(job_schedulers):\n for job_scheduler in job_schedulers:\n if reconfigure:\n job_scheduler.schedule()\n yield job_scheduler.get_job()\n\n def reconfigure_filter(config):\n if not reconfigure or not namespace_to_reconfigure:\n return True\n else:\n return config.namespace == namespace_to_reconfigure\n\n seq = (factory.build(config) for config in job_configs.values() if reconfigure_filter(config))\n return map_to_job_and_schedule(filter(self.add, seq))",
"def deploy_net(self, desired_config): # pylint: disable=too-many-locals\n self._bigip.refresh_net()\n\n # Get the list of route tasks\n LOGGER.debug(\"Getting route tasks...\")\n existing = self._bigip.get_routes()\n desired = desired_config.get('routes', dict())\n\n (create_routes, update_routes, delete_routes) = (\n self._get_resource_tasks(existing, desired)[0:3])\n\n # Get the list of arp tasks\n LOGGER.debug(\"Getting arp tasks...\")\n existing = self._bigip.get_arps()\n desired = desired_config.get('arps', dict())\n\n (create_arps, update_arps, delete_arps) = (\n self._get_resource_tasks(existing, desired)[0:3])\n\n # Get the list of tunnel tasks\n LOGGER.debug(\"Getting tunnel tasks...\")\n existing = self._bigip.get_fdb_tunnels()\n desired = desired_config.get('fdbTunnels', dict())\n (create_tunnels, update_tunnels, delete_tunnels) = (\n self._get_resource_tasks(existing, desired)[0:3])\n\n # If there are pre-existing (user-created) tunnels that we are\n # managing, we want to only update these tunnels.\n LOGGER.debug(\"Getting pre-existing tunnel update tasks...\")\n desired = desired_config.get('userFdbTunnels', dict())\n update_existing_tunnels = self._get_user_tunnel_tasks(desired)\n\n LOGGER.debug(\"Building task lists...\")\n create_tasks = create_arps + create_tunnels + create_routes\n update_tasks = update_arps + update_tunnels + update_existing_tunnels + update_routes\n delete_tasks = delete_arps + delete_tunnels + delete_routes\n\n taskq_len = len(create_tasks) + len(update_tasks) + len(delete_tasks)\n\n return self._run_tasks(\n taskq_len, create_tasks, update_tasks, delete_tasks)",
"def modif_network(self):\n print \"preparation du fichier network interfaces\"\n if version_os[\"OS\"] == \"CentOS\":\n self.exec_cmd(\"cp %s/etc/sysconfig/network_scripts/ifcfg-eth0 %s/etc/sysconfig/network_scripts/ifcfg-eth0.pre.p2v\" % (self.rep_vhosts_vm,self.rep_vhosts_vm))\n else:\n self.exec_cmd(\"cp %s/etc/network/interfaces %s/etc/network/interfaces.post.p2v\" % (self.rep_vhosts_vm,self.rep_vhosts_vm))\n self.exec_cmd(\"cp %s/etc/network/interfaces.pre.p2v %s/etc/network/interfaces\" % (self.rep_vhosts_vm,self.rep_vhosts_vm))",
"def apply_config(dts, acg, xact, action, scratch):\n self.log.debug(\"Apply Config\")\n return rwtypes.RwStatus.SUCCESS",
"def task_generate_job_batch():\n return {\n # force doit to always mark the task\n # as not up-to-date (unless target removed)\n 'uptodate': [False],\n 'file_dep': ['generate_job_batch.py'],\n 'task_dep': ['create_folders'],\n #'targets': ['.running_jobs/list_of_jobs.txt'],\n 'actions': ['python generate_job_batch.py'],\n }"
] | [
"0.6852771",
"0.57400745",
"0.5662997",
"0.55841076",
"0.52505565",
"0.5179049",
"0.5168917",
"0.5116165",
"0.50684077",
"0.50249934",
"0.50060546",
"0.49951974",
"0.49877572",
"0.4984114",
"0.49398127",
"0.48890617",
"0.4845154",
"0.48202658",
"0.4804889",
"0.47988567",
"0.47828218",
"0.47646555",
"0.4732945",
"0.4730711",
"0.47226134",
"0.47195533",
"0.47052026",
"0.4695873",
"0.4689713",
"0.46781695"
] | 0.63792557 | 1 |
Creates a reboot job. | def create_reboot_job(self,
reboot_type='graceful_reboot_with_forced_shutdown'):
return self._job_mgmt.create_reboot_job(reboot_type) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_job_schedule(self):\n job_schedule_create = netapp_utils.zapi\\\n .NaElement.create_node_with_children(\n 'job-schedule-cron-create',\n **{'job-schedule-name': self.name})\n job_schedule_create.add_node_with_children(\n 'job-schedule-cron-minute',\n **{'cron-minute': str(self.job_minutes)})\n try:\n self.server.invoke_successfully(job_schedule_create,\n enable_tunneling=True)\n except netapp_utils.zapi.NaApiError as error:\n self.module.fail_json(msg='Error creating job schedule %s: %s'\n % (self.name, to_native(error)),\n exception=traceback.format_exc())",
"def create_job(api_instance, job):\n api_response = api_instance.create_namespaced_job(\n body=job, namespace=\"default\", pretty=True\n )\n logger.info(\"Job created with status='%s'\" % str(api_response.status))\n return api_response",
"def create_job(self, name: str) -> Slurm:\n LOG.info(\"Create a slurm job with name %s\", name)\n job = Slurm(\n name,\n {\"account\": self.account, \"time\": self.time,},\n scripts_dir=str(self.scripts_dir),\n log_dir=str(self.log_dir),\n )\n return job",
"def create_config_job(self,\n resource_uri,\n cim_creation_class_name,\n cim_name,\n target,\n cim_system_creation_class_name='DCIM_ComputerSystem',\n cim_system_name='DCIM:ComputerSystem',\n reboot=False,\n start_time='TIME_NOW'):\n return self._job_mgmt.create_config_job(\n resource_uri,\n cim_creation_class_name,\n cim_name,\n target,\n cim_system_creation_class_name,\n cim_system_name,\n reboot,\n start_time)",
"def reboot(*args, **kwargs):\n try:\n master.main_exit()\n except Exception:\n log.error(\"main_exit error\")\n with open('/tmp/reboot', 'w+') as f:\n f.write(\"REBOOT\")\n log.info(\"Reboot ...\")",
"def create_job(self, job):\n call = subprocess.Popen(self.cli + [PlatformJenkinsJavaCLI.CREATE_JOB, job.name], stdin=subprocess.PIPE)\n out, err = call.communicate(input=platform_ci.jjb.get_job_as_xml(job, self.template_dir))\n call.wait()\n if call.returncode != 0:\n logging.info(out)\n logging.error(err)\n raise PlatformJenkinsException(\"Creating job failed: \" + job.name)",
"def create(cfg, jobs):\n server = jenkins_utils.server_factory(cfg)\n libjobs.createJobs(server, jobs)",
"def test_hostmgr_restart_job_succeeds(self, failure_tester):\n job = failure_tester.job(job_file=\"test_job_no_container.yaml\")\n job.create()\n\n # Restart immediately, so that tasks will be in various\n # stages of launch\n assert 0 != failure_tester.fw.restart(failure_tester.hostmgr, \"leader\")\n\n job.wait_for_state()",
"def test_create_node_reboot_item(self):\n pass",
"def create_job(self, context=None):\n return self._client.call_method(\n 'UserAndJobState.create_job',\n [], self._service_ver, context)",
"def restart(self):\n\t\treturn Job(SDK.PrlVm_Restart(self.handle)[0])",
"def create_job(jobtype, server):\n name = generate_job_name(jobtype)\n job = Job.objects.create(jobtype=jobtype, server=server, name=name)\n return job",
"def CreateSchedulingMessage(messages,\n maintenance_policy,\n preemptible,\n restart_on_failure,\n node_affinities=None,\n min_node_cpu=None,\n location_hint=None,\n maintenance_freeze_duration=None,\n maintenance_interval=None,\n provisioning_model=None,\n instance_termination_action=None,\n host_error_timeout_seconds=None,\n max_run_duration=None,\n termination_time=None,\n local_ssd_recovery_timeout=None):\n # Note: We always specify automaticRestart=False for preemptible VMs. This\n # makes sense, since no-restart-on-failure is defined as \"store-true\", and\n # thus can't be given an explicit value. Hence it either has its default\n # value (in which case we override it for convenience's sake to the only\n # setting that makes sense for preemptible VMs), or the user actually\n # specified no-restart-on-failure, the only usable setting.\n on_host_maintenance = CreateOnHostMaintenanceMessage(messages,\n maintenance_policy)\n if preemptible or provisioning_model == 'SPOT':\n scheduling = messages.Scheduling(\n automaticRestart=False,\n onHostMaintenance=on_host_maintenance,\n preemptible=True)\n else:\n scheduling = messages.Scheduling(\n automaticRestart=restart_on_failure,\n onHostMaintenance=on_host_maintenance)\n\n if provisioning_model:\n scheduling.provisioningModel = (\n messages.Scheduling.ProvisioningModelValueValuesEnum(provisioning_model)\n )\n\n if instance_termination_action:\n scheduling.instanceTerminationAction = (\n messages.Scheduling.InstanceTerminationActionValueValuesEnum(\n instance_termination_action\n )\n )\n\n if max_run_duration is not None:\n scheduling.maxRunDuration = messages.Duration(seconds=max_run_duration)\n\n if local_ssd_recovery_timeout is not None:\n scheduling.localSsdRecoveryTimeout = messages.Duration(\n seconds=local_ssd_recovery_timeout\n )\n\n if termination_time:\n scheduling.terminationTime = times.FormatDateTime(termination_time)\n\n if node_affinities:\n scheduling.nodeAffinities = node_affinities\n\n if min_node_cpu is not None:\n scheduling.minNodeCpus = int(min_node_cpu)\n\n if location_hint:\n scheduling.locationHint = location_hint\n\n if maintenance_freeze_duration:\n scheduling.maintenanceFreezeDurationHours = maintenance_freeze_duration // 3600\n\n if maintenance_interval:\n scheduling.maintenanceInterval = messages.Scheduling.MaintenanceIntervalValueValuesEnum(\n maintenance_interval)\n\n if host_error_timeout_seconds:\n scheduling.hostErrorTimeoutSeconds = host_error_timeout_seconds\n return scheduling",
"def create_custom_job(\n type,\n project,\n location,\n payload,\n gcp_resources,\n):\n remote_runner = job_remote_runner.JobRemoteRunner(\n type, project, location, gcp_resources\n )\n\n try:\n # Create custom job if it does not exist\n job_name = remote_runner.check_if_job_exists()\n if job_name is None:\n job_name = remote_runner.create_job(\n create_custom_job_with_client,\n insert_system_labels_into_payload(payload),\n )\n\n # Poll custom job status until \"JobState.JOB_STATE_SUCCEEDED\"\n remote_runner.poll_job(get_custom_job_with_client, job_name)\n except (ConnectionError, RuntimeError) as err:\n error_util.exit_with_internal_error(err.args[0])",
"def reboot(self):\n module = 'reboot'\n method = 'POST'\n print(self.device + ' Calling reboot command on the device')\n response = self.axapi_call(module, method,'')\n if '2' in str(response.status_code):\n print(self.device + ' Reboot command successfully received, device will reboot momentarily, please wait')\n else:\n print(self.device + ' There was an error in issuing the reboot command, device may not have rebooted, please verify manually')",
"def reboot_instance(InstanceId=None):\n pass",
"def _create_job(self,\n name,\n environment_string,\n description='',\n platform='LINUX'):\n job = data_types.Job()\n job.name = name\n if environment_string.strip():\n job.environment_string = environment_string\n job.platform = platform\n job.descripton = description\n job.put()\n\n return job",
"def create_new_cron_job(cron, python_file_path, text_file_path, output_dir_path):\n remove_all_previous_jobs(cron)\n py_command = \"/usr/local/bin/python3 {} --filepath {} --output_dir_path {}\".format(python_file_path,\n text_file_path,\n output_dir_path)\n print(py_command)\n job = cron.new(command=py_command)\n job.minute.every(5)\n\n cron.write()\n\n return job",
"def reboot(self,request):\n\t\tresult = True\n\t\tPopen(['/sbin/reboot']) # that's all\n\t\tself.finished(request.id,result)",
"def IssueReboot():\n if sys.platform.startswith('win'):\n subprocess.call(['shutdown', '-r', '-f', '-t', '1'])\n elif sys.platform in ('darwin', 'posix', 'linux2'):\n subprocess.call(['sudo', 'shutdown', '-r', 'now'])\n else:\n raise NotImplementedError('Implement IssueReboot function '\n 'for %s' % sys.platform)",
"def reboot(self,\n context,\n instance,\n network_info,\n reboot_type,\n block_device_info=None,\n bad_volumes_callback=None):\n azure_name = self._get_omni_name_from_instance(instance)\n utils.restart_instance(self.compute_client, drv_conf.resource_group,\n azure_name)",
"def reboot_trima(runner):\r\n runner.AddCommand(\"reboot\\n\",'',False)\r\n runner.Run()\r\n runner.ResetCommands()",
"def reboot(self):\n raise NotImplementedError",
"def reboot(self, name=None):\n raise NotImplementedError",
"def net_service_restart(self):\n\t\treturn Job(SDK.PrlSrv_NetServiceRestart(self.handle)[0])",
"def reboot(self, save_before_reboot=False) -> tuple[int, AnyStr]:\n return 5, gettext(\"Reboot not ready\")",
"def create(cls, job_id: str) -> \"JobManifest\":\n now = datetime.datetime.now(datetime.timezone.utc)\n return JobManifest(creation_time=now, job_id=job_id, orbit_ids=[], task_ids=[])",
"def create_recurring_run(\n self,\n experiment_id: str,\n job_name: str,\n description: Optional[str] = None,\n start_time: Optional[str] = None,\n end_time: Optional[str] = None,\n interval_second: Optional[int] = None,\n cron_expression: Optional[str] = None,\n max_concurrency: Optional[int] = 1,\n no_catchup: Optional[bool] = None,\n params: Optional[dict] = None,\n pipeline_package_path: Optional[str] = None,\n pipeline_id: Optional[str] = None,\n version_id: Optional[str] = None,\n enabled: bool = True,\n enable_caching: Optional[bool] = None,\n service_account: Optional[str] = None,\n ) -> kfp_server_api.V1Job:\n\n job_config = self._create_job_config(\n experiment_id=experiment_id,\n params=params,\n pipeline_package_path=pipeline_package_path,\n pipeline_id=pipeline_id,\n version_id=version_id,\n enable_caching=enable_caching,\n )\n\n if all([interval_second, cron_expression\n ]) or not any([interval_second, cron_expression]):\n raise ValueError(\n 'Either interval_second or cron_expression is required')\n if interval_second is not None:\n trigger = kfp_server_api.models.V1Trigger(\n periodic_schedule=kfp_server_api.models.V1PeriodicSchedule(\n start_time=start_time,\n end_time=end_time,\n interval_second=interval_second))\n if cron_expression is not None:\n trigger = kfp_server_api.models.V1Trigger(\n cron_schedule=kfp_server_api.models.V1CronSchedule(\n start_time=start_time,\n end_time=end_time,\n cron=cron_expression))\n\n job_body = kfp_server_api.models.V1Job(\n enabled=enabled,\n pipeline_spec=job_config.spec,\n resource_references=job_config.resource_references,\n name=job_name,\n description=description,\n no_catchup=no_catchup,\n trigger=trigger,\n max_concurrency=max_concurrency,\n service_account=service_account)\n return self._job_api.create_job(body=job_body)",
"def reboot(self, *args, **kwargs):\n log_tag = self.get_log_tag()\n self.logger.info(\"{} Attempting to reset the Treerunner board\"\n \"\".format(log_tag))\n cmd = \"shutdown > /dev/null 2>&1\"\n self.exec_command_ssh(cmd, background=True)\n self.logger.info(\"{} Waiting for the Treerunner board to come\"\n \" back online\".format(log_tag))\n time.sleep(30)\n # Start the sshd server daemon\n self.start_sshd_server()",
"def runClusterBootstrap(c_session, i, b, boot_restart_file, options):\n\n # open log\n log = open(options.log,\"a\",0)\n log.write(\"\\n\\n %s: Cluster bootstrapping enabled. Running job \"\\\n \"%05i\\n\" % (timeStr(), i+1)\n )\n\n c_job = c_session.createJobTemplate()\n\n # run itself!\n #c_job.remoteCommand = \"%s %s\" % (sys.executable, sys.argv[0])\n c_job.remoteCommand = \"%s\" % sys.executable\n\n fileout_details = os.path.splitext(os.path.basename(options.outfile))\n thisjob_fileout = \"%s/%s_%04i%s\" % (options.tmpdir,\n fileout_details[0],\n i+1,\n fileout_details[1])\n log_details = os.path.splitext(os.path.basename(options.log))\n thisjob_log = \"%s/%s_%04i%s\" % (options.tmpdir,\n fileout_details[0],\n i+1,\n log_details[1])\n\n args = [sys.argv[0],\n \"-d\", options.datapath,\n \"-a\", options.feature_annot,\n \"-o\", thisjob_fileout,\n \"-l\", thisjob_log,\n \"-r\", options.script_file,\n \"-b\", str(b),\n \"--tmpdir\", options.tmpdir,\n \"--bootstrapslave\"]\n\n c_job.args = args\n\n if options.verbose:\n log.write(\"\\t\\tdrmaa command line:\\n\\t\\t%s %s\\n\" \\\n \"\" % (c_job.remoteCommand, \" \".join(c_job.args)))\n\n c_job.outputPath = \":%s\" % options.tmpdir\n c_job.errorPath = \":%s\" % options.tmpdir\n\n # pass current working directory (not that this is needed really, but hey!)\n c_job.nativeSpecification = \"-cwd\"\n\n # add support for different cluster queue specifications\n c_job.nativeSpecification = \"-clear -q '%s' %s\" \\\n \"\" % (options.clustq, c_job.nativeSpecification)\n\n if options.verbose:\n log.write(\"\\t\\tdrmaa output intermediates written to: %s\\n\" \\\n \"\" % options.tmpdir)\n\n c_job.jobEnvironment = os.environ\n jobid = c_session.runJob(c_job)\n\n log.write(\"\\t\\tJob submitted with id: %s\\n\" % jobid)\n\n log.close()\n\n return(jobid, \"%s/generic_wrapper.py.o%s\" % (options.tmpdir, jobid),\n \"%s/%s\" % (options.tmpdir, thisjob_fileout), thisjob_log)"
] | [
"0.57708925",
"0.5769493",
"0.5592167",
"0.55431616",
"0.5505705",
"0.5497413",
"0.53705674",
"0.53681993",
"0.5323312",
"0.5316519",
"0.5298177",
"0.5262912",
"0.52624005",
"0.5249387",
"0.52462703",
"0.5241022",
"0.5220556",
"0.5172969",
"0.51569784",
"0.50932425",
"0.5089434",
"0.5083834",
"0.5079726",
"0.506668",
"0.5065336",
"0.5056813",
"0.5047593",
"0.5042526",
"0.5013531",
"0.5000958"
] | 0.8060909 | 0 |
Deletes the given jobs. If no jobs are given, all jobs are deleted. | def delete_jobs(self, job_ids=['JID_CLEARALL']):
return self._job_mgmt.delete_jobs(job_ids) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete(self, jobs):\n assert isinstance(jobs, list), 'Jobs must be a list'\n assert len(jobs) > 0, 'One or more jobs required'\n\n req = list()\n if len(jobs) > 1:\n for r in self._batch_request(jobs):\n req.append(\n ''.join([self._scheduler_endpoint, '?', '&'.join(r)]))\n else:\n req = \"{}?job={}\".format(\n self._scheduler_endpoint, jobs[0])\n\n try:\n self._api_delete(req)\n except HTTPError as e:\n raise JobClientError(e.message)",
"def delete_jobs(self):\n jobs = self.get_jobs(self.age)\n print('Jobs queued for delete: ', jobs)\n for job in jobs:\n try: \n body = k_client.V1DeleteOptions(propagation_policy='Background')\n self.kube_v1_batch_client.delete_namespaced_job(job, body=body, namespace=self.project)\n self.kube_client.delete_namespaced_persistent_volume_claim(job+\"-storage-claim\", self.project, {})\n print('Deleted job: ', job)\n except ApiException as e:\n print(\"Exception when calling BatchV1Api -> delete_namespaced_job: %s\\n\" % e)\n exit(1)",
"def clear(self, job_keys=None):\n if job_keys:\n for job_key in job_keys:\n if job_key in self.jobs:\n job_name = 'lithops-{}'.format(job_key.lower())\n logger.debug('Deleting job {}'.format(job_name))\n try:\n self.batch_api.delete_namespaced_job(name=job_name,\n namespace=self.namespace,\n propagation_policy='Background')\n except Exception:\n pass\n self.jobs.remove(job_key)\n else:\n for job_key in self.jobs:\n job_name = 'lithops-{}'.format(job_key.lower())\n logger.debug('Deleting job {}'.format(job_name))\n try:\n self.batch_api.delete_namespaced_job(name=job_name,\n namespace=self.namespace,\n propagation_policy='Background')\n except Exception:\n pass\n self.jobs = []",
"def deleteJobs():\n deleteList = request.form.getlist(\"delete_job\")\n for jobId in deleteList:\n job = db.getJobs(jobId=jobId)[0]\n path = os.path.join(webapp.config['UPLOADED_JOBS_DEST'], job['appName'], jobId)\n shutil.rmtree(path, ignore_errors=True)\n db.deleteJob(jobId)\n return redirect(url_for('listJobs')), 302",
"def delete_all_jobs(self, jobstore=None):\n\n self._scheduler.remove_all_jobs(jobstore)",
"def delete_jobs(self, job_list, as_json=False):\n deletion_status = dict()\n for job_id in job_list:\n app_id = None\n if job_id.startswith('njs:'):\n # delete from njs\n is_deleted = True\n app_id = job_id[4:]\n elif job_id.startswith('method:'):\n # delete from njs_wrapper\n is_deleted = True\n app_id = job_id[7:]\n else:\n # delete from ujs (njs_wrapper?)\n is_deleted = False\n if app_id is not None:\n token = os.environ['KB_AUTH_TOKEN']\n njsClient = NarrativeJobService(URLS.job_service, token = token)\n try:\n status = njsClient.delete_app(app_id)\n if (not status == 'success') and ('was marked for deletion' not in status):\n is_deleted = False\n except Exception as e:\n # just return false until we get some better info from the NJS folks.\n is_deleted = False\n deletion_status[job_id] = is_deleted\n if as_json:\n import json\n deletion_status = json.dumps(deletion_status)\n return deletion_status",
"def __clear_jobs(self):\n namespace = self._config.cluster_config.namespace\n self.__logger.info(f'Clearing old jobs in current namespace: {namespace}')\n\n for job in self.__client.get(namespace=self._config.cluster_config.namespace)['items']:\n job_name = job['metadata']['name']\n self.__logger.info(f'Deleting: {job_name}')\n try:\n self.__client.custom_api.delete_namespaced_custom_object(\n PYTORCHJOB_GROUP,\n PYTORCHJOB_VERSION,\n namespace,\n PYTORCHJOB_PLURAL,\n job_name)\n except Exception as e:\n self.__logger.warning(f'Could not delete: {job_name}')\n print(e)",
"def terminateJobs(self, ids):\n #WARNING: terminateJobs modifies the running queue, which\n # fillJobQueue assumes can't happen\n queues = [self.__queue, self.__clientQueue, self.__running, self.__clientRunning]\n with self.__queueLock:\n for _, queue in enumerate(queues):\n toRemove = []\n for job in queue:\n if job is not None and job.identifier in ids:\n # this assumes that each uniqueHandle only exists once in any queue anywhere\n ids.remove(job.identifier)\n toRemove.append(job)\n for job in toRemove:\n # for fixed-spot queues, need to replace job with None not remove\n if isinstance(queue,list):\n job.kill()\n queue[queue.index(job)] = None\n # for variable queues, can just remove the job\n else:\n queue.remove(job)\n self.raiseADebug(f'Terminated job \"{job.identifier}\" by request.')\n if len(ids):\n self.raiseADebug('Tried to remove some jobs but not found in any queues:',', '.join(ids))",
"def delete_jobs(self, job_id_list: list, notify=True) -> requests.Response:\n\n data = {\n \"jobs\": {job_id: {} for job_id in job_id_list},\n \"notify\": notify\n }\n\n response = self._api_call(method=\"POST\", endpoint=DELETE_JOBS_ENDPOINT, data=data)\n return response",
"def clear_jobs():\n job_ids = [rec['id'] for rec in job_records()]\n for job_id in job_ids:\n remove_job(job_id)\n return job_ids",
"def clean(self, args):\n for j in self.jobs:\n j.clean(args)",
"def delete_all_species_jobs(self, label: str):\n logger.debug(f'Deleting all jobs for species {label}')\n for value in self.job_dict[label].values():\n if value in ['conformers', 'tsg']:\n for job_name, job in self.job_dict[label][value].items():\n if label in self.running_jobs.keys() and job_name in self.running_jobs[label] \\\n and job.execution_type != 'incore':\n logger.info(f'Deleted job {value}{job_name}')\n job.delete()\n for job_name, job in value.items():\n if label in self.running_jobs.keys() and job_name in self.running_jobs[label] \\\n and job.execution_type != 'incore':\n logger.info(f'Deleted job {job_name}')\n job.delete()\n self.running_jobs[label] = list()\n self.output[label]['paths'] = {key: '' if key != 'irc' else list() for key in self.output[label]['paths'].keys()}",
"def delete_scheduler_jobs(self, ids=None):\n try:\n self.logger.info('delete_scheduler_jobs called.')\n\n # Prepare query URL\n self.logger.info('Preparing query URL for delete_scheduler_jobs.')\n _url_path = '/public/scheduler'\n _query_builder = self.config.get_base_uri()\n _query_builder += _url_path\n _query_url = APIHelper.clean_url(_query_builder)\n\n # Prepare headers\n self.logger.info('Preparing headers for delete_scheduler_jobs.')\n _headers = {\n 'accept': 'application/json',\n 'content-type': 'application/json; charset=utf-8'\n }\n\n # Prepare and execute request\n self.logger.info(\n 'Preparing and executing request for delete_scheduler_jobs.')\n _request = self.http_client.delete(\n _query_url,\n headers=_headers,\n parameters=APIHelper.json_serialize(ids))\n AuthManager.apply(_request, self.config)\n _context = self.execute_request(_request,\n name='delete_scheduler_jobs')\n\n # Endpoint and global error handling using HTTP status codes.\n self.logger.info('Validating response for delete_scheduler_jobs.')\n if _context.response.status_code == 0:\n raise RequestErrorErrorException('Error', _context)\n self.validate_response(_context)\n\n except Exception as e:\n self.logger.error(e, exc_info=True)\n raise",
"def empty(self):\n job_ids = self.connection._lrange(self.key, 0, -1)\n jobs = []\n for job_id in job_ids:\n try:\n job = self.connection.get_job(job_id)\n jobs.append(job)\n except NoSuchJobError:\n pass\n\n for job in jobs:\n job.delete()\n\n self.connection._delete(self.key)\n return len(jobs)",
"def clean_jobs():\n\n # Delete all job related resources.\n redis_controller.delete_pending_jobs_queue()\n redis_controller.delete_killed_jobs_queue()\n name_to_node_details = redis_controller.get_name_to_node_details()\n for _, node_details in name_to_node_details.items():\n node_hostname = node_details[\"hostname\"]\n for container_name in node_details[\"containers\"]:\n requests.delete(\n url=f\"http://{node_hostname}:{node_details['api_server']['port']}/containers/{container_name}\",\n )\n return {}",
"def delete(\n address: Optional[str],\n job_id: str,\n headers: Optional[str],\n verify: Union[bool, str],\n):\n client = _get_sdk_client(address, headers=headers, verify=verify)\n client.delete_job(job_id)\n cli_logger.print(f\"Job '{job_id}' deleted successfully\")",
"def delete_all_jobs(self, phase=None, regex=None):\n\n self.check_all_jobs()\n\n if regex:\n pattern = re.compile(\"{}\".format(regex))\n groups = [pattern.match(self.table_dict.values()[i]).group()\n for i in range(len(self.table_dict.values()))]\n matching_tables = [groups[i] for i in range(len(groups))\n if groups[i] in self.table_dict.values()]\n\n if phase:\n phase = [phase[i].upper() for i in range(len(phase))]\n if regex:\n for key in self.job_dict.keys():\n if self.job_dict[key] in phase:\n if key in self.table_dict.keys():\n if self.table_dict[key] in matching_tables:\n result = self.session.delete(\n CosmoSim.QUERY_URL + \"/{}\".format(key),\n auth=(self.username, self.password),\n data={'follow': ''})\n if not result.ok:\n result.raise_for_status()\n warnings.warn(\"Deleted job: {0} (Table: {1})\"\n .format(key,\n self.table_dict[key]))\n if not regex:\n for key in self.job_dict.keys():\n if self.job_dict[key] in phase:\n result = self.session.delete(\n CosmoSim.QUERY_URL + \"/{}\".format(key),\n auth=(self.username, self.password),\n data={'follow': ''})\n if not result.ok:\n result.raise_for_status()\n warnings.warn(\"Deleted job: {}\".format(key))\n\n if not phase:\n if regex:\n for key in self.job_dict.keys():\n if key in self.table_dict.keys():\n if self.table_dict[key] in matching_tables:\n result = self.session.delete(\n CosmoSim.QUERY_URL + \"/{}\".format(key),\n auth=(self.username, self.password),\n data={'follow': ''})\n if not result.ok:\n result.raise_for_status()\n warnings.warn(\"Deleted job: {0} (Table: {1})\"\n .format(key, self.table_dict[key]))\n if not regex:\n for key in self.job_dict.keys():\n result = self.session.delete(\n CosmoSim.QUERY_URL + \"/{}\".format(key),\n auth=(self.username, self.password),\n data={'follow': ''})\n if not result.ok:\n result.raise_for_status()\n warnings.warn(\"Deleted job: {}\".format(key))\n\n self._existing_tables()\n return",
"def delete(job_id):\n job = JobModel.get_one_job(job_id)\n if not job:\n return custom_response({'Error':'Job Not Found'}, 404)\n\n JobModel.query.filter(JobModel.job_id == job_id).delete()\n\n return custom_response({'Message': 'Deleted'}, 204)",
"def delete_dependents(self, pipeline: Optional['Pipeline'] = None):\n connection = pipeline if pipeline is not None else self.connection\n for dependent_id in self.dependent_ids:\n try:\n job = Job.fetch(dependent_id, connection=self.connection, serializer=self.serializer)\n job.delete(pipeline=pipeline, remove_from_queue=False)\n except NoSuchJobError:\n # It could be that the dependent job was never saved to redis\n pass\n connection.delete(self.dependents_key)",
"def killBatchJobs(self, jobIDs):\n raise NotImplementedError('Abstract method: killBatchJobs')",
"def killall(self):\n\n for job_id, job in self.jobs:\n backend.kill( job )",
"def DeleteJob(self, job_urn, token=None):\n aff4.FACTORY.Delete(job_urn, token=token)",
"def deleteJob(self, jobId):\n params = {'id': jobId}\n try:\n return self.gc.delete(JobUtils.JOB_ID_PATH, parameters=params)\n except HttpError as e:\n if e.status == 400:\n print('Error. invalid job id:', jobId)\n return {}\n raise",
"def _delete_job(self, job):",
"def clear_jobs(self):\n with self._mutex:\n self._jobs = []",
"def delete(self):\n parser = reqparse.RequestParser()\n parser.add_argument(\"job_id\", type=str, location=\"form\")\n args = parser.parse_args()\n job_id = args[\"job_id\"]\n if job_id is None or job_id == \"\":\n return errors.all_errors(\n \"CLIENT_MISSING_PARAMETER\", \"job_id (str) parameter is required\"\n )\n\n get_job_info = get(\n config.Config.FLASK_ENDPOINT + \"/api/scheduler/job\",\n headers={\"X-SOCA-TOKEN\": config.Config.API_ROOT_KEY},\n params={\"job_id\": job_id},\n verify=False,\n ) # nosec\n\n if get_job_info.status_code != 200:\n return {\n \"success\": False,\n \"message\": \"Unable to retrieve this job. Job may have terminated\",\n }, 500\n else:\n job_info = get_job_info.json()[\"message\"]\n job_owner = job_info[\"Job_Owner\"].split(\"@\")[0]\n request_user = request.headers.get(\"X-SOCA-USER\")\n if request_user is None:\n return errors.all_errors(\"X-SOCA-USER_MISSING\")\n if request_user != job_owner:\n return errors.all_errors(\"CLIENT_NOT_OWNER\")\n try:\n qdel_command = config.Config.PBS_QDEL + \" \" + job_id\n try:\n delete_job = subprocess.check_output(shlex.split(qdel_command))\n return {\"success\": True, \"message\": \"Job deleted\"}\n except Exception as err:\n return {\n \"success\": False,\n \"message\": \"Unable to execute qdel command: \" + str(err),\n }, 500\n\n except Exception as err:\n return {\"success\": False, \"message\": \"Unknown error: \" + str(err)}, 500",
"async def process_jobs(*, jobs=1):\n # Here comes the magic! Extract a message from the channel\n # designated for `MyJobManager` and dispatch it to the\n # `MyJobManager` through the communicator. This hack allows to\n # test JobManager subclass without running the separate worker\n # process.\n for _ in range(jobs):\n message = await ch_layers.get_channel_layer().receive(\n MyJobManager.job_channel_name\n )\n await comm.send_input(message)\n # Wait until all job manager jobs finish.\n while len(comm.instance.finished_jobs) != jobs:\n await asyncio.sleep(0.01)\n # Cleanup finished jobs for the case when `process_jobs` is run\n # multiple times in a single test.\n comm.instance.finished_jobs.clear()",
"def queue_delete(queue):\n\n for job in queue.jobs:\n job_delete(job)\n if os.path.exists(queue.data_abspath):\n os.rmdir(queue.data_abspath)\n db.session.delete(queue)\n db.session.commit()",
"def delete_incomplete_jobs(dry_run=False):\n logger = fsurfer.log.get_logger()\n conn = fsurfer.helpers.get_db_client()\n cursor = conn.cursor()\n job_query = \"SELECT jobs.id, \" \\\n \" jobs.username, \" \\\n \" jobs.state, \" \\\n \" jobs.subject \" \\\n \"FROM freesurfer_interface.jobs AS jobs \" \\\n \"LEFT JOIN freesurfer_interface.job_run \" \\\n \" ON jobs.id = job_run.job_id \" \\\n \"WHERE jobs.state = 'DELETE PENDING' AND \" \\\n \" job_run.job_id IS NULL\"\n job_update = \"UPDATE freesurfer_interface.jobs \" \\\n \"SET state = 'DELETED' \" \\\n \"WHERE id = %s;\"\n try:\n cursor.execute(job_query)\n for row in cursor.fetchall():\n workflow_id = row[0]\n username = row[1]\n logger.info(\"Deleting workflow {0} for user {1}\".format(workflow_id,\n username))\n deletion_list = []\n # add input file\n input_files = get_input_files(workflow_id)\n if input_files is None:\n logger.error(\"Can't find input files for \" +\n \"workflow {0}\".format(workflow_id))\n else:\n deletion_list.extend(input_files)\n for entry in deletion_list:\n if dry_run:\n sys.stdout.write(\"Would delete {0}\\n\".format(entry))\n else:\n logger.info(\"Removing {0}\".format(entry))\n if not purge_workflow_file(entry):\n logger.error(\"Can't remove {0} for job {1}\".format(entry,\n workflow_id))\n logger.info(\"Setting workflow {0} to DELETED\".format(workflow_id))\n cursor.execute(job_update, [workflow_id])\n if dry_run:\n conn.rollback()\n else:\n conn.commit()\n except psycopg2.Error as e:\n logger.exception(\"Error: {0}\".format(e))\n return 1\n finally:\n conn.commit()\n conn.close()\n\n return 0",
"def delete(self, job_id):\n # Only admin can delete any job\n if not current_user.is_admin():\n return get_message_json('删除任务需要管理员权限'), HTTPStatus.FORBIDDEN\n\n try:\n result = jobs.delete_job_by_id(job_id)\n if result == 1:\n return get_message_json('已删除该任务'), HTTPStatus.OK\n else:\n if jobs.find_job_by_id(job_id) is None:\n return get_message_json('任务不存在'), HTTPStatus.NOT_FOUND\n return get_message_json('未知的任务删除失败'), HTTPStatus.BAD_REQUEST\n except Exception as err:\n return handle_internal_error(str(err))"
] | [
"0.8024807",
"0.7764335",
"0.73224807",
"0.72803247",
"0.7052461",
"0.6834618",
"0.68255013",
"0.67485654",
"0.6677148",
"0.6632033",
"0.6413806",
"0.6378145",
"0.63686645",
"0.63380384",
"0.63329905",
"0.6238826",
"0.6130432",
"0.61263555",
"0.612368",
"0.6074361",
"0.60686594",
"0.60634243",
"0.6008517",
"0.59771764",
"0.5970086",
"0.59528154",
"0.59033",
"0.579432",
"0.5683713",
"0.5665759"
] | 0.8079405 | 0 |
Obtain the legacy, nonUEFI, boot protocol of a NIC. | def get_nic_legacy_boot_protocol(self, nic_id):
return self._nic_cfg.get_nic_legacy_boot_protocol(nic_id) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_nic_legacy_boot_protocol(self, nic_id, value):\n return self._nic_cfg.set_nic_legacy_boot_protocol(nic_id, value)",
"def set_nic_legacy_boot_protocol_none(self, nic_id):\n return self._nic_cfg.set_nic_legacy_boot_protocol(nic_id, 'NONE')",
"def set_nic_legacy_boot_protocol_pxe(self, nic_id):\n return self._nic_cfg.set_nic_legacy_boot_protocol(nic_id, 'PXE')",
"def get_boot_mode():\n boot_mode = 'Legacy'\n try:\n reg_key = winreg.OpenKey(\n winreg.HKEY_LOCAL_MACHINE, r'System\\CurrentControlSet\\Control')\n reg_value = winreg.QueryValueEx(reg_key, 'PEFirmwareType')[0]\n if reg_value == 2:\n boot_mode = 'UEFI'\n except:\n boot_mode = 'Unknown'\n\n return boot_mode",
"def is_nic_legacy_boot_protocol_none(self, nic_id):\n return self._nic_cfg.is_nic_legacy_boot_protocol_none(nic_id)",
"def get_default_iface_name_linux():\n route = \"/proc/net/route\"\n with open(route) as f:\n for line in f.readlines():\n try:\n iface, dest, _, flags, _, _, _, _, _, _, _, = line.strip().split()\n if dest != '00000000' or not int(flags, 16) & 2:\n continue\n return iface\n except:\n continue",
"def protocol(self):\n return self._host[CONF_PROTOCOL]",
"def set_protocol(cls, interface_name, proto='provision'): # pragma: no cover\n if proto not in cls.supported_proto:\n return\n try:\n ret = cls.get_logical_ifname(interface_name, proto)\n if not ret:\n return\n os.system('uci set network.%s.proto=%s' % (ret, proto))\n os.system('uci commit network')\n os.system('/etc/init.d/network reload')\n if proto == cls.supported_proto[1]:\n os.system('sysctl -w net.ipv6.conf.%s.autoconf=0' % interface_name)\n os.system('sysctl -w net.ipv6.conf.%s.use_tempaddr=2' % interface_name)\n cls.logger.debug(\"set %s[%s] DCHP protocol to %s\", interface_name, ret, proto)\n except OSError as e:\n cls.logger.error(\"Got exception:%s\" % str(e))",
"def get_protocol(binding_id):\n binding_to_protocol = {VID_TAXII_HTTP_10: \"http\", VID_TAXII_HTTPS_10: \"https\"}\n try:\n return binding_to_protocol[binding_id]\n except:\n raise ValueError(\"Unknown Protocol Binding ID %s\" % binding_id)",
"def __ip_protocol(self, proto_num):\n if proto_num in self.protocols:\n return self.protocols[proto_num]\n return str(proto_num)",
"def get_logical_ifname(self, interface_name, proto='provision'): # pragma: no cover\n output = check_output(['uci', 'show', 'network'])\n network_list = output.strip().split('\\n')\n for config in network_list:\n cfg, option = config.split('=')\n net_prex = cfg.split(\".\")\n if net_prex[-1] == \"proto\" and str(option) != proto:\n ifname = '.'.join(net_prex[:-1]) + '.ifname'\n interface = check_output(['uci', 'get', ifname]).split('\\n')[0]\n if interface == interface_name:\n return net_prex[1]\n return ''",
"def is_nic_legacy_boot_protocol_pxe(self, nic_id):\n return self._nic_cfg.is_nic_legacy_boot_protocol_pxe(nic_id)",
"def ip_protocol(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"ip_protocol\")",
"def find_nic():\n result = subprocess.run([\"iw\", \"dev\"], capture_output=True).stdout.decode()\n network_interface_controllers = wlan_code.findall(result)\n return network_interface_controllers",
"def ip_protocol(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"ip_protocol\")",
"def protocol(self):\n self._recv_protocol()\n return self._protocol",
"def proto_check(proto):\n # Check for TCP\n if proto == 6:\n return 'tcp'\n # Check for UDP\n elif proto == 17:\n return 'udp'\n else:\n return None",
"def get_ip_version(network):\n if netaddr.IPNetwork(network).version == 6:\n return \"IPv6\"\n elif netaddr.IPNetwork(network).version == 4:\n return \"IPv4\"",
"def get_ip_version(network):\r\n if netaddr.IPNetwork(network).version == 6:\r\n return \"IPv6\"\r\n elif netaddr.IPNetwork(network).version == 4:\r\n return \"IPv4\"",
"def port_nic():",
"def DetectWirelessInterface(self):\n iface = self.wifi.DetectWirelessInterface()\n if iface:\n print 'Automatically detected wireless interface ' + iface\n else:\n print \"Couldn't detect a wireless interface.\"\n return str(iface)",
"def protocol(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"protocol\")",
"def protocol(self) -> Optional[pulumi.Input['TargetServerProtocol']]:\n return pulumi.get(self, \"protocol\")",
"def protocol(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"protocol\")",
"def protocol(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"protocol\")",
"def protocol(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"protocol\")",
"def protocol(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"protocol\")",
"def protocol(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"protocol\")",
"def getProtocol(self) -> str:\n ...",
"def get_pending_boot_mode(self):\n headers, uri, bios_settings = self._check_bios_resource(['BootMode'])\n _, _, settings = self._get_bios_settings_resource(bios_settings)\n boot_mode = settings.get('BootMode')\n if boot_mode == 'LegacyBios':\n boot_mode = 'legacy'\n return boot_mode.upper()"
] | [
"0.69560814",
"0.66056216",
"0.63252735",
"0.62761366",
"0.610527",
"0.59243083",
"0.58971286",
"0.5884942",
"0.58824575",
"0.58585525",
"0.58509755",
"0.5845862",
"0.580865",
"0.57988596",
"0.5784832",
"0.57732546",
"0.5768472",
"0.57367367",
"0.57293147",
"0.5726689",
"0.5721213",
"0.5698573",
"0.56934905",
"0.5692268",
"0.5692268",
"0.5692268",
"0.5692268",
"0.5692268",
"0.5689939",
"0.5687867"
] | 0.81074166 | 0 |
Obtain the link status, up or down, of a NIC. | def get_nic_link_status(self, nic_id):
return self._nic_mgmt.get_nic_link_status(nic_id) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_interface_status(conn_obj, interface, device=\"dut\"):\n command = \"cat /sys/class/net/{}/operstate\".format(interface)\n if device==\"dut\":\n return utils_obj.remove_last_line_from_string(st.show(conn_obj, command, skip_tmpl=True))",
"def IsLinkup(nic,timeout):\n nic = nic.strip()\n current = time.time()\n timeout += current\n while current < timeout:\n data = os.popen(\"ipconfig\").read().split(\"Ethernet adapter\")\n for item in data:\n if item.count(nic) and item.count(\"isconnected\") == 0: #Connected\n return 1\n time.sleep(0.5)\n current = time.time()\n return 0",
"def IsLinkdown(nic,timeout):\n nic = nic.strip()\n current = time.time()\n timeout += current\n while current < timeout:\n data = os.popen(\"ipconfig\").read().split(\"Ethernet adapter\")\n for item in data:\n if item.count(nic) and item.count(\"isconnected\"): #Disconnected\n return 1\n time.sleep(0.5)\n current = time.time()\n return 0",
"def get_status(cls, client_object):\n return client_object.ovsdb.Interface.get_one(\n search='name=%s' % client_object.name).link_state",
"def status(ctx):\n return show_network_status()",
"def is_nic_link_up(self, nic_id):\n return self._nic_mgmt.is_nic_link_up(nic_id)",
"def network_state(self):\n states = {\n 0: \"NETWORK_EMPTY\",\n 1: \"NETWORK_IDLE\",\n 2: \"NETWORK_LOADING\",\n 3: \"NETWORK_NO_SOURCE\",\n }\n return states[self._el._parent.execute_script(\"return arguments[0].networkState\", self._el)]",
"def network_state(self):\n states = {\n 0: \"NETWORK_EMPTY\",\n 1: \"NETWORK_IDLE\",\n 2: \"NETWORK_LOADING\",\n 3: \"NETWORK_NO_SOURCE\",\n }\n return states[self._el._parent.execute_script(\"return arguments[0].networkState\", self._el)]",
"def get_network_status(self, who=\"all\", get_iterator=False):\r\n\r\n nsData = self.sendAndRecv(\"GETINFO ns/\"+who+\"\\r\\n\")[0][2]\r\n if get_iterator: return ns_body_iter(nsData)\r\n else: return parse_ns_body(nsData)",
"def get_uplink_cnt(self) -> int:\n try:\n self._serial.transmit(b'\\x53\\x00')\n response = self._get_reply(0x53, 4, 0.25)\n finally:\n self._gpio.sleep()\n\n return int.from_bytes(response[2:6], 'little', signed=False)",
"def get_interfaces_status(device):\n\n try:\n out = device.parse('show ip interface brief')\n except SchemaEmptyParserError as e:\n log.error('No interface information found')\n return None\n\n # {'interface': {'GigabitEthernet1': {'interface_is_ok': 'YES',\n # 'ip_address': '172.16.1.210',\n # 'method': 'DHCP',\n # 'protocol': 'up',\n # 'status': 'up'},\n\n return {key: val.get('status') for key, val in out.get('interface', {}).items()}",
"def status(self):\n return self._select_interface(self._rc_status, self._http_status)",
"async def status(request: web.Request) -> web.Response:\n connectivity = {'status': 'none', 'interfaces': {}}\n try:\n connectivity['status'] = await nmcli.is_connected()\n connectivity['interfaces'] = {\n i.value: await nmcli.iface_info(i) for i in nmcli.NETWORK_IFACES\n }\n log.debug(\"Connectivity: {}\".format(connectivity['status']))\n log.debug(\"Interfaces: {}\".format(connectivity['interfaces']))\n status = 200\n except subprocess.CalledProcessError as e:\n log.error(\"CalledProcessError: {}\".format(e.stdout))\n status = 500\n except FileNotFoundError as e:\n log.error(\"FileNotFoundError: {}\".format(e))\n status = 500\n\n return web.json_response(connectivity, status=status)",
"def net_get_updown():\n\ttry:\n\t\tf = open(\"/proc/net/dev\", \"r\")\n\t\tdata = f.readlines(2000)\n\t\tf.close()\n\t\tnewNetUp = 0\n\t\tnewNetDown = 0\n\t\tfor i in data:\n\t\t\tif i.find(':') != -1 and i.strip().startswith('lo:') == False:\n\t\t\t\tv = i.split(':')[1].split()\n\t\t\t\tnewNetUp = float( v[8] )+newNetUp\n\t\t\t\tnewNetDown = float( v[0] )+newNetDown\n\n\t\n\t\treturn (newNetUp/1024), (newNetDown/1024)\n\texcept:\n\t\tprint(_(\"Can't open /proc/net/dev\"))\n\t\treturn 0,0",
"def get_downlink_cnt(self) -> int:\n\n try:\n self._serial.transmit(b'\\x55\\x00')\n response = self._get_reply(0x55, 4, 0.25)\n finally:\n self._gpio.sleep()\n\n return int.from_bytes(response[2:6], 'little', signed=False)",
"def interface_status(system_ip):\n\n click.secho(\"\\nRetrieving the Interface Status\")\n\n url = base_url + \"/device/interface/synced?deviceId={0}\".format(system_ip)\n\n response = requests.get(url=url, headers=header,verify=False)\n if response.status_code == 200:\n items = response.json()['data']\n else:\n print(\"Failed to get list of interface \" + str(response.text))\n exit()\n\n print(\"\\nInterfaces status for Device = \",system_ip)\n\n headers = [\"Interface Name\", \"IP address\", \"VPN ID\", \"Operational status\"]\n table = list()\n\n for item in items:\n if item.get('ip-address') != \"-\":\n tr = [item.get('ifname'), item.get('ip-address'),item.get('vpn-id'), item.get('if-oper-status')]\n table.append(tr)\n\n try:\n click.echo(tabulate.tabulate(table, headers, tablefmt=\"fancy_grid\"))\n except UnicodeEncodeError:\n click.echo(tabulate.tabulate(table, headers, tablefmt=\"grid\"))",
"def net_stat_recv(x, interface=None):\n if not interface:\n interface = get_netiface()\n if interface:\n return psutil.net_io_counters(pernic=True)[interface].bytes_recv\n else:\n return 0",
"def status(NetPort):\n\t\n\tcommand = [_IPCONFIG, 'getifaddr', NetPort]\n\ttask = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\t(stdout, stderr) = task.communicate()\n\treturn task.returncode",
"def _ifList(self):\n bNetworks = False\n for cmd in self.lstCmd[1:]:\n if cmd == 'networks' or cmd == 'n':\n bNetworks = True\n\n print 'enum interfaces ...'\n with self.wlan.enumInterfaces() as wlanIfData:\n # find each available network for each interface\n # for n,iface in enumerate(wlanIfData.ifaces):\n for n,iface in enumerate(wlanIfData):\n print \"%d : %-40s state:%s\" % (n,iface.strInterfaceDescription, iface.getState())\n if bNetworks:\n with self.wlan.getAvailableNetworks(iface) as wlanNetData:\n print ' %-15s %-30s %-15s %s' % ('Profile', 'SSID','Qual (dbm)','C:Connectable S:Secure P:Profile')\n print ' %-15s %-30s %-15s' % ('=======', '====','==========')\n for nw in wlanNetData:\n sConn = ' '\n sDesc = ''\n if nw.isConnectable():\n sDesc += 'C'\n if nw.isSecure():\n sDesc += 'S'\n if nw.isConnected():\n sConn = '*'\n if nw.hasProfile():\n sDesc += 'P'\n print ' %-15s %-30s %3d%% %.1f %s %s' % (nw.getProfileName(), nw.getSSID(), nw.getSignalQuality(), nw.getSignalQualityInDBM(), sConn, sDesc)",
"def status(self, adapter):\n if adapter not in self._connections:\n return \"idle\"\n return self._connections[adapter][\"status\"]",
"async def get_status(self) -> str:\n return await self.hw_device.status()",
"def get_status(self):\n status = self._status.get_message()\n \n if status == \"N\":\n return \"offline\"\n \n elif status == \"Y\":\n return \"online\"\n \n elif status == \"A\":\n return \"away\"\n \n elif status == \"B\":\n return \"busy\"",
"def check_connectivity(self):\n r = self.run_cmd(\"get-state\")\n return r.startswith(\"device\")",
"def network_interface(self): \n return self._network_interface",
"def is_if_up(ifname):\n with open('/sys/class/net/' + ifname + '/carrier', 'r') as f:\n status = f.readline()\n return (status == '1')",
"def status(self) -> 'outputs.ConnectionStatusResponse':\n return pulumi.get(self, \"status\")",
"def test_interface_status(device, actual, testcase):\n status = actual['interfaces']\n if_name = testcase['params']['interface']\n if_status = status.get(if_name)\n\n if not if_status:\n raise exc.MissingError(\n 'No status for interface',\n missing=if_name)\n\n actual_state = if_status['interfaceStatus']\n expected_state = testcase['expected']['state']\n\n # check expected down state condition\n\n if expected_state == 'down':\n if actual_state != 'disabled':\n raise exc.MismatchError(\n f'Interface {if_name} not down as expected',\n expected=expected_state,\n actual=actual_state\n )\n\n # if here, then interface is down as expected\n return True\n\n # check expected up state condition\n\n if actual_state != 'connected':\n raise exc.MismatchError(\n f'Interface {if_name} not up as expected',\n expected=expected_state,\n actual=actual_state\n )\n\n return True",
"def get_status(self):\n return self.o.read_register(self.dev_id, STATUS)",
"def net_if_stats():\n ret = {}\n rawdict = cext.net_if_stats()\n for name, items in rawdict.items():\n if not PY3:\n assert isinstance(name, unicode), type(name)\n name = py2_strencode(name)\n isup, duplex, speed, mtu = items\n if hasattr(_common, 'NicDuplex'):\n duplex = _common.NicDuplex(duplex)\n ret[name] = _common.snicstats(isup, duplex, speed, mtu, '')\n return ret",
"def read_status(ctl):\n\tr = ctl.bus_read_struct_coherent(tm.status_addr, 'BBBBI')\n\treturn r"
] | [
"0.6978756",
"0.6962209",
"0.68197346",
"0.63944924",
"0.6278568",
"0.6132206",
"0.6125867",
"0.6125867",
"0.60397744",
"0.6013797",
"0.6013276",
"0.59495616",
"0.5935399",
"0.5887618",
"0.5884482",
"0.5877811",
"0.58749413",
"0.5842111",
"0.5832965",
"0.5764003",
"0.5742055",
"0.57284117",
"0.5658039",
"0.5650833",
"0.56254876",
"0.56096184",
"0.5602062",
"0.5600609",
"0.55930984",
"0.5526992"
] | 0.7465701 | 0 |
Obtain a setting of a NIC. | def get_nic_setting(self, nic_id, attribute_name):
return self._nic_cfg.get_nic_setting(nic_id, attribute_name) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_nic_settings(bmc):\n nic_settings = bmc.list_nics()\n return nic_settings",
"def get_setting(self, setting):\n return self.do_rpc(\"get_setting\", key=key)",
"def set_nic_setting(self, nic_id, attribute_name, value):\n return self._nic_cfg.set_nic_setting(nic_id, attribute_name, value)",
"def network_settings(self): # type: () -> t.Dict[str, t.Any]\n return self.inspection['NetworkSettings']",
"def get_setting(self, category, setting):\n category = self.get_setting_category(category)\n if not category:\n return None\n try:\n return category[setting]\n except KeyError:\n return None",
"def get_network_settings(self, nReserved = 0):\n\t\treturn Job(SDK.PrlVmGuest_GetNetworkSettings(self.handle, nReserved)[0])",
"def getnetwork(ipaddr):\n return '192.168.1.0/24'",
"def get_net(con):\n try:\n return con.virtual_network_read(fq_name=conf.get('default_net', 'UNEXPECTED_VALUE'))\n except NoIdError:\n log.debug('Unable to find net.')\n return None",
"def get_net_adapter(self, nIndex):\n\t\treturn handle_to_object(call_sdk_function('PrlVmCfg_GetNetAdapter', self.handle, nIndex))",
"def get_net_adapter(self, nIndex):\n\t\treturn handle_to_object(call_sdk_function('PrlSrvCfg_GetNetAdapter', self.handle, nIndex))",
"def _get_bios_setting(self, bios_property):\n headers, bios_uri, bios_settings = self._check_bios_resource([\n bios_property])\n return bios_settings[bios_property]",
"def set_nic_settings(self, nic_id, settings):\n return self._nic_cfg.set_nic_settings(nic_id, settings)",
"def get_setting(setting, override=None):\n attr_name = 'MUSES_{0}'.format(setting)\n if hasattr(settings, attr_name):\n return getattr(settings, attr_name)\n else:\n if hasattr(defaults, setting):\n return getattr(defaults, setting)\n else:\n return override",
"def get_network(self):\n return self.get_ip_network()[-1]",
"def get_network(self) -> Optional[str]:\n return self.get_value(self._network_attribute)",
"def get_setting(section, option):\n config = configparser.ConfigParser()\n config.read('settings.ini')\n value = config.get(section, option)\n\n return value",
"def network_interface(self): \n return self._network_interface",
"def networkMode(self):\n\n response = self.at.sendCommand(\"AT+CEREG?\")\n\n # If we failed to query the network mode, that's a paddlin'\n if not response:\n raise modem.AtError(response, \"Failed to query network mode\")\n\n lines = response.lines\n\n if len(lines) < 1:\n raise modem.AtError(response, \"Invalid network mode response\")\n\n fields = lines[0].split(\",\")\n\n # If there isn't at least the prefix and the current mode, that's a\n # paddlin'\n if len(fields) < 2:\n raise modem.AtError(response, \"Invalid network mode response\")\n\n try:\n return int(fields[1])\n\n except ValueError:\n raise modem.AtError(response, \"Invalid network mode\")",
"def get_attr(self, server, attribute):\n\t\tattribute = str(attribute)\n\t\tcfg = self.get_cfg(server)\n\t\tif cfg:\n\t\t\treturn cfg.get(attribute)",
"def port_nic():",
"def config_networking(\n self, network_obj, ip, netmask, gateway, domain, dns, guest_hostname\n ):\n\n global_ip = vim.vm.customization.GlobalIPSettings()\n adapter_map = vim.vm.customization.AdapterMapping()\n adapter_map.adapter = vim.vm.customization.IPSettings()\n adapter_map.macAddress = network_obj.macAddress\n if ip:\n adapter_map.adapter.ip = vim.vm.customization.FixedIp()\n adapter_map.adapter.ip.ipAddress = ip\n else:\n adapter_map.adapter.ip = vim.vm.customization.DhcpIpGenerator()\n adapter_map.adapter.subnetMask = netmask\n adapter_map.adapter.gateway = gateway\n global_ip.dnsServerList = dns\n adapter_map.adapter.dnsDomain = domain\n ident = vim.vm.customization.LinuxPrep()\n ident.hostName = vim.vm.customization.FixedName()\n if guest_hostname:\n ident.hostName.name = guest_hostname\n else:\n ident.hostName.name = self.vm_obj.name\n custom_spec = vim.vm.customization.Specification()\n custom_spec.nicSettingMap = [adapter_map]\n custom_spec.identity = ident\n custom_spec.globalIPSettings = global_ip\n return self.vm_obj.Customize(spec=custom_spec)",
"def get_setting(self, id):\n return __settings__.getSetting(id)",
"def get_by_id(cls, context, id):\n db_nic = cls.dbapi.get_nic_by_id(id)\n nic = cls._from_db_object(cls(context), db_nic)\n return nic",
"def fusion_api_get_network_set(self, uri=None, param='', api=None, headers=None):\n return self.network_set.get(uri=uri, api=api, headers=headers, param=param)",
"def network_config(self) -> 'outputs.NetworkConfigResponse':\n return pulumi.get(self, \"network_config\")",
"def network_config(self) -> 'outputs.NetworkConfigResponse':\n return pulumi.get(self, \"network_config\")",
"def get_network(self):\n return self._network",
"def get_nh_tun_dip(self):\n return int(self.get('nhr_tun_dip'))",
"def get_setting(setting_name: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSettingResult:\n __args__ = dict()\n __args__['settingName'] = setting_name\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('azure-native:costmanagement:getSetting', __args__, opts=opts, typ=GetSettingResult).value\n\n return AwaitableGetSettingResult(\n cache=pulumi.get(__ret__, 'cache'),\n id=pulumi.get(__ret__, 'id'),\n kind=pulumi.get(__ret__, 'kind'),\n name=pulumi.get(__ret__, 'name'),\n scope=pulumi.get(__ret__, 'scope'),\n start_on=pulumi.get(__ret__, 'start_on'),\n type=pulumi.get(__ret__, 'type'))",
"def network(self) -> str:\n return pulumi.get(self, \"network\")"
] | [
"0.6571858",
"0.61692965",
"0.590563",
"0.58731306",
"0.58425283",
"0.57504684",
"0.5736546",
"0.5717614",
"0.56472576",
"0.56174994",
"0.5604399",
"0.5568864",
"0.55311763",
"0.5528393",
"0.55063206",
"0.54884344",
"0.5479084",
"0.5471765",
"0.5459784",
"0.5432316",
"0.53471994",
"0.5337927",
"0.53205913",
"0.5318265",
"0.5307765",
"0.5307765",
"0.53073937",
"0.52943647",
"0.52836925",
"0.5196874"
] | 0.7415658 | 0 |